Dataset schema (one row per column; "nullable" marks columns that showed ⌀ in the dump):

| column | dtype | observed range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
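Each record below follows this schema: the metadata columns, the file content, and the per-file statistics. As a minimal sketch of how rows with this schema might be queried, assuming they have been exported to a local Parquet file (the file name code_files.parquet is hypothetical, not part of the dump):

```python
import pandas as pd

# Hypothetical local export of rows with the schema above
df = pd.read_parquet("code_files.parquet")

# Keep well-documented Python files under 100 kB
subset = df[(df["lang"] == "Python")
            & (df["size"] < 100_000)
            & (df["score_documentation"] > 0.4)]

print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]])
```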
hexsha: b7a3ac719d785e13f7265ad9a05f4b664dd99826 | size: 3,685 | ext: py | lang: Python
repo_path: MIssions_to_Mars/scrape_mars_mission.py | repo_name: pwickliff1/web-scraping-challenge | repo_head_hexsha: 56e2b5e6905c17fce762aeb2ae1ad8cb0f48f8d6 | repo_licenses: ["ADSL"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
# Dependencies
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd


def scrape():
    mars_data = {}

    # Url of website to be scraped
    url = 'https://mars.nasa.gov/news/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Get the latest news title
    news_title = soup.find('div', class_="content_title").text.strip()
    # Get the latest news paragraph text
    news_p = soup.find('div', class_="rollover_description").text.strip()

    # Url of JPL Featured Space Image to scrape
    url = 'https://www.jpl.nasa.gov'
    query = '/spaceimages/?search=&category=Mars'
    response = requests.get(url + query)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Get image url
    image = soup.find('div', class_="carousel_items")
    featured_image_url = url + image.a['data-fancybox-href']

    # Trick Twitter into bypassing its Javascript check to get Mars weather
    user_agent_old_phone = 'Nokia5310XpressMusic_CMCC/2.0 (10.10) Profile/MIDP-2.1 ' \
                           'Configuration/CLDC-1.1 UCWEB/2.0 (Java; U; MIDP-2.0; en-US; ' \
                           'Nokia5310XpressMusic) U2/1.0.0 UCBrowser/9.5.0.449 U2/1.0.0 Mobile'
    headers = {'User-Agent': user_agent_old_phone}
    # Scrape twitter for Mars weather
    url_twitter = 'https://twitter.com/marswxreport?lang=en'
    resp = requests.get(url_twitter, headers=headers)  # Send request
    code = resp.status_code  # HTTP response code
    if code == 200:
        soup = BeautifulSoup(resp.text, 'lxml')  # Parse the HTML
    else:
        print(f'Error loading Twitter: {code}')
    # Get Mars weather from the latest InSight tweet
    mars_weather = soup.find(text=re.compile("InSight"))

    # Get Mars facts data from webpage
    url = 'https://space-facts.com/mars/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Get table data
    table = soup.find(id="tablepress-p-mars-no-2")

    # Get Mars facts data from webpage and convert to an html table string
    url = 'https://space-facts.com/mars/'
    # Scrape using pandas
    tables = pd.read_html(url)
    mars_facts = tables[2]
    mars_facts.columns = ['Description', 'Value']
    mars_facts.set_index('Description', inplace=True)
    # Convert to html string
    mars_facts_html = mars_facts.to_html()

    # Url for main page
    url_main = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    # Python dictionary with urls for pictures of each of Mars' hemispheres
    hemisphere_image_urls = [
        {"title": "Cerberus Hemisphere Enhanced", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg"},
        {"title": "Schiaparelli Hemisphere Enhanced", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg"},
        {"title": "Syrtis_Major Hemisphere Enhanced", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg"},
        {"title": "Valles_Marineris Hemisphere Enhanced", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg"}
    ]

    mars_data["news_title"] = news_title
    mars_data["news_p"] = news_p
    mars_data["featured_image_url"] = featured_image_url
    mars_data["mars_weather"] = mars_weather
    mars_data["mars_facts_html"] = mars_facts_html
    mars_data["hemisphere_image_urls"] = hemisphere_image_urls

    # Return results
    return mars_data
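Since scrape() just returns a plain dictionary, it can be smoke-tested without whatever web app consumes it. A minimal driver, written as if appended at the bottom of the module (the `__main__` guard and the value truncation are illustrative additions, not part of the source file):

```python
import pprint

if __name__ == "__main__":
    # Hypothetical smoke test: run the scraper once and peek at each field.
    mars_data = scrape()
    pprint.pprint({key: str(value)[:80] for key, value in mars_data.items()})
```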
avg_line_length: 35.095238 | max_line_length: 172 | alphanum_fraction: 0.686567 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,082 | score_documentation: 0.564993
hexsha: b7a7160ec0048a1f0be91a335c4abd54fb69fa8b | size: 10,165 | ext: py | lang: Python
repo_path: guiapp/meditech_nls_parser/old/meditech_nls_to_xml.py | repo_name: gcampuzano14/PathISTabs | repo_head_hexsha: ae29a0b71647ecb32fc40e234b5c7276ab5333d9 | repo_licenses: ["MIT"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 1 | stars_event_min/max_datetime: 2017-07-28T14:01:32.000Z | max_issues_count: null | issues_event datetimes: null | max_forks_count: 1 | forks_event_min/max_datetime: 2019-02-14T06:07:24.000Z
#!/usr/bin/env python
import os
import re
import csv


def fil():
    filetext = os.path.join(os.path.dirname(__file__), 'meditech_data', 'RAW')
    outtext_temp = os.path.join(os.path.dirname(__file__), 'meditech_data', 'CLEANEDTEMP')
    outtext = os.path.join(os.path.dirname(__file__), 'meditech_data', 'CLEANED')
    outxml = os.path.join(os.path.dirname(__file__), 'meditech_data', 'MEDITECH_NLS.xml')
    outtabdelim = os.path.join(os.path.dirname(__file__), 'meditech_data', 'TAB_DELIM_MEDITECH_NLS.txt')
    with open(outxml, "w+") as out:
        out.write("<UMH_PATHOLOGY>\n")
    for f in os.listdir(filetext):
        path = os.path.join(filetext, f)
        clean_name = f[:-4] + "_CLEAN.TXT"
        clean_path_temp = os.path.join(outtext_temp, clean_name)
        clean_path = os.path.join(outtext, clean_name)
        cleaner(path, clean_path_temp, clean_path)
        mapper(f, clean_path, outxml, outtabdelim)
    with open(outxml, "a+") as out:
        out.write("</UMH_PATHOLOGY>\n")


def mapper(filename, text, outxml, outtabdelim):
    logger = os.path.join(os.path.dirname(__file__), 'meditech_data', 'meditech_case_log.txt')
    with open(text, "r+") as log:
        t = log.read()
    print "mapping"
    # Each match yields 12 groups: names, account number, unit number, age, sex,
    # DOB, accession, received/collected dates, report body and signout date
    ptnameb = re.findall(r"PATIENT:\s*([\w\s]*\w),?(\D*?\w*)?\s+ACCT\s*#:\s*(\S+)?\s*LOC:?.*?U\s*#:\s*(\S+)?\s*\n*"
                         r"AGE\/SEX:\s+(\d+)\/(\w{1})\s+DOB:(\d*\/*\d*\/*\d*)\s*?.*?\n*.*?\n*"
                         r"Path\s+#:\s+(\d+:\w+:\w+)\s+\w+\s+Received:\s+(\d{2}\/\d{2}\/\d{2})\s*-\s*\d{4}\s+"
                         r"Collected:\s*(\S+)\s*\n*(.*?)(\d+\/\d+\/\d+)\s*\d{4}\s*?\n*?.*?\n?(?=PATIENT:)", t, re.S)
    newlistt = []
    count = 0
    print "creating list"
    for e in ptnameb:
        newlistt.append(list(e))
        count += 1
    print count
    with open(logger, "a+") as log:
        log.write(str(filename) + ": \n" + "COUNT: " + str(count) + "\n")
    reducer(newlistt, outxml, outtabdelim)


def reducer(newlistt, outxml, outtabdelim):
    tabinit = " " * 2 + "<MEDITECH_PATHOLOGY>\n" + " " * 4
    tabend = " " * 2 + "</MEDITECH_PATHOLOGY>\n"
    with open(outxml, "a+") as out:
        for e in newlistt:
            t = e[7].rfind(":") + 1
            nice_accession = e[7][t:]
            xml = (tabinit
                   + "<FIRST_NAME>" + e[0] + "</FIRST_NAME>\n"
                   + " " * 4 + "<LAST_NAME>" + e[1] + "</LAST_NAME>\n"
                   + " " * 4 + "<AGE>" + e[4] + "</AGE>\n"
                   + " " * 4 + "<SEX>" + e[5] + "</SEX>\n"
                   + " " * 4 + "<DOB>" + e[6] + "</DOB>\n"
                   + " " * 4 + "<ACCOUNT_NUM>" + e[2] + "</ACCOUNT_NUM>\n"
                   + " " * 4 + "<U_NUMBER>" + e[3] + "</U_NUMBER>\n"
                   + " " * 4 + "<ACCESSION_NUMBER_RAW>" + e[7] + "</ACCESSION_NUMBER_RAW>\n"
                   + " " * 4 + "<ACCESSION_NUMBER>" + nice_accession + "</ACCESSION_NUMBER>\n"
                   + " " * 4 + "<RECEIVED>" + e[8] + "</RECEIVED>\n"
                   + " " * 4 + "<COLLECTED>" + e[9] + "</COLLECTED>\n"
                   + " " * 4 + "<SIGNOUT_DATE>" + e[11] + "</SIGNOUT_DATE>\n"
                   + " " * 4 + "<TEXT>" + "<![CDATA[\n" + e[10] + "\n]]>" + "</TEXT>\n"
                   + tabend)
            out.write(xml)
    with open(outtabdelim, 'wb') as csvfile:
        result_writer = csv.writer(csvfile, delimiter="\t")
        result_writer.writerow(["FIRST_NAME", "SECOND_NAME", "U_NUMBER", "DOB", "AGE", "SEX",
                                "ACCESSION_NUMBER", "RECEIVED", "SIGNOUT_DATE", "DX"])
        for e in newlistt:
            dxtext = str(e[10]).replace("\n", " ")
            dxtext = dxtext.replace("\f", " ")
            dxtext = re.sub(r"\s{2,}", " ", dxtext, flags=re.S)
            dxtext = re.sub(r'^[-+=*\/]{1,}', '', dxtext)
            dx = dxtext.lower()
            # Keep only cases whose diagnosis text suggests malignancy
            if ("malignant" in dx or "malignancy" in dx or "carcinoma" in dx or "cancer" in dx
                    or "neoplasm" in dx or "sarcoma" in dx or "lymphoma" in dx or "blastoma" in dx):
                t = e[7].rfind(":") + 1
                nice_accession = e[7][t:]
                patientstr = [str(e[0]), str(e[1]), str(e[3]), str(e[6]), str(e[4]), str(e[5]),
                              nice_accession, str(e[8]), str(e[11]), dxtext]
                result_writer.writerow(patientstr)


def cleaner(filetext, outtext_temp, outtext):
    continuedline = re.compile(r"\n.*\(Continued\)\s*\n", re.MULTILINE)
    disclaimer = re.compile(r"\s*This\sreport\sis\sprivileged,\sconfidential\sand\sexempt\sfrom\sdisclosure\sunder\sapplicable\slaw\.\s*\n"
                            r"\s+If\syou\sreceive\sthis\sreport\sinadvertently,\splease\scall\s\(305\)\s325-5587\sand\s*\n"
                            r"\s+return\sthe\sreport\sto\sus\sby\smail\.\s*\n", re.MULTILINE)
    disclaimer_two = re.compile(r"\s*This\sreport\sis\sprivileged,\sconfidential\sand\sexempt\sfrom\sdisclosure\sunder\sapplicable\slaw\.\s*\n"
                                r"\s+If\syou\sreceive\sthis\sreport\sinadvertently,\splease\scall\s\(305\)\s325-5587\sand", re.MULTILINE)
    headings = re.compile(r"\s*?RUN\s+DATE:\s+\d{2}\/\d{2}\/\d{2}\s+ADVANCED\s+PATHOLOGY\s+ASSOCIATES\s+PAGE\s+\d+\s*\n"
                          r"\s*RUN\s+TIME:\s+\d{4}\s+\*\*\*\sFINAL\sREPORT\s\*\*\*\s*\n+"
                          r"\s*SURGICAL\s+PATHOLOGY\s+REPORT\s*\n\s+1400\s+NW\s+12th\s+Avenue,\s+Miami,\s+FL\s+33136\s*\n"
                          r"\s+Telephone:\s+305-325-5587\s+FAX:\s+305-325-5899\s*\n", re.MULTILINE)
    lines = re.compile(r"-{5,}", re.MULTILINE)
    doub_space = re.compile(r"\n\n", re.MULTILINE)
    illegalxml = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF\u0192]')
    with open(filetext, "r+") as log:
        titler = log.read()
    t = continuedline.sub("", titler)
    t = disclaimer.sub("", t)
    t = disclaimer_two.sub("", t)
    t = headings.sub("", t)
    t = lines.sub("\n", t)
    t = illegalxml.sub("?", t)
    # Collapse runs of blank lines until none remain
    nline = doub_space.findall(t)
    while len(nline) > 0:
        t = doub_space.sub("\n", t)
        nline = doub_space.findall(t)
    print "newlines"
    with open(outtext_temp, "w+") as out:
        out.write(t)
    with open(outtext_temp, "a") as endline:
        # Sentinel record so the (?=PATIENT:) lookahead in mapper() can match the last case
        endline.write("\nPATIENT: XXXX,XXXXX\n")
    inbetween(outtext_temp, outtext)


def inbetween(outtext_temp, outtext):
    with open(outtext_temp, "r+") as log:
        t = log.read()
    prevline = 0
    count = 0
    indicatepte = 0
    splitstuff = t.splitlines()
    linecount = 0
    for line in splitstuff:
        if re.match(r"PATIENT:\s*(?:[\w\s]*\w),?(?:\D*?\w*)?\s+ACCT\s*#:\s*(?:\S+)?\s*LOC:?.*?U\s*#:\s*(?:\S+)?\s*", line):
            patient_head = line + "\n" + splitstuff[linecount + 1] + "\n" + splitstuff[linecount + 2]
            prevline = linecount + 5
            indicatepte = 1
        if linecount > prevline:
            if len(line) > 0:
                indicatepte = 0
        if re.match(r"Path\s+#:\s+(\d+:\w+:\w+)\s+\w+\s+Received:\s+(\d{2}\/\d{2}\/\d{2})\s*-\s*\d{4}\s+Collected:\s*(\S+)\s*", line):
            count += 1
            if indicatepte == 0:
                # Re-attach the most recent patient header to an orphaned report line
                line = patient_head + "\n" + line
                splitstuff[linecount] = line
        linecount += 1
    with open(outtext, "w+") as out:
        for e in splitstuff:
            out.write(e + "\n")
    print count


fil()
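The whole pipeline hinges on the ptnameb regex in mapper(), which slices each cleaned report into twelve capture groups. A minimal sketch of what it extracts, run against an invented one-record sample plus the sentinel line that cleaner() appends; the sample text only mimics the header layout the regex expects and is not real data:

```python
import re

# Invented sample record in the MEDITECH header layout (not real patient data)
sample = (
    "PATIENT: DOE,JOHN   ACCT #: A123  LOC: 4W  U #: U456\n"
    "AGE/SEX: 64/M  DOB:01/02/54\n"
    "Path #: 16:SP:0001 FINAL Received: 03/04/16 - 0900 Collected: 03/03/16\n"
    "DIAGNOSIS: benign tissue. 03/05/16 1200\n"
    "PATIENT: XXXX,XXXXX\n"  # sentinel appended by cleaner()
)

pattern = (r"PATIENT:\s*([\w\s]*\w),?(\D*?\w*)?\s+ACCT\s*#:\s*(\S+)?\s*LOC:?.*?U\s*#:\s*(\S+)?\s*\n*"
           r"AGE\/SEX:\s+(\d+)\/(\w{1})\s+DOB:(\d*\/*\d*\/*\d*)\s*?.*?\n*.*?\n*"
           r"Path\s+#:\s+(\d+:\w+:\w+)\s+\w+\s+Received:\s+(\d{2}\/\d{2}\/\d{2})\s*-\s*\d{4}\s+"
           r"Collected:\s*(\S+)\s*\n*(.*?)(\d+\/\d+\/\d+)\s*\d{4}\s*?\n*?.*?\n?(?=PATIENT:)")

for record in re.findall(pattern, sample, re.S):
    # e.g. ('DOE', 'JOHN', 'A123', 'U456', '64', 'M', '01/02/54',
    #       '16:SP:0001', '03/04/16', '03/03/16', ..., '03/05/16')
    print(record)
```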
avg_line_length: 50.073892 | max_line_length: 174 | alphanum_fraction: 0.52789 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 4,240 | score_documentation: 0.417118
hexsha: b7a7213417448a10f646593e2af28f99d94c2f47 | size: 3,144 | ext: py | lang: Python
repo_path: paper_plots/small_vs_large_box.py | repo_name: finn-dodgson/DeepHalos | repo_head_hexsha: 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | repo_licenses: ["MIT"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
import numpy as np
from plots import plots_for_predictions as pp
from utilss import distinct_colours as dc
import matplotlib.pyplot as plt
c = dc.get_distinct(4)
path = '/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/lr5e-5/'
p1 = np.load(path + "seed_20/predicted_sim_6_epoch_09.npy")
t1 = np.load(path + "seed_20/true_sim_6_epoch_09.npy")
p_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy")
t_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy")
path_av = "/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/averaged_boxes/log_alpha_-4.3/"
p_av = np.load(path_av + "predicted_sim_6_epoch_32.npy")
t_av = np.load(path_av + "true_sim_6_epoch_32.npy")
p_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy")
t_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy")
# Raw-density case
f1, a, m = pp.plot_histogram_predictions(p1, t1, radius_bins=False, particle_ids=None, errorbars=False,
label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf")
# Averaged-density case
f1, a, m = pp.plot_histogram_predictions(p_av, t_av, radius_bins=False, particle_ids=None, errorbars=False,
label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf")
# Raw vs averaged density in the large box
f1, a, m = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False,
label="Raw density", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
axes=a, color="C1", label="Averaged density")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf")
avg_line_length: 60.461538 | max_line_length: 126 | alphanum_fraction: 0.699746 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,436 | score_documentation: 0.456743
hexsha: b7a964ee74b74803fefa91500045e19a16c2244a | size: 85,945 | ext: py | lang: Python
repo_path: regions.py | repo_name: greggy/python-ipgeobase | repo_head_hexsha: 593e4dc7e55e0c90a1979e586f03c013f6ac057a | repo_licenses: ["BSD-3-Clause"] (identical across the max_stars, max_issues, and max_forks column groups)
max_stars_count: 1 | stars_event_min/max_datetime: 2017-11-12T11:26:25.000Z | max_issues_count: null | issues_event datetimes: null | max_forks_count: null | forks_event datetimes: null
# -*- coding: utf-8 -*-
REGIONS = {'USRI': 'Rhode Island', 'UY02': 'Canelones', 'KR21': 'Ulsan-gwangyoksi', 'KR20': 'Kyongsang-namdo', 'KM02': 'Grande Comore', 'KM03': 'Moheli', 'CO22': 'Putumayo', 'BN18': 'Zou', 'BN17': 'Plateau', 'BN16': 'Oueme', 'BN15': 'Tutong', 'BN14': 'Littoral', 'BN13': 'Donga', 'BN12': 'Kouffo', 'BN11': 'Collines', 'BN10': 'Temburong', 'KP09': 'Kangwon-do', 'KP08': 'Kaesong-si', 'KP07': 'Hwanghae-bukto', 'KP06': 'Hwanghae-namdo', 'KP01': 'Chagang-do', 'KP03': 'Hamgyong-namdo', 'IS40': 'Norourland Eystra', 'CO28': 'Tolima', 'TH77': 'Amnat Charoen', 'CO29': 'Valle del Cauca', 'TH76': 'Udon Thani', 'IS44': 'Vestfiroir', 'IS45': 'Vesturland', 'MK80': 'Plasnica', 'MK81': 'Podares', 'MK82': 'Prilep', 'MK83': 'Probistip', 'MK84': 'Radovis', 'MK85': 'Rankovce', 'MK86': 'Resen', 'MK87': 'Rosoman', 'MK88': 'Rostusa', 'MK89': 'Samokov', 'NG23': 'Kaduna', 'NG22': 'Cross River', 'SZ05': 'Praslin', 'SZ04': 'Shiselweni', 'SZ03': 'Manzini', 'SZ02': 'Lubombo', 'SZ01': 'Hhohho', 'NG26': 'Benue', 'BG41': 'Gabrovo', 'BG40': 'Dobrich', 'BG43': 'Khaskovo', 'BG42': 'Grad Sofiya', 'BG45': 'Kyustendil', 'BG44': 'Kurdzhali', 'BG47': 'Montana', 'BG46': 'Lovech', 'BG49': 'Pernik', 'BG48': 'Pazardzhik', 'NG24': 'Katsina', 'DO19': 'Salcedo', 'KH18': 'Svay Rieng', 'KH19': 'Takeo', 'KH12': 'Pursat', 'KH13': 'Preah Vihear', 'KH10': 'Mondulkiri', 'KH11': 'Phnum Penh', 'KH16': 'Siem Reap', 'KH17': 'Stung Treng', 'KH14': 'Prey Veng', 'KH15': 'Ratanakiri Kiri', 'GN15': 'Kerouane', 'GN16': 'Kindia', 'GN17': 'Kissidougou', 'GN10': 'Forecariah', 'GN11': 'Fria', 'GN12': 'Gaoual', 'GN13': 'Gueckedou', 'GN18': 'Koundara', 'GN19': 'Kouroussa', 'GBR1': 'Ballymoney', 'GBR3': 'Belfast', 'GBR2': 'Banbridge', 'GBR5': 'Castlereagh', 'GBR4': 'Carrickfergus', 'GBR7': 'Cookstown', 'GBR6': 'Coleraine', 'GBR9': 'Down', 'GBR8': 'Craigavon', 'CZ87': 'Plzensky kraj', 'CZ86': 'Pardubicky kraj', 'CZ85': 'Moravskoslezsky kraj', 'CZ84': 'Olomoucky kraj', 'CZ83': 'Liberecky kraj', 'CZ82': 'Kralovehradecky kraj', 'CZ81': 'Karlovarsky kraj', 'CZ80': 'Vysocina', 'CZ89': 'Ustecky kraj', 'CZ88': 'Stredocesky kraj', 'MD87': 'Soroca', 'MD86': 'Soldanesti', 'MD85': 'Singerei', 'MD84': 'Riscani', 'MD83': 'Rezina', 'MD81': 'Ocnita', 'MD80': 'Nisporeni', 'VN55': 'Binh Thuan', 'MD89': 'Straseni', 'MD88': 'Stefan-Voda', 'CN09': 'Henan', 'CN08': 'Heilongjiang', 'CN03': 'Jiangxi', 'CN02': 'Zhejiang', 'CN01': 'Anhui', 'CN07': 'Fujian', 'CN06': 'Qinghai', 'CN05': 'Jilin', 'CN04': 'Jiangsu', 'DE04': 'Hamburg', 'VN50': 'Vinh Phu', 'PE09': 'Huancavelica', 'PE08': 'Cusco', 'PE01': 'Amazonas', 'PE03': 'Apurimac', 'PE02': 'Ancash', 'PE05': 'Ayacucho', 'PE04': 'Arequipa', 'PE07': 'Callao', 'PE06': 'Cajamarca', 'AE02': 'Ajman', 'DE08': 'Rheinland-Pfalz', 'AE01': 'Abu Dhabi', 'AE06': 'Sharjah', 'AE07': 'Umm Al Quwain', 'GY19': 'Upper Takutu-Upper Essequibo', 'GY18': 'Upper Demerara-Berbice', 'AE04': 'Fujairah', 'AF08': 'Ghazni', 'GY13': 'East Berbice-Corentyne', 'GY12': 'Demerara-Mahaica', 'GY11': 'Cuyuni-Mazaruni', 'GY10': 'Barima-Waini', 'GY17': 'Potaro-Siparuni', 'GY16': 'Pomeroon-Supenaam', 'GY15': 'Mahaica-Berbice', 'GY14': 'Essequibo Islands-West Demerara', 'BI10': 'Bururi', 'BI11': 'Cankuzo', 'BI12': 'Cibitoke', 'BI13': 'Gitega', 'BI14': 'Karuzi', 'BI15': 'Kayanza', 'BI16': 'Kirundo', 'BI17': 'Makamba', 'BI18': 'Muyinga', 'BI19': 'Ngozi', 'IE02': 'Cavan', 'USSC': 'South Carolina', 'AF02': 'Badghis', 'USSD': 'South Dakota', 'IE06': 'Donegal', 'IE04': 'Cork', 'MK93': 'Sopotnica', 'MK92': 'Sopiste', 'MK91': 'Sipkovica', 'MK90': 'Saraj', 'MK97': 'Staro Nagoricane', 'MK96': 
'Star Dojran', 'MK95': 'Staravina', 'MK94': 'Srbinovo', 'MK99': 'Struga', 'MK98': 'Stip', 'CD01': 'Bandundu', 'KG01': 'Bishkek', 'CD05': 'Katanga', 'CD04': 'Kasai-Oriental', 'CD06': 'Kinshasa', 'SD32': 'Bahr al Ghazal', 'LB02': 'Al Janub', 'LB03': 'Liban-Nord', 'LB01': 'Beqaa', 'LB06': 'Liban-Sud', 'LB07': 'Nabatiye', 'LB04': 'Beyrouth', 'LB05': 'Mont-Liban', 'LB08': 'Beqaa', 'LB09': 'Liban-Nord', 'KH05': 'Kampong Thum', 'KH04': 'Kampong Speu', 'KH07': 'Kandal', 'KH06': 'Kampot', 'KH01': 'Batdambang', 'KH03': 'Kampong Chhnang', 'KH02': 'Kampong Cham', 'KH09': 'Kracheh', 'KH08': 'Koh Kong', 'PT02': 'Aveiro', 'PT03': 'Beja', 'PT04': 'Braga', 'PT05': 'Braganca', 'PT06': 'Castelo Branco', 'PT07': 'Coimbra', 'PT08': 'Evora', 'PT09': 'Faro', 'GN07': 'Dinguiraye', 'GN06': 'Dalaba', 'GN05': 'Dabola', 'GN04': 'Conakry', 'GN03': 'Boke', 'GN02': 'Boffa', 'GN01': 'Beyla', 'GN09': 'Faranah', 'SI17': 'Crnomelj', 'SI16': 'Crna na Koroskem', 'SK06': 'Trencin', 'SK07': 'Trnava', 'SK04': 'Nitra', 'SK05': 'Presov', 'SK02': 'Bratislava', 'SK03': 'Kosice', 'SK01': 'Banska Bystrica', 'SK08': 'Zilina', 'GBW5': 'South Lanarkshire', 'MX27': 'Tabasco', 'CN32': 'Sichuan', 'CN33': 'Chongqing', 'CN30': 'Guangdong', 'CN31': 'Hainan', 'BH17': 'Al Janubiyah', 'VE18': 'Portuguesa', 'VE19': 'Sucre', 'VE11': 'Falcon', 'VE12': 'Guarico', 'VE13': 'Lara', 'VE14': 'Merida', 'VE15': 'Miranda', 'VE16': 'Monagas', 'VE17': 'Nueva Esparta', 'BH16': 'Al Asimah', 'NE07': 'Zinder', 'MX10': 'Durango', 'MX11': 'Guanajuato', 'MX12': 'Guerrero', 'MX13': 'Hidalgo', 'MX14': 'Jalisco', 'MX15': 'Mexico', 'MX16': 'Michoacan de Ocampo', 'MX17': 'Morelos', 'MX18': 'Nayarit', 'MX19': 'Nuevo Leon', 'DZ55': 'Tipaza', 'BI02': 'Bujumbura', 'BI09': 'Bubanza', 'CV10': 'Sao Nicolau', 'CV11': 'Sao Vicente', 'CV13': 'Mosteiros', 'CV14': 'Praia', 'CV15': 'Santa Catarina', 'CV16': 'Santa Cruz', 'CV17': 'Sao Domingos', 'CV18': 'Sao Filipe', 'CV19': 'Sao Miguel', 'PHB8': 'Cotabato', 'PHB9': 'Dagupan', 'PHB3': 'Calbayog', 'ER01': 'Anseba', 'ER03': "Debubawi K'eyih Bahri", 'ER02': 'Debub', 'ER05': "Ma'akel", 'ER04': 'Gash Barka', 'ER06': "Semenawi K'eyih Bahri", 'NE08': 'Niamey', 'TH80': 'Sa Kaeo', 'PHB5': 'Canlaon', 'TD08': 'Logone Occidental', 'TD09': 'Logone Oriental', 'TD04': 'Chari-Baguirmi', 'TD05': 'Guera', 'TD06': 'Kanem', 'TD07': 'Lac', 'TD01': 'Batha', 'TD02': 'Biltine', 'TD03': 'Borkou-Ennedi-Tibesti', 'LB11': 'Baalbek-Hermel', 'LB10': 'Aakk', 'PT13': 'Leiria', 'PT11': 'Guarda', 'PT10': 'Madeira', 'PT17': 'Porto', 'PT16': 'Portalegre', 'PT14': 'Lisboa', 'PT19': 'Setubal', 'PT18': 'Santarem', 'GN32': 'Kankan', 'GN33': 'Koubia', 'GN30': 'Coyah', 'GN31': 'Dubreka', 'GN36': 'Lola', 'GN37': 'Mandiana', 'GN34': 'Labe', 'GN35': 'Lelouma', 'GN38': 'Nzerekore', 'GN39': 'Siguiri', 'MW22': 'Salima', 'ES56': 'Catalonia', 'ES55': 'Castilla y Leon', 'JM14': 'Saint Thomas', 'ES54': 'Castilla-La Mancha', 'JP47': 'Okinawa', 'MW26': 'Balaka', 'JM16': 'Westmoreland', 'ES52': 'Aragon', 'CN21': 'Ningxia', 'CN20': 'Nei Mongol', 'CN23': 'Shanghai', 'CN22': 'Beijing', 'CN25': 'Shandong', 'CN24': 'Shanxi', 'CN26': 'Shaanxi', 'CN29': 'Yunnan', 'CN28': 'Tianjin', 'JM11': 'Saint Elizabeth', 'LT59': 'Marijampoles Apskritis', 'LT58': 'Klaipedos Apskritis', 'JM12': 'Saint James', 'VE03': 'Apure', 'VE02': 'Anzoategui', 'VE01': 'Amazonas', 'JP41': 'Tottori', 'LT57': 'Kauno Apskritis', 'LT56': 'Alytaus Apskritis', 'VE05': 'Barinas', 'VE04': 'Aragua', 'FRB2': 'Lorraine', 'FRB1': 'Limousin', 'VN54': 'Binh Dinh', 'FRB6': 'Picardie', 'FRB7': 'Poitou-Charentes', 'EC08': 'El Oro', 'EC09': 
'Esmeraldas', 'FRB4': 'Nord-Pas-de-Calais', 'EC01': 'Galapagos', 'EC02': 'Azuay', 'EC03': 'Bolivar', 'EC04': 'Canar', 'EC05': 'Carchi', 'EC06': 'Chimborazo', 'EC07': 'Cotopaxi', 'MX03': 'Baja California Sur', 'MX02': 'Baja California', 'MX01': 'Aguascalientes', 'MX07': 'Coahuila de Zaragoza', 'MX06': 'Chihuahua', 'MX05': 'Chiapas', 'MX04': 'Campeche', 'CAPE': 'Prince Edward Island', 'MX09': 'Distrito Federal', 'MX08': 'Colima', 'SI66': 'Loski Potok', 'SI64': 'Logatec', 'ZM07': 'Southern', 'ZM06': 'North-Western', 'SI62': 'Ljubno', 'ZM05': 'Northern', 'PA06': 'Herrera', 'PA01': 'Bocas del Toro', 'UG90': 'Mukono', 'CV02': 'Brava', 'CV01': 'Boa Vista', 'KY08': 'Western', 'ZM01': 'Western', 'CV05': 'Paul', 'CV04': 'Maio', 'KY04': 'South Town', 'KY05': 'Spot Bay', 'KY06': 'Stake Bay', 'CV08': 'Sal', 'KY01': 'Creek', 'KY02': 'Eastern', 'KY03': 'Midland', 'US44': 'Rhode Island', 'MZ03': 'Inhambane', 'US47': 'Tennessee', 'US40': 'Oklahoma', 'US41': 'Oregon', 'US42': 'Pennsylvania', 'IQ08': 'Dahuk', 'IQ09': 'Dhi Qar', 'IQ06': 'Babil', 'IQ07': 'Baghdad', 'IQ04': 'Al Qadisiyah', 'IQ05': 'As Sulaymaniyah', 'IQ02': 'Al Basrah', 'LA14': 'Xiangkhoang', 'IQ01': 'Al Anbar', 'TD14': 'Tandjile', 'TD13': 'Salamat', 'TD12': 'Ouaddai', 'TD11': 'Moyen-Chari', 'TD10': 'Mayo-Kebbi', 'GT22': 'Zacapa', 'GT21': 'Totonicapan', 'GT20': 'Suchitepequez', 'GBA8': 'Blackburn with Darwen', 'GBA9': 'Blackpool', 'PT22': 'Viseu', 'PT23': 'Azores', 'PT20': 'Viana do Castelo', 'PT21': 'Vila Real', 'GN29': 'Yomou', 'GN28': 'Tougue', 'EE10': 'Parnu', 'GN25': 'Pita', 'GN27': 'Telimele', 'GN21': 'Macenta', 'GBA2': 'Barnet', 'GN23': 'Mamou', 'GN22': 'Mali', 'GR47': 'Dhodhekanisos', 'GBA3': 'Barnsley', 'GR46': 'Lasithi', 'GBA4': 'Bath and North East Somerset', 'GBA5': 'Bedfordshire', 'GR44': 'Rethimni', 'GBA6': 'Bexley', 'GBA7': 'Birmingham', 'AD08': 'Escaldes-Engordany', 'AD03': 'Encamp', 'AD02': 'Canillo', 'AD07': 'Andorra la Vella', 'AD06': 'Sant Julia de Loria', 'AD05': 'Ordino', 'AD04': 'La Massana', 'CG08': 'Plateaux', 'LY57': 'Gharyan', 'CG01': 'Bouenza', 'CG06': 'Likouala', 'CG07': 'Niari', 'CG04': 'Kouilou', 'CG05': 'Lekoumou', 'VN44': 'Dac Lac', 'VN45': 'Dong Nai', 'LT64': 'Utenos Apskritis', 'LT65': 'Vilniaus Apskritis', 'LT62': 'Taurages Apskritis', 'LT63': 'Telsiu Apskritis', 'LT60': 'Panevezio Apskritis', 'LT61': 'Siauliu Apskritis', 'VN49': 'Song Be', 'JP28': 'Nara', 'JP29': 'Niigata', 'JP24': 'Miyagi', 'JP25': 'Miyazaki', 'JP26': 'Nagano', 'JP27': 'Nagasaki', 'JP20': 'Kochi', 'JP21': 'Kumamoto', 'JP22': 'Kyoto', 'JP23': 'Mie', 'EC19': 'Tungurahua', 'EC18': 'Pichincha', 'EC13': 'Los Rios', 'EC12': 'Loja', 'EC11': 'Imbabura', 'EC10': 'Guayas', 'EC17': 'Pastaza', 'EC15': 'Morona-Santiago', 'EC14': 'Manabi', 'SI04': 'Bohinj', 'SI05': 'Borovnica', 'SI06': 'Bovec', 'SI07': 'Brda', 'SI01': 'Ajdovscina', 'SI02': 'Beltinci', 'SI03': 'Bled', 'MX32': 'Zacatecas', 'SI09': 'Brezovica', 'MX30': 'Veracruz-Llave', 'MX31': 'Yucatan', 'DO18': 'Puerto Plata', 'PHA9': 'Cabanatuan', 'PHA8': 'Butuan', 'PHA7': 'Batangas City', 'PHA6': 'Basilan City', 'PHA5': 'Bais', 'PHA4': 'Baguio', 'PHA3': 'Bago', 'PHA2': 'Bacolod', 'PHA1': 'Angeles', 'JP42': 'Toyama', 'BI21': 'Ruyigi', 'BI20': 'Rutana', 'BI23': 'Mwaro', 'BI22': 'Muramvya', 'PA10': 'Veraguas', 'MZ10': 'Manica', 'MZ11': 'Maputo', 'IQ18': 'Salah ad Din', 'PH29': 'Ilocos Sur', 'IQ11': 'Arbil', 'IQ10': 'Diyala', 'IQ13': "At Ta'mim", 'IQ12': "Karbala'", 'IQ15': 'Ninawa', 'IQ14': 'Maysan', 'IQ17': 'An Najaf', 'IQ16': 'Wasit', 'TJ02': 'Khatlon', 'TJ03': 'Sughd', 'TJ01': 'Kuhistoni Badakhshon', 
'GT12': 'Peten', 'GT13': 'Quetzaltenango', 'GT10': 'Jalapa', 'GT11': 'Jutiapa', 'GT16': 'Sacatepequez', 'GT17': 'San Marcos', 'GT14': 'Quiche', 'GT15': 'Retalhuleu', 'GT18': 'Santa Rosa', 'GT19': 'Solola', 'PSWE': 'West Bank', 'PY23': 'Alto Paraguay', 'PY21': 'Nueva Asuncion', 'PY20': 'Chaco', 'QA06': 'Ar Rayyan', 'QA04': 'Al Khawr', 'QA05': 'Al Wakrah Municipality', 'QA02': 'Al Ghuwariyah', 'QA03': 'Al Jumaliyah', 'QA01': 'Ad Dawhah', 'QA08': 'Madinat ach Shamal', 'QA09': 'Umm Salal', 'GBU5': 'East Dunbartonshire', 'UY03': 'Cerro Largo', 'UY01': 'Artigas', 'UY06': 'Flores', 'UY07': 'Florida', 'UY04': 'Colonia', 'UY05': 'Durazno', 'UY08': 'Lavalleja', 'UY09': 'Maldonado', 'CH10': 'Inner-Rhoden', 'GBU6': 'East Lothian', 'CH11': 'Luzern', 'CG14': 'Cuvette-Ouest', 'CG11': 'Pool', 'CG10': 'Sangha', 'CG13': 'Cuvette', 'CG12': 'Brazzaville', 'UA24': "Volyns'ka Oblast'", 'UA25': "Zakarpats'ka Oblast'", 'UA26': "Zaporiz'ka Oblast'", 'UA27': "Zhytomyrs'ka Oblast'", 'UA20': "Sevastopol'", 'UA21': "Sums'ka Oblast'", 'UA22': "Ternopil's'ka Oblast'", 'UA23': "Vinnyts'ka Oblast'", 'IT20': 'Veneto', 'US11': 'District of Columbia', 'VE21': 'Trujillo', 'VE20': 'Tachira', 'VE23': 'Zulia', 'VE22': 'Yaracuy', 'VE25': 'Distrito Federal', 'VN52': 'Ho Chi Minh', 'VN51': 'Ha Noi', 'VE26': 'Vargas', 'VN59': 'Ha Tay', 'VN58': 'Ha Giang', 'LY62': 'Yafran', 'JP39': 'Tokushima', 'JP38': 'Tochigi', 'JP37': 'Shizuoka', 'JP36': 'Shimane', 'JP35': 'Shiga', 'JP34': 'Saitama', 'JP33': 'Saga', 'JP32': 'Osaka', 'JP31': 'Okayama', 'JP30': 'Oita', 'MX29': 'Tlaxcala', 'MX28': 'Tamaulipas', 'SI15': 'Crensovci', 'SI14': 'Cerkno', 'SI13': 'Cerknica', 'SI12': 'Cerklje na Gorenjskem', 'SI11': 'Celje', 'MX21': 'Puebla', 'MX20': 'Oaxaca', 'MX23': 'Quintana Roo', 'MX22': 'Queretaro de Arteaga', 'MX25': 'Sinaloa', 'MX24': 'San Luis Potosi', 'SI19': 'Divaca', 'MX26': 'Sonora', 'LY60': 'Surt', 'MD61': 'Basarabeasca', 'MD60': 'Balti', 'MD63': 'Briceni', 'KN10': 'Saint Paul Charlestown', 'KN11': 'Saint Peter Basseterre', 'KN12': 'Saint Thomas Lowland', 'MD62': 'Bender', 'KN15': 'Trinity Palmetto Point', 'PHB2': 'Cagayan de Oro', 'MD65': 'Cantemir', 'PHB1': 'Cadiz', 'PHB6': 'Cavite City', 'PHB7': 'Cebu City', 'PHB4': 'Caloocan', 'MD64': 'Cahul', 'CI75': 'Bafing', 'CI74': 'Agneby', 'CI77': 'Denguele', 'CI76': 'Bas-Sassandra', 'CI79': 'Fromager', 'CI78': 'Dix-Huit Montagnes', 'FRB5': 'Pays de la Loire', 'EC24': 'Orellana', 'EC22': 'Sucumbios', 'EC23': 'Napo', 'EC20': 'Zamora-Chinchipe', 'CV20': 'Tarrafal', 'KW02': 'Al Kuwayt', 'KW01': 'Al Ahmadi', 'KW07': 'Al Farwaniyah', 'KW05': 'Al Jahra', 'CH19': 'Thurgau', 'KW08': 'Hawalli', 'KW09': 'Mubarak al Kabir', 'PA09': 'San Blas', 'PA08': 'Panama', 'ZM09': 'Lusaka', 'ZM08': 'Copperbelt', 'PA05': 'Darien', 'PA04': 'Colon', 'PA07': 'Los Santos', 'ZM04': 'Luapula', 'ZM03': 'Eastern', 'ZM02': 'Central', 'PA03': 'Cocle', 'PA02': 'Chiriqui', 'MZ01': 'Cabo Delgado', 'US45': 'South Carolina', 'US46': 'South Dakota', 'MZ02': 'Gaza', 'MZ05': 'Sofala', 'MZ04': 'Maputo', 'MZ07': 'Niassa', 'MZ06': 'Nampula', 'MZ09': 'Zambezia', 'MZ08': 'Tete', 'US48': 'Texas', 'US49': 'Utah', 'GD05': 'Saint Mark', 'GD04': 'Saint John', 'GD06': 'Saint Patrick', 'GD01': 'Saint Andrew', 'GD03': 'Saint George', 'GD02': 'Saint David', 'IS09': 'Eyjafjardarsysla', 'IS05': 'Austur-Hunavatnssysla', 'IS06': 'Austur-Skaftafellssysla', 'IS07': 'Borgarfjardarsysla', 'IS03': 'Arnessysla', 'AU02': 'New South Wales', 'AU03': 'Northern Territory', 'AU01': 'Australian Capital Territory', 'AU06': 'Tasmania', 'AU07': 'Victoria', 'AU04': 
'Queensland', 'AU05': 'South Australia', 'AU08': 'Western Australia', 'BO09': 'Tarija', 'BM10': 'Southampton', 'BM11': 'Warwick', 'GT09': 'Izabal', 'GT08': 'Huehuetenango', 'GT05': 'El Progreso', 'GT04': 'Chiquimula', 'GT07': 'Guatemala', 'GT06': 'Escuintla', 'GT01': 'Alta Verapaz', 'GT03': 'Chimaltenango', 'GT02': 'Baja Verapaz', 'PY19': 'Canindeyu', 'PY12': 'Misiones', 'PY13': 'Neembucu', 'PY10': 'Guaira', 'PY11': 'Itapua', 'PY16': 'Presidente Hayes', 'PY17': 'San Pedro', 'PY15': 'Paraguari', 'QA11': 'Jariyan al Batnah', 'QA10': 'Al Wakrah', 'QA12': "Umm Sa'id", 'AF41': 'Daykondi', 'AF40': 'Parvan', 'AF42': 'Panjshir', 'GBY1': 'Flintshire', 'GBY2': 'Gwynedd', 'GBY3': 'Merthyr Tydfil', 'GBY4': 'Monmouthshire', 'GBY5': 'Neath Port Talbot', 'GBY6': 'Newport', 'GBY7': 'Pembrokeshire', 'GBY8': 'Powys', 'GBY9': 'Rhondda Cynon Taff', 'UY19': 'Treinta y Tres', 'UY18': 'Tacuarembo', 'UY15': 'Salto', 'UY14': 'Rocha', 'UY17': 'Soriano', 'UY16': 'San Jose', 'UY11': 'Paysandu', 'UY10': 'Montevideo', 'UY13': 'Rivera', 'UY12': 'Rio Negro', 'CL16': 'Arica y Parinacota', 'CL17': 'Los Rios', 'CL14': 'Los Lagos', 'CL15': 'Tarapaca', 'SN14': 'Saint-Louis', 'SN15': 'Matam', 'CL10': 'Magallanes y de la Antartica Chilena', 'CL11': 'Maule', 'BZ01': 'Belize', 'BZ02': 'Cayo', 'BZ03': 'Corozal', 'BZ04': 'Orange Walk', 'BZ05': 'Stann Creek', 'BZ06': 'Toledo', 'CASK': 'Saskatchewan', 'RU87': 'Yamal-Nenets', 'RU86': 'Voronezh', 'RU85': 'Vologda', 'RU84': 'Volgograd', 'RU83': 'Vladimir', 'VN67': 'Ninh Binh', 'VN64': 'Quang Tri', 'VN65': 'Nam Ha', 'VN68': 'Ninh Thuan', 'VN69': 'Phu Yen', 'RU89': 'Yevrey', 'RU88': "Yaroslavl'", 'JP08': 'Fukushima', 'JP09': 'Gifu', 'JP02': 'Akita', 'JP03': 'Aomori', 'JP01': 'Aichi', 'JP06': 'Fukui', 'JP07': 'Fukuoka', 'JP04': 'Chiba', 'JP05': 'Ehime', 'SI22': 'Dol pri Ljubljani', 'SI20': 'Dobrepolje', 'SI26': 'Duplek', 'SI27': 'Gorenja Vas-Poljane', 'SI24': 'Dornava', 'SI25': 'Dravograd', 'SI28': 'Gorisnica', 'SI29': 'Gornja Radgona', 'GBE4': 'Essex', 'GBE5': 'Gateshead', 'GBE6': 'Gloucestershire', 'GBE7': 'Greenwich', 'GBE1': 'East Riding of Yorkshire', 'GBE2': 'East Sussex', 'GBE3': 'Enfield', 'GBE8': 'Hackney', 'GBE9': 'Halton', 'KN03': 'Saint George Basseterre', 'KN02': 'Saint Anne Sandy Point', 'KN01': 'Christ Church Nichola Town', 'KN07': 'Saint John Figtree', 'KN06': 'Saint John Capisterre', 'KN05': 'Saint James Windward', 'KN04': 'Saint George Gingerland', 'KN09': 'Saint Paul Capisterre', 'KN08': 'Saint Mary Cayon', 'CI88': 'Sud-Bandama', 'CI89': 'Sud-Comoe', 'CI84': 'Moyen-Cavally', 'CI85': 'Moyen-Comoe', 'CI86': "N'zi-Comoe", 'CI87': 'Savanes', 'CI80': 'Haut-Sassandra', 'CI81': 'Lacs', 'CI82': 'Lagunes', 'CI83': 'Marahoue', 'NG16': 'Ogun', 'AZ18': 'Fuzuli', 'AZ19': 'Gadabay', 'NG11': 'Federal Capital Territory', 'US56': 'Wyoming', 'US55': 'Wisconsin', 'US54': 'West Virginia', 'US53': 'Washington', 'US51': 'Virginia', 'US50': 'Vermont', 'PHC5': 'Dumaguete', 'PHC4': 'Dipolog', 'PHC7': 'Gingoog', 'PHC6': 'General Santos', 'PHC1': 'Danao', 'PHC3': 'Davao City', 'PHC2': 'Dapitan', 'CO38': 'Magdalena', 'PHC9': 'Iloilo City', 'SN12': 'Ziguinchor', 'SN13': 'Louga', 'TH48': 'Chanthaburi', 'TH49': 'Trat', 'TH44': 'Chachoengsao', 'TH45': 'Prachin Buri', 'IS15': 'Kjosarsysla', 'TH47': 'Rayong', 'TH40': 'Krung Thep', 'TH41': 'Phayao', 'TH42': 'Samut Prakan', 'IS10': 'Gullbringusysla', 'BM07': "Saint George's", 'BM06': 'Saint George', 'BM05': 'Pembroke', 'BM04': 'Paget', 'BM03': 'Hamilton', 'BM02': 'Hamilton', 'BM01': 'Devonshire', 'BM09': 'Smiths', 'BM08': 'Sandys', 'SIK5': 'Preddvor', 
'USDC': 'District of Columbia', 'PY05': 'Caazapa', 'PY04': 'Caaguazu', 'PY07': 'Concepcion', 'PY06': 'Central', 'PY01': 'Alto Parana', 'PY03': 'Boqueron', 'PY02': 'Amambay', 'PY08': 'Cordillera', 'AF30': 'Balkh', 'AF31': 'Jowzjan', 'AF32': 'Samangan', 'AF33': 'Sar-e Pol', 'AF34': 'Konar', 'AF35': 'Laghman', 'AF36': 'Paktia', 'AF37': 'Khowst', 'AF38': 'Nurestan', 'AF39': 'Oruzgan', 'GBX3': 'Bridgend', 'GBX2': 'Blaenau Gwent', 'GBX1': 'Isle of Anglesey', 'ML10': 'Kidal', 'GBX6': 'Ceredigion', 'GBX5': 'Cardiff', 'GBX4': 'Caerphilly', 'GBX9': 'Denbighshire', 'GBX8': 'Conwy', 'AM08': "Syunik'", 'AM09': 'Tavush', 'AM02': 'Ararat', 'AM03': 'Armavir', 'AM01': 'Aragatsotn', 'AM06': 'Lorri', 'AM07': 'Shirak', 'AM04': "Geghark'unik'", 'AM05': "Kotayk'", 'CL09': 'Los Lagos', 'CL08': "Libertador General Bernardo O'Higgins", 'SN09': 'Fatick', 'SN03': 'Diourbel', 'SN01': 'Dakar', 'CL02': 'Aisen del General Carlos Ibanez del Campo', 'SN07': 'Thies', 'CL04': 'Araucania', 'SN05': 'Tambacounda', 'CL06': 'Bio-Bio', 'SN10': 'Kaolack', 'UA08': "Khersons'ka Oblast'", 'UA09': "Khmel'nyts'ka Oblast'", 'USMN': 'Minnesota', 'UA02': "Chernihivs'ka Oblast'", 'UA03': "Chernivets'ka Oblast'", 'UA01': "Cherkas'ka Oblast'", 'UA06': "Ivano-Frankivs'ka Oblast'", 'UA07': "Kharkivs'ka Oblast'", 'UA04': "Dnipropetrovs'ka Oblast'", 'UA05': "Donets'ka Oblast'", 'SN11': 'Kolda', 'VN62': 'Khanh Hoa', 'VN79': 'Hai Duong', 'VN78': 'Da Nang', 'VN63': 'Kon Tum', 'VN75': 'Tra Vinh', 'VN74': 'Thua Thien', 'VN77': 'Vinh Long', 'VN76': 'Tuyen Quang', 'VN71': 'Quang Ngai', 'VN60': 'Ha Tinh', 'VN73': 'Soc Trang', 'VN72': 'Quang Tri', 'VN61': 'Hoa Binh', 'VN66': 'Nghe An', 'RU82': 'Ust-Orda Buryat', 'JP15': 'Ishikawa', 'JP14': 'Ibaraki', 'JP17': 'Kagawa', 'JP16': 'Iwate', 'JP11': 'Hiroshima', 'RU81': "Ul'yanovsk", 'JP13': 'Hyogo', 'JP12': 'Hokkaido', 'IL03': 'HaZafon', 'IL02': 'HaMerkaz', 'IL01': 'HaDarom', 'RU80': 'Udmurt', 'JP19': 'Kanagawa', 'JP18': 'Kagoshima', 'IL05': 'Tel Aviv', 'IL04': 'Hefa', 'SI39': 'Ivancna Gorica', 'SI38': 'Ilirska Bistrica', 'SI35': 'Hrpelje-Kozina', 'SI34': 'Hrastnik', 'SI37': 'Ig', 'SI36': 'Idrija', 'SI31': 'Gornji Petrovci', 'SI30': 'Gornji Grad', 'SI32': 'Grosuplje', 'EE20': 'Viljandimaa', 'GBD6': 'Dorset', 'GBD5': 'Doncaster', 'GBD4': 'Devon', 'GBD3': 'Derbyshire', 'GBD2': 'Derby', 'GBD1': 'Darlington', 'GBD9': 'Ealing', 'GBD8': 'Durham', 'BR01': 'Acre', 'BR02': 'Alagoas', 'BR03': 'Amapa', 'BR04': 'Amazonas', 'BR05': 'Bahia', 'BR06': 'Ceara', 'BR07': 'Distrito Federal', 'BR08': 'Espirito Santo', 'CI92': 'Zanzan', 'CI91': 'Worodougou', 'CI90': 'Vallee du Bandama', 'PK03': 'North-West Frontier', 'PK02': 'Balochistan', 'IT10': 'Marche', 'IT11': 'Molise', 'IT12': 'Piemonte', 'IT13': 'Puglia', 'IT14': 'Sardegna', 'IT15': 'Sicilia', 'IT16': 'Toscana', 'IT17': 'Trentino-Alto Adige', 'IT18': 'Umbria', 'IT19': "Valle d'Aosta", 'PHD1': 'Iriga', 'PHD2': 'La Carlota', 'PHD3': 'Laoag', 'PHD4': 'Lapu-Lapu', 'PHD5': 'Legaspi', 'PHD6': 'Lipa', 'PHD7': 'Lucena', 'PHD8': 'Mandaue', 'PHD9': 'Manila', 'PK08': 'Islamabad', 'BB01': 'Christ Church', 'IS28': 'Skagafjardarsysla', 'IS29': 'Snafellsnes- og Hnappadalssysla', 'BB04': 'Saint James', 'BB05': 'Saint John', 'TH59': 'Ranong', 'TH58': 'Chumphon', 'TH57': 'Prachuap Khiri Khan', 'IS23': 'Rangarvallasysla', 'TH55': 'Samut Sakhon', 'TH54': 'Samut Songkhram', 'TH53': 'Nakhon Pathom', 'TH52': 'Ratchaburi', 'TH51': 'Suphan Buri', 'TH50': 'Kanchanaburi', 'BT20': 'Thimphu', 'BT21': 'Tongsa', 'BT22': 'Wangdi Phodrang', 'CR07': 'Puntarenas', 'CR06': 'Limon', 'CR04': 'Heredia', 'CR03': 
'Guanacaste', 'CR02': 'Cartago', 'CR01': 'Alajuela', 'CR08': 'San Jose', 'HN02': 'Choluteca', 'HN03': 'Colon', 'HN01': 'Atlantida', 'HN06': 'Cortes', 'HN07': 'El Paraiso', 'HN04': 'Comayagua', 'HN05': 'Copan', 'HN08': 'Francisco Morazan', 'HN09': 'Gracias a Dios', 'GL01': 'Nordgronland', 'GL03': 'Vestgronland', 'GL02': 'Ostgronland', 'ET50': 'Hareri Hizb', 'ET51': 'Oromiya', 'ET52': 'Sumale', 'ET53': 'Tigray', 'ET54': 'YeDebub Biheroch Bihereseboch na Hizboch', 'AF23': 'Kandahar', 'AF27': 'Vardak', 'IE29': 'Westmeath', 'AF24': 'Kondoz', 'IE24': 'Roscommon', 'IE25': 'Sligo', 'IE26': 'Tipperary', 'IE27': 'Waterford', 'IE20': 'Mayo', 'IE21': 'Meath', 'IE22': 'Monaghan', 'IE23': 'Offaly', 'ML07': 'Koulikoro', 'ML06': 'Sikasso', 'ML05': 'Segou', 'ML04': 'Mopti', 'ML03': 'Kayes', 'AM10': "Vayots' Dzor", 'ML01': 'Bamako', 'ML09': 'Gao', 'ML08': 'Tombouctou', 'UA15': "L'vivs'ka Oblast'", 'UA14': "Luhans'ka Oblast'", 'UA17': "Odes'ka Oblast'", 'UA16': "Mykolayivs'ka Oblast'", 'UA11': 'Krym', 'UA10': "Kirovohrads'ka Oblast'", 'UA13': "Kyyivs'ka Oblast'", 'UA12': 'Kyyiv', 'GBV9': 'Orkney', 'UA19': "Rivnens'ka Oblast'", 'UA18': "Poltavs'ka Oblast'", 'GBV8': 'North Lanarkshire', 'GBV5': 'Midlothian', 'GBV4': 'Inverclyde', 'VN09': 'Dong Thap', 'VN01': 'An Giang', 'VN03': 'Ben Tre', 'VN05': 'Cao Bang', 'NG56': 'Nassarawa', 'MV41': 'Meemu', 'MV40': 'Maale', 'MV43': 'Noonu', 'PL80': 'Podkarpackie', 'SI49': 'Komen', 'PL82': 'Pomorskie', 'MV42': 'Gnaviyani', 'PL84': 'Swietokrzyskie', 'PL85': 'Warminsko-Mazurskie', 'PL86': 'Wielkopolskie', 'PL87': 'Zachodniopomorskie', 'SI40': 'Izola-Isola', 'MV45': 'Shaviyani', 'SI42': 'Jursinci', 'SI44': 'Kanal', 'SI45': 'Kidricevo', 'SI46': 'Kobarid', 'MV44': 'Raa', 'MV47': 'Vaavu', 'MV46': 'Thaa', 'GBG2': 'Isle of Wight', 'GBG3': 'Islington', 'GBG1': 'Hounslow', 'GBG6': 'Kingston upon Hull', 'GBG7': 'Kingston upon Thames', 'GBG4': 'Kensington and Chelsea', 'GBG5': 'Kent', 'GBG8': 'Kirklees', 'GBG9': 'Knowsley', 'MKC3': 'Zelino', 'BR13': 'Maranhao', 'BR11': 'Mato Grosso do Sul', 'MKC2': 'Zelenikovo', 'BR17': 'Paraiba', 'BR16': 'Para', 'BR15': 'Minas Gerais', 'BR14': 'Mato Grosso', 'FJ04': 'Rotuma', 'FJ05': 'Western', 'BR18': 'Parana', 'FJ01': 'Central', 'FJ02': 'Eastern', 'FJ03': 'Northern', 'MKC6': 'Zrnovci', 'PH08': 'Batanes', 'PH09': 'Batangas', 'PH04': 'Aklan', 'PH05': 'Albay', 'PH06': 'Antique', 'PH07': 'Bataan', 'PH01': 'Abra', 'PH02': 'Agusan del Norte', 'PH03': 'Agusan del Sur', 'IT03': 'Calabria', 'IT02': 'Basilicata', 'IT01': 'Abruzzi', 'IT07': 'Lazio', 'IT06': 'Friuli-Venezia Giulia', 'IT05': 'Emilia-Romagna', 'IT04': 'Campania', 'IT09': 'Lombardia', 'IT08': 'Liguria', 'PHE3': 'Olongapo', 'PHE2': 'Naga', 'PHE1': 'Marawi', 'PHE7': 'Pagadian', 'PHE6': 'Ozamis', 'PHE5': 'Oroquieta', 'PHE4': 'Ormoc', 'PHE9': 'Pasay', 'PHE8': 'Palayan', 'TH68': 'Songkhla', 'TH69': 'Pattani', 'TH62': 'Phuket', 'TH63': 'Krabi', 'TH60': 'Surat Thani', 'TH61': 'Phangnga', 'TH66': 'Phatthalung', 'TH67': 'Satun', 'TH64': 'Nakhon Si Thammarat', 'TH65': 'Trang', 'MS01': 'Saint Anthony', 'MS02': 'Saint Georges', 'MS03': 'Saint Peter', 'VE24': 'Dependencias Federales', 'SIB3': 'Sentilj', 'DE12': 'Mecklenburg-Vorpommern', 'DE13': 'Sachsen', 'DE10': 'Schleswig-Holstein', 'DE11': 'Brandenburg', 'DE16': 'Berlin', 'DE14': 'Sachsen-Anhalt', 'DE15': 'Thuringen', 'MU20': 'Savanne', 'MU21': 'Agalega Islands', 'MU22': 'Cargados Carajos', 'MU23': 'Rodrigues', 'HN18': 'Yoro', 'HN15': 'Olancho', 'HN14': 'Ocotepeque', 'HN17': 'Valle', 'HN16': 'Santa Barbara', 'HN11': 'Islas de la Bahia', 'HN10': 'Intibuca', 
'HN13': 'Lempira', 'HN12': 'La Paz', 'IS35': 'Vestur-Hunavatnssysla', 'IS34': 'Vestur-Bardastrandarsysla', 'IS37': 'Vestur-Skaftafellssysla', 'IS36': 'Vestur-Isafjardarsysla', 'IS31': 'Sudur-Mulasysla', 'IS30': 'Strandasysla', 'IS32': 'Sudur-Tingeyjarsysla', 'HT03': 'Nord-Ouest', 'USFL': 'Florida', 'USFM': 'Federated States of Micronesia', 'ET49': 'Gambela Hizboch', 'ET48': 'Dire Dawa', 'ZW10': 'Harare', 'ET47': 'Binshangul Gumuz', 'ET46': 'Amara', 'ET45': 'Afar', 'ET44': 'Adis Abeba', 'AF18': 'Nangarhar', 'AF19': 'Nimruz', 'AF17': 'Lowgar', 'AF14': 'Kapisa', 'AF13': 'Kabol', 'AF10': 'Helmand', 'AF11': 'Herat', 'GBZ1': 'Swansea', 'GBZ3': 'Vale of Glamorgan', 'GBZ2': 'Torfaen', 'GBZ4': 'Wrexham', 'AO08': 'Huambo', 'AO09': 'Huila', 'AO01': 'Benguela', 'AO02': 'Bie', 'AO03': 'Cabinda', 'AO04': 'Cuando Cubango', 'AO05': 'Cuanza Norte', 'AO06': 'Cuanza Sul', 'AO07': 'Cunene', 'TO03': 'Vava', 'NP08': 'Lumbini', 'NP09': 'Mahakali', 'NP06': 'Karnali', 'NP07': 'Kosi', 'NP04': 'Gandaki', 'NP05': 'Janakpur', 'NP02': 'Bheri', 'NP03': 'Dhawalagiri', 'NP01': 'Bagmati', 'VN13': 'Hai Phong', 'UG84': 'Kitgum', 'UG85': 'Kyenjojo', 'UG86': 'Mayuge', 'UG87': 'Mbale', 'UG80': 'Kaberamaido', 'UG81': 'Kamwenge', 'UG82': 'Kanungu', 'UG83': 'Kayunga', 'UG88': 'Moroto', 'UG89': 'Mpigi', 'SI53': 'Kranjska Gora', 'SI52': 'Kranj', 'SI51': 'Kozje', 'SI50': 'Koper-Capodistria', 'SI57': 'Lasko', 'SI55': 'Kungota', 'SI54': 'Krsko', 'GBF9': 'Hillingdon', 'GBF8': 'Hertford', 'EE08': 'Laane-Virumaa', 'EE09': 'Narva', 'GBF5': 'Hartlepool', 'GBF4': 'Harrow', 'GBF7': 'Herefordshire', 'GBF6': 'Havering', 'GBF1': 'Hammersmith and Fulham', 'EE03': 'Ida-Virumaa', 'GBF3': 'Haringey', 'GBF2': 'Hampshire', 'BR28': 'Sergipe', 'BR29': 'Goias', 'BR26': 'Santa Catarina', 'BR27': 'Sao Paulo', 'BR24': 'Rondonia', 'BR25': 'Roraima', 'BR22': 'Rio Grande do Norte', 'BR23': 'Rio Grande do Sul', 'BR20': 'Piaui', 'BR21': 'Rio de Janeiro', 'SI76': 'Mislinja', 'PH19': 'Catanduanes', 'PH18': 'Capiz', 'PH17': 'Camiguin', 'PH16': 'Camarines Sur', 'PH15': 'Camarines Norte', 'PH14': 'Cagayan', 'PH13': 'Bulacan', 'PH12': 'Bukidnon', 'PH11': 'Bohol', 'PH10': 'Benguet', 'US08': 'Colorado', 'US09': 'Connecticut', 'CH25': 'Zurich', 'CH24': 'Zug', 'CH23': 'Vaud', 'CH22': 'Valais', 'CH21': 'Uri', 'CH20': 'Ticino', 'US01': 'Alabama', 'US02': 'Alaska', 'US04': 'Arizona', 'US05': 'Arkansas', 'US06': 'California', 'NA25': 'Kavango', 'PHF8': 'Silay', 'PHF9': 'Surigao', 'PHF6': 'San Jose', 'PHF7': 'San Pablo', 'PHF4': 'San Carlos', 'PHF5': 'San Carlos', 'PHF2': 'Quezon City', 'PHF3': 'Roxas', 'PHF1': 'Puerto Princesa', 'LA02': 'Champasak', 'NA21': 'Windhoek', 'USGA': 'Georgia', 'NA20': 'Karasburg', 'NA23': 'Hereroland Oos', 'USGU': 'Guam', 'BT06': 'Chhukha', 'BT07': 'Chirang', 'BT05': 'Bumthang', 'BT08': 'Daga', 'BT09': 'Geylegphug', 'KZ14': 'Qyzylorda', 'KZ15': 'East Kazakhstan', 'KZ16': 'North Kazakhstan', 'KZ17': 'Zhambyl', 'KZ10': 'South Kazakhstan', 'KZ11': 'Pavlodar', 'KZ12': 'Qaraghandy', 'KZ13': 'Qostanay', 'LU02': 'Grevenmacher', 'PHC8': 'Iligan', 'TH75': 'Ubon Ratchathani', 'IS41': 'Norourland Vestra', 'IS42': 'Suourland', 'IS43': 'Suournes', 'TH71': 'Ubon Ratchathani', 'TH70': 'Yala', 'TH73': 'Nakhon Phanom', 'TH72': 'Yasothon', 'TH79': 'Nong Bua Lamphu', 'TH78': 'Mukdahan', 'ZW05': 'Mashonaland West', 'ZW04': 'Mashonaland East', 'ZW07': 'Matabeleland South', 'ZW06': 'Matabeleland North', 'ZW01': 'Manicaland', 'ZW03': 'Mashonaland Central', 'ZW02': 'Midlands', 'ZW09': 'Bulawayo', 'ZW08': 'Masvingo', 'FM02': 'Pohnpei', 'FM01': 'Kosrae', 'DE05': 'Hessen', 
'IS17': 'Myrasysla', 'DE07': 'Nordrhein-Westfalen', 'DE06': 'Niedersachsen', 'DE01': 'Baden-Wurttemberg', 'DE03': 'Bremen', 'DE02': 'Bayern', 'NR08': 'Denigomodu', 'DE09': 'Saarland', 'TH46': 'Chon Buri', 'NR09': 'Ewa', 'AF09': 'Ghowr', 'SC20': 'Pointe La Rue', 'SC21': 'Port Glaud', 'AF01': 'Badakhshan', 'IE03': 'Clare', 'AF03': 'Baghlan', 'IE01': 'Carlow', 'AF05': 'Bamian', 'IE07': 'Dublin', 'AF07': 'Faryab', 'AF06': 'Farah', 'VN39': 'Lang Son', 'TH43': 'Nakhon Nayok', 'AO19': 'Bengo', 'AO18': 'Lunda Sul', 'AO13': 'Namibe', 'AO12': 'Malanje', 'AO17': 'Lunda Norte', 'AO16': 'Zaire', 'AO15': 'Uige', 'AO14': 'Moxico', 'TW04': "T'ai-wan", 'TW03': "T'ai-pei", 'TW02': 'Kao-hsiung', 'TW01': 'Fu-chien', 'NP11': 'Narayani', 'NP10': 'Mechi', 'NP13': 'Sagarmatha', 'NP12': 'Rapti', 'NP14': 'Seti', 'EG05': 'Al Gharbiyah', 'SIA7': 'Rogaska Slatina', 'SIA6': 'Rogasovci', 'SIA3': 'Radovljica', 'SIA2': 'Radlje ob Dravi', 'SIA1': 'Radenci', 'SIA8': 'Rogatec', 'CAMB': 'Manitoba', 'EG03': 'Al Buhayrah', 'JP46': 'Yamanashi', 'JM15': 'Trelawny', 'JP44': 'Yamagata', 'JP45': 'Yamaguchi', 'JM10': 'Saint Catherine', 'JP43': 'Wakayama', 'JP40': 'Tokyo', 'JM13': 'Saint Mary', 'UG97': 'Yumbe', 'UG96': 'Wakiso', 'UG95': 'Soroti', 'UG94': 'Sironko', 'UG93': 'Rukungiri', 'UG92': 'Pader', 'UG91': 'Nakapiripirit', 'SI61': 'Ljubljana', 'SI68': 'Lukovica', 'EE19': 'Valgamaa', 'EE18': 'Tartumaa', 'VN24': 'Long An', 'VN23': 'Lam Dong', 'VN20': 'Ho Chi Minh', 'VN21': 'Kien Giang', 'EE11': 'Parnumaa', 'GBA1': 'Barking and Dagenham', 'EE13': 'Raplamaa', 'EE12': 'Polvamaa', 'EE15': 'Sillamae', 'EE14': 'Saaremaa', 'EE17': 'Tartu', 'EE16': 'Tallinn', 'BR31': 'Tocantins', 'BR30': 'Pernambuco', 'IR43': 'Khorasan-e Shemali', 'PH28': 'Ilocos Norte', 'RUCI': 'Chechnya Republic', 'PH22': 'Basilan', 'PH23': 'Eastern Samar', 'PH20': 'Cavite', 'PH21': 'Cebu', 'PH26': 'Davao Oriental', 'PH27': 'Ifugao', 'PH24': 'Davao', 'PH25': 'Davao del Sur', 'CH12': 'Neuchatel', 'CH13': 'Nidwalden', 'US19': 'Iowa', 'US18': 'Indiana', 'CH16': 'Schaffhausen', 'CH17': 'Schwyz', 'CH14': 'Obwalden', 'CH15': 'Sankt Gallen', 'US13': 'Georgia', 'US12': 'Florida', 'CH18': 'Solothurn', 'US10': 'Delaware', 'US17': 'Illinois', 'US16': 'Idaho', 'US15': 'Hawaii', 'PHG8': 'Aurora', 'PHG1': 'Tacloban', 'PHG3': 'Tagbilaran', 'PHG2': 'Tagaytay', 'PHG5': 'Toledo', 'PHG4': 'Tangub', 'PHG7': 'Zamboanga', 'PHG6': 'Trece Martires', 'BT11': 'Lhuntshi', 'BT10': 'Ha', 'BT13': 'Paro', 'BT12': 'Mongar', 'BT15': 'Punakha', 'BT14': 'Pemagatsel', 'BT17': 'Samdrup', 'BT16': 'Samchi', 'BT19': 'Tashigang', 'BT18': 'Shemgang', 'KZ07': 'West Kazakhstan', 'KZ06': 'Atyrau', 'KZ05': 'Astana', 'KZ04': 'Aqtobe', 'KZ03': 'Aqmola', 'KZ02': 'Almaty City', 'KZ01': 'Almaty', 'KZ09': 'Mangghystau', 'KZ08': 'Bayqonyr', 'KR19': 'Taejon-jikhalsi', 'TH01': 'Mae Hong Son', 'TH02': 'Chiang Mai', 'TH03': 'Chiang Rai', 'TH04': 'Nan', 'TH05': 'Lamphun', 'TH06': 'Lampang', 'TH07': 'Phrae', 'TH08': 'Tak', 'TH09': 'Sukhothai', 'FM04': 'Yap', 'TN28': 'Madanin', 'TN29': 'Gabes', 'TN22': 'Siliana', 'TN23': 'Sousse', 'TN27': 'Ben Arous', 'UG52': 'Mbarara', 'DK18': 'Midtjylland', 'DK19': 'Nordjylland', 'DK17': 'Hovedstaden', 'UG50': 'Masindi', 'DZ24': 'Jijel', 'DZ25': 'Laghouat', 'NG50': 'Rivers', 'NG51': 'Sokoto', 'DZ20': 'Blida', 'CH08': 'Glarus', 'NG54': 'Ekiti', 'NG55': 'Gombe', 'IE15': 'Laois', 'IE14': 'Leitrim', 'IE16': 'Limerick', 'IE11': 'Kerry', 'IE10': 'Galway', 'IE13': 'Kilkenny', 'IE12': 'Kildare', 'IE19': 'Louth', 'IE18': 'Longford', 'SIB2': 'Sencur', 'HT09': 'Nord', 'SIB1': 'Semic', 'SIB6': 'Sevnica', 
'SIB7': 'Sezana', 'SIB4': 'Sentjernej', 'USDE': 'Delaware', 'SIB8': 'Skocjan', 'SIB9': 'Skofja Loka', 'HT06': 'Artibonite', 'HT07': 'Centre', 'LR21': 'Gbarpolu', 'JM09': 'Saint Ann', 'JM08': 'Saint Andrew', 'JM07': 'Portland', 'JM04': 'Manchester', 'JM02': 'Hanover', 'JM01': 'Clarendon', 'AO20': 'Luanda', 'SI71': 'Medvode', 'VN80': 'Ha Nam', 'SI73': 'Metlika', 'SI72': 'Menges', 'SI74': 'Mezica', 'SI77': 'Moravce', 'VN81': 'Hung Yen', 'SI79': 'Mozirje', 'SI78': 'Moravske Toplice', 'VN82': 'Nam Dinh', 'GBO9': 'Waltham Forest', 'VN84': 'Quang Nam', 'LU01': 'Diekirch', 'LU03': 'Luxembourg', 'VN85': 'Thai Nguyen', 'VN86': 'Vinh Puc Province', 'VN87': 'Can Tho', 'FM03': 'Chuuk', 'VN30': 'Quang Ninh', 'VN33': 'Tay Ninh', 'VN32': 'Son La', 'VN35': 'Thai Binh', 'VN34': 'Thanh Hoa', 'VN37': 'Tien Giang', 'VN89': 'Lai Chau', 'NR04': 'Anibare', 'NR05': 'Baiti', 'NR06': 'Boe', 'NR07': 'Buada', 'NR01': 'Aiwo', 'NR02': 'Anabar', 'NR03': 'Anetan', 'EG08': 'Al Jizah', 'EG09': 'Al Minufiyah', 'EG04': 'Al Fayyum', 'GBO6': 'Trafford', 'EG06': 'Al Iskandariyah', 'EG07': "Al Isma'iliyah", 'EG01': 'Ad Daqahliyah', 'EG02': 'Al Bahr al Ahmar', 'GBO7': 'Wakefield', 'PH35': 'Lanao del Sur', 'PH34': 'Lanao del Norte', 'PH37': 'Leyte', 'PH36': 'La Union', 'PH31': 'Isabela', 'PH30': 'Iloilo', 'PH33': 'Laguna', 'PH32': 'Kalinga-Apayao', 'PH39': 'Masbate', 'PH38': 'Marinduque', 'US26': 'Michigan', 'US27': 'Minnesota', 'US24': 'Maryland', 'US25': 'Massachusetts', 'US22': 'Louisiana', 'US23': 'Maine', 'US20': 'Kansas', 'US21': 'Kentucky', 'US28': 'Mississippi', 'US29': 'Missouri', 'NG52': 'Bayelsa', 'NG53': 'Ebonyi', 'DZ26': 'Mascara', 'DZ27': "M'sila", 'CH09': 'Graubunden', 'DZ21': 'Bouira', 'DZ22': 'Djelfa', 'DZ23': 'Guelma', 'CH05': 'Bern', 'CH04': 'Basel-Stadt', 'CH07': 'Geneve', 'CH06': 'Fribourg', 'CH01': 'Aargau', 'DZ29': 'Oum el Bouaghi', 'CH03': 'Basel-Landschaft', 'CH02': 'Ausser-Rhoden', 'VE08': 'Cojedes', 'USAK': 'Alaska', 'USAL': 'Alabama', 'USAA': 'Armed Forces Americas', 'USAE': 'Armed Forces Europe', 'USAZ': 'Arizona', 'USAS': 'American Samoa', 'USAR': 'Arkansas', 'USAP': 'Armed Forces Pacific', 'ES51': 'Andalucia', 'MU13': 'Flacq', 'MU12': 'Black River', 'MU15': 'Moka', 'MU14': 'Grand Port', 'MU17': 'Plaines Wilhems', 'MU16': 'Pamplemousses', 'MU19': 'Riviere du Rempart', 'MU18': 'Port Louis', 'TH13': 'Phichit', 'TH12': 'Phitsanulok', 'TH11': 'Kamphaeng Phet', 'TH10': 'Uttaradit', 'TH17': 'Nong Khai', 'TH16': 'Nakhon Sawan', 'TH15': 'Uthai Thani', 'TH14': 'Phetchabun', 'TH18': 'Loei', 'TN39': 'Manouba', 'TN38': 'Aiana', 'TN33': 'Sidi Bou Zid', 'TN32': 'Sfax', 'TN31': 'Kebili', 'TN37': 'Zaghouan', 'TN36': 'Tunis', 'TN35': 'Tozeur', 'TN34': 'Tataouine', 'TG25': 'Plateaux', 'TG24': 'Maritime', 'TG26': 'Savanes', 'TG23': 'Kara', 'TG22': 'Centrale', 'NG25': 'Anambra', 'YE24': 'Lahij', 'YE25': 'Ta', 'YE20': "Al Bayda'", 'YE21': 'Al Jawf', 'YE22': 'Hajjah', 'YE23': 'Ibb', 'TR58': 'Sivas', 'CZ52': 'Hlavni mesto Praha', 'MM08': 'Mandalay', 'MM09': 'Pegu', 'MM06': 'Kayah State', 'MM07': 'Magwe', 'MM04': 'Kachin State', 'MM05': 'Karan State', 'MM02': 'Chin State', 'MM03': 'Irrawaddy', 'MM01': 'Rakhine State', 'VE06': 'Bolivar', 'RS01': 'Kosovo', 'RS02': 'Vojvodina', 'SIC9': 'Store', 'SIC8': 'Starse', 'SIC5': 'Smarje pri Jelsah', 'SIC4': 'Slovenske Konjice', 'SIC7': 'Sostanj', 'SIC6': 'Smartno ob Paki', 'SIC1': 'Skofljica', 'HT15': 'Nippes', 'TR54': 'Sakarya', 'TO01': 'Ha', 'GBX7': 'Carmarthenshire', 'SE08': 'Jonkopings Lan', 'SE09': 'Kalmar Lan', 'AR24': 'Tucuman', 'AR23': 'Tierra del Fuego', 'AR22': 'Santiago del 
Estero', 'AR21': 'Santa Fe', 'AR20': 'Santa Cruz', 'SE02': 'Blekinge Lan', 'SE03': 'Gavleborgs Lan', 'SE05': 'Gotlands Lan', 'SE06': 'Hallands Lan', 'SE07': 'Jamtlands Lan', 'SI88': 'Osilnica', 'SI89': 'Pesnica', 'SI84': 'Nova Gorica', 'SI86': 'Odranci', 'SI87': 'Ormoz', 'SI80': 'Murska Sobota', 'SI81': 'Muta', 'SI82': 'Naklo', 'SI83': 'Nazarje', 'GBC6': 'Cornwall', 'GBC7': 'Coventry', 'GBC4': 'Camden', 'GBC5': 'Cheshire', 'GBC2': 'Calderdale', 'GBC3': 'Cambridgeshire', 'GBC1': 'Bury', 'MD58': 'Stinga Nistrului', 'MD59': 'Anenii Noi', 'GBC8': 'Croydon', 'GBC9': 'Cumbria', 'NR14': 'Yaren', 'NR13': 'Uaboe', 'NR12': 'Nibok', 'NR11': 'Meneng', 'NR10': 'Ijuw', 'EG19': "Bur Sa'id", 'EG18': 'Bani Suwayf', 'EG17': 'Asyut', 'EG16': 'Aswan', 'EG15': 'As Suways', 'EG14': 'Ash Sharqiyah', 'EG13': 'Al Wadi al Jadid', 'EG12': 'Al Qalyubiyah', 'EG11': 'Al Qahirah', 'EG10': 'Al Minya', 'ZA10': 'North-West', 'PH41': 'Mindoro Oriental', 'PH42': 'Misamis Occidental', 'PH43': 'Misamis Oriental', 'PH44': 'Mountain', 'PH45': 'Negros Occidental', 'PH46': 'Negros Oriental', 'PH47': 'Nueva Ecija', 'PH48': 'Nueva Vizcaya', 'PH49': 'Palawan', 'JO12': 'At Tafilah', 'JO13': 'Az Zarqa', 'JO10': 'Al Mafraq', 'JO11': 'Amman Governorate', 'JO16': 'Amman', 'JO14': 'Irbid', 'NG05': 'Lagos', 'US31': 'Nebraska', 'US30': 'Montana', 'US33': 'New Hampshire', 'US32': 'Nevada', 'US35': 'New Mexico', 'US34': 'New Jersey', 'US37': 'North Carolina', 'US36': 'New York', 'US39': 'Ohio', 'US38': 'North Dakota', 'DZ37': 'Annaba', 'DZ36': 'Ain Temouchent', 'DZ35': 'Ain Defla', 'DZ34': 'Adrar', 'DZ33': 'Tebessa', 'NG48': 'Ondo', 'DZ31': 'Skikda', 'DZ30': 'Sidi Bel Abbes', 'NG45': 'Abia', 'NG44': 'Yobe', 'NG47': 'Enugu', 'NG46': 'Bauchi', 'NG41': 'Kogi', 'NG40': 'Kebbi', 'DZ39': 'Bordj Bou Arreridj', 'DZ38': 'Bechar', 'CL01': 'Valparaiso', 'CL03': 'Antofagasta', 'CL05': 'Atacama', 'CL07': 'Coquimbo', 'AE05': 'Ras Al Khaimah', 'BO05': 'Oruro', 'BO04': 'La Paz', 'BO07': 'Potosi', 'BO06': 'Pando', 'BO01': 'Chuquisaca', 'TH28': 'Buriram', 'TH29': 'Surin', 'TH26': 'Chaiyaphum', 'TH27': 'Nakhon Ratchasima', 'TH24': 'Maha Sarakham', 'TH25': 'Roi Et', 'TH22': 'Khon Kaen', 'TH23': 'Kalasin', 'TH20': 'Sakon Nakhon', 'TH21': 'Nakhon Phanom', 'TN06': 'Jendouba', 'TN02': 'Kasserine', 'TN03': 'Kairouan', 'SIL3': 'Ruse', 'NA27': 'Namaland', 'MV37': 'Haa Dhaalu', 'MW04': 'Chitipa', 'MW05': 'Thyolo', 'MW06': 'Dedza', 'MW07': 'Dowa', 'MW02': 'Chikwawa', 'MW03': 'Chiradzulu', 'MW08': 'Karonga', 'MW09': 'Kasungu', 'TR82': 'Cankiri', 'TR83': 'Gaziantep', 'TR80': 'Sirnak', 'TR81': 'Adana', 'TR86': 'Ardahan', 'TR87': 'Bartin', 'TR84': 'Kars', 'TR85': 'Zonguldak', 'TR88': 'Igdir', 'TR89': 'Karabuk', 'SD44': 'Central Equatoria State', 'MM11': 'Shan State', 'MM10': 'Sagaing', 'MM13': 'Mon State', 'MM12': 'Tenasserim', 'MM14': 'Rangoon', 'MM17': 'Yangon', 'BF15': 'Bam', 'BF19': 'Boulkiemde', 'NG37': 'Edo', 'SL01': 'Eastern', 'SL03': 'Southern', 'SL02': 'Northern', 'SL04': 'Western Area', 'SID8': 'Velike Lasce', 'SID1': 'Sveti Jurij', 'SID2': 'Tolmin', 'SID3': 'Trbovlje', 'SID4': 'Trebnje', 'SID5': 'Trzic', 'SID6': 'Turnisce', 'SID7': 'Velenje', 'DZ51': 'Relizane', 'AR12': 'La Rioja', 'AR13': 'Mendoza', 'AR10': 'Jujuy', 'AR11': 'La Pampa', 'AR16': 'Rio Negro', 'AR17': 'Salta', 'LY61': 'Tarabulus', 'AR15': 'Neuquen', 'SE12': 'Kronobergs Lan', 'AR18': 'San Juan', 'AR19': 'San Luis', 'SE16': 'Ostergotlands Lan', 'SE15': 'Orebro Lan', 'SE14': 'Norrbottens Lan', 'SI99': 'Radece', 'SI98': 'Racam', 'SI97': 'Puconci', 'SI94': 'Postojna', 'SI92': 'Podcetrtek', 'SI91': 'Pivka', 
'VN70': 'Quang Binh', 'GBB1': 'Bolton', 'GBB3': 'Bracknell Forest', 'GBB2': 'Bournemouth', 'GBB5': 'Brent', 'GBB4': 'Bradford', 'GBB7': 'Bristol', 'GBB6': 'Brighton and Hove', 'GBB9': 'Buckinghamshire', 'GBB8': 'Bromley', 'RU18': 'Evenk', 'RU19': 'Ingush', 'RU14': 'Chita', 'RU15': 'Chukot', 'RU16': 'Chuvashia', 'RU17': 'Dagestan', 'RU10': 'Bryansk', 'RU11': 'Buryat', 'RU12': 'Chechnya', 'RU13': 'Chelyabinsk', 'EG22': 'Matruh', 'EG23': 'Qina', 'EG20': 'Dumyat', 'EG21': 'Kafr ash Shaykh', 'EG26': "Janub Sina'", 'EG27': "Shamal Sina'", 'EG24': 'Suhaj', 'ZA03': 'Free State', 'ZA02': 'KwaZulu-Natal', 'ZA01': 'North-Western Province', 'PH50': 'Pampanga', 'PH57': 'North Cotabato', 'PH56': 'Maguindanao', 'PH55': 'Samar', 'PH54': 'Romblon', 'ZA09': 'Limpopo', 'ZA08': 'Northern Cape', 'JO09': 'Al Karak', 'JO07': 'Ma', 'JO02': "Al Balqa'", 'TZ22': 'Zanzibar North', 'RO28': 'Neamt', 'AZ38': 'Qabala', 'AZ39': 'Qax', 'AZ34': 'Naftalan', 'AZ35': 'Naxcivan', 'AZ36': 'Neftcala', 'AZ37': 'Oguz', 'AZ30': 'Lankaran', 'AZ31': 'Lerik', 'AZ32': 'Masalli', 'JP10': 'Gumma', 'DZ03': 'Batna', 'DZ01': 'Alger', 'DZ06': 'Medea', 'DZ07': 'Mostaganem', 'DZ04': 'Constantine', 'VC05': 'Saint Patrick', 'VC04': 'Saint George', 'NG32': 'Oyo', 'VC06': 'Grenadines', 'VC01': 'Charlotte', 'NG35': 'Adamawa', 'VC03': 'Saint David', 'VC02': 'Saint Andrew', 'USCT': 'Connecticut', 'IL06': 'Yerushalayim', 'USCA': 'California', 'USCO': 'Colorado', 'ZA07': 'Mpumalanga', 'ZA06': 'Gauteng', 'ZA05': 'Eastern Cape', 'TH39': 'Pathum Thani', 'TH38': 'Nonthaburi', 'TH31': 'Narathiwat', 'TH30': 'Sisaket', 'TH33': 'Sing Buri', 'TH32': 'Chai Nat', 'TH35': 'Ang Thong', 'TH34': 'Lop Buri', 'TH37': 'Saraburi', 'TH36': 'Phra Nakhon Si Ayutthaya', 'TN10': 'Qafsah', 'TN15': 'Al Mahdia', 'VN53': 'Ba Ria-Vung Tau', 'TN17': 'Bajah', 'TN16': 'Al Munastir', 'TN19': 'Nabeul', 'TN18': 'Bizerte', 'LC10': 'Vieux-Fort', 'LC11': 'Praslin', 'DK21': 'Syddanmark', 'DK20': 'Sjelland', 'MW17': 'Nkhata Bay', 'MW16': 'Ntcheu', 'MW15': 'Mzimba', 'MW13': 'Mchinji', 'MW12': 'Mangochi', 'MW11': 'Lilongwe', 'MW19': 'Nsanje', 'MW18': 'Nkhotakota', 'TR91': 'Osmaniye', 'TR90': 'Kilis', 'TR93': 'Duzce', 'TR92': 'Yalova', 'YE08': 'Al Hudaydah', 'YE02': 'Adan', 'YE03': 'Al Mahrah', 'YE01': 'Abyan', 'YE06': 'Al Ghaydah', 'YE04': 'Hadramawt', 'YE05': 'Shabwah', 'CZ78': 'Jihomoravsky kraj', 'CZ79': 'Jihocesky kraj', 'BF20': 'Ganzourgou', 'BF21': 'Gnagna', 'BF28': 'Kouritenga', 'GBQ1': 'Wirral', 'SIE9': 'Zavrc', 'SIE3': 'Vodice', 'SIE2': 'Vitanje', 'SIE1': 'Vipava', 'SIE7': 'Zagorje ob Savi', 'SIE6': 'Vuzenica', 'SIE5': 'Vrhnika', 'SE26': 'Stockholms Lan', 'SE27': 'Skane Lan', 'SE24': 'Vasternorrlands Lan', 'SE25': 'Vastmanlands Lan', 'SE22': 'Varmlands Lan', 'SE23': 'Vasterbottens Lan', 'SE21': 'Uppsala Lan', 'SE28': 'Vastra Gotaland', 'LY52': 'Awbari', 'LY53': 'Az Zawiyah', 'LY50': 'Al Khums', 'LY51': 'An Nuqat al Khams', 'AR09': 'Formosa', 'AR08': 'Entre Rios', 'LY54': 'Banghazi', 'LY55': 'Darnah', 'AR05': 'Cordoba', 'AR04': 'Chubut', 'AR07': 'Distrito Federal', 'AR06': 'Corrientes', 'AR01': 'Buenos Aires', 'AR03': 'Chaco', 'AR02': 'Catamarca', 'NE03': 'Dosso', 'NE02': 'Diffa', 'NE01': 'Agadez', 'CA08': 'Ontario', 'CA09': 'Prince Edward Island', 'LR22': 'River Gee', 'NE04': 'Maradi', 'CA04': 'New Brunswick', 'CA05': 'Newfoundland and Labrador', 'CA07': 'Nova Scotia', 'CA01': 'Alberta', 'CA02': 'British Columbia', 'CA03': 'Manitoba', 'GBD7': 'Dudley', 'GBM8': 'Southwark', 'GBM9': 'Staffordshire', 'MD78': 'Ialoveni', 'EE21': 'Vorumaa', 'GBM4': 'Southampton', 'GBM5': 'Southend-on-Sea', 
'GBM6': 'South Gloucestershire', 'GBM7': 'South Tyneside', 'MD72': 'Dubasari', 'GBM1': 'Slough', 'GBM2': 'Solihull', 'GBM3': 'Somerset', 'RU09': 'Belgorod', 'RU08': 'Bashkortostan', 'RU07': "Astrakhan'", 'RU06': "Arkhangel'sk", 'RU05': 'Amur', 'RU04': 'Altaisky krai', 'RU03': 'Gorno-Altay', 'RU02': 'Aginsky Buryatsky AO', 'RU01': 'Adygeya', 'PH68': 'Quirino', 'PH69': 'Siquijor', 'PH66': 'Zamboanga del Sur', 'PH67': 'Northern Samar', 'PH64': 'Zambales', 'PH65': 'Zamboanga del Norte', 'PH62': 'Surigao del Sur', 'PH63': 'Tarlac', 'PH60': 'Sulu', 'PH61': 'Surigao del Norte', 'CO24': 'Risaralda', 'CO25': 'San Andres y Providencia', 'CO26': 'Santander', 'CO27': 'Sucre', 'CO20': 'Narino', 'CO21': 'Norte de Santander', 'AZ29': 'Lankaran', 'AZ28': 'Lacin', 'AZ27': 'Kurdamir', 'AZ26': 'Kalbacar', 'AZ25': 'Ismayilli', 'AZ24': 'Imisli', 'AZ23': 'Haciqabul', 'AZ22': 'Goycay', 'AZ21': 'Goranboy', 'AZ20': 'Ganca', 'NG29': 'Kano', 'NG28': 'Imo', 'DZ19': 'Biskra', 'DZ18': 'Bejaia', 'DZ15': 'Tlemcen', 'DZ14': 'Tizi Ouzou', 'NG21': 'Akwa Ibom', 'NG27': 'Borno', 'DZ10': 'Saida', 'DZ13': 'Tiaret', 'DZ12': 'Setif', 'NA05': 'Grootfontein', 'FI14': 'Eastern Finland', 'FI15': 'Western Finland', 'FI13': 'Southern Finland', 'VU10': 'Malakula', 'VU11': 'Paama', 'VU12': 'Pentecote', 'VU13': 'Sanma', 'VU14': 'Shepherd', 'VU15': 'Tafea', 'VU16': 'Malampa', 'VU17': 'Penama', 'NG57': 'Zamfara', 'HT13': 'Sud-Est', 'HT12': 'Sud', 'HT11': 'Ouest', 'HT10': 'Nord-Est', 'BA02': 'Republika Srpska', 'BA01': 'Federation of Bosnia and Herzegovina', 'SIC2': 'Slovenj Gradec', 'USLA': 'Louisiana', 'LC09': 'Soufriere', 'LC08': 'Micoud', 'LC03': 'Castries', 'LC02': 'Dauphin', 'LC01': 'Anse-la-Raye', 'LC07': 'Laborie', 'LC06': 'Gros-Islet', 'LC05': 'Dennery', 'LC04': 'Choiseul', 'ES57': 'Extremadura', 'MW23': 'Zomba', 'MW20': 'Ntchisi', 'MW21': 'Rumphi', 'ES53': 'Canarias', 'MW27': 'Likoma', 'MW24': 'Blantyre', 'MW25': 'Mwanza', 'MW28': 'Machinga', 'MW29': 'Mulanje', 'ES59': 'Pais Vasco', 'ES58': 'Galicia', 'YE15': 'Sa', 'YE14': "Ma'rib", 'YE16': 'San', 'YE11': 'Dhamar', 'YE10': 'Al Mahwit', 'MN19': 'Uvs', 'UG29': 'Bushenyi', 'BF34': 'Passore', 'BF36': 'Sanguie', 'BF33': 'Oudalan', 'MN13': 'Hovsgol', 'AL50': 'Tirane', 'AL51': 'Vlore', 'HR20': 'Zagrebacka', 'HR21': 'Grad Zagreb', 'LY49': 'Al Jabal al Akhdar', 'LY48': 'Al Fatih', 'LY45': 'Zlitan', 'LY47': 'Ajdabiya', 'LY41': 'Tarhunah', 'LY42': 'Tubruq', 'CA14': 'Nunavut', 'CA13': 'Northwest Territories', 'CA12': 'Yukon Territory', 'CA11': 'Saskatchewan', 'CA10': 'Quebec', 'FR99': 'Basse-Normandie', 'FR98': 'Auvergne', 'FR97': 'Aquitaine', 'MD69': 'Criuleni', 'MD68': 'Cimislia', 'GBL9': 'Sheffield', 'GBL8': 'Sefton', 'GBL7': 'Sandwell', 'GBL6': 'Shropshire', 'GBL5': 'Salford', 'GBL4': 'Rutland', 'GBL3': 'Rotherham', 'GBL2': 'Rochdale', 'GBL1': 'Richmond upon Thames', 'MD66': 'Calarasi', 'RU38': 'Krasnodar', 'RU39': 'Krasnoyarsk', 'SIF2': 'Ziri', 'SIF3': 'Zrece', 'SIF1': 'Zelezniki', 'RU32': 'Khanty-Mansiy', 'RU33': 'Kirov', 'RU30': 'Khabarovsk', 'RU31': 'Khakass', 'RU36': 'Koryak', 'RU37': 'Kostroma', 'RU34': 'Komi', 'RU35': 'Komi-Permyak', 'RW12': 'Kigali', 'RW13': 'Nord', 'RW11': 'Est', 'RW14': 'Ouest', 'RW15': 'Sud', 'BO08': 'Santa Cruz', 'MO01': 'Ilhas', 'PH71': 'Sultan Kudarat', 'PH70': 'South Cotabato', 'PH72': 'Tawitawi', 'CO37': 'Caldas', 'CO36': 'Boyaca', 'CO35': 'Bolivar', 'CO34': 'Distrito Especial', 'CO33': 'Cundinamarca', 'CO32': 'Casanare', 'CO31': 'Vichada', 'CO30': 'Vaupes', 'AZ12': 'Beylaqan', 'AZ13': 'Bilasuvar', 'AZ10': 'Balakan', 'AZ11': 'Barda', 'AZ16': 'Daskasan', 
'AZ17': 'Davaci', 'AZ14': 'Cabrayil', 'AZ15': 'Calilabad', 'WS02': 'Aiga-i-le-Tai', 'WS03': 'Atua', 'WS06': 'Va', 'WS07': 'Gagaifomauga', 'WS04': 'Fa', 'WS05': 'Gaga', 'WS08': 'Palauli', 'WS09': 'Satupa', 'USMT': 'Montana', 'USMS': 'Mississippi', 'USMP': 'Northern Mariana Islands', 'USMO': 'Missouri', 'FI06': 'Lapland', 'FI01': 'Aland', 'USMH': 'Marshall Islands', 'USME': 'Maine', 'USMD': 'Maryland', 'USMA': 'Massachusetts', 'FI08': 'Oulu', 'PK07': 'Northern Areas', 'PK06': 'Azad Kashmir', 'PK05': 'Sindh', 'PK04': 'Punjab', 'RO38': 'Vaslui', 'RO39': 'Valcea', 'PK01': 'Federally Administered Tribal Areas', 'RO34': 'Suceava', 'RO35': 'Teleorman', 'RO36': 'Timis', 'RO37': 'Tulcea', 'RO30': 'Prahova', 'RO31': 'Salaj', 'RO32': 'Satu Mare', 'RO33': 'Sibiu', 'IR38': 'Qazvin', 'IR39': 'Qom', 'MD51': 'Gagauzia', 'IR34': 'Markazi', 'IR35': 'Mazandaran', 'IR36': 'Zanjan', 'IR37': 'Golestan', 'IR30': 'Khorasan', 'IR31': 'Yazd', 'IR32': 'Ardabil', 'IR33': 'East Azarbaijan', 'MD57': 'Chisinau', 'BB02': 'Saint Andrew', 'BB03': 'Saint George', 'NO20': 'Vestfold', 'BB06': 'Saint Joseph', 'BB07': 'Saint Lucy', 'BB08': 'Saint Michael', 'BB09': 'Saint Peter', 'IS20': 'Nordur-Mulasysla', 'IS21': 'Nordur-Tingeyjarsysla', 'BF59': 'Koulpelogo', 'ES60': 'Comunidad Valenciana', 'MK47': 'Konce', 'MW30': 'Phalombe', 'MK48': 'Kondovo', 'MK49': 'Konopiste', 'BF48': 'Bougouriba', 'BF49': 'Boulgou', 'SC22': 'Saint Louis', 'SC23': 'Takamaka', 'BF40': 'Soum', 'BF42': 'Tapoa', 'BF44': 'Zoundweogo', 'BF45': 'Bale', 'BF46': 'Banwa', 'BF47': 'Bazega', 'AL49': 'Shkoder', 'AL48': 'Lezhe', 'AL47': 'Kukes', 'AL46': 'Korce', 'AL45': 'Gjirokaster', 'AL44': 'Fier', 'AL43': 'Elbasan', 'AL42': 'Durres', 'AL41': 'Diber', 'AL40': 'Berat', 'LK10': 'Kandy', 'LK11': 'Kegalla', 'LK12': 'Kurunegala', 'LK14': 'Matale', 'LK15': 'Matara', 'LK16': 'Moneragala', 'LK17': 'Nuwara Eliya', 'LK18': 'Polonnaruwa', 'LK19': 'Puttalam', 'LV18': 'Limbazu', 'CF11': 'Ouaka', 'CF12': 'Ouham', 'CF13': 'Ouham-Pende', 'CF14': 'Cuvette-Ouest', 'CF15': 'Nana-Grebizi', 'CF16': 'Sangha-Mbaere', 'CF17': 'Ombella-Mpoko', 'CF18': 'Bangui', 'LY30': 'Murzuq', 'LY34': 'Sabha', 'LR01': 'Bong', 'LR06': 'Maryland', 'LR07': 'Monrovia', 'LR04': 'Grand Cape Mount', 'LR05': 'Lofa', 'LR09': 'Nimba', 'CANB': 'New Brunswick', 'RU25': 'Kaluga', 'RU24': 'Kalmyk', 'GBO8': 'Walsall', 'VN83': 'Phu Tho', 'RU21': 'Ivanovo', 'RU20': 'Irkutsk', 'RU23': 'Kaliningrad', 'RU22': 'Kabardin-Balkar', 'GBO2': 'Telford and Wrekin', 'GBO3': 'Thurrock', 'GBO1': 'Tameside', 'RU29': 'Kemerovo', 'RU28': 'Karelia', 'GBO4': 'Torbay', 'GBO5': 'Tower Hamlets', 'SIG4': 'Dobrova-Horjul-Polhov Gradec', 'SIG7': 'Domzale', 'CANL': 'Newfoundland', 'PH40': 'Mindoro Occidental', 'RW09': 'Kigali', 'ZA11': 'Western Cape', 'RW07': 'Kibungo', 'RW06': 'Gitarama', 'RW01': 'Butare', 'GA01': 'Estuaire', 'GA02': 'Haut-Ogooue', 'GA03': 'Moyen-Ogooue', 'GA04': 'Ngounie', 'GA05': 'Nyanga', 'GA06': 'Ogooue-Ivindo', 'GA07': 'Ogooue-Lolo', 'GA08': 'Ogooue-Maritime', 'GA09': 'Woleu-Ntem', 'TT08': 'Saint George', 'TT09': 'Saint Patrick', 'TT04': 'Nariva', 'TT05': 'Port-of-Spain', 'TT06': 'Saint Andrew', 'TT07': 'Saint David', 'TT01': 'Arima', 'TT02': 'Caroni', 'TT03': 'Mayaro', 'CANS': 'Nova Scotia', 'AZ05': 'Agstafa', 'AZ04': 'Agdas', 'AZ07': 'Ali Bayramli', 'AZ06': 'Agsu', 'AZ01': 'Abseron', 'AZ03': 'Agdam', 'AZ02': 'Agcabadi', 'AZ09': 'Baki', 'AZ08': 'Astara', 'CO02': 'Antioquia', 'CO03': 'Arauca', 'CO01': 'Amazonas', 'CO06': 'Boyaca Department', 'CO07': 'Caldas Department', 'CO04': 'Atlantico', 'CO05': 'Bolivar Department', 
'CO08': 'Caqueta', 'CO09': 'Cauca', 'WS11': 'Vaisigano', 'WS10': 'Tuamasaga', 'GQ03': 'Annobon', 'GQ04': 'Bioko Norte', 'GQ05': 'Bioko Sur', 'GQ06': 'Centro Sur', 'GQ07': 'Kie-Ntem', 'GQ08': 'Litoral', 'GQ09': 'Wele-Nzas', 'TZ24': 'Rukwa', 'TZ25': 'Zanzibar Urban', 'TZ26': 'Arusha', 'TZ27': 'Manyara', 'TZ20': 'Pemba South', 'TZ21': 'Zanzibar Central', 'RO29': 'Olt', 'TZ23': 'Dar es Salaam', 'RO27': 'Mures', 'RO26': 'Mehedinti', 'RO25': 'Maramures', 'RO23': 'Iasi', 'RO22': 'Ialomita', 'RO21': 'Hunedoara', 'RO20': 'Harghita', 'IR29': 'Kerman', 'IR28': 'Esfahan', 'IR27': 'Zanjan', 'IR26': 'Tehran', 'IR25': 'Semnan', 'IR24': 'Markazi', 'IR23': 'Lorestan', 'IR22': 'Bushehr', 'IR21': 'Zanjan', 'LY56': 'Ghadamis', 'USNY': 'New York', 'USNV': 'Nevada', 'USNJ': 'New Jersey', 'USNH': 'New Hampshire', 'USNM': 'New Mexico', 'USNC': 'North Carolina', 'USND': 'North Dakota', 'USNE': 'Nebraska', 'VU18': 'Shefa', 'NO18': 'Troms', 'NO19': 'Vest-Agder', 'NO16': 'Sor-Trondelag', 'NO17': 'Telemark', 'NO14': 'Rogaland', 'NO15': 'Sogn og Fjordane', 'NO12': 'Oslo', 'NO13': 'Ostfold', 'NO10': 'Nord-Trondelag', 'NO11': 'Oppland', 'NG49': 'Plateau', 'MK44': 'Kisela Voda', 'MK45': 'Klecevce', 'MK46': 'Kocani', 'BF58': 'Kossi', 'MK40': 'Karbinci', 'MK41': 'Karpos', 'MK42': 'Kavadarci', 'MK43': 'Kicevo', 'BF53': 'Kadiogo', 'BF52': 'Ioba', 'BF51': 'Houet', 'BF50': 'Gourma', 'BF57': 'Kompienga', 'BF56': 'Komondjari', 'BF55': 'Komoe', 'BF54': 'Kenedougou', 'BY03': "Hrodzyenskaya Voblasts'", 'BY02': "Homyel'skaya Voblasts'", 'BY01': "Brestskaya Voblasts'", 'BY07': "Vitsyebskaya Voblasts'", 'BY06': "Mahilyowskaya Voblasts'", 'BY05': "Minskaya Voblasts'", 'BY04': 'Minsk', 'LK03': 'Badulla', 'LK02': 'Anuradhapura', 'LK01': 'Amparai', 'LK07': 'Hambantota', 'LK06': 'Galle', 'LK04': 'Batticaloa', 'LK09': 'Kalutara', 'CANT': 'Northwest Territories', 'CANU': 'Nunavut', 'PSGZ': 'Gaza', 'NG43': 'Taraba', 'NG42': 'Osun', 'CF03': 'Haute-Kotto', 'CF02': 'Basse-Kotto', 'CF01': 'Bamingui-Bangoran', 'CF07': 'Lobaye', 'CF06': 'Kemo', 'CF05': 'Haut-Mbomou', 'CF04': 'Mambere-Kadei', 'CF09': 'Nana-Mambere', 'CF08': 'Mbomou', 'KI01': 'Gilbert Islands', 'KI02': 'Line Islands', 'KI03': 'Phoenix Islands', 'HR08': 'Licko-Senjska', 'HR09': 'Medimurska', 'HR06': 'Koprivnicko-Krizevacka', 'HR07': 'Krapinsko-Zagorska', 'HR04': 'Istarska', 'HR05': 'Karlovacka', 'HR02': 'Brodsko-Posavska', 'HR03': 'Dubrovacko-Neretvanska', 'HR01': 'Bjelovarsko-Bilogorska', 'RU50': 'Nenets', 'LR19': 'Grand Gedeh', 'LR18': 'River Cess', 'VN92': 'Dien Bien', 'LR14': 'Montserrado', 'LR17': 'Margibi', 'LR11': 'Grand Bassa', 'LR10': 'Sino', 'LR13': 'Maryland', 'LR12': 'Grand Cape Mount', 'GBN5': 'Suffolk', 'GBN4': 'Stoke-on-Trent', 'GBN7': 'Surrey', 'GBN6': 'Sunderland', 'GBN1': 'St. 
Helens', 'GBN3': 'Stockton-on-Tees', 'GBN2': 'Stockport', 'GBN9': 'Swindon', 'GBN8': 'Sutton', 'VN93': 'Hau Giang', 'RU51': 'Nizhegorod', 'VN91': 'Dak Nong', 'VN90': 'Lao Cai', 'RU54': 'Omsk', 'RU55': 'Orenburg', 'RU56': 'Orel', 'RU57': 'Penza', 'RU58': "Perm'", 'RU59': "Primor'ye", 'SIH4': 'Jesenice', 'SIH6': 'Kamnik', 'SIH7': 'Kocevje', 'SH01': 'Ascension', 'SH03': 'Tristan da Cunha', 'SH02': 'Saint Helena', 'LI22': 'River Gee', 'LI21': 'Gbarpolu', 'TT12': 'Victoria', 'TT11': 'Tobago', 'TT10': 'San Fernando', 'AZ70': 'Zaqatala', 'AZ71': 'Zardab', 'BS30': 'Kemps Bay', 'BS31': 'Marsh Harbour', 'BS32': 'Nichollstown and Berry Islands', 'BS33': 'Rock Sound', 'CO19': 'Meta', 'DZ43': 'El Oued', 'DZ40': 'Boumerdes', 'DZ41': 'Chlef', 'CO15': 'Guainia', 'CO14': 'Guaviare', 'CO17': 'La Guajira', 'CO16': 'Huila', 'CO11': 'Choco', 'CO10': 'Cesar', 'DZ48': 'Mila', 'DZ49': 'Naama', 'USOH': 'Ohio', 'USOK': 'Oklahoma', 'RO18': 'Galati', 'RO19': 'Gorj', 'USOR': 'Oregon', 'RO12': 'Caras-Severin', 'RO13': 'Cluj', 'RO10': 'Bucuresti', 'RO11': 'Buzau', 'RO16': 'Dambovita', 'RO17': 'Dolj', 'RO14': 'Constanta', 'RO15': 'Covasna', 'IR18': 'Semnan Province', 'IR19': 'Markazi', 'IR12': 'Kerman', 'IR13': 'Bakhtaran', 'IR10': 'Ilam', 'IR11': 'Hormozgan', 'IR16': 'Kordestan', 'IR17': 'Mazandaran', 'IR15': 'Khuzestan', 'NA14': 'Outjo', 'NA15': 'Owambo', 'NA16': 'Rehoboth', 'NA17': 'Swakopmund', 'NA10': 'Maltahohe', 'NA11': 'Okahandja', 'NA12': 'Omaruru', 'NA13': 'Otjiwarongo', 'NA18': 'Tsumeb', 'VE09': 'Delta Amacuro', 'BH08': 'Al Mintaqah al Gharbiyah', 'BH09': 'Mintaqat Juzur Hawar', 'BH02': 'Al Manamah', 'BH01': 'Al Hadd', 'BH06': 'Sitrah', 'BH05': 'Jidd Hafs', 'BD86': 'Sylhet', 'BD84': 'Chittagong', 'BD85': 'Barisal', 'BD82': 'Khulna', 'BD83': 'Rajshahi', 'BD81': 'Dhaka', 'AF26': 'Takhar', 'NO09': 'Nordland', 'NO08': 'More og Romsdal', 'VU09': 'Epi', 'VU08': 'Efate', 'NO01': 'Akershus', 'VE07': 'Carabobo', 'NO02': 'Aust-Agder', 'NO05': 'Finnmark', 'NO04': 'Buskerud', 'NO07': 'Hordaland', 'NO06': 'Hedmark', 'KN13': 'Saint Thomas Middle Island', 'AF29': 'Paktika', 'AF28': 'Zabol', 'ES07': 'Islas Baleares', 'SY04': 'Ar Raqqah', 'SY05': "As Suwayda'", 'SY06': 'Dar', 'SY07': 'Dayr az Zawr', 'SY01': 'Al Hasakah', 'SY02': 'Al Ladhiqiyah', 'SY03': 'Al Qunaytirah', 'SY08': 'Rif Dimashq', 'SY09': 'Halab', 'BF66': 'Nayala', 'BF67': 'Noumbiel', 'BF64': 'Namentenga', 'BF65': 'Naouri', 'BF62': 'Loroum', 'BF63': 'Mouhoun', 'BF60': 'Kourweogo', 'BF61': 'Leraba', 'BF68': 'Oubritenga', 'BF69': 'Poni', 'MK57': 'Kumanovo', 'MK56': 'Kukurecani', 'MK55': 'Kuklis', 'MK54': 'Krusevo', 'MK53': 'Krivogastani', 'MK52': 'Kriva Palanka', 'MK51': 'Kratovo', 'SC09': 'Bel Air', 'SC06': 'Baie Lazare', 'SC07': 'Baie Sainte Anne', 'SC04': 'Anse Louis', 'SC05': 'Anse Royale', 'SC02': 'Anse Boileau', 'SC03': 'Anse Etoile', 'MK59': 'Lipkovo', 'MK58': 'Labunista', 'LK36': 'Western', 'LK34': 'Southern', 'LK35': 'Uva', 'LK32': 'North Western', 'LK33': 'Sabaragamuwa', 'LK30': 'North Central', 'LK31': 'Northern', 'AM11': 'Yerevan', 'LY13': "Ash Shati'", 'HR19': 'Zadarska', 'HR18': 'Vukovarsko-Srijemska', 'HR11': 'Pozesko-Slavonska', 'HR10': 'Osjecko-Baranjska', 'HR13': 'Sibensko-Kninska', 'HR12': 'Primorsko-Goranska', 'HR15': 'Splitsko-Dalmatinska', 'HR14': 'Sisacko-Moslavacka', 'HR17': 'Viroviticko-Podravska', 'HR16': 'Varazdinska', 'GBI1': 'Luton', 'GBI2': 'Manchester', 'GBI3': 'Medway', 'GBI4': 'Merton', 'GBI5': 'Middlesbrough', 'GBI6': 'Milton Keynes', 'GBI7': 'Newcastle upon Tyne', 'GBI8': 'Newham', 'GBI9': 'Norfolk', 'RU43': 'Lipetsk', 'ID10': 
'Yogyakarta', 'RU41': 'Kursk', 'RU40': 'Kurgan', 'RU47': 'Moskva', 'RU46': 'Mordovia', 'RU45': 'Mariy-El', 'RU44': 'Magadan', 'RU49': 'Murmansk', 'RU48': 'Moscow City', 'ID12': 'Kalimantan Selatan', 'ID13': 'Kalimantan Tengah', 'SII9': 'Luce', 'SII7': 'Loska Dolina', 'SII6': 'Ljutomer', 'SII5': 'Litija', 'SII3': 'Lenart', 'SII2': 'Kuzma', 'CAON': 'Ontario', 'LI10': 'Triesenberg', 'LI11': 'Vaduz', 'DZ46': 'Illizi', 'DZ47': 'Khenchela', 'DZ44': 'El Tarf', 'DZ45': 'Ghardaia', 'CD12': 'Sud-Kivu', 'CD10': 'Maniema', 'CD11': 'Nord-Kivu', 'DZ42': 'El Bayadh', 'BS35': 'San Salvador and Rum Cay', 'AZ63': 'Xizi', 'SV10': 'San Salvador', 'AZ61': 'Xankandi', 'AZ60': 'Xacmaz', 'AZ67': 'Yevlax', 'AZ66': 'Yardimli', 'AZ65': 'Xocavand', 'AZ64': 'Xocali', 'AZ69': 'Zangilan', 'AZ68': 'Yevlax', 'SV12': 'San Vicente', 'BS23': 'New Providence', 'BS22': 'Harbour Island', 'DZ53': 'Tamanghasset', 'DZ52': 'Souk Ahras', 'BS27': "Governor's Harbour", 'BS26': 'Fresh Creek', 'BS25': 'Freeport', 'BS24': 'Acklins and Crooked Islands', 'BS29': 'High Rock', 'BS28': 'Green Turtle Cay', 'CO12': 'Cordoba', 'BO03': 'El Beni', 'GBU8': 'Edinburgh', 'BO02': 'Cochabamba', 'GBU9': 'Falkirk', 'RO05': 'Bihor', 'RO04': 'Bacau', 'RO07': 'Botosani', 'MY09': 'Pulau Pinang', 'RO01': 'Alba', 'RO03': 'Arges', 'RO02': 'Arad', 'MY02': 'Kedah', 'MY03': 'Kelantan', 'MY01': 'Johor', 'MY06': 'Pahang', 'MY07': 'Perak', 'MY04': 'Melaka', 'MY05': 'Negeri Sembilan', 'TZ02': 'Pwani', 'TZ03': 'Dodoma', 'TZ06': 'Kilimanjaro', 'TZ07': 'Lindi', 'TZ04': 'Iringa', 'TZ05': 'Kigoma', 'TZ08': 'Mara', 'TZ09': 'Mbeya', 'BH15': 'Al Muharraq', 'NA06': 'Kaokoland', 'AT09': 'Wien', 'NA04': 'Gobabis', 'BH11': 'Al Mintaqah al Wusta', 'BH10': 'Al Mintaqah ash Shamaliyah', 'NA01': 'Bethanien', 'BH12': 'Madinat', 'AT03': 'Niederosterreich', 'AT02': 'Karnten', 'AT01': 'Burgenland', 'BH19': 'Al Wusta', 'BH18': 'Ash Shamaliyah', 'AT05': 'Salzburg', 'NA08': 'Keetmanshoop', 'GW06': 'Cacheu', 'GW07': 'Tombali', 'GW04': 'Oio', 'GW05': 'Bolama', 'GR50': 'Khios', 'GR51': 'Lesvos', 'GW01': 'Bafata', 'USHI': 'Hawaii', 'IR05': 'Kohkiluyeh va Buyer Ahmadi', 'IR04': 'Sistan va Baluchestan', 'IR07': 'Fars', 'IR01': 'Azarbayjan-e Bakhtari', 'IR03': 'Chahar Mahall va Bakhtiari', 'IR09': 'Hamadan', 'IR08': 'Gilan', 'TR28': 'Giresun', 'TR24': 'Erzincan', 'TR25': 'Erzurum', 'TR26': 'Eskisehir', 'TR20': 'Denizli', 'TR21': 'Diyarbakir', 'TR22': 'Edirne', 'TR23': 'Elazig', 'SI47': 'Kobilje', 'SY14': 'Tartus', 'SY13': 'Dimashq', 'SY12': 'Idlib', 'SY11': 'Hims', 'SY10': 'Hamah', 'ID41': 'Sulawesi Barat', 'ID40': 'Kepulauan Riau', 'BF71': 'Seno', 'BF70': 'Sanmatenga', 'BF73': 'Sourou', 'BF72': 'Sissili', 'BF75': 'Yagha', 'BF74': 'Tuy', 'BF77': 'Ziro', 'BF76': 'Yatenga', 'DO14': 'Maria Trinidad Sanchez', 'DO15': 'Monte Cristi', 'DO16': 'Pedernales', 'DO17': 'Peravia', 'DO10': 'La Altagracia', 'DO11': 'Elias Pina', 'DO12': 'La Romana', 'MK62': 'Makedonska Kamenica', 'MK63': 'Makedonski Brod', 'MK60': 'Lozovo', 'MK61': 'Lukovo', 'MK66': 'Miravci', 'MK67': 'Mogila', 'MK64': 'Mavrovi Anovi', 'MK65': 'Meseista', 'SC11': 'Cascade', 'SC10': 'Bel Ombre', 'MK68': 'Murtino', 'MK69': 'Negotino', 'SC15': 'La Digue', 'SC14': "Grand' Anse", 'SC17': 'Mont Buxton', 'SC16': 'La Riviere Anglaise', 'NI18': 'Region Autonoma Atlantico Sur', 'NI14': 'Rio San Juan', 'NI15': 'Rivas', 'NI16': 'Zelaya', 'NI17': 'Autonoma Atlantico Norte', 'NI10': 'Managua', 'NI11': 'Masaya', 'NI12': 'Matagalpa', 'NI13': 'Nueva Segovia', 'LK29': 'Central', 'LK28': 'Vavuniya', 'LK21': 'Trincomalee', 'LK20': 'Ratnapura', 'LK23': 'Colombo', 
'LK25': 'Jaffna', 'LK24': 'Gampaha', 'LK27': 'Mullaittivu', 'LK26': 'Mannar', 'CM04': 'Est', 'CM05': 'Littoral', 'LY03': 'Al Aziziyah', 'LY05': 'Al Jufrah', 'LY08': 'Al Kufrah', 'SO05': 'Galguduud', 'GBH3': 'Leeds', 'GBH2': 'Lancashire', 'GBH1': 'Lambeth', 'GBH7': 'Lincolnshire', 'GBH6': 'Lewisham', 'GBH5': 'Leicestershire', 'GBH4': 'Leicester', 'GBH9': 'London', 'GBH8': 'Liverpool', 'RU78': "Tyumen'", 'RU79': 'Tuva', 'RU76': 'Tula', 'RU77': "Tver'", 'RU74': 'Taymyr', 'RU75': 'Tomsk', 'RU72': 'Tambovskaya oblast', 'RU73': 'Tatarstan', 'RU70': "Stavropol'", 'RU71': 'Sverdlovsk', 'SIJ9': 'Piran', 'SIJ2': 'Maribor', 'SIJ1': 'Majsperk', 'SIJ7': 'Novo Mesto', 'SIJ5': 'Miren-Kostanjevica', 'LI01': 'Balzers', 'AE03': 'Dubai', 'LI03': 'Gamprin', 'LI02': 'Eschen', 'LI05': 'Planken', 'LI04': 'Mauren', 'LI07': 'Schaan', 'LI06': 'Ruggell', 'LI09': 'Triesen', 'LI08': 'Schellenberg', 'KG02': 'Chuy', 'KG03': 'Jalal-Abad', 'UG28': 'Bundibugyo', 'CD02': 'Equateur', 'KG06': 'Talas', 'KG07': 'Ysyk-Kol', 'KG04': 'Naryn', 'KG05': 'Osh', 'CD09': 'Orientale', 'CD08': 'Bas-Congo', 'KG08': 'Osh', 'KG09': 'Batken', 'UG26': 'Apac', 'AZ58': 'Tovuz', 'AZ59': 'Ucar', 'AZ56': 'Susa', 'AZ57': 'Tartar', 'AZ54': 'Sumqayit', 'AZ55': 'Susa', 'AZ52': 'Samux', 'AZ53': 'Siyazan', 'AZ50': 'Samaxi', 'AZ51': 'Samkir', 'MC03': 'Monte-Carlo', 'BS18': 'Ragged Island', 'BS16': 'Mayaguana', 'BS15': 'Long Island', 'BS13': 'Inagua', 'BS10': 'Exuma', 'KY07': 'West End', 'MO02': 'Macau', 'MY15': 'Labuan', 'MY14': 'Kuala Lumpur', 'MY17': 'Putrajaya', 'MY16': 'Sabah', 'MY11': 'Sarawak', 'MY13': 'Terengganu', 'MY12': 'Selangor', 'USIA': 'Iowa', 'USID': 'Idaho', 'USIN': 'Indiana', 'USIL': 'Illinois', 'PH53': 'Rizal', 'TZ19': 'Kagera', 'TZ18': 'Tanga', 'TZ15': 'Shinyanga', 'TZ14': 'Ruvuma', 'TZ17': 'Tabora', 'TZ16': 'Singida', 'TZ11': 'Mtwara', 'TZ10': 'Morogoro', 'TZ13': 'Pemba North', 'TZ12': 'Mwanza', 'NA32': 'Kunene', 'NA33': 'Ohangwena', 'NA30': 'Hardap', 'NA31': 'Karas', 'NA36': 'Omusati', 'NA37': 'Oshana', 'NA34': 'Okavango', 'NA35': 'Omaheke', 'LA17': 'Louangphrabang', 'NA38': 'Oshikoto', 'NA39': 'Otjozondjupa', 'LA13': 'Xaignabouri', 'LA10': 'Savannakhet', 'LA11': 'Vientiane', 'BJ08': 'Atakora', 'BJ09': 'Atlanyique', 'BJ07': 'Alibori', 'GW11': 'Bissau', 'GW10': 'Gabu', 'GR45': 'Iraklion', 'GW12': 'Biombo', 'GR43': 'Khania', 'GR42': 'Lakonia', 'GR41': 'Arkadhia', 'GR40': 'Messinia', 'GR49': 'Kikladhes', 'GR48': 'Samos', 'USVI': 'Virgin Islands', 'CO18': 'Magdalena Department', 'ES27': 'La Rioja', 'ES29': 'Madrid', 'TR39': 'Kirklareli', 'TR38': 'Kayseri', 'TR37': 'Kastamonu', 'TR35': 'Izmir', 'TR34': 'Istanbul', 'TR33': 'Isparta', 'TR32': 'Mersin', 'TR31': 'Hatay', 'PL81': 'Podlaskie', 'PL83': 'Slaskie', 'NZE9': 'Canterbury', 'NZE8': 'Bay of Plenty', 'NZE7': 'Auckland', 'DO09': 'Independencia', 'DO08': 'Espaillat', 'TH56': 'Phetchaburi', 'DO06': 'Duarte', 'DO05': 'Distrito Nacional', 'DO04': 'Dajabon', 'DO03': 'Barahona', 'DO02': 'Baoruco', 'DO01': 'Azua', 'MK79': 'Petrovec', 'MK78': 'Pehcevo', 'MK75': 'Orasac', 'MK74': 'Ohrid', 'MK77': 'Oslomej', 'MK76': 'Orizari', 'MK71': 'Novaci', 'MK70': 'Negotino-Polosko', 'MK73': 'Oblesevo', 'MK72': 'Novo Selo', 'RU52': 'Novgorod', 'NI09': 'Madriz', 'NI08': 'Leon', 'NI07': 'Jinotega', 'NI06': 'Granada', 'NI05': 'Esteli', 'NI04': 'Chontales', 'NI03': 'Chinandega', 'NI02': 'Carazo', 'NI01': 'Boaco', 'RU53': 'Novosibirsk', 'IQ03': 'Al Muthanna', 'HU41': 'Salgotarjan', 'HU40': 'Zalaegerszeg', 'HU43': 'Erd', 'HU42': 'Szekszard', 'GBK8': 'Redbridge', 'GBK9': 'Redcar and Cleveland', 'GBK6': 'Portsmouth', 
'GBK7': 'Reading', 'GBK4': 'Plymouth', 'GBK5': 'Poole', 'GBK2': 'Oxfordshire', 'GBK3': 'Peterborough', 'GBK1': 'Oldham', 'RU69': 'Smolensk', 'RU68': 'North Ossetia', 'SIK7': 'Ptuj', 'RU61': 'Rostov', 'RU60': 'Pskov', 'RU63': 'Sakha', 'RU62': "Ryazan'", 'RU65': 'Samara', 'RU64': 'Sakhalin', 'RU67': 'Saratov', 'RU66': 'Saint Petersburg City', 'SA08': 'Al Qasim', 'SA09': 'Al Qurayyat', 'SA05': 'Al Madinah', 'SA06': 'Ash Sharqiyah', 'SA02': 'Al Bahah', 'SA03': 'Al Jawf', 'SO20': 'Woqooyi Galbeed', 'SO21': 'Awdal', 'SO22': 'Sool', 'CAAB': 'Alberta', 'UG37': 'Kampala', 'UG36': 'Kalangala', 'UG31': 'Hoima', 'UG30': 'Gulu', 'UG33': 'Jinja', 'UG39': 'Kapchorwa', 'UG38': 'Kamuli', 'AZ49': 'Salyan', 'AZ48': 'Saki', 'AZ41': 'Qobustan', 'AZ40': 'Qazax', 'AZ43': 'Qubadli', 'AZ42': 'Quba', 'AZ45': 'Saatli', 'AZ44': 'Qusar', 'AZ47': 'Saki', 'AZ46': 'Sabirabad', 'BS05': 'Bimini', 'BS06': 'Cat Island', 'MA46': 'Fes-Boulemane', 'MA47': 'Marrakech-Tensift-Al Haouz', 'MA45': 'Grand Casablanca', 'MA48': 'Meknes-Tafilalet', 'MA49': 'Rabat-Sale-Zemmour-Zaer', 'VU07': 'Torba', 'VU06': 'Aoba', 'VU05': 'Ambrym', 'LA09': 'Saravan', 'LA08': 'Phongsali', 'NA29': 'Erongo', 'NA28': 'Caprivi', 'LA01': 'Attapu', 'NA24': 'Hereroland Wes', 'LA03': 'Houaphan', 'NA26': 'Mariental', 'LA05': 'Louang Namtha', 'LA04': 'Khammouan', 'LA07': 'Oudomxai', 'NA22': 'Damaraland', 'MR12': 'Inchiri', 'MR10': 'Guidimaka', 'MR11': 'Tiris Zemmour', 'BJ18': 'Zou', 'BJ13': 'Donga', 'BJ12': 'Kouffo', 'BJ11': 'Collines', 'BJ10': 'Borgou', 'BJ17': 'Plateau', 'BJ16': 'Oueme', 'BJ15': 'Mono', 'BJ14': 'Littoral', 'CU08': 'Cienfuegos', 'CU09': 'Granma', 'CU01': 'Pinar del Rio', 'CU02': 'Ciudad de la Habana', 'CU03': 'Matanzas', 'CU04': 'Isla de la Juventud', 'CU05': 'Camaguey', 'CU07': 'Ciego de Avila', 'GE04': 'Ajaria', 'GE05': 'Akhalgoris Raioni', 'GE06': "Akhalk'alak'is Raioni", 'GE07': "Akhalts'ikhis Raioni", 'GE01': 'Abashis Raioni', 'GE02': 'Abkhazia', 'GE03': 'Adigenis Raioni', 'GE08': 'Akhmetis Raioni', 'GE09': 'Ambrolauris Raioni', 'ES31': 'Murcia', 'ES32': 'Navarra', 'ES34': 'Asturias', 'ES39': 'Cantabria', 'TR08': 'Artvin', 'TR09': 'Aydin', 'TR02': 'Adiyaman', 'TR03': 'Afyonkarahisar', 'TR07': 'Antalya', 'TR04': 'Agri', 'TR05': 'Amasya', 'USMI': 'Michigan', 'DO32': 'Monte Plata', 'DO33': 'San Cristobal', 'DO30': 'La Vega', 'DO31': 'Monsenor Nouel', 'DO36': 'San Jose de Ocoa', 'DO37': 'Santo Domingo', 'DO34': 'Distrito Nacional', 'DO35': 'Peravia', 'MK08': 'Bogdanci', 'MK09': 'Bogomila', 'MK01': 'Aracinovo', 'MK02': 'Bac', 'MK03': 'Belcista', 'MK04': 'Berovo', 'MK05': 'Bistrica', 'MK06': 'Bitola', 'MK07': 'Blatec', 'CABC': 'British Columbia', 'RU27': 'Karachay-Cherkess', 'NZF6': 'Northland', 'NZF7': 'Otago', 'NZF4': 'Marlborough', 'NZF5': 'Nelson', 'NZF2': "Hawke's Bay", 'NZF3': 'Manawatu-Wanganui', 'NZF1': 'Gisborne', 'VN46': 'Dong Thap', 'NZF8': 'Southland', 'NZF9': 'Taranaki', 'HU38': 'Tatabanya', 'HU39': 'Veszprem', 'AR14': 'Misiones', 'HU30': 'Kaposvar', 'HU31': 'Kecskemet', 'HU32': 'Nagykanizsa', 'HU33': 'Nyiregyhaza', 'HU34': 'Sopron', 'HU35': 'Szekesfehervar', 'HU36': 'Szolnok', 'HU37': 'Szombathely', 'GBJ9': 'Nottinghamshire', 'GBJ8': 'Nottingham', 'GBJ1': 'Northamptonshire', 'GBJ3': 'North Lincolnshire', 'GBJ2': 'North East Lincolnshire', 'GBJ5': 'North Tyneside', 'GBJ4': 'North Somerset', 'GBJ7': 'North Yorkshire', 'GBJ6': 'Northumberland', 'SIL1': 'Ribnica', 'DM10': 'Saint Paul', 'DM11': 'Saint Peter', 'SIL7': 'Sentjur pri Celju', 'SIL8': 'Slovenska Bistrica', 'SA19': 'Tabuk', 'SA17': 'Jizan', 'SA16': 'Najran', 'SA15': 'Al Hudud 
ash Shamaliyah', 'SA14': 'Makkah', 'SA13': "Ha'il", 'SD40': 'Al Wahadah State', 'SA10': 'Ar Riyad', 'UG40': 'Kasese', 'UG41': 'Kibale', 'UG42': 'Kiboga', 'UG43': 'Kisoro', 'UG45': 'Kotido', 'UG46': 'Kumi', 'UG47': 'Lira', 'CL12': 'Region Metropolitana', 'MA51': 'Doukkala-Abda', 'MA50': 'Chaouia-Ouardigha', 'MA53': 'Guelmim-Es Smara', 'MA52': 'Gharb-Chrarda-Beni Hssen', 'MA55': 'Souss-Massa-Dr', 'MA54': 'Oriental', 'MA57': 'Tanger-Tetouan', 'MA56': 'Tadla-Azilal', 'MA59': 'La', 'MA58': 'Taza-Al Hoceima-Taounate', 'CL13': 'Tarapaca', 'USKY': 'Kentucky', 'USKS': 'Kansas', 'MR09': 'Tagant', 'MR08': 'Dakhlet Nouadhibou', 'MR01': 'Hodh Ech Chargui', 'MR03': 'Assaba', 'MR02': 'Hodh El Gharbi', 'MR05': 'Brakna', 'MR04': 'Gorgol', 'MR07': 'Adrar', 'MR06': 'Trarza', 'BB11': 'Saint Thomas', 'BB10': 'Saint Philip', 'CU13': 'Las Tunas', 'CU12': 'Holguin', 'CU11': 'La Habana', 'CU10': 'Guantanamo', 'CU16': 'Villa Clara', 'CU15': 'Santiago de Cuba', 'CU14': 'Sancti Spiritus', 'GE17': "Dedop'listsqaros Raioni", 'GE16': "Ch'okhatauris Raioni", 'GE15': "Ch'khorotsqus Raioni", 'GE14': "Chiat'ura", 'GE13': 'Borjomis Raioni', 'GE12': 'Bolnisis Raioni', 'GE11': "Baghdat'is Raioni", 'GE10': 'Aspindzis Raioni', 'GE19': "Dushet'is Raioni", 'GE18': 'Dmanisis Raioni', 'TR15': 'Burdur', 'TR14': 'Bolu', 'TR17': 'Canakkale', 'TR16': 'Bursa', 'TR11': 'Bilecik', 'TR10': 'Balikesir', 'TR13': 'Bitlis', 'TR12': 'Bingol', 'TR19': 'Corum', 'BE10': 'Brabant Wallon', 'BE11': 'Brussels Hoofdstedelijk Gewest', 'BE12': 'Vlaams-Brabant', 'SC08': 'Beau Vallon', 'USTN': 'Tennessee', 'MK50': 'Kosel', 'DO25': 'Santiago', 'DO24': 'San Pedro De Macoris', 'DO27': 'Valverde', 'DO26': 'Santiago Rodriguez', 'DO21': 'Sanchez Ramirez', 'DO20': 'Samana', 'DO23': 'San Juan', 'DO29': 'Hato Mayor', 'DO28': 'El Seibo', 'SC01': 'Anse aux Pins', 'MK19': 'Cesinovo', 'MK18': 'Centar Zupa', 'MK13': 'Cair', 'MK12': 'Brvenica', 'MK11': 'Bosilovo', 'MK10': 'Bogovinje', 'MK17': 'Centar', 'MK16': 'Cegrane', 'MK15': 'Caska', 'MK14': 'Capari', 'BG33': 'Mikhaylovgrad', 'BG38': 'Blagoevgrad', 'BG39': 'Burgas', 'NZG1': 'Waikato', 'ID11': 'Kalimantan Barat', 'NZG3': 'West Coast', 'NZG2': 'Wellington', 'ID14': 'Kalimantan Timur', 'ID15': 'Lampung', 'ID16': 'Maluku', 'ID17': 'Nusa Tenggara Barat', 'ID18': 'Nusa Tenggara Timur', 'ID19': 'Riau', 'GBU4': 'East Ayrshire', 'SV11': 'Santa Ana', 'HU29': 'Hodmezovasarhely', 'SV13': 'Sonsonate', 'SV14': 'Usulutan', 'GBU1': 'Clackmannanshire', 'GBU2': 'Dumfries and Galloway', 'GBU3': 'Dundee City', 'HU23': 'Veszprem', 'HU22': 'Vas', 'HU21': 'Tolna', 'HU20': 'Jasz-Nagykun-Szolnok', 'HU27': 'Dunaujvaros', 'HU26': 'Bekescsaba', 'HU25': 'Gyor', 'HU24': 'Zala', 'MKC4': 'Zitose', 'DM09': 'Saint Patrick', 'DM08': 'Saint Mark', 'DM05': 'Saint John', 'DM04': 'Saint George', 'DM07': 'Saint Luke', 'DM06': 'Saint Joseph', 'DM03': 'Saint David', 'DM02': 'Saint Andrew', 'SD30': 'Ash Shamaliyah', 'SD31': 'Ash Sharqiyah', 'SA20': 'Al Jawf', 'SD33': 'Darfur', 'SD34': 'Kurdufan', 'SD35': 'Upper Nile', 'SO02': 'Banaadir', 'SO03': 'Bari', 'SO01': 'Bakool', 'SO06': 'Gedo', 'SO07': 'Hiiraan', 'SO04': 'Bay', 'CM07': 'Nord-Ouest', 'CM08': 'Ouest', 'CM09': 'Sud-Ouest', 'SO08': 'Jubbada Dhexe', 'SO09': 'Jubbada Hoose', 'PH51': 'Pangasinan', 'LV28': 'Talsu', 'LV29': 'Tukuma', 'LV20': 'Madonas', 'LV21': 'Ogres', 'LV22': 'Preilu', 'LV23': 'Rezekne', 'LV24': 'Rezeknes', 'LV25': 'Riga', 'LV26': 'Rigas', 'LV27': 'Saldus', 'MC01': 'La Condamine', 'MC02': 'Monaco', 'IR42': 'Khorasan-e Razavi', 'UG56': 'Mubende', 'UG59': 'Ntungamo', 'UG58': 'Nebbi', 'PH59': 
'Southern Leyte', 'PH58': 'Sorsogon', 'PG18': 'Sandaun', 'PG19': 'Enga', 'PG10': 'East New Britain', 'PG11': 'East Sepik', 'PG12': 'Madang', 'PG13': 'Manus', 'PG14': 'Morobe', 'PG15': 'New Ireland', 'PG16': 'Western Highlands', 'PG17': 'West New Britain', 'RO41': 'Calarasi', 'RO40': 'Vrancea', 'RO43': 'Ilfov', 'RO42': 'Giurgiu', 'GR18': 'Thesprotia', 'GR19': 'Preveza', 'GR10': 'Grevena', 'GR11': 'Kozani', 'GR12': 'Imathia', 'GR13': 'Thessaloniki', 'GR14': 'Kavala', 'GR15': 'Khalkidhiki', 'GR16': 'Pieria', 'GR17': 'Ioannina', 'USTX': 'Texas', 'GE22': 'Goris Raioni', 'GE23': 'Gurjaanis Raioni', 'GE20': 'Gardabanis Raioni', 'GE21': 'Gori', 'GE26': 'Kaspis Raioni', 'GE27': 'Kharagaulis Raioni', 'GE24': 'Javis Raioni', 'GE25': "K'arelis Raioni", 'IR41': 'Khorasan-e Janubi', 'IR40': 'Yazd', 'GE28': 'Khashuris Raioni', 'GE29': 'Khobis Raioni', 'TR60': 'Tokat', 'TR61': 'Trabzon', 'TR62': 'Tunceli', 'TR63': 'Sanliurfa', 'TR64': 'Usak', 'TR65': 'Van', 'TR66': 'Yozgat', 'TR68': 'Ankara', 'TR69': 'Gumushane', 'BE09': 'West-Vlaanderen', 'BE08': 'Oost-Vlaanderen', 'BE07': 'Namur', 'BE06': 'Luxembourg', 'BE05': 'Limburg', 'BE04': 'Liege', 'BE03': 'Hainaut', 'BE01': 'Antwerpen', 'UZ14': 'Toshkent', 'UZ12': 'Surkhondaryo', 'UZ13': 'Toshkent', 'UZ10': 'Samarqand', 'UZ11': 'Sirdaryo', 'AZ33': 'Mingacevir', 'NG39': 'Jigawa', 'MK26': 'Dobrusevo', 'MK27': 'Dolna Banjica', 'MK24': 'Demir Hisar', 'MK25': 'Demir Kapija', 'MK22': 'Delcevo', 'MK23': 'Delogozdi', 'MK20': 'Cucer-Sandevo', 'MK21': 'Debar', 'MV38': 'Kaafu', 'MV39': 'Lhaviyani', 'MK28': 'Dolneni', 'MK29': 'Dorce Petrov', 'NG30': 'Kwara', 'NG31': 'Niger', 'DZ09': 'Oran', 'NG36': 'Delta', 'GM04': 'Upper River', 'GM05': 'Western', 'GM07': 'North Bank', 'GM01': 'Banjul', 'GM02': 'Lower River', 'GM03': 'Central River', 'CAQC': 'Quebec', 'ID03': 'Bengkulu', 'ID02': 'Bali', 'ID01': 'Aceh', 'ID07': 'Jawa Tengah', 'ID06': 'Jawa Barat', 'ID05': 'Jambi', 'ID04': 'Jakarta Raya', 'CO23': 'Quindio', 'ID09': 'Papua', 'ID08': 'Jawa Timur', 'SV03': 'Chalatenango', 'GBT6': 'Aberdeenshire', 'GBT5': 'Aberdeen City', 'GBT4': 'Strabane', 'GBT3': 'Omagh', 'GBT2': 'North Down', 'GBT1': 'Newtownabbey', 'RU42': 'Leningrad', 'SV09': 'San Miguel', 'SV08': 'Morazan', 'GBT9': 'Scottish Borders', 'GBT8': 'Argyll and Bute', 'MV34': 'Gaafu Alifu', 'MV35': 'Gaafu Dhaalu', 'SIN5': 'Zalec', 'SIN2': 'Videm', 'SIN3': 'Vojnik', 'MKA9': 'Vasilevo', 'SD28': "Al Istiwa'iyah", 'MKA3': 'Suto Orizari', 'MKA2': 'Studenicani', 'MKA1': 'Strumica', 'MKA7': 'Topolcani', 'MKA6': 'Tetovo', 'MKA5': 'Tearce', 'MKA4': 'Sveti Nikole', 'CM13': 'Nord', 'CM12': 'Extreme-Nord', 'CM11': 'Centre', 'CM10': 'Adamaoua', 'SO19': 'Togdheer', 'SO18': 'Nugaal', 'CM14': 'Sud', 'SO14': 'Shabeellaha Hoose', 'SO16': 'Woqooyi Galbeed', 'SO11': 'Nugaal', 'SO10': 'Mudug', 'SO13': 'Shabeellaha Dhexe', 'SO12': 'Sanaag', 'LV33': 'Ventspils', 'LV32': 'Ventspils', 'LV31': 'Valmieras', 'LV30': 'Valkas', 'UG69': 'Katakwi', 'UG66': 'Bugiri', 'UG67': 'Busia', 'UG65': 'Adjumani', 'UG60': 'Pallisa', 'UG61': 'Rakai', 'FRA7': 'Haute-Normandie', 'FRA6': 'Franche-Comte', 'FRA5': 'Corse', 'FRA4': 'Champagne-Ardenne', 'FRA3': 'Centre', 'FRA2': 'Bretagne', 'HU10': 'Hajdu-Bihar', 'HU11': 'Heves', 'FRA9': 'Languedoc-Roussillon', 'FRA8': 'Ile-de-France', 'PG09': 'Eastern Highlands', 'PG08': 'Chimbu', 'PG03': 'Milne Bay', 'PG02': 'Gulf', 'PG01': 'Central', 'PG07': 'North Solomons', 'PG06': 'Western', 'PG05': 'Southern Highlands', 'PG04': 'Northern', 'IN30': 'Arunachal Pradesh', 'IN31': 'Mizoram', 'IN32': 'Daman and Diu', 'VN47': 'Kien Giang', 'IN34': 
'Bihar', 'IN35': 'Madhya Pradesh', 'IN36': 'Uttar Pradesh', 'IN37': 'Chhattisgarh', 'IN38': 'Jharkhand', 'IN39': 'Uttarakhand', 'USUT': 'Utah', 'VN43': 'An Giang', 'MD67': 'Causeni', 'OM04': 'Ash Sharqiyah', 'OM05': 'Az Zahirah', 'OM06': 'Masqat', 'OM07': 'Musandam', 'OM01': 'Ad Dakhiliyah', 'OM02': 'Al Batinah', 'OM03': 'Al Wusta', 'OM08': 'Zufar', 'GR09': 'Kastoria', 'GR08': 'Florina', 'PHH2': 'Quezon', 'PHH3': 'Negros Occidental', 'GR03': 'Xanthi', 'GR02': 'Rodhopi', 'GR01': 'Evros', 'GR07': 'Pella', 'GR06': 'Kilkis', 'GR05': 'Serrai', 'GR04': 'Drama', 'GE39': 'Ninotsmindis Raioni', 'GE38': "Mts'khet'is Raioni", 'GE35': 'Marneulis Raioni', 'GE34': 'Lentekhis Raioni', 'GE37': 'Mestiis Raioni', 'GE36': 'Martvilis Raioni', 'GE31': "K'ut'aisi", 'GE30': 'Khonis Raioni', 'GE33': "Lanch'khut'is Raioni", 'GE32': 'Lagodekhis Raioni', 'TR73': 'Nigde', 'TR72': 'Mardin', 'TR71': 'Konya', 'TR70': 'Hakkari', 'TR77': 'Bayburt', 'TR76': 'Batman', 'TR75': 'Aksaray', 'TR74': 'Siirt', 'TR79': 'Kirikkale', 'TR78': 'Karaman', 'IE31': 'Wicklow', 'IE30': 'Wexford', 'UZ09': 'Qoraqalpoghiston', 'UZ08': 'Qashqadaryo', 'UZ01': 'Andijon', 'KM01': 'Anjouan', 'UZ03': 'Farghona', 'UZ02': 'Bukhoro', 'UZ05': 'Khorazm', 'UZ04': 'Jizzakh', 'UZ07': 'Nawoiy', 'UZ06': 'Namangan', 'BW10': 'Southern', 'BW11': 'North-West', 'MK31': 'Dzepciste', 'MK30': 'Drugovo', 'MK33': 'Gevgelija', 'MK32': 'Gazi Baba', 'MK35': 'Gradsko', 'MK34': 'Gostivar', 'MK37': 'Izvor', 'MK36': 'Ilinden', 'MK39': 'Kamenjane', 'MK38': 'Jegunovce', 'RU26': 'Kamchatka', 'ID38': 'Sulawesi Selatan', 'ID39': 'Irian Jaya Barat', 'ID36': 'Papua', 'ID37': 'Riau', 'ID34': 'Gorontalo', 'ID35': 'Kepulauan Bangka Belitung', 'ID32': 'Sumatera Selatan', 'ID33': 'Banten', 'ID30': 'Jawa Barat', 'ID31': 'Sulawesi Utara', 'GBW2': 'Renfrewshire', 'GBW3': 'Shetland Islands', 'GBW1': 'Perth and Kinross', 'GBW6': 'Stirling', 'GBW7': 'West Dunbartonshire', 'GBW4': 'South Ayrshire', 'AZ62': 'Xanlar', 'GBW8': 'Eilean Siar', 'GBW9': 'West Lothian', 'MKB6': 'Vranestica', 'MKB7': 'Vrapciste', 'MKB4': 'Vinica', 'MKB5': 'Vitoliste', 'MKB2': 'Velesta', 'MKB3': 'Vevcani', 'MKB1': 'Veles', 'MKB8': 'Vratnica', 'MKB9': 'Vrutok', 'LV06': 'Daugavpils', 'LV07': 'Daugavpils', 'LV04': 'Bauskas', 'LV05': 'Cesu', 'LV02': 'Aluksnes', 'LV03': 'Balvu', 'LV01': 'Aizkraukles', 'LV08': 'Dobeles', 'LV09': 'Gulbenes', 'UG79': 'Kabarole', 'UG78': 'Iganga', 'DZ50': 'Ouargla', 'UG71': 'Masaka', 'UG70': 'Luwero', 'UG73': 'Nakasongola', 'UG72': 'Moyo', 'UG74': 'Sembabule', 'UG77': 'Arua', 'UG76': 'Tororo', 'HU01': 'Bacs-Kiskun', 'FRB3': 'Midi-Pyrenees', 'HU03': 'Bekes', 'HU02': 'Baranya', 'HU05': 'Budapest', 'HU04': 'Borsod-Abauj-Zemplen', 'HU07': 'Debrecen', 'HU06': 'Csongrad', 'HU09': 'Gyor-Moson-Sopron', 'HU08': 'Fejer', 'FRB8': "Provence-Alpes-Cote d'Azur", 'FRB9': 'Rhone-Alpes', 'DZ54': 'Tindouf', 'GBT7': 'Angus', 'SV02': 'Cabanas', 'DZ56': 'Tissemsilt', 'SV01': 'Ahuachapan', 'CAYT': 'Yukon Territory', 'SV07': 'La Union', 'SV06': 'La Paz', 'SV05': 'La Libertad', 'SI08': 'Brezice', 'SV04': 'Cuscatlan', 'IN23': 'Punjab', 'IN22': 'Puducherry', 'IN21': 'Orissa', 'IN20': 'Nagaland', 'IN26': 'Tripura', 'IN25': 'Tamil Nadu', 'IN24': 'Rajasthan', 'IN29': 'Sikkim', 'IN28': 'West Bengal', 'NZ10': 'Chatham Islands', 'TN14': 'El Kef', 'GR36': 'Argolis', 'GR37': 'Korinthia', 'GR34': 'Evvoia', 'GR35': 'Attiki', 'GR32': 'Fokis', 'GR33': 'Voiotia', 'GR30': 'Evritania', 'GR31': 'Aitolia kai Akarnania', 'GR38': 'Akhaia', 'GR39': 'Ilia', 'USVT': 'Vermont', 'GE48': 'Samtrediis Raioni', 'GE49': 'Senakis Raioni', 'USVA': 
'Virginia', 'GE40': 'Onis Raioni', 'GE41': "Ozurget'is Raioni", 'GE42': "P'ot'i", 'GE43': 'Qazbegis Raioni', 'GE44': 'Qvarlis Raioni', 'GE45': "Rust'avi", 'GE46': "Sach'kheris Raioni", 'GE47': 'Sagarejos Raioni', 'TR48': 'Mugla', 'TR49': 'Mus', 'TR46': 'Kahramanmaras', 'TR44': 'Malatya', 'TR45': 'Manisa', 'TR43': 'Kutahya', 'TR40': 'Kirsehir', 'TR41': 'Kocaeli', 'SD29': 'Al Khartum', 'MKA8': 'Valandovo', 'BW09': 'South-East', 'BW08': 'North-East', 'BW05': 'Kgatleng', 'BW04': 'Kgalagadi', 'BW06': 'Kweneng', 'BW01': 'Central', 'BW03': 'Ghanzi', 'VN88': 'Dak Lak', 'SD27': 'Al Wusta', 'ID29': 'Maluku Utara', 'ID28': 'Maluku', 'ID21': 'Sulawesi Tengah', 'ID20': 'Sulawesi Selatan', 'ID23': 'Sulawesi Utara', 'ID22': 'Sulawesi Tenggara', 'ID25': 'Sumatera Selatan', 'ID24': 'Sumatera Barat', 'ID26': 'Sumatera Utara', 'AG01': 'Barbuda', 'AG03': 'Saint George', 'AG04': 'Saint John', 'AG05': 'Saint Mary', 'AG06': 'Saint Paul', 'AG07': 'Saint Peter', 'AG08': 'Saint Philip', 'AG09': 'Redonda', 'GBV7': 'North Ayrshire', 'GBV6': 'Moray', 'GBV1': 'Fife', 'MY08': 'Perlis', 'GBV3': 'Highland', 'GBV2': 'Glasgow City', 'RO06': 'Bistrita-Nasaud', 'CV07': 'Ribeira Grande', 'MKC1': 'Zajas', 'KE01': 'Central', 'KE02': 'Coast', 'KE03': 'Eastern', 'MKC5': 'Zletovo', 'KE05': 'Nairobi Area', 'KE06': 'North-Eastern', 'KE07': 'Nyanza', 'KE08': 'Rift Valley', 'KE09': 'Western', 'LV11': 'Jelgava', 'LV10': 'Jekabpils', 'LV13': 'Jurmala', 'LV12': 'Jelgavas', 'LV15': 'Kuldigas', 'LV14': 'Kraslavas', 'LV17': 'Liepajas', 'LV16': 'Liepaja', 'LV19': 'Ludzas', 'RO09': 'Brasov', 'RO08': 'Braila', 'MN25': 'Orhon', 'MN24': 'Govisumber', 'MN23': 'Darhan-Uul', 'MN22': 'Erdenet', 'MN21': 'Bulgan', 'MN20': 'Ulaanbaatar', 'FRC1': 'Alsace', 'ST01': 'Principe', 'ST02': 'Sao Tome', 'BG50': 'Pleven', 'IN18': 'Meghalaya', 'IN19': 'Karnataka', 'IN16': 'Maharashtra', 'IN17': 'Manipur', 'IN14': 'Lakshadweep', 'IN12': 'Jammu and Kashmir', 'IN13': 'Kerala', 'IN10': 'Haryana', 'IN11': 'Himachal Pradesh', 'HU16': 'Pest', 'USWY': 'Wyoming', 'HU17': 'Somogy', 'USWV': 'West Virginia', 'HU14': 'Nograd', 'HU15': 'Pecs', 'USWI': 'Wisconsin', 'BG58': 'Sofiya', 'HU12': 'Komarom-Esztergom', 'TM02': 'Balkan', 'USWA': 'Washington', 'HU13': 'Miskolc', 'KR18': 'Kwangju-jikhalsi', 'FRA1': 'Bourgogne', 'KR14': 'Kyongsang-bukto', 'KR15': 'Taegu-jikhalsi', 'KR16': 'Cholla-namdo', 'KR17': "Ch'ungch'ong-namdo", 'KR10': 'Pusan-jikhalsi', 'KR11': "Seoul-t'ukpyolsi", 'KR12': "Inch'on-jikhalsi", 'KR13': 'Kyonggi-do', 'PG20': 'National Capital', 'HU18': 'Szabolcs-Szatmar-Bereg', 'HU19': 'Szeged', 'GR21': 'Larisa', 'GR20': 'Arta', 'GR23': 'Kardhitsa', 'GR22': 'Trikala', 'GR25': 'Kerkira', 'GR24': 'Magnisia', 'GR27': 'Kefallinia', 'GR26': 'Levkas', 'GR29': 'Fthiotis', 'GR28': 'Zakinthos', 'GE59': 'Tsalkis Raioni', 'GE58': 'Tsalenjikhis Raioni', 'GE53': "T'erjolis Raioni", 'GE52': "T'elavis Raioni", 'GE51': "T'bilisi", 'GE50': 'Sighnaghis Raioni', 'GE57': "Ts'ageris Raioni", 'GE56': 'Tqibuli', 'GE55': "T'ianet'is Raioni", 'GE54': "T'et'ritsqaros Raioni", 'TR59': 'Tekirdag', 'NA07': 'Karibib', 'BH14': 'Madinat Hamad', 'TR50': 'Nevsehir', 'TR53': 'Rize', 'TR52': 'Ordu', 'TR55': 'Samsun', 'TO02': 'Tongatapu', 'TR57': 'Sinop', 'AT08': 'Vorarlberg', 'NA03': 'Boesmanland', 'NA02': 'Caprivi Oos', 'BH13': 'Ar Rifa', 'AT07': 'Tirol', 'AT06': 'Steiermark', 'NA09': 'Luderitz', 'MV05': 'Laamu', 'AT04': 'Oberosterreich', 'MV01': 'Seenu', 'GBQ8': 'Armagh', 'GBQ9': 'Ballymena', 'GBU7': 'East Renfrewshire', 'GBQ2': 'Wokingham', 'GBQ3': 'Wolverhampton', 'GBQ4': 'Worcestershire', 'GBQ5': 
'York', 'GBQ6': 'Antrim', 'GBQ7': 'Ards', 'EE06': 'Kohtla-Jarve', 'EE07': 'Laanemaa', 'EE04': 'Jarvamaa', 'MN18': 'Tov', 'EE05': 'Jogevamaa', 'MN12': 'Hovd', 'EE02': 'Hiiumaa', 'MN10': 'Govi-Altay', 'MN11': 'Hentiy', 'MN16': 'Selenge', 'MN17': 'Suhbaatar', 'MN14': 'Omnogovi', 'MN15': 'Ovorhangay', 'GW02': 'Quinara', 'EE01': 'Harjumaa', 'SB10': 'Central', 'SB11': 'Western', 'SB12': 'Choiseul', 'SB13': 'Rennell and Bellona', 'MV36': 'Haa Alifu', 'IN09': 'Gujarat', 'IN01': 'Andaman and Nicobar Islands', 'IN03': 'Assam', 'IN02': 'Andhra Pradesh', 'IN05': 'Chandigarh', 'BS34': 'Sandy Point', 'IN07': 'Delhi', 'IN06': 'Dadra and Nagar Haveli', 'CY04': 'Nicosia', 'CY05': 'Limassol', 'CY06': 'Paphos', 'IN33': 'Goa', 'CY01': 'Famagusta', 'CY02': 'Kyrenia', 'CY03': 'Larnaca', 'MV30': 'Alifu', 'MG04': 'Toamasina', 'MG05': 'Antananarivo', 'MG06': 'Toliara', 'MG01': 'Antsiranana', 'MG02': 'Fianarantsoa', 'MG03': 'Mahajanga', 'KR06': 'Kangwon-do', 'KR05': "Ch'ungch'ong-bukto", 'KR03': 'Cholla-bukto', 'KR01': 'Cheju-do', 'MV31': 'Baa', 'SR14': 'Nickerie', 'SR15': 'Para', 'SR16': 'Paramaribo', 'SR17': 'Saramacca', 'SR10': 'Brokopondo', 'SR11': 'Commewijne', 'SR12': 'Coronie', 'SR13': 'Marowijne', 'SR18': 'Sipaliwini', 'SR19': 'Wanica', 'MV32': 'Dhaalu', 'MV33': 'Faafu ', 'GE64': 'Zugdidis Raioni', 'GE62': "Zestap'onis Raioni", 'GE63': 'Zugdidi', 'GE60': 'Tsqaltubo', 'GE61': 'Vanis Raioni', 'USPR': 'Puerto Rico', 'USPW': 'Palau', 'USPA': 'Pennsylvania', 'GH10': 'Upper East', 'GH11': 'Upper West', 'GBS7': 'Magherafelt', 'BG63': 'Vidin', 'BG62': 'Veliko Turnovo', 'BG61': 'Varna', 'BG60': 'Turgovishte', 'GBS4': 'Limavady', 'BG65': 'Yambol', 'BG64': 'Vratsa', 'GBS5': 'Lisburn', 'SM04': 'Faetano', 'SM05': 'Fiorentino', 'GBP9': 'Windsor and Maidenhead', 'GBP8': 'Wiltshire', 'GBP3': 'Warwickshire', 'GBP2': 'Warrington', 'GBP1': 'Wandsworth', 'GBS1': 'Dungannon', 'GBP7': 'Wigan', 'GBP6': 'West Sussex', 'GBP5': 'Westminster', 'GBP4': 'West Berkshire', 'DJ01': 'Ali Sabieh', 'DJ06': 'Dikhil', 'DJ07': 'Djibouti', 'DJ04': 'Obock', 'DJ05': 'Tadjoura', 'DJ08': 'Arta', 'LY58': 'Misratah', 'LY59': 'Sawfajjin', 'KH30': 'Pailin', 'NL02': 'Friesland', 'NL03': 'Gelderland', 'NL01': 'Drenthe', 'NL06': 'Noord-Brabant', 'NL07': 'Noord-Holland', 'NL04': 'Groningen', 'NL05': 'Limburg', 'NL08': 'Overijssel', 'NL09': 'Utrecht', 'MN05': 'Darhan', 'MN07': 'Dornogovi', 'MN06': 'Dornod', 'MN01': 'Arhangay', 'MN03': 'Bayan-Olgiy', 'MN02': 'Bayanhongor', 'LR20': 'Lofa', 'MN09': 'Dzavhan', 'MN08': 'Dundgovi', 'NE06': 'Tahoua', 'NE05': 'Niamey', 'JM17': 'Kingston', 'CH26': 'Jura', 'SB07': 'Isabel', 'SB06': 'Guadalcanal', 'SB03': 'Malaita', 'SB09': 'Temotu', 'SB08': 'Makira', 'PE23': 'Tacna', 'PE22': 'San Martin', 'PE21': 'Puno', 'PE20': 'Piura', 'PE25': 'Ucayali', 'PE24': 'Tumbes', 'MD79': 'Leova', 'MD76': 'Glodeni', 'PL79': 'Opolskie', 'PL78': 'Mazowieckie', 'MD77': 'Hincesti', 'PL75': 'Lubelskie', 'PL74': 'Lodzkie', 'PL77': 'Malopolskie', 'PL76': 'Lubuskie', 'MD74': 'Falesti', 'PL73': 'Kujawsko-Pomorskie', 'PL72': 'Dolnoslaskie', 'MD75': 'Floresti', 'MD73': 'Edinet', 'MD70': 'Donduseni', 'BF78': 'Zondoma', 'MD71': 'Drochia', 'SC19': 'Plaisance', 'BN08': 'Belait', 'BN09': 'Brunei and Muara', 'SC18': 'Mont Fleuri', 'BN07': 'Alibori', 'KP12': "P'yongyang-si", 'KP13': 'Yanggang-do', 'KP11': "P'yongan-bukto", 'KP17': 'Hamgyong-bukto', 'KP14': "Namp'o-si", 'KP15': "P'yongan-namdo", 'KP18': 'Najin Sonbong-si', 'SC13': "Grand' Anse", 'SC12': 'Glacis', 'GH01': 'Greater Accra', 'GH03': 'Brong-Ahafo', 'GH02': 'Ashanti', 'GH05': 'Eastern', 'GH04': 
'Central', 'GH06': 'Northern', 'GH09': 'Western', 'GH08': 'Volta', 'BG56': 'Sliven', 'BG57': 'Smolyan', 'BG54': 'Shumen', 'BG55': 'Silistra', 'BG52': 'Razgrad', 'BG53': 'Ruse', 'RU90': 'Permskiy Kray', 'BG51': 'Plovdiv', 'TM05': 'Mary', 'TM04': 'Lebap', 'TM01': 'Ahal', 'TM03': 'Dashoguz', 'BG59': 'Stara Zagora', 'SE18': 'Sodermanlands Lan', 'HU28': 'Eger', 'RU91': 'Krasnoyarskiy Kray', 'CN11': 'Hunan', 'HT14': "Grand' Anse", 'KH29': 'Batdambang', 'KH25': 'Banteay Meanchey', 'NL15': 'Overijssel', 'NL16': 'Flevoland', 'NL11': 'Zuid-Holland', 'NL10': 'Zeeland', 'LS18': 'Quthing', 'LS19': 'Thaba-Tseka', 'LS10': 'Berea', 'LS11': 'Butha-Buthe', 'LS12': 'Leribe', 'LS13': 'Mafeteng', 'LS14': 'Maseru', 'LS15': 'Mohales Hoek', 'LS16': 'Mokhotlong', 'LS17': 'Qachas Nek', 'GBS6': 'Derry', 'SM01': 'Acquaviva', 'SM02': 'Chiesanuova', 'SM03': 'Domagnano', 'GBS2': 'Fermanagh', 'GBS3': 'Larne', 'SM06': 'Borgo Maggiore', 'SM07': 'San Marino', 'SM08': 'Monte Giardino', 'SM09': 'Serravalle', 'GBS8': 'Moyle', 'GBS9': 'Newry and Mourne', 'CZ90': 'Zlinsky kraj', 'MD90': 'Taraclia', 'MD91': 'Telenesti', 'MD92': 'Ungheni', 'CN18': 'Guizhou', 'CN19': 'Liaoning', 'CN10': 'Hebei', 'SE10': 'Dalarnas Lan', 'CN12': 'Hubei', 'CN13': 'Xinjiang', 'CN14': 'Xizang', 'CN15': 'Gansu', 'CN16': 'Guangxi', 'PE18': 'Moquegua', 'PE19': 'Pasco', 'PE16': 'Loreto', 'PE17': 'Madre de Dios', 'PE14': 'Lambayeque', 'PE15': 'Lima', 'PE12': 'Junin', 'PE13': 'La Libertad', 'PE10': 'Huanuco', 'PE11': 'Ica'}
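# --- Illustrative note (not part of the original file) ---
# The mapping above keys FIPS-style country+region codes to region names; the
# binding name `REGION_NAMES` is hypothetical, since the assignment itself is
# truncated in this excerpt:
#
#   REGION_NAMES.get('USCA')  # -> 'California'
#   REGION_NAMES.get('CA08')  # -> 'Ontario'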
| 21,486.25
| 85,919
| 0.602339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69,704
| 0.81103
|
b7a9d3a60ea32292f1f1a315f0e465e4cf861159
| 236
|
py
|
Python
|
configs/semantic_guided/CE.py
|
hukkelas/full_body_anonymization
|
c61745b137c84ffb742ef6ab2f4721db4acf22b7
|
[
"MIT"
] | 27
|
2022-01-06T20:15:24.000Z
|
2022-03-29T11:54:49.000Z
|
configs/semantic_guided/CE.py
|
hukkelas/full_body_anonymization
|
c61745b137c84ffb742ef6ab2f4721db4acf22b7
|
[
"MIT"
] | 2
|
2022-03-17T06:04:23.000Z
|
2022-03-25T08:50:57.000Z
|
configs/semantic_guided/CE.py
|
hukkelas/full_body_anonymization
|
c61745b137c84ffb742ef6ab2f4721db4acf22b7
|
[
"MIT"
] | 2
|
2022-01-07T13:16:59.000Z
|
2022-01-16T02:10:50.000Z
|
_base_config_ = ["base.py"]

# Generator both takes CSE (continuous surface embedding) maps as input and
# uses them for conditioning.
generator = dict(
    input_cse=True,
    use_cse=True
)
# Discriminator predicts per-pixel semantics rather than CSE embeddings.
discriminator = dict(
    pred_only_cse=False,
    pred_only_semantic=True
)
# Add a segmentation term to the GAN criterion, weighted at 0.1.
loss = dict(
    gan_criterion=dict(type="segmentation", seg_weight=0.1)
)
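# --- Illustrative note (not part of the original config) ---
# A minimal sketch of how a loader might resolve this file against its
# `_base_config_`; `load_config` and the deep-merge behaviour are assumptions
# for illustration, not the repository's actual API:
#
#   cfg = load_config("configs/semantic_guided/CE.py")  # merges base.py first
#   assert cfg["loss"]["gan_criterion"]["seg_weight"] == 0.1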
| 15.733333
| 58
| 0.699153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.097458
|
b7ac37d5a23a4e9b74f4c14b649516c082ec539d
| 676
|
py
|
Python
|
test/compiler/test-encode.py
|
xupingmao/minipy
|
5bce2f238925eb92fe9ff7d935f59ef68daa257a
|
[
"MIT"
] | 52
|
2016-07-11T10:14:35.000Z
|
2021-12-09T09:10:43.000Z
|
test/compiler/test-encode.py
|
xupingmao/snake
|
c956f151ed1ebd2faeaf1565352b59ca5a8fa0b4
|
[
"MIT"
] | 13
|
2016-07-24T13:50:37.000Z
|
2019-03-02T06:56:18.000Z
|
test/compiler/test-encode.py
|
xupingmao/snake
|
c956f151ed1ebd2faeaf1565352b59ca5a8fa0b4
|
[
"MIT"
] | 9
|
2017-01-27T10:46:04.000Z
|
2021-12-09T09:10:46.000Z
|
# -*- coding:utf-8 -*-
# @author xupingmao <578749341@qq.com>
# @since 2020/10/20 00:19:47
# @modified 2020/10/20 00:42:52
import sys

sys.path.append("src/python")
from mp_encode import *


def test_compile(fname):
    """Disassemble a case's input script and diff it against its golden output."""
    input_fname = "test/compiler/case/%s-input.py" % fname
    output_fname = "test/compiler/case/%s-output.txt" % fname
    code = load(input_fname)
    expect = load(output_fname)
    bytecode = dis_code(code, True)
    if expect != bytecode:
        print("expect:\n%s \n\nactual:\n%s\n" % (expect, bytecode))
        raise Exception("assert failed, fname=%s" % fname)


test_compile("assign-number")
test_compile("assign-multi")
test_compile("if-in")
| 25.037037
| 67
| 0.668639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.427515
|
b7ad32170df3a150ccda1de3243e006af407c590
| 3,719
|
py
|
Python
|
paragen/optim/__init__.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | null | null | null |
paragen/optim/__init__.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | null | null | null |
paragen/optim/__init__.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
from inspect import getfullargspec
import importlib
import json
import os
import logging

logger = logging.getLogger(__name__)

from torch.optim.optimizer import Optimizer
# paragen's own wrapper intentionally shadows the torch Optimizer import above.
from paragen.optim.optimizer import Optimizer
from paragen.utils.rate_schedulers import create_rate_scheduler
from paragen.utils.runtime import Environment
from paragen.utils.registry import setup_registry

register_optim, create_optim, registry = setup_registry('optim', Optimizer, force_extend=False)


def build_optimizer(model, configs, enable_apex=False):
    configs = deepcopy(configs)
    name = configs.pop('class')
    # Config values may arrive as string-encoded Python literals; evaluate them
    # where possible and fall back to the raw value otherwise.
    kwargs = {}
    for k, v in configs.items():
        try:
            v = eval(v)
        except Exception:
            pass
        finally:
            kwargs[k] = v
    configs = kwargs
    logger.info('Creating {} class with configs \n{}\n'.format(
        name, json.dumps(configs, indent=4, sort_keys=True)))
    lr = configs.pop('lr')
    lr_scheduler = create_rate_scheduler(lr)
    lr_scheduler.build()
    # Separate kwargs destined for paragen's Optimizer wrapper from those
    # meant for the underlying torch optimizer.
    args = getfullargspec(Optimizer).args[4:]
    optimizer_kwargs = {}
    for key in args:
        if key in configs:
            optimizer_kwargs[key] = configs.pop(key)
    # Resolve the optimizer class: registered paragen optimizers take priority,
    # otherwise fall back to torch.optim by name.
    if name.lower() in registry:
        cls = registry[name.lower()]
    else:
        mod = importlib.import_module('torch.optim')
        cls = getattr(mod, name)
    if 'no_decay' in configs:
        # Materialize the generator so both comprehensions below can iterate it,
        # and exempt parameters matching a no_decay pattern from weight decay.
        named_parameters = list(model.named_parameters())
        no_decay = configs.pop('no_decay')
        weight_decay = configs.pop('weight_decay')
        grouped_parameters = [
            {'params': [p for n, p in named_parameters if not any(nd in n for nd in no_decay)],
             'weight_decay': weight_decay},
            {'params': [p for n, p in named_parameters if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}
        ]
    else:
        grouped_parameters = model.parameters()
    optimizer = cls(grouped_parameters, lr=lr_scheduler.rate, **configs)
    env = Environment()
    if env.distributed_world > 1:
        # Wrap with Horovod for multi-worker training.
        import horovod.torch as hvd
        hvd_kwargs = {}
        if 'update_frequency' in optimizer_kwargs:
            hvd_kwargs['backward_passes_per_step'] = optimizer_kwargs['update_frequency']
        if env.fp16 and not enable_apex:
            hvd_kwargs['compression'] = hvd.Compression.fp16
        optimizer = hvd.DistributedOptimizer(optimizer,
                                             named_parameters=model.named_parameters(),
                                             **hvd_kwargs)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    if enable_apex:
        from apex import amp
        update_frequency = optimizer_kwargs['update_frequency'] if 'update_frequency' in optimizer_kwargs else 1
        model, optimizer = amp.initialize(model, optimizer,
                                          opt_level='O1',
                                          num_losses=update_frequency)
    optimizer_kwargs['enable_apex'] = enable_apex
    optimizer = Optimizer(model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, **optimizer_kwargs)
    return model, optimizer


# Auto-import every module in this package so optimizer registrations execute.
modules_dir = os.path.dirname(__file__)
for file in os.listdir(modules_dir):
    path = os.path.join(modules_dir, file)
    if (
        not file.startswith('_')
        and not file.startswith('.')
        and (file.endswith('.py') or os.path.isdir(path))
    ):
        module_name = file[:file.find('.py')] if file.endswith('.py') else file
        module = importlib.import_module('paragen.optim.' + module_name)
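# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of calling build_optimizer; the 'class' and 'lr' keys follow
# the parsing above, but the exact schedule dict accepted by
# create_rate_scheduler is an assumption here:
#
#   import torch
#
#   model = torch.nn.Linear(8, 2)
#   configs = {
#       'class': 'Adam',           # not registered, so resolved via torch.optim
#       'lr': {'rate': 1e-3},      # hypothetical rate-scheduler config
#       'betas': '(0.9, 0.98)',    # string literal, eval'd by the loop above
#   }
#   model, optimizer = build_optimizer(model, configs)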
| 35.419048
| 112
| 0.633235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 313
| 0.084162
|
b7adb4099781c23875a3639ae37c4e4a5bb88239
| 2,860
|
py
|
Python
|
joyvillage/joy/views.py
|
IreneMercy/Joy-Village-Backup
|
f038e33aa5c28337bc20c6fcd3273ef37d283564
|
[
"MIT"
] | null | null | null |
joyvillage/joy/views.py
|
IreneMercy/Joy-Village-Backup
|
f038e33aa5c28337bc20c6fcd3273ef37d283564
|
[
"MIT"
] | 9
|
2021-03-19T09:58:55.000Z
|
2022-03-12T00:22:50.000Z
|
joyvillage/joy/views.py
|
IreneMercy/Joy-Foundation
|
f038e33aa5c28337bc20c6fcd3273ef37d283564
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from .models import Events, Gallery, News, Careers, Partners
from django.core.mail import send_mail, BadHeaderError
from django.conf import settings
from .forms import ContactForm
from django.contrib import messages


def home(request):
    # Blank form on GET; on POST, validate and forward the message by email.
    if request.method == 'GET':
        form = ContactForm()
    else:
        form = ContactForm(request.POST or None)
        if form.is_valid():
            subject = form.cleaned_data['subject']
            from_email = form.cleaned_data['from_email']
            message = form.cleaned_data['message']
            msg_mail = str(message) + " " + str(from_email)
            try:
                send_mail(subject, msg_mail, from_email, ['kimkidati@gmail.com'], fail_silently=False)
            except BadHeaderError:
                return HttpResponse('Invalid header found.')
    events = Events.objects.all()[:3]
    context = {
        'events': events,
        'form': form,
    }
    return render(request, 'home.html', context)


def events(request):
    events = Events.objects.all()
    context = {
        'events': events,
    }
    return render(request, 'events.html', context)


def gallery(request):
    gallery = Gallery.objects.all()
    context = {
        'gallery': gallery,
    }
    return render(request, 'gallery.html', context)


def about(request):
    return render(request, 'about.html')


def news(request):
    news = News.objects.all()
    context = {
        'news': news
    }
    return render(request, 'news.html', context)


def tenders(request):
    return render(request, 'tenders.html')


def partners(request):
    partners = Partners.objects.all()
    context = {
        'partners': partners
    }
    return render(request, 'partners.html', context)


def careers(request):
    careers = Careers.objects.all()
    context = {
        'careers': careers
    }
    return render(request, 'careers.html', context)


def contact(request):
    # Same email flow as home(), plus a success flash message.
    if request.method == 'GET':
        form = ContactForm()
    else:
        form = ContactForm(request.POST or None)
        if form.is_valid():
            subject = form.cleaned_data['subject']
            from_email = form.cleaned_data['from_email']
            message = form.cleaned_data['message']
            msg_mail = str(message) + " " + str(from_email)
            try:
                send_mail(subject, msg_mail, from_email, ['kimkidati@gmail.com'], fail_silently=False)
            except BadHeaderError:
                return HttpResponse('Invalid header found.')
            messages.add_message(request, messages.SUCCESS, 'Email sent successfully.')
    return render(request, "contact.html", {'form': form})


def successView(request):
    return HttpResponse('Success! Thank you for your message.')
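# --- Illustrative refactor (not part of the original file) ---
# The email flow duplicated in home() and contact() could live in one helper;
# `_send_contact_email` is a hypothetical name used only for this sketch:
#
#   def _send_contact_email(form):
#       subject = form.cleaned_data['subject']
#       from_email = form.cleaned_data['from_email']
#       msg_mail = "%s %s" % (form.cleaned_data['message'], from_email)
#       send_mail(subject, msg_mail, from_email,
#                 ['kimkidati@gmail.com'], fail_silently=False)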
| 29.484536
| 103
| 0.640909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 408
| 0.142657
|
b7ae3072a6046637d4bd29d21eabd8d449e98c65
| 274
|
py
|
Python
|
src/events/admin.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/events/admin.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
src/events/admin.py
|
cbsBiram/xarala__ssr
|
863e1362c786daa752b942b796f7a015211d2f1b
|
[
"FSFAP"
] | null | null | null |
from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin

from .models import Event, Speaker


class EventAdmin(SummernoteModelAdmin):
    # Render the "content" field with the Summernote WYSIWYG widget.
    summernote_fields = ("content",)


admin.site.register(Event, EventAdmin)
admin.site.register(Speaker)
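# --- Illustrative note (not part of the original file) ---
# Django's decorator form is an equivalent way to register the model:
#
#   @admin.register(Event)
#   class EventAdmin(SummernoteModelAdmin):
#       summernote_fields = ("content",)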
| 22.833333 | 56 | 0.813869 | 76 | 0.277372 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.032847 |
b7af3d0ba516d34beefe1df3aa1a2a39558521ee | 318 | py | Python
python/cython_build.py | n1tk/batch_jaro_winkler | 421c7e3a5bedce89e8c00216b90d32d1629073a2 | ["MIT"] | 22 | 2020-04-30T17:56:29.000Z | 2022-01-19T21:05:15.000Z
python/cython_build.py | n1tk/batch_jaro_winkler | 421c7e3a5bedce89e8c00216b90d32d1629073a2 | ["MIT"] | 2 | 2021-01-19T14:07:22.000Z | 2021-11-24T16:32:46.000Z
python/cython_build.py | n1tk/batch_jaro_winkler | 421c7e3a5bedce89e8c00216b90d32d1629073a2 | ["MIT"] | 3 | 2020-10-28T20:56:29.000Z | 2022-02-25T23:29:05.000Z
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import sys
python_version = sys.version_info[0]
setup(
name='batch_jaro_winkler',
ext_modules=cythonize([Extension('batch_jaro_winkler', ['cbatch_jaro_winkler.pyx'])], language_level=python_version)
)
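The setup script is meant to be run as a build command rather than imported; a plausible invocation, assuming cbatch_jaro_winkler.pyx sits next to the script:
# Typical build invocation (run from the python/ directory):
#   python cython_build.py build_ext --inplace
# Once built, the extension imports like any module:
#   import batch_jaro_winkler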
| 24.461538 | 118 | 0.811321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.204403 |
b7afa43603c9767401a62b1a4fe6fc631887605a | 6,647 | py | Python
venv/lib/python3.7/site-packages/google/type/postal_address_pb2.py | nicholasadamou/StockBird | 257396479667863d4ee122ea46adb86087c9aa78 | ["Apache-2.0"] | 15 | 2020-06-29T08:33:39.000Z | 2022-02-12T00:28:51.000Z
venv/lib/python3.7/site-packages/google/type/postal_address_pb2.py | nicholasadamou/StockBird | 257396479667863d4ee122ea46adb86087c9aa78 | ["Apache-2.0"] | 21 | 2020-03-01T18:21:09.000Z | 2020-05-26T14:49:08.000Z
venv/lib/python3.7/site-packages/google/type/postal_address_pb2.py | nicholasadamou/StockBird | 257396479667863d4ee122ea46adb86087c9aa78 | ["Apache-2.0"] | 11 | 2020-06-29T08:40:24.000Z | 2022-02-24T17:39:16.000Z
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/type/postal_address.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/type/postal_address.proto',
package='google.type',
syntax='proto3',
serialized_options=_b('\n\017com.google.typeB\022PostalAddressProtoP\001ZFgoogle.golang.org/genproto/googleapis/type/postaladdress;postaladdress\242\002\003GTP'),
serialized_pb=_b('\n google/type/postal_address.proto\x12\x0bgoogle.type\"\xfd\x01\n\rPostalAddress\x12\x10\n\x08revision\x18\x01 \x01(\x05\x12\x13\n\x0bregion_code\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t\x12\x13\n\x0bpostal_code\x18\x04 \x01(\t\x12\x14\n\x0csorting_code\x18\x05 \x01(\t\x12\x1b\n\x13\x61\x64ministrative_area\x18\x06 \x01(\t\x12\x10\n\x08locality\x18\x07 \x01(\t\x12\x13\n\x0bsublocality\x18\x08 \x01(\t\x12\x15\n\raddress_lines\x18\t \x03(\t\x12\x12\n\nrecipients\x18\n \x03(\t\x12\x14\n\x0corganization\x18\x0b \x01(\tBu\n\x0f\x63om.google.typeB\x12PostalAddressProtoP\x01ZFgoogle.golang.org/genproto/googleapis/type/postaladdress;postaladdress\xa2\x02\x03GTPb\x06proto3')
)
_POSTALADDRESS = _descriptor.Descriptor(
name='PostalAddress',
full_name='google.type.PostalAddress',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='revision', full_name='google.type.PostalAddress.revision', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region_code', full_name='google.type.PostalAddress.region_code', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.type.PostalAddress.language_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postal_code', full_name='google.type.PostalAddress.postal_code', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sorting_code', full_name='google.type.PostalAddress.sorting_code', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='administrative_area', full_name='google.type.PostalAddress.administrative_area', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locality', full_name='google.type.PostalAddress.locality', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sublocality', full_name='google.type.PostalAddress.sublocality', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='address_lines', full_name='google.type.PostalAddress.address_lines', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recipients', full_name='google.type.PostalAddress.recipients', index=9,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='organization', full_name='google.type.PostalAddress.organization', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=303,
)
DESCRIPTOR.message_types_by_name['PostalAddress'] = _POSTALADDRESS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PostalAddress = _reflection.GeneratedProtocolMessageType('PostalAddress', (_message.Message,), dict(
DESCRIPTOR = _POSTALADDRESS,
__module__ = 'google.type.postal_address_pb2'
# @@protoc_insertion_point(class_scope:google.type.PostalAddress)
))
_sym_db.RegisterMessage(PostalAddress)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
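Generated _pb2 modules are consumed through the message classes they register; a small usage sketch for the PostalAddress message defined above (field values are illustrative):
# Usage sketch -- requires the protobuf runtime this file was generated against.
from google.type import postal_address_pb2
addr = postal_address_pb2.PostalAddress(
    region_code='US',
    postal_code='94043',
    address_lines=['1600 Amphitheatre Parkway'],  # repeated string field (label=3)
)
data = addr.SerializeToString()                           # wire-format bytes
parsed = postal_address_pb2.PostalAddress.FromString(data)
assert parsed.postal_code == '94043'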
| 47.141844 | 711 | 0.746352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,902 | 0.286144 |
b7aff83ef7243e9bfde2d005caee3accba55ac07 | 612 | py | Python
dataprep/tests/data_connector/test_integration.py | dylanzxc/dataprep | 21ef62936eeec28495763f35fcb3cb07f71de6f6 | ["MIT"] | 1 | 2020-12-18T13:16:38.000Z | 2020-12-18T13:16:38.000Z
dataprep/tests/data_connector/test_integration.py | bexxmodd/dataprep | 3aaa626bfaf9270cf56ffb70280cbcd8031f3ac3 | ["MIT"] | null | null | null
dataprep/tests/data_connector/test_integration.py | bexxmodd/dataprep | 3aaa626bfaf9270cf56ffb70280cbcd8031f3ac3 | ["MIT"] | null | null | null
from ...data_connector import Connector
from os import environ
def test_data_connector() -> None:
token = environ["DATAPREP_DATA_CONNECTOR_YELP_TOKEN"]
dc = Connector("yelp", _auth={"access_token": token})
df = dc.query("businesses", term="ramen", location="vancouver")
assert len(df) > 0
dc.info()
schema = dc.show_schema("businesses")
assert len(schema) > 0
    df = dc.query("businesses", _count=120, term="ramen", location="vancouver")
    assert len(df) == 120
    df = dc.query("businesses", _count=10000, term="ramen", location="vancouver")
    assert len(df) < 1000  # the API caps results well below the requested 10000
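The test pulls its Yelp credential from the environment, so a run is gated on that variable being set; for example (the token value is a placeholder, not a real credential):
# Sketch of the required setup before running the test.
import os
os.environ["DATAPREP_DATA_CONNECTOR_YELP_TOKEN"] = "<your-yelp-api-token>"
# then: pytest dataprep/tests/data_connector/test_integration.py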
| 24.48 | 81 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.25817 |
b7b0446dcbe64c12f7cc04baa8e64d8c7155c9fb | 6,294 | py | Python
plot_helpers.py | aspuru-guzik-group/QNODE | 1ec3767c7b574eaeb7b0ec05d1be54983f8972bd | ["MIT"] | 14 | 2021-11-06T19:32:56.000Z | 2022-02-08T19:53:28.000Z
plot_helpers.py | aspuru-guzik-group/QNODE | 1ec3767c7b574eaeb7b0ec05d1be54983f8972bd | ["MIT"] | null | null | null
plot_helpers.py | aspuru-guzik-group/QNODE | 1ec3767c7b574eaeb7b0ec05d1be54983f8972bd | ["MIT"] | null | null | null
import os
import torch
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import cm
from qutip import *
import imageio
plt.rcParams['axes.labelsize'] = 16
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
def animate_recon(xt, xm, xe, title=''):
"""x is [ts,3]"""
images = []
for x, label, col in zip([xt, xm, xe],['training dynamics', 'latent neural ode reconstruction','latent neural ode extrapolation' ], ['black','limegreen', 'blue']):
for i, v in enumerate(x):
bloch = bloch_format(Bloch())
bloch.add_vectors(v)
bloch.vector_color =[col]
bloch.render()
s = x[:i+1]
#print(v, s[-1])
bloch.axes.plot(s[:,1], -s[:,0], s[:,2], color=col)
if label =='latent neural ode reconstruction':
bloch.axes.plot(xt[:,1], -xt[:,0], xt[:,2], color='black')
if label =='latent neural ode extrapolation':
bloch.axes.plot(xt[:,1], -xt[:,0], xt[:,2], color='black')
bloch.axes.plot(xm[:,1], -xm[:,0], xm[:,2], color='limegreen')
plt.suptitle(label, fontdict={'color':col})
plt.savefig('exp/temp_file.png')
images.append(imageio.imread('exp/temp_file.png'))
imageio.mimsave('exp/'+title+'.gif', images, duration=0.05)
def plot_bloch_vectors(xm, title=''):
# xm is np x 3
bloch = bloch_format(Bloch())
for i, vm in enumerate(xm):
bloch.add_vectors(vm)
bloch.vector_color =['black']
bloch.render()
plt.suptitle(r'interpolated initial states $|\Psi_0 \rangle $')
plt.savefig('exp/bvecs'+title+'.pdf', bbox_inches='tight')
def animate_traj(xt, title=''):
"""xt, xm is [ts,3] --> generate gif of both simultaneously"""
images = []
for i, vt in enumerate(xt):
bloch = bloch_format(Bloch())
bloch.add_vectors(vt)
bloch.vector_color =['black']
bloch.render()
t = xt[:i+1]
bloch.axes.plot(t[:,1], -t[:,0], t[:,2], color='black', label='dynamics')
#plt.legend(loc='lower center')
#plt.suptitle('latent neural ode --', fontdict={'color':'limegreen'})
#plt.title('True quantum dynamics', fontdict={'color':'black'})
plt.savefig('exp/temp_file.png', bbox_inches='tight')
images.append(imageio.imread('exp/temp_file.png'))
imageio.mimsave('exp/'+title+'.gif', images, duration=0.05)
def animate_recon_(xt, xm, title=''):
"""xt, xm is [ts,3] --> generate gif of both simultaneously"""
images = []
for i, (vt, vm) in enumerate(zip(xt,xm)):
bloch = bloch_format(Bloch())
bloch.add_vectors(vt)
bloch.add_vectors(vm)
bloch.vector_color =['black', 'limegreen']
bloch.render()
t = xt[:i+1]
m = xm[:i+1]
bloch.axes.plot(t[:,1], -t[:,0], t[:,2], color='black', label='train')
bloch.axes.plot(m[:,1], -m[:,0], m[:,2], color='limegreen', label='neural ode')
#plt.legend(loc='lower center')
plt.suptitle('latent neural ode --', fontdict={'color':'limegreen'})
plt.title('True quantum dynamics', fontdict={'color':'black'})
plt.savefig('exp/temp_file.png')
images.append(imageio.imread('exp/temp_file.png'))
imageio.mimsave('exp/'+title+'.gif', images, duration=0.05)
def animate_single_traj(x, title=''):
"""x is [ts,3]"""
images = []
for i, v in enumerate(x):
bloch = Bloch()
bloch.add_vectors(v)
bloch.add_points(v)
bloch.render()
s = x[:i+1]
print(v, s[-1])
bloch.axes.plot(s[:,1], -s[:,0], s[:,2], color='limegreen')
plt.savefig('exp/temp_file.png')
images.append(imageio.imread('exp/temp_file.png'))
imageio.mimsave('exp/traj'+title+'.gif', images, duration=0.125)
os.remove('exp/temp_file.png')
def plot_traj_bloch(x, title='', col='limegreen',view=[0,90]):
bloch = bloch_format(Bloch(), view)#[-40,30])
bloch.render()
bloch.axes.plot(x[:,1], -x[:,0], x[:,2], color=col)
plt.savefig('exp/'+title)
def construct_gif(xs, title=''):
""" constructs a gif of stationary bloch trajectories """
cmap = cm.get_cmap('Greens', len(xs))
cols = cmap(range(len(xs)))
images = []
    for i, x in enumerate(xs):
        filename = 'temp_file.png'
        plot_traj_bloch(x, filename, col=cols[i])  # shade each trajectory with the colormap built above
images.append(imageio.imread('exp/'+filename))
imageio.mimsave('exp/'+title+'.gif', images, duration=0.5)
os.remove('exp/temp_file.png')
def bloch_format(bloch, view=[0, 90]):
bloch.frame_color = 'gray'
bloch.frame_num = 6
bloch.frame_alpha = 0.15
bloch.sphere_alpha = 0.1
bloch.sphere_color = 'whitesmoke'
bloch.view = view
bloch.ylabel = ['','']
bloch.xlabel = ['','']
bloch.zlabel = ['','']
return bloch
def slerp(val, low, high):
omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)), -1, 1))
so = np.sin(omega)
if so == 0.:
return (1.0-val) * low + val * high # L'Hopital's rule/LERP
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega) / so * high
def get_latent_interp(z1, z2, num_steps, linear=False):
zs = []
ratios = np.linspace(0, 1, num_steps)
for ratio in ratios:
if linear:
v = (1.0 - ratio) * z1 + ratio * z2
else:
v = slerp(ratio, z1, z2)
zs.append(v)
return zs
def normalize(a):
a = a - np.real(a).min()
return a/np.abs(a).max()
def norm(s):
    s = np.sum(s**2, -1) ** .5
    return s
def get_interpolate(model, data, i, j, n_steps=8):
nts = len(data.train_time_steps)
ts = torch.from_numpy(data.train_time_steps).float()
x1 = data.train_expect_data[[i]]
x2 = data.train_expect_data[[j]]
trajs = np.concatenate((x1, x2), axis=0).reshape((2, nts, 3))
trajs = torch.from_numpy(trajs).float()
z0 = model.encode(trajs, ts, reconstruct=True)
z1, z2 = z0[0,:], z0[1,:]
zs = get_latent_interp(z1, z2, n_steps)
return zs
def round_3sf(num_list):
trimmed = []
for num in num_list:
trimmed.append(round(num, 3 - int(math.floor(math.log10(abs(num)))) - 1))
return trimmed
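slerp() walks the great circle between two vectors, which is why get_latent_interp prefers it over linear interpolation for latent states; a quick numerical check of its endpoint and midpoint behaviour (inputs chosen for illustration):
# slerp sanity check: endpoints come back exactly; for two orthogonal unit
# vectors the midpoint is the 45-degree point on the arc.
import numpy as np
low, high = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
assert np.allclose(slerp(0.0, low, high), low)
assert np.allclose(slerp(1.0, low, high), high)
assert np.allclose(slerp(0.5, low, high), [np.sqrt(0.5), np.sqrt(0.5), 0.0])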
| 35.965714 | 167 | 0.58659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,346 | 0.213854 |
b7b1005f9392e0dbc05eef28e9be52e84c7d4a58 | 17,348 | py | Python
state.py | Lekensteyn/wgll | 11ac7925687bfc1866d3702f821230233fe3a8de | ["MIT"] | 1 | 2021-11-27T17:29:43.000Z | 2021-11-27T17:29:43.000Z
state.py | Lekensteyn/wgll | 11ac7925687bfc1866d3702f821230233fe3a8de | ["MIT"] | null | null | null
state.py | Lekensteyn/wgll | 11ac7925687bfc1866d3702f821230233fe3a8de | ["MIT"] | null | null | null
# State tracking for WireGuard protocol operations.
# Author: Peter Wu <peter@lekensteyn.nl>
# Licensed under the MIT license <http://opensource.org/licenses/MIT>.
import base64
import hashlib
import inspect
import socket
import traceback
from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt
def calc_mac1(key, data):
mac1_key = hashlib.blake2s(b'mac1----' + key.pub).digest()
return hashlib.blake2s(data, digest_size=16, key=mac1_key).digest()
def is_bytes(value):
# Check for __bytes__ due to PublicKey / PrivateKey.
return type(value) == bytes or hasattr(value, '__bytes__')
def to_bytes(data, length, byteorder='big'):
if not data:
data = 0
if type(data) == int:
if not length:
# Indeterminate length, just expand it.
length = (data.bit_length() + 7) // 8
return data.to_bytes(length, byteorder)
if type(data) == str:
data = base64.b64decode(data)
elif not is_bytes(data):
raise RuntimeError(f'Expected bytes, got: {data!r}')
else:
data = bytes(data)
if length and len(data) != length:
print(f'Warning: want {length}, got length {len(data)}: {data!r}')
traceback.print_stack()
return data
class Storage:
def __init__(self, name, spec, variables):
self.name = name
self.spec = spec
self.instances = []
self.variables = variables
def add(self, *args, **kwargs):
return self.add_object(self.spec(*args, **kwargs))
def add_object(self, obj):
i = len(self.instances)
obj.name = f'{self.name}_{i}'
# De-duplicate
for obj2 in self.instances:
if repr(obj2) == repr(obj):
obj = obj2
break
else:
self.instances.append(obj)
self.variables[obj.name] = obj
print(f'{obj.name} = {obj}')
return obj
def resolve(self, name):
'''Resolves an item name (or the item itself) to a matching item in this
storage.'''
        if name is None:
assert self.instances, f'No previous instance found for {self.name}'
return self.instances[-1]
if isinstance(name, self.spec):
name = name.name
assert self.instances, f'No instances found for {name}'
# XXX maybe this could split the name and directly use it as index.
for instance in self.instances[::-1]:
if instance.name == name:
return instance
raise RuntimeError(f'Instance name {name} not found')
class Base:
def __repr__(self):
try:
fields = self.fields
except AttributeError:
fields = list(inspect.signature(self.__init__).parameters)
params = []
for field in fields:
value = getattr(self, field)
# XXX should repr dump the full values or refer to the state name?
if hasattr(value, 'name') and False:
display = getattr(value, 'name')
elif is_bytes(value):
# Cannot just check type(value) because of PublicKey.
value = bytes(value)
if not value.replace(b'\0', b''):
# Simplify display
display = None
elif len(value) > 16:
display = repr(base64.b64encode(value).decode('utf8'))
else:
display = "b'%s'" % ''.join('\\x%02x' % x for x in value)
else:
display = repr(value)
params.append(f'{field}={display}')
params = ', '.join(params)
return f'{self.__class__.__name__}({params})'
class Address(Base):
def __init__(self, host, port):
self.host = host
self.port = int(port)
self.address = (self.host, self.port)
class LocalAddress(Address):
def __init__(self, host, port):
super().__init__(host, port)
self._socket = None
@property
def socket(self):
if not self._socket:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.bind((self.host, self.port))
print(f'{self.name}: Created socket {self._socket}')
return self._socket
class PublicKey:
def __init__(self, pub):
self.pub = to_bytes(pub, 32, byteorder='little')
def __bytes__(self):
return self.pub
def __repr__(self):
return repr(self.pub)
class PrivateKey:
def __init__(self, priv):
self.priv = to_bytes(priv, 32, byteorder='little')
self.pub = PublicKey(crypto_scalarmult_base(self.priv))
def __bytes__(self):
return self.priv
def __repr__(self):
return repr(self.priv)
class StateI0(Base):
def __init__(self, SpubR, EprivI, SprivI, time, psk):
if not SpubR:
raise RuntimeError('Missing SpubR')
self.SpubR = PublicKey(SpubR)
self.EprivI = PrivateKey(EprivI)
self.SprivI = PrivateKey(SprivI)
self.time = to_bytes(time, 12)
self.psk = to_bytes(psk, 32)
self._compute_hs()
@property
def EpubI(self):
return self.EprivI.pub
@property
def SpubI(self):
return self.SprivI.pub
def _compute_hs(self):
hs = NoiseWG()
# pre-message
hs.mix_hash(self.SpubR)
# message from initiator to responder
hs.mix_hash(self.EpubI)
hs.mix_key(self.EpubI)
hs.mix_dh(self.EprivI, self.SpubR)
self.enc_SpubI = hs.encrypt_and_hash(self.SpubI)
hs.mix_dh(self.SprivI, self.SpubR)
self.enc_time = hs.encrypt_and_hash(self.time)
self.handshake_state = hs
class StateR0(Base):
def __init__(self, EprivR, SprivR, psk):
self.EprivR = PrivateKey(EprivR)
self.SprivR = PrivateKey(SprivR)
self.psk = to_bytes(psk, 32)
    @property
    def EpubR(self):
        # Derived from the responder's ephemeral key; the original method name
        # EpubI looks like a typo, and nothing else in this module calls it.
        return self.EprivR.pub
class StateI1(Base):
fields = ['Tsend', 'Trecv']
def __init__(self, StateI0, EpubR):
if not StateI0:
raise RuntimeError('Missing handshake initiation state')
if not EpubR:
raise RuntimeError('Missing handshake initiation details')
self._compute_hs(StateI0, EpubR, StateI0.handshake_state.copy())
def _compute_hs(self, StateI0, EpubR, hs):
hs.mix_hash(EpubR)
hs.mix_key(EpubR)
hs.mix_dh(StateI0.EprivI, EpubR)
hs.mix_dh(StateI0.SprivI, EpubR)
hs.mix_key_and_hash(StateI0.psk)
self.enc_empty = hs.encrypt_and_hash(b'')
self.Tsend, self.Trecv = hs.split()
class StateR1(Base):
# SpubI and time are not really needed by the handshake, but perhaps this
# could serve as debugging aid.
fields = ['SpubI', 'time', 'Tsend', 'Trecv']
def __init__(self, StateR0, EpubI, enc_SpubI, enc_time):
if not StateR0:
raise RuntimeError('Missing handshake response state')
if not EpubI or not enc_SpubI or not enc_time:
raise RuntimeError('Missing handshake response details')
self._compute_hs(StateR0, EpubI, enc_SpubI, enc_time)
def _compute_hs(self, StateR0, EpubI, enc_SpubI, enc_time):
hs = NoiseWG()
# pre-message
hs.mix_hash(StateR0.SprivR.pub)
# message from initiator to responder
hs.mix_hash(EpubI)
hs.mix_key(EpubI)
hs.mix_dh(StateR0.SprivR, EpubI)
self.SpubI = PublicKey(hs.decrypt_and_hash(enc_SpubI))
hs.mix_dh(StateR0.SprivR, self.SpubI)
self.time = hs.decrypt_and_hash(enc_time)
# message from responder to initiator
self.EpubR = StateR0.EprivR.pub
hs.mix_hash(self.EpubR)
hs.mix_key(self.EpubR)
hs.mix_dh(StateR0.EprivR, EpubI)
hs.mix_dh(StateR0.EprivR, self.SpubI)
hs.mix_key_and_hash(StateR0.psk)
self.enc_empty = hs.encrypt_and_hash(b'')
self.Trecv, self.Tsend = hs.split()
class Data(Base):
def __init__(self, data):
self.data = to_bytes(data, 0)
class Field:
def __init__(self, name, size, constructor=None, fixed=None):
self.name = name
self.size = size
self.fixed = fixed
if constructor is None:
def constructor(data): return to_bytes(data, size)
self._constructor = constructor
def parse_value(self, value):
return self._constructor(value)
class Message(Base):
def __init__(self, *args, **kwargs):
# Do not expose fixed fields through the constructor.
self.fields = [f.name for f in self.fields_desc if not f.fixed]
for i, value in enumerate(args):
name = self.fields[i]
assert name not in kwargs, f'Duplicate parameter: {name}'
kwargs[name] = value
for f in self.fields_desc:
val = kwargs.pop(f.name, None)
val = f.parse_value(val)
assert not f.size or len(bytes(val)) == f.size, \
f'Expected size {f.size} for {f.name}, got {len(val)}: {val!r}'
setattr(self, f.name, val)
assert not kwargs, f'Unexpected parameters: {kwargs}'
def __bytes__(self):
bs = b''
for f in self.fields_desc:
val = f.fixed
if val is None:
val = bytes(getattr(self, f.name))
assert not f.size or len(val) == f.size, \
f'Expected size {f.size} for {f.name}, got {len(val)}: {val!r}'
bs += val
return bs
@classmethod
def from_bytes(cls, bs):
min_size = sum(f.size for f in cls.fields_desc)
assert len(bs) >= min_size, f'Missing data: {len(bs)} < {min_size}'
fields = {}
for fs in cls.fields_desc:
if not fs.size:
# No explicit size set, consume remaining data
value, bs = bs, None
else:
value, bs = bs[:fs.size], bs[fs.size:]
# Ignore values in fixed fields.
if not fs.fixed:
value = fs.parse_value(value)
fields[fs.name] = value
assert not bs, f'Trailing data: {bs}'
return cls(**fields)
class MsgType1(Message):
fields_desc = (
Field('type', 4, fixed=b'\1\0\0\0'),
Field('sender', 4, lambda x: to_bytes(x, 4, 'little')),
Field('EpubI', 32, PublicKey),
Field('enc_SpubI', 48),
Field('enc_time', 28),
Field('mac1', 16, fixed=b'\0' * 16), # overwritten later
Field('mac2', 16),
)
def __init__(self, *args, SpubR=None, **kwargs):
super().__init__(*args, **kwargs)
self.SpubR = PublicKey(SpubR)
def __bytes__(self):
msg = super().__bytes__()
msg = msg[:-32]
msg += calc_mac1(self.SpubR, msg)
msg += self.mac2
return msg
class MsgType2(Message):
fields_desc = (
Field('type', 4, fixed=b'\2\0\0\0'),
Field('sender', 4, lambda x: to_bytes(x, 4, 'little')),
Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
Field('EpubR', 32, PublicKey),
Field('enc_empty', 16),
Field('mac1', 16, fixed=b'\0' * 16), # overwritten later
Field('mac2', 16),
)
def __init__(self, *args, SpubI=None, **kwargs):
super().__init__(*args, **kwargs)
self.SpubI = PublicKey(SpubI)
def __bytes__(self):
msg = super().__bytes__()
msg = msg[:-32]
msg += calc_mac1(self.SpubI, msg)
msg += self.mac2
return msg
class MsgType3(Message):
fields_desc = (
Field('type', 4, fixed=b'\3\0\0\0'),
Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
Field('nonce', 24),
Field('enc_cookie', 32),
)
class MsgType4(Message):
fields_desc = (
Field('type', 4, fixed=b'\4\0\0\0'),
Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
Field('counter', 8, lambda x: to_bytes(x, 8, 'little')),
Field('enc_payload', 0),
)
class State:
def __init__(self):
variables = {}
self.addrL = Storage('addrL', LocalAddress, variables)
self.addrR = Storage('addrR', Address, variables)
self.StateI0 = Storage('StateI0', StateI0, variables)
self.StateI1 = Storage('StateI1', StateI1, variables)
self.StateR0 = Storage('StateR0', StateR0, variables)
self.StateR1 = Storage('StateR1', StateR1, variables)
self.MsgType1 = Storage('MsgType1', MsgType1, variables)
self.MsgType2 = Storage('MsgType2', MsgType2, variables)
self.MsgType3 = Storage('MsgType3', MsgType3, variables)
self.MsgType4 = Storage('MsgType4', MsgType4, variables)
self.Data = Storage('Data', Data, variables)
        self.variables = variables  # keep the registry that the Storage objects populate
def _wait_for_message(self, what, addrL):
addrL = self.addrL.resolve(addrL)
msg_class = what.spec
print(f'Wait for {msg_class.__name__} on {addrL}')
# XXX increase this for testing data messages with higher MTU?
data, address = addrL.socket.recvfrom(4096)
addrR = self.addrR.add(*address)
msg = msg_class.from_bytes(data)
what.add_object(msg)
return msg, addrR
def _send_message(self, what, msg, addrR, addrL):
msg = what.resolve(msg)
addrR = self.addrR.resolve(addrR)
addrL = self.addrL.resolve(addrL)
addrL.socket.sendto(bytes(msg), addrR.address)
def set_local(self, host, port):
return self.addrL.add(host, port)
def set_remote(self, host, port):
return self.addrR.add(host, port)
def noise_init(self, SpubR=None, EprivI=None, SprivI=None, time=None, psk=None):
return self.StateI0.add(SpubR, EprivI, SprivI, time, psk)
def noise_resp(self, EprivR=None, SprivR=None, psk=None):
return self.StateR0.add(EprivR, SprivR, psk)
def make_init(self, sender=None, StateI0=None):
sender = to_bytes(sender, 4, 'little')
StateI0 = self.StateI0.resolve(StateI0)
return self.MsgType1.add(sender, StateI0.EpubI.pub, StateI0.enc_SpubI,
StateI0.enc_time, SpubR=StateI0.SpubR.pub)
def send_init(self, MsgType1=None, addrR=None, addrL=None):
self._send_message(self.MsgType1, MsgType1, addrR, addrL)
def wait_for_init(self, addrL=None):
return self._wait_for_message(self.MsgType1, addrL)
def process_init(self, MsgType1=None, StateR0=None):
MsgType1 = self.MsgType1.resolve(MsgType1)
StateR0 = self.StateR0.resolve(StateR0)
return self.StateR1.add(StateR0, MsgType1.EpubI, MsgType1.enc_SpubI,
MsgType1.enc_time)
def make_resp(self, MsgType1=None, sender=None, StateR1=None):
MsgType1 = self.MsgType1.resolve(MsgType1)
receiver = MsgType1.sender
sender = to_bytes(sender, 4, 'little')
StateR1 = self.StateR1.resolve(StateR1)
return self.MsgType2.add(sender, receiver, StateR1.EpubR.pub,
StateR1.enc_empty,
SpubI=StateR1.SpubI.pub)
def send_resp(self, MsgType2=None, addrR=None, addrL=None):
self._send_message(self.MsgType2, MsgType2, addrR, addrL)
def wait_for_resp(self, addrL=None):
return self._wait_for_message(self.MsgType2, addrL)
def process_resp(self, MsgType2=None, StateI0=None):
MsgType2 = self.MsgType2.resolve(MsgType2)
StateI0 = self.StateI0.resolve(StateI0)
return self.StateI1.add(StateI0, MsgType2.EpubR)
def _make_data(self, receiver=None, counter=None, Tsend=None, data=None):
receiver = to_bytes(receiver, 4, 'little')
counter = to_bytes(counter, 8, 'little')
assert len(Tsend) == 32
data = data or b''
nonce = int.from_bytes(counter, 'little')
enc_data = aead_encrypt(Tsend, nonce, data, b'')
return self.MsgType4.add(receiver, counter, enc_data)
def make_data_as_init(self, receiver=None, counter=None, TsendI=None, data=None):
StateI1 = self.StateI1.resolve(TsendI)
return self._make_data(receiver, counter, StateI1.Tsend, data)
def make_data_as_resp(self, receiver=None, counter=None, TsendR=None, data=None):
StateR1 = self.StateR1.resolve(TsendR)
return self._make_data(receiver, counter, StateR1.Tsend, data)
def send_data(self, MsgType4=None, addrR=None, addrL=None):
self._send_message(self.MsgType4, MsgType4, addrR, addrL)
def wait_for_data(self, addrL=None):
return self._wait_for_message(self.MsgType4, addrL)
def _process_data(self, MsgType4=None, Trecv=None):
assert len(Trecv) == 32
MsgType4 = self.MsgType4.resolve(MsgType4)
nonce = int.from_bytes(MsgType4.counter, 'little')
data = aead_decrypt(Trecv, nonce, MsgType4.enc_payload, b'')
return self.Data.add(data)
def process_data_as_init(self, MsgType4=None, TrecvI=None):
StateI1 = self.StateI1.resolve(TrecvI)
return self._process_data(MsgType4, StateI1.Trecv)
def process_data_as_resp(self, MsgType4=None, TrecvR=None):
StateR1 = self.StateR1.resolve(TrecvR)
return self._process_data(MsgType4, StateR1.Trecv)
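State wires the message builders into a full exchange; a hedged sketch of the initiator side of the handshake plus one data packet (keys, IDs, and addresses are placeholders, and a live responder must be listening for the waits to return):
# Initiator-side flow sketch -- placeholder values throughout.
s = State()
s.set_local('0.0.0.0', 51000)
s.set_remote('203.0.113.1', 51820)              # placeholder peer endpoint
s.noise_init(SpubR='<base64 peer static key>',  # placeholder; must decode to 32 bytes
             EprivI=1, SprivI=2, time=0, psk=0)  # toy integers, padded by to_bytes()
s.make_init(sender=0x11111111)
s.send_init()
s.wait_for_resp()
s.process_resp()
s.make_data_as_init(receiver=0x22222222, counter=0, data=b'\x00' * 16)
s.send_data()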
| 34.557769 | 85 | 0.606006 | 16,033 | 0.924199 | 0 | 0 | 1,069 | 0.061621 | 0 | 0 | 2,407 | 0.138748 |
b7b19fe3229937c45439013a0185385ffb6134b0 | 2,007 | py | Python
BAMF_Detect/modules/dendroid.py | bwall/bamfdetect | 3b0b96a8b2285a1a4b3e3cf5ed1b5ad422b91ed1 | ["MIT"] | 152 | 2015-02-04T16:34:53.000Z | 2021-07-27T19:00:40.000Z
BAMF_Detect/modules/dendroid.py | bwall/bamfdetect | 3b0b96a8b2285a1a4b3e3cf5ed1b5ad422b91ed1 | ["MIT"] | null | null | null
BAMF_Detect/modules/dendroid.py | bwall/bamfdetect | 3b0b96a8b2285a1a4b3e3cf5ed1b5ad422b91ed1 | ["MIT"] | 30 | 2015-03-31T10:20:32.000Z | 2022-02-09T16:17:04.000Z
from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata
from base64 import b64decode
from string import printable
class dendroid(AndroidParseModule):
def __init__(self):
md = ModuleMetadata(
module_name="dendroid",
bot_name="Dendroid",
description="Android RAT",
authors=["Brian Wallace (@botnet_hunter)"],
version="1.0.0",
date="August 18, 2014",
references=[]
)
        AndroidParseModule.__init__(self, md)
        self.yara_rules = None
def _generate_yara_rules(self):
if self.yara_rules is None:
self.yara_rules = load_yara_rules("dendroid.yara")
return self.yara_rules
def get_bot_information(self, file_data):
results = {}
uri = None
password = None
        for s in data_strings(file_data, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="):  # base64 alphabet
try:
line = b64decode(s)
if len(line) == 0:
continue
valid = True
for c in line:
if c not in printable:
valid = False
if not valid:
continue
if line.lower().startswith("https://") or line.lower().startswith("http://"):
uri = line
continue
if uri is not None:
password = line
break
except TypeError:
continue
if uri is not None:
results["c2_uri"] = uri
if password is not None:
try:
password.decode("utf8")
results["password"] = password
except UnicodeDecodeError:
results["password"] = "h" + password.encode("hex")
return results
Modules.list.append(dendroid())
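get_bot_information walks candidate base64 strings, keeps only printable decodes, treats the first URL as the C2, and takes the next string as the password; a tiny illustration of that decode-and-order convention (the sample strings are made up):
# Decode-and-filter illustration (Python 2 era, matching the module above).
from base64 import b64decode
samples = ['aHR0cHM6Ly9leGFtcGxlLmNvbS9wYW5lbA==', 'aHVudGVyMg==']  # made-up data
print([b64decode(s) for s in samples])
# -> ['https://example.com/panel', 'hunter2']: URL first, then the password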
| 33.45 | 119 | 0.521176 | 1,819 | 0.906328 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.116094 |
b7b264987ed766841bcdbbf47f7266aa100f16c2 | 8,942 | py | Python
corehq/ex-submodules/couchforms/tests/test_analytics.py | caktus/commcare-hq | fae9c0f792b4889b768252d3895d9d1884ce0d3b | ["BSD-3-Clause"] | null | null | null
corehq/ex-submodules/couchforms/tests/test_analytics.py | caktus/commcare-hq | fae9c0f792b4889b768252d3895d9d1884ce0d3b | ["BSD-3-Clause"] | 40 | 2021-07-20T15:55:08.000Z | 2022-03-25T17:21:36.000Z
corehq/ex-submodules/couchforms/tests/test_analytics.py | caktus/commcare-hq | fae9c0f792b4889b768252d3895d9d1884ce0d3b | ["BSD-3-Clause"] | null | null | null
import datetime
import uuid
from django.test import TestCase
from mock import patch
from requests import ConnectionError
from couchforms.analytics import (
app_has_been_submitted_to_in_last_30_days,
domain_has_submission_in_last_30_days,
get_all_xmlns_app_id_pairs_submitted_to_in_domain,
get_exports_by_form,
get_first_form_submission_received,
get_form_analytics_metadata,
get_last_form_submission_received,
get_number_of_forms_in_domain,
update_analytics_indexes,
)
from couchforms.models import XFormInstance, XFormError
from pillowtop.es_utils import initialize_index_and_mapping
from testapps.test_pillowtop.utils import process_pillow_changes
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import get_es_new, send_to_elasticsearch
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.form_processor.utils import TestFormMetadata
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import (
DocTestMixin,
disable_quickcache,
get_form_ready_to_save,
trap_extra_setup,
)
@es_test
@disable_quickcache
class ExportsFormsAnalyticsTest(TestCase, DocTestMixin):
maxDiff = None
@classmethod
def setUpClass(cls):
super(ExportsFormsAnalyticsTest, cls).setUpClass()
from casexml.apps.case.tests.util import delete_all_xforms
from corehq.apps.app_manager.models import Application, Module, Form
delete_all_xforms()
        with trap_extra_setup(ConnectionError, msg="cannot connect to elasticsearch"):
cls.es = get_es_new()
initialize_index_and_mapping(cls.es, XFORM_INDEX_INFO)
cls.domain = 'exports_forms_analytics_domain'
cls.app_id_1 = 'a' + uuid.uuid4().hex
cls.app_id_2 = 'b' + uuid.uuid4().hex
cls.xmlns_1 = 'my://crazy.xmlns/'
cls.xmlns_2 = 'my://crazy.xmlns/app'
cls.apps = [
Application(_id=cls.app_id_2, domain=cls.domain,
modules=[Module(forms=[Form(xmlns=cls.xmlns_2)])])
]
for app in cls.apps:
app.save()
cls.forms = [
XFormInstance(domain=cls.domain,
app_id=cls.app_id_1, xmlns=cls.xmlns_1),
XFormInstance(domain=cls.domain,
app_id=cls.app_id_1, xmlns=cls.xmlns_1),
XFormInstance(domain=cls.domain,
app_id=cls.app_id_2, xmlns=cls.xmlns_2),
]
cls.error_forms = [XFormError(domain=cls.domain)]
cls.all_forms = cls.forms + cls.error_forms
for form in cls.all_forms:
form.save()
send_to_elasticsearch('forms', form.to_json())
cls.es.indices.refresh(XFORM_INDEX_INFO.index)
update_analytics_indexes()
@classmethod
def tearDownClass(cls):
for form in cls.all_forms:
form.delete()
for app in cls.apps:
app.delete()
ensure_index_deleted(XFORM_INDEX_INFO.index)
super(ExportsFormsAnalyticsTest, cls).tearDownClass()
def test_get_form_analytics_metadata__no_match(self):
self.assertIsNone(
get_form_analytics_metadata(self.domain, self.app_id_1, self.xmlns_2))
def test_get_form_analytics_metadata__no_app(self):
self.assertEqual(
get_form_analytics_metadata(self.domain, self.app_id_1, self.xmlns_1),
{'submissions': 2, 'xmlns': 'my://crazy.xmlns/'}
)
def test_get_form_analytics_metadata__app(self):
self.assertEqual(get_form_analytics_metadata(self.domain, self.app_id_2, self.xmlns_2), {
'app': {'id': self.app_id_2, 'langs': [], 'name': None},
'app_deleted': False,
'form': {'id': 0, 'name': {}},
'module': {'id': 0, 'name': {}},
'submissions': 1,
'xmlns': 'my://crazy.xmlns/app'
})
def test_get_exports_by_form(self):
self.assertEqual(get_exports_by_form(self.domain), [{
'value': {'xmlns': 'my://crazy.xmlns/', 'submissions': 2},
'key': ['exports_forms_analytics_domain', self.app_id_1,
'my://crazy.xmlns/']
}, {
'value': {
'xmlns': 'my://crazy.xmlns/app',
'form': {'name': {}, 'id': 0},
'app': {'langs': [], 'name': None, 'id': self.app_id_2},
'module': {'name': {}, 'id': 0},
'app_deleted': False, 'submissions': 1},
'key': ['exports_forms_analytics_domain', self.app_id_2,
'my://crazy.xmlns/app']
}])
TEST_ES_META = {
XFORM_INDEX_INFO.index: XFORM_INDEX_INFO
}
@disable_quickcache
class CouchformsESAnalyticsTest(TestCase):
domain = 'hqadmin-es-accessor'
@classmethod
def setUpClass(cls):
super(CouchformsESAnalyticsTest, cls).setUpClass()
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def create_form_and_sync_to_es(received_on):
with process_pillow_changes('xform-pillow', {'skip_ucr': True}):
with process_pillow_changes('DefaultChangeFeedPillow'):
metadata = TestFormMetadata(domain=cls.domain, app_id=cls.app_id,
xmlns=cls.xmlns, received_on=received_on)
form = get_form_ready_to_save(metadata, is_db_test=True)
form_processor = FormProcessorInterface(domain=cls.domain)
form_processor.save_processed_models([form])
return form
from casexml.apps.case.tests.util import delete_all_xforms
delete_all_xforms()
cls.now = datetime.datetime.utcnow()
cls._60_days = datetime.timedelta(days=60)
cls.domain = 'my_crazy_analytics_domain'
cls.app_id = uuid.uuid4().hex
cls.xmlns = 'my://crazy.xmlns/'
with trap_extra_setup(ConnectionError):
cls.elasticsearch = get_es_new()
initialize_index_and_mapping(cls.elasticsearch, XFORM_INDEX_INFO)
cls.forms = [create_form_and_sync_to_es(cls.now), create_form_and_sync_to_es(cls.now - cls._60_days)]
cls.elasticsearch.indices.refresh(XFORM_INDEX_INFO.index)
@classmethod
def tearDownClass(cls):
ensure_index_deleted(XFORM_INDEX_INFO.index)
FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain)
super(CouchformsESAnalyticsTest, cls).tearDownClass()
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def test_get_number_of_cases_in_domain(self):
self.assertEqual(
get_number_of_forms_in_domain(self.domain),
len(self.forms)
)
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def test_domain_has_submission_in_last_30_days(self):
self.assertEqual(
domain_has_submission_in_last_30_days(self.domain), True)
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def test_get_first_form_submission_received(self):
self.assertEqual(
get_first_form_submission_received(self.domain),
self.now - self._60_days)
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def test_get_last_form_submission_received(self):
self.assertEqual(
get_last_form_submission_received(self.domain), self.now)
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def test_app_has_been_submitted_to_in_last_30_days(self):
self.assertEqual(
app_has_been_submitted_to_in_last_30_days(self.domain, self.app_id),
True)
@patch('couchforms.analytics.FormES.index', XFORM_INDEX_INFO.index)
@patch('corehq.apps.es.es_query.ES_META', TEST_ES_META)
@patch('corehq.elastic.ES_META', TEST_ES_META)
def test_get_all_xmlns_app_id_pairs_submitted_to_in_domain(self):
self.assertEqual(
get_all_xmlns_app_id_pairs_submitted_to_in_domain(self.domain),
{(self.xmlns, self.app_id)})
| 40.645455 | 113 | 0.674122 | 7,569 | 0.846455 | 0 | 0 | 7,618 | 0.851935 | 0 | 0 | 1,307 | 0.146164 |
b7b2bf3009f886fb399b6a73378d628699d8010a | 462 | py | Python
src/pythae/models/svae/svae_config.py | eknag/benchmark_VAE | 8b727f29a68aff7771c4c97aff15f75f88320e1f | ["Apache-2.0"] | 1 | 2022-03-20T20:23:59.000Z | 2022-03-20T20:23:59.000Z
src/pythae/models/svae/svae_config.py | eknag/benchmark_VAE | 8b727f29a68aff7771c4c97aff15f75f88320e1f | ["Apache-2.0"] | null | null | null
src/pythae/models/svae/svae_config.py | eknag/benchmark_VAE | 8b727f29a68aff7771c4c97aff15f75f88320e1f | ["Apache-2.0"] | null | null | null
from pydantic.dataclasses import dataclass
from ...models import VAEConfig
@dataclass
class SVAEConfig(VAEConfig):
r"""
    :math:`\mathcal{S}`-VAE model config class
Parameters:
input_dim (int): The input_data dimension
latent_dim (int): The latent space dimension in which lives the hypersphere. Default: None.
reconstruction_loss (str): The reconstruction loss to use ['bce', 'mse']. Default: 'mse'
"""
pass
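Being a pydantic dataclass, the config is instantiated directly with keyword overrides; a small sketch (values are arbitrary examples):
# Usage sketch -- arbitrary example values.
config = SVAEConfig(latent_dim=16, reconstruction_loss='bce')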
| 27.176471 | 99 | 0.690476 | 372 | 0.805195 | 0 | 0 | 383 | 0.829004 | 0 | 0 | 330 | 0.714286 |
b7b3ac20c4a0978bc24a666dd7cd9ac25c00bc3a | 7,486 | py | Python
amck/imat/download_data.py | aaronmckinstry706/imaterialist | ebed3f3bd9e231f0852aa30c7bbfafe94d5dbce1 | ["MIT"] | null | null | null
amck/imat/download_data.py | aaronmckinstry706/imaterialist | ebed3f3bd9e231f0852aa30c7bbfafe94d5dbce1 | ["MIT"] | null | null | null
amck/imat/download_data.py | aaronmckinstry706/imaterialist | ebed3f3bd9e231f0852aa30c7bbfafe94d5dbce1 | ["MIT"] | null | null | null
# Parts of code taken from https://www.kaggle.com/aloisiodn/python-3-download-multi-proc-prog-bar-resume by Dourado.
# Improvements on the original script:
# * you can choose which dataset to download;
# * uses threads instead of processes;
# * unpacks data into .../label/id.jpg directory structure, which can be used easily via classes in PyTorch;
# * performance-relevant parameters are command line arguments.
# For performance parameters, the recommended values (from my machine; probably requires tweaking for others) are 100
# connection pools, 128 threads. Not all images with working URLs will be retrieved, but about 90-95% of them will. As
# a consequence, to ensure that nearly all images have been downloaded, repeat the script 3-4 times.
import argparse
import io
import json
import logging
import multiprocessing.pool as pool
import pathlib
import random
import sys
import typing
import urllib3
import PIL.Image as Image
from tqdm import tqdm
# Get command line arguments.
arg_parser = argparse.ArgumentParser(
description='Downloads the data files using the links given in the JSON training, validation, and test files. '
'Assumes that the files are stored in the directory data/metadata (relative to the current working '
'directory). Training files will be written to data/training/label_id/image_id.jpg, validation files '
'will be written to data/validation/label_id/image_id.jpg, and test files will be written to '
'data/testing/image_id.jpg.')
arg_parser.add_argument(
'--num-pools', '-p', type=int, default=10, help='Number of connection pools to cache at one time.')
arg_parser.add_argument(
'--num-workers', '-w', type=int, default=8, help='Number of threads to perform downloads.')
arg_parser.add_argument(
'--verbose', '-v', action='count', help='Print additional output messages. Can be passed multiple times. Once '
'prints additional status information, and two or more times prints '
'debugging information.', default=0)
arg_parser.add_argument(
'--limit', '-l', type=int, default=sys.maxsize, help='Maximum number of files to download before stopping.')
arg_parser.add_argument(
'--re-download', action='store_true', default=False, help='Whether to re-download existing files.')
arg_parser.add_argument(
'--dataset', '-d', type=str, choices={'training', 'validation', 'testing'}, help='Which dataset to download.')
parsed_args = arg_parser.parse_args()
# Set up logging.
urllib3.disable_warnings()
LOGGER = logging.getLogger(__name__)
STDOUT_HANDLER = logging.StreamHandler(sys.stdout)
if parsed_args.verbose == 1:
STDOUT_HANDLER.setLevel(logging.INFO)
elif parsed_args.verbose >= 2:
STDOUT_HANDLER.setLevel(logging.DEBUG)
LOGGER.addHandler(STDOUT_HANDLER)
LOGGER.setLevel(logging.DEBUG)
# Initialize globals.
failed_downloads = []
http = urllib3.PoolManager(num_pools=parsed_args.num_pools)
def download_image(url: str, filepath: pathlib.Path):
global parsed_args
global http
file_exists = filepath.exists()
if parsed_args.re_download and file_exists:
filepath.unlink()
elif not parsed_args.re_download and file_exists:
return
response = http.request('GET', url, timeout=urllib3.Timeout(10))
image_data = response.data
pil_image = Image.open(io.BytesIO(image_data))
pil_image_rgb = pil_image.convert('RGB')
pil_image_rgb.save(str(filepath), format='JPEG', quality=90)
def download_labeled_image(info: typing.Tuple[str, int, int, pathlib.Path]):
global failed_downloads
url: str = info[0]
image_id: int = info[1]
label_id: int = info[2]
base_dir: pathlib.Path = info[3]
label_dir = base_dir.joinpath(str(label_id))
filepath = label_dir.joinpath(str(image_id) + '.jpg')
label_dir.mkdir(parents=True, exist_ok=True)
try:
download_image(url, filepath)
except Exception as e:
failed_downloads.append((image_id, str(e)))
def download_unlabeled_image(info: typing.Tuple[str, int, pathlib.Path]):
global failed_downloads
url: str = info[0]
image_id: int = info[1]
base_dir: pathlib.Path = info[2]
label_dir = base_dir.joinpath('dummy-class')
filepath = label_dir.joinpath(str(image_id) + '.jpg')
label_dir.mkdir(parents=True, exist_ok=True)
try:
download_image(url, filepath)
except Exception as e:
failed_downloads.append((image_id, str(e)))
training_base_dir = pathlib.Path('data/training')
validation_base_dir = pathlib.Path('data/validation')
testing_base_dir = pathlib.Path('data/testing')
metadata_base_dir = pathlib.Path('data/metadata')
with metadata_base_dir.joinpath('train.json').open('r') as training_urls_file:
training_urls_json = json.load(training_urls_file)
with metadata_base_dir.joinpath('validation.json').open('r') as validation_urls_file:
validation_urls_json = json.load(validation_urls_file)
with metadata_base_dir.joinpath('test.json').open('r') as testing_urls_file:
testing_urls_json = json.load(testing_urls_file)
num_training_images = len(training_urls_json['images'])
num_validation_images = len(validation_urls_json['images'])
num_testing_images = len(testing_urls_json['images'])
LOGGER.info('{} training images, {} validation images, and {} testing images.'.format(
num_training_images, num_validation_images, num_testing_images))
thread_pool = pool.ThreadPool(processes=parsed_args.num_workers)
if parsed_args.dataset == 'training':
training_image_info = []
for image_info, annotation_info in zip(training_urls_json['images'], training_urls_json['annotations']):
training_image_info.append((image_info['url'][0], image_info['image_id'], annotation_info['label_id'],
training_base_dir))
random.shuffle(training_image_info)
with tqdm(total=len(training_image_info), desc='Training images') as t:
for i, _ in enumerate(thread_pool.imap_unordered(download_labeled_image, training_image_info)):
t.update(1)
if i >= parsed_args.limit:
break
elif parsed_args.dataset == 'validation':
validation_image_info = []
for image_info, annotation_info in zip(validation_urls_json['images'], validation_urls_json['annotations']):
validation_image_info.append((image_info['url'][0], image_info['image_id'], annotation_info['label_id'],
validation_base_dir))
random.shuffle(validation_image_info)
with tqdm(total=len(validation_image_info), desc='Validation images') as t:
for i, _ in enumerate(thread_pool.imap_unordered(download_labeled_image, validation_image_info)):
t.update(1)
if i >= parsed_args.limit:
break
elif parsed_args.dataset == 'testing':
testing_image_info = []
for image_info in testing_urls_json['images']:
testing_image_info.append((image_info['url'][0], image_info['image_id'], testing_base_dir))
random.shuffle(testing_image_info)
with tqdm(total=len(testing_image_info), desc='Testing images') as t:
for i, _ in enumerate(thread_pool.imap_unordered(download_unlabeled_image, testing_image_info)):
t.update(1)
if i >= parsed_args.limit:
break
LOGGER.info('{} images could not be retrieved.'.format(len(failed_downloads)))
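Matching the recommended values in the header comment, a typical run looks like the following (repeated a few times to mop up transient failures):
#   python download_data.py --dataset training --num-pools 100 --num-workers 128
# The same command with --dataset validation or --dataset testing fills the
# other two directory trees.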
| 41.821229 | 118 | 0.716137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,236 | 0.298691 |
b7b473cc7728c8bc405a3042f957fe877f0c27c2 | 790 | py | Python
JTP Recap./2.Program_IO/function.py | SNP0301/Study_Python | bb7ce7a0d915131c281152e7faee5b57c290df6b | ["MIT"] | null | null | null
JTP Recap./2.Program_IO/function.py | SNP0301/Study_Python | bb7ce7a0d915131c281152e7faee5b57c290df6b | ["MIT"] | null | null | null
JTP Recap./2.Program_IO/function.py | SNP0301/Study_Python | bb7ce7a0d915131c281152e7faee5b57c290df6b | ["MIT"] | null | null | null
"""
Function
def function_name(arg1, arg2, ...) :
<op 1>
<op 2>
...
Function with undefined amount of input
def fn_name(*args) --> args' elements make tuple.
kwargs = Keyword Parameter
>>> def print_kwargs(**kwargs):
... print(kwargs)
...
>>> print_kwargs(a=1)
{'a':1}
>>> print_kwargs(name='foo', age=3)
{'age':3, 'name':'foo'}
**args_name = make args_name as a dictionary
clearing & assignment : element should be added in the last part of args
Lambda : another method to make fn
lambda arg1, arg2, .. : operation_of_fn
>>> add = lambda a,b : a+b
>>> result = add(3,4)
>>> print(result)
7
lambda can return result with out expression 'return'
Contents Source : https://wikidocs.net/24
"""
| 22.571429 | 72 | 0.596203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 790 | 1 |
b7b56be6d7b0d5e174db26b106b5f7914f624c31 | 2,437 | py | Python
tweet/common.py | skiwheelr/URS | 45746c945af2ea62a5ddb675c6a8e4e2fe4b5a93 | ["MIT"] | 4 | 2021-02-15T02:02:38.000Z | 2022-03-04T16:38:44.000Z
tweet/common.py | skiwheelr/URS | 45746c945af2ea62a5ddb675c6a8e4e2fe4b5a93 | ["MIT"] | null | null | null
tweet/common.py | skiwheelr/URS | 45746c945af2ea62a5ddb675c6a8e4e2fe4b5a93 | ["MIT"] | 2 | 2021-02-11T01:10:27.000Z | 2021-02-25T19:20:04.000Z
import tweepy, config, users, re, groupy
from tweepy import OAuthHandler
from tweepy import API
print(tweepy.__version__)
auth = OAuthHandler(config.consumer_key, config.consumer_secret)
auth.set_access_token(config.access_token,config.access_token_secret)
api = tweepy.API(auth)
from groupy.client import Client
client = Client.from_token(config.groupme_token)
def messenger(tickr):
for group in client.groups.list():
if group.name=="COMMonMENTions":
# print(group.name)
# msg ="Mentioned by pharmdca and mrzackmorris: "+ str(tickr)
message = group.post(text="(<50 Tweets) Mentioned by @ripster47, @pharmdca and @mrzackmorris: "+ str(tickr))
exp = r'\$([A-Z]{3,4})'
one = []
two = []
three = []
all = []
#mrzackmorris
for user in users.list[:1]:
userID = user
tweets = api.user_timeline(screen_name=userID,count=100, include_rts = False, tweet_mode='extended')
for info in tweets:
if re.findall(exp,info.full_text):
for ticker in re.findall(exp,info.full_text):
if ticker not in one:
one.append(ticker)
# print(user, " mentioned ", re.findall(exp,info.full_text))
print(user, "mentioned", one)
#pharmdca
for user in users.list[1:2]:
userID = user
tweets = api.user_timeline(screen_name=userID,count=100, include_rts = False, tweet_mode='extended')
for info in tweets:
if re.findall(exp,info.full_text):
for ticker in re.findall(exp,info.full_text):
if ticker not in two:
two.append(ticker)
# print(user, " mentioned ", re.findall(exp,info.full_text))
print(user, "mentioned", two)
#ripster47
for user in users.list[2:3]:
userID = user
tweets = api.user_timeline(screen_name=userID,count=100, include_rts = False, tweet_mode='extended')
for info in tweets:
if re.findall(exp,info.full_text):
for ticker in re.findall(exp,info.full_text):
if ticker not in three:
three.append(ticker)
# print(user, " mentioned ", re.findall(exp,info.full_text))
print(user, "mentioned", three)
a_set = set(one)
b_set = set(two)
c_set = set(three)
if (a_set & b_set & c_set):
all.append(a_set & b_set & c_set)
print("All 3 mentioned ", all)
messenger(all)
else:
    print("Nothing Notable")
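The three per-user loops above differ only in their slice of users.list and their accumulator; a hedged deduplication sketch using the same tweepy calls (renaming the final accumulator would also stop shadowing the built-in all):
# Refactor sketch: one helper in place of the three near-identical loops.
def tickers_for(user_id):
    mentioned = []
    tweets = api.user_timeline(screen_name=user_id, count=100,
                               include_rts=False, tweet_mode='extended')
    for info in tweets:
        for ticker in re.findall(exp, info.full_text):
            if ticker not in mentioned:
                mentioned.append(ticker)
    return mentioned
one, two, three = (tickers_for(u) for u in users.list[:3])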
| 33.847222 | 120 | 0.636849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.201888 |
b7b6119291ad7a11b8a2999460f70e3f10db60c8 | 8,486 | py | Python
crystalpy/examples/PlotData1D.py | oasys-kit/crystalpy | 4852851c622ef914844178577dea4075e92cd8dd | ["MIT"] | null | null | null
crystalpy/examples/PlotData1D.py | oasys-kit/crystalpy | 4852851c622ef914844178577dea4075e92cd8dd | ["MIT"] | null | null | null
crystalpy/examples/PlotData1D.py | oasys-kit/crystalpy | 4852851c622ef914844178577dea4075e92cd8dd | ["MIT"] | null | null | null
"""
---OK---
"""
from collections import OrderedDict
import copy
import numpy as np
from crystalpy.examples.Values import Interval
class PlotData1D(object):
"""
Represents a 1D plot. The graph data together with related information.
"""
def __init__(self, title, title_x_axis, title_y_axis):
"""
Constructor.
:param title: Plot title.
:param title_x_axis: X axis' title.
:param title_y_axis: Y axis' title.
"""
# Set titles.
self.title = title
self.title_x_axis = title_x_axis
self.title_y_axis = title_y_axis
# Initialize X and Y ranges.
self.x_min = None
self.x_max = None
self.y_min = None
self.y_max = None
# Initialize X and Y data.
self.x = None
self.y = None
# Initialize plot information to empty ordered dictionary.
self._plot_info = OrderedDict()
def set_x_min(self, x_min):
"""
Sets x range minimum.
:param x_min: X range minimum.
"""
self.x_min = x_min
def set_x_max(self, x_max):
"""
Sets X range maximum.
:param x_max: X range maximum.
"""
self.x_max = x_max
def set_y_min(self, y_min):
"""
Sets Y range minimum.
:param y_min: Y range minimum.
"""
self.y_min = y_min
def set_y_max(self, y_max):
"""
Sets Y range maximum.
:param y_max: Y range maximum.
"""
self.y_max = y_max
def set_x(self, x):
"""
Sets X data.
:param x: x data.
"""
self.x = x
def set_y(self, y):
"""
Sets Y data.
:param y: y data.
"""
self.y = y
def _set_interval_to_zero(self, indices, lower=True, upper=True):
"""
Sets the y's to zero in certain intervals of x's (extrema included).
:param indices: pair with the two extrema of the x interval.
:param lower: if True include the lower end of the interval.
:param upper: if True include the upper end of the interval.
"""
try:
inf_index = indices.inf
sup_index = indices.sup
# adjust the indices according to the lower and upper parameters.
if not lower:
inf_index += 1
if not upper:
sup_index -= 1
# in the index range defined by inf_index and sup_index, set the y's to zero.
for i in range(inf_index, sup_index + 1):
self.y[i] = 0
except TypeError:
print("\nERROR: could not set the values to zero in the specified intervals.\n")
def _unwrap_interval(self, indices, deg, lower=True, upper=True):
"""
Unwraps the y data vector in a certain interval.
:param indices: indices determining the interval to unwrap.
:param deg: True if values are in degrees. False if radians.
:param lower: if True include the lower end of the interval.
:param upper: if True include the upper end of the interval.
"""
inf_index = indices.inf
sup_index = indices.sup
# adjust the indices according to the lower and upper parameters.
if not lower:
inf_index += 1
if not upper:
sup_index -= 1
# numpy.unwrap works on data in radians, so if the data is in degrees, it needs to be converted.
if deg:
self.y = np.deg2rad(self.y)
# cut out the part to unwrap and then stitch it back on.
temp = self.y[inf_index:sup_index + 1]
self.y[inf_index:sup_index + 1] = np.unwrap(temp)
# convert back to degrees.
self.y = np.rad2deg(self.y)
return
# cut out the part to unwrap and then stitch it back on.
temp = self.y[inf_index:sup_index + 1]
self.y[inf_index:sup_index + 1] = np.unwrap(temp)
def _optimize_interval(self, indices, phase_limits):
"""
Takes an interval and restricts it so that the extrema match the points where the phase
becomes bigger(smaller) than some upper(lower) limit.
:param indices: indices corresponding to the interval to be optimized.
:param phase_limits: the limits of the phase to be used for the optimization, [min, max].
:return: indices of the optimized interval.
"""
inf = indices.inf
sup = indices.sup
# check the intervals.
if (self.y[inf] > phase_limits[1] or
self.y[inf] < phase_limits[0]):
print("\nERROR in PlotData1D._optimize_interval: First value in the interval exceeds limitations.")
return indices
if (self.y[sup] > phase_limits[1] or
self.y[sup] < phase_limits[0]):
print("\nERROR in PlotData1D._optimize_interval: Last value in the interval exceeds limitations.")
return indices
# starting from the lower end.
i = inf # counter initialization.
while phase_limits[0] < self.y[i] < phase_limits[1]:
i += 1
# if the conditions are not satisfied for index i:
new_inf = i - 1
# starting from the upper end.
i = sup # counter initialization.
while phase_limits[0] < self.y[i] < phase_limits[1]:
i -= 1
# if the conditions are not satisfied for index i:
new_sup = i + 1
new_indices = Interval(new_inf, new_sup)
# check that the inf is smaller than (or equal to) the sup.
if not new_indices.check_extrema():
print("\nERROR in PlotData1D._optimize_interval: The phase might be undersampled.")
return indices
return new_indices
def smart_unwrap(self, intervals, intervals_number, phase_limits, deg):
"""
Unwraps data correctly by avoiding discontinuities.
:param intervals: list of pairs. Each element is a pair with the two extrema of the x interval.
:param phase_limits: min and max tolerable values for the phase plot, [min, max].
:param intervals_number: number of intervals to set to zero.
:param deg: True if values are in degrees. False if radians.
"""
if intervals_number == 0:
if deg:
self.y = np.deg2rad(self.y) # unwrap works with radians.
self.y = np.unwrap(self.y)
self.y = np.rad2deg(self.y) # convert back to degrees.
return
self.y = np.unwrap(self.y)
return
# transform self.x into a numpy.ndarray object.
x = np.asarray(self.x)
# careful! only works with monotonic sequences.
temp_index = x.argmin()
for interval in intervals:
inf = interval.inf
sup = interval.sup
# find the indices of the y array corresponding to inf and sup.
inf_index = abs(x - inf).argmin()
sup_index = abs(x - sup).argmin()
# optimize the interval.
indices = Interval(inf_index, sup_index)
new_indices = self._optimize_interval(indices, phase_limits)
# unwrap the data before the interval.
indices_to_unwrap = Interval(temp_index, new_indices.inf)
self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=False)
# set the interval to zero.
indices_to_set = new_indices
self._set_interval_to_zero(indices_to_set, lower=True, upper=False)
temp_index = new_indices.sup
# careful! only works with monotonic sequences.
indices_to_unwrap = Interval(temp_index, x.argmax())
self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=True)
def add_xy_point(self, x_point, y_point):
"""
Adds an x-y point.
:param x_point: x coordinate.
:param y_point: y coordinate.
"""
self.x.append(x_point)
self.y.append(y_point)
def add_plot_info(self, name, info):
"""
        Adds an entry to the plot info dictionary.
        :param name: name (key) of the info entry.
        :param info: the info value to store.
"""
self._plot_info[name] = info
def plot_info(self):
"""
        Returns a deep copy of the plot info dictionary.
        :return: the plot info copy.
"""
return copy.deepcopy(self._plot_info)
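# --- Illustrative sketch (not part of the original file) ---
# A minimal, self-contained demo of the numpy.unwrap behaviour the unwrap
# methods above rely on: unwrap operates on radians, so data in degrees
# must be converted first and converted back afterwards.
import numpy as np

_phase_deg = np.array([170.0, 179.0, -179.0, -170.0])  # wrapped at +/-180
_unwrapped = np.rad2deg(np.unwrap(np.deg2rad(_phase_deg)))
# _unwrapped is [170., 179., 181., 190.] - the jump at +/-180 is removed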
| 32.389313
| 111
| 0.582371
| 8,350
| 0.983974
| 0
| 0
| 0
| 0
| 0
| 0
| 4,024
| 0.474193
|
b7b6ad2f30eeeef89f8be32e1b423aba5b40b98c
| 51
|
py
|
Python
|
src/deep_dialog/usersims/__init__.py
|
Yuqing2018/tcbot_python3
|
583ce1b670f7c67669ff437e69eb09832e784da6
|
[
"MIT"
] | null | null | null |
src/deep_dialog/usersims/__init__.py
|
Yuqing2018/tcbot_python3
|
583ce1b670f7c67669ff437e69eb09832e784da6
|
[
"MIT"
] | null | null | null |
src/deep_dialog/usersims/__init__.py
|
Yuqing2018/tcbot_python3
|
583ce1b670f7c67669ff437e69eb09832e784da6
|
[
"MIT"
] | null | null | null |
from .usersim_rule import *
from .realUser import *
| 25.5
| 27
| 0.784314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b7b6cb22cdbe422b0a500a1606789a7d83af799b
| 38,534
|
py
|
Python
|
hs_file_types/models/geofeature.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | null | null | null |
hs_file_types/models/geofeature.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | null | null | null |
hs_file_types/models/geofeature.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import logging
import shutil
import zipfile
import xmltodict
from lxml import etree
from osgeo import ogr, osr
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.utils.html import strip_tags
from django.template import Template, Context
from dominate.tags import legend, table, tbody, tr, th, div
from hs_core.models import Title, CoreMetaData
from hs_core.hydroshare import utils
from hs_core.forms import CoverageTemporalForm
from hs_core.signals import post_add_geofeature_aggregation
from hs_geographic_feature_resource.models import GeographicFeatureMetaDataMixin, \
OriginalCoverage, GeometryInformation, FieldInformation
from base import AbstractFileMetaData, AbstractLogicalFile, FileTypeContext
UNKNOWN_STR = "unknown"
class GeoFeatureFileMetaData(GeographicFeatureMetaDataMixin, AbstractFileMetaData):
# the metadata element models are from the geographic feature resource type app
model_app_label = 'hs_geographic_feature_resource'
def get_metadata_elements(self):
elements = super(GeoFeatureFileMetaData, self).get_metadata_elements()
elements += [self.originalcoverage, self.geometryinformation]
elements += list(self.fieldinformations.all())
return elements
@classmethod
def get_metadata_model_classes(cls):
metadata_model_classes = super(GeoFeatureFileMetaData, cls).get_metadata_model_classes()
metadata_model_classes['originalcoverage'] = OriginalCoverage
metadata_model_classes['geometryinformation'] = GeometryInformation
metadata_model_classes['fieldinformation'] = FieldInformation
return metadata_model_classes
def get_html(self):
"""overrides the base class function"""
html_string = super(GeoFeatureFileMetaData, self).get_html()
html_string += self.geometryinformation.get_html()
if self.spatial_coverage:
html_string += self.spatial_coverage.get_html()
if self.originalcoverage:
html_string += self.originalcoverage.get_html()
if self.temporal_coverage:
html_string += self.temporal_coverage.get_html()
html_string += self._get_field_informations_html()
template = Template(html_string)
context = Context({})
return template.render(context)
def _get_field_informations_html(self):
root_div = div(cls="content-block")
with root_div:
legend('Field Information')
with table(style="width: 100%;"):
with tbody():
with tr(cls='row'):
th('Name')
th('Type')
th('Width')
th('Precision')
for field_info in self.fieldinformations.all():
field_info.get_html(pretty=False)
return root_div.render()
def get_html_forms(self, datatset_name_form=True):
"""overrides the base class function to generate html needed for metadata editing"""
root_div = div("{% load crispy_forms_tags %}")
with root_div:
super(GeoFeatureFileMetaData, self).get_html_forms()
with div(cls="content-block"):
div("{% crispy geometry_information_form %}")
with div(cls="content-block"):
div("{% crispy spatial_coverage_form %}")
with div(cls="content-block"):
div("{% crispy original_coverage_form %}")
template = Template(root_div.render())
context_dict = dict()
context_dict["geometry_information_form"] = self.get_geometry_information_form()
update_action = "/hsapi/_internal/GeoFeatureLogicalFile/{0}/{1}/{2}/update-file-metadata/"
create_action = "/hsapi/_internal/GeoFeatureLogicalFile/{0}/{1}/add-file-metadata/"
temp_cov_form = self.get_temporal_coverage_form()
if self.temporal_coverage:
form_action = update_action.format(self.logical_file.id, "coverage",
self.temporal_coverage.id)
temp_cov_form.action = form_action
else:
form_action = create_action.format(self.logical_file.id, "coverage")
temp_cov_form.action = form_action
context_dict["temp_form"] = temp_cov_form
context_dict['original_coverage_form'] = self.get_original_coverage_form()
context_dict['spatial_coverage_form'] = self.get_spatial_coverage_form()
context = Context(context_dict)
rendered_html = template.render(context)
rendered_html += self._get_field_informations_html()
return rendered_html
def get_geometry_information_form(self):
return GeometryInformation.get_html_form(resource=None, element=self.geometryinformation,
file_type=True, allow_edit=False)
def get_original_coverage_form(self):
return OriginalCoverage.get_html_form(resource=None, element=self.originalcoverage,
file_type=True, allow_edit=False)
@classmethod
def validate_element_data(cls, request, element_name):
"""overriding the base class method"""
# the only metadata that we are allowing for editing is the temporal coverage
element_name = element_name.lower()
if element_name != 'coverage' or 'start' not in request.POST:
err_msg = 'Data for temporal coverage is missing'
return {'is_valid': False, 'element_data_dict': None, "errors": err_msg}
element_form = CoverageTemporalForm(data=request.POST)
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
def get_xml(self, pretty_print=True):
"""Generates ORI+RDF xml for this aggregation metadata"""
# get the xml root element and the xml element to which contains all other elements
RDF_ROOT, container_to_add_to = super(GeoFeatureFileMetaData, self)._get_xml_containers()
if self.geometryinformation:
self.geometryinformation.add_to_xml_container(container_to_add_to)
for fieldinfo in self.fieldinformations.all():
fieldinfo.add_to_xml_container(container_to_add_to)
if self.originalcoverage:
self.originalcoverage.add_to_xml_container(container_to_add_to)
return CoreMetaData.XML_HEADER + '\n' + etree.tostring(RDF_ROOT, encoding='UTF-8',
pretty_print=pretty_print)
class GeoFeatureLogicalFile(AbstractLogicalFile):
metadata = models.OneToOneField(GeoFeatureFileMetaData, related_name="logical_file")
data_type = "GeographicFeature"
@classmethod
def get_allowed_uploaded_file_types(cls):
"""only .zip or .shp file can be set to this logical file group"""
# See Shapefile format:
# http://resources.arcgis.com/en/help/main/10.2/index.html#//005600000003000000
return (".zip", ".shp", ".shx", ".dbf", ".prj",
".sbx", ".sbn", ".cpg", ".xml", ".fbn",
".fbx", ".ain", ".aih", ".atx", ".ixs",
".mxs")
@classmethod
def get_main_file_type(cls):
"""The main file type for this aggregation"""
return ".shp"
@classmethod
def get_allowed_storage_file_types(cls):
"""file types allowed in this logical file group are the followings"""
return [".shp", ".shx", ".dbf", ".prj",
".sbx", ".sbn", ".cpg", ".xml", ".fbn",
".fbx", ".ain", ".aih", ".atx", ".ixs",
".mxs"
]
@classmethod
def create(cls, resource):
"""this custom method MUST be used to create an instance of this class"""
feature_metadata = GeoFeatureFileMetaData.objects.create(keywords=[])
# Note we are not creating the logical file record in DB at this point
# the caller must save this to DB
return cls(metadata=feature_metadata, resource=resource)
@staticmethod
def get_aggregation_display_name():
return 'Geographic Feature Content: The multiple files that are part of a geographic ' \
'shapefile'
@staticmethod
def get_aggregation_type_name():
return "GeographicFeatureAggregation"
# used in discovery faceting to aggregate native and composite content types
@staticmethod
def get_discovery_content_type():
"""Return a human-readable content type for discovery.
This must agree between Composite Types and native types.
"""
return "Geographic Feature (ESRI Shapefiles)"
@property
def supports_resource_file_move(self):
"""resource files that are part of this logical file can't be moved"""
return False
@property
def supports_resource_file_add(self):
"""doesn't allow a resource file to be added"""
return False
@property
def supports_resource_file_rename(self):
"""resource files that are part of this logical file can't be renamed"""
return False
@property
def supports_delete_folder_on_zip(self):
"""does not allow the original folder to be deleted upon zipping of that folder"""
return False
@classmethod
def check_files_for_aggregation_type(cls, files):
"""Checks if the specified files can be used to set this aggregation type
:param files: a list of ResourceFile objects
:return If the files meet the requirements of this aggregation type, then returns this
aggregation class name, otherwise empty string.
"""
if _check_if_shape_files(files, temp_files=False):
return cls.__name__
else:
return ""
@classmethod
def set_file_type(cls, resource, user, file_id=None, folder_path=None):
""" Creates a GeoFeatureLogicalFile (aggregation) from a .shp or a .zip resource file """
log = logging.getLogger()
with FileTypeContext(aggr_cls=cls, user=user, resource=resource, file_id=file_id,
folder_path=folder_path,
post_aggr_signal=post_add_geofeature_aggregation,
is_temp_file=True) as ft_ctx:
res_file = ft_ctx.res_file
try:
meta_dict, shape_files, shp_res_files = extract_metadata_and_files(resource,
res_file)
except ValidationError as ex:
log.exception(ex.message)
raise ex
file_name = res_file.file_name
# file name without the extension
base_file_name = file_name[:-len(res_file.extension)]
xml_file = ''
for f in shape_files:
if f.lower().endswith('.shp.xml'):
xml_file = f
break
file_folder = res_file.file_folder
upload_folder = file_folder
file_type_success = False
res_files_to_delete = []
msg = "GeoFeature aggregation. Error when creating aggregation. Error:{}"
with transaction.atomic():
try:
if res_file.extension.lower() == ".zip":
files_to_upload = shape_files
res_files_for_aggr = []
res_files_to_delete.append(res_file)
else:
files_to_upload = []
res_files_for_aggr = shp_res_files
# create a GeoFeature logical file object
logical_file = cls.create_aggregation(dataset_name=base_file_name,
resource=resource,
res_files=res_files_for_aggr,
new_files_to_upload=files_to_upload,
folder_path=upload_folder)
log.info("GeoFeature aggregation - files were added to the aggregation.")
add_metadata(resource, meta_dict, xml_file, logical_file)
log.info("GeoFeature aggregation and resource level metadata updated.")
file_type_success = True
ft_ctx.logical_file = logical_file
ft_ctx.res_files_to_delete = res_files_to_delete
except Exception as ex:
msg = msg.format(ex.message)
log.exception(msg)
if not file_type_success:
raise ValidationError(msg)
@classmethod
def _validate_set_file_type_inputs(cls, resource, file_id=None, folder_path=None):
res_file, folder_path = super(GeoFeatureLogicalFile, cls)._validate_set_file_type_inputs(
resource, file_id, folder_path)
if folder_path is None and res_file.extension.lower() not in ('.zip', '.shp'):
# when a file is specified by the user for creating this file type it must be a
# zip or shp file
raise ValidationError("Not a valid geographic feature file.")
return res_file, folder_path
@classmethod
def get_primary_resouce_file(cls, resource_files):
"""Gets a resource file that has extension .shp from the list of files *resource_files* """
res_files = [f for f in resource_files if f.extension.lower() == '.shp']
return res_files[0] if res_files else None
def create_aggregation_xml_documents(self, create_map_xml=True):
super(GeoFeatureLogicalFile, self).create_aggregation_xml_documents(create_map_xml)
self.metadata.is_dirty = False
self.metadata.save()
def extract_metadata_and_files(resource, res_file, file_type=True):
"""
validates shape files and extracts metadata
:param resource: an instance of BaseResource
:param res_file: an instance of ResourceFile
:param file_type: A flag to control if extraction being done for file type or resource type
:return: a dict of extracted metadata, a list file paths of shape related files on the
temp directory, a list of resource files retrieved from iRODS for this processing
"""
shape_files, shp_res_files = get_all_related_shp_files(resource, res_file, file_type=file_type)
temp_dir = os.path.dirname(shape_files[0])
if not _check_if_shape_files(shape_files):
if res_file.extension.lower() == '.shp':
err_msg = "There was a problem parsing the component files associated with " \
"{folder_path} as a geographic shapefile. This may be because a component " \
"file is corrupt or missing. The .shp, .shx, and .dbf shapefile component " \
"files are required. Other shapefile component files " \
"(.cpg, .prj, .sbn, .sbx, .xml, .fbn, .fbx, .ain, .aih, .atx, .ixs, .mxs) " \
"should also be added where available."
err_msg = err_msg.format(folder_path=res_file.short_path)
else:
err_msg = "One or more dependent shape files are missing in the selected zip file " \
"or one or more files are not of shape file type."
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
raise ValidationError(err_msg)
shp_file = ''
for f in shape_files:
if f.lower().endswith('.shp'):
shp_file = f
break
try:
meta_dict = extract_metadata(shp_file_full_path=shp_file)
return meta_dict, shape_files, shp_res_files
except Exception as ex:
# remove temp dir
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
if file_type:
msg = "GeoFeature file type. Error when setting file type. Error:{}"
else:
msg = "Failed to parse the .shp file. Error{}"
msg = msg.format(ex.message)
raise ValidationError(msg)
def add_metadata(resource, metadata_dict, xml_file, logical_file=None):
"""
creates/updates metadata at resource and file level
:param resource: an instance of BaseResource
:param metadata_dict: dict containing extracted metadata
:param xml_file: file path (on temp directory) of the xml file that is part of the
geo feature files
:param logical_file: an instance of GeoFeatureLogicalFile if metadata needs to be part of the
logical file
:return:
"""
# populate resource and logical file level metadata
target_obj = logical_file if logical_file is not None else resource
if "coverage" in metadata_dict.keys():
coverage_dict = metadata_dict["coverage"]['Coverage']
target_obj.metadata.coverages.all().filter(type='box').delete()
target_obj.metadata.create_element('coverage',
type=coverage_dict['type'],
value=coverage_dict['value'])
originalcoverage_dict = metadata_dict["originalcoverage"]['originalcoverage']
if target_obj.metadata.originalcoverage is not None:
target_obj.metadata.originalcoverage.delete()
target_obj.metadata.create_element('originalcoverage', **originalcoverage_dict)
field_info_array = metadata_dict["field_info_array"]
target_obj.metadata.fieldinformations.all().delete()
for field_info in field_info_array:
field_info_dict = field_info["fieldinformation"]
target_obj.metadata.create_element('fieldinformation', **field_info_dict)
geometryinformation_dict = metadata_dict["geometryinformation"]
if target_obj.metadata.geometryinformation is not None:
target_obj.metadata.geometryinformation.delete()
target_obj.metadata.create_element('geometryinformation', **geometryinformation_dict)
if xml_file:
shp_xml_metadata_list = parse_shp_xml(xml_file)
for shp_xml_metadata in shp_xml_metadata_list:
if 'description' in shp_xml_metadata:
# overwrite existing description metadata - at the resource level
if not resource.metadata.description:
abstract = shp_xml_metadata['description']['abstract']
resource.metadata.create_element('description',
abstract=abstract)
elif 'title' in shp_xml_metadata:
title = shp_xml_metadata['title']['value']
title_element = resource.metadata.title
if title_element.value.lower() == 'untitled resource':
resource.metadata.update_element('title', title_element.id, value=title)
if logical_file is not None:
logical_file.dataset_name = title
logical_file.save()
elif 'subject' in shp_xml_metadata:
# append new keywords to existing keywords - at the resource level
existing_keywords = [subject.value.lower() for
subject in resource.metadata.subjects.all()]
keyword = shp_xml_metadata['subject']['value']
if keyword.lower() not in existing_keywords:
resource.metadata.create_element('subject', value=keyword)
# add keywords at the logical file level
if logical_file is not None:
if keyword not in logical_file.metadata.keywords:
logical_file.metadata.keywords += [keyword]
logical_file.metadata.save()
def get_all_related_shp_files(resource, selected_resource_file, file_type):
"""
    This helper function copies all the related shape files to a temp directory
    and returns a list of those temp file paths as well as a list of existing related
    resource file objects
    :param resource: an instance of BaseResource to which the *selected_resource_file* belongs
:param selected_resource_file: an instance of ResourceFile selected by the user to set
GeoFeaureFile type (the file must be a .shp or a .zip file)
:param file_type: a flag (True/False) to control resource VS file type actions
:return: a list of temp file paths for all related shape files, and a list of corresponding
resource file objects
"""
    def collect_shape_resource_files(res_file):
        # compare base names without extensions: -4 strips '.ext', -8 strips '.shp.xml'
        if res_file.short_path.lower().endswith('.shp.xml'):
            if selected_resource_file.short_path[:-4] == res_file.short_path[:-8]:
                shape_res_files.append(res_file)
        elif selected_resource_file.short_path[:-4] == res_file.short_path[:-4]:
            shape_res_files.append(res_file)
shape_temp_files = []
shape_res_files = []
temp_dir = ''
if selected_resource_file.extension.lower() == '.shp':
for f in resource.files.all():
if f.file_folder == selected_resource_file.file_folder:
if f.extension.lower() == '.xml' and not f.file_name.lower().endswith('.shp.xml'):
continue
if f.extension.lower() in GeoFeatureLogicalFile.get_allowed_storage_file_types():
collect_shape_resource_files(f)
for f in shape_res_files:
temp_file = utils.get_file_from_irods(f)
if not temp_dir:
temp_dir = os.path.dirname(temp_file)
else:
file_temp_dir = os.path.dirname(temp_file)
dst_dir = os.path.join(temp_dir, os.path.basename(temp_file))
shutil.copy(temp_file, dst_dir)
shutil.rmtree(file_temp_dir)
temp_file = dst_dir
shape_temp_files.append(temp_file)
elif selected_resource_file.extension.lower() == '.zip':
temp_file = utils.get_file_from_irods(selected_resource_file)
temp_dir = os.path.dirname(temp_file)
if not zipfile.is_zipfile(temp_file):
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
raise ValidationError('Selected file is not a zip file')
zf = zipfile.ZipFile(temp_file, 'r')
zf.extractall(temp_dir)
zf.close()
for dirpath, _, filenames in os.walk(temp_dir):
for name in filenames:
if name == selected_resource_file.file_name:
# skip the user selected zip file
continue
file_path = os.path.abspath(os.path.join(dirpath, name))
shape_temp_files.append(file_path)
shape_res_files.append(selected_resource_file)
return shape_temp_files, shape_res_files
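# --- Illustrative sketch (not part of the original module) ---
# The .zip branch above relies on the stdlib zipfile API; a hypothetical
# standalone helper showing just that validate-and-extract step:
def _demo_extract_zip(zip_path, dest_dir):
    """Minimal version of the zip extraction step (illustration only)."""
    if not zipfile.is_zipfile(zip_path):
        raise ValidationError('Selected file is not a zip file')
    with zipfile.ZipFile(zip_path, 'r') as zf:
        zf.extractall(dest_dir)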
def _check_if_shape_files(files, temp_files=True):
"""
    checks if the list of file temp paths in *files* forms a valid set of shapefile
    component files; must include all of these file extensions: (.shp, .shx, .dbf)
:param files: list of files located in temp directory in django if temp_file is True, otherwise
list of resource files are from django db
:param temp_files: a flag to treat list of files *files* as temp files or not
:return: True/False
"""
# Note: this is the original function (check_fn_for_shp) in geo feature resource receivers.py
# used by is_shapefiles
# at least needs to have 3 mandatory files: shp, shx, dbf
if len(files) >= 3:
# check that there are no files with same extension
if temp_files:
# files are on temp directory
file_extensions = set([os.path.splitext(os.path.basename(f).lower())[1] for f in files])
else:
# files are in db
file_extensions = set([f.extension.lower() for f in files])
if len(file_extensions) != len(files):
return False
# check if there is the xml file
xml_file = ''
for f in files:
if temp_files:
# files are on temp directory
if f.lower().endswith('.shp.xml'):
xml_file = f
else:
# files are in db
if f.file_name.lower().endswith('.shp.xml'):
xml_file = f
if temp_files:
# files are on temp directory
file_names = set([os.path.splitext(os.path.basename(f))[0] for f in files if
not f.lower().endswith('.shp.xml')])
else:
# files are in db
file_names = set([os.path.splitext(os.path.basename(f.file_name))[0] for f in files if
not f.file_name.lower().endswith('.shp.xml')])
if len(file_names) > 1:
# file names are not the same
return False
# check if xml file name matches with other file names
if xml_file:
# -8 for '.shp.xml'
if temp_files:
# files are on temp directory
xml_file_name = os.path.basename(xml_file)
else:
# files are in db
xml_file_name = xml_file.file_name
if xml_file_name[:-8] not in file_names:
return False
for ext in file_extensions:
if ext not in GeoFeatureLogicalFile.get_allowed_storage_file_types():
return False
for ext in ('.shp', '.shx', '.dbf'):
if ext not in file_extensions:
return False
else:
return False
# test if we can open the shp file
if temp_files:
# files are on temp directory
shp_file = [f for f in files if f.lower().endswith('.shp')][0]
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(shp_file)
if dataset is None:
return False
dataset = None
return True
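# --- Illustrative sketch (not part of the original module) ---
# The mandatory-components rule enforced above reduces to a set check on
# file extensions; a hypothetical standalone version:
def _demo_has_mandatory_components(file_names):
    """True if .shp, .shx and .dbf are all present (illustration only)."""
    extensions = {os.path.splitext(name)[1].lower() for name in file_names}
    return {'.shp', '.shx', '.dbf'}.issubset(extensions)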
def extract_metadata(shp_file_full_path):
"""
Collects metadata from a .shp file specified by *shp_file_full_path*
:param shp_file_full_path:
:return: returns a dict of collected metadata
"""
try:
metadata_dict = {}
# wgs84 extent
parsed_md_dict = parse_shp(shp_file_full_path)
if parsed_md_dict["wgs84_extent_dict"]["westlimit"] != UNKNOWN_STR:
wgs84_dict = parsed_md_dict["wgs84_extent_dict"]
# if extent is a point, create point type coverage
if wgs84_dict["westlimit"] == wgs84_dict["eastlimit"] \
and wgs84_dict["northlimit"] == wgs84_dict["southlimit"]:
coverage_dict = {"Coverage": {"type": "point",
"value": {
"east": wgs84_dict["eastlimit"],
"north": wgs84_dict["northlimit"],
"units": wgs84_dict["units"],
"projection": wgs84_dict["projection"]
}}}
else: # otherwise, create box type coverage
coverage_dict = {"Coverage": {"type": "box",
"value": parsed_md_dict["wgs84_extent_dict"]}}
metadata_dict["coverage"] = coverage_dict
# original extent
original_coverage_dict = {}
original_coverage_dict["originalcoverage"] = {"northlimit":
parsed_md_dict
["origin_extent_dict"]["northlimit"],
"southlimit":
parsed_md_dict
["origin_extent_dict"]["southlimit"],
"westlimit":
parsed_md_dict
["origin_extent_dict"]["westlimit"],
"eastlimit":
parsed_md_dict
["origin_extent_dict"]["eastlimit"],
"projection_string":
parsed_md_dict
["origin_projection_string"],
"projection_name":
parsed_md_dict["origin_projection_name"],
"datum": parsed_md_dict["origin_datum"],
"unit": parsed_md_dict["origin_unit"]
}
metadata_dict["originalcoverage"] = original_coverage_dict
# field
field_info_array = []
field_name_list = parsed_md_dict["field_meta_dict"]['field_list']
for field_name in field_name_list:
field_info_dict_item = {}
field_info_dict_item['fieldinformation'] = \
parsed_md_dict["field_meta_dict"]["field_attr_dict"][field_name]
field_info_array.append(field_info_dict_item)
metadata_dict['field_info_array'] = field_info_array
# geometry
geometryinformation = {"featureCount": parsed_md_dict["feature_count"],
"geometryType": parsed_md_dict["geometry_type"]}
metadata_dict["geometryinformation"] = geometryinformation
return metadata_dict
    except Exception:
        raise ValidationError("Parsing of shapefiles failed!")
def parse_shp(shp_file_path):
"""
    :param shp_file_path: full file path of the .shp file
    output dictionary format
    shp_metadata_dict["origin_projection_string"]: original projection string
    shp_metadata_dict["origin_projection_name"]: origin_projection_name
    shp_metadata_dict["origin_datum"]: origin_datum
    shp_metadata_dict["origin_unit"]: origin_unit
    shp_metadata_dict["field_meta_dict"]["field_list"]: list [fieldname1, fieldname2...]
    shp_metadata_dict["field_meta_dict"]["field_attr_dict"]:
        dict {"fieldname": dict {
                  "fieldName": fieldName,
                  "fieldTypeCode": fieldTypeCode,
                  "fieldType": fieldType,
                  "fieldWidth": fieldWidth,
                  "fieldPrecision": fieldPrecision
                  }
             }
    shp_metadata_dict["feature_count"]: feature count
    shp_metadata_dict["geometry_type"]: geometry_type
    shp_metadata_dict["origin_extent_dict"]:
        dict {"westlimit": westlimit, "northlimit": northlimit,
              "eastlimit": eastlimit, "southlimit": southlimit}
    shp_metadata_dict["wgs84_extent_dict"]:
        dict {"westlimit": westlimit, "northlimit": northlimit,
              "eastlimit": eastlimit, "southlimit": southlimit,
              "projection": projection, "units": units}
"""
shp_metadata_dict = {}
# read shapefile
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(shp_file_path)
# get layer
layer = dataset.GetLayer()
# get spatialRef from layer
spatialRef_from_layer = layer.GetSpatialRef()
if spatialRef_from_layer is not None:
shp_metadata_dict["origin_projection_string"] = str(spatialRef_from_layer)
prj_name = spatialRef_from_layer.GetAttrValue('projcs')
if prj_name is None:
prj_name = spatialRef_from_layer.GetAttrValue('geogcs')
shp_metadata_dict["origin_projection_name"] = prj_name
shp_metadata_dict["origin_datum"] = spatialRef_from_layer.GetAttrValue('datum')
shp_metadata_dict["origin_unit"] = spatialRef_from_layer.GetAttrValue('unit')
else:
shp_metadata_dict["origin_projection_string"] = UNKNOWN_STR
shp_metadata_dict["origin_projection_name"] = UNKNOWN_STR
shp_metadata_dict["origin_datum"] = UNKNOWN_STR
shp_metadata_dict["origin_unit"] = UNKNOWN_STR
field_list = []
    field_attr_dict = {}
    field_meta_dict = {"field_list": field_list, "field_attr_dict": field_attr_dict}
shp_metadata_dict["field_meta_dict"] = field_meta_dict
# get Attributes
layerDefinition = layer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
fieldName = layerDefinition.GetFieldDefn(i).GetName()
field_list.append(fieldName)
attr_dict = {}
field_meta_dict["field_attr_dict"][fieldName] = attr_dict
attr_dict["fieldName"] = fieldName
fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
attr_dict["fieldTypeCode"] = fieldTypeCode
fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
attr_dict["fieldType"] = fieldType
fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
attr_dict["fieldWidth"] = fieldWidth
fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
attr_dict["fieldPrecision"] = fieldPrecision
# get layer extent
layer_extent = layer.GetExtent()
# get feature count
featureCount = layer.GetFeatureCount()
shp_metadata_dict["feature_count"] = featureCount
# get a feature from layer
feature = layer.GetNextFeature()
# get geometry from feature
geom = feature.GetGeometryRef()
# get geometry name
shp_metadata_dict["geometry_type"] = geom.GetGeometryName()
# reproject layer extent
# source SpatialReference
source = spatialRef_from_layer
# target SpatialReference
target = osr.SpatialReference()
target.ImportFromEPSG(4326)
# create two key points from layer extent
left_upper_point = ogr.Geometry(ogr.wkbPoint)
left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper
right_lower_point = ogr.Geometry(ogr.wkbPoint)
right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower
    # source map always has extent, even if the projection is unknown
shp_metadata_dict["origin_extent_dict"] = {}
shp_metadata_dict["origin_extent_dict"]["westlimit"] = layer_extent[0]
shp_metadata_dict["origin_extent_dict"]["northlimit"] = layer_extent[3]
shp_metadata_dict["origin_extent_dict"]["eastlimit"] = layer_extent[1]
shp_metadata_dict["origin_extent_dict"]["southlimit"] = layer_extent[2]
# reproject to WGS84
shp_metadata_dict["wgs84_extent_dict"] = {}
if source is not None:
# define CoordinateTransformation obj
transform = osr.CoordinateTransformation(source, target)
# project two key points
left_upper_point.Transform(transform)
right_lower_point.Transform(transform)
shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = left_upper_point.GetX()
shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = left_upper_point.GetY()
shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = right_lower_point.GetX()
shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = right_lower_point.GetY()
shp_metadata_dict["wgs84_extent_dict"]["projection"] = "WGS 84 EPSG:4326"
shp_metadata_dict["wgs84_extent_dict"]["units"] = "Decimal degrees"
else:
shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["projection"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["units"] = UNKNOWN_STR
return shp_metadata_dict
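# --- Illustrative sketch (not part of the original module) ---
# A hypothetical standalone version of the reprojection step inside
# parse_shp: transform a single point from a source spatial reference to
# WGS84 (EPSG:4326) using the same ogr/osr calls as above.
def _demo_reproject_point(x, y, source_ref):
    """Reproject (x, y) from source_ref to WGS84 (illustration only)."""
    target = osr.SpatialReference()
    target.ImportFromEPSG(4326)
    transform = osr.CoordinateTransformation(source_ref, target)
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(x, y)
    point.Transform(transform)
    return point.GetX(), point.GetY()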
def parse_shp_xml(shp_xml_full_path):
"""
    Parse ArcGIS 10.X ESRI Shapefile Metadata XML file to extract metadata for the
    following elements:
title
abstract
keywords
:param shp_xml_full_path: Expected fullpath to the .shp.xml file
:return: a list of metadata dict
"""
metadata = []
try:
if os.path.isfile(shp_xml_full_path):
with open(shp_xml_full_path) as fd:
xml_dict = xmltodict.parse(fd.read())
dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
if 'idCitation' in dataIdInfo_dict:
if 'resTitle' in dataIdInfo_dict['idCitation']:
if '#text' in dataIdInfo_dict['idCitation']['resTitle']:
title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']
else:
title_value = dataIdInfo_dict['idCitation']['resTitle']
title_max_length = Title._meta.get_field('value').max_length
if len(title_value) > title_max_length:
title_value = title_value[:title_max_length-1]
title = {'title': {'value': title_value}}
metadata.append(title)
if 'idAbs' in dataIdInfo_dict:
description_value = strip_tags(dataIdInfo_dict['idAbs'])
description = {'description': {'abstract': description_value}}
metadata.append(description)
if 'searchKeys' in dataIdInfo_dict:
searchKeys_dict = dataIdInfo_dict['searchKeys']
if 'keyword' in searchKeys_dict:
keyword_list = []
if type(searchKeys_dict["keyword"]) is list:
keyword_list += searchKeys_dict["keyword"]
else:
keyword_list.append(searchKeys_dict["keyword"])
for k in keyword_list:
metadata.append({'subject': {'value': k}})
except Exception:
# Catch any exception silently and return an empty list
# Due to the variant format of ESRI Shapefile Metadata XML
# among different ArcGIS versions, an empty list will be returned
# if any exception occurs
metadata = []
finally:
return metadata
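# --- Illustrative sketch (not part of the original module) ---
# xmltodict maps XML onto nested dicts, which is exactly the structure
# parse_shp_xml walks above; a tiny self-contained example:
_demo_xml = '<metadata><dataIdInfo><idAbs>demo abstract</idAbs></dataIdInfo></metadata>'
assert xmltodict.parse(_demo_xml)['metadata']['dataIdInfo']['idAbs'] == 'demo abstract'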
| 44.547977
| 100
| 0.61338
| 13,291
| 0.344916
| 0
| 0
| 7,921
| 0.205559
| 0
| 0
| 11,815
| 0.306612
|
b7b9397bbe8da7007b207fc6a20f4e6b29704b5a
| 1,313
|
py
|
Python
|
tests/test_node.py
|
mjholtkamp/py-iptree
|
97025f03247dbc91424898fd28f7be1448d5fce5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_node.py
|
mjholtkamp/py-iptree
|
97025f03247dbc91424898fd28f7be1448d5fce5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_node.py
|
mjholtkamp/py-iptree
|
97025f03247dbc91424898fd28f7be1448d5fce5
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from iptree import IPNode
class TestIPNode(unittest.TestCase):
def test_node_ipv4(self):
node = IPNode('0.0.0.0/0')
node.add(IPNode('127.0.0.1/32'))
assert '127.0.0.1/32' in node
assert '192.0.2.1/32' not in node
def test_node_ipv6(self):
node = IPNode('::/0')
node.add(IPNode('::1/128'))
assert '::1/128' in node
assert '2001:db8::1/128' not in node
def test_node_aggregate(self):
root = IPNode('::/0')
child = IPNode('2001:db8::/32')
child.add(IPNode('2001:db8:cafe::1'))
child.add(IPNode('2001:db8:cafe::2'))
root.add(child)
leafs = list(root.aggregate())
assert root.children == {}
assert child.parent is None
assert child.children == {}
assert len(leafs) == 2
def test_node_iter_does_not_empty(self):
root = IPNode('::/0')
root.add(IPNode('2001:db8::1'))
assert [x.network for x in root] == ['2001:db8::1']
# repeat to show that __iter__ does not empty children
assert [x.network for x in root] == ['2001:db8::1']
def test_user_data(self):
data = {
'user': 'data',
}
root = IPNode('::/0', data=data)
assert root.data['user'] == 'data'
| 28.543478
| 62
| 0.552932
| 1,267
| 0.964966
| 0
| 0
| 0
| 0
| 0
| 0
| 280
| 0.213252
|
b7b981be89ff3733793d9540e9d6338ff12dd2df
| 208
|
py
|
Python
|
Codewars/8kyu/invert-values/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/invert-values/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/invert-values/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.4.3
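# A minimal sketch of the `invert` function these assertions exercise
# (an assumed implementation; the kata ships only the tests):
def invert(lst):
    return [-x for x in lst]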
Test.it('Basic Tests')
Test.assert_equals(invert([1, 2, 3, 4, 5]), [-1, -2, -3, -4, -5])
Test.assert_equals(invert([1, -2, 3, -4, 5]), [-1, 2, -3, 4, -5])
Test.assert_equals(invert([]), [])
| 29.714286
| 65
| 0.548077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.139423
|
b7ba3cc002a49b7880bf396084b5e2214c7d5cdf
| 1,239
|
py
|
Python
|
locustfile_create_order.py
|
Ashutosh-Kaushik/ss-load-test-locust
|
286e3cecad39f5ce991717e117392332a9810d64
|
[
"Apache-2.0"
] | 1
|
2022-03-31T05:34:33.000Z
|
2022-03-31T05:34:33.000Z
|
locustfile_create_order.py
|
Ashutosh-Kaushik/ss-load-test-locust
|
286e3cecad39f5ce991717e117392332a9810d64
|
[
"Apache-2.0"
] | null | null | null |
locustfile_create_order.py
|
Ashutosh-Kaushik/ss-load-test-locust
|
286e3cecad39f5ce991717e117392332a9810d64
|
[
"Apache-2.0"
] | null | null | null |
import os
import warnings
from locust import HttpUser, task
body = {
"campaignid":"5kXk20gGDISJdM5el5IT",
"walletamount":"0"
}
header = {
"Host": "fkhapi.sastasundar.com",
"Apptype": "N",
"Appversion": "4.0.4",
"Appversioncode": "109",
"Deviceid": "81653dce-0dd2-4201-8916-4aecbdd89269",
"Devicedensity": "320",
"Devicedensitytype": "xhdpi",
"Deviceheight": "1184",
"Devicewidth": "768",
"Devicename": "Unknown Google Nexus 4",
"Deviceosinfo": "5.1",
"Networkinfo": "Wifi",
"Accesstoken": "PDWZ5pStjE",
"Refdeviceid": "4dd29c0f2f8d1842",
"Userid": "4937724",
"Pincode": "700120",
"Is_panindia": "0",
"Warehouse_id": "1",
"Content-Type": "application/json",
"Content-Length": "56",
"Accept-Encoding": "gzip, deflate",
"User-Agent": "okhttp/5.0.0-alpha.2"
}
class SastaSundarCheckout(HttpUser):
host = os.getenv('TARGET_URL', 'https://fkhapi.sastasundar.com')
def on_start(self):
warnings.filterwarnings("ignore")
self.client.verify = False
    @task
    def sasta_sundar_create_order(self):
        self.client.post("/orderinfo/createorder", headers=header, json=body)
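# --- Usage note (not part of the original file) ---
# This scenario can be run with the standard Locust CLI, e.g.:
#   locust -f locustfile_create_order.py --headless -u 10 -r 2 --run-time 1m
# where -u is the number of simulated users and -r the spawn rate per
# second; the user count and rate here are illustrative.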
| 25.8125
| 88
| 0.637611
| 348
| 0.280872
| 0
| 0
| 135
| 0.108959
| 0
| 0
| 656
| 0.529459
|
b7bae45b9bc9f5897fbcca4960ef6f0a8e4b7699
| 1,358
|
py
|
Python
|
compy/plot/grid.py
|
tilleyd/compy
|
b63188341d0c66c0ecd810e29918e569c519b296
|
[
"MIT"
] | null | null | null |
compy/plot/grid.py
|
tilleyd/compy
|
b63188341d0c66c0ecd810e29918e569c519b296
|
[
"MIT"
] | null | null | null |
compy/plot/grid.py
|
tilleyd/compy
|
b63188341d0c66c0ecd810e29918e569c519b296
|
[
"MIT"
] | null | null | null |
"""Contains the grid class to create multiple figures."""
from typing import Optional, Tuple
from .figure import Figure
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
class Grid:
def __init__(
self, rows: int, cols: int, size: Optional[Tuple[float, float]] = None
):
"""Creates a grid containing multiple subfigures.
Args:
rows: Number of figure rows.
cols: Number of figure columns.
size: Optional size in inches, (width, height).
"""
self.rows = rows
self.cols = cols
self.grid = gridspec.GridSpec(rows, cols)
self.figure = plt.figure(figsize=size)
self.figures = []
for r in range(rows):
row = []
for c in range(cols):
ax = plt.subplot(self.grid[r, c])
fig = Figure(ax=ax)
row.append(fig)
self.figures.append(row)
def get_figure(self, row: int, col: int) -> Figure:
"""Return the figure at a specified row and column."""
return self.figures[row][col]
def show(self):
"""Show the figure when in interactive mode."""
self.figure.show()
def save(self, path):
"""Save the figure to a image or pdf file path."""
self.figure.savefig(path, bbox_inches="tight")
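# --- Illustrative usage sketch (not part of the original module) ---
# Build a 2x2 grid, grab one subfigure, and save the result; the output
# path is hypothetical.
if __name__ == "__main__":
    grid = Grid(rows=2, cols=2, size=(8.0, 6.0))
    top_right = grid.get_figure(0, 1)  # Figure wrapper for the top-right axes
    grid.save("grid_demo.png")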
| 29.521739
| 78
| 0.581001
| 1,163
| 0.856406
| 0
| 0
| 0
| 0
| 0
| 0
| 436
| 0.32106
|
b7bb34b596fa5c54c79635a6e0d79f862d482ebf
| 30,005
|
py
|
Python
|
parallel_accel/shared/parallel_accel/shared/schemas/external.py
|
google/parallel_accel
|
b58fda1c3a22f2aaa9a97337d602cd72c49ee8be
|
[
"Apache-2.0"
] | 1
|
2021-12-19T21:17:02.000Z
|
2021-12-19T21:17:02.000Z
|
parallel_accel/shared/parallel_accel/shared/schemas/external.py
|
google/parallel_accel
|
b58fda1c3a22f2aaa9a97337d602cd72c49ee8be
|
[
"Apache-2.0"
] | null | null | null |
parallel_accel/shared/parallel_accel/shared/schemas/external.py
|
google/parallel_accel
|
b58fda1c3a22f2aaa9a97337d602cd72c49ee8be
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module provides types definitions."""
import dataclasses
import enum
import json
import time
from typing import Any, Dict, List, Optional, Union
import uuid
import linear_algebra
import marshmallow
import marshmallow_dataclass
import marshmallow_enum
#####################################
# Utility functions #
#####################################
def decode(
schema: marshmallow.Schema, data: str, **kwargs
) -> dataclasses.dataclass:
"""Decodes input string using provided schema.
Args:
schema: Schema to be used for deserialization.
data: JSON-encoded data to be deserialized.
        **kwargs: Extra keyword arguments to be passed to
            `marshmallow.Schema.loads` method.
Returns:
Deserialized `dataclasses.dataclass` object.
"""
return schema.loads(data, **kwargs)
def encode(
schema: marshmallow.Schema, data: dataclasses.dataclass, **kwargs
) -> str:
"""Encodes input data using provided schema.
Args:
schema: Schema to be used for serialization.
data: Dataclass object to be serialized.
        **kwargs: Extra keyword arguments to be passed to
            `marshmallow.Schema.dumps` method.
Returns:
JSON-encoded serialized data.
"""
return schema.dumps(data, separators=(",", ":"), **kwargs)
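# --- Illustrative sketch (not part of the original module) ---
# A minimal round trip through the decode/encode helpers above, using a
# hypothetical marshmallow dataclass (illustration only):
@marshmallow_dataclass.dataclass
class _DemoPayload:
    name: str
    value: int

_demo_schema = _DemoPayload.Schema()
_demo_encoded = encode(_demo_schema, _DemoPayload(name="demo", value=42))
assert decode(_demo_schema, _demo_encoded).value == 42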
#####################################
# Types aliases #
#####################################
OperatorsType = List[linear_algebra.ops.ProbBasisAxisSum]
#####################################
# marshmallow helpers #
#####################################
_SerializedLinearAlgebraObject = Dict[str, Any]
_SerializedProbBasisAxisSums = List[List[Dict[str, Any]]]
# `linear_algebra` offers only functions to dump and load objects from the JSON encoded
# string, and does not support builtin dict objects. When we call json.dumps()
# over already JSON encoded string, all quotation marks and brackets are
# prefixed with the backslash. Instead, we can convert JSON object to the dict
# type and reduce serialized object size.
def _deserialize_linear_algebra_object(data: _SerializedLinearAlgebraObject) -> Any:
"""Deserializes linear_algebra object from dict type.
    Since `linear_algebra` does not provide a function to load objects from builtin
    dict objects, we need a workaround here: first we dump the dict object into a
    JSON encoded string, then parse it into a `linear_algebra` object.
Args:
data: Dict encoded linear_algebra object.
Returns:
Deserialized linear_algebra object.
"""
return linear_algebra.read_json(json_text=json.dumps(data))
def _serialize_linear_algebra_object(obj: Any) -> _SerializedLinearAlgebraObject:
"""Serializes linear_algebra object to dict type.
    Since `linear_algebra` does not provide a function to dump objects into builtin
    dict objects, we need a workaround here: first we dump the `linear_algebra`
    object into a JSON encoded string, then parse it into a dict object.
    Args:
        obj: linear_algebra object to be encoded.
Returns:
Serialized linear_algebra object.
"""
return json.loads(linear_algebra.to_json(obj))
class _LinearAlgebraField(marshmallow.fields.Field):
"""`marshmallow.fields.Field` that serializes and deserializes `linear_algebra` type
object."""
def _serialize(
self, value: Any, *_args, **_kwargs
) -> _SerializedLinearAlgebraObject:
"""See base class documentation."""
return _serialize_linear_algebra_object(value)
def _deserialize(
self, value: _SerializedLinearAlgebraObject, *_args, **_kwargs
) -> Any:
"""See base class documentation."""
try:
return _deserialize_linear_algebra_object(value)
except json.JSONDecodeError as ex:
raise marshmallow.ValidationError("Not a JSON object") from ex
class _OperatorsField(marshmallow.fields.Field):
"""`marshmallow.fields.Field` that serializes and deserializes
`linear_algebra.ProbBasisAxisSum` operators."""
def _serialize(
self, value: OperatorsType, _attr, _obj, **kwargs
) -> _SerializedProbBasisAxisSums:
"""See base class documentation."""
if not isinstance(value, list):
value = [value]
return [[_serialize_linear_algebra_object(term) for term in op] for op in value]
def _deserialize(
self, value: _SerializedProbBasisAxisSums, _attr, _obj, **kwargs
) -> OperatorsType:
"""See base class documentation."""
try:
return [
sum([_deserialize_linear_algebra_object(term) for term in op])
for op in value
]
except json.JSONDecodeError as ex:
raise marshmallow.ValidationError("Not a JSON object") from ex
Graph = marshmallow_dataclass.NewType(
"Graph", linear_algebra.Graph, field=_LinearAlgebraField
)
Operators = marshmallow_dataclass.NewType(
"Operators", OperatorsType, field=_OperatorsField
)
ParamResolver = marshmallow_dataclass.NewType(
"ParamResolver", linear_algebra.ParamResolver, field=_LinearAlgebraField
)
Result = marshmallow_dataclass.NewType("Result", linear_algebra.Result, field=_LinearAlgebraField)
Sweepable = marshmallow_dataclass.NewType(
"Sweepable", linear_algebra.study.Sweepable, field=_LinearAlgebraField
)
#####################################
# Server side events #
#####################################
@dataclasses.dataclass
class ServerSideEvent:
"""Base class for server side event.
Both `event` and `timestamp` fields are auto-populated if using default
values:
- `event` is set to the class name
- `timestamp` is set to the current time
Attributes:
id: Event unique id.
data: Event payload.
event: Event name.
timestamp: Event timestamp (in UNIX seconds).
"""
id: uuid.UUID # pylint: disable=invalid-name
data: Any
event: str = dataclasses.field(default="")
timestamp: int = dataclasses.field(default=0)
def __post_init__(self) -> None:
if self.event == "":
self.event = self.__class__.__name__
if self.timestamp == 0:
self.timestamp = int(time.time())
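# --- Illustrative sketch (not part of the original module) ---
# `event` and `timestamp` are auto-populated when left at their defaults:
_demo_sse = ServerSideEvent(id=uuid.uuid4(), data={"payload": 1})
assert _demo_sse.event == "ServerSideEvent"
assert _demo_sse.timestamp > 0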
@dataclasses.dataclass
class StreamTimeoutEvent(ServerSideEvent):
"""Server side event that indicates the stream connection reached the
maximum timeout (10 minutes)."""
data: Optional[Any] = dataclasses.field(default=None)
#####################################
# API relevant types #
#####################################
@dataclasses.dataclass
class APIError:
"""API error response.
Attributes:
code: HTTP error code.
message: Error details.
"""
code: int
message: str
#####################################
# Jobs relevant types #
#####################################
@dataclasses.dataclass
class BatchJobContext:
"""Simulation batch job context.
Attributes:
acyclic_graphs (List[linear_algebra.Graph]): List of acyclic_graphs to be run as a batch.
params (List[linear_algebra.study.Sweepable]): List of parameters to be used
with acyclic_graphs, same size as list of acyclic_graphs.
"""
acyclic_graphs: List[Graph]
params: List[Sweepable]
def __post_init__(self) -> None:
if len(self.acyclic_graphs) != len(self.params):
raise ValueError(
"Number of sweeps parameters has to match number of acyclic_graphs"
)
@dataclasses.dataclass
class JobContext:
"""Simulation job context.
Attributes:
acyclic_graph (linear_algebra.Graph): Graph to be run.
param_resolver (linear_algebra.ParamResolver): ParamResolver to be used with the
acyclic_graph.
"""
acyclic_graph: Graph
param_resolver: ParamResolver
@dataclasses.dataclass
class SweepJobContext:
"""Simulation sweep job context.
Attributes:
acyclic_graph (linear_algebra.Graph): Graph to be run.
params (linear_algebra.study.Sweepable): Parameters to be used with the
acyclic_graph.
"""
acyclic_graph: Graph
params: Sweepable
class JobStatus(enum.IntEnum):
"""Current job status.
Attributes:
NOT_STARTED: The job was added to the queue.
IN_PROGRESS: The job is being processed.
COMPLETE: Simulation has been completed successfully.
ERROR: Simulation has failed.
"""
NOT_STARTED = 0
IN_PROGRESS = 1
COMPLETE = 2
ERROR = 3
@dataclasses.dataclass
class JobProgress:
"""Job computation progress.
Attributes:
current: Number of completed work units.
total: Total number of work units.
"""
completed: int = dataclasses.field(default=0)
total: int = dataclasses.field(default=1)
def __post_init__(self) -> None:
if self.completed < 0:
raise ValueError("Current work unit cannot be less than zero")
if self.total < 1:
raise ValueError("Total number of work units cannot be less than 1")
if self.completed > self.total:
raise ValueError(
"Current work unit cannot be greater than total work units"
)
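# --- Illustrative sketch (not part of the original module) ---
# JobProgress enforces its invariants in __post_init__, so inconsistent
# instances fail immediately at construction time:
try:
    JobProgress(completed=5, total=3)
except ValueError:
    pass  # "Current work unit cannot be greater than total work units"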
@dataclasses.dataclass
class JobResult:
"""Simulation job result.
Attributes:
id: Unique job id.
status: Current job status.
error_message: Optional error message explaining why the computation
failed, only set if the `status` is
:attr:`parallel_accel.client.schemas.JobStatus.ERROR`.
progress: Optional computation progress, only set if the `status` is
:attr:`parallel_accel.client.schemas.JobStatus.IN_PROGRESS`.
result: Optional simulation job result, only set if the `status` is
:attr:`parallel_accel.client.schemas.JobStatus.COMPLETE`.
"""
id: uuid.UUID # pylint: disable=invalid-name
status: JobStatus = dataclasses.field(
metadata={
"marshmallow_field": marshmallow_enum.EnumField(
JobStatus, by_value=True
)
}
)
error_message: Optional[str] = dataclasses.field(default=None)
progress: Optional[JobProgress] = dataclasses.field(default=None)
result: Optional[Any] = dataclasses.field(default=None)
def __post_init__(self) -> None:
if self.status == JobStatus.IN_PROGRESS and self.progress is None:
raise ValueError("Missing job progress")
if self.status == JobStatus.ERROR:
if not self.error_message:
raise ValueError("Missing error messsage")
if self.result:
raise ValueError("Failed job cannot have result field")
if self.status == JobStatus.COMPLETE:
if not self.result:
raise ValueError("Missing job result")
if self.error_message:
raise ValueError(
"Completed job cannot have error_message field"
)
if (
self.progress is not None
and self.progress.total != self.progress.completed
):
raise ValueError("Not all work units are marked as completed")
@dataclasses.dataclass
class JobStatusEvent(ServerSideEvent):
"""Job status changed event.
Attributes:
data: Simulation job result.
"""
data: JobResult
@dataclasses.dataclass
class JobSubmitted:
"""Submitted job.
Attributes:
id: Unique job id.
"""
id: uuid.UUID # pylint: disable=invalid-name
#####################################
# Expectation job relevant types #
#####################################
@dataclasses.dataclass
class ExpectationBatchJobContext(BatchJobContext):
"""Expectation values batch job context.
Attributes:
operators (List[List[linear_algebra.ops.ProbBasisAxisSum]]): List of list of
`linear_algebra.ops.ProbBasisAxisSum` operators, same size as list of acyclic_graphs.
"""
operators: List[Operators]
def __post_init__(self) -> None:
super().__post_init__()
if len(self.operators) != len(self.acyclic_graphs):
raise ValueError(
"Number of operators has to match number of acyclic_graphs"
)
@dataclasses.dataclass
class ExpectationBatchJobResult(JobResult):
"""Expectation values batch job result.
Attributes:
        result: List of expectation value lists, one per acyclic_graph. Each element
            has the outer size of the input sweep parameters and the inner size of
            the input operators list.
"""
result: Optional[List[List[List[float]]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationJobContext(JobContext):
"""Expectation values job context.
Attributes:
operators (linear_algebra.ops.ProbBasisAxisSum): List of `linear_algebra.ops.ProbBasisAxisSum` operators.
"""
operators: Operators
@dataclasses.dataclass
class ExpectationJobResult(JobResult):
"""Expectation values job result.
Attributes:
result: List of floats, same size as input operators size.
"""
result: Optional[List[float]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationSweepJobContext(SweepJobContext):
"""Expectation values sweep job context.
Attributes:
operators (List[linear_algebra.ops.ProbBasisAxisSum]): List of `linear_algebra.ops.ProbBasisAxisSum`
operators, same size as list of acyclic_graphs.
"""
operators: Operators
@dataclasses.dataclass
class ExpectationSweepJobResult(JobResult):
"""Expectation values sweep job result.
Attributes:
        result: List of expectation value lists. The outer size matches the input
            sweep size, the inner size matches the input operators list size.
"""
result: Optional[List[List[float]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationJobStatusEvent(JobStatusEvent):
"""Expectation job status changed event.
Attributes:
data: Expectation job result.
"""
data: Union[
ExpectationJobResult,
ExpectationBatchJobResult,
ExpectationSweepJobResult,
]
########################################
# Noisy expectation job relevant types #
########################################
@dataclasses.dataclass
class NoisyExpectationJobContext(ExpectationJobContext):
"""Noisy expectation job context.
Attributes:
num_samples: Number of times the operators will run. Can be specified as
a single value or list of same size as input operators.
"""
# We cannot set default field value for Union type
num_samples: Union[int, List[int]]
def __post_init__(self) -> None:
if isinstance(self.num_samples, list) and (
len(self.num_samples) != len(self.operators)
):
raise ValueError(
"Number of num_samples has to match number of operators"
)
@dataclasses.dataclass
class NoisyExpectationJobResult(ExpectationJobResult):
"""Noisy expectation job result."""
@dataclasses.dataclass
class NoisyExpectationJobStatusEvent(JobStatusEvent):
"""Noisy expecation job status changed event.
Attributes:
data: Noisy expecation job result.
"""
data: NoisyExpectationJobResult
#####################################
# Sample job relevant types #
#####################################
@dataclasses.dataclass
class SampleBatchJobContext(BatchJobContext):
"""Sample batch job context.
Attributes:
repetitions: Number of times the acyclic_graphs will run. Can be specified as
a single value or list of same size as input acyclic_graphs.
"""
class RepetitionsValidator(
marshmallow.validate.Validator
): # pylint: disable=too-few-public-methods
"""A Helper class for validating repetitions field value."""
def __call__(
self, value: Union[int, List[int]]
) -> Union[int, List[int]]:
if isinstance(value, list) and not all(x > 0 for x in value):
raise marshmallow.ValidationError(
"All elements must be greater than or equal to 1"
)
if isinstance(value, int) and not value > 0:
raise marshmallow.ValidationError(
"Must be greater than or equal to 1"
)
return value
# We cannot set default field value for Union type
repetitions: Union[int, List[int]] = dataclasses.field(
metadata={"validate": RepetitionsValidator()}
)
def __post_init__(self) -> None:
super().__post_init__()
if isinstance(self.repetitions, list) and (
len(self.repetitions) != len(self.acyclic_graphs)
):
raise ValueError(
"Number of repetitions has to match number of acyclic_graphs"
)
@dataclasses.dataclass
class SampleBatchJobResult(JobResult):
"""Sample batch job result.
Attributes:
result (Optional[List[List[linear_algebra.Result]]]): Output from running the
acyclic_graph.
"""
result: Optional[List[List[Result]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleJobContext(JobContext):
"""Sample job context.
Attributes:
repetitions: Number of times the acyclic_graph will run.
"""
repetitions: int = dataclasses.field(
default=1, metadata={"validate": marshmallow.validate.Range(min=1)}
)
@dataclasses.dataclass
class SampleJobResult(JobResult):
"""Sample job result.
Attributes:
result: Output from running the acyclic_graph.
"""
result: Optional[Result] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleSweepJobContext(SweepJobContext):
"""Sample sweep job context.
Attributes:
repetitions: Number of times the acyclic_graph will run.
"""
repetitions: int = dataclasses.field(
default=1, metadata={"validate": marshmallow.validate.Range(min=1)}
)
@dataclasses.dataclass
class SampleSweepJobResult(JobResult):
"""Sample sweep job result.
Attributes:
result: Output from running the acyclic_graph.
"""
result: Optional[List[Result]] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleJobStatusEvent(JobStatusEvent):
"""Sample job status changed event.
Attributes:
data: Sample job result.
"""
data: Union[SampleJobResult, SampleBatchJobResult, SampleSweepJobResult]
#####################################
# Jobs queue relevant types #
#####################################
class JobType(enum.IntEnum):
"""Simulation job type.
Attributes:
SAMPLE: Sampling.
EXPECTATION: Expectation values.
NOISY_EXPECTATION: Noisy expectation values.
"""
SAMPLE = 0
EXPECTATION = 1
NOISY_EXPECTATION = 2
@dataclasses.dataclass
class JobsQueue:
"""Current status of jobs queue.
Attributes:
ids: List of pending jobs ids.
"""
    ids: List[uuid.UUID] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class PendingJob:
"""Queued job details.
Attributes:
id: Unique job id.
status: Current job status.
type: Job type.
"""
id: uuid.UUID # pylint: disable=invalid-name
status: JobStatus = dataclasses.field(
metadata={
"marshmallow_field": marshmallow_enum.EnumField(
JobStatus,
by_value=True,
)
}
)
type: JobType = dataclasses.field(
metadata={
"marshmallow_field": marshmallow_enum.EnumField(
JobType, by_value=True
)
}
)
def __post_init__(self) -> None:
if self.status in (JobStatus.COMPLETE, JobStatus.ERROR):
raise ValueError(
f"PendingJob cannot have {self.status.name} status"
)
#####################################
# Tasks relevant types #
#####################################
class TaskState(enum.IntEnum):
"""Current task state.
Attributes:
PENDING: Task is scheduled for execution.
RUNNING: Task is running.
DONE: Task is finished.
"""
PENDING = 0
RUNNING = 1
DONE = 2
@dataclasses.dataclass
class TaskStatus:
"""Current task status.
Attributes:
state: Current task state.
error: Optional error message explaining why the task failed, only set
if the state is :attr:`parallel_accel.client.schemas.TaskState.DONE` and the
`success` flag is False.
success: Optional flag indicating whether task finished successfully,
only set if the task state is
:attr:`parallel_accel.client.schemas.TaskState.DONE`.
"""
state: TaskState = dataclasses.field(
metadata={
"marshmallow_field": marshmallow_enum.EnumField(
TaskState, by_value=True
)
}
)
error: Optional[str] = dataclasses.field(default=None)
success: Optional[bool] = dataclasses.field(default=None)
def __post_init__(self) -> None:
"""See base class documentation."""
if self.state != TaskState.DONE and (
(self.error is not None) or (self.success is not None)
):
field = "error" if self.error is not None else "success"
raise ValueError(f"Unfinished task cannot have {field} field.")
@dataclasses.dataclass
class TaskSubmitted:
"""Submitted task.
Attributes:
id: Unique task id.
"""
id: uuid.UUID # pylint: disable=invalid-name
@dataclasses.dataclass
class TaskStatusEvent(ServerSideEvent):
"""Task status changed event.
Attributes:
data: Task status.
"""
data: TaskStatus
#####################################
# Worker relevant types #
#####################################
class WorkerState(enum.IntEnum):
"""ASIC worker state.
Attributes:
BOOTING: Worker is booting.
ERROR: Worker encountered an error.
IDLE: Worker is idling.
OFFLINE: Worker is offline.
PROCESSING_JOB: Worker is processing a job.
SHUTTING_DOWN: Worker is shutting down.
"""
OFFLINE = 0
BOOTING = 1
SHUTTING_DOWN = 2
IDLE = 3
PROCESSING_JOB = 4
ERROR = 5
@dataclasses.dataclass
class Worker:
"""Current status of the ASIC worker.
Attributes:
state: Current worker state.
error: Optional error message explaining problem with the worker, only
set when the `state` is
:attr:`parallel_accel.client.schemas.WorkerState.ERROR`.
job_id: Currently processed job id, only set when the `state` is
:obj:`parallel_accel.client.schemas.WorkerState.PROCESSING_JOB`.
"""
state: WorkerState = dataclasses.field(
metadata={
"marshmallow_field": marshmallow_enum.EnumField(
WorkerState, by_value=True
)
}
)
error: Optional[str] = dataclasses.field(default=None)
job_id: Optional[uuid.UUID] = dataclasses.field(default=None)
def __post_init__(self) -> None:
"""See base class documentation."""
if (
self.state
not in (
WorkerState.PROCESSING_JOB,
WorkerState.ERROR,
)
and ((self.error is not None) or (self.job_id is not None))
):
raise ValueError(
"Cannot have extra properties for the worker status "
f"{self.state.name}"
)
if self.state == WorkerState.ERROR:
if not self.error:
raise ValueError("Missing error messsage")
if self.job_id:
raise ValueError("Cannot have job_id field for the ERROR state")
if self.state == WorkerState.PROCESSING_JOB:
if not self.job_id:
raise ValueError("Missing job id")
if self.error:
raise ValueError("Cannot have error field for the IDLE state")
#####################################
# marshmallow schemas #
#####################################
class _SSERenderer:
"""A helper class for serializing and deserializing objects to server side
events message format.
The server side event message is UTF-8 text data separated by a pair of
newline characters.
"""
@staticmethod
def dumps(obj: Dict[str, Any], *_args, **_kwargs) -> str:
r"""Encodes input object into text string.
Args:
obj: Object to be serialized.
Returns:
Text string in format:
{key}: {value}\n
...
\n
"""
result = ""
for key in ("event", "id", "timestamp", "data"):
value = obj.get(key, None)
if not value:
continue
if key == "data":
value = json.dumps(value, separators=(",", ":"))
result += f"{key}: {value}\n"
result += "\n"
return result
@staticmethod
def loads( # pylint: disable=invalid-name
s: str, *_args, **_kwargs
) -> Dict[str, Any]:
"""Decodes input text string into dict object.
Args:
s: Text string to be decoded.
Returns:
Dict object.
"""
obj = {}
for line in s.split("\n"):
line = line.strip()
if not line:
continue
            key, value = line.split(": ", 1)  # maxsplit keeps any ": " inside the data payload intact
if key == "data":
value = json.loads(value)
obj[key] = value
return obj
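    # Illustrative round-trip (derived from dumps() and loads() above): the dict
    #     {"event": "status", "id": "1", "data": {"state": 0}}
    # serializes to the text
    #     'event: status\nid: 1\ndata: {"state":0}\n\n'
    # and loads() recovers the original dict from that string.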
class _BaseSchema(marshmallow.Schema):
"""Base `marshmallow.schema.Schema` for ParallelAccel related schemas.
    This is a helper schema that provides a custom `marshmallow.post_dump` method
    that excludes all None fields from the final serialization result.
"""
@marshmallow.post_dump
def remove_empty_fields( # pylint: disable=no-self-use
self, data: Dict, **_kwargs
) -> Dict[str, Any]:
"""Removes all None fields from the input data.
Args:
data: Input data dictionary object.
Returns:
Filtered dictionary object.
"""
return {k: v for k, v in data.items() if v is not None}
class _SSEBaseSchema(_BaseSchema):
"""Base `marshmallow.schema.Schema` for ParallelAccel service server side events."""
class Meta: # pylint: disable=too-few-public-methods
"""Metadata passed to the `marshmallow.schemas.Schema` constructor."""
render_module = _SSERenderer
(
APIErrorSchema,
ExpectationBatchJobContextSchema,
ExpectationBatchJobResultSchema,
ExpectationJobContextSchema,
ExpectationJobResultSchema,
ExpectationJobStatusEventSchema,
ExpectationSweepJobContextSchema,
ExpectationSweepJobResultSchema,
JobProgressSchema,
JobResultSchema,
JobStatusEventSchema,
JobSubmittedSchema,
JobsQueueSchema,
NoisyExpectationJobContextSchema,
NoisyExpectationJobResultSchema,
NoisyExpectationJobStatusEventSchema,
PendingJobSchema,
SampleBatchJobContextSchema,
SampleBatchJobResultSchema,
SampleJobContextSchema,
SampleJobResultSchema,
SampleJobStatusEventSchema,
SampleSweepJobContextSchema,
SampleSweepJobResultSchema,
ServerSideEventSchema,
StreamTimeoutEventSchema,
TaskStatusEventSchema,
TaskStatusSchema,
TaskSubmittedSchema,
WorkerSchema,
) = tuple(
marshmallow_dataclass.class_schema(x, base_schema=y)()
for x, y in (
(APIError, None),
(ExpectationBatchJobContext, None),
(ExpectationBatchJobResult, _BaseSchema),
(ExpectationJobContext, None),
(ExpectationJobResult, _BaseSchema),
(ExpectationJobStatusEvent, _SSEBaseSchema),
(ExpectationSweepJobContext, None),
(ExpectationSweepJobResult, _BaseSchema),
(JobProgress, None),
(JobResult, _BaseSchema),
(JobStatusEvent, _SSEBaseSchema),
(JobSubmitted, None),
(JobsQueue, None),
(NoisyExpectationJobContext, None),
(NoisyExpectationJobResult, _BaseSchema),
(NoisyExpectationJobStatusEvent, _SSEBaseSchema),
(PendingJob, None),
(SampleBatchJobContext, None),
(SampleBatchJobResult, _BaseSchema),
(SampleJobContext, None),
(SampleJobResult, _BaseSchema),
(SampleJobStatusEvent, _SSEBaseSchema),
(SampleSweepJobContext, None),
(SampleSweepJobResult, _BaseSchema),
(ServerSideEvent, _SSEBaseSchema),
(StreamTimeoutEvent, _SSEBaseSchema),
(TaskStatusEvent, _SSEBaseSchema),
(TaskStatus, _BaseSchema),
(TaskSubmitted, None),
(Worker, _BaseSchema),
)
)
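# Example usage (illustrative): each generated singleton is a regular
# marshmallow schema instance, e.g.
#     TaskStatusSchema.dump(TaskStatus(state=TaskState.PENDING))
# yields {"state": 0}; the None-valued error/success fields are stripped by
# _BaseSchema.remove_empty_fields.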
| 28.549001
| 113
| 0.627629
| 21,268
| 0.708815
| 0
| 0
| 18,267
| 0.608799
| 0
| 0
| 14,465
| 0.482086
|
b7bb41a1d668770a899b52a1a2113000001bd07c
| 1,040
|
py
|
Python
|
godot-toolkit/godot_config_file.py
|
WiggleWizard/godot-toolkit
|
e17b7666ed80e3ab25ee65c43eb1ba9b13634070
|
[
"MIT"
] | null | null | null |
godot-toolkit/godot_config_file.py
|
WiggleWizard/godot-toolkit
|
e17b7666ed80e3ab25ee65c43eb1ba9b13634070
|
[
"MIT"
] | null | null | null |
godot-toolkit/godot_config_file.py
|
WiggleWizard/godot-toolkit
|
e17b7666ed80e3ab25ee65c43eb1ba9b13634070
|
[
"MIT"
] | null | null | null |
try:
    from configparser import RawConfigParser, DEFAULTSECT
except ImportError:
    from ConfigParser import RawConfigParser, DEFAULTSECT
class GodotConfigFile(RawConfigParser):
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value == ""):
key = " = ".join((key, str("\"\"").replace('\n', '\n\t')))
elif (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
| 41.6
| 79
| 0.5
| 923
| 0.8875
| 0
| 0
| 0
| 0
| 0
| 0
| 168
| 0.161538
|
b7bd5a3b425952bf3f2c75d16b604d8c223988c4
| 683
|
py
|
Python
|
DMOJ/CCC/escape room.py
|
eddiegz/Personal-C
|
f7869826216e5c665f8f646502141f0dc680e545
|
[
"MIT"
] | 3
|
2021-05-15T08:18:09.000Z
|
2021-05-17T04:41:57.000Z
|
DMOJ/CCC/escape room.py
|
eddiegz/Personal-C
|
f7869826216e5c665f8f646502141f0dc680e545
|
[
"MIT"
] | null | null | null |
DMOJ/CCC/escape room.py
|
eddiegz/Personal-C
|
f7869826216e5c665f8f646502141f0dc680e545
|
[
"MIT"
] | null | null | null |
import collections
def cal(num):
i=1
f=factor[num]
while i*i<=num:
if num%i==0 and i<=max(n,m) and num//i<=max(n,m):
f.append(i)
i+=1
return num
def dfs(i,j):
if i==m-1 and j==n-1:
return True
    # Out of bounds on either axis, or this value was already expanded
    if i>=m or j>=n or grid[i][j] in factor:
return False
num=cal(grid[i][j])
for p in factor[num]:
nj=num//p
if dfs(p-1,nj-1) or dfs(nj-1,p-1):
return True
return False
m=int(input())
n=int(input())
grid=[]
for i in range(m):
grid.append(list(map(int,input().split())))
factor=collections.defaultdict(list)
print('yes' if dfs(0, 0) else 'no')
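# Tiny constructed example (not an official CCC test case): for the 1x2 grid
#     2 5
# the start cell (1,1) holds 2, and 1*2 == 2 unlocks cell (1,2), the exit,
# so the input lines "1", "2", "2 5" make the program print "yes".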
| 22.032258
| 58
| 0.513909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.013177
|
b7bd809bb95458cc851905998de0d036112332ab
| 2,556
|
py
|
Python
|
volume_loader.py
|
xeTaiz/deep-volumetric-ambient-occlusion
|
8492ad957f1440d5bf93139f7cf9ee4c7997b0e2
|
[
"MIT"
] | 9
|
2020-08-24T20:03:17.000Z
|
2022-02-17T07:38:55.000Z
|
volume_loader.py
|
xeTaiz/deep-volumetric-ambient-occlusion
|
8492ad957f1440d5bf93139f7cf9ee4c7997b0e2
|
[
"MIT"
] | null | null | null |
volume_loader.py
|
xeTaiz/deep-volumetric-ambient-occlusion
|
8492ad957f1440d5bf93139f7cf9ee4c7997b0e2
|
[
"MIT"
] | 1
|
2021-07-12T01:39:24.000Z
|
2021-07-12T01:39:24.000Z
|
import os
import pydicom
import numpy as np
import dicom_numpy
from utils import hidden_errors
from tf_utils import *
from pathlib import Path
def read_dicom_folder(dicom_folder, rescale=None):
''' Reads all .dcm files in `dicom_folder` and merges them to one volume
Returns:
The volume and the affine transformation from pixel indices to xyz coordinates
'''
dss = [pydicom.dcmread(str(dicom_folder/dcm)) for dcm in os.listdir(dicom_folder) if dcm.endswith('.dcm')]
vol, mat = dicom_numpy.combine_slices(dss, rescale)
return vol, dss[0]
def get_largest_dir(dirs, minsize=100):
''' Returns the dir with the most files from `dirs`'''
m = max(dirs, key=lambda d: len(os.listdir(d)) if os.path.isdir(d) else 0)
if len(os.listdir(m)) >= minsize: return m
else: return None
def get_volume_dirs(path):
path = Path(path)
return list(
filter(lambda p: p is not None,
map( get_largest_dir, # extract subdir with most files in it (highest res volume)
                map( lambda p: list(p.iterdir()),                 # get list of actual volume directories
map( lambda p: next(p.iterdir())/'Unknown Study', # cd into subfolders CQ500-CT-XX/Unknown Study/
filter(lambda p: p.is_dir(), # Get all dirs, no files
path.iterdir()))))) # Iterate over path directory
)
def get_volume_gen(volume_dirs, rescale=None, tf_pts=None):
''' Make a generator that loads volumes from a list of volume directories, `volume_dirs`.
Returns: (volume:np.ndarray , index_to_pos_4x4:np.ndarray) '''
def vol_gen():
for vol_dir in volume_dirs:
with hidden_errors():
                try:
                    vol, dcm = read_dicom_folder(vol_dir, rescale)
                    # `dcm` is the first slice's dataset; the original referenced
                    # an undefined name `dicom` here.
                    vox_scl = np.array([dcm.PixelSpacing[0], dcm.PixelSpacing[1], dcm.SliceThickness]).astype(np.float32)
                    vox_scl /= vox_scl.min()
                    vol_name = str(vol_dir.parent.parent.parent.name)
                    # Use a per-volume local so the closed-over `tf_pts` argument
                    # is never rebound (rebinding it would make it a local and
                    # raise UnboundLocalError on the read below).
                    vol_tf_pts = tf_pts
                    if vol_tf_pts is None:
                        # The original passed an undefined `normalized_vol`;
                        # the loaded volume is assumed here.
                        peaks = get_histogram_peaks(vol)
                        vol_tf_pts = get_trapezoid_tf_points_from_peaks(peaks)
                except dicom_numpy.DicomImportException:
                    print(f'Could not load {vol_dir}')
                    continue
            yield vol, vol_tf_pts, vox_scl, vol_name
return vol_gen()
__all__ = ['read_dicom_folder', 'get_largest_dir', 'get_volume_gen', 'get_volume_dirs']
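# Usage sketch (illustrative; the dataset path below is a placeholder):
#     dirs = get_volume_dirs('/data/CQ500')
#     for vol, tf_pts, vox_scl, name in get_volume_gen(dirs):
#         print(name, vol.shape, vox_scl)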
| 44.068966
| 127
| 0.620892
| 0
| 0
| 1,034
| 0.404538
| 0
| 0
| 0
| 0
| 705
| 0.275822
|
b7c03a0cfc79b571a75dfb907cc43fbd5d0b1bf0
| 5,056
|
py
|
Python
|
sudoku_solver/gui.py
|
andrewhalle/sudoku_solver
|
56b88c49a3637449507a9afa66ec3d617d2118a5
|
[
"MIT"
] | null | null | null |
sudoku_solver/gui.py
|
andrewhalle/sudoku_solver
|
56b88c49a3637449507a9afa66ec3d617d2118a5
|
[
"MIT"
] | null | null | null |
sudoku_solver/gui.py
|
andrewhalle/sudoku_solver
|
56b88c49a3637449507a9afa66ec3d617d2118a5
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtCore import Qt, QSize, QPoint
from PyQt5.QtWidgets import QApplication, QDialog, QWidget, QLabel, QPushButton, QVBoxLayout, QHBoxLayout
from PyQt5.QtGui import QPainter, QColor, QPen, QFont
from .sudoku import Sudoku
class SudokuWidget(QWidget):
def __init__(self, parent=None):
super(SudokuWidget, self).__init__(parent)
self.sudoku = Sudoku()
self.focus_square = (0, 0)
self.setFixedSize(500, 500)
self.setFocusPolicy(Qt.ClickFocus)
def solve(self):
self.sudoku.solve()
self.update()
def clear(self):
self.sudoku = Sudoku()
self.update()
def enter(self, value):
i = self.focus_square[0]
j = self.focus_square[1]
if value < 0 or value > 9:
raise ValueError("that's not a valid sudoku value")
self.sudoku.data[i][j] = value
def moveFocusSquare(self, new_focus_square):
if not isinstance(new_focus_square, tuple) or len(new_focus_square) != 2:
raise ValueError("new focus square must be 2x2 tuple")
if new_focus_square[0] < 0 or new_focus_square[0] > 8 or new_focus_square[1] < 0 or new_focus_square[1] > 8:
raise ValueError("index out of bounds")
self.focus_square = new_focus_square
def keyPressEvent(self, event):
if event.key() == Qt.Key_Right:
if self.focus_square[0] == 8:
return
self.moveFocusSquare((self.focus_square[0] + 1, self.focus_square[1]))
self.update()
elif event.key() == Qt.Key_Left:
if self.focus_square[0] == 0:
return
self.moveFocusSquare((self.focus_square[0] - 1, self.focus_square[1]))
self.update()
elif event.key() == Qt.Key_Up:
if self.focus_square[1] == 0:
return
self.moveFocusSquare((self.focus_square[0], self.focus_square[1] - 1))
self.update()
elif event.key() == Qt.Key_Down:
if self.focus_square[1] == 8:
return
self.moveFocusSquare((self.focus_square[0], self.focus_square[1] + 1))
self.update()
elif event.text() in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
num = int(event.text())
self.enter(num)
self.update()
elif event.key() == Qt.Key_Backspace:
self.enter(0)
self.update()
def paintEvent(self, event):
row_width = self.width() / 9
white = QColor(255, 255, 255)
black = QColor(0, 0, 0)
blue = QColor(0, 0, 255)
linePen = QPen(black)
thickPen = QPen(black)
thickPen.setWidth(2)
bluePen = QPen(blue)
bluePen.setWidth(2)
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.translate(0, 0)
painter.setPen(thickPen)
painter.setBrush(QColor(255, 255, 255))
painter.drawConvexPolygon(QPoint(0, 0), QPoint(0, self.height()), QPoint(self.width(), self.height()), QPoint(self.width(), 0))
painter.setPen(linePen)
for i in range(8):
            # QPainter's (int, int, int, int) drawLine overload rejects floats
            x = int((i + 1) * row_width)
            y = int((i + 1) * row_width)
if i in [2, 5]:
painter.setPen(thickPen)
painter.drawLine(x, 0, x, self.height())
painter.drawLine(0, y, self.width(), y)
if i in [2, 5]:
painter.setPen(linePen)
painter.setPen(bluePen)
        # QPoint takes ints, so truncate the focus-square corners explicitly
        x1 = int(row_width * self.focus_square[0])
        x2 = int(row_width * (self.focus_square[0] + 1))
        y1 = int(row_width * self.focus_square[1])
        y2 = int(row_width * (self.focus_square[1] + 1))
painter.drawConvexPolygon(QPoint(x1, y1), QPoint(x1, y2), QPoint(x2, y2), QPoint(x2, y1))
painter.setPen(linePen)
painter.setFont(QFont("Arial", pointSize=20, weight=QFont.Normal))
for i in range(9):
for j in range(9):
if self.sudoku.data[i][j] != 0:
                    painter.drawText(int(row_width * i), int(row_width * j), int(row_width), int(row_width), Qt.AlignCenter, str(self.sudoku.data[i][j]))
class SudokuDialog(QDialog):
def __init__(self, parent=None):
super(SudokuDialog, self).__init__(parent)
layout = QHBoxLayout()
self.puzzle = SudokuWidget()
layout.addWidget(self.puzzle)
buttonLayout = QVBoxLayout()
self.solve_button = QPushButton("solve")
self.clear_button = QPushButton("clear")
self.solve_button.clicked.connect(self.puzzle.solve)
self.clear_button.clicked.connect(self.puzzle.clear)
buttonLayout.addWidget(self.solve_button)
buttonLayout.addWidget(self.clear_button)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.setFixedSize(650, 600)
self.puzzle.setFocus()
self.setWindowTitle("Sudoku Solver")
self.show()
def main():
app = QApplication([])
gui = SudokuDialog()
sys.exit(app.exec_())
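# Allow running the GUI module directly; the package entry point may also call main().
if __name__ == "__main__":
    main()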
| 36.637681
| 135
| 0.585443
| 4,719
| 0.933347
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.030261
|
b7c06f515d7439b0a77375fbc6fb115fb4977fe1
| 1,042
|
py
|
Python
|
swarmlib/util/functions.py
|
nkoutsov/swarmlib
|
fa70a5d9de50de5dacd5d499eba3b6bb72c39c05
|
[
"BSD-3-Clause"
] | null | null | null |
swarmlib/util/functions.py
|
nkoutsov/swarmlib
|
fa70a5d9de50de5dacd5d499eba3b6bb72c39c05
|
[
"BSD-3-Clause"
] | null | null | null |
swarmlib/util/functions.py
|
nkoutsov/swarmlib
|
fa70a5d9de50de5dacd5d499eba3b6bb72c39c05
|
[
"BSD-3-Clause"
] | null | null | null |
# ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
#pylint: disable=invalid-name
import inspect
from functools import wraps
import landscapes.single_objective
import numpy as np
# Wrapper for landscapes.single_objective functions for inputs > 1d
def wrap_landscapes_func(landscapes_func):
@wraps(landscapes_func)
def wrapper(x):
return np.apply_along_axis(func1d=landscapes_func, axis=0, arr=x)
return wrapper
# Add all functions from landscapes.single_objective
FUNCTIONS = {
name: wrap_landscapes_func(func)
for (name, func) in inspect.getmembers(
landscapes.single_objective, inspect.isfunction
)
if name not in ['colville', 'wolfe'] # Don't include 3D and 4D functions
}
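# Quick sanity check (assumes landscapes ships a `sphere` objective, which it
# does at the time of writing): wrapped functions evaluate one point per column.
if __name__ == '__main__':
    print(FUNCTIONS['sphere'](np.zeros((3, 5))))  # five zeros, one per point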
| 33.612903
| 104
| 0.600768
| 0
| 0
| 0
| 0
| 117
| 0.112284
| 0
| 0
| 562
| 0.539347
|
b7c327b6206469cd0cf73575f1196729fde0be3b
| 1,695
|
py
|
Python
|
nps/network_entity.py
|
Dry8r3aD/penta-nps
|
a4c74a2cd90eb2f95158e2040b7eca7056b062db
|
[
"MIT"
] | 6
|
2016-09-25T07:26:22.000Z
|
2022-03-16T06:30:05.000Z
|
nps/network_entity.py
|
Dry8r3aD/penta-nps
|
a4c74a2cd90eb2f95158e2040b7eca7056b062db
|
[
"MIT"
] | 14
|
2016-10-04T00:02:20.000Z
|
2017-02-22T03:06:21.000Z
|
nps/network_entity.py
|
Dry8r3aD/penta-nps
|
a4c74a2cd90eb2f95158e2040b7eca7056b062db
|
[
"MIT"
] | 5
|
2016-10-06T04:53:32.000Z
|
2019-12-08T13:48:58.000Z
|
# -*- coding: UTF-8 -*-
from collections import deque
class NetworkEntity(object):
"""Client or Server simulation network entity"""
def __init__(self, name):
# "client" or "server"
self.name = name
        # simulation packet list (queue)
# _packet_list contains send/recv PacketBuff
self._packet_list = deque()
# for scapy sniff
# ex)tp0, eth0, ...
self._interface_name = ""
self._interface_mac_addr = "00:00:00:00:00:00"
# nat!!
# port random generator for DUT
#self._nat_port = 0
#self._nat_magic_number = 99999
#self._use_nat_port = "False"
def get_name(self):
return self.name
def append_packet_list(self, packet_buff):
self._packet_list.append(packet_buff)
def pop_packet_list(self):
return self._packet_list.popleft()
def get_packet_list(self):
return self._packet_list
def is_empty_packet_list(self):
return (len(self._packet_list) == 0)
def set_interface(self, iface_name, iface_mac):
self._interface_name = iface_name
self._interface_mac_addr = iface_mac
def get_interface_name(self):
return self._interface_name
def get_interface_mac_addr(self):
return self._interface_mac_addr
# def set_use_nat_port(self, use_or_not):
# self._use_nat_port = use_or_not
#
# def get_use_nat_port(self):
# return self._use_nat_port
#
# def set_dut_nat_port(self, port):
# self._nat_port = port
#
# def get_dut_nat_port(self):
# return self._nat_port
#
# def get_nat_magic_number(self):
# return self._nat_magic_number
#
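# Minimal usage sketch; any object can stand in for a PacketBuff here.
if __name__ == "__main__":
    client = NetworkEntity("client")
    client.set_interface("tp0", "00:11:22:33:44:55")
    client.append_packet_list("dummy-packet-buff")
    while not client.is_empty_packet_list():
        print(client.get_name(), "->", client.pop_packet_list())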
| 25.298507
| 54
| 0.645428
| 1,263
| 0.745133
| 0
| 0
| 0
| 0
| 0
| 0
| 703
| 0.414749
|
b7c3aa3be6cad1fc615356fe4a0db24f49f796d6
| 898
|
py
|
Python
|
source/_sample/scipy/interp_spline_interest.py
|
showa-yojyo/notebook
|
82c15074c24d64a1dfcb70a526bc1deb2ecffe68
|
[
"MIT"
] | 14
|
2016-04-13T08:10:02.000Z
|
2021-04-19T09:42:51.000Z
|
source/_sample/scipy/interp_spline_interest.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | 88
|
2017-09-27T15:07:05.000Z
|
2019-10-02T04:05:03.000Z
|
source/_sample/scipy/interp_spline_interest.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""interp_spline_interest.py: Demonstrate spline interpolation.
"""
from scipy.interpolate import splrep, splev
import numpy as np
import matplotlib.pyplot as plt
# pylint: disable=invalid-name
# Interest rates of Jan, Feb, Mar, Jun, Dec.
x = np.array([1, 2, 3, 6, 12])
y = np.array([0.080, 0.100, 0.112, 0.144, 0.266])
# Interpolate the rates.
tck = splrep(x, y)
# Print the spline curve.
np.set_printoptions(formatter={'float': '{:.3f}'.format})
print("knot vector:\n", tck[0])
print("control points:\n", tck[1])
print("degree:\n", tck[2])
# Evaluate interest rates for each month.
for i in range(1, 13):
print(f"month[{i:02d}]: {float(splev(i, tck)):.3f}%")
# Plot the interest curve.
time = np.linspace(1, 12, 1000, endpoint=True)
rate = splev(time, tck)
plt.figure()
plt.plot(time, rate, color='deeppink')
plt.xlabel("Month")
plt.ylabel("Rate (%)")
plt.show()
| 24.944444
| 63
| 0.679287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 412
| 0.458797
|
b7c3bf02cb16b87bf7d4abf283104f4f08eda387
| 1,351
|
py
|
Python
|
Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py
|
Kuga23/Deep-Learning
|
86980338208c702b6bfcbcfffdb18498e389a56b
|
[
"MIT"
] | 3
|
2022-01-16T14:46:57.000Z
|
2022-02-20T22:40:16.000Z
|
Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py
|
Kuga23/Deep-Learning
|
86980338208c702b6bfcbcfffdb18498e389a56b
|
[
"MIT"
] | null | null | null |
Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py
|
Kuga23/Deep-Learning
|
86980338208c702b6bfcbcfffdb18498e389a56b
|
[
"MIT"
] | 6
|
2021-09-29T11:42:37.000Z
|
2022-02-02T02:33:51.000Z
|
import unittest
import numpy as np
from optimizer import SGD
from modules import ConvNet
from .utils import *
class TestSGD(unittest.TestCase):
""" The class containing all test cases for this assignment"""
def setUp(self):
"""Define the functions to be tested here."""
pass
def test_sgd(self):
model_list = [dict(type='Linear', in_dim=128, out_dim=10)]
criterion = dict(type='SoftmaxCrossEntropy')
model = ConvNet(model_list, criterion)
optimizer = SGD(model)
# forward once
np.random.seed(1024)
x = np.random.randn(32, 128)
np.random.seed(1024)
y = np.random.randint(10, size=32)
tmp = model.forward(x, y)
model.backward()
optimizer.update(model)
# forward twice
np.random.seed(512)
x = np.random.randn(32, 128)
np.random.seed(512)
y = np.random.randint(10, size=32)
tmp = model.forward(x, y)
model.backward()
optimizer.update(model)
expected_weights = np.load('tests/sgd_weights/w.npy')
expected_bias = np.load('tests/sgd_weights/b.npy')
        self.assertAlmostEqual(np.sum(np.abs(expected_weights - model.modules[0].weight)), 0, places=6)
        self.assertAlmostEqual(np.sum(np.abs(expected_bias - model.modules[0].bias)), 0)
| 28.744681
| 104
| 0.624722
| 1,236
| 0.914878
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.159141
|
b7c3c9491c620a60056834ce6902dd96ab059f3b
| 3,373
|
py
|
Python
|
Scripts/simulation/tunable_utils/create_object.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/tunable_utils/create_object.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/tunable_utils/create_object.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\tunable_utils\create_object.py
# Compiled at: 2020-05-07 00:26:47
# Size of source mod 2**32: 4106 bytes
from crafting.crafting_tunable import CraftingTuning
from objects.components.state import TunableStateValueReference, CommodityBasedObjectStateValue
from objects.system import create_object
from sims4.random import weighted_random_item
from sims4.tuning.tunable import TunableReference, TunableTuple, TunableList, TunableRange, AutoFactoryInit, HasTunableSingletonFactory, TunableFactory
import crafting, services, sims4
logger = sims4.log.Logger('CreateObject')
class ObjectCreator(HasTunableSingletonFactory, AutoFactoryInit):
@TunableFactory.factory_option
def get_definition(pack_safe):
return {'definition': TunableReference(description='\n The definition of the object to be created.\n ',
manager=(services.definition_manager()),
pack_safe=pack_safe)}
FACTORY_TUNABLES = {'definition': TunableReference(description='\n The definition of the object to be created.\n ',
manager=(services.definition_manager()))}
def __call__(self, **kwargs):
return create_object((self.definition), **kwargs)
def get_object_definition(self):
return self.definition
def get_footprint(self):
return self.definition.get_footprint()
@property
def id(self):
return self.definition.id
def _verify_tunable_quality_value_callback(instance_class, tunable_name, source, quality, weight):
if quality not in CraftingTuning.QUALITY_STATE.values:
logger.error('A TunableRecipeCreator {} specifies an invalid quality {}.', source, quality)
class RecipeCreator(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'recipe':TunableReference(description='\n Recipe to produce an object with.\n ',
manager=services.get_instance_manager(sims4.resources.Types.RECIPE)),
'weighted_quality':TunableList(description='\n A list of weighted quality in which the object will be created.\n \n If empty, it will apply a default quality.\n ',
tunable=TunableTuple(description='\n A possible level of quality for this item that will be generated.\n This will be randomly chosen based off weight against other items in the list.\n ',
weight=TunableRange(tunable_type=int,
default=1,
minimum=1),
quality=TunableStateValueReference(class_restrictions=CommodityBasedObjectStateValue),
verify_tunable_callback=_verify_tunable_quality_value_callback))}
def __call__(self, crafter_sim=None, post_add=None, **kwargs):
choices = [(quality.weight, quality.quality) for quality in self.weighted_quality]
quality = weighted_random_item(choices) if choices else None
return crafting.crafting_interactions.create_craftable((self.recipe), crafter_sim, quality=quality, post_add=post_add)
def get_object_definition(self):
return self.recipe.final_product.definition
| 54.403226
| 240
| 0.714794
| 2,321
| 0.688111
| 0
| 0
| 381
| 0.112956
| 0
| 0
| 1,022
| 0.302994
|
b7c4849c094e9c707d5b2331ea5e37f6828cbb6d
| 1,583
|
py
|
Python
|
题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py
|
ZhengyangXu/Algorithm-Daily-Practice
|
3017a3d476fc9a857026190ea4fae2911058df59
|
[
"MIT"
] | null | null | null |
题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py
|
ZhengyangXu/Algorithm-Daily-Practice
|
3017a3d476fc9a857026190ea4fae2911058df59
|
[
"MIT"
] | null | null | null |
题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py
|
ZhengyangXu/Algorithm-Daily-Practice
|
3017a3d476fc9a857026190ea4fae2911058df59
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=47 lang=python3
#
# [47] 全排列 II
#
# https://leetcode-cn.com/problems/permutations-ii/description/
#
# algorithms
# Medium (59.58%)
# Likes: 371
# Dislikes: 0
# Total Accepted: 78.7K
# Total Submissions: 132.1K
# Testcase Example: '[1,1,2]'
#
# 给定一个可包含重复数字的序列,返回所有不重复的全排列。
#
# 示例:
#
# 输入: [1,1,2]
# 输出:
# [
# [1,1,2],
# [1,2,1],
# [2,1,1]
# ]
#
#
# @lc code=start
from typing import List

class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
def backtrack(nums,track,visited):
if len(nums) == len(track):
track = track[:]
res.append(track)
for i in range(len(nums)):
if visited[i]:
continue
                # Dedup pruning: after sorting, equal values are consumed from
                # right to left; if an earlier duplicate is already in use while
                # this one is free, every remaining candidate at this level only
                # repeats work, so stop the loop entirely.
                if i > 0 and nums[i] == nums[i-1] and visited[i-1]:
                    break
track.append(nums[i])
visited[i] = True
backtrack(nums,track,visited)
track.pop()
visited[i] = False
nums.sort()
visited = [False]*len(nums)
res = []
track = []
backtrack(nums,track,visited)
return res
# @lc code=end
# def permuteUnique(self, nums: List[int]) -> List[List[int]]:
# def helper(nums,res,path):
# if not nums and path not in res:
# res.append(path)
# for i in range(len(nums)):
# helper(nums[:i]+nums[i+1:],res,path+[nums[i]])
# res = []
# helper(nums,res,[])
# return res
| 21.986111
| 67
| 0.475679
| 784
| 0.472005
| 0
| 0
| 0
| 0
| 0
| 0
| 787
| 0.473811
|
b7c4b41079ffcb026b138a48570833eeaf51d196
| 149
|
py
|
Python
|
testing/run-tests.py
|
8enmann/blobfile
|
34bf6fac2a0cd4ff5eb5c3e4964914758f264c0b
|
[
"Unlicense"
] | 21
|
2020-02-26T08:00:20.000Z
|
2022-02-28T00:06:50.000Z
|
testing/run-tests.py
|
8enmann/blobfile
|
34bf6fac2a0cd4ff5eb5c3e4964914758f264c0b
|
[
"Unlicense"
] | 146
|
2020-02-28T18:15:53.000Z
|
2022-03-24T06:37:57.000Z
|
testing/run-tests.py
|
8enmann/blobfile
|
34bf6fac2a0cd4ff5eb5c3e4964914758f264c0b
|
[
"Unlicense"
] | 15
|
2020-04-10T08:31:57.000Z
|
2022-02-28T03:43:02.000Z
|
import subprocess as sp
import sys
sp.run(["pip", "install", "-e", "."], check=True)
sp.run(["pytest", "blobfile"] + sys.argv[1:], check=True)
| 24.833333
| 58
| 0.604027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.261745
|
b7c583ce42f7da52ba4b620e07b7b1dce4f64729
| 6,467
|
py
|
Python
|
examples/Components/collision/PrimitiveCreation.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
examples/Components/collision/PrimitiveCreation.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
examples/Components/collision/PrimitiveCreation.py
|
sofa-framework/issofa
|
94855f488465bc3ed41223cbde987581dfca5389
|
[
"OML"
] | null | null | null |
import Sofa
import random
from cmath import *
############################################################################################
# this is a PythonScriptController example script
############################################################################################
############################################################################################
# following defs are used later in the script
############################################################################################
# utility methods
falling_speed = 0
capsule_height = 5
capsule_chain_height = 5
def createRigidCapsule(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
SurfNode = node.createChild('Surf')
SurfNode.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x_rand)+' '+str(y_rand)+' '+str(capsule_height/2)+' '+str(-x_rand)+' '+str(-y_rand)+' '+str(- capsule_height/2))
SurfNode.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
SurfNode.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
def createFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return node
def createCapsuleChain(parentNode,name,length,x,y,z):
node = parentNode.createChild(name)
#radius=random.uniform(1,3)
radius=0.5
height=5
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
ray = 3.0
t = 0.0
delta_t = 0.7
topo_edges=''
particles=''
velocities = ''
springs=''
for i in range(0,length):
particles += str(x + (ray * cos(t)).real)+' '+str(y + (ray * sin(t)).real)+' '+str(z + i*capsule_chain_height)+' '
t += delta_t
if i < length -1:
topo_edges += str(i)+' '+str(i + 1)+' '
springs += str(i)+' '+str(i + 1)+' 10 1 '+str(capsule_chain_height)+' '
velocities+='0 0 '+str(falling_speed)+' '
topo_edges += str(length - 2)+' '+str(length -1)
springs += str(length - 2)+' '+str(length -1)+' 10 1 '+str(capsule_chain_height)
node.createObject('MechanicalObject',template='Vec3d',name='falling_particles',position=particles,velocity=velocities)
node.createObject('StiffSpringForceField',template='Vec3d',name='springforcefield',stiffness='100',damping='1',spring=springs)
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges=topo_edges,drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return node
def createOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return node
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
return createRigidCapsule(parentNode,name,x,y,z)
else:
return createFlexCapsule(parentNode,name,x,y,z)
def createSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z),velocity='0 0 '+str(falling_speed))
node.createObject('TSphereModel',template='Vec3d',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
def createRigidSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Rigid',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
node.createObject('TSphereModel',template='Rigid',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
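# Usage sketch (illustrative; these helpers need a live SOFA scene, e.g. a
# rootNode passed to a createScene() callback, so the calls stay commented):
#     createRigidCapsule(rootNode, 'cap0', 0, 0, 20, 1.5)
#     createOBB(rootNode, 'box0', 5, 0, 20, 1, 1, 1)
#     createRigidSphere(rootNode, 'ball0', -5, 0, 20, 2)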
| 34.216931
| 256
| 0.66043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,222
| 0.343591
|
b7c6df93916a72fa3dc3b5903a942a8fbc2d13cd
| 350
|
py
|
Python
|
examples/tensorboard/nested.py
|
dwolfschlaeger/guildai
|
f82102ad950d7c89c8f2c2eafe596b2d7109dc57
|
[
"Apache-2.0"
] | 694
|
2018-11-30T01:06:30.000Z
|
2022-03-31T14:46:26.000Z
|
examples/tensorboard/nested.py
|
dwolfschlaeger/guildai
|
f82102ad950d7c89c8f2c2eafe596b2d7109dc57
|
[
"Apache-2.0"
] | 323
|
2018-11-05T17:44:34.000Z
|
2022-03-31T16:56:41.000Z
|
examples/tensorboard/nested.py
|
dwolfschlaeger/guildai
|
f82102ad950d7c89c8f2c2eafe596b2d7109dc57
|
[
"Apache-2.0"
] | 68
|
2019-04-01T04:24:47.000Z
|
2022-02-24T17:22:04.000Z
|
import tensorboardX
with tensorboardX.SummaryWriter("foo") as w:
w.add_scalar("a", 1.0, 1)
w.add_scalar("a", 2.0, 2)
with tensorboardX.SummaryWriter("foo/bar") as w:
w.add_scalar("a", 3.0, 3)
w.add_scalar("a", 4.0, 4)
with tensorboardX.SummaryWriter("foo/bar/baz") as w:
w.add_scalar("a", 5.0, 5)
w.add_scalar("a", 6.0, 6)
| 25
| 52
| 0.634286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.128571
|
b7c7e5d7b1958fefce1bb2170ee1a05f5b0e1bc0
| 444
|
py
|
Python
|
cobalt/__init__.py
|
NicolasDenoyelle/cobalt
|
08742676214e728ed83f3a90a118b9c020a347fd
|
[
"BSD-3-Clause"
] | null | null | null |
cobalt/__init__.py
|
NicolasDenoyelle/cobalt
|
08742676214e728ed83f3a90a118b9c020a347fd
|
[
"BSD-3-Clause"
] | null | null | null |
cobalt/__init__.py
|
NicolasDenoyelle/cobalt
|
08742676214e728ed83f3a90a118b9c020a347fd
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
# Copyright 2020 UChicago Argonne, LLC.
# (c.f. AUTHORS, LICENSE)
# For more info, see https://xgitlab.cels.anl.gov/argo/cobalt-python-wrapper
# SPDX-License-Identifier: BSD-3-Clause
##############################################################################
import subprocess
from cobalt.cobalt import Cobalt, UserPolicy
__all__ = [ 'Cobalt', 'UserPolicy' ]
| 37
| 79
| 0.481982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.801802
|
b7c83d7466393b727423c1185dc55c5006258a81
| 859
|
py
|
Python
|
anand.py
|
kyclark/py-grepper
|
ca7a17b1ffc2d666d62da6c80eb4cbc0bd2e547e
|
[
"MIT"
] | null | null | null |
anand.py
|
kyclark/py-grepper
|
ca7a17b1ffc2d666d62da6c80eb4cbc0bd2e547e
|
[
"MIT"
] | null | null | null |
anand.py
|
kyclark/py-grepper
|
ca7a17b1ffc2d666d62da6c80eb4cbc0bd2e547e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
orderNumbers = open("orders.txt", "r") #Order numbers to match
#Network path to a directory of files, each with the full details of an order
directoryEntries = os.scandir("")
outputFile = open("matchedData.dat", "w")
for entry in directoryEntries:
print("Currently parsing file ", entry.path)
fullOrderData = open(entry.path, "r")
#loop through each order from the ordernumber file
    for orderNo in orderNumbers:
for row in fullOrderData:
if orderNo.strip() in row:
outputFile.write(row)
#go back to start of orderdetails data to match on next order number
fullOrderData.seek(0)
#go back to order numbers again to match on the next order details file
orderNumbers.seek(0)
fullOrderData.close()
orderNumbers.close()
outputFile.close()
print("done")
| 31.814815
| 76
| 0.696158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.438882
|
b7c97b1397f5b96121b2b0909bc775d38cbcd523
| 2,968
|
py
|
Python
|
tests/test_manager.py
|
Vizzuality/cog_worker
|
ae12d2fc42945fedfea4a22394247db9a73d867e
|
[
"MIT"
] | 24
|
2021-08-23T14:51:02.000Z
|
2021-12-20T09:45:10.000Z
|
tests/test_manager.py
|
Vizzuality/cog_worker
|
ae12d2fc42945fedfea4a22394247db9a73d867e
|
[
"MIT"
] | null | null | null |
tests/test_manager.py
|
Vizzuality/cog_worker
|
ae12d2fc42945fedfea4a22394247db9a73d867e
|
[
"MIT"
] | 1
|
2021-08-24T01:09:36.000Z
|
2021-08-24T01:09:36.000Z
|
import pytest
import rasterio as rio
from rasterio.io import DatasetWriter
from cog_worker import Manager
from rasterio import MemoryFile, crs
TEST_COG = "tests/roads_cog.tif"
@pytest.fixture
def molleweide_manager():
return Manager(
proj="+proj=moll",
scale=50000,
)
@pytest.fixture
def sample_function():
def myfunc(worker):
return worker.read(TEST_COG)
return myfunc
def test_preview(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.preview(sample_function, max_size=123)
assert max(arr.shape) == 123, "Expected maximum array dimension to be 123px"
def test_tile(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.tile(sample_function, x=1, y=2, z=3)
assert arr.shape == (1, 256, 256), "Expected 256x256 tile"
def test_chunk_execute(molleweide_manager, sample_function):
chunks = list(molleweide_manager.chunk_execute(sample_function, chunksize=123))
for arr, bbox in chunks:
assert max(arr.shape) <= 123, "Max chunk size should be 123px"
def test_chunk_params(molleweide_manager):
chunks = list(molleweide_manager.chunk_params(chunksize=123))
assert len(chunks) == 18, "Expected ~18 chunks for 123px tiles at 50km scale"
def test__open_writer(molleweide_manager):
with MemoryFile() as memfile:
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
assert isinstance(writer, DatasetWriter)
def test_chunk_save(molleweide_manager, sample_function):
full_arr = molleweide_manager.execute(sample_function)[0]
with MemoryFile() as memfile:
molleweide_manager.chunk_save(memfile, sample_function)
memfile.seek(0)
with rio.open(memfile) as src:
assert src.profile["crs"] == crs.CRS.from_string("+proj=moll")
assert src.profile["transform"][0] == 50000
arr = src.read()
assert arr.shape == full_arr.shape
assert (
abs(arr.sum() / full_arr.data.sum() - 1) < 0.002
), "Error should be less than 0.2%"
def test__write_chunk(molleweide_manager, sample_function):
with MemoryFile() as memfile:
arr, bbox = molleweide_manager.execute(sample_function)
print(arr.mask.sum())
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
molleweide_manager._write_chunk(writer, arr, bbox)
memfile.seek(0)
with rio.open(memfile) as src:
written = src.read(masked=True)
assert (written == arr).all()
assert (written.mask == arr.mask).all()
def test__chunk_bounds(molleweide_manager):
chunk = molleweide_manager._chunk_bounds(0, 0, 123)
assert chunk == (
-18040095.696147293,
2674978.852256801,
-11890095.696147293,
8824978.852256801,
)
def test__num_chunks(molleweide_manager):
assert molleweide_manager._num_chunks(123) == (6, 3)
| 31.913978
| 83
| 0.686995
| 0
| 0
| 0
| 0
| 233
| 0.078504
| 0
| 0
| 245
| 0.082547
|
b7c9f4fcfbbd13ff61698bd25e58c747a3f4a5c0
| 1,031
|
py
|
Python
|
CLIP/experiments/tagger/main_binary.py
|
ASAPP-H/clip2
|
e8ba2a3cf4be01ec26bde5107c5a2813bddf8a3b
|
[
"MIT"
] | null | null | null |
CLIP/experiments/tagger/main_binary.py
|
ASAPP-H/clip2
|
e8ba2a3cf4be01ec26bde5107c5a2813bddf8a3b
|
[
"MIT"
] | 3
|
2021-09-08T02:07:49.000Z
|
2022-03-12T00:33:51.000Z
|
CLIP/experiments/tagger/main_binary.py
|
ASAPP-H/clip2
|
e8ba2a3cf4be01ec26bde5107c5a2813bddf8a3b
|
[
"MIT"
] | null | null | null |
from train import train_model
from utils import *
import os
import sys
pwd = os.environ.get('CLIP_DIR')
DATA_DIR = "%s/data/processed/" % pwd
exp_name = "non_multilabel"
run_name = "sentence_structurel_with_crf"
train_file_name = "MIMIC_train_binary.csv"
dev_file_name = "MIMIC_val_binary.csv"
test_file_name = "test_binary.csv"
exp_name = "outputs_binary"
train = read_sentence_structure(os.path.join(DATA_DIR, train_file_name))
dev = read_sentence_structure(os.path.join(DATA_DIR, dev_file_name))
test = read_sentence_structure(os.path.join(DATA_DIR, test_file_name))
run_name = "binary"
def main(args):
train_model(
train,
dev,
test,
args[0],
exp_name,
use_crf=True,
learning_rate=float(args[1]),
epochs=int(args[2]),
writer_preds_freq=10,
embeddings_type="BioWord",
list_of_possible_tags=["followup"],
embeddings_path="%s/CLIP/experiments/tagger/embeddings" % pwd,
)
if __name__ == "__main__":
main(sys.argv[1:])
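# Invocation sketch (inferred from main() above; the first argument is handed
# straight to train_model, so its exact meaning depends on that signature):
#     python main_binary.py <train_model_arg> <learning_rate> <epochs>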
| 25.775
| 72
| 0.696411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 231
| 0.224054
|
b7caeb322abf8aa00666ef3387b5272abace4020
| 528
|
py
|
Python
|
persons/urls.py
|
nhieckqo/lei
|
f461d8dcbc8f9e037c661abb18b226aa6fa7acae
|
[
"MIT"
] | null | null | null |
persons/urls.py
|
nhieckqo/lei
|
f461d8dcbc8f9e037c661abb18b226aa6fa7acae
|
[
"MIT"
] | null | null | null |
persons/urls.py
|
nhieckqo/lei
|
f461d8dcbc8f9e037c661abb18b226aa6fa7acae
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'persons'
urlpatterns = [
path('', views.PersonsTableView.as_view(),name='persons_list'),
path('persons_details/<int:pk>',views.PersonsUpdateView.as_view(),name='persons_details_edit'),
path('persons_details/create',views.PersonsCreateView.as_view(),name='persons_details_add'),
path('persons_details/<int:pk>/delete',views.PersonsDeleteView.as_view(),name="persons_details_delete"),
path('persons_details/sort',views.event_gate, name='sort'),
]
| 40.615385
| 108
| 0.753788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.38447
|
b7cb10c335526f698fe7f642c39ab4db21115697
| 246
|
py
|
Python
|
logxs/__version__.py
|
minlaxz/logxs
|
e225e7a3c69b01595e1f2c11552b70e4b1540d47
|
[
"MIT"
] | null | null | null |
logxs/__version__.py
|
minlaxz/logxs
|
e225e7a3c69b01595e1f2c11552b70e4b1540d47
|
[
"MIT"
] | null | null | null |
logxs/__version__.py
|
minlaxz/logxs
|
e225e7a3c69b01595e1f2c11552b70e4b1540d47
|
[
"MIT"
] | null | null | null |
__title__ = 'logxs'
__description__ = 'Replacing with build-in `print` with nice formatting.'
__url__ = 'https://github.com/minlaxz/logxs'
__version__ = '0.3.2'
__author__ = 'Min Latt'
__author_email__ = 'minminlaxz@gmail.com'
__license__ = 'MIT'
| 35.142857
| 73
| 0.747967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.569106
|
b7cb5d32a878f3d9855d96b75ff3e715c839115f
| 977
|
py
|
Python
|
src/PyMud/Systems/system.py
|
NichCritic/pymud
|
583ec16f5a75dc7b45146564b39851291dc07b6c
|
[
"MIT"
] | null | null | null |
src/PyMud/Systems/system.py
|
NichCritic/pymud
|
583ec16f5a75dc7b45146564b39851291dc07b6c
|
[
"MIT"
] | null | null | null |
src/PyMud/Systems/system.py
|
NichCritic/pymud
|
583ec16f5a75dc7b45146564b39851291dc07b6c
|
[
"MIT"
] | null | null | null |
import time
class System(object):
manditory = []
optional = []
handles = []
def __init__(self, node_factory):
self.node_factory = node_factory
def process(self):
for node in self.get_nodes():
# print(f"{self.__class__.__name__} system got message from
# {node.id}")
self.handle(node)
self.clean(node)
def handle(self, node):
pass
def clean(self, node):
[node.remove_component(c) for c in self.handles]
def get_nodes(self):
return self.node_factory.create_node_list(self.manditory,
self.optional)
class TimedSystem(System):
def is_timed_out(self, lt, ct, timeout):
if lt is None:
return False
return ct - lt > timeout
def process(self):
t = time.time()
for node in self.get_nodes():
self.handle(node, t)
self.clean(node)
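# Illustrative subclass sketch (the component names below are invented):
#
#     class DecaySystem(System):
#         manditory = ['health']
#         handles = ['decay']
#
#         def handle(self, node):
#             node.health.value -= 1
#
# Each tick, process() pulls the matching nodes from the node factory,
# handle() applies the effect, and clean() strips the 'decay' component so
# the effect fires only once per request.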
| 23.261905
| 71
| 0.551689
| 959
| 0.981576
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.073695
|
b7cb98a29e28bbca96a3da9a3ddecb43eea2b232
| 2,918
|
py
|
Python
|
hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py
|
m-novikov/hytra
|
0dc28deaa2571fa8bea63ca178f0e53cc1cd7508
|
[
"MIT"
] | null | null | null |
hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py
|
m-novikov/hytra
|
0dc28deaa2571fa8bea63ca178f0e53cc1cd7508
|
[
"MIT"
] | null | null | null |
hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py
|
m-novikov/hytra
|
0dc28deaa2571fa8bea63ca178f0e53cc1cd7508
|
[
"MIT"
] | null | null | null |
from hytra.pluginsystem import transition_feature_vector_construction_plugin
import numpy as np
try:
    from compiler.ast import flatten  # Python 2 only
except ImportError:  # the `compiler` module was removed in Python 3
    def flatten(seq):
        return [x for item in seq for x in (flatten(item) if isinstance(item, (list, tuple)) else [item])]
class TransitionFeaturesSubtraction(
transition_feature_vector_construction_plugin.TransitionFeatureVectorConstructionPlugin
):
"""
Computes the subtraction of features in the feature vector
"""
def constructFeatureVector(
self, featureDictObjectA, featureDictObjectB, selectedFeatures
):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histrogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
features = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
features.append(
float(featureDictObjectA[key]) - float(featureDictObjectB[key])
)
else:
features.extend(
flatten(
(
featureDictObjectA[key].astype("float32")
- featureDictObjectB[key].astype("float32")
).tolist()
)
)
# there should be no nans or infs
assert np.all(np.isfinite(np.array(features)))
return features
def getFeatureNames(self, featureDictObjectA, featureDictObjectB, selectedFeatures):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histrogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
featuresNames = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
featuresNames.append("A[{key}]-B[{key}]".format(key=key))
else:
featuresNames.extend(
[
"A[{key}][{i}]-B[{key}][{i}]".format(key=key, i=i)
for i in range(
len(
(
featureDictObjectA[key]
- featureDictObjectB[key]
).tolist()
)
)
]
)
return featuresNames
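# Worked example (illustrative): with a single scalar feature,
#     plugin = TransitionFeaturesSubtraction()
#     plugin.constructFeatureVector({'Count': 5.0}, {'Count': 3.0}, ['Count'])
# returns [2.0], and the matching getFeatureNames(...) call gives
# ['A[Count]-B[Count]'].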
| 35.585366
| 91
| 0.48732
| 2,786
| 0.954764
| 0
| 0
| 0
| 0
| 0
| 0
| 315
| 0.107951
|
b7cbae55dbd90dfb87f2e9c515ec5098f54466ea
| 5,438
|
py
|
Python
|
sprites/player.py
|
hectorpadin1/FICGames
|
6d75c3ef74f0d6d2881021833fe06cd67e207ab1
|
[
"MIT"
] | null | null | null |
sprites/player.py
|
hectorpadin1/FICGames
|
6d75c3ef74f0d6d2881021833fe06cd67e207ab1
|
[
"MIT"
] | null | null | null |
sprites/player.py
|
hectorpadin1/FICGames
|
6d75c3ef74f0d6d2881021833fe06cd67e207ab1
|
[
"MIT"
] | 1
|
2022-03-29T15:38:18.000Z
|
2022-03-29T15:38:18.000Z
|
import pygame as pg
from sprites.character import Character
from pygame.math import Vector2
from settings import *
from math import cos, pi
from control import Controler
from sprites.gun import MachineGun, Pistol, Rifle
from managers.resourcemanager import ResourceManager as GR
from utils.observable import Observable
class Player(Character, Observable):
def __init__(self, x, y, bullets, collide_groups, observers, level):
Character.__init__(self, None, GR.PLAYER, PLAYER_HIT_RECT, x, y, PLAYER_HEALTH, collide_groups, GR.HERO_POSITIONS, 5, [8, 8, 8, 8, 3])
Observable.__init__(self, observers)
self.last_shot = 0
pg.mouse.set_pos((x+10) * SPRITE_BOX, y * SPRITE_BOX)
self.mouse = pg.mouse.get_pos()
self.controler = Controler()
self.guns = [Pistol(bullets), Rifle(bullets), MachineGun(bullets)][0:level]
self.gunSelector = 0
self.shooting = False
self.reloading = False
self.last_change = pg.time.get_ticks()
        # Notify observers of the initial state
self.notify("health", self.health)
if self.guns != []:
self.notify("gun", self.gunSelector)
self.notify("ammo", self.guns[self.gunSelector].current_mag)
self.notify("bullets", self.guns[self.gunSelector].bullets)
    # Actions driven by the controller configuration
def __callControler(self):
if self.health <= 0 :
if (self.numImagenPostura < 2) and (pg.time.get_ticks() - self.last_change > ANIM_DELAY*4):
self.numImagenPostura += 1
return
        # Player movement dynamics
self.rot_speed = 0
self.vel = Vector2(0, 0)
speed = self.vel.copy()
        # Axis movement
if self.controler.left():
self.vel.x = -PLAYER_SPEED
if self.controler.right():
self.vel.x = PLAYER_SPEED
if self.controler.up():
self.vel.y = -PLAYER_SPEED
if self.controler.down():
self.vel.y = PLAYER_SPEED
        # Opposite movements cancel each other out
if self.controler.left() and self.controler.right():
self.vel.x = 0
if self.controler.up() and self.controler.down():
self.vel.y = 0
        # Diagonal movement
if self.vel.x!=0 and self.vel.y!=0:
self.vel *= cos(pi/4)
        # Smooth animations
if pg.time.get_ticks() - self.last_change > ANIM_DELAY:
if speed != self.vel:
self.numImagenPostura = (self.numImagenPostura + 1)%8
else:
self.numImagenPostura = 0
self.last_change = pg.time.get_ticks()
        # Check whether the gun should be switched (and whether we can)
pistol = self.controler.switchPistol()
if self.guns != []:
if (pistol > 0) and (pistol <= len(self.guns)):
self.guns[self.gunSelector].cancel_reload()
self.gunSelector = pistol -1
self.notify("gun",pistol -1)
self.notify("ammo", self.guns[self.gunSelector].current_mag)
self.notify("bullets",self.guns[self.gunSelector].bullets)
else:
self.reloading = True
return
        # Reload
if (self.controler.reload()) and (self.guns[self.gunSelector].bullets > 0):
self.guns[self.gunSelector].do_reload()
self.reloading = True
self.notify("ammo",-1)
        # Shoot
if self.controler.isShooting():
self.guns[self.gunSelector].shoot(self.pos, self.rot)
self.notify("ammo",self.guns[self.gunSelector].current_mag)
self.notify("bullets",self.guns[self.gunSelector].bullets)
def update_health(self, health):
if health <= 0:
self.health = 0
self.numPostura = 4
self.numImagenPostura = 0
else:
self.health = health
self.notify("health", self.health)
    # Refill the player's ammunition
def update_ammo(self):
for gun in self.guns:
gun.bullets = gun.MAG_SIZE
self.notify("bullets", self.guns[self.gunSelector].bullets)
def update(self, camera_pos, dt):
self.__callControler()
        # Work out where to move and which way to face
direction = pg.mouse.get_pos() - Vector2(camera_pos) - self.pos
self.rot = direction.angle_to(Vector2(1, 0))
self.pos += self.vel * (dt/1000)
if self.guns != []:
self.guns[self.gunSelector].update()
if self.health <= 0:
super().update()
return
        # Pick a sprite-sheet row depending on whether we are reloading and which gun is equipped
if self.reloading:
self.numPostura = 3
if self.guns != [] and self.guns[self.gunSelector].reload == False:
self.notify("ammo",self.guns[self.gunSelector].current_mag)
self.notify("bullets",self.guns[self.gunSelector].bullets)
self.reloading = False
elif self.gunSelector == 0:
self.numPostura = 1
elif self.gunSelector == 1:
self.numPostura = 0
elif self.gunSelector == 2:
self.numPostura = 2
super().update()
| 36.743243
| 142
| 0.590291
| 5,075
| 0.932219
| 0
| 0
| 0
| 0
| 0
| 0
| 594
| 0.109111
|
b7cbe1a4f3d3609804f5ba47a2634ce6c4505d36
| 716
|
py
|
Python
|
yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py
|
jxtxinbing/ops-build
|
9008de2d8e100f3f868c66765742bca9fa98f3f9
|
[
"Apache-2.0"
] | 16
|
2017-01-17T15:20:43.000Z
|
2021-03-19T05:45:14.000Z
|
yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py
|
jxtxinbing/ops-build
|
9008de2d8e100f3f868c66765742bca9fa98f3f9
|
[
"Apache-2.0"
] | 415
|
2016-12-20T17:20:45.000Z
|
2018-09-23T07:59:23.000Z
|
yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py
|
jxtxinbing/ops-build
|
9008de2d8e100f3f868c66765742bca9fa98f3f9
|
[
"Apache-2.0"
] | 10
|
2016-12-20T13:24:50.000Z
|
2021-03-19T05:46:43.000Z
|
#
# Gtk+ UI pieces for BitBake
#
# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
| 39.777778
| 73
| 0.765363
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 699
| 0.976257
|
b7cc1da3745ec1958d532f60dd1185d8b2057b84
| 10,198
|
py
|
Python
|
mytardisbf/migrations/0001_initial_data.py
|
keithschulze/mytardisbf
|
cc15fc9af89cf96c4d860c41fe5b0f366d4ee0d6
|
[
"MIT"
] | 2
|
2020-07-09T01:21:00.000Z
|
2022-02-06T17:33:57.000Z
|
mytardisbf/migrations/0001_initial_data.py
|
keithschulze/mytardisbf
|
cc15fc9af89cf96c4d860c41fe5b0f366d4ee0d6
|
[
"MIT"
] | 14
|
2015-07-21T05:12:58.000Z
|
2017-11-16T10:46:30.000Z
|
mytardisbf/migrations/0001_initial_data.py
|
keithschulze/mytardisbf
|
cc15fc9af89cf96c4d860c41fe5b0f366d4ee0d6
|
[
"MIT"
] | 4
|
2015-08-04T10:57:29.000Z
|
2017-11-28T10:50:33.000Z
|
# -*- coding: utf-8 -*-
from django.db import migrations
from tardis.tardis_portal.models import (
Schema,
ParameterName,
DatafileParameter,
DatafileParameterSet
)
from mytardisbf.apps import (
OMESCHEMA,
BFSCHEMA
)
def forward_func(apps, schema_editor):
"""Create mytardisbf schemas and parameternames"""
db_alias = schema_editor.connection.alias
ome_schema, _ = Schema.objects\
.using(db_alias)\
.update_or_create(
name="OME Metadata",
namespace="http://tardis.edu.au/schemas/bioformats/1",
subtype=None,
hidden=True,
type=3,
immutable=True,
defaults={
'namespace': OMESCHEMA
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="ome",
data_type=5,
is_searchable=False,
choices="",
comparison_type=1,
full_name="OME Metadata",
units="xml",
order=1,
immutable=True,
schema=ome_schema,
defaults={
"full_name": "OMEXML Metadata"
}
)
series_schema, _ = Schema.objects\
.using(db_alias)\
.update_or_create(
name="Series Metadata",
namespace=BFSCHEMA,
subtype="",
hidden=False,
type=3,
immutable=True
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="id",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="ID",
units="",
order=9999,
immutable=True,
schema=series_schema,
defaults={
"is_searchable": False
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="name",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Name",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="type",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Pixel Type",
units="",
order=9999,
immutable=True,
schema=series_schema,
defaults={
"name": "pixel_type"
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="dimensionorder",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Dimension Order",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizex",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeX",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizey",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeY",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizeZ",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeZ",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizec",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeC",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizet",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeT",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizex",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size X",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizey",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size Y",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizez",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size Z",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="timeincrement",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Time Increment",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="excitationwavelength",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Excitation Wavelength",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="samplesperpixel",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Samples per Pixel",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="emissionwavelength",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Emission Wavelength",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="pinholesize",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Pinhole Size",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="previewImage",
data_type=5,
is_searchable=False,
choices="",
comparison_type=1,
full_name="Preview",
units="image",
order=1,
immutable=True,
schema=series_schema,
defaults={
"name": "preview_image"
}
)
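# Note on the pattern above: update_or_create() matches rows on the
# non-`defaults` keyword arguments and, when a match exists, overwrites only
# the fields listed in `defaults`; this is how e.g. name="type" is migrated
# to name="pixel_type" while all other attributes keep the lookup values.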
def reverse_func(apps, schema_editor):
db_alias = schema_editor.connection.alias
ome_schema = Schema.objects\
.using(db_alias)\
.get(namespace=OMESCHEMA)
ome_pn = ParameterName.objects\
.using(db_alias)\
.get(schema=ome_schema)
DatafileParameter.objects\
.using(db_alias)\
.filter(name=ome_pn)\
.delete()
DatafileParameterSet.objects\
.using(db_alias)\
.filter(schema=ome_schema)\
.delete()
ome_pn.delete()
ome_schema.delete()
bf_schema = Schema.objects\
.using(db_alias)\
.get(namespace=BFSCHEMA)
bf_param_names = [
"id", "name", "pixel_type", "dimensionorder", "sizex", "sizey", "sizez",
"sizec", "sizet", "physicalsizex", "physicalsizey", "physicalsizez",
"timeincrement", "excitationwavelength", "samplesperpixel",
"emissionwavelength", "pinholesize", "preview_image"
]
def delete_param_names(param_name_str):
pn = ParameterName.objects\
.using(db_alias)\
.get(schema=bf_schema, name=param_name_str)
DatafileParameter.objects\
.using(db_alias)\
.filter(name=pn)\
.delete()
pn.delete()
[delete_param_names(pn) for pn in bf_param_names]
DatafileParameterSet.objects\
.using(db_alias)\
.filter(schema=bf_schema)\
.delete()
bf_schema.delete()
class Migration(migrations.Migration):
"""MyTardis Schema and ParameterName migrations"""
dependencies = [
("tardis_portal", "0001_initial"),
]
operations = [
migrations.RunPython(forward_func, reverse_func),
]
| 24.995098
| 80
| 0.499314
| 247
| 0.02422
| 0
| 0
| 0
| 0
| 0
| 0
| 1,075
| 0.105413
|
b7cc56e3520e5aa20afd04452b3d297df2206e1a
| 1,473
|
py
|
Python
|
ipmanagement/models.py
|
smilelhong/ip_manage
|
7581c596a84e943dc5dea4122eca3de14263992b
|
[
"Apache-2.0"
] | null | null | null |
ipmanagement/models.py
|
smilelhong/ip_manage
|
7581c596a84e943dc5dea4122eca3de14263992b
|
[
"Apache-2.0"
] | null | null | null |
ipmanagement/models.py
|
smilelhong/ip_manage
|
7581c596a84e943dc5dea4122eca3de14263992b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
# Create your models here.
class IP_Address(models.Model):
ip = models.GenericIPAddressField(verbose_name=u"IP address")
gateway = models.GenericIPAddressField(verbose_name=u"Gateway")
network = models.GenericIPAddressField(verbose_name=u"Network address")
netmask = models.CharField(max_length=20,default='',null=True,blank=True,verbose_name=u"Netmask")
system = models.CharField(max_length=64,default='',null=True,blank=True,verbose_name=u"Application system")
apply_person = models.CharField(max_length=64,default='',null=True,blank=True,verbose_name=u"Applicant")
state = models.CharField(max_length=20,choices=((u"Assigned",u"Assigned"),(u"Unassigned",u"Unassigned")),verbose_name=u"Status")
# Pass the callable, not datetime.now(), so the default is evaluated per save
apply_time = models.DateField(default=datetime.now,verbose_name=u"Application time")
class IP_Range(models.Model):
start_ip = models.GenericIPAddressField(verbose_name=u"Start IP")
end_ip = models.GenericIPAddressField(verbose_name=u"End IP")
network = models.GenericIPAddressField(verbose_name=u"Network address")
netmask = models.CharField(max_length=20,default='',verbose_name=u"Netmask")
use_ip = models.CharField(max_length=20,default='',null=True,blank=True,verbose_name=u"Used IP count")
left_ip = models.CharField(max_length=20,default='',null=True,blank=True,verbose_name=u"Remaining IP count")
create_time = models.DateField(default=datetime.now,verbose_name=u"Creation time")
des = models.CharField(max_length=20,default='',null=True,blank=True,verbose_name=u"Description")
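# Usage sketch (illustrative): list the addresses still available in a subnet.
# IP_Address.objects.filter(network='10.0.0.0', state=u"Unassigned")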
| 64.043478
| 104
| 0.745418
| 1,475
| 0.929427
| 0
| 0
| 0
| 0
| 0
| 0
| 316
| 0.199118
|
b7d02035de2ed671a7db2b55074f9e4dd487d817
| 9,616
|
py
|
Python
|
tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py
|
kkasperczyk-no/sdk-openthread
|
385e19da1ae15f27872c2543b97276a42f102ead
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py
|
kkasperczyk-no/sdk-openthread
|
385e19da1ae15f27872c2543b97276a42f102ead
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py
|
kkasperczyk-no/sdk-openthread
|
385e19da1ae15f27872c2543b97276a42f102ead
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import pktverify
from pktverify import packet_verifier, packet_filter, consts
from pktverify.consts import MA1, PBBR_ALOC
import config
import thread_cert
# Test description:
# The purpose of this test case is to verify that a Primary BBR (DUT) can manage
# a re-registration of a device on its network to remain receiving multicasts.
# The test also verifies the usage of UDP multicast packets across backbone and
# internal Thread network.
#
# Topology:
# ----------------(eth)------------------
# | | |
# BR_1 (Leader) ---- BR_2 HOST
# | |
# | |
# Router_1 -----------+
#
BR_1 = 1
BR_2 = 2
ROUTER_1 = 3
HOST = 4
REG_DELAY = 10
UDP_HEADER_LENGTH = 8
class MATN_05_ReregistrationToSameMulticastGroup(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR_1: {
'name': 'BR_1',
'is_otbr': True,
'allowlist': [BR_2, ROUTER_1],
'version': '1.2',
'router_selection_jitter': 2,
},
BR_2: {
'name': 'BR_2',
'allowlist': [BR_1, ROUTER_1],
'is_otbr': True,
'version': '1.2',
'router_selection_jitter': 2,
},
ROUTER_1: {
'name': 'Router_1',
'allowlist': [BR_1, BR_2],
'version': '1.2',
'router_selection_jitter': 2,
},
HOST: {
'name': 'Host',
'is_host': True
},
}
def test(self):
br1 = self.nodes[BR_1]
br2 = self.nodes[BR_2]
router1 = self.nodes[ROUTER_1]
host = self.nodes[HOST]
br1.set_backbone_router(reg_delay=REG_DELAY, mlr_timeout=consts.MLR_TIMEOUT_MIN)
br1.start()
self.simulator.go(10)
self.assertEqual('leader', br1.get_state())
self.assertTrue(br1.is_primary_backbone_router)
router1.start()
self.simulator.go(10)
self.assertEqual('router', router1.get_state())
br2.start()
self.simulator.go(10)
self.assertEqual('router', br2.get_state())
self.assertFalse(br2.is_primary_backbone_router)
host.start(start_radvd=False)
self.simulator.go(10)
# Router_1 registers for multicast address, MA1, at BR_1.
router1.add_ipmaddr(MA1)
self.simulator.go(5)
# 1. Host sends a ping packet to the multicast address, MA1.
self.assertTrue(
host.ping(MA1, backbone=True, ttl=10, interface=host.get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0]))
# Ensure Router_1 renews its multicast registration
self.simulator.go(consts.MLR_TIMEOUT_MIN - 10)
# 4. Within MLR_TIMEOUT_MIN seconds, Host sends a ping packet to the
# multicast address, MA1. The destination port 5683 is used for the UDP
# Multicast packet transmission.
host.udp_send_host(data='PING', ipaddr=MA1, port=5683)
self.simulator.go(5)
# 6a. By internal means, Router_1 stops listening to the multicast
# address, MA1.
router1.del_ipmaddr(MA1)
# 7. After (MLR_TIMEOUT_MIN+2) seconds, Host multicasts a ping packet to
# multicast address, MA1, on the backbone link.
self.simulator.go(consts.MLR_TIMEOUT_MIN + 2)
self.assertFalse(
host.ping(MA1, backbone=True, ttl=10, interface=host.get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0]))
self.collect_ipaddrs()
self.collect_rloc16s()
self.collect_rlocs()
self.collect_leader_aloc(BR_1)
self.collect_extra_vars()
def verify(self, pv: pktverify.packet_verifier.PacketVerifier):
pkts = pv.pkts
vars = pv.vars
pv.summary.show()
logging.info(f'vars = {vars}')
# Ensure the topology is formed correctly
pv.verify_attached('Router_1', 'BR_1')
pv.verify_attached('BR_2')
# Initial registration
# Router_1 registers for multicast address, MA1, at BR_1.
# Router_1 unicasts an MLR.req CoAP request to BR_1 as
# "coap://[<BR_1 RLOC or PBBR ALOC>]:MM/n/mr".
# The payload contains "IPv6Address TLV: MA1".
initial_registration_pkt = pkts.filter_wpan_src64(vars['Router_1']) \
.filter_ipv6_2dsts(vars['BR_1_RLOC'], PBBR_ALOC) \
.filter_coap_request('/n/mr') \
.filter(lambda p: p.thread_meshcop.tlv.ipv6_addr == [MA1]) \
.must_next()
# 1. Host sends a ping packet to the multicast address, MA1.
_pkt = pkts.filter_eth_src(vars['Host_ETH']) \
.filter_ipv6_dst(MA1) \
.filter_ping_request() \
.must_next()
# 2. BR_1 forwards the ping packet with multicast address, MA1, to its
# Thread Network encapsulated in an MPL packet.
pkts.filter_wpan_src64(vars['BR_1']) \
.filter_AMPLFMA(mpl_seed_id=vars['BR_1_RLOC']) \
.filter_ping_request(identifier=_pkt.icmpv6.echo.identifier) \
.must_next()
# 3. Router_1 receives the MPL packet containing an encapsulated ping
# packet to MA1, sent by Host, and unicasts a ping response packet back
# to Host.
pkts.filter_wpan_src64(vars['Router_1']) \
.filter_ipv6_dst(_pkt.ipv6.src) \
.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier) \
.must_next()
# 3a. Within MLR_TIMEOUT_MIN seconds of initial registration, Router_1
# re-registers for multicast address, MA1, at BR_1.
# Router_1 unicasts an MLR.req CoAP request to BR_1 as
# "coap://[<BR_1 RLOC or PBBR ALOC>]:MM/n/mr".
# The payload contains "IPv6Address TLV: MA1".
pkts.copy() \
.filter_wpan_src64(vars['Router_1']) \
.filter_ipv6_2dsts(vars['BR_1_RLOC'], PBBR_ALOC) \
.filter_coap_request('/n/mr') \
.filter(lambda p: p.thread_meshcop.tlv.ipv6_addr == [MA1] and
p.sniff_timestamp <= initial_registration_pkt.sniff_timestamp + consts.MLR_TIMEOUT_MIN) \
.must_next()
# 4. Within MLR_TIMEOUT_MIN seconds, Host sends a ping packet to the
# multicast address, MA1. The destination port 5683 is used for the UDP
# Multicast packet transmission.
_pkt = pkts.filter_eth_src(vars['Host_ETH']) \
.filter_ipv6_dst(MA1) \
.filter(lambda p: p.udp.length == UDP_HEADER_LENGTH + len('PING')
and p.udp.dstport == 5683) \
.must_next()
# 5. BR_1 forwards the UDP ping packet with multicast address, MA1, to
# its Thread Network encapsulated in an MPL packet.
pkts.filter_wpan_src64(vars['BR_1']) \
.filter_AMPLFMA(mpl_seed_id=vars['BR_1_RLOC']) \
.filter(lambda p: p.udp.length == _pkt.udp.length) \
.must_next()
# 6. Router_1 receives the ping packet.
# Use the port 5683 (CoAP port) to verify that the
# UDP Multicast packet is received.
pkts.filter_wpan_src64(vars['Router_1']) \
.filter(
lambda p: p.udp.length == _pkt.udp.length and p.udp.dstport == 5683) \
.must_next()
# 7. After (MLR_TIMEOUT_MIN+2) seconds, Host multicasts a ping packet to
# multicast address, MA1, on the backbone link.
_pkt = pkts.filter_eth_src(vars['Host_ETH']) \
.filter_ipv6_dst(MA1) \
.filter_ping_request() \
.must_next()
# 8. BR_1 does not forward the ping packet with multicast address, MA1,
# to its Thread Network.
pkts.filter_wpan_src64(vars['BR_1']) \
.filter_AMPLFMA(mpl_seed_id=vars['BR_1_RLOC']) \
.filter_ping_request(identifier=_pkt.icmpv6.echo.identifier) \
.must_not_next()
if __name__ == '__main__':
unittest.main()
| 39.089431
| 119
| 0.629992
| 7,165
| 0.745112
| 0
| 0
| 0
| 0
| 0
| 0
| 4,588
| 0.477121
|
b7d0fb3e2eab434c02f0ab81e51febbe5297c8ae
| 3,457
|
py
|
Python
|
senseye_cameras/input/camera_pylon.py
|
senseye-inc/senseye-cameras
|
9d9cdb95e64aaa8d08aa56bd9a79641263e65940
|
[
"BSD-3-Clause"
] | 5
|
2020-03-20T17:07:35.000Z
|
2022-01-25T23:48:52.000Z
|
senseye_cameras/input/camera_pylon.py
|
senseye-inc/senseye-cameras
|
9d9cdb95e64aaa8d08aa56bd9a79641263e65940
|
[
"BSD-3-Clause"
] | 5
|
2020-03-05T20:55:06.000Z
|
2022-03-24T22:41:56.000Z
|
senseye_cameras/input/camera_pylon.py
|
senseye-inc/senseye-cameras
|
9d9cdb95e64aaa8d08aa56bd9a79641263e65940
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import logging
try:
from pypylon import pylon
except:
pylon = None
from . input import Input
log = logging.getLogger(__name__)
# writes the frame number to bytes 8-11 of the image as a little-endian set of octets (least-significant octet first)
def encode_framenumber(np_image, n):
for i in range(4):
np_image[0][i+7] = n & 0xFF
n>>=8
# converts time from a float in seconds to an int64 in microseconds
# writes the time to the first 7 bytes of the image as a little-endian set of octets
def encode_timestamp(np_image, timestamp):
t = int(timestamp*1e6)
for i in range(7):
np_image[0][i] = t & 0xFF
t>>=8
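# A minimal decode sketch (not part of the original module) that reverses the
# little-endian layouts written by the two encoders above; the helper name is
# an illustrative assumption.
def decode_metadata(np_image):
    """Return (timestamp_seconds, frame_number) baked into the first 11 bytes."""
    t = 0
    for i in reversed(range(7)):  # byte 6 is most significant
        t = (t << 8) | int(np_image[0][i])
    n = 0
    for i in reversed(range(4)):  # byte 10 is most significant
        n = (n << 8) | int(np_image[0][i + 7])
    return t / 1e6, n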
class CameraPylon(Input):
'''
Camera that interfaces with pylon/basler cameras.
Args:
id (int): Id of the pylon camera.
config (dict): Configuration dictionary. Accepted keywords:
pfs (str): path to a pfs file.
encode_metadata (bool): whether to bake in timestamps/frame number into the frame.
'''
def __init__(self, id=0, config={}):
if pylon is None:
raise ImportError('Pylon failed to import. Pylon camera initialization failed.')
defaults = {
'pfs': None,
'encode_metadata': False,
'format': 'rawvideo',
}
Input.__init__(self, id=id, config=config, defaults=defaults)
self.read_count = 0
def configure(self):
'''
Pylon camera configuration. Requires the pylon camera to have been opened already.
The order of these statements is important.
Populates self.config with set values.
Logs camera start.
'''
if self.config.get('pfs', None):
pylon.FeaturePersistence.Load(self.config.get('pfs'), self.input.GetNodeMap())
self.config['pixel_format'] = self.input.PixelFormat.Value
self.config['gain'] = self.input.Gain.Value
self.config['exposure_time'] = self.input.ExposureTime.Value
self.config['res'] = (self.input.Width.Value, self.input.Height.Value)
self.config['width'] = self.input.Width.Value
self.config['height'] = self.input.Height.Value
self.config['fps'] = self.input.ResultingFrameRate.GetValue()
def open(self):
self.read_count = 0
devices = pylon.TlFactory.GetInstance().EnumerateDevices()
self.input = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(devices[self.id]))
self.input.Open()
self.configure()
self.input.StopGrabbing()
self.input.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
def read(self):
frame = None
now = None
if self.input:
try:
ret = self.input.RetrieveResult(100, pylon.TimeoutHandling_ThrowException)
if ret.IsValid():
frame = ret.GetArray()
now = time.time()
if self.config.get('encode_metadata'):
encode_timestamp(frame,now)
encode_framenumber(frame,self.read_count)
self.read_count+=1
except TypeError as e:
log.error(f"{str(self)} read error: {e}")
raise
finally:
ret.Release()
return frame, now
def close(self):
self.read_count = 0
if self.input and self.input.IsOpen():
self.input.Close()
self.input = None
| 33.563107
| 102
| 0.60486
| 2,817
| 0.814868
| 0
| 0
| 0
| 0
| 0
| 0
| 1,007
| 0.291293
|
b7d28e8d5b3bd12fe72a9a971fff5626e0a64791
| 3,100
|
py
|
Python
|
vise/tests/util/phonopy/test_phonopy_input.py
|
kumagai-group/vise
|
8adfe61ad8f31767ec562f02f271e2495f357cd4
|
[
"MIT"
] | 16
|
2020-07-14T13:14:05.000Z
|
2022-03-04T13:39:30.000Z
|
vise/tests/util/phonopy/test_phonopy_input.py
|
kumagai-group/vise
|
8adfe61ad8f31767ec562f02f271e2495f357cd4
|
[
"MIT"
] | 10
|
2021-03-15T20:47:45.000Z
|
2021-08-19T00:47:12.000Z
|
vise/tests/util/phonopy/test_phonopy_input.py
|
kumagai-group/vise
|
8adfe61ad8f31767ec562f02f271e2495f357cd4
|
[
"MIT"
] | 6
|
2020-03-03T00:42:39.000Z
|
2022-02-22T02:34:47.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from phonopy.interface.calculator import read_crystal_structure
from phonopy.structure.atoms import PhonopyAtoms
from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms
import numpy as np
def assert_same_phonopy_atoms(actual: PhonopyAtoms,
expected: PhonopyAtoms):
assert (actual.get_cell() == expected.get_cell()).all()
assert (actual.get_scaled_positions()
== expected.get_scaled_positions()).all()
assert actual.symbols == expected.symbols
def test_phonopy_atoms_behavior(sc_structure, tmpdir):
print(tmpdir)
tmpdir.chdir()
# actual = structure_to_phonopy_atoms(sc_structure)
sc_structure.to(fmt="poscar", filename="POSCAR")
a, _ = read_crystal_structure("POSCAR")
b = PhonopyAtoms(atoms=a)
print(type(a.get_cell()))
print(a.get_atomic_numbers())
assert_same_phonopy_atoms(a, b)
def test_structure_to_phonopy_atoms(sc_structure):
actual = structure_to_phonopy_atoms(sc_structure)
expected = PhonopyAtoms(symbols=["H"],
cell=np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]),
scaled_positions=np.array([[0.0, 0.0, 0.0]]))
assert_same_phonopy_atoms(actual, expected)
#
# def test_make_phonopy_input(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure,
# supercell_matrix=np.eye(3).tolist(),
# conventional_base=True)
# supercell_matrix = [[ 1., 1., 0.],
# [-1., 1., 0.],
# [ 0., 0., 1.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure)
# supercell_matrix = [[ 2., 2., 0.],
# [-2., 2., 0.],
# [ 0., 0., 2.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default_hexa():
# structure = Structure(Lattice.hexagonal(1.0, 2.0), species=["H"],
# coords=[[0.0]*3])
# actual = make_phonopy_input(unitcell=structure)
# supercell_matrix = [[2, -1, 0], [2, 1, 0], [0, 0, 2]]
# supercell = structure * supercell_matrix
# expected = PhonopyInput(unitcell=structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
| 41.333333
| 73
| 0.59
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,797
| 0.579677
|
b7d2c3d5b85f7571232ad665184ca7a2e111ef5a
| 1,419
|
py
|
Python
|
2020/day15.py
|
andypymont/adventofcode
|
912aa48fc5b31ec9202fb9654380991fc62afcd1
|
[
"MIT"
] | null | null | null |
2020/day15.py
|
andypymont/adventofcode
|
912aa48fc5b31ec9202fb9654380991fc62afcd1
|
[
"MIT"
] | null | null | null |
2020/day15.py
|
andypymont/adventofcode
|
912aa48fc5b31ec9202fb9654380991fc62afcd1
|
[
"MIT"
] | null | null | null |
"""
2020 Day 15
https://adventofcode.com/2020/day/15
"""
from collections import deque
from typing import Dict, Iterable, Optional
import aocd # type: ignore
class ElfMemoryGame:
def __init__(self, starting_numbers: Iterable[int]):
self.appearances: Dict[int, deque[int]] = {}
self.length = 0
for number in starting_numbers:
self.add(number)
def __len__(self) -> int:
return self.length
def next_number(self, previous: Optional[int] = None) -> int:
previous = previous or self.latest
appeared = self.appearances[previous]
return abs(appeared[1] - appeared[0])
def extend(self, length: int) -> None:
while self.length < length:
self.add(self.next_number())
def add(self, number: int) -> None:
if number in self.appearances:
self.appearances[number].append(self.length)
else:
self.appearances[number] = deque([self.length, self.length], maxlen=2)
self.length += 1
self.latest = number
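# Worked example from the puzzle statement: starting with 0,3,6 the spoken
# numbers run 0,3,6,0,3,3,1,0,4,0,... and the 2020th number is 436, e.g.:
# emg = ElfMemoryGame([0, 3, 6]); emg.extend(2020); assert emg.latest == 436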
def main() -> None:
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2020, day=15)
emg = ElfMemoryGame(map(int, data.split(",")))
emg.extend(2020)
print(f"Part 1: {emg.latest}")
emg.extend(30_000_000)
print(f"Part 2: {emg.latest}")
if __name__ == "__main__":
main()
| 25.8
| 82
| 0.621564
| 892
| 0.628612
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.148696
|
b7d37af2b6bf8f16d281543414e0b3b8888f7e5c
| 1,121
|
py
|
Python
|
src/spring-cloud/azext_spring_cloud/_validators_enterprise.py
|
SanyaKochhar/azure-cli-extensions
|
ff845c73e3110d9f4025c122c1938dd24a43cca0
|
[
"MIT"
] | 2
|
2021-03-23T02:34:41.000Z
|
2021-06-03T05:53:34.000Z
|
src/spring-cloud/azext_spring_cloud/_validators_enterprise.py
|
SanyaKochhar/azure-cli-extensions
|
ff845c73e3110d9f4025c122c1938dd24a43cca0
|
[
"MIT"
] | 21
|
2021-03-16T23:04:40.000Z
|
2022-03-24T01:45:54.000Z
|
src/spring-cloud/azext_spring_cloud/_validators_enterprise.py
|
SanyaKochhar/azure-cli-extensions
|
ff845c73e3110d9f4025c122c1938dd24a43cca0
|
[
"MIT"
] | 9
|
2021-03-11T02:59:39.000Z
|
2022-02-24T21:46:34.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, unused-argument, redefined-builtin
from azure.cli.core.azclierror import ClientRequestError
from ._util_enterprise import is_enterprise_tier
def only_support_enterprise(cmd, namespace):
if namespace.resource_group and namespace.service and not is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
raise ClientRequestError("'{}' only supports for Enterprise tier Spring instance.".format(namespace.command))
def not_support_enterprise(cmd, namespace):
if namespace.resource_group and namespace.service and is_enterprise_tier(cmd, namespace.resource_group, namespace.service):
raise ClientRequestError("'{}' doesn't support for Enterprise tier Spring instance.".format(namespace.command))
| 56.05
| 131
| 0.667261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.475468
|
b7d4dda1b3752a19244c734487e74c4425e170d8
| 8,796
|
py
|
Python
|
fluentql/function.py
|
RaduG/fluentql
|
653a77bb95b40724eb58744f5f8dbed9c88eaebd
|
[
"MIT"
] | 4
|
2020-04-15T10:50:03.000Z
|
2021-07-22T12:23:50.000Z
|
fluentql/function.py
|
RaduG/fluentql
|
653a77bb95b40724eb58744f5f8dbed9c88eaebd
|
[
"MIT"
] | 2
|
2020-05-24T08:54:56.000Z
|
2020-05-24T09:04:31.000Z
|
fluentql/function.py
|
RaduG/fluentql
|
653a77bb95b40724eb58744f5f8dbed9c88eaebd
|
[
"MIT"
] | null | null | null |
from typing import Any, TypeVar, Union
from types import MethodType, FunctionType
from .base_types import BooleanType, Constant, StringType, Collection, Referenceable
from .type_checking import TypeChecker
AnyArgs = TypeVar("AnyArgs")
NoArgs = TypeVar("NoArgs")
VarArgs = TypeVar("VarArgs")
T = TypeVar("T")
class WithOperatorSupport:
"""
Implements operator support.
"""
def __gt__(self, other):
return GreaterThan(self, other)
def __ge__(self, other):
return GreaterThanOrEqual(self, other)
def __lt__(self, other):
return LessThan(self, other)
def __le__(self, other):
return LessThanOrEqual(self, other)
def __eq__(self, other):
return Equals(self, other)
def __ne__(self, other):
return NotEqual(self, other)
def __add__(self, other):
return Add(self, other)
def __radd__(self, other):
return Add(other, self)
def __sub__(self, other):
return Subtract(self, other)
def __rsub__(self, other):
return Subtract(other, self)
def __mul__(self, other):
return Multiply(self, other)
def __rmul__(self, other):
return Multiply(other, self)
def __truediv__(self, other):
return Divide(self, other)
def __rtruediv__(self, other):
return Divide(other, self)
def __mod__(self, other):
return Modulo(self, other)
def __rmod__(self, other):
return Modulo(other, self)
def __and__(self, other):
return BitwiseAnd(self, other)
def __rand__(self, other):
return BitwiseAnd(other, self)
def __or__(self, other):
return BitwiseOr(self, other)
def __ror__(self, other):
return BitwiseOr(other, self)
def __xor__(self, other):
return BitwiseXor(self, other)
def __rxor__(self, other):
return BitwiseXor(other, self)
def __invert__(self):
return Not(self)
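# Illustrative usage: with this mixin, plain Python expressions build F nodes,
# e.g. `a > b` yields GreaterThan(a, b), `a + b * c` yields
# Add(a, Multiply(b, c)), and `~cond` yields Not(cond).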
class F(Referenceable):
def __init_subclass__(cls, **kwargs):
"""
Use init_subclass to map the arguments / return value based on type
annotations, instead of going hard at it with a metaclass.
Args:
cls (type):
**kwargs (dict):
"""
cls._process_annotations()
@classmethod
def _process_annotations(cls):
"""
Set __args__ and __returns__ attributes to cls. Those will be set to
the user annotations, if any, or will default to:
AnyArgs - for __args__
Any - for __returns__
Args:
cls (object):
"""
try:
annotations = {**cls.__annotations__}
except AttributeError:
annotations = {}
# Check for "returns"
if "returns" in annotations:
cls.__returns__ = annotations.pop("returns")
elif hasattr(cls, "returns"):
cls.__returns__ = cls.returns
else:
cls.__returns__ = Any
if len(annotations) == 0:
cls.__args__ = AnyArgs
elif len(annotations) == 1 and list(annotations.values())[0] is NoArgs:
cls.__args__ = NoArgs
else:
cls.__args__ = tuple(annotations.values())
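# e.g. for `class As(F): a: T; b: str; returns: T` (defined below) the
# `returns` annotation is popped into __returns__ and __args__ becomes
# (T, str); aggregate subclasses instead inherit the `returns` classmethod,
# which _get_return_type resolves per instance.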
def __init__(self, *args):
self._validate_args(args)
self.__values__ = args
self.__returns__ = self._get_return_type()
def _get_return_type(self):
# If __returns__ is a function, the result of it called
# on args is the actual return type
if isinstance(self.__returns__, (FunctionType, MethodType)):
# Replace F arg types with their return values
return self.__returns__(
tuple(self.__type_checker__._matched_types),
self.__type_checker__._type_var_mapping,
)
return self.__returns__
@property
def values(self):
return self.__values__
@classmethod
def new(cls, name):
"""
Returns a new subclass of cls, with the given name.
Args:
name (str):
Returns:
type
"""
return type(name, (cls,), {})
def _validate_args(self, args):
if self.__args__ is AnyArgs:
if len(args) == 0:
raise TypeError(f"{type(self).__name__} takes at least one argument")
# All expected args are Any
arg_types = [Any] * len(args)
elif self.__args__ is NoArgs:
if len(args) > 0:
raise TypeError(f"{type(self).__name__} takes no arguments")
return
elif len(self.__args__) != len(args):
raise TypeError(
f"{type(self).__name__} takes {len(self.__args__)} arguments, {len(args)} given"
)
else:
# Replace F arg types with their return values
arg_types = [
arg.__returns__ if issubclass(type(arg), F) else type(arg)
for arg in args
]
self.__type_checker__ = TypeChecker(arg_types, self.__args__)
self.__type_checker__.validate()
class ArithmeticF(WithOperatorSupport, F):
@classmethod
def returns(cls, matched_types, type_var_mapping):
"""
If both args are Constant, the return value is Constant. Otherwise, the
return value is Collection.
Args:
matched_types (tuple(type)): Matched argument types, in order
type_var_mapping (dict): Mapping of type variables to concrete types
Returns:
type
"""
constant_type = type_var_mapping[Constant][1]
if any(Collection in t.__mro__ for t in matched_types if hasattr(t, "__mro__")):
return Collection[constant_type]
return constant_type
class BooleanF(F):
@classmethod
def returns(cls, matched_types, type_var_mapping):
"""
If both args are BooleanType, the return value is BooleanType.
Otherwise, the return value is Collection.
Args:
matched_types (tuple(type)): Matched argument types, in order
type_var_mapping (dict): Mapping of type variables to concrete types
Returns:
type
"""
if any(Collection in t.__mro__ for t in matched_types if hasattr(t, "__mro__")):
return Collection[BooleanType]
return BooleanType
class AggregateF(WithOperatorSupport, F):
@classmethod
def returns(cls, matched_types, type_var_mapping):
try:
return type_var_mapping[Constant][1]
except KeyError:
return Any
class ComparisonF(F):
pass
class OrderF(F):
pass
class Add(ArithmeticF):
a: Union[Constant, Collection[Constant]]
b: Union[Constant, Collection[Constant]]
class Subtract(ArithmeticF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class Multiply(ArithmeticF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class Divide(ArithmeticF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class Modulo(ArithmeticF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class BitwiseOr(BooleanF):
a: Union[Collection[BooleanType], BooleanType]
b: Union[Collection[BooleanType], BooleanType]
class BitwiseAnd(BooleanF):
a: Union[Collection[BooleanType], BooleanType]
b: Union[Collection[BooleanType], BooleanType]
class BitwiseXor(BooleanF):
a: Union[Collection[BooleanType], BooleanType]
b: Union[Collection[BooleanType], BooleanType]
class Equals(BooleanF):
a: Union[Constant, Collection[Constant]]
b: Union[Constant, Collection[Constant]]
class LessThan(BooleanF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class LessThanOrEqual(BooleanF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class GreaterThan(BooleanF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class GreaterThanOrEqual(BooleanF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class NotEqual(BooleanF):
a: Union[Constant, Collection[Any]]
b: Union[Constant, Collection[Any]]
class Not(BooleanF):
a: Union[BooleanType, Collection[BooleanType]]
class As(F):
a: T
b: str
returns: T
class TableStar(F):
a: Referenceable
returns: Any
class Star(F):
a: NoArgs
returns: Any
class Like(BooleanF):
a: Collection[StringType]
b: str
class In(BooleanF):
a: Collection[Any]
b: Any
class Max(AggregateF):
a: Collection[Constant]
class Min(AggregateF):
a: Collection[Constant]
class Sum(AggregateF):
a: Collection[Constant]
class Asc(OrderF):
a: Collection[Any]
returns: Collection[Any]
class Desc(OrderF):
a: Collection[Any]
returns: Collection[Any]
| 23.393617
| 96
| 0.624034
| 8,389
| 0.953729
| 0
| 0
| 2,432
| 0.276489
| 0
| 0
| 1,631
| 0.185425
|
b7d5141df884819f6f2e7164679f65c6fbc05ccf
| 5,741
|
py
|
Python
|
trainer.py
|
tkuboi/my-Punctuator
|
17c2c43f3397387b7c21a8ef25584c4fdab73f1b
|
[
"MIT"
] | 3
|
2018-11-29T02:12:12.000Z
|
2020-01-15T10:52:38.000Z
|
trainer.py
|
tkuboi/my-Punctuator
|
17c2c43f3397387b7c21a8ef25584c4fdab73f1b
|
[
"MIT"
] | 3
|
2020-01-15T10:52:25.000Z
|
2020-05-03T17:24:56.000Z
|
trainer.py
|
tkuboi/my-Punctuator
|
17c2c43f3397387b7c21a8ef25584c4fdab73f1b
|
[
"MIT"
] | 5
|
2018-11-19T13:37:31.000Z
|
2021-06-25T07:03:38.000Z
|
"""This script is for training and evaluating a model."""
import sys
import os
import traceback
import numpy as np
from functools import partial
from utils import *
from punctuator import Punctuator
from bidirectional_gru_with_gru import BidirectionalGruWithGru
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Embedding, RepeatVector, Lambda, Dot, Multiply, Concatenate, Permute
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape, Flatten, ThresholdedReLU
from keras.optimizers import Adam
EMBEDDING_FILE = 'data/glove.6B.50d.txt'
MODEL_FILE = 'data/model.json'
WEIGHTS_FILE = 'data/model.h5'
TEXT_FILE = 'data/utterances.txt'
BATCH = 128
EPOCH = 1000
DEV_SIZE = 100
def load_text_data(textfile):
"""Read a text file containing lines of text.
Args:
textfile: string representing a path name to a file
Returns:
list of words
"""
words = []
with open(textfile, 'r') as lines:
for line in lines:
words.extend(line.split())
return words
def main():
"""Train a model using lines of text contained in a file
and evaluate the model.
"""
#read glove vecs
#words, word_to_index, index_to_word, word_to_vec_map = read_glove_vecs(EMBEDDING_FILE)
#create word embedding matrix
#embedding_matrix = create_emb_matrix(word_to_index, word_to_vec_map)
embedding_matrix = None
#print('shape of embedding_matrix:', embedding_matrix.shape)
#load trainig text from a file
utterances = load_text_data(TEXT_FILE)
punctuator = Punctuator(None, None)
X, Y = punctuator.create_training_data(utterances[:3], False)
print(X.shape)
print(X.shape[1])
print(Y.shape)
#if a model already exists, load the model
if os.path.isfile(MODEL_FILE) and False:
punctuator.load_model(MODEL_FILE)
else:
model = BidirectionalGruWithGru.create_model(
input_shape=(X.shape[1], X.shape[2], ), embedding_matrix=None,
vocab_len=0, n_d1=128, n_d2=128, n_c=len(punctuator.labels))
print(model.summary())
punctuator.__model__ = model
#if the model has been already trained, use the pre-trained weights
if os.path.isfile(WEIGHTS_FILE):
punctuator.load_weights(WEIGHTS_FILE)
for i in range(100):
shuffle(utterances)
print(utterances[0])
#create an instance of Punctuator and create training data
X, Y = punctuator.create_training_data(utterances[:300000], False)
#shuffle the training data
shuffle(X,Y)
denom_Y = Y.swapaxes(0,1).sum((0,1))
print ('Summary of Y:', denom_Y)
print('shape of X:', X.shape)
print(X[0:10])
print('shape of Y:', Y.shape)
print(Y[0:10])
#define optimizer and compile the model
opt = Adam(lr=0.007, beta_1=0.9, beta_2=0.999, decay=0.01)
punctuator.compile(opt, loss='categorical_crossentropy', metrics=['accuracy'])
#split the training data into training set, test set, and dev set
t_size = int(X.shape[0] * 0.9)
train_X, train_Y = X[:t_size], Y[:t_size]
test_X, test_Y = X[t_size:-DEV_SIZE], Y[t_size:-DEV_SIZE]
dev_X, dev_Y = X[-DEV_SIZE:], Y[-DEV_SIZE:]
print (train_Y.swapaxes(0,1).sum((0,1)))
print (test_Y.swapaxes(0,1).sum((0,1)))
#train the model
punctuator.fit([train_X], train_Y, batch_size = BATCH,
epochs=EPOCH)
punctuator.save_model(MODEL_FILE)
punctuator.save_weights(WEIGHTS_FILE)
#evaluate the model on the dev set (or the test set)
for i,example in enumerate(dev_X):
prediction = punctuator.predict(example)
punctuator.check_result(prediction, dev_Y[i])
#manually evaluate the model on an example
examples = ["good morning chairman who I saw and members of the committee it's my pleasure to be here today I'm Elizabeth Ackles director of the office of rate payer advocates and I appreciate the chance to present on oris key activities from 2017 I have a short presentation and I'm going to move through it really quickly because you've had a long morning already and be happy to answer any questions that you have", "this was a measure that first was introduced back in 1979 known as the International bill of rights for women it is the first and only international instrument that comprehensively addresses women's rights within political cultural economic social and family life", "I'm Elizabeth Neumann from the San Francisco Department on the status of women Sita is not just about naming equal rights for women and girls it provides a framework to identify and address inequality", "we have monitored the demographics of commissioners and board members in San Francisco to assess the equality of political opportunities and after a decade of reports women are now half of appointees but white men are still over-represented and Asian and Latina men and women are underrepresented", "when the city and county faced a 300 million dollar budget deficit in 2003 a gender analysis of budget cuts by city departments identified the disproportionate effect on women and particularly women of color in the proposed layoffs and reduction of services"]
for example in examples:
words = example.split()
x = punctuator.create_live_data(words)
print(x)
for s in x:
print(s)
prediction = punctuator.predict(s)
result = punctuator.add_punctuation(prediction, words)
print(result)
if __name__ == "__main__":
main()
| 43.492424
| 1,454
| 0.708413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,600
| 0.452883
|
b7d54fe8e9a77f05bf236b9a737834d1a8f3821a
| 5,719
|
py
|
Python
|
gqn_v2/gqn_predictor.py
|
goodmattg/tf-gqn
|
a2088761f11a9806500dcaf28edc28ecd7fc514e
|
[
"Apache-2.0"
] | null | null | null |
gqn_v2/gqn_predictor.py
|
goodmattg/tf-gqn
|
a2088761f11a9806500dcaf28edc28ecd7fc514e
|
[
"Apache-2.0"
] | null | null | null |
gqn_v2/gqn_predictor.py
|
goodmattg/tf-gqn
|
a2088761f11a9806500dcaf28edc28ecd7fc514e
|
[
"Apache-2.0"
] | null | null | null |
"""
Contains a canned predictor for a GQN.
"""
import os
import json
import numpy as np
import tensorflow as tf
from .gqn_graph import gqn_draw
from .gqn_params import create_gqn_config
def _normalize_pose(pose):
"""
Converts a camera pose into the GQN format.
Args:
pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
Returns:
[x, y, z, cos(yaw), sin(yaw), cos(pitch), sin(pitch)]
"""
norm_pose = np.zeros((7, ))
norm_pose[0:3] = pose[0:3]
norm_pose[3] = np.cos(np.deg2rad(pose[3]))
norm_pose[4] = np.sin(np.deg2rad(pose[3]))
norm_pose[5] = np.cos(np.deg2rad(pose[4]))
norm_pose[6] = np.sin(np.deg2rad(pose[4]))
# print("Normalized pose: %s -> %s" % (pose, norm_pose)) # DEBUG
return norm_pose
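# e.g. pose [0.5, 0.0, -0.25, 90, 0] becomes
# [0.5, 0.0, -0.25, cos(90)=0, sin(90)=1, cos(0)=1, sin(0)=0].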
class GqnViewPredictor(object):
"""
GQN-based view predictor.
"""
def __init__(self, model_dir):
"""
Instantiates a GqnViewPredictor from a saved checkpoint.
Args:
model_dir: Path to a GQN model. Must contain 'gqn_config.json', 'checkpoint'
and 'model.ckpt-nnnnnn'.
Returns:
GqnViewPredictor
"""
# load gqn_config.json
with open(os.path.join(model_dir, 'gqn_config.json'), 'r') as f:
gqn_config_dict = json.load(f)
self._cfg = create_gqn_config(gqn_config_dict)
self._ctx_size = self._cfg.CONTEXT_SIZE
self._dim_pose = self._cfg.POSE_CHANNELS
self._dim_img_h = self._cfg.IMG_HEIGHT
self._dim_img_w = self._cfg.IMG_WIDTH
self._dim_img_c = self._cfg.IMG_CHANNELS
# create input placeholders
self._ph_ctx_poses = tf.compat.v1.placeholder(
shape=[1, self._ctx_size, self._dim_pose],
dtype=tf.float32)
self._ph_ctx_frames = tf.compat.v1.placeholder(
shape=[1, self._ctx_size, self._dim_img_h, self._dim_img_w, self._dim_img_c],
dtype=tf.float32)
self._ph_query_pose = tf.compat.v1.placeholder(
shape=[1, self._dim_pose], dtype=tf.float32)
self._ph_tgt_frame = tf.compat.v1.placeholder( # just used for graph construction
shape=[1, self._dim_img_h, self._dim_img_w, self._dim_img_c],
dtype=tf.float32)
# re-create gqn graph
self._net, self._ep = gqn_draw(
query_pose=self._ph_query_pose,
context_frames=self._ph_ctx_frames,
context_poses=self._ph_ctx_poses,
target_frame=self._ph_tgt_frame,
model_params=self._cfg,
is_training=False)
print(">>> Instantiated GQN:") # DEBUG
for name, ep in self._ep.items():
print("\t%s\t%s" % (name, ep))
# create session
self._sess = tf.compat.v1.InteractiveSession()
# load snapshot
saver = tf.compat.v1.train.Saver()
ckpt_path = tf.train.latest_checkpoint(model_dir)
saver.restore(self._sess, save_path=ckpt_path)
print(">>> Restored parameters from: %s" % (ckpt_path, )) # DEBUG
# create data placeholders
self._context_frames = [] # list of RGB frames [H, W, C]
self._context_poses = [] # list of normalized poses [x, y, z, cos(yaw), sin(yaw), cos(pitch), sin(pitch)]
@property
def sess(self):
"""Expose the underlying tensorflow session."""
return self._sess
@property
def frame_resolution(self):
"""Returns the video resolution as (H, W, C)."""
return (self._dim_img_h, self._dim_img_w, self._dim_img_c)
def add_context_view(self, frame: np.ndarray, pose: np.ndarray):
"""
Adds a (frame, pose) tuple as context point for view interpolation.
Args:
frame: [H, W, C], in [0, 1]
pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
"""
assert (frame >= 0.0).all() and (frame <= 1.0).all(), \
"The context frame is not normalized in [0.0, 1.0] (float)."
assert frame.shape == self.frame_resolution, \
"The context frame's shape %s does not fit the model's shape %s." % \
(frame.shape, self.frame_resolution)
assert pose.shape == (self._dim_pose, ) or pose.shape == (5, ), \
"The pose's shape %s does not match the specification (either %s or %s)." % \
(pose.shape, self._dim_pose, (5, ))
if pose.shape == (5, ): # assume un-normalized pose
pose = _normalize_pose(pose)
# add frame-pose pair to context
self._context_frames.append(frame)
self._context_poses.append(pose)
def clear_context(self):
"""Clears the current context."""
self._context_frames.clear()
self._context_poses.clear()
def render_query_view(self, pose: np.ndarray):
"""
Renders the scene from the given camera pose.
Args:
pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
"""
assert len(self._context_frames) >= self._ctx_size \
and len(self._context_poses) >= self._ctx_size, \
"Not enough context points available. Required %d. Given: %d" % \
(self._ctx_size, min(len(self._context_frames), len(self._context_poses)))
assert pose.shape == (self._dim_pose, ) or pose.shape == (5, ), \
"The pose's shape %s does not match the specification (either %s or %s)." % \
(pose.shape, self._dim_pose, (5, ))
if pose.shape == (5, ): # assume un-normalized pose
pose = _normalize_pose(pose)
ctx_frames = np.expand_dims(
np.stack(self._context_frames[-self._ctx_size:]), axis=0)
ctx_poses = np.expand_dims(
np.stack(self._context_poses[-self._ctx_size:]), axis=0)
query_pose = np.expand_dims(pose, axis=0)
feed_dict = {
self._ph_query_pose : query_pose,
self._ph_ctx_frames : ctx_frames,
self._ph_ctx_poses : ctx_poses
}
[pred_frame] = self._sess.run([self._net], feed_dict=feed_dict)
pred_frame = np.clip(pred_frame, a_min=0.0, a_max=1.0)
return pred_frame
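# Usage sketch (paths and data are illustrative assumptions):
# predictor = GqnViewPredictor('/path/to/model_dir')
# for frame, pose in context_views:            # frame: [H, W, C] in [0, 1]
#     predictor.add_context_view(frame, pose)  # pose: [x, y, z, yaw, pitch]
# rendered = predictor.render_query_view(np.array([0.0, 0.0, 0.0, 45.0, 10.0]))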
| 36.660256
| 110
| 0.652736
| 4,957
| 0.86676
| 0
| 0
| 256
| 0.044763
| 0
| 0
| 1,876
| 0.328029
|
b7d6284562e6fc98442dc3568881e4543f4597b6
| 6,054
|
py
|
Python
|
mamba/post_solve_handling.py
|
xhochy/mamba
|
249546a95abf358f116cc1b546bfb51e427001fd
|
[
"BSD-3-Clause"
] | null | null | null |
mamba/post_solve_handling.py
|
xhochy/mamba
|
249546a95abf358f116cc1b546bfb51e427001fd
|
[
"BSD-3-Clause"
] | null | null | null |
mamba/post_solve_handling.py
|
xhochy/mamba
|
249546a95abf358f116cc1b546bfb51e427001fd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2019, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
from conda.base.constants import DepsModifier, UpdateModifier
from conda._vendor.boltons.setutils import IndexedSet
from conda.core.prefix_data import PrefixData
from conda.models.prefix_graph import PrefixGraph
from conda._vendor.toolz import concatv
from conda.models.match_spec import MatchSpec
def post_solve_handling(context, prefix_data, final_precs, specs_to_add, specs_to_remove):
# Special case handling for various DepsModifier flags.
if context.deps_modifier == DepsModifier.NO_DEPS:
# In the NO_DEPS case, we need to start with the original list of packages in the
# environment, and then only modify packages that match specs_to_add or
# specs_to_remove.
#
# Help information notes that use of NO_DEPS is expected to lead to broken
# environments.
_no_deps_solution = IndexedSet(prefix_data.iter_records())
only_remove_these = set(prec
for spec in specs_to_remove
for prec in _no_deps_solution
if spec.match(prec))
_no_deps_solution -= only_remove_these
only_add_these = set(prec
for spec in specs_to_add
for prec in final_precs
if spec.match(prec))
remove_before_adding_back = set(prec.name for prec in only_add_these)
_no_deps_solution = IndexedSet(prec for prec in _no_deps_solution
if prec.name not in remove_before_adding_back)
_no_deps_solution |= only_add_these
# ssc.solution_precs = _no_deps_solution
solution_precs = _no_deps_solution
return solution_precs, specs_to_add, specs_to_remove
# TODO: check if solution is satisfiable, and emit warning if it's not
elif (context.deps_modifier == DepsModifier.ONLY_DEPS
and context.update_modifier != UpdateModifier.UPDATE_DEPS):
# Using a special instance of PrefixGraph to remove youngest child nodes that match
# the original specs_to_add. It's important to remove only the *youngest* child nodes,
# because a typical use might be `conda install --only-deps python=2 flask`, and in
# that case we'd want to keep python.
#
# What are we supposed to do if flask was already in the environment?
# We can't be removing stuff here that's already in the environment.
#
# What should be recorded for the user-requested specs in this case? Probably all
# direct dependencies of flask.
graph = PrefixGraph(final_precs, specs_to_add)
removed_nodes = graph.remove_youngest_descendant_nodes_with_specs()
specs_to_add = set(specs_to_add)
specs_to_add_names = set((s.name for s in specs_to_add))
for prec in removed_nodes:
for dep in prec.depends:
dep = MatchSpec(dep)
if dep.name not in specs_to_add_names:
specs_to_add.add(dep)
# unfreeze
specs_to_add = frozenset(specs_to_add)
# Add back packages that are already in the prefix.
specs_to_remove_names = set(spec.name for spec in specs_to_remove)
add_back = tuple(prefix_data.get(node.name, None) for node in removed_nodes
if node.name not in specs_to_remove_names)
solution_precs = tuple(
PrefixGraph(concatv(graph.graph, filter(None, add_back))).graph
)
return solution_precs, specs_to_add, specs_to_remove
return final_precs, specs_to_add, specs_to_remove
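# Illustrative call shape (the caller and `prefix_path` are assumptions; the
# argument names mirror this function's signature):
# solution_precs, specs_to_add, specs_to_remove = post_solve_handling(
#     context, PrefixData(prefix_path), final_precs, specs_to_add, specs_to_remove)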
# # TODO: check if solution is satisfiable, and emit warning if it's not
# elif ssc.update_modifier == UpdateModifier.UPDATE_DEPS:
# # Here we have to SAT solve again :( It's only now that we know the dependency
# # chain of specs_to_add.
# #
# # UPDATE_DEPS is effectively making each spec in the dependency chain a user-requested
# # spec. We don't modify pinned_specs, track_features_specs, or specs_to_add. For
# # all other specs, we drop all information but name, drop target, and add them to
# # the specs_to_add that gets recorded in the history file.
# #
# # It's like UPDATE_ALL, but only for certain dependency chains.
# graph = PrefixGraph(ssc.solution_precs)
# update_names = set()
# for spec in specs_to_add:
# node = graph.get_node_by_name(spec.name)
# update_names.update(ancest_rec.name for ancest_rec in graph.all_ancestors(node))
# specs_map = {name: MatchSpec(name) for name in update_names}
# # Remove pinned_specs and any python spec (due to major-minor pinning business rule).
# # Add in the original specs_to_add on top.
# for spec in ssc.pinned_specs:
# specs_map.pop(spec.name, None)
# if "python" in specs_map:
# python_rec = prefix_data.get("python")
# py_ver = ".".join(python_rec.version.split(".")[:2]) + ".*"
# specs_map["python"] = MatchSpec(name="python", version=py_ver)
# specs_map.update({spec.name: spec for spec in specs_to_add})
# new_specs_to_add = tuple(itervalues(specs_map))
# # It feels wrong/unsafe to modify this instance, but I guess let's go with it for now.
# specs_to_add = new_specs_to_add
# ssc.solution_precs = self.solve_final_state(
# update_modifier=UpdateModifier.UPDATE_SPECS,
# deps_modifier=ssc.deps_modifier,
# prune=ssc.prune,
# ignore_pinned=ssc.ignore_pinned,
# force_remove=ssc.force_remove
# )
# ssc.prune = False
# if ssc.prune:
# graph = PrefixGraph(ssc.solution_precs, final_environment_specs)
# graph.prune()
# ssc.solution_precs = tuple(graph.graph)
# return ssc
| 47.669291
| 96
| 0.652296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,218
| 0.531549
|
b7d668041de4ae36e76a177a55158ac9e8eab418
| 264
|
py
|
Python
|
Young Physicist.py
|
techonair/Codeforces
|
1f854424e2de69ea4fdf7c6cde8ab04eddfb4566
|
[
"MIT"
] | null | null | null |
Young Physicist.py
|
techonair/Codeforces
|
1f854424e2de69ea4fdf7c6cde8ab04eddfb4566
|
[
"MIT"
] | null | null | null |
Young Physicist.py
|
techonair/Codeforces
|
1f854424e2de69ea4fdf7c6cde8ab04eddfb4566
|
[
"MIT"
] | null | null | null |
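# Note: despite the filename, this snippet solves the "Nearly Lucky Number"
# problem: count the lucky digits (4 and 7) in the input, then print YES iff
# that count itself consists only of lucky digits.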
num = input()
lucky = 0
for i in num:
if i == '4' or i == '7':
lucky += 1
counter = 0
for c in str(lucky):
if c == '4' or c == '7':
counter += 1
if counter == len(str(lucky)):
print("YES")
else:
print("NO")
| 11
| 30
| 0.431818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.079545
|
b7d7bf07253855c146dc1edf490b5b90c54ec05e
| 477
|
py
|
Python
|
snakebids/utils/__init__.py
|
tkkuehn/snakebids
|
641026ea91c84c4403f0a654d2aaf2bfa50eaa19
|
[
"MIT"
] | null | null | null |
snakebids/utils/__init__.py
|
tkkuehn/snakebids
|
641026ea91c84c4403f0a654d2aaf2bfa50eaa19
|
[
"MIT"
] | null | null | null |
snakebids/utils/__init__.py
|
tkkuehn/snakebids
|
641026ea91c84c4403f0a654d2aaf2bfa50eaa19
|
[
"MIT"
] | null | null | null |
from snakebids.utils.output import (
Mode,
get_time_hash,
prepare_output,
retrofit_output,
write_config_file,
write_output_mode,
)
from snakebids.utils.snakemake_io import (
glob_wildcards,
regex,
update_wildcard_constraints,
)
__all__ = [
"Mode",
"get_time_hash",
"glob_wildcards",
"prepare_output",
"regex",
"retrofit_output",
"update_wildcard_constraints",
"write_config_file",
"write_output_mode",
]
| 18.346154
| 42
| 0.681342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 144
| 0.301887
|
b7d83061ac773421e6029dc4c038d3f9bc4b0679
| 659
|
py
|
Python
|
examples/custom_renderer/custom_renderer.py
|
victorbenichoux/vizno
|
87ed98f66914a27e4b71d835734ca2a17a09412f
|
[
"MIT"
] | 5
|
2020-12-02T08:46:06.000Z
|
2022-01-15T12:58:27.000Z
|
examples/custom_renderer/custom_renderer.py
|
victorbenichoux/vizno
|
87ed98f66914a27e4b71d835734ca2a17a09412f
|
[
"MIT"
] | null | null | null |
examples/custom_renderer/custom_renderer.py
|
victorbenichoux/vizno
|
87ed98f66914a27e4b71d835734ca2a17a09412f
|
[
"MIT"
] | null | null | null |
import pydantic
from vizno.renderers import ContentConfiguration, render
from vizno.report import Report
class CustomObject(pydantic.BaseModel):
parameter: int
class CustomRenderConfiguration(ContentConfiguration):
parameter: int
@render.register
def _(obj: CustomObject):
return CustomRenderConfiguration(
component="MyCustomComponent",
component_module="./my_renderer.js",
parameter=obj.parameter,
)
r = Report()
r.widget(CustomObject(parameter=10))
r.render("./output")
r.widget(
CustomObject(parameter=1000),
name="It works with a name",
description="and a description",
)
r.render("./output")
| 19.969697
| 56
| 0.728376
| 131
| 0.198786
| 0
| 0
| 203
| 0.308042
| 0
| 0
| 98
| 0.14871
|
b7d854946bf40e07210624df5e0576dbd5f15fb1
| 945
|
py
|
Python
|
coregent/net/core.py
|
landoffire/coregent
|
908aaacbb7b2b9d8ea044d47b9518e8914dad08b
|
[
"Apache-2.0"
] | 1
|
2021-04-25T07:26:07.000Z
|
2021-04-25T07:26:07.000Z
|
coregent/net/core.py
|
neurite-interactive/coregent
|
908aaacbb7b2b9d8ea044d47b9518e8914dad08b
|
[
"Apache-2.0"
] | null | null | null |
coregent/net/core.py
|
neurite-interactive/coregent
|
908aaacbb7b2b9d8ea044d47b9518e8914dad08b
|
[
"Apache-2.0"
] | 2
|
2021-06-12T23:00:12.000Z
|
2021-06-12T23:01:57.000Z
|
import abc
import collections.abc
import socket
__all__ = ['get_socket_type', 'get_server_socket', 'get_client_socket',
'SocketReader', 'SocketWriter', 'JSONReader', 'JSONWriter']
def get_socket_type(host=None, ip_type=None):
if ip_type is not None:
return ip_type
if host and ':' in host:
return socket.AF_INET6
return socket.AF_INET
def get_server_socket(host, port, ip_type=None):
sock = socket.socket(get_socket_type(host, ip_type))
sock.bind((host, port))
return sock
def get_client_socket(host, port, ip_type=None):
sock = socket.socket(get_socket_type(host, ip_type))
sock.connect((host, port))
return sock
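# e.g. get_server_socket('::1', 8080) binds an AF_INET6 socket, while
# get_client_socket('127.0.0.1', 8080) connects over AF_INET; the address
# family is inferred from the host string by get_socket_type().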
class SocketReader(collections.abc.Iterator):
@abc.abstractmethod
def close(self):
...
class SocketWriter(abc.ABC):
@abc.abstractmethod
def send(self, message):
...
@abc.abstractmethod
def close(self):
...
| 21
| 71
| 0.671958
| 253
| 0.267725
| 0
| 0
| 164
| 0.173545
| 0
| 0
| 110
| 0.116402
|
b7d90dcc48241b77ca82bd93f406aefe69d173b9
| 360
|
py
|
Python
|
hackdayproject/urls.py
|
alstn2468/Naver_Campus_Hackday_Project
|
e8c3b638638182ccb8b4631c03cf5cb153c7278a
|
[
"MIT"
] | 1
|
2019-11-15T05:03:54.000Z
|
2019-11-15T05:03:54.000Z
|
hackdayproject/urls.py
|
alstn2468/Naver_Campus_Hackday_Project
|
e8c3b638638182ccb8b4631c03cf5cb153c7278a
|
[
"MIT"
] | null | null | null |
hackdayproject/urls.py
|
alstn2468/Naver_Campus_Hackday_Project
|
e8c3b638638182ccb8b4631c03cf5cb153c7278a
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from django.contrib import admin
import hackdayproject.main.urls as main_urls
import hackdayproject.repo.urls as repo_urls
urlpatterns = [
path('admin/', admin.site.urls),
path('oauth/', include('social_django.urls', namespace='social')),
path('', include(main_urls)),
path('repo/', include(repo_urls))
]
| 30
| 70
| 0.727778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.147222
|
b7d98d9548c561ff4d20a9c30014735028dc693b
| 19,134
|
py
|
Python
|
tests/test_ciftify_recon_all.py
|
lgrennan/ciftify
|
8488423bd081370614b676a2e1d1a8dbfd9aba1c
|
[
"MIT"
] | null | null | null |
tests/test_ciftify_recon_all.py
|
lgrennan/ciftify
|
8488423bd081370614b676a2e1d1a8dbfd9aba1c
|
[
"MIT"
] | null | null | null |
tests/test_ciftify_recon_all.py
|
lgrennan/ciftify
|
8488423bd081370614b676a2e1d1a8dbfd9aba1c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import unittest
import logging
import importlib
import copy
import os
from mock import patch
from nose.tools import raises
logging.disable(logging.CRITICAL)
ciftify_recon_all = importlib.import_module('ciftify.bin.ciftify_recon_all')
class ConvertFreesurferSurface(unittest.TestCase):
meshes = ciftify_recon_all.define_meshes('/somewhere/hcp/subject_1',
"164", ["32"], '/tmp/temp_dir', False)
@patch('ciftify.bin.ciftify_recon_all.run')
def test_secondary_type_option_adds_to_set_structure_command(self, mock_run):
secondary_type = 'GRAY_WHITE'
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
surface_secondary_type=secondary_type)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
set_structure_present = False
for item in arg_list:
args = item[0][0]
if '-set-structure' in args:
set_structure_present = True
assert '-surface-secondary-type' in args
assert secondary_type in args
        # If this fails, the wb_command -set-structure call is not being made
        # at all. It is expected at least once regardless of the
        # secondary-type option.
assert set_structure_present
@patch('ciftify.bin.ciftify_recon_all.run')
def test_secondary_type_not_set_if_option_not_used(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
set_structure_present = False
for item in arg_list:
args = item[0][0]
if '-set-structure' in args:
set_structure_present = True
assert '-surface-secondary-type' not in args
        # If this fails, the wb_command -set-structure call is not being made
        # at all. It is expected at least once regardless of the
        # secondary-type option.
assert set_structure_present
@patch('ciftify.bin.ciftify_recon_all.run')
def test_wbcommand_surface_apply_affine_called_when_cras_option_set(self,
mock_run):
cras_file = '/somewhere/cras.mat'
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
cras_mat=cras_file)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
surface_apply_calls = 0
for item in arg_list:
args = item[0][0]
if '-surface-apply-affine' in args and cras_file in args:
surface_apply_calls += 1
# The wb_command -surface-apply-affine command should be run once for
# each hemisphere
assert surface_apply_calls == 2
@patch('ciftify.bin.ciftify_recon_all.run')
def test_no_wbcommand_added_when_cras_option_not_set(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
surface_apply_calls = 0
for item in arg_list:
args = item[0][0]
if '-surface-apply-affine' in args:
surface_apply_calls += 1
assert surface_apply_calls == 0
@patch('ciftify.bin.ciftify_recon_all.run')
def test_add_to_spec_option_adds_wbcommand_call(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
add_to_spec=True)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
spec_added_calls = 0
for item in arg_list:
args = item[0][0]
if '-add-to-spec-file' in args:
spec_added_calls += 1
# Should add one call for each hemisphere
assert spec_added_calls == 2
@patch('ciftify.bin.ciftify_recon_all.run')
def test_add_to_spec_option_not_present_when_option_not_set(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
add_to_spec=False)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
spec_added_calls = 0
for item in arg_list:
args = item[0][0]
if '-add-to-spec-file' in args:
spec_added_calls += 1
assert spec_added_calls == 0
class CreateRegSphere(unittest.TestCase):
@patch('ciftify.bin.ciftify_recon_all.run_MSMSulc_registration')
@patch('ciftify.bin.ciftify_recon_all.run_fs_reg_LR')
def test_reg_sphere_is_not_set_to_none_for_any_mode(self, mock_fs_reg,
mock_msm_reg):
"""
Should fail if MSMSulc registration is implemented without supplying a
value for reg_sphere
"""
# settings stub, to allow tests to be written.
class Settings(object):
def __init__(self, name):
self.high_res = 999
self.reg_name = name
self.ciftify_data_dir = '/somedir/'
self.msm_config = None
# Test reg_sphere set when in FS mode
settings = Settings('FS')
meshes = {'AtlasSpaceNative' : ''}
subject_id = 'some_id'
reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
assert reg_sphere is not None
# Test reg_sphere set when in MSMSulc mode
settings = Settings('MSMSulc')
reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
assert reg_sphere is not None
class CopyAtlasRoiFromTemplate(unittest.TestCase):
@patch('ciftify.bin.ciftify_recon_all.link_to_template_file')
def test_does_nothing_when_roi_src_does_not_exist(self, mock_link):
hcp_dir = '/somepath/hcp'
hcp_templates_dir = '/someotherpath/ciftify/data'
mesh_settings = {'meshname' : 'some_mesh'}
subject_id = 'some_id'
ciftify_recon_all.copy_atlas_roi_from_template(hcp_dir, hcp_templates_dir,
subject_id, mesh_settings)
assert mock_link.call_count == 0
class DilateAndMaskMetric(unittest.TestCase):
@patch('ciftify.bin.ciftify_recon_all.run')
def test_does_nothing_when_dscalars_map_doesnt_mask_medial_wall(self,
mock_run):
# Stubs to allow testing
dscalars = {'some_map' : {'mask_medialwall' : False}}
mesh = {'tmpdir' : '/tmp/temp_dir',
'meshname' : 'some_mesh'}
ciftify_recon_all.dilate_and_mask_metric('some_id', mesh, dscalars)
assert mock_run.call_count == 0
class TestSettings(unittest.TestCase):
arguments = {'--hcp-data-dir' : '/somepath/pipelines/hcp',
'--fs-subjects-dir' : '/somepath/pipelines/freesurfer',
'--resample-LowRestoNative' : False,
'<Subject>' : 'STUDY_SITE_ID_01',
'--settings-yaml' : None,
'--T2': False,
'--MSMSulc': False,
'--MSM-config': None}
yaml_config = {'high_res' : "164",
'low_res' : ["32"],
'grayord_res' : [2],
'dscalars' : {},
'registration' : {'src_dir' : 'T1w',
'dest_dir' : 'MNINonLinear',
'xfms_dir' : 'MNINonLinear/xfms'},
'FSL_fnirt' : {'2mm' : {'FNIRTConfig' : 'etc/flirtsch/T1_2_MNI152_2mm.cnf'}}}
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_fs_root_dir_set_to_user_value_when_given(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
assert settings.fs_root_dir == self.arguments['--fs-subjects-dir']
@raises(SystemExit)
@patch('ciftify.config.find_freesurfer_data')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_when_no_fs_dir_given_and_cannot_find_shell_value(self,
mock_ciftify, mock_fsl, mock_exists, mock_fs):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
# work with a deep copy of arguments to avoid modifications having any
# effect on later tests
args_copy = copy.deepcopy(self.arguments)
args_copy['--fs-subjects-dir'] = None
# Just in case the shell environment has the variable set...
mock_fs.return_value = None
settings = ciftify_recon_all.Settings(args_copy)
# Should never reach this line
assert False
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_fsl_dir_cannot_be_found(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
mock_fsl.return_value = None
settings = ciftify_recon_all.Settings(self.arguments)
# Should never reach this line
assert False
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_ciftify_data_dir_not_found(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
mock_ciftify.return_value = None
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_ciftify_data_dir_doesnt_exist(self,
mock_ciftify, mock_fsl, mock_exists):
ciftify_data = '/somepath/ciftify/data'
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = ciftify_data
mock_fsl.return_value = '/somepath/FSL'
mock_exists.side_effect = lambda path : False if path == ciftify_data else True
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_default_config_read_when_no_config_yaml_given(self,
mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
config = settings._Settings__config
assert config is not None
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_yaml_config_file_doesnt_exist(self,
mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
yaml_file = '/somepath/fake_config.yaml'
mock_exists.side_effect = lambda path: False if path == yaml_file else True
# work with a deep copy of arguments to avoid modifications having any
# effect on later tests
args_copy = copy.deepcopy(self.arguments)
args_copy['--settings-yaml'] = yaml_file
settings = ciftify_recon_all.Settings(args_copy)
assert False
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_dscalars_doesnt_contain_msmsulc_settings_when_reg_name_is_FS(
self, mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
if settings.reg_name == 'FS':
assert 'ArealDistortion_MSMSulc' not in settings.dscalars.keys()
else:
assert True
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_msm_config_set_to_none_in_fs_mode(self, mock_ciftify, mock_fsl,
mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
assert settings.msm_config is None
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_msm_config_set_to_default_when_user_config_not_given(self,
mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
        # Modify a copy of the arguments so changes don't affect other tests
args = copy.deepcopy(self.arguments)
args['--MSMSulc'] = True
args['--MSM-config'] = None
settings = ciftify_recon_all.Settings(args)
assert settings.msm_config is not None
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_sys_exit_raised_when_user_msm_config_doesnt_exist(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
user_config = "/some/path/nonexistent_config"
mock_exists.side_effect = lambda path: False if path == user_config else True
args = copy.deepcopy(self.arguments)
args['--MSMSulc'] = True
args['--MSM-config'] = user_config
settings = ciftify_recon_all.Settings(args)
# Test should never reach this line
assert False
@raises(SystemExit)
@patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_expected_registration_path_missing(self,
mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
# Use copy to avoid side effects in other tests
yaml_copy = copy.deepcopy(self.yaml_config)
del yaml_copy['registration']['src_dir']
mock_yaml_settings.return_value = yaml_copy
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@raises(SystemExit)
@patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_resolution_not_defined_for_given_method(self,
mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
# Use copy to avoid side effects in other tests
yaml_copy = copy.deepcopy(self.yaml_config)
del yaml_copy['FSL_fnirt']['2mm']
mock_yaml_settings.return_value = yaml_copy
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@raises(SystemExit)
@patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_registration_resolution_file_doesnt_exist(self,
mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
fsl_dir = '/somepath/FSL'
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = fsl_dir
mock_yaml_settings.return_value = self.yaml_config
required_file = os.path.join(os.path.dirname(fsl_dir),
self.yaml_config['FSL_fnirt']['2mm']['FNIRTConfig'])
mock_exists.side_effect = lambda x: False if x == required_file else True
settings = ciftify_recon_all.Settings(self.arguments)
assert False
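The tests above stack several @patch decorators; here is a self-contained sketch of the ordering rule they rely on (decorators apply bottom-up, so the innermost patch arrives as the first mock argument; demo is an illustrative name):

from unittest.mock import patch

@patch('os.path.isdir')    # outermost -> last mock argument
@patch('os.path.exists')   # innermost -> first mock argument
def demo(mock_exists, mock_isdir):
    import os
    mock_exists.return_value = True
    mock_isdir.return_value = False
    return os.path.exists('/x'), os.path.isdir('/x')

print(demo())  # (True, False)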
| 40.710638
| 89
| 0.669907
| 18,865
| 0.985941
| 0
| 0
| 17,592
| 0.91941
| 0
| 0
| 6,570
| 0.343368
|
b7da270be2ee04de235dd0dfc5b966c52ba7cf65
| 35,831
|
py
|
Python
|
Wrangle OSM Dataset.py
|
Boykai/Project-3-Wrangle-OpenStreetMap-Dataset
|
493a4346ae12fb0fe853d4d07e4e8b03ef6a430f
|
[
"MIT"
] | 1
|
2017-09-01T11:07:26.000Z
|
2017-09-01T11:07:26.000Z
|
Wrangle OSM Dataset.py
|
Boykai/Project-3-Wrangle-OpenStreetMap-Dataset
|
493a4346ae12fb0fe853d4d07e4e8b03ef6a430f
|
[
"MIT"
] | null | null | null |
Wrangle OSM Dataset.py
|
Boykai/Project-3-Wrangle-OpenStreetMap-Dataset
|
493a4346ae12fb0fe853d4d07e4e8b03ef6a430f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Tue Jan 17 16:19:36 2017
@author: Boykai
'''
import xml.etree.cElementTree as ET # Use cElementTree or lxml if too slow
from collections import defaultdict
import re
import pprint
import string
import codecs
import json
import os
from pymongo import MongoClient
class OSMFile(object):
'''
OSM File handler
From Udacity
'''
def __init__(self, osm_file, sample_file, sample_size):
'''
        Initialize an OSM File instance, save all sampled top level tags
        into sample_file.osm, and save all parameters as attributes of the instance.
osm_file: Original OSM input file, downloaded from
OSM website, OSM file path. (a string)
sample_file: Sampled OSM output file, created in given sample_file
path (a string)
sample_size: A sample size that takes every sample_size-th
top level element (a non-zero, positive integer)
'''
self.osm_file = osm_file
self.sample_file = sample_file
self.sample_size = sample_size
def getSampleFile(self):
'''
@return sample file name and/or directory. (a string)
'''
return self.sample_file
def getOsmFile(self):
'''
@return OSM file name and/or directory. (a string)
'''
return self.osm_file
def getSampleSize(self):
'''
@return sample size. (a non-zero, positive integer)
'''
return self.sample_size
def getElement(self, tags=('node', 'way', 'relation')):
'''
XML tag element generator
tags: tag elements to search for in OSM file (a tuple of strings)
@yield element if it is the right type of tag
Reference:
http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
'''
context = iter(ET.iterparse(self.getOsmFile(), events=('start', 'end')))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def createSampleFile(self):
'''
        Creates and writes the sample file, a new OSM file to work with
        while cleaning. By creating a sample file, the time it takes to
        analyze, audit, clean, and write the clean data is greatly reduced.
        '''
        print('Creating sample XML file...')
        with open(self.getSampleFile(), 'wb') as f:
            # The file is opened in binary mode because ET.tostring() returns
            # bytes when given an encoding, so the literal header and footer
            # must be bytes as well.
            f.write(b"<?xml version='1.0' encoding='UTF-8'?>\n")
            f.write(b'<osm>\n  ')
            k = self.getSampleSize()
            # Write every kth top level element
            for i, element in enumerate(self.getElement()):
                if i % k == 0:
                    f.write(ET.tostring(element, encoding='utf-8'))
            f.write(b'</osm>')
class CleanStreets(object):
'''
Clean Streets of OSM File
From Udacity
'''
def __init__(self, sample_file):
'''
Initialize a Clean Streets instance, saves all parameters as attributes
of the instance. Finds and returns all instances of unexpected
street suffixes.
sample_file: Sampled OSM output file, created in given sample_file
path (a string)
street_type_re: Regex created to find the street suffix for
tag attributes. (a regex)
expected: Expected street names, street names which are deemed as
acceptable format (a list of strings)
        mapping: Keys that are found as street suffixes for tag attributes are
            to be replaced by the key's value (a string dictionary of strings)
clean_streets_dict: Dictionary mapping dirty street names to clean
street names (a dictionary of strings)
expected_zip: List of valid Brooklyn zip codes (a list of strings)
'''
self.sample_file = sample_file
self.street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
self.expected = ['Alley',
'Americas',
'Atrium',
'Avenue',
'Bayside',
'Boulevard',
'Bowery',
'Broadway',
'Bushwick',
'Center',
'Circle',
'Clinton',
'Close',
'Commons',
'Court',
'Crescent',
'Drive',
'East',
'Expressway',
'Extension',
'Finest',
'Fulton',
'Gardens',
'Gratta',
'Hamilton',
'Heights',
'Highway',
'Island',
'Lafayette',
'Lane',
'Loop',
'Macdougal',
'Mall',
'MetroTech',
'Mews',
'North',
'Oval',
'Park',
'Parkway',
'Path',
'Piers',
'Place',
'Plaza',
'Promenade',
'Remsen',
'Reservation',
'Rico',
'Road',
'Roadbed',
'Rockaways',
'Row',
'Slip',
'South',
'Southwest',
'Square',
'Street',
'Terrace',
'Trail',
'Turnpike',
'Vanderbilt',
'Village',
'Warren',
'Walk',
'West',
'WestBayside',
'Willoughby']
        self.expected.extend(string.ascii_uppercase)
self.dirty_to_clean_streets = {'Ave' : 'Avenue',
'Ave.' : 'Avenue',
'Avene' : 'Avenue',
'Avenue,' : 'Avenue',
'avenue' : 'Avenue',
'ave' : 'Avenue',
'Blvd' : 'Boulevard',
'Crt' : 'Court',
'Ctr' : 'Court',
'Dr' : 'Drive',
'Plz' : 'Plaza',
'Rd' : 'Road',
'ST' : 'Street',
'St': 'Street',
'St.': 'Street',
'st' : 'Street',
'St ' : 'Street',
'St. ' : 'Street',
'Steet' : 'Street',
'street' : 'Street',
'Streeet' : 'Street'}
self.clean_streets_dict = {'Graham Avenue #1' : 'Graham Avenue',
'Nostrand Avenue, #107' : 'Nostrand Avenue',
'305 Schermerhorn St., Brooklyn, NY 11217' : 'Schermerhorn Street',
'1st' : '1st Avenue',
'Coney Island Avenue, Ste 200' : 'Coney Island Avenue',
'Broadway #205' : 'Broadway',
'218650358': 'NaN',
'16th Street # 3' : '16th Street',
'Hanover Square #3' : 'Hanover Square',
'Union Avenue 4B' : 'Union Avenue',
'Joralemon Street, #4CF' : 'Joralemon Street',
'Main St., Suite 500' : 'Main Street',
'Broadway #506' : 'Broadway',
'Mott St #507' : 'Mott Street',
'32nd street with 7th' : '32nd Street',
'861' : 'NaN',
'wyckoff ave unit A28' : 'Wyckoff Avenue',
'Dekalb Ave, 2nd Floor' : 'Dekalb Avenue',
'Wall Street 12th Floor' : 'Wall Street',
'Manhattan Avenue (2nd Floor)' : 'Manhattan Avenue',
'University Plz' : ' University Plaza',
'Linden Boulevard Outer Eb Rb' : 'Linden Boulevard',
'bus_stop' : 'NaN',
'DeKalb Avenue 4 floor' : 'Dekalb Avenue'}
self.expected_zip = ['11201',
'11203',
'11204',
'11205',
'11206',
'11207',
'11208',
'11209',
'11210',
'11211',
'11212',
'11213',
'11214',
'11215',
'11216',
'11217',
'11218',
'11219',
'11220',
'11221',
'11222',
'11223',
'11224',
'11225',
'11226',
'11228',
'11229',
'11230',
'11231',
'11232',
'11233',
'11234',
'11235',
'11236',
'11237',
'11238',
'11239']
def getSampleFile(self):
'''
@return sample file name and/or directory. (a string)
'''
return self.sample_file
def getStreetTypeRegex(self):
'''
@return street name type regex. (a string regex)
'''
return self.street_type_re
def getExpected(self):
'''
@return street suffixes. (a list of strings)
'''
return self.expected
def getDirtyToCleanStreets(self):
'''
@return dirty to clean streets mapping dict. (a dictionary of strings)
'''
return self.dirty_to_clean_streets
def getCleanStreetsDict(self):
'''
@return clean street dict. (a dictionary of strings)
'''
return self.clean_streets_dict
def getExpectedZip(self):
'''
@return list of expected zip codes for Brooklyn. (a list of strings)
'''
return self.expected_zip
def auditStreetType(self, street_types, street_name):
'''
Audits street type by checking if the street type is in the list
of expected street type values.
        Searches street_name against the regex to find the street suffix. If the
        street type is not in the expected list, it is added to the street_types
        defaultdict.
The string of street_name is the value set to the street_type key
in street_types defaultdict.
street_types: Street type is a dictionary set, which is mutated within
the function, passed from audit function.
(a string defaultdict set of strings)
street_name: Street name string value found in tag attribute. (a string)
'''
m = self.getStreetTypeRegex().search(street_name)
if m:
street_type = m.group()
if street_type not in self.getExpected():
street_types[street_type].add(street_name)
def auditZipType(self, zip_types, zip_name):
'''
Audits zip code type by checking if the zip type is in the list
of expected zip type values.
The string of zip_name is the value set to the zip_type key
in zip_types defaultdict.
zip_types: Zip type is a dictionary set, which is mutated within
the function, passed from audit function.
(a string defaultdict set of strings)
zip_name: Zip name string value found in tag attribute. (a string)
'''
if zip_name not in self.getExpectedZip():
zip_types[zip_name].add('NaN')
def isStreetName(self, elem):
'''
Evaluates if tag attribute is equal to a address of type street.
elem: XML tag element object (a object)
@return: Bool if the tag attribute is equal to a address of type street.
'''
return (elem.attrib['k'] == 'addr:street')
def isZipCode(self, elem):
'''
Evaluates if tag attribute is equal to a address of type postcode.
elem: XML tag element object (a object)
@return: Bool if the tag attribute is equal to a address of type postcode.
'''
return (elem.attrib['k'] == 'addr:postcode')
def audit(self, audit_file):
'''
Iterates over XML tag elements in order to find all of the addresses
of type street.
Evaluates the tag 'v' attributes to determine if the street suffixes
are within the expected street suffix list.
@return: Defaultdict of unexpected street suffixes as keys,
the full street names as values. (a defaultdict of strings)
'''
with open(audit_file, 'r') as f:
street_types = defaultdict(set)
zip_types = defaultdict(set)
f.seek(0)
for event, elem in ET.iterparse(f, events=('start',)):
if elem.tag == 'node' or elem.tag == 'way':
for tag in elem.iter('tag'):
if self.isStreetName(tag):
self.auditStreetType(street_types, tag.attrib['v'])
if self.isZipCode(tag):
self.auditZipType(zip_types, tag.attrib['v'])
elem.clear()
street_types = self.sortStreets(street_types)
return [street_types, zip_types]
def sortStreets(self, unsorted_streets):
'''
Sorts street types defaultdict by key, with proper values.
unsorted_streets: Unsorted defaultdict of street types with values
equal to the instances of street type
(a defaultdict of strings)
@return: Sorted defaultdict of unexpected street suffixes as keys,
the full street names as values. (a defaultdict of strings)
'''
sorted_streets = {}
sorted_keys = sorted(unsorted_streets.keys())
for key in sorted_keys:
sorted_streets[key] = unsorted_streets[key]
return sorted_streets
def clean(self, unexpected_dirty_streets):
'''
Get unexpected street suffixes and replace with acceptable street
suffixes when determined that the data is unacceptably dirty.
Assumes that every key given by self.audit() is of type string.
        Assumes that every value assigned to a key given by self.audit() is of
        type string.
        Assumes that every key given by self.audit() has a valid string value.
@return: Clean sorted defaultdict of street names with correct suffixes
(a defaultdict of strings)
'''
unexpected_streets = unexpected_dirty_streets.copy()
#Iterate over unexpected street types found
for key in unexpected_streets.keys():
# Determine if unexpected street type is not acceptable
if key in self.dirty_to_clean_streets.keys():
list_of_streets = list(unexpected_streets[key])
# Iterate over streets of unacceptable street type
for i, street in enumerate(list_of_streets):
street_name = street[ : -len(key)]
good_street = (street_name + self.dirty_to_clean_streets[key])
bad_street = str(list(unexpected_streets[key])[i])
                    # Save each unacceptable street as [key] mapped to the
                    # acceptable street as [value] in clean_streets_dict
self.clean_streets_dict[bad_street] = good_street
return self.clean_streets_dict
def writeClean(self, cleaned_streets):
'''
Get cleaned streets mapping dictionary and use that dictionary to find
and replace all bad street name tag attributes within XML file.
Iterate through XML file to find all bad instances of tag attribute
street names, and replace with correct mapping value from cleaned_streets
mapping dictionary.
Stores new cleaned XML file in 'output.osm'
        cleaned_streets: Clean sorted defaultdict of street names with correct suffixes
(a defaultdict of strings)
'''
with open('output.osm', 'w') as output:
output.write("<?xml version='1.0' encoding='UTF-8'?>\n")
output.write('<osm>\n ')
osm_file = open(self.getSampleFile(), 'r')
for event, elem in ET.iterparse(osm_file, events=('start', 'end')):
# Begin processing when the end of the element is reached
# Include all elements, except 'osm', for processing (so that your files are identical)
if event == 'end' and (elem.tag in ['node', 'way', 'relation', 'bounds','meta','note'] ):
for tag in elem.iter('tag'):
# Check if tag is a street name tag, set street name to street
if self.isStreetName(tag):
street = tag.attrib['v']
# If street name is in clean streets dict, replace
# dirty street with clean street value
if street in cleaned_streets.keys():
tag.attrib['v'] = cleaned_streets[street]
# Check if tag is a zip code tag, set zip code to 'NaN' if not valid
if self.isZipCode(tag):
zip_code = tag.attrib['v']
if zip_code not in self.getExpectedZip():
tag.attrib['v'] = 'NaN'
# Move the write function inside the condition, so that it only writes
# tags that you specify (i.e. everything apart from the root <osm> element)
                    output.write(ET.tostring(elem, encoding='unicode'))  # 'unicode' -> str for the text-mode handle
elem.clear()
output.write('</osm>')
osm_file.close()
class JsonFile(object):
def __init__(self, output_file):
'''
Initialize a JSON File instance, saves all parameters as attributes
of the instance. Takes in an XML file and returns a JSON file
lower: Regex created to find lowercase characters for
tag elements (a regex)
lower_colon: Regex created to find lowercase characters for
tag elements when a colon is included (a regex)
problemchars: Regex created to find special characters for
tags and tag elements (a regex)
created_tags: Tag element names, which are deemed as acceptable for
adding information (a list of strings)
output_file: XML OSM output file, created in given output_file
path (a string)
'''
self.lower = re.compile(r'^([a-z]|_)*$')
self.lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
self.problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
self.created_tags = [ 'version', 'changeset', 'timestamp', 'user', 'uid']
self.output_file = output_file
def getElement(self, file_in, tags=('node', 'way', 'relation')):
'''
XML tag element generator
tags: tag elements to search for in OSM file (a tuple of strings)
@yield element if it is the right type of tag
Reference:
http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
'''
context = iter(ET.iterparse(file_in, events=('start', 'end')))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def shapeElement(self, element):
'''
Takes in XML element, shapes it into JSON node as dictionary, returns shaped element.
element: XML ElementTree element, which is shaped into JSON node (an ET object)
@return: node for JSON file creation (a dictionary)
'''
node = {}
address = {}
created = {}
node_refs = []
pos = []
if element.tag == 'node' or element.tag == 'way' :
node['type'] = element.tag
            # Get and store GPS (lat, lon) coordinates
if 'lat' in element.attrib.keys() and 'lon' in element.attrib.keys():
                try:
                    lat = float(element.attrib['lat'])
                    lon = float(element.attrib['lon'])
                    pos.insert(0, lat)
                    pos.insert(1, lon)
                except ValueError:
                    # Skip coordinates that fail float conversion
                    pass
            # Get and set {tag : attrib} into dict, skipping the position
            # fields (the original check `k not in pos` compared attribute
            # names against float coordinates, so it never filtered anything)
            for k, m in element.attrib.items():
                if k not in ('lat', 'lon'):
                    if k in self.created_tags:
                        created[k] = m
                    else:
                        node[k] = m
# Get and set node type into node dict
if created:
node['created'] = created
if pos:
node['pos'] = pos
if address:
node['address'] = address
if node_refs:
node['node_refs'] = node_refs
if 'lon' in node.keys():
node.pop('lon')
if 'lat' in node.keys():
node.pop('lat')
# Iterate over subtags in element, set attribs when valid
for child in element:
if child.tag == 'nd':
                node.setdefault('node_refs', []).append(child.attrib['ref'])
elif child.tag == 'tag':
# Clean and set 'addr:' attrib
if self.problemchars.search(child.attrib['k']):
pass
elif child.attrib['k'].startswith('addr:'):
key = re.sub('addr:', '', child.attrib['k']).strip()
                    if self.lower_colon.match(key):
                        # 'continue' skips only this two-level addr:x:y tag;
                        # the original 'break' dropped every remaining child tag
                        continue
                    else:
                        node.setdefault('address', {})[key] = child.attrib['v']
# Set already clean attrib
else:
node[child.attrib['k']] = child.attrib['v']
return node
else:
return None
def processMap(self, pretty = False):
'''
        Takes an XML file, maps and creates a JSON file with the same information,
        structure, and element nodes as the input XML file
pretty: If pretty, creates a human readable JSON file (a bool)
@return: List of JSON dictionary shaped node elements (a list)
'''
file_in = self.output_file
file_out = '{0}.json'.format(file_in)
data = []
'''
# Create JSON output file, shape and map each XML element
with codecs.open(file_out, 'w') as fo:
for _, element in ET.iterparse(file_in):
el = self.shapeElement(element)
if el:
data.append(el)
if pretty:
fo.write(json.dumps(el, indent=2) + '\n')
else:
fo.write(json.dumps(el) + '\n')
return data
'''
with codecs.open(file_out, 'w') as fo:
for i, element in enumerate(self.getElement(file_in)):
el = self.shapeElement(element)
if el:
data.append(el)
if pretty:
fo.write(json.dumps(el, indent = 2) + '\n')
else:
fo.write(json.dumps(el) + '\n')
return data
def mongoAggregate(cursor):
'''
Takes in pymongo aggregate cursor object, iterates through each element
within the aggregation, then returns the list of elements
    cursor: pymongo aggregate cursor object, which is iterated (a cursor object)
@return: List of aggregation elements (a list)
'''
    return list(cursor)
if __name__ == '__main__':
# Get OSM File, which is Brooklyn OpenStreetMap
# https://mapzen.com/data/metro-extracts/metro/brooklyn_new-york/
xml_original_file = 'brooklyn_new-york.osm' # Original OSM File input name
xml_sample_file = 'sample.osm' # Sample OSM File output name
xml_cleaned_file = 'output.osm'
sample_size = 1
# Initialize and create OSM original file and sample file
if sample_size == 1:
xml_sample_file = xml_original_file
osm = OSMFile(xml_original_file, xml_sample_file, sample_size)
if sample_size != 1:
osm.createSampleFile()
# Initialize and clean street type tag attributes
    print('\nInitializing and getting street type tag attributes...')
cleanSt = CleanStreets(xml_sample_file)
    # Audit street tag attributes and store values in the unexpected_streets dict
# returns street type keys with street name values dict
print('\nPerforming audit on street types...')
audit_results = cleanSt.audit(xml_sample_file)
unexpected_streets = audit_results[0]
unexpected_zips = audit_results[1]
print('There are ' + str(len(unexpected_streets.values())) + ' unique unexpected streets.')
print('Dictionary of unexpected street name types with street names: ')
pprint.pprint(unexpected_streets)
print('\nThere are ' + str(len(unexpected_zips.values())) + ' unique unexpected zip codes.')
print('Dictionary of unexpected zip code types with street names: ')
pprint.pprint(unexpected_zips)
# Clean street values and store cleaned streets in clean_street_dict
print('\nCleaning street type values...')
clean_streets_dict = cleanSt.clean(unexpected_streets)
print('There are ' + str(len(cleanSt.getCleanStreetsDict().values())) + ' street names to be replaced.')
print('Dictionary of dirty street keys and clean street values: ')
pprint.pprint(clean_streets_dict)
# Find and write clean street names to XML file, save updated XML file
print('\nCreating new output.osm file with cleaned street types...')
cleanSt.writeClean(clean_streets_dict)
clean_audit_results = cleanSt.audit(xml_sample_file)
clean_unexpected_streets = clean_audit_results[0]
print('There are ' + str(len(clean_unexpected_streets.values())) + ' unique unexpected streets.')
print('New audit after street names have been replaced with clean street names: ')
pprint.pprint(clean_unexpected_streets)
if sample_size != 1:
print('\nDeleting XML sample file...')
#os.remove(xml_sample_file)
# Initialize and create JSON file from cleaned XML output.osm file
print('\nCreating new JSON file from cleaned XML file...')
js = JsonFile(xml_cleaned_file)
data = js.processMap()
print('\nDeleting XML cleaned file...')
os.remove(xml_cleaned_file)
# Initialize and create MongoDB database from JSON document list 'data'
print('\nCreating new MongoDB database \'brooklyn\' from cleaned JSON file...')
client = MongoClient('mongodb://localhost:27017')
db = client.osm_results
db.brooklyn.insert_many(data, bypass_document_validation=True)
del data[:]
    # Run and output MongoDB queries and results
print('\nRunning MongoDB queries...')
print('\nTotal number of documents: ')
print('db.brooklyn.find().count()')
print(str(db.brooklyn.find().count()))
print('\nNumber of \'way\' type documents: ')
print('db.brooklyn.find({\'type\' :\'way\'}).count()')
print(str(db.brooklyn.find({'type' :'way'}).count()))
print('\nNumber of \'node\' type documents: ')
print('db.brooklyn.find({\'type\' :\'node\'}).count()')
print(str(db.brooklyn.find({'type' :'node'}).count()))
print('\nNumber of unique users: ')
print('len(db.brooklyn.distinct(\'created.user\'))')
print(str(len(db.brooklyn.distinct('created.user'))))
print('\nTop 1 contributing user: ')
top_contributor_pipeline = [{'$group':
{'_id':'$created.user',
'count':{'$sum':1}}},
{'$sort':
                                 {'count':-1}},  # descending, so the top contributor comes first
{'$limit':1}]
print('db.brooklyn.aggregate(' + str(top_contributor_pipeline) + ')')
top_contributor = mongoAggregate(db.brooklyn.aggregate(top_contributor_pipeline))
print(str(top_contributor[0]))
print('\nNumber of users appearing only once (having 1 post): ')
unique_user_count_pipeline =[{'$group':
{'_id':'$created.user',
'count':{'$sum':1}}},
{'$group':
{'_id':'$count',
'num_users':{'$sum':1}}},
{'$sort':
{'_id':1}},
{'$limit':1}]
print('db.brooklyn.aggregate(' + str(unique_user_count_pipeline) + ')')
unique_user_count = mongoAggregate(db.brooklyn.aggregate(unique_user_count_pipeline))
print(str(unique_user_count[0]))
print('\nTop 10 appearing amenities: ')
top_10_amenities_pipeline = [{'$match':
{'amenity':{'$exists':1}}},
{'$group':
{'_id':'$amenity',
'count':{'$sum':1}}},
{'$sort':
                                  {'count':-1}},  # descending: most common amenities first
{"$limit":10}]
print('db.brooklyn.aggregate(' + str(top_10_amenities_pipeline) + ')')
top_10_amenities = mongoAggregate(db.brooklyn.aggregate(top_10_amenities_pipeline))
print(str(top_10_amenities))
print('\nHighest population religion: ')
    # Duplicate keys in a Python dict literal collapse to the last one, so only
    # the equality match is kept; matching a value already implies existence.
    most_pop_religion_pipeline = [{'$match':
                                   {'amenity': 'place_of_worship'}},
                                  {'$group':
                                   {'_id':'$religion',
                                    'count':{'$sum':1}}},
                                  {'$sort':
                                   {'count':-1}},  # descending: top religion first
                                  {'$limit':1}]
print('db.brooklyn.aggregate(' + str(most_pop_religion_pipeline) + ')')
most_pop_religion = mongoAggregate(db.brooklyn.aggregate(most_pop_religion_pipeline))
print(str(most_pop_religion[0]))
print('\nMost popular cuisines: ')
    most_pop_cuisine_pipeline = [{'$match':
                                  {'amenity': 'restaurant'}},  # duplicate dict keys collapsed; equality implies existence
                                 {'$group':
                                  {'_id':'$cuisine',
                                   'count':{'$sum':1}}},
                                 {'$sort':
                                  {'count':-1}},  # descending: top cuisines first
                                 {'$limit':2}]
print('db.brooklyn.aggregate(' + str(most_pop_cuisine_pipeline) + ')')
most_pop_cuisine = mongoAggregate(db.brooklyn.aggregate(most_pop_cuisine_pipeline))
print(str(most_pop_cuisine[0]))
print('\nPostal Codes: ')
    postal_codes_pipeline = [{'$match':
                              {'address.postcode': 'NaN'}},  # duplicate dict keys collapsed; equality implies existence
                             {'$group':
                              {'_id':'$address.postcode',
                               'count':{'$sum':1}}},
                             {'$sort':{'count':1}}]
print('db.brooklyn.aggregate(' + str(postal_codes_pipeline) + ')')
postal_codes = mongoAggregate(db.brooklyn.aggregate(postal_codes_pipeline))
print(str(postal_codes[0]))
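A quick, self-contained sketch of the suffix-extraction regex CleanStreets relies on; \b\S+\.?$ simply grabs the last whitespace-delimited token of a street name (sample names are illustrative):

import re
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
for name in ['5th Ave.', 'Bedford Avenue', 'Broadway']:
    print(name, '->', street_type_re.search(name).group())
# 5th Ave. -> Ave.
# Bedford Avenue -> Avenue
# Broadway -> Broadway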
| 40.996568
| 127
| 0.471966
| 26,787
| 0.747593
| 1,345
| 0.037537
| 0
| 0
| 0
| 0
| 16,099
| 0.449304
|
b7da43e450c1cde9be925061435a5d471ad6ae05
| 640
|
py
|
Python
|
Wrapping/Python/vtkmodules/__init__.py
|
cads-build/VTK
|
ee0c9688a082c88bfe070afc08f4eb0f0a546487
|
[
"BSD-3-Clause"
] | 1
|
2019-09-11T12:30:57.000Z
|
2019-09-11T12:30:57.000Z
|
Wrapping/Python/vtkmodules/__init__.py
|
AndyJMR/VTK
|
3cc9e5f7539107e5dbaeadc2d28f7a8db6de8571
|
[
"BSD-3-Clause"
] | null | null | null |
Wrapping/Python/vtkmodules/__init__.py
|
AndyJMR/VTK
|
3cc9e5f7539107e5dbaeadc2d28f7a8db6de8571
|
[
"BSD-3-Clause"
] | null | null | null |
r"""
Currently, this package is experimental and may change in the future.
"""
from __future__ import absolute_import
#------------------------------------------------------------------------------
# this little trick is for static builds of VTK. In such builds, if
# the user imports this Python package in a non-statically linked Python
# interpreter, i.e. not one of the VTK-python executables, then we import the
# static components importer module.
try:
from . import vtkCommonCore
except ImportError:
from . import _vtkpythonmodules_importer
#------------------------------------------------------------------------------
| 37.647059
| 79
| 0.582813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 490
| 0.765625
|
b7daad942b4ee13674b01a3bc7990323f036b3a5
| 1,176
|
py
|
Python
|
Financely/basic_app/models.py
|
Frostday/Financely
|
23226aca0ad21971cb61d13509e16651b304d207
|
[
"MIT"
] | 8
|
2021-05-28T16:09:36.000Z
|
2022-02-27T23:12:48.000Z
|
Financely/basic_app/models.py
|
Frostday/Financely
|
23226aca0ad21971cb61d13509e16651b304d207
|
[
"MIT"
] | null | null | null |
Financely/basic_app/models.py
|
Frostday/Financely
|
23226aca0ad21971cb61d13509e16651b304d207
|
[
"MIT"
] | 8
|
2021-05-28T16:01:48.000Z
|
2022-02-27T23:12:50.000Z
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Client(models.Model):
user = models.OneToOneField(User,null=True,blank= True,on_delete=models.CASCADE)
name = models.CharField(max_length=100, null=True)
# def __str__(self):
# return self.name
class Portfolio(models.Model):
client = models.OneToOneField(Client,on_delete=models.CASCADE,blank=True,null=True)
# def __str__(self):
# return self.client.name + "'s Portfolio"
class Stock(models.Model):
id = models.BigAutoField(primary_key=True)
parent_portfolio = models.ForeignKey(Portfolio,related_name="stocks",on_delete=models.CASCADE,null=True,blank=True)
stock_symbol = models.CharField(max_length=100,null=True)
stock_price = models.CharField(max_length=100,null=True,blank=True)
stock_sector_performance = models.CharField(max_length=100,null=True,blank=True)
stock_name = models.CharField(max_length=100,null=True)
quantity = models.IntegerField(default=0,null=True,blank=True)
date_added = models.DateTimeField(auto_now_add=True)
# def __str__(self):
# return self.stock_name
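A hedged illustration of how these relations chain (ORM pseudocode; it needs a configured Django project to actually run, and the attribute names follow the models above):

#   client.portfolio                 # reverse side of Portfolio.client
#   client.portfolio.stocks.all()    # via related_name="stocks"
#   stock.parent_portfolio.client.user.username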
| 40.551724
| 119
| 0.747449
| 1,070
| 0.909864
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.161565
|
b7dd25cebefde2e55f7311a4ace4a861586de3c9
| 1,299
|
py
|
Python
|
lims/models/shipping.py
|
razorlabs/BRIMS-backend
|
2c5b7bd126debec459b775e9d11e96fc09975059
|
[
"MIT"
] | 1
|
2020-03-20T23:00:24.000Z
|
2020-03-20T23:00:24.000Z
|
lims/models/shipping.py
|
razorlabs/BRIMS-backend
|
2c5b7bd126debec459b775e9d11e96fc09975059
|
[
"MIT"
] | null | null | null |
lims/models/shipping.py
|
razorlabs/BRIMS-backend
|
2c5b7bd126debec459b775e9d11e96fc09975059
|
[
"MIT"
] | 1
|
2020-03-09T09:57:25.000Z
|
2020-03-09T09:57:25.000Z
|
from django.db import models
"""
ShipmentModels have a one to many relationship with boxes and aliquot
Aliquot and Box foreign keys to a ShipmentModel determine manifest contents
for shipping purposes (resolved in schema return for manifest view)
"""
class ShipmentModel(models.Model):
carrier = models.ForeignKey('CarrierModel',
on_delete=models.SET_NULL,
blank=True,
null=True)
shipment_number = models.CharField(max_length=255, blank=True, null=True)
# TODO What should we do if a destination is removed?
destination = models.ForeignKey('DestinationModel',
on_delete=models.SET_NULL,
blank=True,
null=True)
sent_date = models.DateTimeField(blank=True, null=True)
received_date = models.DateTimeField(blank=True, null=True)
notes = models.CharField(max_length=255, blank=True, null=True)
class DestinationModel(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class CarrierModel(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
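A hedged illustration of how the manifest resolution described in the module docstring would traverse these models (ORM pseudocode; BoxModel and AliquotModel are referenced by the docstring but defined elsewhere):

#   shipment.carrier.name, shipment.destination.name   # nullable FKs
#   BoxModel / AliquotModel rows holding a FK to ShipmentModel make up
#   the shipment's manifest, resolved in the schema layer.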
| 34.184211
| 79
| 0.628176
| 1,026
| 0.789838
| 0
| 0
| 0
| 0
| 0
| 0
| 319
| 0.245574
|
b7defbba24700ce1dff5cfd0c991ccf13a0c39e0
| 1,857
|
py
|
Python
|
part-2/2-iterators/Example-consuming_iterators_manually.py
|
boconlonton/python-deep-dive
|
c01591a4943c7b77d4d2cd90a8b23423280367a3
|
[
"MIT"
] | null | null | null |
part-2/2-iterators/Example-consuming_iterators_manually.py
|
boconlonton/python-deep-dive
|
c01591a4943c7b77d4d2cd90a8b23423280367a3
|
[
"MIT"
] | null | null | null |
part-2/2-iterators/Example-consuming_iterators_manually.py
|
boconlonton/python-deep-dive
|
c01591a4943c7b77d4d2cd90a8b23423280367a3
|
[
"MIT"
] | null | null | null |
"""
Consuming iterators manually
"""
from collections import namedtuple
def cast(data_type, value):
"""Cast the value into a correct data type"""
if data_type == 'DOUBLE':
return float(value)
elif data_type == 'STRING':
return str(value)
    elif data_type == 'INT':
        return int(value)
    else:
        # Previously an unknown type marker silently returned None
        raise ValueError('Unknown data type: {}'.format(data_type))
def cast_row(data_types1, data_row):
return [
cast(data_type, value)
for data_type, value in zip(data_types1, data_row)
]
# cars = []
# with open('cars.csv') as file:
# row_index = 0
# for line in file:
# if row_index == 0:
# # Header row
# headers = line.strip('\n').split(';')
# Car = namedtuple('Car', headers)
# elif row_index == 1:
# data_types = line.strip('\n').split(';')
# # print('types', data_types)
# else:
# # data row
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
# # print(data)
# row_index += 1
# with open('cars.csv') as file:
# file_iter = iter(file)
# headers = next(file_iter).strip('\n').split(';')
# Car = namedtuple('Car', headers)
# data_types = next(file_iter).strip('\n').split(';')
# for line in file_iter:
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
with open('cars.csv') as file:
file_iter = iter(file)
headers = next(file_iter).strip('\n').split(';')
Car = namedtuple('Car', headers)
data_types = next(file_iter).strip('\n').split(';')
cars = [Car(*cast_row(
data_types,
line.strip('\n').split(';')
))
for line in file_iter]
print(cars)
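The same manual-consumption idea in a self-contained form, with a list standing in for the CSV file (illustrative data only): pull the two header rows with next(), then hand the remaining iterator to a comprehension.

rows = iter(['make;model', 'STRING;STRING', 'Ford;Focus', 'Mazda;3'])
headers = next(rows).split(';')
data_types = next(rows).split(';')
print([dict(zip(headers, line.split(';'))) for line in rows])
# [{'make': 'Ford', 'model': 'Focus'}, {'make': 'Mazda', 'model': '3'}]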
| 27.308824
| 58
| 0.525579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,120
| 0.603123
|
b7dfa49c85bfb3c402f6a966ce46d040dfc275f6
| 1,675
|
py
|
Python
|
instance_server/services/startpage.py
|
Geierhaas/developer-observatory
|
f2e840ab9a283ea82353a8c5bbb6b1905567fbe4
|
[
"MIT"
] | 4
|
2017-08-26T04:51:52.000Z
|
2022-01-02T23:07:48.000Z
|
instance_server/services/startpage.py
|
Geierhaas/developer-observatory
|
f2e840ab9a283ea82353a8c5bbb6b1905567fbe4
|
[
"MIT"
] | 3
|
2020-11-04T11:13:55.000Z
|
2021-03-08T19:47:52.000Z
|
instance_server/services/startpage.py
|
Geierhaas/developer-observatory
|
f2e840ab9a283ea82353a8c5bbb6b1905567fbe4
|
[
"MIT"
] | 6
|
2017-10-24T14:44:05.000Z
|
2022-01-13T14:26:05.000Z
|
#! Copyright (C) 2017 Christian Stransky
#!
#! This software may be modified and distributed under the terms
#! of the MIT license. See the LICENSE file for details.
from flask import Flask, redirect, request, make_response
from shutil import copyfile
import json
import requests
import os.path
import uuid
import urllib.request
app = Flask(__name__)
remote_task_file = "%landingURL%/get_ipynb/"
target_file = "/home/jupyter/tasks.ipynb"
user_data_file = "/home/jupyter/.instanceInfo"
@app.route('/')
def init():
user_id = request.args.get('userId')
token = request.args.get('token')
user_data = {}
user_data["user_id"] = user_id
user_data["token"] = token
#Check if a task file already exists on this instance
if not os.path.isfile(target_file):
#If not, then request data for this user from the landing page
        # urlretrieve replaces the deprecated URLopener class
        urllib.request.urlretrieve(remote_task_file + user_id + "/" + token,
                                   target_file)
#Prepare the response to the client -> Redirect + set cookies for uid and token
response = make_response(redirect('/nb/notebooks/tasks.ipynb'))
response.set_cookie('userId', user_id)
response.set_cookie('token', token)
# Check if we already stored user data on this instance
if not os.path.isfile(user_data_file):
with open(user_data_file, "w") as f:
#writing the data allows us to retrieve it anytime, if the user has cookies disabled for example.
json.dump(user_data, f)
return response
if __name__ == '__main__':
#app.debug = True
app.run(host='127.0.0.1', port=60000)
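A self-contained sketch of the redirect-plus-cookie response pattern used in init() above, exercised with Flask's test client (no network or Jupyter instance needed; the demo app and route name are illustrative):

from flask import Flask, make_response, redirect

demo = Flask(__name__)

@demo.route('/go')
def go():
    resp = make_response(redirect('/nb/notebooks/tasks.ipynb'))
    resp.set_cookie('userId', 'demo-user')
    return resp

with demo.test_client() as c:
    r = c.get('/go')
    print(r.status_code, r.headers['Location'])  # e.g. 302 /nb/notebooks/tasks.ipynb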
| 34.183673
| 110
| 0.677612
| 0
| 0
| 0
| 0
| 1,076
| 0.642388
| 0
| 0
| 720
| 0.429851
|
b7e02aed4c2632acfe7ae12115128aac02a396d3
| 672
|
py
|
Python
|
utils/linalg.py
|
cimat-ris/TrajectoryInference
|
27d1d2d692df52b403cf6557ecba628f818cd380
|
[
"Apache-2.0"
] | 6
|
2019-11-05T00:56:06.000Z
|
2021-12-05T21:11:14.000Z
|
utils/linalg.py
|
cimat-ris/TrajectoryInference
|
27d1d2d692df52b403cf6557ecba628f818cd380
|
[
"Apache-2.0"
] | 2
|
2021-05-22T11:16:45.000Z
|
2021-05-31T00:42:07.000Z
|
utils/linalg.py
|
cimat-ris/TrajectoryInference
|
27d1d2d692df52b403cf6557ecba628f818cd380
|
[
"Apache-2.0"
] | 1
|
2021-05-22T10:35:18.000Z
|
2021-05-22T10:35:18.000Z
|
import numpy as np
import math
import logging
from termcolor import colored
# Check a matrix for: negative eigenvalues, asymmetry and negative diagonal values
def positive_definite(M,epsilon = 0.000001,verbose=False):
# Symmetrization
Mt = np.transpose(M)
M = (M + Mt)/2
eigenvalues = np.linalg.eigvals(M)
for i in range(len(eigenvalues)):
if eigenvalues[i] <= epsilon:
if verbose:
logging.error("Negative eigenvalues")
return 0
for i in range(M.shape[0]):
if M[i][i] < 0:
if verbose:
logging.error("Negative value in diagonal")
return 0
return 1
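A hedged usage sketch for the checker above (assumes positive_definite is in scope; numpy is already imported):

M_good = np.array([[2.0, 0.5], [0.5, 1.0]])
M_bad = np.array([[1.0, 2.0], [2.0, 1.0]])  # eigenvalues 3 and -1
print(positive_definite(M_good))               # 1
print(positive_definite(M_bad, verbose=True))  # logs the error, returns 0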
| 29.217391
| 82
| 0.616071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 0.220238
|
b7e0fbad2360576b896a69e1a30c6d6156b68c38
| 282
|
py
|
Python
|
problemsets/Codeforces/Python/A1020.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1020.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1020.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
I=lambda:map(int,input().split())
f=abs
n,_,a,b,k=I()
while k:
p,q,u,v=I()
P=[a,b]
if a<=q<=b:P+=[q]
if a<=v<=b:P+=[v]
print([min(f(q-x)+f(v-x)for x in P)+f(p-u),f(q-v)][p==u])
k-=1
| 17.625
| 59
| 0.521277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.312057
|
b7e289ea7bf92691efc481deeec6261bf7909c3b
| 850
|
py
|
Python
|
get_tweet.py
|
Na27i/tweet_generator
|
92a5156e041982dd12d9850445f15a599fb6ec5e
|
[
"MIT"
] | null | null | null |
get_tweet.py
|
Na27i/tweet_generator
|
92a5156e041982dd12d9850445f15a599fb6ec5e
|
[
"MIT"
] | null | null | null |
get_tweet.py
|
Na27i/tweet_generator
|
92a5156e041982dd12d9850445f15a599fb6ec5e
|
[
"MIT"
] | null | null | null |
import json
import sys
import pandas
args = sys.argv
if len(args) == 1:
    import main as settings
else:
    import sub as settings
from requests_oauthlib import OAuth1Session
CK = settings.CONSUMER_KEY
CS = settings.CONSUMER_SECRET
AT = settings.ACCESS_TOKEN
ATS = settings.ACCESS_TOKEN_SECRET
twitter = OAuth1Session(CK, CS, AT, ATS)
tweetlist = []
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
params = {"count" : 200}
for i in range(5):
    res = twitter.get(url, params=params)
    if res.status_code == 200:
        timelines = json.loads(res.text)
        for tweet in timelines:
            tweetlist.append(tweet["text"])
        # Page backwards; without max_id every request returns the same tweets
        if timelines:
            params["max_id"] = timelines[-1]["id"] - 1
    else:
        print("Request failed (%d)" % res.status_code)
datafile = pandas.DataFrame(tweetlist)
datafile.to_csv("tweetlist.csv", encoding='utf_8_sig')
| 22.972973
| 64
| 0.661176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.132867
|
b7e377e1a140ad61d79142b999a2e7a703c9e2ef
| 1,284
|
py
|
Python
|
idact/detail/config/validation/validate_scratch.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 5
|
2018-12-06T15:40:34.000Z
|
2019-06-19T11:22:58.000Z
|
idact/detail/config/validation/validate_scratch.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 9
|
2018-12-06T16:35:26.000Z
|
2019-04-28T19:01:40.000Z
|
idact/detail/config/validation/validate_scratch.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 2
|
2019-04-28T19:18:58.000Z
|
2019-06-17T06:56:28.000Z
|
"""This module contains a function for validating a scratch config entry."""
import re
from idact.detail.config.validation.validation_error_message import \
validation_error_message
VALID_SCRATCH_DESCRIPTION = 'Non-empty absolute path, or environment' \
' variable name.'
VALID_SCRATCH_REGEX = r"^(/.*)|(\$[A-Za-z][A-Za-z0-9]*)$" # noqa, pylint: disable=line-too-long
__COMPILED = re.compile(pattern=VALID_SCRATCH_REGEX)
def validate_scratch(scratch) -> str:
"""Returns the parameter if it's a valid scratch config entry, otherwise
raises an exception.
Key path is optional, non-empty string.
:param scratch: Object to validate.
:raises TypeError: On wrong type.
:raises ValueError: On regex mismatch.
"""
if not isinstance(scratch, str):
raise TypeError(validation_error_message(
label='scratch',
value=scratch,
expected=VALID_SCRATCH_DESCRIPTION,
regex=VALID_SCRATCH_REGEX))
if not __COMPILED.match(scratch):
raise ValueError(validation_error_message(
label='scratch',
value=scratch,
expected=VALID_SCRATCH_DESCRIPTION,
regex=VALID_SCRATCH_REGEX))
return scratch
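A hedged usage sketch (assumes validate_scratch above is importable):

print(validate_scratch('/mnt/scratch'))  # absolute paths pass through
print(validate_scratch('$SCRATCH'))      # environment-variable form also passes
try:
    validate_scratch('relative/path')
except ValueError as error:
    print('rejected:', error.__class__.__name__)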
| 29.181818
| 96
| 0.660436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 519
| 0.404206
|
b7e39de3f444fe8cb279979f19de1ae9ea72a25e
| 10,135
|
py
|
Python
|
paramak/parametric_components/blanket_fp.py
|
zmarkan/paramak
|
ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba
|
[
"MIT"
] | null | null | null |
paramak/parametric_components/blanket_fp.py
|
zmarkan/paramak
|
ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba
|
[
"MIT"
] | null | null | null |
paramak/parametric_components/blanket_fp.py
|
zmarkan/paramak
|
ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba
|
[
"MIT"
] | null | null | null |
import warnings
from typing import Callable, List, Optional, Union
import mpmath
import numpy as np
import paramak
import sympy as sp
from paramak import RotateMixedShape, diff_between_angles
from paramak.parametric_components.tokamak_plasma_plasmaboundaries import \
PlasmaBoundaries
from scipy.interpolate import interp1d
class BlanketFP(RotateMixedShape):
"""A blanket volume created from plasma parameters.
Args:
thickness (float or [float] or callable or [(float), (float)]):
the thickness of the blanket (cm). If the thickness is a float then
this produces a blanket of constant thickness. If the thickness is
a tuple of floats, blanket thickness will vary linearly between the
two values. If thickness is callable, then the blanket thickness
will be a function of poloidal angle (in degrees). If thickness is
            a list of two lists (angles and thicknesses, in that order) then these will be
used together with linear interpolation.
start_angle: the angle in degrees to start the blanket, measured anti
clockwise from 3 o'clock.
stop_angle: the angle in degrees to stop the blanket, measured anti
clockwise from 3 o'clock.
plasma: If not None, the parameters of the plasma Object will be used.
minor_radius: the minor radius of the plasma (cm).
major_radius: the major radius of the plasma (cm).
triangularity: the triangularity of the plasma.
elongation: the elongation of the plasma.
vertical_displacement: the vertical_displacement of the plasma (cm).
offset_from_plasma: the distance between the plasma and the blanket
(cm). If float, constant offset. If list of floats, offset will
vary linearly between the values. If callable, offset will be a
function of poloidal angle (in degrees). If a list of two lists
(angles and offsets) then these will be used together with linear
interpolation.
num_points: number of points that will describe the shape.
"""
def __init__(self,
thickness,
start_angle: float,
stop_angle: float,
plasma: Optional[Union[paramak.Plasma,
paramak.PlasmaBoundaries,
paramak.PlasmaFromPoints]] = None,
minor_radius: Optional[float] = 150.0,
major_radius: Optional[float] = 450.0,
triangularity: Optional[float] = 0.55,
elongation: Optional[float] = 2.0,
vertical_displacement: Optional[float] = 0.0,
offset_from_plasma: Optional[float] = 0.0,
num_points: Optional[int] = 50,
**kwargs):
super().__init__(**kwargs)
self.thickness = thickness
self.start_angle, self.stop_angle = None, None
self.start_angle = start_angle
self.stop_angle = stop_angle
self.plasma = plasma
self.vertical_displacement = vertical_displacement
if plasma is None:
self.minor_radius = minor_radius
self.major_radius = major_radius
self.triangularity = triangularity
self.elongation = elongation
else: # if plasma object is given, use its parameters
self.minor_radius = plasma.minor_radius
self.major_radius = plasma.major_radius
self.triangularity = plasma.triangularity
self.elongation = plasma.elongation
self.offset_from_plasma = offset_from_plasma
self.num_points = num_points
@property
def start_angle(self):
return self._start_angle
@start_angle.setter
def start_angle(self, value):
self._start_angle = value
@property
def stop_angle(self):
return self._stop_angle
@stop_angle.setter
def stop_angle(self, value):
self._stop_angle = value
@property
def minor_radius(self):
return self._minor_radius
@minor_radius.setter
def minor_radius(self, minor_radius):
self._minor_radius = minor_radius
@property
def thickness(self):
return self._thickness
@thickness.setter
def thickness(self, thickness):
self._thickness = thickness
@property
def inner_points(self):
self.find_points()
return self._inner_points
@inner_points.setter
def inner_points(self, value):
self._inner_points = value
@property
def outer_points(self):
self.find_points()
return self._outer_points
@outer_points.setter
def outer_points(self, value):
self._outer_points = value
def make_callable(self, attribute):
"""This function transforms an attribute (thickness or offset) into a
callable function of theta
"""
# if the attribute is a list, create a interpolated object of the
# values
if isinstance(attribute, (tuple, list)):
if isinstance(attribute[0], (tuple, list)) and \
isinstance(attribute[1], (tuple, list)) and \
len(attribute) == 2:
# attribute is a list of 2 lists
if len(attribute[0]) != len(attribute[1]):
                    raise ValueError('The length of angles list must equal '
                                     'the length of values list')
list_of_angles = np.array(attribute[0])
offset_values = attribute[1]
else:
# no list of angles is given
offset_values = attribute
list_of_angles = np.linspace(
self.start_angle,
self.stop_angle,
len(offset_values),
endpoint=True)
interpolated_values = interp1d(list_of_angles, offset_values)
def fun(theta):
if callable(attribute):
return attribute(theta)
elif isinstance(attribute, (tuple, list)):
return interpolated_values(theta)
else:
return attribute
return fun
def find_points(self, angles=None):
self._overlapping_shape = False
# create array of angles theta
if angles is None:
thetas = np.linspace(
self.start_angle,
self.stop_angle,
num=self.num_points,
endpoint=True,
)
else:
thetas = angles
# create inner points
inner_offset = self.make_callable(self.offset_from_plasma)
inner_points = self.create_offset_points(thetas, inner_offset)
inner_points[-1][2] = "straight"
self.inner_points = inner_points
# create outer points
thickness = self.make_callable(self.thickness)
def outer_offset(theta):
return inner_offset(theta) + thickness(theta)
outer_points = self.create_offset_points(np.flip(thetas), outer_offset)
outer_points[-1][2] = "straight"
self.outer_points = outer_points
# assemble
points = inner_points + outer_points
if self._overlapping_shape:
msg = ("BlanketFP: Some points with negative R coordinate have "
"been ignored.")
warnings.warn(msg)
self.points = points
return points
def create_offset_points(self, thetas, offset):
"""generates a list of points following parametric equations with an
offset
Args:
thetas (np.array): the angles in degrees.
offset (callable): offset value (cm). offset=0 will follow the
parametric equations.
Returns:
list: list of points [[R1, Z1, connection1], [R2, Z2, connection2],
...]
"""
# create sympy objects and derivatives
theta_sp = sp.Symbol("theta")
R_sp, Z_sp = self.distribution(theta_sp, pkg=sp)
R_derivative = sp.diff(R_sp, theta_sp)
Z_derivative = sp.diff(Z_sp, theta_sp)
points = []
for theta in thetas:
# get local value of derivatives
val_R_derivative = float(R_derivative.subs("theta", theta))
val_Z_derivative = float(Z_derivative.subs("theta", theta))
# get normal vector components
nx = val_Z_derivative
ny = -val_R_derivative
# normalise normal vector
normal_vector_norm = (nx ** 2 + ny ** 2) ** 0.5
nx /= normal_vector_norm
ny /= normal_vector_norm
# calculate outer points
val_R_outer = self.distribution(theta)[0] + offset(theta) * nx
val_Z_outer = self.distribution(theta)[1] + offset(theta) * ny
if float(val_R_outer) > 0:
points.append(
[float(val_R_outer), float(val_Z_outer), "spline"])
else:
self._overlapping_shape = True
return points
def distribution(self, theta, pkg=np):
"""Plasma distribution theta in degrees
Args:
theta (float or np.array or sp.Symbol): the angle(s) in degrees.
            pkg (module, optional): Module to use in the function. If sp, a
                sympy object will be returned. If np, an np.array or a float
                will be returned. Defaults to np.
Returns:
(float, float) or (sympy.Add, sympy.Mul) or
(numpy.array, numpy.array): The R and Z coordinates of the
point with angle theta
"""
if pkg == np:
theta = np.radians(theta)
else:
theta = mpmath.radians(theta)
R = self.major_radius + self.minor_radius * pkg.cos(
theta + self.triangularity * pkg.sin(theta)
)
Z = (
self.elongation * self.minor_radius * pkg.sin(theta)
+ self.vertical_displacement
)
return R, Z
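# A minimal construction sketch (parameter values are illustrative only),
# showing the thickness forms described in the class docstring:
#
#     flat = BlanketFP(thickness=100, start_angle=-90, stop_angle=90)
#     tapered = BlanketFP(thickness=(50, 150), start_angle=-90, stop_angle=90)
#     angular = BlanketFP(thickness=lambda theta: 100 + 30 * np.sin(np.radians(theta)),
#                         start_angle=-90, stop_angle=90)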
| 36.456835
| 79
| 0.597139
| 9,802
| 0.967144
| 0
| 0
| 1,012
| 0.099852
| 0
| 0
| 3,413
| 0.336754
|
b7e4658365995b8bd790113c73797283daaf0910
| 907
|
py
|
Python
|
3.7.1/solution.py
|
luxnlex/stepic-python
|
92a4b25391f76935c3c2a70fb8552e7f93928d9b
|
[
"MIT"
] | 1
|
2021-05-07T18:20:51.000Z
|
2021-05-07T18:20:51.000Z
|
3.7.1/solution.py
|
luxnlex/stepic-python
|
92a4b25391f76935c3c2a70fb8552e7f93928d9b
|
[
"MIT"
] | null | null | null |
3.7.1/solution.py
|
luxnlex/stepic-python
|
92a4b25391f76935c3c2a70fb8552e7f93928d9b
|
[
"MIT"
] | 2
|
2017-12-27T07:51:57.000Z
|
2020-08-03T22:10:55.000Z
|
# Simple substitution cipher: the first input line is the plaintext alphabet,
# the second the matching cipher alphabet. The third line is then encoded and
# the fourth line decoded with that mapping.
plain = input()
cipher = input()
# Build the char-to-char encoding table (pairs plain[i] with cipher[i]).
table = {}
for i in range(len(plain)):
    table[plain[i]] = cipher[i]
# Encode: substitute every character that has a mapping.
encoded = [table[ch] for ch in input() if ch in table]
# Decode: reverse lookup through the table (keeps every matching key).
decoded = []
for ch in input():
    for key in table:
        if table[key] == ch:
            decoded.append(key)
print(''.join(encoded))
print(''.join(decoded), end='')
| 14.868852
| 31
| 0.485116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.00441
|
b7e4fae61f0aabd32e88f180183fcddc115ab0ca
| 4,352
|
py
|
Python
|
airbyte-integrations/connectors/source-plaid/source_plaid/source.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 2
|
2022-03-02T13:46:05.000Z
|
2022-03-05T12:31:28.000Z
|
airbyte-integrations/connectors/source-plaid/source_plaid/source.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 29
|
2021-10-07T17:20:29.000Z
|
2021-12-27T13:07:09.000Z
|
airbyte-integrations/connectors/source-plaid/source_plaid/source.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 1
|
2021-07-30T07:24:51.000Z
|
2021-07-30T07:24:51.000Z
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import datetime
import json
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple, Union
import plaid
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from plaid.api import plaid_api
from plaid.model.accounts_balance_get_request import AccountsBalanceGetRequest
from plaid.model.transactions_get_request import TransactionsGetRequest
SPEC_ENV_TO_PLAID_ENV = {
"production": plaid.Environment.Production,
"development": plaid.Environment.Development,
"sandbox": plaid.Environment.Sandbox,
}
class PlaidStream(Stream):
def __init__(self, config: Mapping[str, Any]):
plaid_config = plaid.Configuration(
host=SPEC_ENV_TO_PLAID_ENV[config["plaid_env"]], api_key={"clientId": config["client_id"], "secret": config["api_key"]}
)
api_client = plaid.ApiClient(plaid_config)
self.client = plaid_api.PlaidApi(api_client)
self.access_token = config["access_token"]
class BalanceStream(PlaidStream):
@property
def name(self):
return "balance"
@property
def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
return "account_id"
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
balance_response = self.client.accounts_balance_get(AccountsBalanceGetRequest(access_token=self.access_token))
for balance in balance_response["accounts"]:
message_dict = balance["balances"].to_dict()
message_dict["account_id"] = balance["account_id"]
yield message_dict
class IncrementalTransactionStream(PlaidStream):
@property
def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
return "transaction_id"
@property
def name(self):
return "transaction"
@property
def source_defined_cursor(self) -> bool:
return True
@property
def cursor_field(self) -> Union[str, List[str]]:
return "date"
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
return {"date": latest_record.get("date")}
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
stream_state = stream_state or {}
date = stream_state.get("date")
if not date:
date = datetime.date.fromtimestamp(0)
else:
date = datetime.date.fromisoformat(date)
if date >= datetime.datetime.utcnow().date():
return
transaction_response = self.client.transactions_get(
TransactionsGetRequest(access_token=self.access_token, start_date=date, end_date=datetime.datetime.utcnow().date())
)
yield from map(lambda x: x.to_dict(), sorted(transaction_response["transactions"], key=lambda t: t["date"]))
class SourcePlaid(AbstractSource):
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
try:
plaid_config = plaid.Configuration(
host=SPEC_ENV_TO_PLAID_ENV[config["plaid_env"]], api_key={"clientId": config["client_id"], "secret": config["api_key"]}
)
api_client = plaid.ApiClient(plaid_config)
client = plaid_api.PlaidApi(api_client)
try:
request = AccountsBalanceGetRequest(access_token=config["access_token"])
client.accounts_balance_get(request)
return True, None
except plaid.ApiException as e:
response = json.loads(e.body)
return False, response
except Exception as error:
return False, error
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
return [BalanceStream(config), IncrementalTransactionStream(config)]
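# A minimal config sketch (credential values are placeholders, not real):
#
#     config = {
#         "client_id": "<plaid-client-id>",
#         "api_key": "<plaid-secret>",
#         "access_token": "<item-access-token>",
#         "plaid_env": "sandbox",
#     }
#     ok, error = SourcePlaid().check_connection(logger, config)
#     streams = SourcePlaid().streams(config)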
| 35.966942
| 135
| 0.667509
| 3,617
| 0.831112
| 1,402
| 0.322151
| 506
| 0.116268
| 0
| 0
| 356
| 0.081801
|
b7e5547eb715244c2608406503ff045d83d45b75
| 17,939
|
py
|
Python
|
demo/demo.py
|
taewhankim/DeepHRnet
|
c316b4a9f5f3002f6fcc0398c12d80de82195ef0
|
[
"MIT"
] | null | null | null |
demo/demo.py
|
taewhankim/DeepHRnet
|
c316b4a9f5f3002f6fcc0398c12d80de82195ef0
|
[
"MIT"
] | null | null | null |
demo/demo.py
|
taewhankim/DeepHRnet
|
c316b4a9f5f3002f6fcc0398c12d80de82195ef0
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import shutil
from PIL import Image
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import cv2
import numpy as np
import time
import math
import _init_paths
import models
from config import cfg
from config import update_config
from core.function import get_final_preds
from utils.transforms import get_affine_transform
COCO_KEYPOINT_INDEXES = {
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
}
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
SKELETON = [
[5, 7], [7, 9],[5, 6],[6, 8], [8, 10]
]
## Modified: original SKELETON commented out below
# SKELETON = [
# [1, 3], [1, 0], [2, 4], [2, 0], [0, 5], [0, 6], [5, 7], [7, 9], [6, 8], [8, 10], [5, 11], [6, 12], [11, 12],
# [11, 13], [13, 15], [12, 14], [14, 16]
#]
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
NUM_KPTS = 17
CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def draw_pose(keypoints, img):
"""draw the keypoints and the skeletons.
    :param keypoints: keypoint coordinates; shape should equal [17, 2]
    :param img: image to draw on
"""
    # Modified (original assertion and drawing loop commented out below)
# assert keypoints.shape == (NUM_KPTS, 2)
# for i in range(len(SKELETON)):
# kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]
# x_a, y_a = keypoints[kpt_a][0], keypoints[kpt_a][1]
# x_b, y_b = keypoints[kpt_b][0], keypoints[kpt_b][1]
# cv2.circle(img, (int(x_a), int(y_a)), 6, CocoColors[i], -1)
# cv2.circle(img, (int(x_b), int(y_b)), 6, CocoColors[i], -1)
# cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 2)
for i in range(len(SKELETON)):
kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]
x_a, y_a = keypoints[kpt_a][0], keypoints[kpt_a][1]
x_b, y_b = keypoints[kpt_b][0], keypoints[kpt_b][1]
cv2.circle(img, (int(x_a), int(y_a)), 10, CocoColors[i], -1)
cv2.circle(img, (int(x_b), int(y_b)), 10, CocoColors[i], -1)
cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 7)
def draw_bbox(box, img):
"""draw the detected bounding box on the image.
    :param box: bounding box as [(x1, y1), (x2, y2)]
    :param img: image to draw on
"""
cv2.rectangle(img, box[0], box[1], color=(0, 255, 0), thickness=3)
def get_person_detection_boxes(model, img, threshold=0.5):
pred = model(img)
pred_classes = [COCO_INSTANCE_CATEGORY_NAMES[i]
for i in list(pred[0]['labels'].cpu().numpy())] # Get the Prediction Score
pred_boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))]
for i in list(pred[0]['boxes'].detach().cpu().numpy())] # Bounding boxes
pred_score = list(pred[0]['scores'].detach().cpu().numpy())
if not pred_score or max(pred_score) < threshold:
return []
# Get list of index with score greater than threshold
pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1]
pred_boxes = pred_boxes[:pred_t + 1]
pred_classes = pred_classes[:pred_t + 1]
person_boxes = []
for idx, box in enumerate(pred_boxes):
if pred_classes[idx] == 'person':
person_boxes.append(box)
return person_boxes
def get_pose_estimation_prediction(pose_model, image, center, scale):
rotation = 0
# pose estimation transformation
trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)
model_input = cv2.warpAffine(
image,
trans,
(int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
flags=cv2.INTER_LINEAR)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# pose estimation inference
model_input = transform(model_input).unsqueeze(0)
# switch to evaluate mode
pose_model.eval()
with torch.no_grad():
# compute output heatmap
output = pose_model(model_input)
preds, _ = get_final_preds(
cfg,
output.clone().cpu().numpy(),
np.asarray([center]),
np.asarray([scale]))
return preds
def box_to_center_scale(box, model_image_width, model_image_height):
"""convert a box to center,scale information required for pose transformation
Parameters
----------
box : list of tuple
list of length 2 with two tuples of floats representing
bottom left and top right corner of a box
model_image_width : int
model_image_height : int
Returns
-------
(numpy array, numpy array)
Two numpy arrays, coordinates for the center of the box and the scale of the box
"""
center = np.zeros((2), dtype=np.float32)
bottom_left_corner = box[0]
top_right_corner = box[1]
box_width = top_right_corner[0] - bottom_left_corner[0]
box_height = top_right_corner[1] - bottom_left_corner[1]
bottom_left_x = bottom_left_corner[0]
bottom_left_y = bottom_left_corner[1]
center[0] = bottom_left_x + box_width * 0.5
center[1] = bottom_left_y + box_height * 0.5
aspect_ratio = model_image_width * 1.0 / model_image_height
pixel_std = 200
if box_width > aspect_ratio * box_height:
box_height = box_width * 1.0 / aspect_ratio
elif box_width < aspect_ratio * box_height:
box_width = box_height * aspect_ratio
scale = np.array(
[box_width * 1.0 / pixel_std, box_height * 1.0 / pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, default='./inference-config.yaml')
parser.add_argument('--video', type=str)
parser.add_argument('--webcam', action='store_true')
# parser.add_argument('--image', type=str)
parser.add_argument('--folder', type=str)
parser.add_argument('--write', action='store_true')
parser.add_argument('--showFps', action='store_true')
parser.add_argument('--outputDir', type=str, default='/output/', help='output path')
parser.add_argument('opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# args expected by supporting codebase
args.modelDir = ''
args.logDir = ''
args.dataDir = ''
args.prevModelDir = ''
return args
def getAngle(a, b, c):
ang = math.degrees(math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0]))
if abs(ang)>=180:
return 360- abs(ang)
else:
return abs(ang)
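# Worked example: with a=(1, 0), b=(0, 0), c=(0, 1) the angle at vertex b is
# atan2(1, 0) - atan2(0, 1) = 90 degrees, so getAngle((1, 0), (0, 0), (0, 1))
# returns 90.0; results are always folded into the range [0, 180].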
def main():
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
args = parse_args()
update_config(cfg, args)
box_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
box_model.to(CTX)
box_model.eval()
pose_model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
cfg, is_train=False
)
if cfg.TEST.MODEL_FILE:
print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
else:
print('expected model defined in config at TEST.MODEL_FILE')
pose_model = torch.nn.DataParallel(pose_model, device_ids=cfg.GPUS)
pose_model.to(CTX)
pose_model.eval()
    # Load a video, an image folder, or a webcam
if args.webcam:
vidcap = cv2.VideoCapture(-1)
elif args.video:
vidcap = cv2.VideoCapture(args.video)
    # Modified: --image input replaced by --folder
# elif args.image:
# image_bgr = cv2.imread(args.image)
elif args.folder:
image_list = os.listdir(args.folder)
else:
        print('please use --video or --webcam or --folder to define the input.')
return
csv_output_rows = []
c=0
if args.webcam or args.video:
if args.write:
save_path = '/mnt/dms/prac/output.avi'
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(save_path, fourcc, 24.0, (int(vidcap.get(3)), int(vidcap.get(4))))
while True:
ret, image_bgr = vidcap.read()
if ret:
last_time = time.time()
image = image_bgr[:, :, [2, 1, 0]]
input = []
img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
img_tensor = torch.from_numpy(img / 255.).permute(2, 0, 1).float().to(CTX)
input.append(img_tensor)
# object detection box
pred_boxes = get_person_detection_boxes(box_model, input, threshold=0.5)
                pred_boxes = pred_boxes[:1]  # keep at most the first person box
for box in pred_boxes:
cv2.rectangle(image_bgr, box[0], box[1], color=(0, 255, 0),
thickness=3)
new_csv_row = []
# pose estimation
if len(pred_boxes) >= 1:
for box in pred_boxes:
csv_row = []
center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
pose_preds = get_pose_estimation_prediction(pose_model, image_pose, center, scale)
if len(pose_preds) >= 1:
for kpt in pose_preds:
draw_pose(kpt, image_bgr)
for coord in kpt[5:11]:
x_coord, y_coord = int(coord[0]), int(coord[1])
new_csv_row.extend([x_coord, y_coord])
# draw the poses
new_coord = list(zip(new_csv_row[0::2], new_csv_row[1::2]))
ang1 = new_coord[4::-2]
ang2 = [new_coord[2], new_coord[0], new_coord[1]]
ang3 = [new_coord[0], new_coord[1], new_coord[3]]
ang4 = [new_coord[1], new_coord[3], new_coord[5]]
angles = [ang1, ang2, ang3, ang4]
for i in angles:
new_csv_row.append(getAngle(i[0], i[1], i[2]))
if args.showFps:
fps = 1 / (time.time() - last_time)
cv2.putText(image_bgr, 'fps: ' + "%.2f" % (fps), (25, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
(0, 255, 0), 2)
video_name = os.path.splitext(os.path.basename(args.video))[0]
img_file_name = video_name+'_frame_'+str(c)+'.jpg'
new_csv_row.insert(0, img_file_name)
csv_output_rows.append(new_csv_row)
img_path = os.path.join(args.outputDir, 'frame_img')
if not os.path.isdir(img_path):
os.mkdir(img_path)
cv2.imwrite(os.path.join(img_path,img_file_name), image_bgr)
c+=1
if args.write:
out.write(image_bgr)
print('{}_finish'.format(img_file_name))
# cv2.imshow('demo', image_bgr)
# if cv2.waitKey(1) & 0XFF == ord('q'):
# break
else:
print('cannot load the video.')
break
csv_headers = ['Frame']
        for keypoint in COCO_KEYPOINT_INDEXES.values():
csv_headers.extend([keypoint+'_x', keypoint+'_y'])
new_csv_headers = [i for i in csv_headers[11:23]]
new_csv_headers.insert(0,csv_headers[0])
new_csv_headers.extend(["LW_LL_LS","LL_LS_RS","LS_RS_RL","RS_RL_RW"])
csv_output_filename = os.path.join(args.outputDir, f'{video_name}_coord_data.csv')
with open(csv_output_filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(new_csv_headers)
csvwriter.writerows(csv_output_rows)
cv2.destroyAllWindows()
vidcap.release()
if args.write:
print('video has been saved as {}'.format(save_path))
out.release()
return csv_output_rows
else:
image_list.sort()
if "Thumbs.db" in image_list:
image_list.remove("Thumbs.db")
if "@eaDir" in image_list:
image_list.remove("@eaDir")
if '.DS_Store' in image_list:
image_list.remove('.DS_Store')
for imgs in image_list:
img_ori_path = os.path.join(args.folder,imgs)
image_bgr = cv2.imread(img_ori_path)
last_time = time.time()
image = image_bgr[:, :, [2, 1, 0]]
input = []
img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
img_tensor = torch.from_numpy(img / 255.).permute(2, 0, 1).float().to(CTX)
input.append(img_tensor)
# object detection box
pred_boxes = get_person_detection_boxes(box_model, input, threshold=0.9)
            pred_boxes = pred_boxes[:1]  # keep at most the first person box
new_csv_row = []
# pose estimation
if len(pred_boxes) >= 1:
for box in pred_boxes:
center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
pose_preds = get_pose_estimation_prediction(pose_model, image_pose, center, scale)
if len(pose_preds) >= 1:
for kpt in pose_preds:
draw_pose(kpt, image_bgr) # draw the poses
for coord in kpt[5:11]:
x_coord, y_coord = int(coord[0]), int(coord[1])
new_csv_row.extend([x_coord, y_coord])
# draw the poses
new_coord = list(zip(new_csv_row[0::2], new_csv_row[1::2]))
ang1 = new_coord[4::-2]
ang2 = [new_coord[2], new_coord[0], new_coord[1]]
ang3 = [new_coord[0], new_coord[1], new_coord[3]]
ang4 = [new_coord[1], new_coord[3], new_coord[5]]
angles = [ang1, ang2, ang3, ang4]
for i in angles:
new_csv_row.append(getAngle(i[0], i[1], i[2]))
if args.showFps:
fps = 1 / (time.time() - last_time)
cv2.putText(image_bgr, 'fps: ' + "%.2f" % (fps), (25, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0),
2)
kpt_img_file = 'kpt'+'_'+str(c)+'_'+imgs
new_csv_row.insert(0, imgs)
csv_output_rows.append(new_csv_row)
img_path = os.path.join(args.outputDir, 'kpt_img')
if not os.path.isdir(img_path):
os.mkdir(img_path)
cv2.imwrite(os.path.join(img_path, kpt_img_file), image_bgr)
c += 1
print('the result image has been saved as {}'.format(imgs))
csv_headers = ['Image']
for keypoint in COCO_KEYPOINT_INDEXES.values():
csv_headers.extend([keypoint + '_x', keypoint + '_y'])
new_csv_headers = [i for i in csv_headers[11:23]]
new_csv_headers.insert(0, csv_headers[0])
new_csv_headers.extend(["LW_LL_LS", "LL_LS_RS", "LS_RS_RL", "RS_RL_RW"])
for_csv = os.path.basename(os.path.dirname(args.outputDir))
csv_output_filename = os.path.join(args.outputDir, f'{for_csv}_coord_data.csv')
with open(csv_output_filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(new_csv_headers)
csvwriter.writerows(csv_output_rows)
return csv_output_rows
#
# cv2.imshow('demo', image_bgr)
# if cv2.waitKey(0) & 0XFF == ord('q'):
# cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 36.911523
| 118
| 0.572997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,891
| 0.216708
|
b7e6129db622711592b894cfa7f14f8bbe198a09
| 2,749
|
py
|
Python
|
feemodeldata/plotting/plotwaits.py
|
bitcoinfees/bitcoin-feemodel-data
|
3eb09cf2a64b1aa23d328484bbcd7e4d55291898
|
[
"MIT"
] | 2
|
2015-07-10T20:14:54.000Z
|
2017-06-08T11:01:03.000Z
|
feemodeldata/plotting/plotwaits.py
|
bitcoinfees/bitcoin-feemodel-data
|
3eb09cf2a64b1aa23d328484bbcd7e4d55291898
|
[
"MIT"
] | null | null | null |
feemodeldata/plotting/plotwaits.py
|
bitcoinfees/bitcoin-feemodel-data
|
3eb09cf2a64b1aa23d328484bbcd7e4d55291898
|
[
"MIT"
] | null | null | null |
from __future__ import division
import sqlite3
from bisect import bisect_left
import plotly.plotly as py
from plotly.graph_objs import Scatter, Figure, Layout, Data, YAxis, XAxis
from feemodel.util import DataSample
from feemodel.app.predict import PVALS_DBFILE
from feemodeldata.plotting.plotrrd import BASEDIR
def get_waits(dbfile=PVALS_DBFILE):
db = None
try:
db = sqlite3.connect(dbfile)
txs = db.execute("select feerate, waittime from txs").fetchall()
blockheights = db.execute("select blockheight from txs").fetchall()
blockheights = [tx[0] for tx in blockheights]
return txs, min(blockheights), max(blockheights)
finally:
if db is not None:
db.close()
def get_txgroups(txs, feerates=(10000, 15000, 20000, 50000)):
"""Sort the txs by feerate."""
txs.sort()
txfeerates, _dum = zip(*txs)
idxs = [bisect_left(txfeerates, feerate) for feerate in feerates]
idxs.insert(0, 0)
print("idxs are {}.".format(idxs))
txgroups = [txs[idxs[i]:idxs[i+1]] for i in range(len(idxs)-1)]
return txgroups
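# Worked example of the grouping: with the default thresholds
# (10000, 15000, 20000, 50000), the sorted txs are split into the feerate
# bands [min, 10000), [10000, 15000), [15000, 20000) and [20000, 50000);
# txs at or above the top threshold are dropped.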
def get_traces(txgroups):
traces = []
for txgroup in txgroups:
feerates, waits = zip(*txgroup)
minfeerate = min(feerates)
maxfeerate = max(feerates)
waitdata = DataSample(waits)
percentilepts = [i / 100 for i in range(1, 99)]
percentiles = [waitdata.get_percentile(p) for p in percentilepts]
percentilepts.insert(0, 0)
percentiles.insert(0, 0)
trace = Scatter(
x=percentiles,
y=percentilepts,
name="{} <= feerate <= {}".format(minfeerate, maxfeerate)
)
traces.append(trace)
return traces
def plotwaits(traces, minheight, maxheight, basedir=BASEDIR):
title = ("Empirical CDF of waittimes from blocks {}-{}".
format(minheight, maxheight))
data = Data(traces)
layout = Layout(
title=title,
yaxis=YAxis(
title="Empirical CDF",
range=[0, 1]
),
xaxis=XAxis(
title="Wait time (s)",
rangemode="tozero",
type="log"
),
hovermode="closest"
)
fig = Figure(data=data, layout=layout)
basedir = basedir if basedir.endswith('/') else basedir + '/'
filename = basedir + "waits_cdf"
return py.plot(fig, filename=filename, auto_open=False)
def main(basedir=BASEDIR):
txs, minheight, maxheight = get_waits(PVALS_DBFILE)
print("Got {} txs.".format(len(txs)))
txgroups = get_txgroups(txs)
print("Got txgroups.")
traces = get_traces(txgroups)
print("Got traces.")
url = plotwaits(traces, minheight, maxheight, basedir=basedir)
print(url)
| 28.936842
| 75
| 0.628592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 285
| 0.103674
|
b7e7c6200dfbf2600bb1a1bc581331cb427697e7
| 5,181
|
py
|
Python
|
utils/pytorch_utils.py
|
shoegazerstella/BTC-ISMIR19
|
fc4c8ef792711460d98b502ddc2e5befc800d2e5
|
[
"MIT"
] | 1
|
2020-07-23T23:46:24.000Z
|
2020-07-23T23:46:24.000Z
|
utils/pytorch_utils.py
|
shoegazerstella/BTC-ISMIR19
|
fc4c8ef792711460d98b502ddc2e5befc800d2e5
|
[
"MIT"
] | null | null | null |
utils/pytorch_utils.py
|
shoegazerstella/BTC-ISMIR19
|
fc4c8ef792711460d98b502ddc2e5befc800d2e5
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import math
from utils import logger
use_cuda = torch.cuda.is_available()
# utility
def to_var(x, dtype=None):
if type(x) is np.ndarray:
x = torch.from_numpy(x)
elif type(x) is list:
x = torch.from_numpy(np.array(x, dtype=dtype))
if use_cuda:
x = x.cuda()
return Variable(x)
# optimization
# reference: http://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#ReduceLROnPlateau
def adjusting_learning_rate(optimizer, factor=.5, min_lr=0.00001):
for i, param_group in enumerate(optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr*factor, min_lr)
param_group['lr'] = new_lr
logger.info('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))
def lr_annealing_function(step, start=0, end=1, r=0.9999, type="exp"):
    if type == "exp":
        lr = start - (start - end) * (1 - math.pow(r, step))
    else:
        # An unknown annealing type previously left `lr` unbound and crashed
        # on return; fail explicitly instead.
        raise ValueError("not available %s annealing" % type)
    return lr
def update_lr(optimizer, new_lr):
old_lr = optimizer.param_groups[0]['lr']
# logger.info("adjusting learning rate from %.6f to %.6f" % (old_lr, new_lr))
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] = new_lr
def transformer_learning_rate(optimizer, model_dim, step_num, warmup_steps=4000):
for i, param_group in enumerate(optimizer.param_groups):
new_lr = model_dim**(-0.5) * min(step_num**(-0.5), step_num*warmup_steps**(-1.5))
old_lr = float(param_group['lr'])
# new_lr = max(old_lr*factor, min_lr)
param_group['lr'] = new_lr
logger.info('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))
# model save and loading
def load_model(asset_path, model, optimizer, restore_epoch=0):
if os.path.isfile(os.path.join(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)):
checkpoint = torch.load(os.path.join(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch))
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
current_step = checkpoint['current_step']
logger.info("restore model with %d epoch" % restore_epoch)
else:
logger.info("no checkpoint with %d epoch" % restore_epoch)
current_step = 0
return model, optimizer, current_step
# class weighted_BCELoss(Module):
# def __init__(self, mode):
# self.mode = mode
#
# def forward(self, input, target, weight=10):
# if not (input.size() == target.size()):
# raise ValueError("Target and input must have the same size. target size ({}) "
# "!= input size ({})".format(target.size(), input.size()))
# loss_matrix = - (torch.mul(target, input.log()) + torch.mul(1 - target, (1 - input).log()))
# one_matrix = Variable(torch.ones(input.size()))
# if use_cuda:
# one_matrix = one_matrix.cuda()
# if self.mode == 'one':
# weight_matrix = (weight - 1) * target + one_matrix
# elif self.mode == 'pitch':
#
# weighted_loss_matrix = torch.mul(loss_matrix, weight_matrix)
# return torch.mean(weighted_loss_matrix)
# loss
def weighted_binary_cross_entropy(output, target, weights=None, eps=1e-12):
if weights is not None:
assert len(weights) == 2
loss = weights[1] * (target * torch.log(output + eps)) + \
weights[0] * ((1 - target) * torch.log(1 - output + eps))
else:
loss = target * torch.log(output + eps) + (1 - target) * torch.log(1 - output + eps)
return torch.neg(torch.mean(loss))
def kl_divergence(mu, sig, num_latent_group=0, freebits_ratio=2., p_mu=None, p_sigma=None, eps=1e-8):
# calculate kl divergence between two normal distribution
# mu, sig, p_mu, p_sigma: batch_size * latent_size
batch_size = mu.size(0)
latent_size = mu.size(1)
mu_square = mu * mu
sig_square = sig * sig
if p_mu is None:
kl = 0.5 * (mu_square + sig_square - torch.log(sig_square + eps) - 1)
else:
p_sig_square = p_sigma * p_sigma
p_mu_diff_square = (mu - p_mu) * (mu - p_mu)
kl = (sig_square + p_mu_diff_square)/(2*p_sig_square)
kl += torch.log(p_sigma/sig + eps)
kl -= 0.5
if num_latent_group == 0:
kl = torch.sum(kl) / batch_size
else:
group_size = latent_size // num_latent_group
kl = kl.mean(0) # mean along batch dimension
kl = kl.view(-1, group_size).sum(1) # summation along group dimension
kl = torch.clamp(kl, min=freebits_ratio) # clipping kl value
kl = kl.sum()
return kl
def vae_loss(target, prediction, mu, sig,
num_latent_group=0, freebits_ratio=2., kl_ratio=1., p_mu=None, p_sigma=None):
rec_loss = F.binary_cross_entropy(prediction, target)
kl_loss = kl_divergence(mu, sig, num_latent_group, freebits_ratio, p_mu, p_sigma)
total_loss = rec_loss + kl_ratio * kl_loss
return total_loss, rec_loss, kl_loss
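# A minimal usage sketch (shapes are illustrative): `prediction` must already
# lie in (0, 1), e.g. a sigmoid output, since binary cross-entropy is used.
#
#     target = torch.rand(8, 128).round()
#     prediction = torch.rand(8, 128)
#     mu, sig = torch.zeros(8, 32), torch.ones(8, 32)
#     total, rec, kl = vae_loss(target, prediction, mu, sig)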
| 35.244898
| 107
| 0.640803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,599
| 0.308628
|
b7e805c3fdc6130f33ad7d70c4f57afa4833b9f9
| 3,630
|
py
|
Python
|
ecosante/users/schemas/__init__.py
|
betagouv/recosante-api
|
4560b2cf2ff4dc19597792fe15a3805f6259201d
|
[
"MIT"
] | 3
|
2021-09-24T14:07:51.000Z
|
2021-12-14T13:48:34.000Z
|
ecosante/users/schemas/__init__.py
|
betagouv/recosante-api
|
4560b2cf2ff4dc19597792fe15a3805f6259201d
|
[
"MIT"
] | 187
|
2021-03-25T16:43:49.000Z
|
2022-03-23T14:40:31.000Z
|
ecosante/users/schemas/__init__.py
|
betagouv/recosante-api
|
4560b2cf2ff4dc19597792fe15a3805f6259201d
|
[
"MIT"
] | null | null | null |
from marshmallow import Schema, ValidationError, post_load
from marshmallow.validate import OneOf, Length
from marshmallow.fields import Bool, Str, List, Nested, Email
from flask_rebar import ResponseSchema, RequestSchema, errors
from ecosante.inscription.models import Inscription
from ecosante.utils.custom_fields import TempList
from ecosante.api.schemas.commune import CommuneSchema
from ecosante.extensions import celery
from indice_pollution.history.models import Commune as CommuneModel
from flask import request
def list_str(choices, max_length=None, temp=False, **kwargs):
t = TempList if temp else List
return t(
Str(validate=OneOf(choices=choices)),
required=False,
allow_none=True,
validate=Length(min=0, max=max_length) if max_length else None,
**kwargs
)
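# Example of the generated field (choices are illustrative):
# list_str(["velo", "tec"], max_length=1) yields an optional list of at most
# one element, each restricted to the given choices; temp=True swaps in the
# TempList field type instead of marshmallow's List.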
class User(Schema):
commune = Nested(CommuneSchema, required=False, allow_none=True)
uid = Str(dump_only=True)
mail = Email(required=True)
deplacement = list_str(["velo", "tec", "voiture", "aucun"])
activites = list_str(["jardinage", "bricolage", "menage", "sport", "aucun"])
enfants = list_str(["oui", "non", "aucun"], temp=True)
chauffage = list_str(["bois", "chaudiere", "appoint", "aucun"])
animaux_domestiques = list_str(["chat", "chien", "aucun"])
connaissance_produit = list_str(["medecin", "association", "reseaux_sociaux", "publicite", "ami", "autrement"])
population = list_str(["pathologie_respiratoire", "allergie_pollens", "aucun"])
indicateurs = list_str(["indice_atmo", "raep", "indice_uv", "vigilance_meteorologique"])
indicateurs_frequence = list_str(["quotidien", "hebdomadaire", "alerte"], 1)
indicateurs_media = list_str(["mail", "notifications_web"])
recommandations = list_str(["oui", "non"], 1, attribute='recommandations_actives')
recommandations_frequence = list_str(["quotidien", "hebdomadaire", "pollution"], 1)
recommandations_media = list_str(["mail", "notifications_web"])
webpush_subscriptions_info = Str(required=False, allow_none=True, load_only=True)
class Response(User, ResponseSchema):
is_active = Bool(attribute='is_active')
class RequestPOST(User, RequestSchema):
@post_load
def make_inscription(self, data, **kwargs):
inscription = Inscription.query.filter(Inscription.mail.ilike(data['mail'])).first()
if inscription:
raise ValidationError('mail already used', field_name='mail')
inscription = Inscription(**data)
return inscription
class RequestPOSTID(User, RequestSchema):
def __init__(self, **kwargs):
super_kwargs = dict(kwargs)
partial_arg = super_kwargs.pop('partial', ['mail'])
super(RequestPOSTID, self).__init__(partial=partial_arg, **super_kwargs)
@post_load
def make_inscription(self, data, **kwargs):
uid = request.view_args.get('uid')
if not uid:
raise ValidationError('uid is required')
inscription = Inscription.query.filter_by(uid=uid).first()
if not inscription:
raise errors.NotFound('uid unknown')
if 'mail' in data:
inscription_same_mail = Inscription.query.filter(
Inscription.uid != uid,
Inscription.mail == data['mail']
).first()
if inscription_same_mail:
raise errors.Conflict('user with this mail already exists')
for k, v in data.items():
setattr(inscription, k, v)
return inscription
class RequestUpdateProfile(Schema):
mail = Email(required=True)
| 43.214286
| 115
| 0.689532
| 2,754
| 0.758678
| 0
| 0
| 1,051
| 0.289532
| 0
| 0
| 607
| 0.167218
|
b7ea33cae6c817255b7381a86f5b2cf3631857b7
| 933
|
py
|
Python
|
Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py
|
marcoshsq/python_practical_exercises
|
77136cd4bc0f34acde3380ffdc5af74f7a960670
|
[
"MIT"
] | 9
|
2022-03-22T16:45:17.000Z
|
2022-03-25T20:22:35.000Z
|
Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py
|
marcoshsq/python_practical_exercises
|
77136cd4bc0f34acde3380ffdc5af74f7a960670
|
[
"MIT"
] | null | null | null |
Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py
|
marcoshsq/python_practical_exercises
|
77136cd4bc0f34acde3380ffdc5af74f7a960670
|
[
"MIT"
] | 3
|
2022-03-22T17:03:38.000Z
|
2022-03-29T17:20:55.000Z
|
import math
# Exercise 017: Right Triangle
"""Write a program that reads the length of the opposite side and the adjacent side of a right triangle.
Calculate and display the length of the hypotenuse."""
# To do this we will use the Pythagorean theorem: a^2 = b^2 + c^2
# Method 01, without the module Math:
# First we ask for the leg values
leg_a = float(input("Enter the value of leg a: "))
leg_b = float(input("Enter the value of leg b: "))
# Then we do the Pythagorean theorem: sqrt((leg_a^2)+(leg_b^2))
hyp = ((leg_a**2) + (leg_b**2)) ** 0.5
print(f"The triangle hypotenuse measures {hyp:.2f} m.u. ")
# Method 02, with the module using pow function:
hypo = math.sqrt(math.pow(leg_a, 2) + math.pow(leg_b, 2))
print(f"The triangle hypotenuse measures {hypo:.2f} m.u. ")
# Method 03, using the module's hypot function:
hypot = math.hypot(leg_a, leg_b)
print(f"The triangle hypotenuse measures {hypot:.2f} m.u. ")
| 38.875
| 104
| 0.710611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 708
| 0.758842
|
b7ebf597cf4af041d284ceb92dfc3840fcf8cea7
| 146
|
py
|
Python
|
annuaire/commands/__init__.py
|
djacomy/layer-annuaire
|
b0312534e31dd98d98568a83918cf7dd583aa4c7
|
[
"MIT"
] | null | null | null |
annuaire/commands/__init__.py
|
djacomy/layer-annuaire
|
b0312534e31dd98d98568a83918cf7dd583aa4c7
|
[
"MIT"
] | null | null | null |
annuaire/commands/__init__.py
|
djacomy/layer-annuaire
|
b0312534e31dd98d98568a83918cf7dd583aa4c7
|
[
"MIT"
] | null | null | null |
"""Package groups the different commands modules."""
from annuaire.commands import download, import_lawyers
__all__ = ["download", "import_lawyers"]
| 29.2
| 54
| 0.80137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.356164
|
b7eda2093d6d54b12bba13592c13c99ac642ca74
| 15,883
|
py
|
Python
|
eventsourcing/application/actors.py
|
vladimirnani/eventsourcing
|
f49d2b9aaa585073aca4dc20c59d46db5a14eb57
|
[
"BSD-3-Clause"
] | 1
|
2020-02-10T08:12:31.000Z
|
2020-02-10T08:12:31.000Z
|
eventsourcing/application/actors.py
|
vladimirnani/eventsourcing
|
f49d2b9aaa585073aca4dc20c59d46db5a14eb57
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/application/actors.py
|
vladimirnani/eventsourcing
|
f49d2b9aaa585073aca4dc20c59d46db5a14eb57
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from thespian.actors import *
from eventsourcing.application.process import ProcessApplication, Prompt
from eventsourcing.application.system import System, SystemRunner
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.interface.notificationlog import RecordManagerNotificationLog
logger = logging.getLogger()
# Todo: Send timer message to run slave every so often (in master or slave?).
DEFAULT_ACTORS_LOGCFG = {
'version': 1,
'formatters': {
'normal': {
'format': '%(levelname)-8s %(message)s'
}
},
'handlers': {
# 'h': {
# 'class': 'logging.FileHandler',
# 'filename': 'hello.log',
# 'formatter': 'normal',
# 'level': logging.INFO
# }
},
'loggers': {
# '': {'handlers': ['h'], 'level': logging.DEBUG}
}
}
def start_actor_system(system_base=None, logcfg=DEFAULT_ACTORS_LOGCFG):
ActorSystem(
systemBase=system_base,
logDefs=logcfg,
)
def shutdown_actor_system():
ActorSystem().shutdown()
def start_multiproc_tcp_base_system():
start_actor_system(system_base='multiprocTCPBase')
# def start_multiproc_udp_base_system():
# start_actor_system(system_base='multiprocUDPBase')
#
#
# def start_multiproc_queue_base_system():
# start_actor_system(system_base='multiprocQueueBase')
class ActorModelRunner(SystemRunner):
"""
Uses actor model framework to run a system of process applications.
"""
def __init__(self, system: System, pipeline_ids, system_actor_name='system', shutdown_on_close=False, **kwargs):
super(ActorModelRunner, self).__init__(system=system, **kwargs)
self.pipeline_ids = list(pipeline_ids)
self.pipeline_actors = {}
self.system_actor_name = system_actor_name
# Create the system actor (singleton).
self.system_actor = self.actor_system.createActor(
actorClass=SystemActor,
globalName=self.system_actor_name
)
self.shutdown_on_close = shutdown_on_close
@property
def actor_system(self):
return ActorSystem()
def start(self):
"""
Starts all the actors to run a system of process applications.
"""
# Subscribe to broadcast prompts published by a process
# application in the parent operating system process.
subscribe(handler=self.forward_prompt, predicate=self.is_prompt)
# Initialise the system actor.
msg = SystemInitRequest(
self.system.process_classes,
self.infrastructure_class,
self.system.followings,
self.pipeline_ids
)
response = self.actor_system.ask(self.system_actor, msg)
# Keep the pipeline actor addresses, to send prompts directly.
assert isinstance(response, SystemInitResponse), type(response)
assert list(response.pipeline_actors.keys()) == self.pipeline_ids, (
"Configured pipeline IDs mismatch initialised system {} {}").format(
            list(response.pipeline_actors.keys()), self.pipeline_ids
)
self.pipeline_actors = response.pipeline_actors
# Todo: Somehow know when to get a new address from the system actor.
# Todo: Command and response messages to system actor to get new pipeline address.
@staticmethod
def is_prompt(event):
return isinstance(event, Prompt)
def forward_prompt(self, prompt):
if prompt.pipeline_id in self.pipeline_actors:
pipeline_actor = self.pipeline_actors[prompt.pipeline_id]
self.actor_system.tell(pipeline_actor, prompt)
# else:
# msg = "Pipeline {} is not running.".format(prompt.pipeline_id)
# raise ValueError(msg)
def close(self):
"""Stops all the actors running a system of process applications."""
super(ActorModelRunner, self).close()
unsubscribe(handler=self.forward_prompt, predicate=self.is_prompt)
if self.shutdown_on_close:
self.shutdown()
def shutdown(self):
msg = ActorExitRequest(recursive=True)
self.actor_system.tell(self.system_actor, msg)
class SystemActor(Actor):
def __init__(self):
super(SystemActor, self).__init__()
self.pipeline_actors = {}
self.is_initialised = False
def receiveMessage(self, msg, sender):
if isinstance(msg, SystemInitRequest):
if not self.is_initialised:
self.init_pipelines(msg)
self.is_initialised = True
msg = SystemInitResponse(self.pipeline_actors.copy())
self.send(sender, msg)
def init_pipelines(self, msg):
self.process_classes = msg.process_classes
self.infrastructure_class = msg.infrastructure_class
self.system_followings = msg.system_followings
for pipeline_id in msg.pipeline_ids:
pipeline_actor = self.createActor(PipelineActor)
self.pipeline_actors[pipeline_id] = pipeline_actor
msg = PipelineInitRequest(
self.process_classes,
self.infrastructure_class,
self.system_followings,
pipeline_id
)
self.send(pipeline_actor, msg)
class PipelineActor(Actor):
def __init__(self):
super(PipelineActor, self).__init__()
self.system = None
self.process_actors = {}
self.pipeline_id = None
def receiveMessage(self, msg, sender):
if isinstance(msg, PipelineInitRequest):
# logger.info("pipeline received init: {}".format(msg))
self.init_pipeline(msg)
elif isinstance(msg, Prompt):
# logger.info("pipeline received prompt: {}".format(msg))
self.forward_prompt(msg)
def init_pipeline(self, msg):
self.pipeline_id = msg.pipeline_id
self.process_classes = msg.process_classes
self.infrastructure_class = msg.infrastructure_class
self.system_followings = msg.system_followings
self.followers = {}
for process_class_name, upstream_class_names in self.system_followings.items():
for upstream_class_name in upstream_class_names:
process_name = upstream_class_name.lower()
if process_name not in self.followers:
self.followers[process_name] = []
downstream_class_names = self.followers[process_name]
if process_class_name not in downstream_class_names:
downstream_class_names.append(process_class_name)
process_class_names = self.system_followings.keys()
for process_class_name in process_class_names:
process_actor = self.createActor(ProcessMaster)
process_name = process_class_name.lower()
self.process_actors[process_name] = process_actor
for process_class_name in process_class_names:
process_name = process_class_name.lower()
upstream_application_names = [c.lower() for c in self.system_followings[process_class_name]]
downstream_actors = {}
for downstream_class_name in self.followers[process_name]:
downstream_name = downstream_class_name.lower()
# logger.warning("sending prompt to process application {}".format(downstream_name))
process_actor = self.process_actors[downstream_name]
downstream_actors[downstream_name] = process_actor
process_class = self.process_classes[process_class_name]
msg = ProcessInitRequest(
process_class,
self.infrastructure_class,
self.pipeline_id,
upstream_application_names,
downstream_actors,
self.myAddress
)
self.send(self.process_actors[process_name], msg)
def forward_prompt(self, msg):
for downstream_class_name in self.followers[msg.process_name]:
downstream_name = downstream_class_name.lower()
process_actor = self.process_actors[downstream_name]
self.send(process_actor, msg)
class ProcessMaster(Actor):
def __init__(self):
super(ProcessMaster, self).__init__()
self.is_slave_running = False
self.last_prompts = {}
self.slave_actor = None
def receiveMessage(self, msg, sender):
if isinstance(msg, ProcessInitRequest):
self.init_process(msg)
elif isinstance(msg, Prompt):
# logger.warning("{} master received prompt: {}".format(self.process_application_class.__name__, msg))
self.consume_prompt(prompt=msg)
elif isinstance(msg, SlaveRunResponse):
# logger.info("process application master received slave finished run: {}".format(msg))
self.handle_slave_run_response()
def init_process(self, msg):
self.process_application_class = msg.process_application_class
self.infrastructure_class = msg.infrastructure_class
self.slave_actor = self.createActor(ProcessSlave)
self.send(self.slave_actor, msg)
self.run_slave()
def consume_prompt(self, prompt):
self.last_prompts[prompt.process_name] = prompt
self.run_slave()
def handle_slave_run_response(self):
self.is_slave_running = False
if self.last_prompts:
self.run_slave()
def run_slave(self):
# Don't send to slave if we think it's running, or we'll
# probably get blocked while sending the message and have
# to wait until the slave runs its loop (thespian design).
if self.slave_actor and not self.is_slave_running:
self.send(self.slave_actor, SlaveRunRequest(self.last_prompts, self.myAddress))
self.is_slave_running = True
self.last_prompts = {}
class ProcessSlave(Actor):
def __init__(self):
super(ProcessSlave, self).__init__()
self.process = None
def receiveMessage(self, msg, sender):
if isinstance(msg, ProcessInitRequest):
# logger.info("process application slave received init: {}".format(msg))
self.init_process(msg)
elif isinstance(msg, SlaveRunRequest):
# logger.info("{} process application slave received last prompts: {}".format(self.process.name, msg))
self.run_process(msg)
elif isinstance(msg, ActorExitRequest):
# logger.info("{} process application slave received exit request: {}".format(self.process.name, msg))
self.close()
def init_process(self, msg):
self.pipeline_actor = msg.pipeline_actor
self.downstream_actors = msg.downstream_actors
self.pipeline_id = msg.pipeline_id
self.upstream_application_names = msg.upstream_application_names
# Construct the process application class.
process_class = msg.process_application_class
if msg.infrastructure_class:
process_class = process_class.mixin(msg.infrastructure_class)
# Reset the database connection (for Django).
process_class.reset_connection_after_forking()
# Construct the process application.
self.process = process_class(
pipeline_id=self.pipeline_id,
)
assert isinstance(self.process, ProcessApplication)
# Subscribe the slave actor's send_prompt() method.
# - the process application will call publish_prompt()
# and the actor will receive the prompt and send it
# as a message.
subscribe(
predicate=self.is_my_prompt,
handler=self.send_prompt
)
# Close the process application persistence policy.
# - slave actor process application doesn't publish
# events, so we don't need this
self.process.persistence_policy.close()
# Unsubscribe process application's publish_prompt().
# - slave actor process application doesn't publish
# events, so we don't need this
unsubscribe(
predicate=self.process.persistence_policy.is_event,
handler=self.process.publish_prompt
)
# Construct and follow upstream notification logs.
for upstream_application_name in self.upstream_application_names:
record_manager = self.process.event_store.record_manager
# assert isinstance(record_manager, ACIDRecordManager), type(record_manager)
notification_log = RecordManagerNotificationLog(
record_manager=record_manager.clone(
application_name=upstream_application_name,
pipeline_id=self.pipeline_id
),
section_size=self.process.notification_log_section_size
)
self.process.follow(upstream_application_name, notification_log)
def run_process(self, msg):
notification_count = 0
# Just process one notification so prompts are dispatched promptly, sent
# messages only dispatched from actor after receive_message() returns.
advance_by = 1
if msg.last_prompts:
for prompt in msg.last_prompts.values():
notification_count += self.process.run(prompt, advance_by=advance_by)
else:
notification_count += self.process.run(advance_by=advance_by)
if notification_count:
# Run again, until nothing was done.
self.send(self.myAddress, SlaveRunRequest(last_prompts={}, master=msg.master))
else:
# Report back to master.
self.send(msg.master, SlaveRunResponse())
def close(self):
unsubscribe(
predicate=self.is_my_prompt,
handler=self.send_prompt
)
self.process.close()
def is_my_prompt(self, prompt):
return (
isinstance(prompt, Prompt)
and prompt.process_name == self.process.name
and prompt.pipeline_id == self.pipeline_id
)
def send_prompt(self, prompt):
for downstream_name, downstream_actor in self.downstream_actors.items():
self.send(downstream_actor, prompt)
class SystemInitRequest(object):
def __init__(self, process_classes, infrastructure_class, system_followings, pipeline_ids):
self.process_classes = process_classes
self.infrastructure_class = infrastructure_class
self.system_followings = system_followings
self.pipeline_ids = pipeline_ids
class SystemInitResponse(object):
def __init__(self, pipeline_actors):
self.pipeline_actors = pipeline_actors
class PipelineInitRequest(object):
def __init__(self, process_classes, infrastructure_class, system_followings, pipeline_id):
self.process_classes = process_classes
self.infrastructure_class = infrastructure_class
self.system_followings = system_followings
self.pipeline_id = pipeline_id
class ProcessInitRequest(object):
def __init__(self, process_application_class, infrastructure_class, pipeline_id,
upstream_application_names,
downstream_actors,
pipeline_actor):
self.process_application_class = process_application_class
self.infrastructure_class = infrastructure_class
self.pipeline_id = pipeline_id
self.upstream_application_names = upstream_application_names
self.downstream_actors = downstream_actors
self.pipeline_actor = pipeline_actor
class SlaveRunRequest(object):
def __init__(self, last_prompts, master):
self.last_prompts = last_prompts
self.master = master
class SlaveRunResponse(object):
pass
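# A minimal run sketch (`system` is assumed to be built elsewhere from the
# application's process classes; it is not defined in this module):
#
#     start_multiproc_tcp_base_system()
#     runner = ActorModelRunner(system=system, pipeline_ids=[0],
#                               shutdown_on_close=True)
#     runner.start()
#     # ... run the applications; prompts are forwarded to pipeline actors
#     runner.close()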
| 37.637441
| 116
| 0.661336
| 14,440
| 0.909148
| 0
| 0
| 146
| 0.009192
| 0
| 0
| 3,092
| 0.194674
|
b7edb2af66a1ef0492b215ff19713cb25d91778e
| 4,517
|
py
|
Python
|
sudoku/board.py
|
DariaMinieieva/sudoku_project
|
acfe6b6ff4e0343ad0dae597e783f9da40a7faee
|
[
"MIT"
] | 5
|
2021-05-27T09:26:30.000Z
|
2021-05-28T10:33:46.000Z
|
sudoku/board.py
|
DariaMinieieva/sudoku_project
|
acfe6b6ff4e0343ad0dae597e783f9da40a7faee
|
[
"MIT"
] | null | null | null |
sudoku/board.py
|
DariaMinieieva/sudoku_project
|
acfe6b6ff4e0343ad0dae597e783f9da40a7faee
|
[
"MIT"
] | 1
|
2021-05-28T08:43:05.000Z
|
2021-05-28T08:43:05.000Z
|
"""This module implements backtracking algorithm to solve sudoku."""
class Board:
"""
Class for sudoku board representation.
"""
NUMBERS = [1, 2, 3, 4, 5, 6, 7, 8, 9]
def __init__(self, board):
"""
Create a new board.
"""
self.board = board
def __str__(self) -> str:
"""
        Return string representation of a board.
"""
result = ''
for line in self.board:
result += str(line) + '\n'
return result.strip()
@staticmethod
def check_rows(board) -> bool:
"""
Check if rows are filled correctly and don't have empty cells.
"""
for row in board:
numbers = list(range(1,10))
for cell in row:
if cell in numbers:
numbers.remove(cell)
else:
return False
return True
def check_colums(self) -> bool:
"""
Check if colums are filled correctly and don't have empty cells.
"""
board_1 = [[self.board[i][j] for i in range(9)] for j in range(9)]
return self.check_rows(board_1)
def check_subgrids(self) -> bool:
"""
Check if subgrids are filled correctly and don't have empty cells.
"""
board_2 = [[self.board[i][j], self.board[i][j+1], self.board[i][j+2],
self.board[i+1][j], self.board[i+1][j+1], self.board[i+1][j+2],
self.board[i+2][j], self.board[i+2][j+1], self.board[i+2][j+2]] \
for i in range(0, 9, 3) for j in range(0, 9, 3)]
return self.check_rows(board_2)
def check_board(self) -> bool:
"""
        Check if board is filled correctly and doesn't have empty cells.
"""
return self.check_rows(self.board) and self.check_colums() and self.check_subgrids()
def get_cell(self) -> tuple or None:
"""
        Return coordinates of the first empty cell, or None if there is none.
"""
for row in range(9):
for column in range(9):
if self.board[row][column] == 0:
return row, column
@staticmethod
def filter_values(values, used) -> set:
"""
        Return set of valid numbers from values that do not appear in used.
"""
        return {number for number in values if number not in used}
def filter_row(self, row) -> set:
"""
Return set of numbers that can be placed into a certain row.
"""
in_row = [number for number in self.board[row] if number != 0]
options = self.filter_values(self.NUMBERS, in_row)
return options
def filter_column(self, column) -> set:
"""
Return set of numbers that can be placed into a certain column.
"""
in_column = [self.board[i][column] for i in range(9)]
options = self.filter_values(self.NUMBERS, in_column)
return options
def filter_subgrid(self, row: int, column: int) -> set:
"""
Return set of numbers that can be placed into a certain subgrid.
"""
row_start = int(row / 3) * 3
column_start = int(column / 3) * 3
in_subgrid = []
for i in range(3):
for j in range(3):
in_subgrid.append(self.board[row_start+i][column_start+j])
options = self.filter_values(self.NUMBERS, in_subgrid)
return options
def available_options(self, row: int, column: int) -> list:
"""
Return a list of possible numbers that can be placed into a cell.
"""
for_row = self.filter_row(row)
for_column = self.filter_column(column)
for_subgrid = self.filter_subgrid(row, column)
result = for_row.intersection(for_column, for_subgrid)
return list(result)
    def backtracking(self) -> Optional[list]:
"""
Main function that implements backtracking algorithm to solve sudoku.
"""
if self.check_board():
return self.board
        # get first empty cell; a full but invalid board is a dead end
        cell = self.get_cell()
        if cell is None:
            return None
        row, column = cell
# get viable options
options = self.available_options(row, column)
for option in options:
self.board[row][column] = option # try viable option
# recursively fill in the board
if self.backtracking():
return self.board # return board if success
            self.board[row][column] = 0  # otherwise reset the cell and backtrack
| 30.938356
| 92
| 0.556121
| 4,446
| 0.984282
| 0
| 0
| 618
| 0.136816
| 0
| 0
| 1,320
| 0.292229
|
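A minimal usage sketch for the Board class above. The import path follows the file's location (sudoku/board.py) and the puzzle grid is an illustrative example, not taken from the source; 0 marks an empty cell, as the solver expects.
# Hedged usage sketch; the puzzle below is illustrative, not from the source.
from sudoku.board import Board  # assumed import path, per sudoku/board.py
puzzle = [
    [5, 3, 0, 0, 7, 0, 0, 0, 0],
    [6, 0, 0, 1, 9, 5, 0, 0, 0],
    [0, 9, 8, 0, 0, 0, 0, 6, 0],
    [8, 0, 0, 0, 6, 0, 0, 0, 3],
    [4, 0, 0, 8, 0, 3, 0, 0, 1],
    [7, 0, 0, 0, 2, 0, 0, 0, 6],
    [0, 6, 0, 0, 0, 0, 2, 8, 0],
    [0, 0, 0, 4, 1, 9, 0, 0, 5],
    [0, 0, 0, 0, 8, 0, 0, 7, 9],
]
board = Board(puzzle)
solution = board.backtracking()  # solved grid, or None if the puzzle is unsolvable
if solution is not None:
    print(board)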
b7f128c1c030f4883afe9da12b85ac98f1c9b3dd
| 9,603
|
py
|
Python
|
openfl/component/ca/ca.py
|
saransh09/openfl-1
|
beba571929a56771f2fc1671154a3dbe60b38785
|
[
"Apache-2.0"
] | null | null | null |
openfl/component/ca/ca.py
|
saransh09/openfl-1
|
beba571929a56771f2fc1671154a3dbe60b38785
|
[
"Apache-2.0"
] | 1
|
2022-03-02T18:07:11.000Z
|
2022-03-10T02:43:12.000Z
|
openfl/component/ca/ca.py
|
saransh09/openfl-1
|
beba571929a56771f2fc1671154a3dbe60b38785
|
[
"Apache-2.0"
] | 1
|
2022-03-03T00:50:15.000Z
|
2022-03-03T00:50:15.000Z
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Aggregator module."""
import base64
import json
import os
import platform
import shutil
import signal
import subprocess
import time
import urllib.request
from logging import getLogger
from pathlib import Path
from subprocess import call
import requests
from click import confirm
logger = getLogger(__name__)
TOKEN_DELIMITER = '.'
CA_STEP_CONFIG_DIR = Path('step_config')
CA_PKI_DIR = Path('cert')
CA_PASSWORD_FILE = Path('pass_file')
CA_CONFIG_JSON = Path('config/ca.json')
def get_system_and_architecture():
"""Get system and architecture of machine."""
uname_res = platform.uname()
system = uname_res.system.lower()
architecture_aliases = {
'x86_64': 'amd64',
'armv6l': 'armv6',
'armv7l': 'armv7',
'aarch64': 'arm64'
}
architecture = uname_res.machine.lower()
for alias in architecture_aliases:
if architecture == alias:
architecture = architecture_aliases[alias]
break
return system, architecture
def download_step_bin(url, grep_name, architecture, prefix='.', confirmation=True):
"""
    Download step binaries from GitHub.
Args:
url: address of latest release
grep_name: name to grep over github assets
architecture: architecture type to grep
prefix: folder path to download
confirmation: request user confirmation or not
"""
if confirmation:
confirm('CA binaries from github will be downloaded now', default=True, abort=True)
result = requests.get(url)
if result.status_code != 200:
        logger.warning('Can\'t download binaries from github. Please try again later.')
return
assets = result.json().get('assets', [])
archive_urls = [
a['browser_download_url']
for a in assets
if (grep_name in a['name'] and architecture in a['name']
and 'application/gzip' in a['content_type'])
]
if len(archive_urls) == 0:
raise Exception('Applicable CA binaries from github were not found '
f'(name: {grep_name}, architecture: {architecture})')
archive_url = archive_urls[-1]
archive_url = archive_url.replace('https', 'http')
name = archive_url.split('/')[-1]
logger.info(f'Downloading {name}')
urllib.request.urlretrieve(archive_url, f'{prefix}/{name}')
shutil.unpack_archive(f'{prefix}/{name}', f'{prefix}/step')
def get_token(name, ca_url, ca_path='.'):
"""
Create authentication token.
Args:
name: common name for following certificate
(aggregator fqdn or collaborator name)
ca_url: full url of CA server
ca_path: path to ca folder
"""
ca_path = Path(ca_path)
step_config_dir = ca_path / CA_STEP_CONFIG_DIR
pki_dir = ca_path / CA_PKI_DIR
step_path, _ = get_ca_bin_paths(ca_path)
if not step_path:
raise Exception('Step-CA is not installed!\nRun `fx pki install` first')
priv_json = step_config_dir / 'secrets' / 'priv.json'
pass_file = pki_dir / CA_PASSWORD_FILE
root_crt = step_config_dir / 'certs' / 'root_ca.crt'
try:
token = subprocess.check_output(
f'{step_path} ca token {name} '
f'--key {priv_json} --root {root_crt} '
            f'--password-file {pass_file} --ca-url {ca_url}', shell=True)
except subprocess.CalledProcessError as exc:
logger.error(f'Error code {exc.returncode}: {exc.output}')
return
token = token.strip()
token_b64 = base64.b64encode(token)
with open(root_crt, mode='rb') as file:
root_certificate_b = file.read()
root_ca_b64 = base64.b64encode(root_certificate_b)
return TOKEN_DELIMITER.join([
token_b64.decode('utf-8'),
root_ca_b64.decode('utf-8'),
])
def get_ca_bin_paths(ca_path):
"""Get paths of step binaries."""
ca_path = Path(ca_path)
step = None
step_ca = None
if (ca_path / 'step').exists():
dirs = os.listdir(ca_path / 'step')
for dir_ in dirs:
if 'step_' in dir_:
step = ca_path / 'step' / dir_ / 'bin' / 'step'
if 'step-ca' in dir_:
step_ca = ca_path / 'step' / dir_ / 'bin' / 'step-ca'
return step, step_ca
def certify(name, cert_path: Path, token_with_cert, ca_path: Path):
"""Create an envoy workspace."""
os.makedirs(cert_path, exist_ok=True)
token, root_certificate = token_with_cert.split(TOKEN_DELIMITER)
token = base64.b64decode(token).decode('utf-8')
root_certificate = base64.b64decode(root_certificate)
step_path, _ = get_ca_bin_paths(ca_path)
if not step_path:
url = 'http://api.github.com/repos/smallstep/cli/releases/latest'
system, arch = get_system_and_architecture()
download_step_bin(url, f'step_{system}', arch, prefix=ca_path)
step_path, _ = get_ca_bin_paths(ca_path)
if not step_path:
raise Exception('Step-CA is not installed!\nRun `fx pki install` first')
with open(f'{cert_path}/root_ca.crt', mode='wb') as file:
file.write(root_certificate)
call(f'{step_path} ca certificate {name} {cert_path}/{name}.crt '
f'{cert_path}/{name}.key --kty EC --curve P-384 -f --token {token}', shell=True)
def remove_ca(ca_path):
"""Kill step-ca process and rm ca directory."""
_check_kill_process('step-ca')
shutil.rmtree(ca_path, ignore_errors=True)
def install(ca_path, ca_url, password):
"""
Create certificate authority for federation.
Args:
ca_path: path to ca directory
ca_url: url for ca server like: 'host:port'
password: Simple password for encrypting root private keys
"""
logger.info('Creating CA')
ca_path = Path(ca_path)
ca_path.mkdir(parents=True, exist_ok=True)
step_config_dir = ca_path / CA_STEP_CONFIG_DIR
os.environ['STEPPATH'] = str(step_config_dir)
step_path, step_ca_path = get_ca_bin_paths(ca_path)
if not (step_path and step_ca_path and step_path.exists() and step_ca_path.exists()):
confirm('CA binaries from github will be downloaded now', default=True, abort=True)
system, arch = get_system_and_architecture()
url = 'http://api.github.com/repos/smallstep/certificates/releases/latest'
download_step_bin(url, f'step-ca_{system}', arch, prefix=ca_path, confirmation=False)
url = 'http://api.github.com/repos/smallstep/cli/releases/latest'
download_step_bin(url, f'step_{system}', arch, prefix=ca_path, confirmation=False)
step_config_dir = ca_path / CA_STEP_CONFIG_DIR
if (not step_config_dir.exists()
or confirm('CA exists, do you want to recreate it?', default=True)):
_create_ca(ca_path, ca_url, password)
_configure(step_config_dir)
def run_ca(step_ca, pass_file, ca_json):
"""Run CA server."""
if _check_kill_process('step-ca', confirmation=True):
logger.info('Up CA server')
call(f'{step_ca} --password-file {pass_file} {ca_json}', shell=True)
def _check_kill_process(pstring, confirmation=False):
"""Kill process by name."""
pids = []
proc = subprocess.Popen(f'ps ax | grep {pstring} | grep -v grep',
shell=True, stdout=subprocess.PIPE)
text = proc.communicate()[0].decode('utf-8')
for line in text.splitlines():
fields = line.split()
pids.append(fields[0])
if len(pids):
        if confirmation and not confirm('CA server is already running. Stop it?', default=True):
return False
for pid in pids:
os.kill(int(pid), signal.SIGKILL)
time.sleep(2)
return True
def _create_ca(ca_path: Path, ca_url: str, password: str):
"""Create a ca workspace."""
pki_dir = ca_path / CA_PKI_DIR
step_config_dir = ca_path / CA_STEP_CONFIG_DIR
pki_dir.mkdir(parents=True, exist_ok=True)
step_config_dir.mkdir(parents=True, exist_ok=True)
with open(f'{pki_dir}/pass_file', 'w') as f:
f.write(password)
os.chmod(f'{pki_dir}/pass_file', 0o600)
step_path, step_ca_path = get_ca_bin_paths(ca_path)
assert (step_path and step_ca_path and step_path.exists() and step_ca_path.exists())
logger.info('Create CA Config')
os.environ['STEPPATH'] = str(step_config_dir)
shutil.rmtree(step_config_dir, ignore_errors=True)
name = ca_url.split(':')[0]
    call(f'{step_path} ca init --name {name} --dns {name} '
f'--address {ca_url} --provisioner prov '
f'--password-file {pki_dir}/pass_file', shell=True)
call(f'{step_path} ca provisioner remove prov --all', shell=True)
call(f'{step_path} crypto jwk create {step_config_dir}/certs/pub.json '
f'{step_config_dir}/secrets/priv.json --password-file={pki_dir}/pass_file', shell=True)
call(
f'{step_path} ca provisioner add provisioner {step_config_dir}/certs/pub.json',
shell=True
)
def _configure(step_config_dir):
conf_file = step_config_dir / CA_CONFIG_JSON
with open(conf_file, 'r+') as f:
data = json.load(f)
data.setdefault('authority', {}).setdefault('claims', {})
data['authority']['claims']['maxTLSCertDuration'] = f'{365 * 24}h'
data['authority']['claims']['defaultTLSCertDuration'] = f'{365 * 24}h'
data['authority']['claims']['maxUserSSHCertDuration'] = '24h'
data['authority']['claims']['defaultUserSSHCertDuration'] = '24h'
f.seek(0)
json.dump(data, f, indent=4)
f.truncate()
| 34.793478
| 97
| 0.656357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,372
| 0.35114
|
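A hedged end-to-end sketch of how the functions above fit together: create the CA, start its server, then issue and redeem a token for a node. The import path mirrors the file's location, and every path, URL, and password literal is an assumption; run_ca blocks while the server is up, so in practice it runs in its own process.
# Hypothetical driver for the ca module above; all literals are assumptions.
from pathlib import Path
from openfl.component.ca.ca import (  # assumed import path, per the file location
    CA_CONFIG_JSON, CA_PASSWORD_FILE, CA_PKI_DIR, CA_STEP_CONFIG_DIR,
    certify, get_ca_bin_paths, get_token, install, run_ca,
)
ca_path = Path('./ca')             # assumed workspace directory
ca_url = 'localhost:9123'          # assumed 'host:port' for the CA server
install(ca_path, ca_url, password='example-password')
_, step_ca = get_ca_bin_paths(ca_path)
pass_file = ca_path / CA_PKI_DIR / CA_PASSWORD_FILE
ca_json = ca_path / CA_STEP_CONFIG_DIR / CA_CONFIG_JSON
# run_ca(step_ca, pass_file, ca_json)  # blocking: start it in a separate process
# With the server up, a node requests a token and redeems it for a certificate:
token = get_token('collaborator1', f'https://{ca_url}', ca_path=ca_path)
certify('collaborator1', Path('cert/collaborator1'), token, ca_path)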
b7f17afa5fddb406481a5085256bccee3d1bcc8c
| 574
|
py
|
Python
|
bin/optimization/cosmo_optimizer_hod_only.py
|
mclaughlin6464/pearce
|
746f2bf4bf45e904d66996e003043661a01423ba
|
[
"MIT"
] | null | null | null |
bin/optimization/cosmo_optimizer_hod_only.py
|
mclaughlin6464/pearce
|
746f2bf4bf45e904d66996e003043661a01423ba
|
[
"MIT"
] | 16
|
2016-11-04T22:24:32.000Z
|
2018-05-01T22:53:39.000Z
|
bin/optimization/cosmo_optimizer_hod_only.py
|
mclaughlin6464/pearce
|
746f2bf4bf45e904d66996e003043661a01423ba
|
[
"MIT"
] | 3
|
2016-10-04T08:07:52.000Z
|
2019-05-03T23:50:01.000Z
|
from pearce.emulator import OriginalRecipe, ExtraCrispy
import numpy as np
training_file = '/home/users/swmclau2/scratch/PearceRedMagicWpCosmo.hdf5'
em_method = 'gp'
split_method = 'random'
a = 1.0
z = 1.0/a - 1.0
fixed_params = {'z':z, 'cosmo': 1}#, 'r':0.18477483}
n_leaves, n_overlap = 5, 2
emu = ExtraCrispy(training_file,n_leaves, n_overlap, split_method, method = em_method, fixed_params=fixed_params,\
custom_mean_function = None)
results = emu.train_metric()
print(results)
print()
print(dict(zip(emu.get_param_names(), np.exp(results.x))))
| 23.916667
| 115
| 0.721254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 97
| 0.16899
|
b7f255f31605c7a9c29e736bc41dc0df25f503be
| 294
|
py
|
Python
|
tests/test_xmllint_map_html.py
|
sthagen/python-xmllint_map_html
|
23363cfe1c126bc72efddf8fea084283375e2204
|
[
"MIT"
] | null | null | null |
tests/test_xmllint_map_html.py
|
sthagen/python-xmllint_map_html
|
23363cfe1c126bc72efddf8fea084283375e2204
|
[
"MIT"
] | 16
|
2020-09-11T11:07:09.000Z
|
2020-12-06T16:42:18.000Z
|
tests/test_xmllint_map_html.py
|
sthagen/python-xmllint_map_html
|
23363cfe1c126bc72efddf8fea084283375e2204
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import json
import pytest # type: ignore
import xmllint_map_html.xmllint_map_html as xmh
def test_parse_ok_minimal():
job = ['[]']
parser = xmh.parse(job)
assert next(parser) == NotImplemented
| 22.615385
| 60
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.343537
|
b7f62fa1d5695f548ee6f73816a2ab82ef2fbcfd
| 1,318
|
py
|
Python
|
apps/transmissions/views/transmissions.py
|
felipebarraza6/amamaule
|
1da7cd542a7e610bc8fa230684770732a41520c9
|
[
"MIT"
] | null | null | null |
apps/transmissions/views/transmissions.py
|
felipebarraza6/amamaule
|
1da7cd542a7e610bc8fa230684770732a41520c9
|
[
"MIT"
] | null | null | null |
apps/transmissions/views/transmissions.py
|
felipebarraza6/amamaule
|
1da7cd542a7e610bc8fa230684770732a41520c9
|
[
"MIT"
] | null | null | null |
from rest_framework import mixins, viewsets, status
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from apps.transmissions.models import Transmission
from apps.transmissions.serializers import TransmissionModelSerializer, CommentModelserializer
from django_filters import rest_framework as filters
class TransmissionFilter(filters.FilterSet):
    class Meta:
        model = Transmission
        fields = {
            'category': ['exact'],
            'is_live': ['exact'],
            'required_auth': ['exact'],
            'broadcast_date': ['exact', 'contains']
        }
class TransmissionsViewSet(mixins.RetrieveModelMixin,
                           mixins.ListModelMixin,
                           mixins.UpdateModelMixin,
                           viewsets.GenericViewSet):
    queryset = Transmission.objects.all().order_by('is_yt_stream')
    serializer_class = TransmissionModelSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = TransmissionFilter
    lookup_field = 'uuid'
    def get_permissions(self):
        if self.action in ['retrieve', 'list']:
            permissions = [AllowAny]
        else:
            permissions = [IsAuthenticated]
        return [p() for p in permissions]
| 30.651163
| 94
| 0.636571
| 985
| 0.747344
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.094082
|
b7f67bcee29d8224470eff2f3efe74022a5ab08f
| 4,751
|
py
|
Python
|
amstramdam/events/game.py
|
felix-martel/multigeo
|
2a1af9abae1fcef399744f6d88c4b1c25e8a25ab
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 3
|
2020-11-28T15:00:56.000Z
|
2021-04-06T14:10:47.000Z
|
amstramdam/events/game.py
|
felix-martel/amstramdam
|
7142c34bda5aecfb5f7059a576a0ea7015a1edbc
|
[
"CC0-1.0",
"CC-BY-4.0"
] | 9
|
2021-04-11T17:28:57.000Z
|
2022-02-19T13:53:35.000Z
|
amstramdam/events/game.py
|
felix-martel/multigeo
|
2a1af9abae1fcef399744f6d88c4b1c25e8a25ab
|
[
"CC-BY-4.0",
"CC0-1.0"
] | 2
|
2020-11-17T09:34:50.000Z
|
2020-11-28T14:57:58.000Z
|
from amstramdam import app, socketio, timers, manager
from flask import session
from flask_socketio import emit
from .types import GameEndNotification, GameEndPayload
from .utils import safe_cancel, wait_and_run
from ..game.types import GameName, Coordinates
def terminate_game(game_name: GameName) -> None:
game = manager.get_game(game_name)
if game is None or not game.done:
return
game.terminate()
payload = GameEndPayload(
leaderboard=game.get_current_leaderboard(),
full=game.get_final_results(), # TODO: remove useless data
)
with app.test_request_context("/"):
status = game.status
print(
f"Ending game <{game_name}> (emitting <event:status-update> "
f"with status={status})"
)
socketio.emit(
"status-update",
GameEndNotification(status=status, payload=payload),
json=True,
broadcast=True,
room=game_name,
)
manager.relaunch_game(game_name)
def end_game(game_name: GameName, run_id: int) -> None:
# global game
game = manager.get_game(game_name)
if game is None or game.curr_run_id != run_id or game.done:
return
print(f"Ending run {game.curr_run_id+1}")
with app.test_request_context("/"):
# 1: get current place
(city_name, hint), (lon, lat) = game.current.place
answer = dict(name=city_name, lon=lon, lat=lat)
# 2: end game
records = game.current.records
results, done = game.end()
payload = dict(
results=records,
answer=answer,
leaderboard=game.get_current_leaderboard(),
done=done,
)
socketio.emit(
"status-update",
dict(status=game.status, payload=payload),
json=True,
broadcast=True,
room=game_name,
)
# 3: continue?
if done:
timers[game_name] = wait_and_run(game.wait_time, terminate_game, game_name)
else:
timers[game_name] = wait_and_run(
game.wait_time, launch_run, game_name, game.curr_run_id
)
def launch_run(game_name: GameName, run_id: int) -> None:
# global duration_thread
game = manager.get_game(game_name)
if game is None or game.curr_run_id != run_id:
return
print(f"Launching run {game.curr_run_id+1} for game <{game_name}>")
with app.test_request_context("/"):
hint = game.launch_run()
payload = dict(hint=hint, current=game.curr_run_id, total=game.n_run)
print(f"Hint is '{hint}'")
socketio.emit(
"status-update",
dict(status=game.status, payload=payload),
json=True,
room=game_name,
broadcast=True,
)
timers[game_name] = wait_and_run(
game.current.duration, end_game, game_name, game.curr_run_id
)
@socketio.on("launch")
def launch_game() -> None:
game_name = session["game"]
player = session.get("player")
if player is None:
return
game = manager.get_game(game_name)
if game is None:
return
game.launch() # GameRun(players)
payload = dict(
game=game.map_name,
runs=game.n_run,
diff=game.difficulty,
by=player,
small_scale=game.small_scale,
)
emit(
"status-update",
dict(status=game.status, payload=payload),
json=True,
broadcast=True,
room=game_name,
)
wait_and_run(3, launch_run, game_name, game.curr_run_id)
@socketio.on("guess")
def process_guess(data: Coordinates) -> None:
# global duration_thread
game_name = session["game"]
game = manager.get_game(game_name)
player = session.get("player")
if player is None or game is None:
return
# player = data["player"]
print("Receiving guess from", player)
lon, lat = data["lon"], data["lat"]
res, done = game.current.process_answer((lon, lat), player)
res["total_score"] = (
game.scores[player] + res["score"]
    )  # we add res["score"] by hand because game.scores isn't updated yet
# emit("log", f"Player <{player}> has scored {res['score']} points", broadcast=True,
# room=game_name)
emit(
"new-guess",
dict(player=player, dist=res["dist"], delta=res["delta"], score=res["score"]),
broadcast=True,
room=game_name,
)
emit("score", res, json=True)
if done:
try:
print(f"Interrupting run {game.curr_run_id+1}\n")
safe_cancel(timers[game_name])
except AttributeError:
pass
end_game(game_name, game.curr_run_id)
| 30.261146
| 88
| 0.603662
| 0
| 0
| 0
| 0
| 1,776
| 0.373816
| 0
| 0
| 790
| 0.166281
|
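The module above leans on two timer helpers imported from .utils whose source is not shown here. A plausible minimal shape for them, assuming they wrap threading.Timer (the real amstramdam implementation may differ):
# Hedged sketch of wait_and_run / safe_cancel as used above; assumptions only.
import threading
from typing import Any, Callable, Optional
def wait_and_run(delay: float, fn: Callable[..., Any], *args: Any) -> threading.Timer:
    """Schedule fn(*args) to run once after `delay` seconds; return the timer."""
    timer = threading.Timer(delay, fn, args=args)
    timer.daemon = True  # don't keep the server process alive for pending runs
    timer.start()
    return timer
def safe_cancel(timer: Optional[threading.Timer]) -> None:
    """Cancel a pending timer, tolerating missing or already-fired timers."""
    if timer is not None:
        timer.cancel()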
b7f6d5055a8a870cf0186a412e583a2dc0833fd5
| 1,515
|
py
|
Python
|
src/glod/unittests/in_out/test_statement_csv.py
|
gordon-elliott/glod
|
a381e21455d05d9c005942a3dee4ac67e10f366a
|
[
"MIT"
] | null | null | null |
src/glod/unittests/in_out/test_statement_csv.py
|
gordon-elliott/glod
|
a381e21455d05d9c005942a3dee4ac67e10f366a
|
[
"MIT"
] | 1
|
2021-03-10T16:48:34.000Z
|
2021-03-10T16:48:34.000Z
|
src/glod/unittests/in_out/test_statement_csv.py
|
gordon-elliott/glod
|
a381e21455d05d9c005942a3dee4ac67e10f366a
|
[
"MIT"
] | null | null | null |
__copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
from datetime import date
from decimal import Decimal
from io import StringIO
from unittest import TestCase
from glod.model.statement_item import StatementItem
from glod.model.account import Account
from glod.in_out.statement_item import statement_item_csv
class TestStatementCSV(TestCase):
def test_export(self):
account_no = '400400'
account = Account(8001, 'current', account_no=account_no)
date_fixture = date.today()
details = 'details fixture {}'
currency = 'EUR'
debit = Decimal('500.00')
credit = None
balance = Decimal('3433.22')
statement_items = [
StatementItem(
account,
date_fixture,
details.format(i),
currency,
debit,
credit,
balance,
)
for i in range(4)
]
actual = statement_item_csv(statement_items, StringIO()).getvalue()
expected = """account date details currency debit credit balance\r
{0} {1} details fixture 0 {2} {3} {4}\r
{0} {1} details fixture 1 {2} {3} {4}\r
{0} {1} details fixture 2 {2} {3} {4}\r
{0} {1} details fixture 3 {2} {3} {4}\r
""".format(
account_no,
date_fixture.strftime('%d/%m/%Y'),
currency,
debit,
balance
)
self.maxDiff = None
self.assertEqual(expected, actual)
| 25.677966
| 75
| 0.570957
| 1,191
| 0.786139
| 0
| 0
| 0
| 0
| 0
| 0
| 334
| 0.220462
|
b7f7145927c059a2c43b18ff8ea2eb1911103a21
| 1,072
|
py
|
Python
|
ExifExtractor.py
|
MalwareJunkie/PythonScripts
|
ad827a8aafaae4a50970c9df11b674f4472eb371
|
[
"MIT"
] | null | null | null |
ExifExtractor.py
|
MalwareJunkie/PythonScripts
|
ad827a8aafaae4a50970c9df11b674f4472eb371
|
[
"MIT"
] | null | null | null |
ExifExtractor.py
|
MalwareJunkie/PythonScripts
|
ad827a8aafaae4a50970c9df11b674f4472eb371
|
[
"MIT"
] | null | null | null |
# Tested with Python 3.6
# Install Pillow: pip install pillow
""" This script extracts exif data from JPEG images """
from PIL import Image
from PIL.ExifTags import TAGS
import sys
def getExif(img):
res = {}
exif = img._getexif()
    if exif is None:
print("No exif data found!!")
sys.exit(0)
for k, v in exif.items():
dcd = TAGS.get(k, k)
res[dcd] = v
return res
def main():
try:
imgName = input("Enter the name of the JPEG image: ")
img = Image.open(imgName)
if img.format != "JPEG":
print("This only works with JPG images!!")
sys.exit(0)
except KeyboardInterrupt:
print("\nExiting!!")
sys.exit(0)
    except Exception:  # a bare except would also swallow the sys.exit() above
print("Something went wrong!! check your input!!")
sys.exit(0)
print("Gathering exif data...")
for k, v in getExif(img).items():
try:
v = v.decode("utf-8")
except AttributeError:
pass
print(str(k) + ": ", v)
if __name__ == "__main__":
    main()
| 23.304348
| 62
| 0.527052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 307
| 0.286381
|
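The script above runs interactively; with the __main__ guard it can also be imported and getExif used directly. A small hedged sketch ('photo.jpg' is a placeholder path, not from the source):
# Hedged programmatic use of getExif; 'photo.jpg' is an assumed placeholder.
from PIL import Image
from ExifExtractor import getExif  # assumed import, per the file name
img = Image.open("photo.jpg")
exif = getExif(img)  # {tag_name: value, ...}; exits if the image has no EXIF
print(exif.get("DateTime", "no DateTime tag"))
print(exif.get("Model", "no camera model tag"))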
b7f7a2d524260e395bf0b274a89d51e8f9652827
| 240
|
py
|
Python
|
nbgrader/nbgraderformat/__init__.py
|
FrattisUC/nbgrader
|
f6402dcbb875e41ee3317be9e7af518afda9f72c
|
[
"BSD-3-Clause-Clear"
] | 2
|
2021-09-11T20:32:18.000Z
|
2021-09-11T20:32:37.000Z
|
nbgrader/nbgraderformat/__init__.py
|
FrattisUC/nbgrader
|
f6402dcbb875e41ee3317be9e7af518afda9f72c
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
nbgrader/nbgraderformat/__init__.py
|
FrattisUC/nbgrader
|
f6402dcbb875e41ee3317be9e7af518afda9f72c
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-09-13T07:46:09.000Z
|
2019-09-13T07:46:09.000Z
|
SCHEMA_VERSION = 2
from .common import ValidationError, SchemaMismatchError
from .v2 import MetadataValidatorV2 as MetadataValidator
from .v2 import read_v2 as read, write_v2 as write
from .v2 import reads_v2 as reads, writes_v2 as writes
| 34.285714
| 56
| 0.829167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b7f7e17dac70dc7137a4fbc2c1596760a4b65113
| 9,537
|
py
|
Python
|
testFiles/test_script.py
|
Janga-Lab/Penguin-1
|
f6162be3549c470416da0fab590ae7d04c74bfa5
|
[
"MIT"
] | null | null | null |
testFiles/test_script.py
|
Janga-Lab/Penguin-1
|
f6162be3549c470416da0fab590ae7d04c74bfa5
|
[
"MIT"
] | null | null | null |
testFiles/test_script.py
|
Janga-Lab/Penguin-1
|
f6162be3549c470416da0fab590ae7d04c74bfa5
|
[
"MIT"
] | null | null | null |
import h5py
from ont_fast5_api.conversion_tools import multi_to_single_fast5
from ont_fast5_api import fast5_interface
import SequenceGenerator.align as align
import SignalExtractor.Nanopolish as events
from testFiles.test_commands import *
import os, sys
import subprocess
#todo get basecall data
def basecall_test(fastPath):
files = os.listdir("Data/basecall")
#check if basecall file already exists
for f in files:
if f.endswith(".fasta") or f.endswith(".fa") or f.endswith(".fastq") or f.endswith(".fq"):
if os.stat("Data/basecall/" + f).st_size > 1000:
return
print("missing basecall file****/creating basecall file")
bcCmd = "scrappie raw " + fastPath + " > " + os.getcwd() + "/Data/basecall/reads.fa"
#create basecall file
try:
        subprocess.run(bcCmd, shell=True, check=True)  # shell=True so the '>' redirect works
#scrappie_basecall(fastPath)
#checking if file not in right fast5 format(multi/single)
except subprocess.CalledProcessError:
export_scrappie_path()
print("got error / process error")
#export scrappie cmd (might not be exported correctly)
export_scrappie_path()
#checking if already in single directory
if 'single' in fastPath:
print("|||\/|| Already in single folder")
#todo insert flappie
#convert multi fast5 to single fast5 and move files into single directory.
elif 'single' not in os.listdir(fastPath):
print("converting fast5 to single fast5")
convert_fast5_type(fastPath)
scrappie_basecall_single(fastPath)
#if path doesn't exist or no files
except FileNotFoundError:
#export_scrappie_path()
print("got error / no file found ")
#scrappie_basecall_single(fastPath)
sys.exit()
#any error (default error"export scrappie and try again")
except:
export_scrappie_path()
scrappie_basecall(fastPath)
#check if basecall created successfully
if os.stat("Data/basecall/reads.fa").st_size > 0:
print("created basecall file****")
else:
print("Couldn't create basecall file")
#test to check if required files are created
def file_test(bed_file, ref_file, sam_file):
    if bed_file is None:
print("bed file test failed****")
raise FileNotFoundError
#set ref file
    if ref_file is not None:
#fasta input
fastfile = os.getcwd() + "/Data/basecall/"
for ffile in os.listdir(fastfile):
if ffile.endswith(".fastq") or ffile.endswith(".fasta") or ffile.endswith(".fa"):
#check if fasta files exist in directory
fastfile = os.getcwd() + "/Data/basecall/" + ffile
#check if you found a fasta/fastq file in directory
if fastfile.endswith(".fastq") != True and fastfile.endswith(".fasta") != True and fastfile.endswith(".fa") != True:
print("basecall test failed****")
raise FileNotFoundError
#download reference file
else:
#use default ref files
refFlag = False
#defaultReferenceFile = "Homo_sapiens.GRCh38.dna.alt.fa"
#defaultReferenceFile = "refgenome"
defaultReferenceFile = "grch38.fna"
#defaultReferenceFile = "coli-ref.fa"
downloadedFlag = False
#check if default reference file exists
for f in os.listdir(os.getcwd()):
if f == defaultReferenceFile:
print("reference downloaded already****")
downloadedFlag = True
#download reference file
if downloadedFlag != True:
#os.system("wget -O refgenome.tar.gz ftp://igenome:G3nom3s4u@ussd-ftp.illumina.com/Homo_sapiens/Ensembl/GRCh37/Homo_sapiens_Ensembl_GRCh37.tar.gz")
#os.system("wget -O refgenome.gz ftp://ftp.ncbi.nlm.nih.gov/refseq/H_sapiens/annotation/GRCh37_latest/refseq_identifiers/GRCh37_latest_genomic.fna.gz")
os.system("wget -O grch38.fna.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/001/405/GCA_000001405.15_GRCh38/GCA_000001405.15_GRCh38_genomic.fna.gz")
#os.system("wget -O ftp://ftp.ensembl.org/pub/release-100/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.alt.fa.gz")
#os.system("tar -xzf refgenome.tar.gz")
#os.system("gunzip refgenome.gz")
os.system("gzip -d grch38.fna.gz")
print("gunzipping reference genome****")
#os.system("gunzip -v Homo_sapiens.GRCh38.dna.alt.fa.gz")
for f in os.listdir(os.getcwd()):
if f == "Homo_sapiens" or f == defaultReferenceFile or f == "refgenome":
refFlag = True
break
ref_file = defaultReferenceFile
#if file download wasn't successful
if refFlag == False and downloadedFlag != True:
print("ref file test failed****")
raise FileNotFoundError
#get basecalled file
fastfile = os.getcwd() + "/Data/basecall/"
for ffile in os.listdir(fastfile):
if ffile.endswith(".fastq") or ffile.endswith(".fasta") or ffile.endswith(".fa"):
#check if fast files exist in directory
fastfile += ffile
break
#if no fasta/fastq file found
if fastfile == os.getcwd() + "/Data/basecall/":
print("basecall file test failed****")
raise FileNotFoundError
    if sam_file is None:
        #ref file exists so align here
        sam_file = get_sam_file(fastfile, ref_file)
    if sam_file is None:
        #alignment still didn't produce a sam file
        print("sam file test failed****")
        raise FileNotFoundError
if bed_file != None:
print("\nbed file test passed****")
if sam_file != None:
print("sam file test passed****")
return bed_file, ref_file, sam_file
def id_file_test():
for f in os.listdir("./Data/"):
if f == "Fast5_ids.txt":
print("id test passed****")
return
def get_sam_file(fastfile, ref_file):
#check if sam file exists on our directory
if "Alignment.sam" in os.listdir("Data"):
#prompt to create new sam file
choice = input("Do you want to create a new sam file?(y/n)")
if choice == 'y':
sam_file = align.minimapAligner(fastfile, ref_file)
else:
return "Data/Alignment.sam"
else:
sam_file = align.minimapAligner(fastfile, ref_file)
return sam_file
#create event info file for machine learning models
def event_check(fpath=None, filename=None, ref=None, NanopolishOnly=True):
#check if event info already exists
if "reads-ref.eventalign.txt" in os.listdir("Data") and os.stat("Data/reads-ref.eventalign.txt").st_size > 1000:
return "Data/reads-ref.eventalign.txt"
#no events
    if ref is not None:
#todo fix this bug
        if event_align_check() is None:
print("Creating Event Align file****")
#create events(nanopolish code goes here)
#is it a single file or path
if fpath == None:
event_file = events.nanopolish_events(filename, "Data/basecall/", referenceFile=ref)
else:
event_file = events.nanopolish_events(fpath, "Data/basecall/", referenceFile=ref)
print("event file ", event_file)
show_penguin()
return event_file
else:
show_penguin()
return "Data/reads-ref.eventalign.txt"
else:
print("reference file test failed")
raise FileNotFoundError
def show_penguin():
penguin = """
=============================================================
**-..L```|
\ |
* \ |```| |```` |\ | |```| | | ``|`` |\ |
| | \ |___| |___ | \ | |___ | | | | \ |
/*\ | \ | | | \| | | | | | | | \|
|***\ | | | |____ | | |___| \|/ _|_ | |
\****\ \ | |
\***/ \ / |
\*/ /
/___/_____\
=============================================================
"""
print(penguin)
def sequence_check():
pass
def event_align_check():
for file in os.listdir("Data"):
if file == "reads-ref.eventalign.txt" and os.stat("Data/reads-ref.eventalign.txt").st_size > 1000:
print("Event Align Test Passed****")
return "Data/reads-ref.eventalign.txt"
print("Event Align Test Failed****")
return None
def convert_fast5_type(directory):
#go through fast5 files and check if the files is multi or single fast5 file
#we need a single fast5 file
for root, dirs, files in os.walk(directory):
for name in files:
if name.endswith(".fast5"):
fobj = fast5_interface.get_fast5_file(os.path.join(root, name))
if fast5_interface.check_file_type(fobj) == "multi-read":
#convert file to single fast5
print("converting fast5 file****")
multi_to_single_fast5.convert_multi_to_single(os.path.join(root, name), directory, "single")
| 37.695652
| 165
| 0.567998
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,224
| 0.442907
|
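A hedged sketch of how the checks above chain together, in the order the functions imply; every path and the BED file name are assumptions, and ref_file=None exercises the GRCh38 download branch in file_test.
# Hypothetical driver for the test checks above; all paths are assumptions.
fast5_dir = "Data/fast5/"            # assumed directory of raw fast5 reads
basecall_test(fast5_dir)             # ensure Data/basecall/ holds a fasta/fastq
bed_file, ref_file, sam_file = file_test(
    bed_file="Data/sites.bed",       # assumed BED file of candidate sites
    ref_file=None,                   # None -> download the default GRCh38 reference
    sam_file=None,                   # None -> align with minimap via get_sam_file
)
event_file = event_check(fpath=fast5_dir, ref=ref_file)  # nanopolish event table
print("event table:", event_file)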
b7f84a7d5201859ed1a739cf1602952494964553
| 7,702
|
py
|
Python
|
channels/italiaserie.py
|
sodicarus/channels
|
d77402f4f460ea6daa66959aa5384aaffbff70b5
|
[
"MIT"
] | null | null | null |
channels/italiaserie.py
|
sodicarus/channels
|
d77402f4f460ea6daa66959aa5384aaffbff70b5
|
[
"MIT"
] | null | null | null |
channels/italiaserie.py
|
sodicarus/channels
|
d77402f4f460ea6daa66959aa5384aaffbff70b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand-pureita.- XBMC Plugin
# italiaserie channel
# http://www.mimediacenter.info/foro/viewtopic.php?f=36&t=7808
# ------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import config
from core import servertools
from core import scrapertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "italiaserie"
host = "https://italiaserie.org"
headers = [['Referer', host]]
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand-pureita -[italiaserie mainlist]")
itemlist = [Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]Serie TV - [COLOR orange]Ultime Aggiunte[/COLOR]",
url="%s/category/serie-tv/" % host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_serie_P.png"),
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]Serie TV - [COLOR orange]Aggiornamenti[/COLOR]",
url="%s/ultimi-episodi/" % host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_series_P.png"),
Item(channel=__channel__,
action="categorie",
title="[COLOR azure]Serie TV - [COLOR orange]Categorie[/COLOR]",
url=host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genres_P.png"),
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]Serie TV - [COLOR orange]Animazione[/COLOR]",
url="%s/category/serie-tv/animazione-e-bambini/" % host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/animation2_P.png"),
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]Serie TV - [COLOR orange]TV Show[/COLOR]",
url="%s/category/serie-tv/tv-show/" % host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
Item(channel=__channel__,
action="search",
title="[COLOR orange]Search ...[/COLOR]",
url=host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png")]
return itemlist
# ==================================================================================================================================================
def search(item, texto):
logger.info("streamondemand-pureita - [italiaserie search]")
item.url = host + "/?s=" + texto
try:
return peliculas(item)
    # Catch the exception so the global search isn't interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ==================================================================================================================================================
def categorie(item):
logger.info("streamondemand-pureita -[italiaserie categorie]")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.get_match(data, r'<h3 class="title">Categorie</h3>(.*?)</ul>')
patron = r'<li class=".*?"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
if "Serie TV" in scrapedtitle or "Tv Show" in scrapedtitle or "Animazione e Bambini" in scrapedtitle:
continue
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title=scrapedtitle,
url=scrapedurl,
thumbnail='https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genre_P.png',
folder=True))
return itemlist
# ==================================================================================================================================================
def peliculas(item):
logger.info("streamondemand-pureita -[serietvonline_co peliculas]")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<a href="([^"]+)"\s*title="([^"]+)">\s*<img src="([^<]+)"\s*alt[^>]+>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
scrapedplot=""
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodes",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scrapedtitle,
folder=True), tipo="tv"))
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">Next »</a>')
if next_page != "":
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivi >>[/COLOR]",
url=next_page,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
return itemlist
# ==================================================================================================================================================
def episodes(item):
logger.info("streamondemand-pureita -[italiaserie episodes]")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<a rel="nofollow"\s*target="_blank" act=".*?"\s*href="([^"]+)"\s*class="green-link">\s*<strong>([^<]+)</strong>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
action="findvideos",
title=scrapedtitle,
fulltitle=item.fulltitle + " - " + scrapedtitle,
show=item.show + " - " + scrapedtitle,
url=scrapedurl,
plot="[COLOR orange]" + item.title + "[/COLOR]" + item.plot,
thumbnail=item.thumbnail,
folder=True))
return itemlist
# ==================================================================================================================================================
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
servername = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(['[COLOR azure][[COLOR orange]' + servername.capitalize() + '[/COLOR]] - ', item.title])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.plot = item.plot
videoitem.channel = __channel__
return itemlist
| 43.514124
| 149
| 0.531291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,271
| 0.42464
|
b7f8e6d0c8a700576343e9ec9966950fe6696251
| 629
|
py
|
Python
|
setup.py
|
jeffleary00/greenery
|
cb5b5d037b6fd297463633d2d3315c722851161f
|
[
"MIT"
] | null | null | null |
setup.py
|
jeffleary00/greenery
|
cb5b5d037b6fd297463633d2d3315c722851161f
|
[
"MIT"
] | null | null | null |
setup.py
|
jeffleary00/greenery
|
cb5b5d037b6fd297463633d2d3315c722851161f
|
[
"MIT"
] | 1
|
2018-02-25T17:29:37.000Z
|
2018-02-25T17:29:37.000Z
|
from setuptools import setup
setup(
name='potnanny-api',
version='0.2.6',
packages=['potnanny_api'],
include_package_data=True,
description='Part of the Potnanny greenhouse controller application. Contains Flask REST API and basic web interface.',
author='Jeff Leary',
author_email='potnanny@gmail.com',
url='https://github.com/jeffleary00/potnanny-api',
install_requires=[
'requests',
'passlib',
'sqlalchemy',
'marshmallow',
'flask',
'flask-restful',
'flask-jwt-extended',
'flask-wtf',
'potnanny-core==0.2.9',
],
)
| 26.208333
| 123
| 0.616852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 337
| 0.535771
|