Dataset schema (column, type, observed value range):

| Column | Type | Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
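The rows below give one record per file: a metadata summary (the path, repo name, head hexsha, and license fields are listed once, since they carry identical values across the stars/issues/forks column groups of a record), the `content` column, and the per-file line statistics. As a minimal sketch of how records with this schema might be consumed via the Hugging Face `datasets` library (the dataset identifier below is a hypothetical placeholder, not taken from this page):

```python
# Sketch only: stream records and filter on a few of the columns listed above.
# "org/code-dataset" is a placeholder; substitute the actual dataset identifier.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)

for row in ds:
    stars = row["max_stars_count"] or 0  # nullable column, may be None
    if row["ext"] == "py" and stars >= 100:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break
```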
| Field | Value |
|---|---|
| hexsha | 037dec22b9e6ec23180fcd373039d43a5b32b717 |
| size | 394 |
| ext / lang | py / Python |
| repo path | src/genie/libs/parser/iosxe/tests/ShowStackwiseVirtualDualActiveDetection/cli/equal/golden_output1_expected.py |
| repo name | balmasea/genieparser |
| repo head hexsha | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 |
| licenses | ["Apache-2.0"] |
| max_stars_count | 204 (2018-06-27T00:55:27.000Z to 2022-03-06T21:12:18.000Z) |
| max_issues_count | 468 (2018-06-19T00:33:18.000Z to 2022-03-31T23:23:35.000Z) |
| max_forks_count | 309 (2019-01-16T20:21:07.000Z to 2022-03-30T12:56:41.000Z) |

content:
expected_output = {
"dad_port": {
"switches": {
1: {
"FortyGigabitEthernet1/0/3": {"status": "up"},
"FortyGigabitEthernet1/0/4": {"status": "up"},
},
2: {
"FortyGigabitEthernet2/0/3": {"status": "up"},
"FortyGigabitEthernet2/0/4": {"status": "up"},
},
}
}
}
avg_line_length: 26.266667 | max_line_length: 62 | alphanum_fraction: 0.398477

| Field | Value |
|---|---|
| hexsha | db21eda3ae94556ad5db243da919e478a5b07a90 |
| size | 684 |
| ext / lang | py / Python |
| repo path | deleteUser.py |
| repo name | cmkhoury/CS-506 |
| repo head hexsha | a78b9931c25f7bfddf0bfe9e12f742bc7858228c |
| licenses | ["MIT"] |
| max_stars_count | 1 (2018-04-24T03:14:16.000Z to 2018-04-24T03:14:16.000Z) |
| max_issues_count | null |
| max_forks_count | null |

content:
import requests
import sqlite3 as sql
import sys
def main():
    con = None
    try:
        with sql.connect("data/test.db") as con:
            curs = con.cursor()
            # inserted = con.execute("SELECT * FROM User WHERE Username = (?)", (users[i][0],))
            # for row in inserted:
            #     if row[1] == users[i][0]: print("Found " + users[i][0])
            deleted = con.execute("DELETE FROM User WHERE UID = (?)", (sys.argv[1],))
            print("Deleted: " + str(deleted.rowcount) + " entries.")
            con.commit()
    except Exception as e:
        # roll back only if the connection was actually opened
        if con is not None:
            con.rollback()
        print(e)
    finally:
        if con is not None:
            con.close()


if __name__ == '__main__':
    main()
avg_line_length: 25.333333 | max_line_length: 95 | alphanum_fraction: 0.520468

| Field | Value |
|---|---|
| hexsha | ab336c654d4b016da27a2f40b8fe11baa9d093f9 |
| size | 21,003 |
| ext / lang | py / Python |
| repo path | google-images-download.py |
| repo name | riemannzetagambit/google-images-download |
| repo head hexsha | ccabe46402d8f045e5ad03534cf99285a103eaf8 |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | 3 (2021-01-08T06:34:56.000Z to 2021-11-09T08:37:25.000Z) |

content:
# In[ ]:
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
# Import Libraries
import sys
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
import urllib.request
from urllib.request import Request, urlopen
from urllib.request import URLError, HTTPError
from urllib.parse import quote
else: # If the Current Version of Python is 2.x
import urllib2
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from urllib import quote
import time # Importing the time library to check the time of code execution
import os
import argparse
import ssl
import datetime
def arguments():
# Taking command line arguments from users
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--keywords', help='delimited list input', type=str, required=False)
parser.add_argument('-sk', '--suffix_keywords', help='comma separated additional words added to main keyword', type=str, required=False)
parser.add_argument('-l', '--limit', help='delimited list input', type=str, required=False)
parser.add_argument('-f', '--format', help='download images with specific format', type=str, required=False,
choices=['jpg', 'gif', 'png', 'bmp', 'svg', 'webp', 'ico'])
parser.add_argument('-u', '--url', help='search with google image URL', type=str, required=False)
parser.add_argument('-x', '--single_image', help='downloading a single image from URL', type=str, required=False)
parser.add_argument('-o', '--output_directory', help='download images in a specific directory', type=str, required=False)
parser.add_argument('-d', '--delay', help='delay in seconds to wait between downloading two images', type=str, required=False)
parser.add_argument('-c', '--color', help='filter on color', type=str, required=False,
choices=['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white', 'gray', 'black', 'brown'])
parser.add_argument('-ct', '--color_type', help='filter on color', type=str, required=False,
choices=['full-color', 'black-and-white', 'transparent'])
parser.add_argument('-r', '--usage_rights', help='usage rights', type=str, required=False,
choices=['labled-for-reuse-with-modifications','labled-for-reuse','labled-for-noncommercial-reuse-with-modification','labled-for-nocommercial-reuse'])
parser.add_argument('-s', '--size', help='image size', type=str, required=False,
choices=['large','medium','icon'])
parser.add_argument('-t', '--type', help='image type', type=str, required=False,
choices=['face','photo','clip-art','line-drawing','animated'])
parser.add_argument('-w', '--time', help='image age', type=str, required=False,
choices=['past-24-hours','past-7-days'])
parser.add_argument('-a', '--aspect_ratio', help='comma separated additional words added to keywords', type=str, required=False,
choices=['tall', 'square', 'wide', 'panoramic'])
parser.add_argument('-si', '--similar_images', help='downloads images very similar to the image URL you provide', type=str, required=False)
parser.add_argument('-ss', '--specific_site', help='downloads images that are indexed from a specific website', type=str, required=False)
    args = parser.parse_args()
    # return both so the caller can unpack them and report errors via parser.error()
    return args, parser
#------ Initialization Complete ------#
# Downloading entire Web Document (Raw Page Content)
def download_page(url):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: # If the Current Version of Python is 2.x
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(req)
except URLError: # Handling SSL certificate failed
context = ssl._create_unverified_context()
response = urlopen(req, context=context)
page = response.read()
return page
except:
return "Page Not found"
# Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"', start_line + 1)
end_content = s.find(',"ow"', start_content + 1)
content_raw = str(s[start_content + 6:end_content - 1])
return content_raw, end_content
# Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) # Append all the links in the list named 'Links'
time.sleep(0.1) # Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
def similar_images():
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + args.similar_images
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req1 = urllib.request.Request(searchUrl, headers=headers)
resp1 = urllib.request.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib.request.Request(newurl, headers=headers)
resp2 = urllib.request.urlopen(req2)
# print(resp2.read())
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return urll2
except:
            return "Could not connect to Google Images endpoint"
else: # If the Current Version of Python is 2.x
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + args.similar_images
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req1 = urllib2.Request(searchUrl, headers=headers)
resp1 = urllib2.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
#print newurl
req2 = urllib2.Request(newurl, headers=headers)
resp2 = urllib2.urlopen(req2)
# print(resp2.read())
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return(urll2)
except:
            return "Could not connect to Google Images endpoint"
#Building URL parameters
def build_url_parameters():
built_url = "&tbs="
counter = 0
params = {'color':[args.color,{'red':'ic:specific,isc:red', 'orange':'ic:specific,isc:orange', 'yellow':'ic:specific,isc:yellow', 'green':'ic:specific,isc:green', 'teal':'ic:specific,isc:teel', 'blue':'ic:specific,isc:blue', 'purple':'ic:specific,isc:purple', 'pink':'ic:specific,isc:pink', 'white':'ic:specific,isc:white', 'gray':'ic:specific,isc:gray', 'black':'ic:specific,isc:black', 'brown':'ic:specific,isc:brown'}],
'color_type':[args.color_type,{'full-color':'ic:color', 'black-and-white':'ic:gray','transparent':'ic:trans'}],
'usage_rights':[args.usage_rights,{'labled-for-reuse-with-modifications':'sur:fmc','labled-for-reuse':'sur:fc','labled-for-noncommercial-reuse-with-modification':'sur:fm','labled-for-nocommercial-reuse':'sur:f'}],
'size':[args.size,{'large':'isz:l','medium':'isz:m','icon':'isz:i'}],
'type':[args.type,{'face':'itp:face','photo':'itp:photo','clip-art':'itp:clip-art','line-drawing':'itp:lineart','animated':'itp:animated'}],
'time':[args.time,{'past-24-hours':'qdr:d','past-7-days':'qdr:w'}],
'aspect_ratio':[args.aspect_ratio,{'tall':'iar:t','square':'iar:s','wide':'iar:w','panoramic':'iar:xw'}],
'format':[args.format,{'jpg':'ift:jpg','gif':'ift:gif','png':'ift:png','bmp':'ift:bmp','svg':'ift:svg','webp':'webp','ico':'ift:ico'}]}
for key, value in params.items():
if value[0] is not None:
ext_param = value[1][value[0]]
# counter will tell if it is first param added or not
if counter == 0:
# add it to the built url
built_url = built_url + ext_param
counter += 1
else:
built_url = built_url + ',' + ext_param
counter += 1
return built_url
#function to download single image
def single_image(output_directory, url):
try:
os.makedirs(output_directory)
except OSError as e:
if e.errno != 17:
raise
# time.sleep might help here
pass
req = Request(url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
response = urlopen(req, None, 10)
image_name = str(url[(url.rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
if ".jpg" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name:
output_file = open(output_directory + "/" + image_name, 'wb')
else:
output_file = open(output_directory + "/" + image_name + ".jpg", 'wb')
image_name = image_name + ".jpg"
data = response.read()
output_file.write(data)
response.close()
print("completed ====> " + image_name)
return
def bulk_download(search_keyword,
suffix_keywords,
limit,
output_directory,
delay_time,
color=None,
url=None,
similar_images=None,
specific_site=None,
format=None):
errorCount = 0
if url:
search_keyword = [str(datetime.datetime.now()).split('.')[0]]
if similar_images:
search_keyword = [str(datetime.datetime.now()).split('.')[0]]
# appending a dummy value to Suffix Keywords array if it is blank
if len(suffix_keywords) == 0:
suffix_keywords.append('')
for sky in suffix_keywords:
i = 0
while i < len(search_keyword):
items = []
iteration = "\n" + "Item no.: " + str(i + 1) + " -->" + " Item name = " + str(search_keyword[i] + str(sky))
print(iteration)
print("Evaluating...")
search_term = search_keyword[i] + sky
dir_name = search_term + ('-' + color if color else '')
# make a search keyword directory
try:
if not os.path.exists(output_directory):
os.makedirs(output_directory)
time.sleep(0.2)
path = str(dir_name)
sub_directory = os.path.join(output_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
else:
path = str(dir_name)
sub_directory = os.path.join(output_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
except OSError as e:
if e.errno != 17:
raise
# time.sleep might help here
pass
params = build_url_parameters()
# color_param = ('&tbs=ic:specific,isc:' + args.color) if args.color else ''
# check the args and choose the URL
if url is not None:
pass
elif similar_images is not None:
keywordem = similar_images()
url = 'https://www.google.com/search?q=' + keywordem + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
elif specific_site is not None:
url = 'https://www.google.com/search?q=' + quote(
search_term) + 'site:' + specific_site + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
else:
url = 'https://www.google.com/search?q=' + quote(
search_term) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
raw_html = (download_page(url))
time.sleep(0.1)
items = items + (_images_get_all_items(raw_html))
print("Total Image Links = " + str(len(items)))
#If search does not return anything, do not try to force download
if len(items) <= 1:
print('***** This search result did not return any results...please try a different search filter *****')
break
print("Starting Download...")
k = 0
success_count = 0
while (k < len(items)):
try:
image_url = items[k]
#print("\n" + str(image_url))
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
response = urlopen(req, None, 15)
image_name = str(items[k][(items[k].rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
if ".jpg" in image_name or ".JPG" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name:
output_file = open(output_directory + "/" + dir_name + "/" + str(success_count + 1) + ". " + image_name, 'wb')
else:
if format is not None:
output_file = open(
output_directory + "/" + dir_name + "/" + str(success_count + 1) + ". " + image_name + "." + format,
'wb')
image_name = image_name + "." + format
else:
output_file = open(
output_directory + "/" + dir_name + "/" + str(success_count + 1) + ". " + image_name + ".jpg", 'wb')
image_name = image_name + ".jpg"
data = response.read()
output_file.write(data)
response.close()
print("Completed ====> " + str(success_count + 1) + ". " + image_name)
k = k + 1
success_count += 1
if success_count == limit:
break
except UnicodeEncodeError as e:
errorCount +=1
print ("UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e))
k = k + 1
except HTTPError as e: # If there is any HTTPError
errorCount += 1
print("HTTPError on an image...trying next one..." + " Error: " + str(e))
k = k + 1
except URLError as e:
errorCount += 1
print("URLError on an image...trying next one..." + " Error: " + str(e))
k = k + 1
except ssl.CertificateError as e:
errorCount += 1
print("CertificateError on an image...trying next one..." + " Error: " + str(e))
k = k + 1
except IOError as e: # If there is any IOError
errorCount += 1
print("IOError on an image...trying next one..." + " Error: " + str(e))
k = k + 1
if delay_time is not None:
time.sleep(int(delay_time))
if success_count < limit:
print("\n\nUnfortunately all " + str(limit) + " could not be downloaded because some images were not downloadable. " + str(success_count) + " is all we got for this search filter!")
i = i + 1
return errorCount
if __name__ == '__main__':
args, parser = arguments()
cwd = os.path.dirname(os.path.realpath(__file__))
#Initialization and Validation of user arguments
if args.keywords:
search_keyword = [str(item) for item in args.keywords.split(',')]
#Additional words added to keywords
if args.suffix_keywords:
suffix_keywords = [" " + str(sk) for sk in args.suffix_keywords.split(',')]
else:
suffix_keywords = []
# Setting limit on number of images to be downloaded
if args.limit:
limit = int(args.limit)
if int(args.limit) >= 100:
limit = 100
else:
limit = 100
# If single_image or url argument not present then keywords is mandatory argument
if args.single_image is None and args.url is None and args.similar_images is None and args.keywords is None:
parser.error('Keywords is a required argument!')
# If this argument is present, set the custom output directory
if args.output_directory:
output_directory = os.path.join(cwd, args.output_directory)
else:
output_directory = os.path.join(cwd, 'downloads')
# Set the delay parameter if this argument is present
if args.delay:
try:
delay_time = int(args.delay)
except ValueError:
parser.error('Delay parameter should be an integer!')
else:
delay_time = 0
if args.single_image: #Download Single Image using a URL
single_image_url = args.single_image
single_image(output_directory=output_directory, url = single_image_url)
else: # or download multiple images based on keywords/keyphrase search
t0 = time.time() # start the timer
# TODO(dstone): just make this take args. But the code I inherited is totally whack anyway, so it's a big hassle
# to refactor bulk_download to take args
errorCount = bulk_download(search_keyword,
suffix_keywords,
limit,
output_directory,
delay_time=delay_time,
color=args.color,
url=args.url,
similar_images=args.similar_images,
specific_site=args.specific_site,
format=args.format)
print("\nEverything downloaded!")
print("Total Errors: " + str(errorCount) + "\n")
t1 = time.time() # stop the timer
total_time = t1 - t0 # Calculating the total time required to crawl, find and download all the links of 60,000 images
print("Total time taken: " + str(total_time) + " Seconds")
avg_line_length: 48.958042 | max_line_length: 426 | alphanum_fraction: 0.562015

| Field | Value |
|---|---|
| hexsha | 427158341546337a75c6f71a435872a6c135673e |
| size | 13,650 |
| ext / lang | py / Python |
| repo path | vspk/v5_0/nukeyservermonitor.py |
| repo name | axxyhtrx/vspk-python |
| repo head hexsha | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 |
| licenses | ["BSD-3-Clause"] |
| max_stars_count | 19 (2016-03-07T12:34:22.000Z to 2020-06-11T11:09:02.000Z) |
| max_issues_count | 40 (2016-06-13T15:36:54.000Z to 2020-11-10T18:14:43.000Z) |
| max_forks_count | 15 (2016-06-10T22:06:01.000Z to 2020-12-15T18:37:42.000Z) |

content:
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUKeyServerMonitorEncryptedSeedsFetcher
from .fetchers import NUKeyServerMonitorSeedsFetcher
from .fetchers import NUKeyServerMonitorSEKsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUKeyServerMonitor(NURESTObject):
""" Represents a KeyServerMonitor in the VSD
Notes:
Represents a Keyserver Monitor Snapshot.
"""
__rest_name__ = "keyservermonitor"
__resource_name__ = "keyservermonitors"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a KeyServerMonitor instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> keyservermonitor = NUKeyServerMonitor(id=u'xxxx-xxx-xxx-xxx', name=u'KeyServerMonitor')
>>> keyservermonitor = NUKeyServerMonitor(data=my_dict)
"""
super(NUKeyServerMonitor, self).__init__()
# Read/Write Attributes
self._last_update_time = None
self._last_updated_by = None
self._gateway_secured_data_record_count = None
self._keyserver_monitor_encrypted_sek_count = None
self._keyserver_monitor_encrypted_seed_count = None
self._keyserver_monitor_sek_count = None
self._keyserver_monitor_seed_count = None
self._enterprise_secured_data_record_count = None
self._entity_scope = None
self._external_id = None
self.expose_attribute(local_name="last_update_time", remote_name="lastUpdateTime", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_secured_data_record_count", remote_name="gatewaySecuredDataRecordCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="keyserver_monitor_encrypted_sek_count", remote_name="keyserverMonitorEncryptedSEKCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="keyserver_monitor_encrypted_seed_count", remote_name="keyserverMonitorEncryptedSeedCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="keyserver_monitor_sek_count", remote_name="keyserverMonitorSEKCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="keyserver_monitor_seed_count", remote_name="keyserverMonitorSeedCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_secured_data_record_count", remote_name="enterpriseSecuredDataRecordCount", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.key_server_monitor_encrypted_seeds = NUKeyServerMonitorEncryptedSeedsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.key_server_monitor_seeds = NUKeyServerMonitorSeedsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.key_server_monitor_seks = NUKeyServerMonitorSEKsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_update_time(self):
""" Get last_update_time value.
Notes:
The time the latest SEK or Seed was created/removed (milliseconds since epoch)
This attribute is named `lastUpdateTime` in VSD API.
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, value):
""" Set last_update_time value.
Notes:
The time the latest SEK or Seed was created/removed (milliseconds since epoch)
This attribute is named `lastUpdateTime` in VSD API.
"""
self._last_update_time = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def gateway_secured_data_record_count(self):
""" Get gateway_secured_data_record_count value.
Notes:
Total number of Gateway Secured Data records
This attribute is named `gatewaySecuredDataRecordCount` in VSD API.
"""
return self._gateway_secured_data_record_count
@gateway_secured_data_record_count.setter
def gateway_secured_data_record_count(self, value):
""" Set gateway_secured_data_record_count value.
Notes:
Total number of Gateway Secured Data records
This attribute is named `gatewaySecuredDataRecordCount` in VSD API.
"""
self._gateway_secured_data_record_count = value
@property
def keyserver_monitor_encrypted_sek_count(self):
""" Get keyserver_monitor_encrypted_sek_count value.
Notes:
Total number of Keyserver Monitor Encrypted SEK records
This attribute is named `keyserverMonitorEncryptedSEKCount` in VSD API.
"""
return self._keyserver_monitor_encrypted_sek_count
@keyserver_monitor_encrypted_sek_count.setter
def keyserver_monitor_encrypted_sek_count(self, value):
""" Set keyserver_monitor_encrypted_sek_count value.
Notes:
Total number of Keyserver Monitor Encrypted SEK records
This attribute is named `keyserverMonitorEncryptedSEKCount` in VSD API.
"""
self._keyserver_monitor_encrypted_sek_count = value
@property
def keyserver_monitor_encrypted_seed_count(self):
""" Get keyserver_monitor_encrypted_seed_count value.
Notes:
Total number of Keyserver Monitor Encrypted Seed records
This attribute is named `keyserverMonitorEncryptedSeedCount` in VSD API.
"""
return self._keyserver_monitor_encrypted_seed_count
@keyserver_monitor_encrypted_seed_count.setter
def keyserver_monitor_encrypted_seed_count(self, value):
""" Set keyserver_monitor_encrypted_seed_count value.
Notes:
Total number of Keyserver Monitor Encrypted Seed records
This attribute is named `keyserverMonitorEncryptedSeedCount` in VSD API.
"""
self._keyserver_monitor_encrypted_seed_count = value
@property
def keyserver_monitor_sek_count(self):
""" Get keyserver_monitor_sek_count value.
Notes:
Total number of Keyserver Monitor SEK records
This attribute is named `keyserverMonitorSEKCount` in VSD API.
"""
return self._keyserver_monitor_sek_count
@keyserver_monitor_sek_count.setter
def keyserver_monitor_sek_count(self, value):
""" Set keyserver_monitor_sek_count value.
Notes:
Total number of Keyserver Monitor SEK records
This attribute is named `keyserverMonitorSEKCount` in VSD API.
"""
self._keyserver_monitor_sek_count = value
@property
def keyserver_monitor_seed_count(self):
""" Get keyserver_monitor_seed_count value.
Notes:
Total number of Keyserver Monitor Seed records
This attribute is named `keyserverMonitorSeedCount` in VSD API.
"""
return self._keyserver_monitor_seed_count
@keyserver_monitor_seed_count.setter
def keyserver_monitor_seed_count(self, value):
""" Set keyserver_monitor_seed_count value.
Notes:
Total number of Keyserver Monitor Seed records
This attribute is named `keyserverMonitorSeedCount` in VSD API.
"""
self._keyserver_monitor_seed_count = value
@property
def enterprise_secured_data_record_count(self):
""" Get enterprise_secured_data_record_count value.
Notes:
Total number of Enterprise Secured Data records
This attribute is named `enterpriseSecuredDataRecordCount` in VSD API.
"""
return self._enterprise_secured_data_record_count
@enterprise_secured_data_record_count.setter
def enterprise_secured_data_record_count(self, value):
""" Set enterprise_secured_data_record_count value.
Notes:
Total number of Enterprise Secured Data records
This attribute is named `enterpriseSecuredDataRecordCount` in VSD API.
"""
self._enterprise_secured_data_record_count = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
avg_line_length: 34.125 | max_line_length: 188 | alphanum_fraction: 0.652161

| Field | Value |
|---|---|
| hexsha | bb8296a5a342c98ec1381f301d89397bdf4a07c3 |
| size | 14,395 |
| ext / lang | py / Python |
| repo path | src/azure-cli/azure/cli/command_modules/batch/_validators.py |
| repo name | xaliciayang/azure-cli |
| repo head hexsha | 38c80c875e8a79d08d06a2f42ec82fd54934343e |
| licenses | ["MIT"] |
| max_stars_count | 7 (2020-04-26T09:54:05.000Z to 2021-07-22T16:54:41.000Z) |
| max_issues_count | 120 (2018-03-27T19:14:40.000Z to 2020-12-10T23:53:35.000Z) |
| max_forks_count | 13 (2020-06-30T16:23:36.000Z to 2022-03-29T17:12:05.000Z) |

content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import azure.batch.models
from azure.cli.core.util import get_file_json
from six.moves.urllib.parse import urlsplit # pylint: disable=import-error
# TYPES VALIDATORS
def datetime_format(value):
"""Validate the correct format of a datetime string and deserialize."""
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
try:
datetime_obj = Deserializer.deserialize_iso(value)
except DeserializationError:
message = "Argument {} is not a valid ISO-8601 datetime format"
raise ValueError(message.format(value))
return datetime_obj
def disk_encryption_target_format(value):
    """Space-separated target disks to be encrypted. Values can be either OsDisk or TemporaryDisk."""
if value == 'OsDisk':
return azure.batch.models.DiskEncryptionTarget.os_disk
if value == 'TemporaryDisk':
return azure.batch.models.DiskEncryptionTarget.temporary_disk
message = 'Argument {} is not a valid disk_encryption_target'
raise ValueError(message.format(value))
def disk_encryption_configuration_format(value):
targets = value.split(' ')
parsed_targets = []
for target in targets:
parsed_targets.append(disk_encryption_target_format(target))
return targets
def duration_format(value):
    """Validate the correct format of a timespan string and deserialize."""
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
try:
duration_obj = Deserializer.deserialize_duration(value)
except DeserializationError:
message = "Argument {} is not in a valid ISO-8601 duration format"
raise ValueError(message.format(value))
return duration_obj
def metadata_item_format(value):
"""Space-separated values in 'key=value' format."""
try:
data_name, data_value = value.split('=')
except ValueError:
message = ("Incorrectly formatted metadata. "
"Argument values should be in the format a=b c=d")
raise ValueError(message)
return {'name': data_name, 'value': data_value}
def environment_setting_format(value):
"""Space-separated values in 'key=value' format."""
try:
env_name, env_value = value.split('=')
except ValueError:
message = ("Incorrectly formatted environment settings. "
"Argument values should be in the format a=b c=d")
raise ValueError(message)
return {'name': env_name, 'value': env_value}
def application_package_reference_format(value):
"""Space-separated application IDs with optional version in 'id[#version]' format."""
app_reference = value.split('#', 1)
package = {'application_id': app_reference[0]}
try:
package['version'] = app_reference[1]
except IndexError: # No specified version - ignore
pass
return package
def certificate_reference_format(value):
"""Space-separated certificate thumbprints."""
cert = {'thumbprint': value, 'thumbprint_algorithm': 'sha1'}
return cert
def task_id_ranges_format(value):
"""Space-separated number ranges in 'start-end' format."""
try:
start, end = [int(i) for i in value.split('-')]
except ValueError:
message = ("Incorrectly formatted task ID range. "
"Argument values should be numbers in the format 'start-end'")
raise ValueError(message)
return {'start': start, 'end': end}
def resource_file_format(value):
"""Space-separated resource references in filename=httpurl format."""
try:
file_name, http_url = value.split('=', 1)
except ValueError:
message = ("Incorrectly formatted resource reference. "
"Argument values should be in the format filename=httpurl")
raise ValueError(message)
return {'file_path': file_name, 'http_url': http_url}
# COMMAND NAMESPACE VALIDATORS
def validate_required_parameter(namespace, parser):
"""Validates required parameters in Batch complex objects"""
if not parser.done:
parser.parse(namespace)
def storage_account_id(cmd, namespace):
"""Validate storage account name"""
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
if (namespace.storage_account and not
('/providers/Microsoft.ClassicStorage/storageAccounts/' in namespace.storage_account or
'/providers/Microsoft.Storage/storageAccounts/' in namespace.storage_account)):
storage_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE)
acc = storage_client.storage_accounts.get_properties(namespace.resource_group_name,
namespace.storage_account)
if not acc:
raise ValueError("Storage account named '{}' not found in the resource group '{}'.".
format(namespace.storage_account, namespace.resource_group_name))
namespace.storage_account = acc.id # pylint: disable=no-member
def keyvault_id(cmd, namespace):
    """Validate and resolve the KeyVault name or resource ID"""
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
if not namespace.keyvault:
return
if '/providers/Microsoft.KeyVault/vaults/' in namespace.keyvault:
resource = namespace.keyvault.split('/')
kv_name = resource[resource.index('Microsoft.KeyVault') + 2]
kv_rg = resource[resource.index('resourceGroups') + 1]
else:
kv_name = namespace.keyvault
kv_rg = namespace.resource_group_name
try:
keyvault_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = keyvault_client.vaults.get(kv_rg, kv_name)
if not vault:
raise ValueError("KeyVault named '{}' not found in the resource group '{}'.".
format(kv_name, kv_rg))
namespace.keyvault = vault.id # pylint: disable=no-member
namespace.keyvault_url = vault.properties.vault_uri
except Exception as exp:
raise ValueError('Invalid KeyVault reference: {}\n{}'.format(namespace.keyvault, exp))
def application_enabled(cmd, namespace):
"""Validates account has auto-storage enabled"""
from azure.mgmt.batch import BatchManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
client = get_mgmt_service_client(cmd.cli_ctx, BatchManagementClient)
acc = client.batch_account.get(namespace.resource_group_name, namespace.account_name)
if not acc:
raise ValueError("Batch account '{}' not found.".format(namespace.account_name))
if not acc.auto_storage or not acc.auto_storage.storage_account_id: # pylint: disable=no-member
raise ValueError("Batch account '{}' needs auto-storage enabled.".
format(namespace.account_name))
def validate_pool_resize_parameters(namespace):
    """Validate that the pool resize parameters are correct"""
if not namespace.abort and not namespace.target_dedicated_nodes:
raise ValueError("The target-dedicated-nodes parameter is required to resize the pool.")
def validate_json_file(namespace):
    """Validate that the given JSON file exists and contains valid JSON"""
if namespace.json_file:
try:
get_file_json(namespace.json_file)
except EnvironmentError:
raise ValueError("Cannot access JSON request file: " + namespace.json_file)
except ValueError as err:
raise ValueError("Invalid JSON file: {}".format(err))
def validate_cert_file(namespace):
    """Validate that the given certificate file exists and is readable"""
try:
with open(namespace.certificate_file, "rb"):
pass
except EnvironmentError:
raise ValueError("Cannot access certificate file: " + namespace.certificate_file)
def validate_options(namespace):
"""Validate any flattened request header option arguments."""
try:
start = namespace.start_range
end = namespace.end_range
except AttributeError:
return
else:
namespace.ocp_range = None
del namespace.start_range
del namespace.end_range
if start or end:
start = start if start else 0
end = end if end else ""
namespace.ocp_range = "bytes={}-{}".format(start, end)
def validate_file_destination(namespace):
"""Validate the destination path for a file download."""
try:
path = namespace.destination
except AttributeError:
return
else:
# TODO: Need to confirm this logic...
file_path = path
file_dir = os.path.dirname(path)
if os.path.isdir(path):
file_name = os.path.basename(namespace.file_name)
file_path = os.path.join(path, file_name)
elif not os.path.isdir(file_dir):
try:
os.mkdir(file_dir)
except EnvironmentError as exp:
message = "Directory {} does not exist, and cannot be created: {}"
raise ValueError(message.format(file_dir, exp))
if os.path.isfile(file_path):
raise ValueError("File {} already exists.".format(file_path))
namespace.destination = file_path
# CUSTOM REQUEST VALIDATORS
def validate_pool_settings(namespace, parser):
    """Custom parsing to enforce that either PaaS or IaaS instances are configured
in the add pool request body.
"""
if not namespace.json_file:
if namespace.node_agent_sku_id and not namespace.image:
raise ValueError("Missing required argument: --image")
if namespace.image:
try:
namespace.publisher, namespace.offer, namespace.sku = namespace.image.split(':', 2)
try:
namespace.sku, namespace.version = namespace.sku.split(':', 1)
except ValueError:
pass
except ValueError:
if '/' not in namespace.image:
message = ("Incorrect format for VM image. Should be in the format: \n"
"'publisher:offer:sku[:version]' OR a URL to an ARM image.")
raise ValueError(message)
namespace.virtual_machine_image_id = namespace.image
del namespace.image
if namespace.disk_encryption_targets:
namespace.targets = namespace.disk_encryption_targets
del namespace.disk_encryption_targets
groups = ['pool.cloud_service_configuration', 'pool.virtual_machine_configuration']
parser.parse_mutually_exclusive(namespace, True, groups)
paas_sizes = ['small', 'medium', 'large', 'extralarge']
if namespace.vm_size and namespace.vm_size.lower() in paas_sizes and not namespace.os_family:
message = ("The selected VM size is incompatible with Virtual Machine Configuration. "
"Please swap for the equivalent: Standard_A1 (small), Standard_A2 "
"(medium), Standard_A3 (large), or Standard_A4 (extra large).")
raise ValueError(message)
if namespace.auto_scale_formula:
namespace.enable_auto_scale = True
def validate_cert_settings(namespace):
"""Custom parsing for certificate commands - adds default thumbprint
algorithm.
"""
namespace.thumbprint_algorithm = 'sha1'
def validate_client_parameters(cmd, namespace):
"""Retrieves Batch connection parameters from environment variables"""
from azure.mgmt.batch import BatchManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
# simply try to retrieve the remaining variables from environment variables
if not namespace.account_name:
namespace.account_name = cmd.cli_ctx.config.get('batch', 'account', None)
if not namespace.account_key:
namespace.account_key = cmd.cli_ctx.config.get('batch', 'access_key', None)
if not namespace.account_endpoint:
namespace.account_endpoint = cmd.cli_ctx.config.get('batch', 'endpoint', None)
# Simple validation for account_endpoint
if not (namespace.account_endpoint.startswith('https://') or
namespace.account_endpoint.startswith('http://')):
namespace.account_endpoint = 'https://' + namespace.account_endpoint
namespace.account_endpoint = namespace.account_endpoint.rstrip('/')
# if account name is specified but no key, attempt to query if we use shared key auth
if namespace.account_name and namespace.account_endpoint and not namespace.account_key:
if cmd.cli_ctx.config.get('batch', 'auth_mode', 'shared_key') == 'shared_key':
endpoint = urlsplit(namespace.account_endpoint)
host = endpoint.netloc
client = get_mgmt_service_client(cmd.cli_ctx, BatchManagementClient)
acc = next((x for x in client.batch_account.list()
if x.name == namespace.account_name and x.account_endpoint == host), None)
if acc:
from msrestazure.tools import parse_resource_id
rg = parse_resource_id(acc.id)['resource_group']
namespace.account_key = \
client.batch_account.get_keys(rg, # pylint: disable=no-member
namespace.account_name).primary
else:
raise ValueError("Batch account '{}' not found.".format(namespace.account_name))
else:
if not namespace.account_name:
raise ValueError("Specify batch account in command line or environment variable.")
if not namespace.account_endpoint:
raise ValueError("Specify batch endpoint in command line or environment variable.")
if cmd.cli_ctx.config.get('batch', 'auth_mode', 'shared_key') == 'aad':
namespace.account_key = None
avg_line_length: 42.715134 | max_line_length: 101 | alphanum_fraction: 0.665787

| Field | Value |
|---|---|
| hexsha | 17a9c60721a6b7809001c8e1b1f277759754a8e1 |
| size | 803 |
| ext / lang | py / Python |
| repo path | config/server_config.py |
| repo name | andrearosasco/SynthSegmentation |
| repo head hexsha | 9083531f853757533fa69d9fca931f81c3a3ac2d |
| licenses | ["MIT"] |
| max_stars_count | 1 (2022-01-27T09:11:39.000Z to 2022-01-27T09:11:39.000Z) |
| max_issues_count | null |
| max_forks_count | null |

content:
class Config:
class General:
device = 'cuda'
class Train:
lr = 0.01
momentum = 0.9
weight_decay = 0.0001
log_every = 10
epoch = 100
class Eval:
wandb = True
class Data:
class Eval:
mb_size = 64
paths = None
class Train:
mb_size = 64
num_worker = 30
@classmethod
def to_dict(cls, target=None):
if target is None:
target = cls
res = {}
for k in dir(target):
if not k.startswith('__') and k != 'to_dict':
attr = getattr(target, k)
if type(attr) == type:
res[k] = cls.to_dict(attr)
else:
res[k] = attr
return res
avg_line_length: 19.119048 | max_line_length: 57 | alphanum_fraction: 0.440847

| Field | Value |
|---|---|
| hexsha | 1a6ff2d1c213f7a4dae6065da43b661255c0594e |
| size | 5,239 |
| ext / lang | py / Python |
| repo path | emulator/id.py |
| repo name | Adancurusul/UR408_Core |
| repo head hexsha | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 |
| licenses | ["MIT"] |
| max_stars_count | 4 (2020-07-13T03:12:19.000Z to 2021-08-03T02:09:28.000Z) |
| max_issues_count | null |
| max_forks_count | null |

content:
from myhdl import *
@block
def id(ins,alu_signal,mem_read,mem_write,register_write,rd_r0_mux,rd_r1_mux
,ds1_rx,ds2_rx,rd_mux0,rd_mux1,
cr_write,selector,imm,branch_offset,bra,ret,apc,jmp):
'''
ins in 16
alu_signal out 4
mem_read out 1
mem_write out 1
register_write out 8
rd_r0_mux out 1
rd_r1_mux out 1
selector out 3
cr_write out 1
ds1_rx out 3
ds2_rx out 3
imm out 8
branch_offset out 16
jmp out 1
ret out 1
apc out 1
bra out 1
'''
#opcode_r = Signal(intbv(0)[2:])
#opcode_b = Signal(intbv(1)[2:])
#opcode_sys = Signal(intbv(2)[2:])
opcode_ls = Signal(intbv(3)[2:])
funct4_0 = Signal(intbv(0)[4:])
funct4_1 = Signal(intbv(1)[4:])
funct4_2 = Signal(intbv(2)[4:])
funct4_3 = Signal(intbv(3)[4:])
funct4_4 = Signal(intbv(4)[4:])
funct4_5 = Signal(intbv(5)[4:])
#funct4_6 = Signal(intbv(6)[4:])
#funct4_7 = Signal(intbv(7)[4:])
funct4_8 = Signal(intbv(8)[4:])
funct4_9 = Signal(intbv(9)[4:])
#funct4_10 = Signal(intbv(10)[4:])
#funct4_11 = Signal(intbv(11)[4:])
#funct4_12 = Signal(intbv(12)[4:])
#funct4_13 = Signal(intbv(13)[4:])
#funct4_14 = Signal(intbv(14)[4:])
#funct4_15 = Signal(intbv(15)[4:])
#states_alu = enum('add0', 'sub0', 'and0', 'or0', 'xor0', 'sr0', 'sl0', 'sra0', 'slt0', 'sltu0', 'eq0', 'neq0')
states_opcode = enum("r","b","sys","ls")
states_rd = enum("a","b","c","d","e","f","g","h")
ins20 = Signal(intbv(0)[3:])
ins96 = Signal(intbv(0)[3:])
@always_comb
def trans_logic():
ins20.next = ins[2:0]
ins96.next = ins[9:6]
@always_comb
def id_logic():
if ins20==states_opcode.r:
#alu_signal
alu_signal.next = ins[6:2]
#register_write signal 1
register_write.next = ins[9:6]
else:
alu_signal.next = 0
# register_write signal 1
register_write.next = 0
if ins20==states_opcode.b:
bra.next = bool(1)
else:
bra.next = bool(0)
if ins20 == states_opcode.sys:
register_write[0].next = ins[6:2]==funct4_4
register_write[1].next = ins[6:2] == funct4_4
rd_r0_mux.next=ins[6:2] == funct4_4
rd_r1_mux.next=ins[6:2] == funct4_4
cr_write.next =ins[6:2] == funct4_3
#special
jmp.next = (ins[6:2]==funct4_0 or ins[6:2] ==funct4_2)
apc.next = (ins[6:2]==funct4_0 or ins[6:2]==funct4_1)
ret.next = (ins[6:2]==funct4_5)
else:
register_write[0].next = bool(0)
register_write[1].next = bool(0)
rd_r0_mux.next = bool(0)
rd_r1_mux.next = bool(0)
cr_write.next = bool(0)
# special
jmp.next = bool(0)
apc.next = bool(0)
ret.next = bool(0)
if ins20 == states_opcode.ls:
#mem
mem_read.next = (ins[6:2] == funct4_8)
mem_write.next = (ins[6:2] == funct4_9)
#register_write signal 2
#register_write[0].next = ((ins[9:6]==funct4_9)|(ins[6:2]==funct4_0))&(ins[9:6]==0)
if (ins[9:6]==funct4_9)|(ins[6:2]==funct4_0):
if ins96==states_rd.a:
register_write[0].next = 1
elif ins96 == states_rd.b:
register_write[1].next = 1
elif ins96 == states_rd.c:
register_write[2].next = 1
elif ins96 == states_rd.d:
register_write[3].next = 1
elif ins96 == states_rd.e:
register_write[4].next = 1
elif ins96 == states_rd.f:
register_write[5].next = 1
elif ins96 == states_rd.g:
register_write[6].next = 1
elif ins96 == states_rd.h:
register_write[7].next = 1
else :
register_write.next = 0
else:
register_write.next = 0
else :
mem_read.next = bool(0)
mem_write.next = bool(0)
register_write.next = 0
@always_comb
def rd_logic():
rd_mux0.next = (ins[6:2]==funct4_0)
rd_mux1.next = (ins[2:0]==opcode_ls)
#other two
#rd_r0_mux and rd_r1_mux are in id logic
#maybe need to change it
@always_comb
def ds_logic():
ds2_rx.next = ins[12:9]
ds1_rx.next = ins[12:9]
@always_comb
def cr_write_logic():
selector.next = ins[12:9]
@always_comb
def imm_branch_logic():
imm[7].next = 0
imm[7:0].next = ins[16:9]
branch_offset[15].next = ins[15]
branch_offset[14].next = ins[15]
branch_offset[13].next = ins[15]
branch_offset[12].next = ins[15]
branch_offset[11].next = ins[15]
branch_offset[10].next = ins[15]
branch_offset[9].next = ins[15]
branch_offset[8].next = ins[15]
branch_offset[8:4].next = ins[15:12]
branch_offset[4:1].next = ins[9:6]
branch_offset[0].next = 0
return instances()
avg_line_length: 30.817647 | max_line_length: 115 | alphanum_fraction: 0.525673

| Field | Value |
|---|---|
| hexsha | 7ae4cab312da5d5d9ec589b3b739a2dbec06136c |
| size | 169 |
| ext / lang | py / Python |
| repo path | relational/student_projects/2019_guth/models/MMI_Genetic/Genetic_MMI_Singlechoice.py |
| repo name | monthie/cogmods |
| repo head hexsha | 62af4b8bf2effb77f26a8877d6a89949164d83f0 |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | 11 (2020-05-04T09:05:29.000Z to 2021-04-08T13:22:34.000Z) |
| max_forks_count | 12 (2020-05-02T09:36:14.000Z to 2021-06-22T08:10:45.000Z) |

content:
import Genetic_MMI
class ModelApproachExp1(Genetic_MMI.ModelApproach):
def __init__(self):
Genetic_MMI.ModelApproach.__init__(self, 1, "MMI_singlechoice")
avg_line_length: 24.142857 | max_line_length: 71 | alphanum_fraction: 0.775148

| Field | Value |
|---|---|
| hexsha | 4ec8be9878cffb37324a0f3b1c5c78b7ea316d62 |
| size | 900 |
| ext / lang | py / Python |
| repo path | hyperv/neutron/constants.py |
| repo name | bclau/python-neutron-plugin-hyperv |
| repo head hexsha | 4e5c9f14483f7fe026f8f54f8a9be27f21d98cef |
| licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |

content:
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Special vlan_id value in ovs_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
TRUNK_ENDPOINT_MODE = 5
TYPE_FLAT = 'flat'
TYPE_LOCAL = 'local'
TYPE_VLAN = 'vlan'
avg_line_length: 34.615385 | max_line_length: 78 | alphanum_fraction: 0.745556

| Field | Value |
|---|---|
| hexsha | 0042cfaa876957de33f59236313a8b5b1fc6a028 |
| size | 4,928 |
| ext / lang | py / Python |
| repo path | samples/reg_export.py |
| repo name | timgates42/python-registry |
| repo head hexsha | c028c7fca99aaed835490ada8d43dfea42811d3c |
| licenses | ["Apache-2.0"] |
| max_stars_count | 326 (2015-01-10T20:48:33.000Z to 2022-03-14T07:59:58.000Z) |
| max_issues_count | 70 (2015-01-02T19:29:31.000Z to 2021-06-17T16:32:03.000Z) |
| max_forks_count | 108 (2015-01-07T18:20:45.000Z to 2022-03-05T15:26:06.000Z) |

content:
#!/usr/bin/python
# This file is part of python-registry.
#
# Copyright 2015 Willi Ballenthin <william.ballenthin@mandiant.com>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import unicode_literals
import sys
from Registry import Registry
stdout = sys.stdout
if hasattr(stdout, 'buffer'):
stdout = stdout.buffer
def usage():
return " USAGE:\n\t%s <Windows Registry file> <Hive prefix> <Registry key path> [<Registry Value>]" % sys.argv[0]
def reg_format_header():
"""
@rtype: byte string
"""
return u"\ufeffWindows Registry Editor Version 5.00\r\n\r\n".encode("utf-16le")
def reg_format_value_sz(value):
"""
@rtype: str
"""
return "\"{value}\"".format(value=value.value())
def reg_format_value_dword(value):
"""
@rtype: str
"""
return "dword:%08x" % (value.value())
def reg_format_value_bin(value):
"""
result should look like the following (after the '='):
"ProductLocalizedName"=hex:40,00,25,00,50,00,72,00,6f,00,67,00,72,00,61,00,\
6d,00,46,00,69,00,6c,00,65,00,73,00,25,00,5c,00,57,00,69,00,6e,00,64,00,6f,\
00,77,00,73,00,20,00,44,00,65,00,66,00,65,00,6e,00,64,00,65,00,72,00,5c,00,\
45,00,70,00,70,00,4d,00,61,00,6e,00,69,00,66,00,65,00,73,00,74,00,2e,00,64,\
00,6c,00,6c,00,2c,00,2d,00,31,00,30,00,30,00,30,00,00,00
so we:
- format into one big line of hex
- search for places to split, at about 80 chars or less
- split, with the former receiving a backslash, and the latter getting the
prefixed whitespace
if the type of value is RegBin, then we use the type prefix "hex:",
otherwise, the type prefix is "hex(%d):" where %d is the value_type constant.
eg. RegExpandSZ is "hex(3)"
@rtype: str
"""
ret = []
s = ",".join(["%02x" % (ord(c)) for c in value.value()])
if value.value_type() == Registry.RegBin:
s = "hex:" + s
else:
s = "hex(%d):" % (value.value_type()) + s
# there might be an off by one error in here somewhere...
name_len = len(value.name()) + 2 + 1 # name + 2 * '"' + '='
split_index = 80 - name_len
while len(s) > 0:
if len(s) > split_index:
# split on a comma
while s[split_index] != ",":
split_index -= 1
ret.append(s[:split_index + 1] + "\\")
s = " " + s[split_index + 1:]
else:
ret.append(s)
s = ""
split_index = 80
return "\r\n".join(ret)
def reg_format_value(value):
return {
Registry.RegSZ: reg_format_value_sz,
Registry.RegExpandSZ: reg_format_value_bin,
Registry.RegBin: reg_format_value_bin,
Registry.RegDWord: reg_format_value_dword,
}[value.value_type()](value)
def reg_format_key_values(registry, prefix, key, values):
"""
@rtype: byte string
"""
ret = []
path = key.path().partition("\\")[2] # remove root key name ("$$$PROTO_HIV")
    ret.append(u"[{prefix}\\{path}]".format(prefix=prefix, path=path))  # single backslash between hive prefix and key path
for value in values:
ret.append("\"{name}\"={value}".format(name=value.name(),
value=reg_format_value(value)))
ret.append("\r\n")
return u"\r\n".join(ret).encode("utf-16le")
def main(hive, prefix, keyname, *valuenames):
"""
@param prefix: something like "HKEY_LOCAL_MACHINE" to prepend to formatted key names.
"""
registry = Registry.Registry(hive)
key = None
try:
if keyname.startswith(registry.root().name()):
key = registry.open(keyname.partition("\\")[2])
else:
key = registry.open(keyname)
except Registry.RegistryKeyNotFoundException:
print("Error: Specified key not found")
sys.exit(-1)
values = []
if len(valuenames) != 0:
for valuename in valuenames:
if valuename == "default":
valuename = "(default)"
values.append(key.value(valuename))
else:
values = [v for v in key.values()]
stdout.write(reg_format_header())
stdout.write(reg_format_key_values(registry, prefix, key, values))
if __name__ == '__main__':
if len(sys.argv) < 4:
print(usage())
sys.exit(-1)
main(*sys.argv[1:])
| 30.233129
| 118
| 0.609375
|
4547912251eb904f9eee6f8cf2dddfe540c1a61e
| 9,040
|
py
|
Python
|
qBittorrentPostProcess.py
|
rmangaha/sickbeard_mp4_automator
|
669b9041df7a21afc554f125fefb29e5188cf656
|
[
"MIT"
] | null | null | null |
qBittorrentPostProcess.py
|
rmangaha/sickbeard_mp4_automator
|
669b9041df7a21afc554f125fefb29e5188cf656
|
[
"MIT"
] | null | null | null |
qBittorrentPostProcess.py
|
rmangaha/sickbeard_mp4_automator
|
669b9041df7a21afc554f125fefb29e5188cf656
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import re
import sys
import shutil
from autoprocess import autoProcessTV, autoProcessMovie, autoProcessTVSR, sonarr, radarr
from resources.log import getLogger
from resources.readsettings import ReadSettings
from resources.mediaprocessor import MediaProcessor
def getHost(host='localhost', port=8080, ssl=False):
protocol = "https://" if ssl else "http://"
return protocol + host + ":" + str(port) + "/"
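# For example, getHost("localhost", 8080, ssl=True) returns "https://localhost:8080/",
# and getHost() with defaults returns "http://localhost:8080/".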
log = getLogger("qBittorrentPostProcess")
log.info("qBittorrent post processing started.")
if len(sys.argv) != 7:
log.error("Not enough command line parameters present, are you launching this from qBittorrent?")
log.error("#Args: %L %T %R %F %N %I Category, Tracker, RootPath, ContentPath , TorrentName, InfoHash")
log.error("Length was %s" % str(len(sys.argv)))
log.error(str(sys.argv[1:]))
sys.exit(1)
try:
settings = ReadSettings()
label = sys.argv[1].lower().strip()
root_path = str(sys.argv[3])
content_path = str(sys.argv[4])
name = sys.argv[5]
torrent_hash = sys.argv[6]
categories = [settings.qBittorrent['cp'], settings.qBittorrent['sb'], settings.qBittorrent['sonarr'], settings.qBittorrent['radarr'], settings.qBittorrent['sr'], settings.qBittorrent['bypass']]
path_mapping = settings.qBittorrent['path-mapping']
log.debug("Root Path: %s." % root_path)
log.debug("Label: %s." % label)
log.debug("Categories: %s." % categories)
log.debug("Torrent hash: %s." % torrent_hash)
log.debug("Torrent name: %s." % name)
single_file = os.path.isfile(content_path)
if not label or len([x for x in categories if x.startswith(label)]) < 1:
log.error("No valid label detected.")
sys.exit(1)
if len(categories) != len(set(categories)):
log.error("Duplicate category detected. Category names must be unique.")
sys.exit(1)
# Import python-qbittorrent
try:
from qbittorrent import Client
except ImportError:
log.exception("Python module PYTHON-QBITTORRENT is required. Install with 'pip install python-qbittorrent' then try again.")
sys.exit(1)
delete_dir = False
host = getHost(settings.qBittorrent['host'], settings.qBittorrent['port'], settings.qBittorrent['ssl'])
qb = Client(host)
qb.login(settings.qBittorrent['username'], settings.qBittorrent['password'])
if settings.qBittorrent['actionbefore']:
if settings.qBittorrent['actionbefore'] == 'pause': # currently only support pausing
log.debug("Sending action %s to qBittorrent" % settings.qBittorrent['actionbefore'])
qb.pause(torrent_hash)
if settings.qBittorrent['convert']:
# Check for custom qBittorrent output_dir
if settings.qBittorrent['output_dir']:
settings.output_dir = settings.qBittorrent['output_dir']
log.debug("Overriding output_dir to %s." % settings.qBittorrent['output_dir'])
# Perform conversion.
log.info("Performing conversion")
settings.delete = False
if not settings.output_dir:
# If the user hasn't set an output directory, go up one from the root path and create a directory there as [name]-convert
suffix = "convert"
settings.output_dir = os.path.abspath(os.path.join(root_path, '..', ("%s-%s" % (re.sub(settings.regex, '_', name), suffix))))
else:
settings.output_dir = os.path.join(settings.output_dir, re.sub(settings.regex, '_', name))
if not os.path.exists(settings.output_dir):
try:
os.makedirs(settings.output_dir)
delete_dir = settings.output_dir
except:
log.exception("Unable to make output directory %s." % settings.output_dir)
mp = MediaProcessor(settings)
if single_file:
# single file
inputfile = content_path
info = mp.isValidSource(inputfile)
if info:
log.info("Processing file %s." % inputfile)
try:
output = mp.process(inputfile, reportProgress=True, info=info)
if not output:
log.error("No output file generated for single torrent download.")
sys.exit(1)
except:
log.exception("Error converting file %s." % inputfile)
else:
log.debug("Processing multiple files.")
ignore = []
for r, d, f in os.walk(root_path):
for files in f:
inputfile = os.path.join(r, files)
info = mp.isValidSource(inputfile)
if info and inputfile not in ignore:
log.info("Processing file %s." % inputfile)
try:
output = mp.process(inputfile, info=info)
if output and output.get('output'):
ignore.append(output.get('output'))
else:
log.error("Converting file failed %s." % inputfile)
except:
log.exception("Error converting file %s." % inputfile)
else:
log.debug("Ignoring file %s." % inputfile)
if len(ignore) < 1:
log.error("No output files generated for the entirety of this mutli file torrent, aborting.")
sys.exit(1)
path = settings.output_dir
else:
suffix = "copy"
# name = name[:260-len(suffix)]
if single_file:
log.info("Single File Torrent")
root, filename = os.path.split(root_path)
filename, extension = os.path.splitext(filename)
newpath = os.path.join(root, ("%s-%s" % (re.sub(settings.regex, '_', filename), suffix)))
else:
log.info("Multi File Torrent")
newpath = os.path.abspath(os.path.join(root_path, '..', ("%s-%s" % (re.sub(settings.regex, '_', name), suffix))))
if not os.path.exists(newpath):
os.makedirs(newpath)
log.debug("Creating temporary directory %s" % newpath)
if single_file:
inputfile = content_path
shutil.copy(inputfile, newpath)
log.debug("Copying %s to %s" % (inputfile, newpath))
else:
for r, d, f in os.walk(root_path):
for files in f:
inputfile = os.path.join(r, files)
shutil.copy(inputfile, newpath)
log.debug("Copying %s to %s" % (inputfile, newpath))
path = newpath
delete_dir = newpath
if categories[0].startswith(label):
log.info("Passing %s directory to Couch Potato." % path)
autoProcessMovie.process(path, settings, pathMapping=path_mapping)
elif categories[1].startswith(label):
log.info("Passing %s directory to Sickbeard." % path)
autoProcessTV.processEpisode(path, settings, pathMapping=path_mapping)
elif categories[2].startswith(label):
log.info("Passing %s directory to Sonarr." % path)
sonarr.processEpisode(path, settings, pathMapping=path_mapping)
elif categories[3].startswith(label):
log.info("Passing %s directory to Radarr." % path)
radarr.processMovie(path, settings, pathMapping=path_mapping)
elif categories[4].startswith(label):
log.info("Passing %s directory to Sickrage." % path)
autoProcessTVSR.processEpisode(path, settings, pathMapping=path_mapping)
elif categories[5].startswith(label):
log.info("Bypassing any further processing as per category.")
# Run a qbittorrent action after conversion.
if settings.qBittorrent['actionafter']:
# currently only support resuming or deleting torrent
if settings.qBittorrent['actionafter'] == 'resume':
log.debug("Sending action %s to qBittorrent" % settings.qBittorrent['actionafter'])
qb.resume(torrent_hash)
elif settings.qBittorrent['actionafter'] == 'delete':
# this will delete the torrent from qBittorrent but it WILL NOT delete the data
log.debug("Sending action %s to qBittorrent" % settings.qBittorrent['actionafter'])
qb.delete(torrent_hash)
elif settings.qBittorrent['actionafter'] == 'deletedata':
# this will delete the torrent from qBittorrent and delete data
log.debug("Sending action %s to qBittorrent" % settings.qBittorrent['actionafter'])
qb.delete_permanently(torrent_hash)
if delete_dir:
if os.path.exists(delete_dir):
try:
os.rmdir(delete_dir)
log.debug("Successfully removed tempoary directory %s." % delete_dir)
except:
log.exception("Unable to delete temporary directory")
except:
log.exception("Unexpected exception.")
sys.exit(1)
| 43.671498
| 197
| 0.609181
|
d1ce33a807058d1d7092bbec31d8c3d7ac7897ac
| 2,308
|
py
|
Python
|
intensity_normalization/util/io.py
|
radmodel/intensity-normalization
|
b7b39fc6ac74450bf5e19d32b4657dcfe96a9559
|
[
"Apache-2.0"
] | 223
|
2018-08-03T03:41:45.000Z
|
2022-03-24T13:55:02.000Z
|
intensity_normalization/util/io.py
|
radmodel/intensity-normalization
|
b7b39fc6ac74450bf5e19d32b4657dcfe96a9559
|
[
"Apache-2.0"
] | 52
|
2018-09-10T15:38:57.000Z
|
2022-03-17T19:04:37.000Z
|
intensity_normalization/util/io.py
|
radmodel/intensity-normalization
|
b7b39fc6ac74450bf5e19d32b4657dcfe96a9559
|
[
"Apache-2.0"
] | 50
|
2018-12-04T06:51:55.000Z
|
2022-03-26T16:06:11.000Z
|
# -*- coding: utf-8 -*-
"""
intensity_normalization.util.io
assortment of input/output utilities for the project
Author: Jacob Reinhold (jacob.reinhold@jhu.edu)
Created on: Jun 1, 2021
"""
__all__ = [
"gather_images",
"gather_images_and_masks",
"glob_ext",
"split_filename",
]
from pathlib import Path
from typing import List, Optional, Tuple, Union
import nibabel as nib
from intensity_normalization.type import Array, NiftiImage, PathLike
def gather_images(
dirpath: PathLike,
ext: str = "nii*",
return_data: bool = False,
) -> Union[List[NiftiImage], List[Array]]:
"""return all images of extension `ext` from a directory"""
if isinstance(dirpath, str):
dirpath = Path(dirpath)
assert dirpath.is_dir()
image_filenames = glob_ext(dirpath, ext)
images = []
for fn in image_filenames:
image = nib.load(fn)
if return_data:
image = image.get_fdata()
images.append(image)
return images
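# For example, gather_images("/data/t1", return_data=True) would return the voxel
# arrays (via get_fdata) for every NIfTI file in the hypothetical /data/t1 folder,
# rather than the nibabel image objects themselves.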
def gather_images_and_masks(
image_dir: PathLike,
mask_dir: Optional[PathLike] = None,
ext: str = "nii*",
return_data: bool = False,
) -> Union[
Tuple[List[NiftiImage], List[Optional[NiftiImage]]],
Tuple[List[Array], List[Optional[Array]]],
]:
images = gather_images(image_dir, ext, return_data)
if mask_dir is not None:
masks = gather_images(mask_dir, ext, return_data)
else:
masks = [None] * len(images)
return images, masks
def glob_ext(dirpath: PathLike, ext: str = "nii*") -> List[Path]:
"""return a sorted list of ext files for a given directory path"""
if isinstance(dirpath, str):
dirpath = Path(dirpath)
assert dirpath.is_dir()
filenames = sorted(dirpath.resolve().glob(f"*.{ext}"))
return filenames
def split_filename(
filepath: Union[str, Path],
resolve: bool = False,
) -> Tuple[Path, str, str]:
"""split a filepath into the directory, base, and extension"""
filepath = Path(filepath)
if resolve:
filepath = filepath.resolve()
path = filepath.parent
_base = Path(filepath.stem)
ext = filepath.suffix
if ext == ".gz":
ext2 = _base.suffix
base = str(_base.stem)
ext = ext2 + ext
else:
base = str(_base)
return Path(path), base, ext
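# Illustrative example (hypothetical path): split_filename("/data/scan.nii.gz")
# returns (Path("/data"), "scan", ".nii.gz"), keeping the double extension intact
# thanks to the ".gz" special case above.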
| 25.644444
| 70
| 0.647314
|
6e566974c1fe7a31273d13aaa0bf136617cb022a
| 2,166
|
py
|
Python
|
tests/test_images.py
|
tongjianjun/reportlab
|
c5544d7d05dc9f74c388f534d1a271c85abb751b
|
[
"BSD-3-Clause"
] | 4
|
2019-08-13T09:54:55.000Z
|
2021-11-10T02:15:49.000Z
|
tests/test_images.py
|
cnauroth/reportlab
|
377d4ff58491dc6de48551e730c3d7f72db783e5
|
[
"BSD-3-Clause"
] | 1
|
2019-09-17T11:16:51.000Z
|
2019-09-19T11:13:53.000Z
|
tests/test_images.py
|
cnauroth/reportlab
|
377d4ff58491dc6de48551e730c3d7f72db783e5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__="""Tests to do with image handling.
Most of them make use of test\\pythonpowereed.gif."""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import os
try:
from hashlib import md5
except ImportError:
from md5 import md5
import unittest
from reportlab.lib.utils import ImageReader
"""To avoid depending on external stuff, I made a small 5x5 image and
attach its 'file contents' here in several formats.
The image looks like this, with K=black, R=red, G=green, B=blue, W=white.
K R G B W
K R G B W
K R G B W
K R G B W
K R G B W
"""
sampleRAW = '\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00\xff\xff\xff\xff'
samplePNG = '\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x05\x00\x00\x00\x05\x08\x02\x00\x00\x00\x02\r\xb1\xb2\x00\x00\x00:IDATx\x9cb```\xf8\x0f\xc3\xff\xff\xff\x07\x00\x00\x00\xff\xffbb@\x05\x00\x00\x00\x00\xff\xffB\xe7\x03\x00\x00\x00\xff\xffB\xe7\x03\x00\x00\x00\xff\xffB\xe7\x03\x00\x00\x00\xff\xff\x03\x00\x9e\x01\x06\x03\x03\xc4A\xb4\x00\x00\x00\x00IEND\xaeB`\x82'
class ReaderTestCase(unittest.TestCase):
"Simplest tests to import images, work under Jython or PIL"
def test(self):
from reportlab.lib.testutils import testsFolder
from reportlab.lib.utils import rl_isfile
imageFileName = os.path.join(testsFolder,'pythonpowered.gif')
assert rl_isfile(imageFileName), "%s not found!" % imageFileName
ir = ImageReader(imageFileName)
assert ir.getSize() == (110,44)
pixels = ir.getRGBData()
assert md5(pixels).hexdigest() == '02e000bf3ffcefe9fc9660c95d7e27cf'
def makeSuite():
return makeSuiteForClasses(ReaderTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| 41.653846
| 374
| 0.724377
|
fcfb7f6f54703d320b409bfe9ee5ca463a80e61f
| 7,780
|
py
|
Python
|
ansibeautifier.py
|
BoughtData92730/ansibeautifier
|
a428fe8c924b345d9ec92c1f754922a70a6d1fd9
|
[
"MIT"
] | 1
|
2021-05-16T16:50:54.000Z
|
2021-05-16T16:50:54.000Z
|
ansibeautifier/__init__.py
|
BoughtData92730/ansibeautifier
|
a428fe8c924b345d9ec92c1f754922a70a6d1fd9
|
[
"MIT"
] | 2
|
2021-02-18T05:25:55.000Z
|
2021-02-20T06:52:58.000Z
|
ansibeautifier/__init__.py
|
BoughtData92730/ansibeautifier
|
a428fe8c924b345d9ec92c1f754922a70a6d1fd9
|
[
"MIT"
] | null | null | null |
class Beautifier:
@staticmethod
def reset():
return "\u001b[0m"
@staticmethod
def red(text="", bright=False):
if not bright:
return u"\u001b[31m" + text + "\u001b[0m"
else:
return u"\u001b[31;1m" + text + "\u001b[0m"
@staticmethod
def green(text="", bright=False):
if not bright:
return u"\u001b[32m" + text + "\u001b[0m"
else:
return u"\u001b[32;1m" + text + "\u001b[0m"
@staticmethod
def black(text="", bright=False):
if not bright:
return u"\u001b[30m" + text + "\u001b[0m"
else:
return u"\u001b[30;1m" + text + "\u001b[0m"
@staticmethod
def yellow(text="", bright=False):
if not bright:
return u"\u001b[33m" + text + "\u001b[0m"
else:
return u"\u001b[33;1m" + text + "\u001b[0m"
@staticmethod
def blue(text="", bright=False):
if not bright:
return u"\u001b[34m" + text + "\u001b[0m"
else:
return u"\u001b[34;1m" + text + "\u001b[0m"
@staticmethod
def magenta(text="", bright=False):
if not bright:
return u"\u001b[35m" + text + "\u001b[0m"
else:
return u"\u001b[35;1m" + text + "\u001b[0m"
@staticmethod
def cyan(text="", bright=False):
if not bright:
return u"\u001b[36m" + text + "\u001b[0m"
else:
return u"\u001b[36;1m" + text + "\u001b[0m"
@staticmethod
def white(text="", bright=False):
if not bright:
return u"\u001b[37m" + text + "\u001b[0m"
else:
return u"\u001b[37;1m" + text + "\u001b[0m"
@staticmethod
def always_red(text="", bright=False):
if not bright:
return u"\u001b[31m" + text
else:
return u"\u001b[31;1m" + text
@staticmethod
def always_green(text="", bright=False):
if not bright:
return u"\u001b[32m" + text
else:
return u"\u001b[32;1m" + text
@staticmethod
def always_black(text="", bright=False):
if not bright:
return u"\u001b[30m" + text
else:
return u"\u001b[30;1m" + text
@staticmethod
def always_yellow(text="", bright=False):
if not bright:
return u"\u001b[33m" + text
else:
return u"\u001b[33;1m" + text
@staticmethod
def always_blue(text="", bright=False):
if not bright:
return u"\u001b[34m" + text
else:
return u"\u001b[34;1m" + text
@staticmethod
def always_magenta(text="", bright=False):
if not bright:
return u"\u001b[35m" + text
else:
return u"\u001b[35;1m" + text
@staticmethod
def always_cyan(text="", bright=False):
if not bright:
return u"\u001b[36m" + text
else:
return u"\u001b[36;1m" + text
@staticmethod
def always_white(text="", bright=False):
if not bright:
return u"\u001b[37m" + text
else:
return u"\u001b[37;1m" + text
@staticmethod
def background_black(text="", bright=False):
if not bright:
return u"\u001b[40m" + text + "\u001b[0m"
else:
return u"\u001b[40;1m" + text + "\u001b[0m"
@staticmethod
def background_red(text="", bright=False):
if not bright:
return u"\u001b[41m" + text + "\u001b[0m"
else:
return u"\u001b[41;1m" + text + "\u001b[0m"
@staticmethod
def background_green(text="", bright=False):
if not bright:
return u"\u001b[42m" + text + "\u001b[0m"
else:
return u"\u001b[42;1m" + text + "\u001b[0m"
@staticmethod
def background_yellow(text="", bright=False):
if not bright:
return u"\u001b[43m" + text + "\u001b[0m"
else:
return u"\u001b[43;1m" + text + "\u001b[0m"
@staticmethod
def background_blue(text="", bright=False):
if not bright:
return u"\u001b[44m" + text + "\u001b[0m"
else:
return u"\u001b[44;1m" + text + "\u001b[0m"
@staticmethod
def background_magenta(text="", bright=False):
if not bright:
return u"\u001b[45m" + text + "\u001b[0m"
else:
return u"\u001b[45;1m" + text + "\u001b[0m"
@staticmethod
def background_cyan(text="", bright=False):
if not bright:
return u"\u001b[46m" + text + "\u001b[0m"
else:
return u"\u001b[46;1m" + text + "\u001b[0m"
@staticmethod
def background_white(text="", bright=False):
if not bright:
return u"\u001b[47m" + text + "\u001b[0m"
else:
return u"\u001b[47;1m" + text + "\u001b[0m"
@staticmethod
def always_background_black(text="", bright=False):
if not bright:
return u"\u001b[40m" + text
else:
return u"\u001b[40;1m" + text
@staticmethod
def always_background_red(text="", bright=False):
if not bright:
return u"\u001b[41m" + text
else:
return u"\u001b[41;1m" + text
@staticmethod
def always_background_green(text="", bright=False):
if not bright:
return u"\u001b[42m" + text
else:
return u"\u001b[42;1m" + text
@staticmethod
def always_background_yellow(text="", bright=False):
if not bright:
return u"\u001b[43m" + text
else:
return u"\u001b[43;1m" + text
@staticmethod
def always_background_blue(text="", bright=False):
if not bright:
return u"\u001b[44m" + text
else:
return u"\u001b[44;1m" + text
@staticmethod
def always_background_magenta(text="", bright=False):
if not bright:
return u"\u001b[45m" + text
else:
return u"\u001b[45;1m" + text
@staticmethod
def always_background_cyan(text="", bright=False):
if not bright:
return u"\u001b[46m" + text
else:
return u"\u001b[46;1m" + text
@staticmethod
def always_background_white(text="", bright=False):
if not bright:
return u"\u001b[47m" + text
else:
return u"\u001b[47;1m" + text
@staticmethod
def bold(text="", always=False):
if not always:
return u"\u001b[1m" + text + "\u001b[0m"
else:
return u"\u001b[1m" + text
@staticmethod
def underline(text="", always=False):
if not always:
return u"\u001b[4m" + text + "\u001b[0m"
else:
return u"\u001b[4m" + text
@staticmethod
def reverse(text="", always=False):
if not always:
return u"\u001b[7m" + text + "\u001b[0m"
else:
return u"\u001b[7m" + text
@staticmethod
def conceal(text="", always=False):
if not always:
return u"\u001b[8m" + text + "\u001b[0m"
else:
return u"\u001b[8m" + text
@staticmethod
def reset_intensity():
return u"\u001b[22m"
@staticmethod
def reset_foreground_color():
return u"\u001b[39m"
@staticmethod
def reset_background_color():
return u"\u001b[49m"
| 25.592105
| 58
| 0.505398
|
cf1aeaf3fc5ef9165b0ecda50bee189f923e352c
| 2,920
|
py
|
Python
|
src/ghutil/cli/pr/read.py
|
jwodder/ghutil
|
f0b903a76177cd49f5d48ac9890609f4da9071d9
|
[
"MIT"
] | 6
|
2017-05-29T19:29:44.000Z
|
2020-04-05T00:01:02.000Z
|
src/ghutil/cli/pr/read.py
|
jwodder/ghutil
|
f0b903a76177cd49f5d48ac9890609f4da9071d9
|
[
"MIT"
] | 2
|
2017-06-29T09:39:32.000Z
|
2020-01-07T22:48:02.000Z
|
src/ghutil/cli/pr/read.py
|
jwodder/ghutil
|
f0b903a76177cd49f5d48ac9890609f4da9071d9
|
[
"MIT"
] | 1
|
2017-06-11T16:43:47.000Z
|
2017-06-11T16:43:47.000Z
|
from datetime import datetime
from textwrap import indent
import click
from dateutil.tz import tzlocal, tzutc
from ghutil.types import Issue
EMOJI = {
'+1': '\U0001F44D',
'-1': '\U0001F44E',
'laugh': '\U0001F604',
'confused': '\U0001F615',
'heart': '\u2764',
'hooray': '\U0001F389',
}
@click.command()
@click.option('--since', metavar='TIMESTAMP',
help='Only show comments newer than the given timestamp')
@Issue.argument('issue')
def cli(issue, since):
""" View comments on an issue/PR """
output = show_comment(issue.data)
for comment in issue.comments.get(params={"since": since}):
output += '\n' + show_comment(comment)
# echo_via_pager adds a newline, so remove the "extra" newline at the end
click.echo_via_pager(output.rstrip('\r\n'))
def reformat_date(ts):
return datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')\
.replace(tzinfo=tzutc())\
.astimezone(tzlocal())\
.strftime('%Y-%m-%d %H:%M:%S %z')
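# For instance, reformat_date("2021-06-01T12:00:00Z") renders the UTC timestamp in
# the local timezone, e.g. "2021-06-01 08:00:00 -0400" on a machine set to US
# Eastern time (the exact offset depends on the local tz).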
def show_comment(obj):
# Based on the output format of "git log"
headers = []
if "title" in obj:
# Must be the actual issue object
headers.append((
'PR:' if obj.get("pull_request") else 'Issue:',
obj["title"],
))
headers.append((
'State:',
obj["state"] + (' [LOCKED]' if obj["locked"] else '')
))
else:
# Must be just a comment
headers.append(('comment', obj["id"]))
headers.append(('Author:', obj["user"]["login"]))
date = reformat_date(obj["created_at"])
if obj.get("updated_at") is not None and \
obj["updated_at"] != obj["created_at"]:
date += f' (last updated {reformat_date(obj["updated_at"])})'
headers.append(('Date:', date))
if "title" in obj:
        headers.append(('Labels:', ', '.join(lb["name"] for lb in obj["labels"])))
headers.append((
'Assignees:',
', '.join(u["login"] for u in obj["assignees"])
))
if obj["milestone"] is not None:
headers.append(('Milestone:', obj["milestone"]["title"]))
if obj["closed_at"] is not None:
headers.append((
'Closed:',
'{} by {}'.format(
reformat_date(obj["closed_at"]),
obj["closed_by"]["login"],
)
))
reactions = []
for k,v in sorted(obj.get("reactions", {}).items()):
if k not in ('total_count', 'url') and v:
symbol = EMOJI.get(k, ':' + k + ':')
reactions.append(f'{symbol} {v}')
if reactions:
headers.append(('Reactions:', ' '.join(reactions)))
width = max(len(k) for k,v in headers)
return ''.join(
f'{k:{width}} {v}\n' for k,v in headers
) + '\n' + indent(obj["body"], ' ' * 4).rstrip('\r\n') + '\n'
| 35.180723
| 81
| 0.530822
|
65825578874fb488c61a7f377e57b59564f9d15e
| 18,003
|
py
|
Python
|
evcouplings/utils/app.py
|
berkalpay/EVcouplings
|
9ab590ab2e59fb893b3b7a3993c4e1d13ec87d1e
|
[
"MIT"
] | null | null | null |
evcouplings/utils/app.py
|
berkalpay/EVcouplings
|
9ab590ab2e59fb893b3b7a3993c4e1d13ec87d1e
|
[
"MIT"
] | null | null | null |
evcouplings/utils/app.py
|
berkalpay/EVcouplings
|
9ab590ab2e59fb893b3b7a3993c4e1d13ec87d1e
|
[
"MIT"
] | null | null | null |
"""
evcouplings command-line app
Authors:
Thomas A. Hopf
.. todo::
Once there are different pipelines to run, there should
be individual commands for these, so will need to define additional
entry points for applications (e.g. evcomplex in addition to evcouplings).
"""
import re
from copy import deepcopy
from os import path, environ
from collections.abc import Mapping
import click
from evcouplings import utils
from evcouplings.utils import pipeline
from evcouplings.utils.tracker import (
get_result_tracker, EStatus
)
from evcouplings.utils.system import (
create_prefix_folders, ResourceError, valid_file
)
from evcouplings.utils.config import (
check_required, InvalidParameterError,
read_config_file, write_config_file
)
# store individual config files in files with this name
CONFIG_NAME = "{}_config.txt"
def substitute_config(**kwargs):
"""
Substitute command line arguments into config file
Parameters
----------
**kwargs
Command line parameters to be substituted
into configuration file
Returns
-------
dict
Updated configuration
"""
# mapping of command line parameters to config file entries
CONFIG_MAP = {
"prefix": ("global", "prefix"),
"protein": ("global", "sequence_id"),
"seqfile": ("global", "sequence_file"),
"alignment": ("align", "input_alignment"),
"iterations": ("align", "iterations"),
"id": ("align", "seqid_filter"),
"seqcov": ("align", "minimum_sequence_coverage"),
"colcov": ("align", "minimum_column_coverage"),
"theta": ("global", "theta"),
"plmiter": ("couplings", "iterations"),
"queue": ("environment", "queue"),
"time": ("environment", "time"),
"cores": ("environment", "cores"),
"memory": ("environment", "memory"),
}
# try to read in configuration
config_file = kwargs["config"]
if not valid_file(config_file):
raise ResourceError(
"Config file does not exist or is empty: {}".format(
config_file
)
)
config = read_config_file(config_file, preserve_order=True)
# substitute command-line parameters into configuration
# (if straightforward substitution)
for param, value in kwargs.items():
if param in CONFIG_MAP and value is not None:
outer, inner = CONFIG_MAP[param]
config[outer][inner] = value
# make sure that number of CPUs requested by
# programs within pipeline does not exceed
# number of cores requested in environment
if config["environment"]["cores"] is not None:
config["global"]["cpu"] = config["environment"]["cores"]
# handle the more complicated parameters
# If alignment is given, run "existing" protocol
if kwargs.get("alignment", None) is not None:
# TODO: think about what to do if sequence_file is given
# (will not be used)
config["align"]["protocol"] = "existing"
# subregion of protein
if kwargs.get("region", None) is not None:
region = kwargs["region"]
m = re.search("(\d+)-(\d+)", region)
if m:
start, end = map(int, m.groups())
config["global"]["region"] = [start, end]
else:
raise InvalidParameterError(
"Region string does not have format "
"start-end (e.g. 5-123):".format(
region
)
)
# pipeline stages to run
if kwargs.get("stages", None) is not None:
config["stages"] = kwargs["stages"].replace(
" ", ""
).split(",")
# sequence alignment input database
if kwargs.get("database", None) is not None:
db = kwargs["database"]
# check if we have a predefined sequence database
# if so, use it; otherwise, interpret as file path
if db in config["databases"]:
config["align"]["database"] = db
else:
config["align"]["database"] = "custom"
config["databases"]["custom"] = db
# make sure bitscore and E-value thresholds are exclusively set
if kwargs.get("bitscores", None) is not None and kwargs.get("evalues", None) is not None:
raise InvalidParameterError(
"Can not specify bitscore and E-value threshold at the same time."
)
if kwargs.get("bitscores", None) is not None:
thresholds = kwargs["bitscores"]
bitscore = True
elif kwargs.get("evalues", None) is not None:
thresholds = kwargs["evalues"]
bitscore = False
else:
thresholds = None
if thresholds is not None:
T = thresholds.replace(" ", "").split(",")
try:
x_cast = [
(float(t) if "." in t else int(t)) for t in T
]
except ValueError:
raise InvalidParameterError(
"Bitscore/E-value threshold(s) must be numeric: "
"{}".format(thresholds)
)
config["align"]["use_bitscores"] = bitscore
# check if we have a single threshold (single job)
# or if we need to create an array of jobs
if len(x_cast) == 1:
config["align"]["domain_threshold"] = x_cast[0]
config["align"]["sequence_threshold"] = x_cast[0]
else:
config["batch"] = {}
for t in x_cast:
sub_prefix = ("_b" if bitscore else "_e") + str(t)
config["batch"][sub_prefix] = {
"align": {
"domain_threshold": t,
"sequence_threshold": t,
}
}
return config
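# Illustrative example (hypothetical CLI call): passing --prefix run1 --theta 0.8
# sets config["global"]["prefix"] = "run1" and config["global"]["theta"] = 0.8 via
# CONFIG_MAP, while --bitscores "0.5,0.6" creates a "batch" section with subjobs
# "_b0.5" and "_b0.6" that differ only in their alignment thresholds.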
def unroll_config(config):
"""
Create individual job configs from master config file
(e.g. containing batch section)
Parameters
----------
config : dict
Global run dictionary that will be split
up into individual pipeline jobs
Returns
-------
configs : dict
Dictionary of prefix to individual configurations
created by substitution from input configuration.
If no batch section is present, there will be only
one entry in the dictionary that corresponds to
the master run specified by the input configuration.
"""
# get global prefix of run
prefix = config["global"]["prefix"]
# store unrolled configurations here
configs = {}
# check if we have a single job or need to unroll
# into multiple jobs
if config.get("batch", None) is None:
configs[prefix] = config
else:
# go through all specified runs
for sub_id, delta_config in config["batch"].items():
# create copy of config and update for current subjob
sub_config = deepcopy(config)
# create prefix of subjob (may contain / to route
# subjob output to subfolder)
sub_prefix = prefix + sub_id
# these are not batch jobs anymore, so deactivate section
sub_config["batch"] = None
# create full prefix for subjob
sub_config["global"]["prefix"] = sub_prefix
# apply subconfig delta
# (assuming parameters are nested in two layers)
for section in delta_config:
# if dictionary, substitute all items on second level
if isinstance(delta_config[section], Mapping):
for param, value in delta_config[section].items():
sub_config[section][param] = value
else:
# substitute entire section (this only affects pipeline stages)
sub_config[section] = delta_config[section]
configs[sub_prefix] = sub_config
return configs
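# Sketch of the unrolling (hypothetical prefix): a master config with prefix "run"
# and batch = {"_b50": {...}, "_b60": {...}} yields two entries, "run_b50" and
# "run_b60", each a deep copy of the master with the delta applied and its own
# "batch" section set to None.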
def run_jobs(configs, global_config, overwrite=False, workdir=None, abort_on_error=True, environment=None):
"""
Submit config to pipeline
Parameters
----------
configs : dict
Configurations for individual subjobs
global_config : dict
Master configuration (if only one job,
the contents of this dictionary will be
equal to the single element of config_files)
overwrite : bool, optional (default: False)
If True, allows overwriting previous run of the same
config, otherwise will fail if results from previous
execution are present
workdir : str, optional (default: None)
Workdir in which to run job (will combine
workdir and prefix in joint path)
abort_on_error : bool, optional (default: True)
Abort entire job submission if error occurs for
one of the jobs by propagating RuntimeError
environment : str, optional (default: None)
Allow to pass value for environment parameter
of submitter, will override environment.configuration
from global_config (e.g., for setting environment
variables like passwords)
Returns
-------
job_ids : dict
Mapping from subjob prefix (keys in configs parameter)
to identifier returned by submitter for each of the jobs
that was *successfully* submitted (i.e. missing keys from
configs param indicate these jobs could not be submitted).
Raises
------
RuntimeError
If error encountered during submission and abort_on_error
is True
"""
cmd_base = environ.get("EVCOUPLINGS_RUNCFG_APP") or "evcouplings_runcfg"
summ_base = environ.get("EVCOUPLINGS_SUMMARIZE_APP") or "evcouplings_summarize"
# determine output directory for config files
prefix = global_config["global"]["prefix"]
# integrate working directory into output prefix
# if it is given; if prefix contains an absolute path,
# this will override the workdir according to
# implementation of path.join()
if workdir is not None:
out_prefix = path.join(workdir, prefix)
else:
out_prefix = prefix
# save configuration file, make sure we do not overwrite previous run
# if overwrite protection is activated
# (but only if it is a valid configuration file with contents)
cfg_filename = CONFIG_NAME.format(out_prefix)
if not overwrite and valid_file(cfg_filename):
raise InvalidParameterError(
"Existing configuration file {} ".format(cfg_filename) +
"indicates current prefix {} ".format(prefix) +
"would overwrite existing results. Use --yolo " +
"flag to deactivate overwrite protection (e.g. for "
"restarting a job or running a different stage)."
)
# make sure working directory exists
create_prefix_folders(cfg_filename)
# write global config file
write_config_file(cfg_filename, global_config)
# also write individual subjob configuration files
# (we have to write these before submitting, since
# the job summarizer needs the paths to all files)
for subjob_prefix, subjob_cfg in configs.items():
# determine working dir for each subjob, since subjob
# prefix may contain slashes leading to subfolder creation
if workdir is not None:
subjob_out_prefix = path.join(workdir, subjob_prefix)
else:
subjob_out_prefix = subjob_prefix
subcfg_filename = CONFIG_NAME.format(subjob_out_prefix)
# make sure output subfolder exists
create_prefix_folders(subcfg_filename)
# write subjob configuration file
write_config_file(subcfg_filename, subjob_cfg)
# now create list of subjob config files relative to working
# directory (above, we allow to run submitted in arbitrary directory)
config_files = [
CONFIG_NAME.format(subjob_prefix) for subjob_prefix in configs
]
# create command for summarizer (needs to know all subjob config files)
summ_cmd = "{} {} {} {}".format(
summ_base,
global_config["pipeline"],
global_config["global"]["prefix"],
" ".join(config_files)
)
# create submitter from global (pre-unrolling) configuration
submitter = utils.SubmitterFactory(
global_config["environment"]["engine"],
db_path=out_prefix + "_job_database.txt"
)
# collect individual submitted jobs here
commands = []
# record subjob IDs returned by submitter for each job
job_ids = {}
# prepare individual jobs for submission
for job, job_cfg in configs.items():
job_prefix = job_cfg["global"]["prefix"]
job_cfg_file = CONFIG_NAME.format(job)
# create submission command
env = job_cfg["environment"]
cmd = utils.Command(
[
"{} {}".format(cmd_base, job_cfg_file),
summ_cmd
],
name=job_prefix,
environment=environment or env["configuration"],
workdir=workdir,
resources={
utils.EResource.queue: env["queue"],
utils.EResource.time: env["time"],
utils.EResource.mem: env["memory"],
utils.EResource.nodes: env["cores"],
utils.EResource.out: job_prefix + "_stdout.log",
utils.EResource.error: job_prefix + "_stderr.log",
}
)
# store job for later dependency creation
commands.append(cmd)
tracker = get_result_tracker(job_cfg)
try:
# finally, submit job
current_job_id = submitter.submit(cmd)
# store run identifier returned by submitter
# TODO: consider storing current_job_id using tracker right away
job_ids[job] = current_job_id
# set job status in database to pending
tracker.update(status=EStatus.PEND)
except RuntimeError as e:
# set job as failed in database
tracker.update(status=EStatus.FAIL, message=str(e))
# fail entire job submission if requested
if abort_on_error:
raise
# submit final summarizer
# (hold for now - summarizer is run after each subjob finishes)
# wait for all runs to finish (but only if blocking)
submitter.join()
# return job identifiers
return job_ids
def run(**kwargs):
"""
Exposes command line interface as a Python function.
Parameters
----------
kwargs
See click.option decorators for app() function
"""
    # substitute command-line options in config file
config = substitute_config(**kwargs)
# check minimal set of parameters is present in config
check_required(
config,
["pipeline", "stages", "global"]
)
# verify that global prefix makes sense
pipeline.verify_prefix(verify_subdir=False, **config)
# unroll batch jobs into individual pipeline jobs
sub_configs = unroll_config(config)
# run pipeline computation for each individual (unrolled) config
run_jobs(
sub_configs, config, kwargs.get("yolo", False),
kwargs.get("workdir", None)
)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
# run settings
@click.argument('config')
@click.option("-P", "--prefix", default=None, help="Job prefix")
@click.option("-S", "--stages", default=None, help="Stages of pipeline to run (comma-separated)")
@click.option("-p", "--protein", default=None, help="Sequence identifier of query protein")
@click.option("-s", "--seqfile", default=None, help="FASTA file with query sequence")
@click.option(
"-a", "--alignment", default=None,
help="Existing sequence alignment to start from (aligned FASTA/Stockholm). Use -p to select target sequence."
)
@click.option("-r", "--region", default=None, help="Region of query sequence(e.g 25-341)")
@click.option(
"-b", "--bitscores", default=None,
help="List of alignment bitscores (comma-separated, length-normalized "
"(float) or absolute score (int))"
)
@click.option(
"-e", "--evalues", default=None,
help="List of alignment E-values (negative exponent, comma-separated)"
)
@click.option(
"-n", "--iterations", default=None, help="Number of alignment iterations", type=int
)
@click.option("-d", "--database", default=None, help="Path or name of sequence database")
@click.option(
"-i", "--id", default=None, help="Filter alignment at x% sequence identity", type=int
)
@click.option(
"-f", "--seqcov", default=None, help="Minimum % aligned positions per sequence", type=int
)
@click.option(
"-m", "--colcov", default=None, help="Minimum % aligned positions per column", type=int
)
@click.option(
"-t", "--theta", default=None,
help="Downweight sequences above this identity cutoff"
" during inference (e.g. 0.8 for 80% identity cutoff)",
type=float
)
@click.option(
"--plmiter", default=None, help="Maximum number of iterations during inference",
type=int
)
# environment configuration
@click.option("-Q", "--queue", default=None, help="Grid queue to run job(s)")
@click.option(
"-T", "--time", default=None, help="Time requirement (hours) for batch jobs", type=int
)
@click.option("-N", "--cores", default=None, help="Number of cores for batch jobs", type=int)
@click.option(
"-M", "--memory", default=None, help="Memory requirement for batch jobs (MB or 'auto')"
)
@click.option(
"-y", "--yolo", default=False, is_flag=True, help="Disable overwrite protection"
)
def app(**kwargs):
"""
EVcouplings command line interface
Any command line option specified in addition to the config file
will overwrite the corresponding setting in the config file.
Specifying a list of bitscores or E-values will result in the creation
of multiple jobs that only vary in this parameter, with all other parameters
constant.
"""
run(**kwargs)
if __name__ == '__main__':
app()
| 33.713483
| 113
| 0.628784
|
1cabbfd28085d366c9381a93bb1bfc4d2403779d
| 48,020
|
py
|
Python
|
nemo/collections/nlp/data/machine_translation/preproc_mt_data.py
|
agemagician/NeMo
|
5839aee402f314aa413b28e9042b1e1cac10a114
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/data/machine_translation/preproc_mt_data.py
|
agemagician/NeMo
|
5839aee402f314aa413b28e9042b1e1cac10a114
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/data/machine_translation/preproc_mt_data.py
|
agemagician/NeMo
|
5839aee402f314aa413b28e9042b1e1cac10a114
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import pickle
import tarfile
import tempfile
import youtokentome as yttm
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import create_spt_model
from nemo.collections.nlp.data.language_modeling.sentence_dataset import SentenceDataset
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataset
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer, get_tokenizer
from nemo.utils import logging
class MTDataPreproc:
""" Automatically trains tokenizers and preprocesses machine translation data based on the MTEncDecModelConfig.
For training NMT models with datasets larger than 5M sentence pairs,
it can be inefficient to train them without first creating a tarred dataset.
If the user wants to change the tokenizer, vocab size, or batch size, for example,
they must reprocess the data with the correct configuration.
With MTDataPreproc users can sweep through data configurations and the tarred dataset will
be automatically created according to the model configuration.
To train tokenizer model and create tarred dataset specify in configuration:
model.preproc_out_dir=/path/to/preproc_out
model.encoder_tokenizer.vocab_size=32000
model.decoder_tokenizer.vocab_size=32000
model.train_ds.use_tarred_dataset=True
model.train_ds.src_file_name=/path/to/src.txt
model.train_ds.tgt_file_name=/path/to/tgt.txt
model.train_ds.tokens_in_batch=16000
Once a dataset has been constructed based on this configuration, MTDataPreproc will not process it again.
If a previously trained tokenizer model or tarred dataset is found, MTDataPreproc will not preprocess the data.
    Note: YouTokenToMe (yttm), Hugging Face, and SentencePiece tokenizers are supported; only yttm and sentencepiece models can be trained by MTDataPreproc.
"""
def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None) -> None:
self._cfg = cfg
self.global_rank = 0
self.world_size = 1
if trainer is not None:
self.global_rank = (trainer.node_rank * trainer.num_gpus) + trainer.local_rank
self.world_size = trainer.num_nodes * trainer.num_gpus
if hasattr(cfg, 'train_ds'):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece']
supported_train_tokenizers = ['yttm', 'sentencepiece']
if (
cfg.encoder_tokenizer.get('library') not in supported_tokenizers
or cfg.decoder_tokenizer.get('library') not in supported_tokenizers
):
raise NotImplementedError(f"Currently we only support {supported_tokenizers}.")
if cfg.get('shared_tokenizer') and cfg.encoder_tokenizer.get('library') != cfg.decoder_tokenizer.get(
'library'
):
raise ValueError("Shared tokenizers cannot be from different libraries.")
# Prepare tokenizers
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
or cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
):
# Train tokenizer models if using yttm or sentencepiece and they don't exist
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.encoder_tokenizer.get('tokenizer_model') is None
) or (
cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.decoder_tokenizer.get('tokenizer_model') is None
):
if cfg.get('preproc_out_dir') is None:
raise ValueError('Tokenizer model training required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to train tokenizers but could not be found.'
)
src_fname = cfg.train_ds.get('src_file_name')
tgt_fname = cfg.train_ds.get('tgt_file_name')
src_language = cfg.get('src_language')
tgt_language = cfg.get('tgt_language')
spt_symbols = None
tempdir = tempfile.TemporaryDirectory()
if cfg.get('multilingual'):
spt_symbols = []
if isinstance(src_fname, ListConfig):
fnames = (" ").join(src_fname)
src_fname = os.path.join(tempdir.name, 'src.txt')
os.system('cat %s > %s' % (fnames, src_fname))
if isinstance(tgt_fname, ListConfig):
fnames = (" ").join(tgt_fname)
tgt_fname = os.path.join(tempdir.name, 'tgt.txt')
os.system('cat %s > %s' % (fnames, tgt_fname))
if isinstance(src_language, ListConfig):
for lng in src_language:
spt_symbols.append("<" + lng + ">")
if isinstance(tgt_language, ListConfig):
for lng in tgt_language:
spt_symbols.append("<" + lng + ">")
# train tokenizer model on training data
self.encoder_tokenizer_model, self.decoder_tokenizer_model = MTDataPreproc.train_tokenizers(
out_dir=cfg.get('preproc_out_dir'),
src_fname=src_fname,
tgt_fname=tgt_fname,
shared_tokenizer=cfg.get('shared_tokenizer'),
encoder_tokenizer_vocab_size=cfg.encoder_tokenizer.get('vocab_size'),
decoder_tokenizer_vocab_size=cfg.decoder_tokenizer.get('vocab_size'),
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
encoder_tokenizer_coverage=cfg.encoder_tokenizer.get('coverage', 0.999),
decoder_tokenizer_coverage=cfg.decoder_tokenizer.get('coverage', 0.999),
global_rank=self.global_rank,
encoder_training_sample_size=cfg.encoder_tokenizer.get('training_sample_size', -1),
decoder_training_sample_size=cfg.decoder_tokenizer.get('training_sample_size', -1),
encoder_special_tokens=OmegaConf.to_container(cfg.encoder_tokenizer.special_tokens)
if cfg.encoder_tokenizer.special_tokens
else None,
decoder_special_tokens=OmegaConf.to_container(cfg.decoder_tokenizer.special_tokens)
if cfg.decoder_tokenizer.special_tokens
else None,
spt_symbols=spt_symbols,
multilingual=cfg.get('multilingual', False),
)
# update config
self._cfg.encoder_tokenizer.tokenizer_model = self.encoder_tokenizer_model
self._cfg.decoder_tokenizer.tokenizer_model = self.decoder_tokenizer_model
tempdir.cleanup()
else:
self.encoder_tokenizer_model = cfg.encoder_tokenizer.get('tokenizer_model')
self.decoder_tokenizer_model = cfg.decoder_tokenizer.get('tokenizer_model')
self.encoder_tokenizer, self.decoder_tokenizer = self.get_enc_dec_tokenizers(
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=self.encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=self.decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
)
# If using tarred dataset for training, automatically create it if needed
if cfg.train_ds.get('use_tarred_dataset'):
if cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is None:
if cfg.get('preproc_out_dir') is None:
raise ValueError('Data preprocessing required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to create tarred dataset but could not be found.'
)
# Preprocess data and cache for use during training
if self.global_rank == 0:
logging.info(
f"Using tarred dataset for src: {cfg.train_ds.get('src_file_name')} and tgt: {cfg.train_ds.get('tgt_file_name')}"
)
if not cfg.get('multilingual'):
src_file_list = [cfg.train_ds.get('src_file_name')]
tgt_file_list = [cfg.train_ds.get('tgt_file_name')]
outdir_list = [cfg.get('preproc_out_dir')]
else:
src_file_list = cfg.train_ds.get('src_file_name')
tgt_file_list = cfg.train_ds.get('tgt_file_name')
if isinstance(cfg.get('src_language'), ListConfig):
langs = cfg.get('src_language')
elif isinstance(cfg.get('tgt_language'), ListConfig):
langs = cfg.get('tgt_language')
else:
raise ValueError(
"Expect either cfg.src_language or cfg.tgt_language to be a list when multilingual=True."
)
outdir_list = []
for lang in langs:
outdir_list.append(os.path.join(cfg.get('preproc_out_dir'), lang))
if len(src_file_list) != len(tgt_file_list) or len(src_file_list) != len(outdir_list):
raise ValueError(
"Number of source files, target files, and multilingual language pairs must be the same."
)
                        # TODO: have to get tokenizers inside .preprocess_parallel because they can't be pickled
metadata_file_list = []
for idx, src_file in enumerate(src_file_list):
self.train_tar_files, self.train_metadata_file = MTDataPreproc.preprocess_parallel_dataset(
clean=cfg.train_ds.clean,
src_fname=src_file,
tgt_fname=tgt_file_list[idx],
out_dir=outdir_list[idx],
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=self.encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=self.decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
max_seq_length=cfg.train_ds.get('max_seq_length', 512),
tokens_in_batch=cfg.train_ds.get('tokens_in_batch', 8192),
lines_per_dataset_fragment=cfg.train_ds.get('lines_per_dataset_fragment', 1000000),
num_batches_per_tarfile=cfg.train_ds.get('num_batches_per_tarfile', 1000),
min_seq_length=1,
global_rank=self.global_rank,
world_size=self.world_size,
n_jobs=cfg.train_ds.get('n_preproc_jobs', -2),
tar_file_prefix=cfg.train_ds.get('tar_file_prefix', 'parallel'),
)
metadata_file_list.append(self.train_metadata_file)
# update config
# self._cfg.train_ds.tar_files = self.tar_files_to_string(self.train_tar_files)
# self._cfg.train_ds.tar_files = self.train_tar_files
if not cfg.get('multilingual'):
self._cfg.train_ds.metadata_file = metadata_file_list[0]
else:
self._cfg.train_ds.metadata_file = metadata_file_list
logging.info(
f"Using tarred dataset created in folder(s) {outdir_list} and metadata created at {self._cfg.train_ds.metadata_file}"
)
elif cfg.train_ds.get('tar_files') is not None and cfg.train_ds.get('metadata_file') is None:
raise ValueError('A metadata file is required for tarred dataset but cfg.metadata_file is None.')
elif cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is not None:
if isinstance(cfg.train_ds.get('metadata_file'), str):
metadata_file_list = [cfg.train_ds.get('metadata_file')]
else:
metadata_file_list = cfg.train_ds.get('metadata_file')
for metadata_file in metadata_file_list:
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if metadata['tar_files']:
logging.info(f"Using tarred dataset: {metadata['tar_files']}")
else:
raise ValueError(f'tar_files not provided and metadata does not have tar files')
else:
self.train_tar_files = cfg.train_ds.get('tar_files')
self.train_metadata_file = cfg.train_ds.get('metadata_file')
logging.info(
f"Using tarred dataset from config at {self.train_tar_files} and metadata from {self.train_metadata_file}"
)
def tar_files_to_string(self, tar_files):
""" Tar files are generated in the following format: basename.number.tar
Where number is an integer from 1 to the number of tar files.
We convert this list to a string that can be used in the model config to specify
        tarred datasets: basename._OP_1..num_tar_files_CL_.tar
Args:
tar_files (List[str]): List of tar files generated by preprocess_parallel_dataset
"""
num_tar_files = len(tar_files)
split_on_dot = tar_files[0].split('.')
basename = '.'.join(split_on_dot[0:-2])
tar_file_string = f'{basename}._OP_1..{num_tar_files}_CL_.tar'
return tar_file_string
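    # Illustrative example (hypothetical file names): four shards named
    # "parallel.batches.tokens.8192.<n>.tar" collapse to the single string
    # "parallel.batches.tokens.8192._OP_1..4_CL_.tar".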
@staticmethod
def get_enc_dec_tokenizers(
encoder_tokenizer_name=None,
encoder_tokenizer_model=None,
encoder_bpe_dropout=0.0,
encoder_model_name=None,
decoder_tokenizer_name=None,
decoder_tokenizer_model=None,
decoder_bpe_dropout=0.0,
decoder_model_name=None,
):
# if encoder_tokenizer_name != 'yttm' or decoder_tokenizer_name != 'yttm':
# raise NotImplementedError(f"Currently we only support yttm tokenizer.")
encoder_tokenizer = get_nmt_tokenizer(
library=encoder_tokenizer_name,
model_name=encoder_model_name,
tokenizer_model=encoder_tokenizer_model,
bpe_dropout=encoder_bpe_dropout,
)
decoder_tokenizer = get_nmt_tokenizer(
library=decoder_tokenizer_name,
model_name=decoder_model_name,
tokenizer_model=decoder_tokenizer_model,
bpe_dropout=decoder_bpe_dropout,
)
return encoder_tokenizer, decoder_tokenizer
@staticmethod
def get_monolingual_tokenizer(
tokenizer_name=None, tokenizer_model=None, bpe_dropout=0.0,
):
if tokenizer_name != 'yttm':
raise NotImplementedError(f"Currently we only support yttm tokenizer.")
tokenizer = get_tokenizer(
tokenizer_name=tokenizer_name, tokenizer_model=tokenizer_model, bpe_dropout=bpe_dropout,
)
return tokenizer
# TODO: add local or global rank 0 decorator
@staticmethod
def preprocess_parallel_dataset(
clean,
src_fname,
tgt_fname,
out_dir,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
global_rank,
world_size,
n_jobs=-2,
tar_file_prefix='parallel',
):
"""Create tarred dataset from large paired translation data.
Args:
clean (str): Cleans source and target sentences to get rid of noisy data.
src_fname (str): path to source text data
tgt_fname (str): path to target text data
out_dir (str): path to write tarred dataset
encoder_tokenizer (Any): tokenizer for encoder
decoder_tokenizer (Any): tokenizer for decoder
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
tar_file_prefix (str) : add string prefix to tar files
n_jobs (int): number of processes to use for data processing (-2 to use all but 2)
"""
os.makedirs(out_dir, exist_ok=True)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
if global_rank == 0:
tar_files_in_out_dir = glob.glob(f'{out_dir}/*.tar')
if tar_files_in_out_dir:
logging.info(
f'Tarred dataset detected: {tar_files_in_out_dir} and will be used. Remove if reprocessing.'
)
else:
filenames = [src_fname, tgt_fname]
# get number of lines so that we can create a partition of the lines of the text file
num_src_lines, num_tgt_lines = Parallel(n_jobs=2)(
delayed(MTDataPreproc._get_num_lines)(filename) for filename in filenames
)
logging.info(f'Found {num_src_lines} source lines and {num_tgt_lines} target lines.')
assert num_src_lines == num_tgt_lines, 'Number of source lines should equal number of target lines.'
# create a partition of lines that we can parallelize over
lines_partition = MTDataPreproc._get_lines_partition(num_src_lines, lines_per_dataset_fragment)
logging.info(f"Found {len(lines_partition)} fragments to parallelize over.")
# create tarfiles for each fragment in parallel
results_list = Parallel(n_jobs=n_jobs)(
delayed(MTDataPreproc._process_fragment)(
src_filename=src_fname,
tgt_filename=tgt_fname,
lines_indices=lines_indices,
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
tokens_in_batch=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
fragment_index=fragment_index,
)
for fragment_index, lines_indices in enumerate(lines_partition)
)
# compute total batches so far
total_batches = sum([batch_count for batch_count, _ in results_list])
# save batches from tar files containing the left over batches (if there's enough batches)
remainder_tar_file_ctr = 0
remainder_tar_file_path = os.path.join(
out_dir, f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar'
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w')
batch_in_tar_ctr = 0
for _, tar_file_path in results_list:
tar_file_ptr = tarfile.open(tar_file_path, 'r')
for member in tar_file_ptr.getmembers():
remainder_tar_file_ptr.addfile(member, tar_file_ptr.extractfile(member.name))
batch_in_tar_ctr += 1
if batch_in_tar_ctr == num_batches_per_tarfile:
remainder_tar_file_ctr += 1
remainder_tar_file_ptr.close()
remainder_tar_file_path = os.path.join(
out_dir,
f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar',
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w',)
batch_in_tar_ctr = 0
tar_file_ptr.close()
os.remove(tar_file_path)
# log the number of batches remaining as they will be discarded
num_batches_discarded = len(remainder_tar_file_ptr.getmembers())
total_batches -= num_batches_discarded
logging.info(
f'Number of batches discarded: {num_batches_discarded}, total batches kept: {total_batches}'
)
remainder_tar_file_ptr.close()
os.remove(remainder_tar_file_path)
# dump metadata to json
metadata = {}
metadata['num_batches'] = total_batches
# rename tar files so they can be more easily used with CLI and YAML
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
for index, path in enumerate(tar_file_paths):
os.rename(
path, os.path.join(out_dir, f'{tar_file_prefix}.batches.tokens.{tokens_in_batch}.{index}.tar')
)
# add tar files to manifest
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
metadata['tar_files'] = tar_file_paths
json.dump(metadata, open(metadata_path, 'w'))
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
@staticmethod
def _get_num_lines(filename):
with open(filename) as f:
for i, l in enumerate(f):
pass
return i + 1
@staticmethod
def _get_lines_partition(num_lines, lines_per_dataset_fragment):
# create partition based on fragment size
fragment_indices = []
for i in range(0, num_lines, lines_per_dataset_fragment):
fragment_indices.append([i, i + lines_per_dataset_fragment])
# modify last indices
last_indices = fragment_indices.pop()
last_indices[1] = -1
fragment_indices.append(last_indices)
# if fragment_indices[-1][1] >= num_lines:
# fragment_indices.pop()
return fragment_indices
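    # Illustrative sketch (not part of the original class): with num_lines=250 and
    # lines_per_dataset_fragment=100 the partition above is [[0, 100], [100, 200], [200, -1]],
    # where the -1 stop index of the final fragment is a "read to end of file" sentinel
    # consumed by _process_fragment below.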
@staticmethod
def _process_fragment(
src_filename,
tgt_filename,
lines_indices,
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tokens_in_batch,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
fragment_index,
):
start = lines_indices[0]
stop = lines_indices[1]
# write lines in partition to temporary files to be consumed by write_parallel_batches_to_tarfiles
tmp_f_src = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f_tgt = tempfile.NamedTemporaryFile(delete=False, mode='w')
with open(src_filename, 'r') as src_in, open(tgt_filename) as tgt_in:
for line_number, (src_line, tgt_line) in enumerate(zip(src_in, tgt_in)):
                # a stop index of -1 (set for the final fragment) means "read to the end of the file"
                if start <= line_number and (line_number < stop or stop == -1):
if src_line and tgt_line:
tmp_f_src.write(src_line)
tmp_f_tgt.write(tgt_line)
tmp_f_src.close()
tmp_f_tgt.close()
num_batches_from_fragment, remainder_tar_file_path = MTDataPreproc.write_parallel_batches_to_tarfiles(
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
src_fname=tmp_f_src.name,
tgt_fname=tmp_f_tgt.name,
num_tokens=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
fragment_index=fragment_index,
)
os.remove(tmp_f_src.name)
os.remove(tmp_f_tgt.name)
return num_batches_from_fragment, remainder_tar_file_path
@staticmethod
def preprocess_monolingual_dataset(
clean,
fname,
out_dir,
tokenizer,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
pkl_file_prefix,
global_rank,
world_size,
):
"""Create tarred dataset from a large monolingual corpus.
Args:
            clean (bool): whether to clean sentences to get rid of very long or short sentences
            fname (str): Path to source text data
            out_dir (str): Path to write tarred dataset
            tokenizer (Any): tokenizer object used to tokenize the corpus (not a path to a tokenizer model)
            max_seq_length (int): maximum sequence length
            min_seq_length (int): minimum sequence length
            tokens_in_batch (int): tokens per batch per GPU, effectively batch size
            lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
            num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
            pkl_file_prefix (str): prefix used for the pickle and tar files written to out_dir
            global_rank (int): if set to zero, data will be processed on this node
            world_size (int): total number of processes being run (for training only, set to 1 when preproc only)
"""
os.makedirs(out_dir, exist_ok=True)
tar_file_ctr = 1
num_files_in_tar = 0
num_lines = 0
shard_num = 0
global_batch_ctr = 0
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, 1)), 'w'
)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
with open(fname, 'r') as f:
for line in f:
tmp_f.write(line)
num_lines += 1
if num_lines == lines_per_dataset_fragment:
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
num_lines = 0
shard_num += 1
os.remove(tmp_f.name)
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
tar_file_ptr.close()
os.remove(tmp_f.name)
if num_files_in_tar != num_batches_per_tarfile:
os.remove(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, tar_file_ctr))
)
global_batch_ctr -= num_files_in_tar
print('Dropping %d batches because of overflow' % (num_files_in_tar))
json.dump({'num_batches': global_batch_ctr}, open(os.path.join(out_dir, 'metadata.json'), 'w'))
tar_file_paths = glob.glob(f'{out_dir}/{pkl_file_prefix}-batches.tokens.{tokens_in_batch}.*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
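    # Hedged usage sketch (paths, sizes and the tokenizer object are hypothetical, not taken
    # from the original module):
    #
    #   tar_files, metadata = MTDataPreproc.preprocess_monolingual_dataset(
    #       clean=True,
    #       fname='/data/mono.en',
    #       out_dir='/data/mono_tarred',
    #       tokenizer=tokenizer,                 # an already-constructed tokenizer object
    #       max_seq_length=512,
    #       min_seq_length=1,
    #       tokens_in_batch=8000,
    #       lines_per_dataset_fragment=1000000,
    #       num_batches_per_tarfile=100,
    #       pkl_file_prefix='mono',
    #       global_rank=0,
    #       world_size=1,
    #   )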
@staticmethod
def train_tokenizers(
out_dir,
src_fname,
tgt_fname,
shared_tokenizer,
encoder_tokenizer_name,
encoder_tokenizer_vocab_size,
encoder_tokenizer_coverage,
decoder_tokenizer_name,
decoder_tokenizer_vocab_size,
decoder_tokenizer_coverage,
global_rank,
encoder_training_sample_size=-1,
decoder_training_sample_size=-1,
encoder_special_tokens=None,
decoder_special_tokens=None,
spt_symbols=None,
multilingual=False,
):
encoder_tokenizer_model = None
decoder_tokenizer_model = None
os.makedirs(out_dir, exist_ok=True)
supported_train_tokenizers = ['yttm', 'sentencepiece']
if encoder_special_tokens:
if isinstance(encoder_special_tokens, dict):
encoder_special_tokens = list(encoder_special_tokens.values())
print(encoder_special_tokens)
if decoder_special_tokens:
if isinstance(decoder_special_tokens, dict):
decoder_special_tokens = list(decoder_special_tokens.values())
if multilingual and encoder_tokenizer_name != 'sentencepiece':
raise NotImplementedError(
f"Currently we only support training setencepiece tokenizer for multilingual model."
)
if shared_tokenizer:
if (
encoder_tokenizer_name not in supported_train_tokenizers
or decoder_tokenizer_name not in supported_train_tokenizers
):
raise NotImplementedError(
f"Currently we only support tokenizers in {supported_train_tokenizers} for shared tokenizer."
)
encoder_tokenizer_model = os.path.join(
out_dir, 'shared_tokenizer.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
decoder_tokenizer_model = encoder_tokenizer_model
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
with tempfile.TemporaryDirectory() as tmp:
concat_data_path = os.path.join(tmp, 'concat_dataset.txt')
os.system('cat %s %s > %s' % (src_fname, tgt_fname, concat_data_path))
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
model=os.path.join(out_dir, encoder_tokenizer_model),
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
create_spt_model(
data_file=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=out_dir,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
)
os.rename(
os.path.join(out_dir, 'tokenizer.model'),
os.path.join(out_dir, encoder_tokenizer_model),
)
else:
if encoder_tokenizer_name in supported_train_tokenizers:
encoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.encoder.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
model=encoder_tokenizer_model,
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(encoder_tokenizer_model)
create_spt_model(
data_file=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), os.path.join(encoder_tokenizer_model))
if decoder_tokenizer_name in supported_train_tokenizers:
decoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.decoder.%d.BPE.model' % (decoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(decoder_tokenizer_model):
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} not found. Training tokenizer model.'
)
if decoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
model=decoder_tokenizer_model,
coverage=decoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(decoder_tokenizer_model)
create_spt_model(
data_file=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
sample_size=decoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=decoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=decoder_special_tokens,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), os.path.join(decoder_tokenizer_model))
return encoder_tokenizer_model, decoder_tokenizer_model
@staticmethod
def write_parallel_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
src_fname,
tgt_fname,
num_tokens,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
fragment_index,
):
"""
Writes current fragment of the overall parallel corpus to tarfiles by:
        (1) Creating minibatches using a TranslationDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = TranslationDataset(
dataset_src=src_fname,
dataset_tgt=tgt_fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
max_seq_length_diff=max_seq_length,
max_seq_length_ratio=max_seq_length,
cache_ids=False,
cache_data_per_node=False,
use_cache=False,
)
encoder_tokenizer, decoder_tokenizer = MTDataPreproc.get_enc_dec_tokenizers(
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
tar_file_ctr = 0
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w')
total_batch_ctr = 0
batch_ctr = 0
for _, batch in dataset.batches.items():
total_batch_ctr += 1
batch_ctr += 1
pickle.dump(
batch,
open(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)), 'wb'),
)
tar_file_ptr.add(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
os.remove(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
if batch_ctr == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w',)
batch_ctr = 0
# return tar files paths that have batches remaining
remainder_tar_file_path = tar_file_ptr.name
tar_file_ptr.close()
return total_batch_ctr, remainder_tar_file_path
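    # For reference (values are hypothetical): with fragment_index=0, num_tokens=8000 and
    # num_batches_per_tarfile=100 this method writes
    #   fragment-0-batches.tokens.8000.0.tar, fragment-0-batches.tokens.8000.1.tar, ...
    # and returns the path of the last, possibly under-filled tar file so the caller can merge
    # those leftover batches across fragments.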
@staticmethod
def write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
fname,
num_tokens,
tokenizer,
num_files_in_tar,
tar_file_ptr,
tar_file_ctr,
global_batch_ctr,
pkl_file_prefix,
):
"""
        Writes current fragment of the overall monolingual corpus to tarfiles by:
        (1) Creating minibatches using a SentenceDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = SentenceDataset(
tokenizer=tokenizer,
dataset=fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
cache_ids=False,
)
for batch in dataset.batches:
global_batch_ctr += 1
batch = {'src': batch}
pickle.dump(
batch, open(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)), 'wb')
)
if num_files_in_tar == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, num_tokens, tar_file_ctr)),
'w',
)
num_files_in_tar = 0
tar_file_ptr.add(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
num_files_in_tar += 1
os.remove(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
return tar_file_ptr, global_batch_ctr, num_files_in_tar, tar_file_ctr
@property
def cfg(self):
return self._cfg
| 47.450593
| 143
| 0.574552
|
37aff32533bdfdbb64fd39dd27fa56ba20da00b5
| 642
|
py
|
Python
|
13_ResidualPlots/importFP.py
|
xavierbellagamba/4_RuptureRecognition
|
18e41f6a5a677862bb6d7d0c9adf27271c7b59fe
|
[
"MIT"
] | null | null | null |
13_ResidualPlots/importFP.py
|
xavierbellagamba/4_RuptureRecognition
|
18e41f6a5a677862bb6d7d0c9adf27271c7b59fe
|
[
"MIT"
] | null | null | null |
13_ResidualPlots/importFP.py
|
xavierbellagamba/4_RuptureRecognition
|
18e41f6a5a677862bb6d7d0c9adf27271c7b59fe
|
[
"MIT"
] | null | null | null |
import csv
data_path = './faultPlanes/Akatore.cnrs'
##################################################################
#isNumber: check if a string is a number
##################################################################
def isNumber(s):
try:
float(s)
return True
except ValueError:
return False
data = []
with open(data_path) as csvfile:
readCSV = csv.reader(csvfile, delimiter=' ')
for row in readCSV:
single_line = []
for i in range(len(row)):
if isNumber(row[i]):
single_line.append(float(row[i]))
if len(single_line) > 1:
data.append(single_line)
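# Illustrative only (row values are hypothetical): a whitespace-separated line such as
# "170.1 -45.9 12.3" in the .cnrs file becomes [170.1, -45.9, 12.3], so `data` ends up as a
# list of numeric rows; non-numeric tokens and rows with fewer than two numbers are skipped.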
| 24.692308
| 66
| 0.485981
|
11a6b682a4249fd6ed1a47930648c0b9c04cc996
| 3,897
|
py
|
Python
|
var/spack/repos/builtin/packages/xsdk-examples/package.py
|
dialvarezs/spack
|
14d4203722daf3abd56a2b6c880214a1338e289f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/xsdk-examples/package.py
|
dialvarezs/spack
|
14d4203722daf3abd56a2b6c880214a1338e289f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/xsdk-examples/package.py
|
dialvarezs/spack
|
14d4203722daf3abd56a2b6c880214a1338e289f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class XsdkExamples(CMakePackage, CudaPackage):
"""xSDK Examples show usage of libraries in the xSDK package."""
homepage = 'http://xsdk.info'
url = 'https://github.com/xsdk-project/xsdk-examples/archive/v0.1.0.tar.gz'
git = "https://github.com/xsdk-project/xsdk-examples"
maintainers = ['acfisher', 'balay', 'balos1', 'luszczek']
version('develop', branch='master')
version('0.3.0', sha256='e7444a403c0a69eeeb34a4068be4d6f4e5b54cbfd275629019b9236a538a739e')
version('0.2.0', sha256='cf26e3a16a83eba6fb297fb106b0934046f17cf978f96243b44d9d17ad186db6')
version('0.1.0', sha256='d24cab1db7c0872b6474d69e598df9c8e25d254d09c425fb0a6a8d6469b8018f')
depends_on('xsdk+cuda', when='+cuda')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('xsdk+cuda cuda_arch={0}'.format(sm_),
when='+cuda cuda_arch={0}'.format(sm_))
depends_on('xsdk@develop', when='@develop')
depends_on('xsdk@0.7.0', when='@0.3.0')
depends_on('xsdk@0.7.0 ^mfem+strumpack', when='@0.3.0 ^xsdk+strumpack')
depends_on('xsdk@0.7.0 ^sundials+magma', when='@0.3.0 +cuda')
depends_on('xsdk@0.6.0', when='@0.2.0')
depends_on('xsdk@0.5.0', when='@0.1.0')
depends_on('mpi')
depends_on('cmake@3.21:', type='build', when='@0.3.0:')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DENABLE_HYPRE=ON',
'-DHYPRE_DIR=%s' % spec['hypre'].prefix,
'-DENABLE_MFEM=ON',
'-DMETIS_DIR=%s' % spec['metis'].prefix,
'-DMFEM_DIR=%s' % spec['mfem'].prefix,
'-DENABLE_PETSC=ON',
'-DPETSc_DIR=%s' % spec['petsc'].prefix,
'-DENABLE_PLASMA=ON',
'-DPLASMA_DIR=%s' % spec['plasma'].prefix,
'-DENABLE_SUNDIALS=ON',
'-DSUNDIALS_DIR=%s' % spec['sundials'].prefix,
'-DENABLE_SUPERLU=ON',
'-DSUPERLUDIST_DIR=%s' % spec['superlu-dist'].prefix
]
if '+cuda' in spec: # if cuda variant was activated for xsdk
args.extend([
'-DENABLE_CUDA=ON',
'-DCMAKE_CUDA_ARCHITECTURES=%s' % spec.variants['cuda_arch'].value
])
if '+ginkgo' in spec: # if ginkgo variant was activated for xsdk
args.extend([
'-DENABLE_GINKGO=ON',
'-DGinkgo_DIR=%s' % spec['ginkgo'].prefix
])
if '+magma' in spec: # if magma variant was activated for xsdk
args.extend([
'-DENABLE_MAGMA=ON',
'-DMAGMA_DIR=%s' % spec['magma'].prefix
])
        if '+strumpack' in spec:  # if strumpack variant was activated for xsdk
args.extend([
'-DENABLE_STRUMPACK=ON',
'-DSTRUMPACK_DIR=%s' % spec['strumpack'].prefix
])
if '+slate' in spec: # if slate variant was activated for xsdk
args.extend([
'-DENABLE_SLATE=ON',
'-DSLATE_DIR=%s' % spec['slate'].prefix,
'-DBLASPP_DIR=%s' % spec['blaspp'].prefix,
'-DLAPACKPP_DIR=%s' % spec['lapackpp'].prefix
])
if 'trilinos' in spec: # if trilinos variant was activated for xsdk
args.extend([
                '-DENABLE_TRILINOS=ON',
'-DTRILINOS_DIR_PATH=%s' % spec['trilinos'].prefix
])
if 'zlib' in spec: # if zlib variant was activated for MFEM
args.append('-DZLIB_LIBRARY_DIR=%s' % spec['zlib'].prefix.lib)
return args
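# Hedged usage note (not part of the upstream package): a spec that exercises the CUDA branch
# of cmake_args() might look like
#   spack install xsdk-examples+cuda cuda_arch=70
# the exact flags passed to CMake depend on which xsdk variants are enabled in the spec.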
| 41.021053
| 95
| 0.574545
|
7789a43721c711b041484e779fa9b1fbc894027f
| 1,708
|
py
|
Python
|
tests.py
|
tpyo/pipsy
|
2b9abaa5e53f12421f6e890380552551f582b5dc
|
[
"MIT"
] | null | null | null |
tests.py
|
tpyo/pipsy
|
2b9abaa5e53f12421f6e890380552551f582b5dc
|
[
"MIT"
] | null | null | null |
tests.py
|
tpyo/pipsy
|
2b9abaa5e53f12421f6e890380552551f582b5dc
|
[
"MIT"
] | 1
|
2019-12-12T15:26:31.000Z
|
2019-12-12T15:26:31.000Z
|
import pytest
import pip
import subprocess
import sys
import json
from mock import patch, Mock
from pip._vendor.pkg_resources import Distribution
from pipsy import *
class TestPackaging:
mock_installed = [Distribution(project_name='pipsy-test', version='0.1')]
mock_pkg_json = json.dumps({ "info": { "version": "0.1", "name": "pipsy-test" }, "releases": { "0.3": [{ }], "0.2": [{ }], "0.1": [{ }] } }).encode('utf-8')
@patch('pip.get_installed_distributions')
def test_pip_installed(self, pip_mock):
pip_mock.return_value = self.mock_installed
packages = pip.get_installed_distributions()
assert len(packages) == 1
@patch('urllib.request.urlopen')
def test_get_pkg_info(self, info_mock):
req_mock = Mock()
req_mock.readall.return_value = self.mock_pkg_json
info_mock.return_value = req_mock
pkg = get_pkg_info(self.mock_installed[0])
versions = get_versions(pkg)
assert len(versions) == 3
@patch('urllib.request.urlopen')
def test_get_version_range(self, info_mock):
req_mock = Mock()
req_mock.readall.return_value = self.mock_pkg_json
info_mock.return_value = req_mock
pkg = get_pkg_info(self.mock_installed[0])
version_range = get_version_range(pkg, '0.1')
assert len(version_range) == 2
@patch('urllib.request.urlopen')
def test_get_latest_version(self, info_mock):
req_mock = Mock()
req_mock.readall.return_value = self.mock_pkg_json
info_mock.return_value = req_mock
pkg = get_pkg_info(self.mock_installed[0])
latest_version = get_latest_version(pkg)
assert str(latest_version) == '0.3'
| 34.857143
| 160
| 0.672131
|
8b67a9c2b837836d4072d7bbb3a3e3183bb5cc11
| 7,170
|
py
|
Python
|
clusterman/common/sfx.py
|
akshaysharma096/clusterman
|
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
|
[
"Apache-2.0"
] | 281
|
2019-11-15T03:12:43.000Z
|
2022-01-07T06:36:58.000Z
|
clusterman/common/sfx.py
|
akshaysharma096/clusterman
|
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
|
[
"Apache-2.0"
] | 38
|
2019-11-18T20:15:47.000Z
|
2022-03-28T11:28:45.000Z
|
clusterman/common/sfx.py
|
akshaysharma096/clusterman
|
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
|
[
"Apache-2.0"
] | 21
|
2019-11-16T07:49:40.000Z
|
2022-02-09T18:13:48.000Z
|
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import colorlog
import signalfx
from arrow import Arrow
from signalfx.signalflow.messages import DataMessage
logger = colorlog.getLogger(__name__)
TS_QUERY_PROGRAM_TEMPLATE = """data(
"{metric}",
filter={filters},
extrapolation="{extrapolation}",
maxExtrapolations={max_extrapolations},
rollup={rollup}
){aggregation}.publish()
"""
class Aggregation:
def __init__(self, method, by=None, over=None):
self.method = method
if by and over:
raise ValueError(f"by and over cannot both be set: {by}, {over}")
self.by = by
self.over = over
def __str__(self):
if self.by:
args = f"by={str(self.by)}"
elif self.over:
args = f"over={self.over}"
else:
args = ""
return "{method}({args})".format(method=self.method, args=args)
def __eq__(self, other):
return self.method == other.method and self.by == other.by and self.over == other.over
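# Illustrative renderings of the Aggregation class above (outputs shown as comments):
#   str(Aggregation('sum'))                   -> "sum()"
#   str(Aggregation('mean', by=['cluster']))  -> "mean(by=['cluster'])"
#   str(Aggregation('sum', over='5m'))        -> "sum(over=5m)"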
def _make_ts_label(raw_data, tsid, dimensions):
""" Make a label for a timeseries data point returned from SignalFX
:param raw_data: a processed data stream from SFX
:param tsid: the timeseries ID for a datapoint in the SFX stream
:param dimensions: a list of dimensions to create the label from
:returns: a comma-separated list of the specified dimension values for this tsid
"""
if not dimensions:
return ""
metadata = raw_data.get_metadata(tsid)
return ",".join([metadata[dim] for dim in sorted(dimensions)])
def _make_filter_string(filters):
""" Create a filter string used to modify a SignalFX query
:param filters: a list of (filter_name, value) tuples
    :returns: a SignalFlow filter string -- 'filter("filter_1", "value_1") and filter("filter_2", "value_2")'
"""
if not filters:
return "None"
fstring = ""
for name, value in filters:
fstring += f'filter("{name}", "{value}") and '
return fstring[:-5]
def execute_sfx_program(api_token, program, start_time, end_time, dimensions=None, resolution=60):
""" Execute an arbitrary SignalFlow program
:param api_token: a valid SFX API query token (you can get this from the SignalFX dashboard)
:param program: a valid signalflow program to execute
:param start_time: beginning of program execution range, as an Arrow object
:param end_time: end of program execution range, as an Arrow object
:param dimensions: list of strings to group the returned timeseries by
:param resolution: smallest time interval (in seconds) to evaluate the program on
note: SignalFX has a maximum resolution of 1 minute, and only for the most recent data;
setting a resolution higher than this (or even 1 minute for older data) will be ignored
:returns: a list of (timestamp, data_points) tuples, where data_points is a dict of timeseries_name -> value
"""
with signalfx.SignalFx().signalflow(api_token) as sfx:
curr_time = start_time
datapoints = []
while curr_time < end_time:
# To prevent overloading SignalFX we grab a maximum of 5 days worth of data at a time
next_time = min(curr_time.shift(days=5), end_time)
logger.info(f"Querying SignalFX from {curr_time} to {next_time}")
raw_data = sfx.execute(
program,
# SignalFX operates on millisecond timescales
start=curr_time.timestamp * 1000,
stop=next_time.timestamp * 1000,
resolution=resolution * 1000,
)
# We can only call _make_ts_label after all of the entries in the raw_data.stream() have been processed
data_messages = [msg for msg in raw_data.stream() if isinstance(msg, DataMessage)]
new_datapoints = sorted(
[
(
Arrow.utcfromtimestamp(msg.logical_timestamp_ms / 1000),
{_make_ts_label(raw_data, key, dimensions): value for key, value in msg.data.items()},
)
for msg in data_messages
]
)
# SignalFX sometimes gives us duplicate datapoints at the beginning of one chunk/the start of
# the next chunk. This doesn't play nicely with the metrics client so detect and remove those here
if datapoints and new_datapoints[0][0] == datapoints[-1][0]:
new_datapoints = new_datapoints[1:]
datapoints.extend(new_datapoints)
curr_time = next_time
return datapoints
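# For reference (dates are hypothetical): a 12-day query range is fetched in 5-day chunks,
# e.g. [t0, t0+5d), [t0+5d, t0+10d), [t0+10d, t0+12d), and any datapoint duplicated at a
# chunk boundary is dropped before the chunks are concatenated.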
def basic_sfx_query(
api_token,
metric,
start_time,
end_time,
rollup="average",
extrapolation="null",
max_extrapolations=0,
filters=None,
resolution=60,
aggregation=Aggregation("sum"),
):
""" Run the simplest of all SignalFX queries: specify a metric name to query and (optionally) some filters, and sum
the results into a single timeseries.
:param api_token: a valid SFX API query token (you can get this from the SignalFX dashboard)
:param metric: name of the metric to query
:param start_time: beginning of program execution range, as an Arrow object
:param end_time: end of program execution range, as an Arrow object
:param rollup: a valid SignalFX rollup string, or None for the default
:param extrapolation: one of 'null', 'zero', or 'last_value'
:param max_extrapolations: how many times to apply the extrapolation policy
:param filters: a list of (filter_name, filter_value) tuples
:param resolution: smallest time interval (in seconds) to evaluate the program on
note: SignalFX has a maximum resolution of 1 minute, and only for the most recent data;
setting a resolution higher than this (or even 1 minute for older data) will be ignored
:param aggregation: an Aggregation object describing how to group the results
:returns: a list of (timestamp, value) tuples
"""
rollup = f'"{rollup}"' if rollup else "None"
agg_string = f".{aggregation}" if aggregation else ""
program = TS_QUERY_PROGRAM_TEMPLATE.format(
metric=metric,
filters=_make_filter_string(filters),
rollup=rollup,
extrapolation=extrapolation,
max_extrapolations=max_extrapolations,
aggregation=agg_string,
)
return execute_sfx_program(
api_token,
program,
start_time,
end_time,
resolution=resolution,
dimensions=(aggregation.by if aggregation else []),
)
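# Hedged usage sketch (the token, metric and filter values are hypothetical; start_time and
# end_time are Arrow objects):
#
#   datapoints = basic_sfx_query(
#       api_token='MY_SFX_TOKEN',
#       metric='cpu.utilization',
#       start_time=start_time,
#       end_time=end_time,
#       filters=[('cluster', 'prod')],
#   )
#
# With the defaults above this renders a SignalFlow program of the form
#   data("cpu.utilization", filter=filter("cluster", "prod"), extrapolation="null",
#        maxExtrapolations=0, rollup="average").sum().publish()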
| 39.833333
| 119
| 0.666527
|
a0c8ac1028b87920e32d494bd4eeac6c87a5e22b
| 8,426
|
py
|
Python
|
setup.py
|
farisachugthai/sphinx
|
315bffeedf12e6592ed473a9314d896bfee85145
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
farisachugthai/sphinx
|
315bffeedf12e6592ed473a9314d896bfee85145
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
farisachugthai/sphinx
|
315bffeedf12e6592ed473a9314d896bfee85145
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import sys
from distutils import log
from io import StringIO
from setuptools import find_packages, setup
import sphinx
with open('README.rst') as f:
long_desc = f.read()
if sys.version_info < (3, 6):
print('ERROR: Sphinx requires at least Python 3.6 to run.')
sys.exit(1)
install_requires = [
'sphinxcontrib-applehelp',
'sphinxcontrib-devhelp',
'sphinxcontrib-jsmath',
'sphinxcontrib-htmlhelp',
'sphinxcontrib-serializinghtml',
'sphinxcontrib-qthelp',
'Jinja2>=2.3',
'Pygments>=2.0',
'docutils>=0.14',
'snowballstemmer>=1.1',
'babel>=1.3',
'alabaster>=0.7,<0.8',
'imagesize',
'requests>=2.5.0',
'setuptools',
'packaging',
]
extras_require = {
# Environment Marker works for wheel 0.24 or later
':sys_platform=="win32"': [
'colorama>=0.3.5',
],
'docs': [
'sphinxcontrib-websupport',
],
'lint': [
'flake8>=3.5.0',
'isort',
'mypy>=0.790',
'docutils-stubs',
],
'test': [
'pytest',
'pytest-cov',
'html5lib',
"typed_ast; python_version < '3.8'",
'cython',
],
}
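# For reference (hedged, not part of the original file): the extras above map to installs such as
#   pip install .[test]        # pytest, pytest-cov, html5lib, cython, ...
#   pip install .[docs,lint]   # docs and lint tooling in one go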
# Provide a "compile_catalog" command that also creates the translated
# JavaScript files if Babel is available.
cmdclass = {}
class Tee:
def __init__(self, stream):
self.stream = stream
self.buffer = StringIO()
def write(self, s):
self.stream.write(s)
self.buffer.write(s)
def flush(self):
self.stream.flush()
try:
from json import dump
from babel.messages.frontend import compile_catalog
from babel.messages.pofile import read_po
except ImportError:
pass
else:
class compile_catalog_plusjs(compile_catalog):
"""
An extended command that writes all message strings that occur in
JavaScript files to a JavaScript file along with the .mo file.
    Unfortunately, babel's setup command isn't built to be very extensible, so
    most of the run() code is duplicated here.
"""
def run(self):
try:
sys.stderr = Tee(sys.stderr)
compile_catalog.run(self)
finally:
if sys.stderr.buffer.getvalue():
print("Compiling failed.")
sys.exit(1)
if isinstance(self.domain, list):
for domain in self.domain:
self._run_domain_js(domain)
else:
self._run_domain_js(self.domain)
def _run_domain_js(self, domain):
po_files = []
js_files = []
if not self.input_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.directory, self.locale,
'LC_MESSAGES',
domain + '.po')))
js_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
domain + '.js'))
else:
for locale in os.listdir(self.directory):
po_file = os.path.join(self.directory, locale,
'LC_MESSAGES',
domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
js_files.append(os.path.join(self.directory, locale,
'LC_MESSAGES',
domain + '.js'))
else:
po_files.append((self.locale, self.input_file))
if self.output_file:
js_files.append(self.output_file)
else:
js_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
domain + '.js'))
for js_file, (locale, po_file) in zip(js_files, po_files):
with open(po_file, encoding='utf8') as infile:
catalog = read_po(infile, locale)
if catalog.fuzzy and not self.use_fuzzy:
continue
log.info('writing JavaScript strings in catalog %r to %r',
po_file, js_file)
jscatalog = {}
for message in catalog:
if any(x[0].endswith(('.js', '.js_t', '.html'))
for x in message.locations):
msgid = message.id
if isinstance(msgid, (list, tuple)):
msgid = msgid[0]
jscatalog[msgid] = message.string
with open(js_file, 'wt', encoding='utf8') as outfile:
outfile.write('Documentation.addTranslations(')
dump({
'messages': jscatalog,
'plural_expr': catalog.plural_expr,
'locale': str(catalog.locale)
}, outfile, sort_keys=True, indent=4)
outfile.write(');')
cmdclass['compile_catalog'] = compile_catalog_plusjs
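# For reference (the locale, path and strings below are hypothetical): for a German catalog the
# generated sphinx/locale/de/LC_MESSAGES/sphinx.js would look roughly like
#   Documentation.addTranslations({
#       "locale": "de",
#       "messages": {"Permalink to this headline": "Permalink zu dieser Überschrift"},
#       "plural_expr": "(n != 1)"
#   });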
setup(
name='Sphinx',
version=sphinx.__version__,
url='http://sphinx-doc.org/',
download_url='https://pypi.org/project/Sphinx/',
license='BSD',
author='Georg Brandl',
author_email='georg@python.org',
description='Python documentation generator',
long_description=long_desc,
long_description_content_type='text/x-rst',
project_urls={
"Code": "https://github.com/sphinx-doc/sphinx",
"Issue tracker": "https://github.com/sphinx-doc/sphinx/issues",
},
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Setuptools Plugin',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'Framework :: Sphinx :: Theme',
'Topic :: Documentation',
'Topic :: Documentation :: Sphinx',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Printing',
'Topic :: Software Development',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Indexing',
'Topic :: Text Processing :: Markup',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: LaTeX',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(exclude=['tests', 'utils']),
package_data = {
'sphinx': ['py.typed'],
},
include_package_data=True,
entry_points={
'console_scripts': [
'sphinx-build = sphinx.cmd.build:main',
'sphinx-quickstart = sphinx.cmd.quickstart:main',
'sphinx-apidoc = sphinx.ext.apidoc:main',
'sphinx-autogen = sphinx.ext.autosummary.generate:main',
],
'distutils.commands': [
'build_sphinx = sphinx.setup_command:BuildDoc',
],
},
python_requires=">=3.6",
install_requires=install_requires,
extras_require=extras_require,
cmdclass=cmdclass,
)
| 33.839357
| 80
| 0.522312
|
86bb186060a7220978a0e2b32745a85ceb2c50a6
| 23,989
|
py
|
Python
|
main.py
|
Xilinx/SOM-Dashboard
|
7620e9d4ac9e22bdb96fd130aca7a5004ff05a66
|
[
"Apache-2.0"
] | 1
|
2021-09-22T12:52:12.000Z
|
2021-09-22T12:52:12.000Z
|
main.py
|
Xilinx/SOM-Dashboard
|
7620e9d4ac9e22bdb96fd130aca7a5004ff05a66
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Xilinx/SOM-Dashboard
|
7620e9d4ac9e22bdb96fd130aca7a5004ff05a66
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bokeh.plotting import figure, curdoc
from bokeh.layouts import layout, row, column, gridplot
from bokeh.models.widgets import Tabs, Panel
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource, DataTable, DateFormatter, TableColumn, HTMLTemplateFormatter
from bokeh.models import Button, Div, CheckboxGroup, Range1d
from bokeh.models import HoverTool
from bokeh.models import TextInput
from bokeh.models import Paragraph, Div, CustomJS
from bokeh.events import ButtonClick
from bokeh.themes import built_in_themes
from bokeh.driving import linear
import psutil
from collections import deque
import subprocess
from functools import partial
bg_color = '#15191C'
text_color = '#E0E0E0'
##################################################
##### Platform Stat Tab ##########################
##################################################
sample_size = 60
sample_size_actual = 60
interval = 1
x = deque([0] * sample_size)
color_list = ["darkseagreen", "steelblue", "indianred", "chocolate", "mediumpurple", "rosybrown", "gold",
"mediumaquamarine"]
def get_mem(memtype):
mem_val = int(
''.join(filter(str.isdigit, str(subprocess.run(['/bin/grep', memtype, '/proc/meminfo'], capture_output=True)))))
return mem_val
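# Illustrative only (the value is hypothetical): get_mem('MemTotal') greps /proc/meminfo for a
# line such as "MemTotal:  4045080 kB" and returns the numeric value in kB as an int,
# e.g. 4045080; the same helper feeds the MemFree line plot and the memory-usage bar chart below.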
def clear_min_max():
max_volt[:] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
max_temp[:] = [0, 0, 0]
min_volt[:] = [7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000]
min_temp[:] = [200, 200, 200]
global average_cpu, average_cpu_sample_size
average_cpu = 0
average_cpu_sample_size = 0
cpu_labels = [
"A-53_Core_0",
"A-53_Core_1",
"A-53_Core_2",
"A-53_Core_3",
]
cpu_data = {
'A-53_Core_0': deque([0.0] * sample_size),
'A-53_Core_1': deque([0.0] * sample_size),
'A-53_Core_2': deque([0.0] * sample_size),
'A-53_Core_3': deque([0.0] * sample_size),
}
volt_labels = [
"VCC_PSPLL",
"PL_VCCINT",
"VOLT_DDRS",
"VCC_PSINTFP",
"VCC_PS_FPD",
"PS_IO_BANK_500",
"VCC_PS_GTR",
"VTT_PS_GTR",
"Total_Volt",
]
volt_data = {
"VCC_PSPLL": deque([0] * sample_size),
"PL_VCCINT": deque([0] * sample_size),
"VOLT_DDRS": deque([0] * sample_size),
"VCC_PSINTFP": deque([0] * sample_size),
"VCC_PS_FPD": deque([0] * sample_size),
"PS_IO_BANK_500": deque([0] * sample_size),
"VCC_PS_GTR": deque([0] * sample_size),
"VTT_PS_GTR": deque([0] * sample_size),
"Total_Volt": deque([0] * sample_size),
}
temp_labels = [
"FPD_TEMP",
"LPD_TEMP",
"PL_TEMP",
]
temp_data = {
"FPD_TEMP": deque([0.0] * sample_size),
"LPD_TEMP": deque([0.0] * sample_size),
"PL_TEMP": deque([0.0] * sample_size),
}
# note: if a queue is not appended on every sample, remove it from this data structure,
# otherwise popping the queues when the sample size is reduced will not work!
mem_labels = [
# "MemTotal",
"MemFree",
# "MemAvailable",
# "SwapTotal",
# "SwapFree",
# "CmaTotal",
# "CmaFree",
]
mem_data = {
# "MemTotal": deque([0] * sample_size),
"MemFree": deque([0] * sample_size),
# "MemAvailable": deque([0] * sample_size),
# "SwapTotal": deque([0] * sample_size),
# "SwapFree": deque([0] * sample_size),
# "CmaTotal": deque([0] * sample_size),
# "CmaFree": deque([0] * sample_size),
}
current_data = deque([0] * sample_size)
power_data = deque([0] * sample_size)
# title
title1 = Div(
text="""<h1 style="color :""" + text_color + """; text-align :center">Kria™ SOM: Hardware Platform Statistics</h1>""",
width=550)
# average cpu display
average_cpu = 0.0
average_cpu_sample_size = 0
average_cpu_display = Div(text=str(average_cpu), width=600)
# CPU frequency display
cpu_freq_text = """<h3 style="color :""" + text_color + """;">CPU Frequencies </h3>"""
cpu_freq = [0, 0, 0, 0]
cpu_freq_display = Div(text=cpu_freq_text, width=400)
# CPU line plot
cpu_plot = figure(plot_width=800, plot_height=300, title='CPU Utilization %')
cpu_ds = [0, 0, 0, 0]
for i in range(len(cpu_labels)):
cpu_ds[i] = (cpu_plot.line(x, cpu_data[cpu_labels[i]], line_width=2,
color=color_list[i], legend_label=cpu_labels[i])).data_source
cpu_plot.legend.click_policy = "hide"
# current line plot
current_plot = figure(plot_width=500, plot_height=300, title='Total SOM Current in mA')
current_ds = (current_plot.line(x, current_data, line_width=2,
color=color_list[0], legend_label="Current")).data_source
current_plot.legend.click_policy = "hide"
# power line plot
power_plot = figure(plot_width=500, plot_height=300, title='Total SOM Power in W')
power_ds = (power_plot.line(x, power_data, line_width=2,
color=color_list[0], legend_label="Power")).data_source
power_plot.legend.click_policy = "hide"
# temperature line plot
temp_plot = figure(plot_width=500, plot_height=300, title='Temperature in Celsius')
temp_ds = [0, 0, 0, 0]
temp_ds[0] = (temp_plot.line(x, temp_data[temp_labels[0]], line_width=2,
color=color_list[0], legend_label=temp_labels[0])).data_source
temp_plot.legend.click_policy = "hide"
# table of min/max for temperature
max_temp = [0.0, 0.0, 0.0]
min_temp = [200.0, 200.0, 200.0]
min_max_temp = dict(temp_labels=temp_labels, max_temp=max_temp, min_temp=min_temp)
min_max_temp_source = ColumnDataSource(min_max_temp)
min_max_temp_column = [
TableColumn(field="temp_labels", title="Temperature"),
TableColumn(field="max_temp", title="Max"),
TableColumn(field="min_temp", title="Min")
]
temp_data_table = DataTable(source=min_max_temp_source, columns=min_max_temp_column, index_position=None,
width=400, height=200, background=bg_color, css_classes=['custom_table'])
# table of min/max for voltages
max_volt = [0, 0, 0, 0, 0, 0, 0, 0, 0]
min_volt = [7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000]
min_max_volt = dict(volt_labels=volt_labels, max_volt=max_volt, min_volt=min_volt)
min_max_volt_source = ColumnDataSource(min_max_volt)
min_max_volt_column = [
TableColumn(field="volt_labels", title="Voltage"),
TableColumn(field="max_volt", title="Max"),
TableColumn(field="min_volt", title="Min")
]
volt_data_table = DataTable(source=min_max_volt_source, columns=min_max_volt_column, index_position=None,
width=400, height=200, background=bg_color, css_classes=['custom_table'])
# memory line plot
mem_plot = figure(plot_width=800, plot_height=300, title='Total Free Memory in kB')
mem_ds = (mem_plot.line(x, mem_data["MemFree"], line_width=2,
color=color_list[0], legend_label="MemFree")).data_source
mem_plot.legend.click_policy = "hide"
# memory bar plot
mem_bar_label = ['MemUsed', 'SwapUsed', 'CMAUsed']
mem_bar_total = [0, 0, 0]
mem_bar_used = [0, 0, 0]
mem_bar_available = [0, 0, 0]
mem_bar_percent = [0.0, 0.0, 0.0]
mem_bar_dict = dict(mem_bar_label=mem_bar_label, mem_bar_total=mem_bar_total,
mem_bar_used=mem_bar_used, mem_bar_percent=mem_bar_percent,
mem_bar_available=mem_bar_available)
mem_bar_source = ColumnDataSource(mem_bar_dict)
mem_plot_hbar = figure(y_range=mem_bar_label, x_range=[0, 100], plot_width=800, plot_height=300,
title='Memory Usage in %')
mem_plot_hbar.xaxis.axis_label = '%Used'
mem_percent_ds = (mem_plot_hbar.hbar(y='mem_bar_label', right='mem_bar_percent',
tags=mem_bar_label, source=mem_bar_source,
height=.5, fill_color='steelblue',
hatch_pattern='vertical_line', hatch_weight=2, line_width=0)).data_source
hover = HoverTool(tooltips=[("Total in kB:", "@mem_bar_total"), ("Used in kB:", "@mem_bar_used")])
mem_plot_hbar.add_tools(hover)
# reset button
reset_button = Button(label="Reset Min/Max and Averages", width=200, button_type='primary')
reset_button.on_click(clear_min_max)
# sample interval
def update_interval(attr, old, new):
global interval
interval = max(float(new), 0.5)
global input_interval
input_interval.value = str(interval)
global callback
curdoc().remove_periodic_callback(callback)
callback = curdoc().add_periodic_callback(update, interval * 1000)
input_interval = TextInput(value=str(interval), title="input interval in seconds (minimal 0.5s):",
css_classes=['custom_textinput'], width=100)
input_interval.on_change('value', update_interval)
# sample size
def update_sample_size(attr, old, new):
global sample_size, sample_size_actual
new_sample_size = int(new)
if new_sample_size < sample_size_actual:
excess = sample_size_actual - new_sample_size
while excess > 0:
            x.popleft()
for j in range(len(cpu_labels)):
cpu_data[cpu_labels[j]].popleft()
for j in range(len(volt_labels)):
volt_data[volt_labels[j]].popleft()
for j in range(len(temp_labels)):
temp_data[temp_labels[j]].popleft()
for j in range(len(mem_labels)):
mem_data[mem_labels[j]].popleft()
excess = excess - 1
sample_size_actual = new_sample_size
sample_size = new_sample_size
input_sample_size = TextInput(value=str(sample_size), title="Sample Size:",
css_classes=['custom_textinput'], width=100)
input_sample_size.on_change('value', update_sample_size)
time = 0
# default_data_range = cpu_plot.y_range
cpu_plot.y_range = Range1d(0, 100)
mem_plot.y_range = Range1d(0, get_mem("MemTotal"))
power_plot.y_range = Range1d(0, 6)
current_plot.y_range = Range1d(0, 1000)
temp_plot.y_range = Range1d(0, 100)
# # dynamic scaling:
# def update_scaling(attr, old, new):
# if new == [0]:
# cpu_plot.y_range = default_data_range
# cpu_plot.title.text = "name 1"
# else:
# cpu_plot.y_range = Range1d(0, 50)
# cpu_plot.title.text = "name 2"
#
# checkbox_labels = ["Enable Dynamic Y-axis Scaling"]
# checkbox_group = CheckboxGroup(labels=checkbox_labels, active=[], css_classes=['custom_textinput'],)
# checkbox_group.on_change('active', update_scaling)
@linear()
def update(step):
global time
global sample_size_actual
time = time + interval
if sample_size_actual >= sample_size:
x.popleft()
x.append(time)
read = psutil.cpu_percent(percpu=True)
average_cpu_x = 0
for j in range(len(cpu_labels)):
if sample_size_actual >= sample_size:
cpu_data[cpu_labels[j]].popleft()
cpu_data_read = read[j]
cpu_data[cpu_labels[j]].append(cpu_data_read)
cpu_ds[j].trigger('data', x, cpu_data[cpu_labels[j]])
average_cpu_x = average_cpu_x + cpu_data_read
# average CPU usage
global average_cpu_sample_size, average_cpu
average_cpu = average_cpu * average_cpu_sample_size
average_cpu_sample_size = average_cpu_sample_size + 1
average_cpu = (average_cpu + (average_cpu_x / 4)) / average_cpu_sample_size
text = """<h2 style="color :""" + text_color + """;">""" + \
" Average CPU utilization over last " + str(average_cpu_sample_size) + \
" samples is " + str(round(average_cpu, 2)) + """%</h2>"""
average_cpu_display.text = text
# CPU frequency
cpu_freq = []
for j in range(4):
cpu_freq.append(open('/sys/devices/system/cpu/cpu' + str(j) + '/cpufreq/cpuinfo_cur_freq', 'r').read())
cpu_freq_display.text = cpu_freq_text + """<p style="color :""" + text_color + """;"> CPU0:""" + cpu_freq[0] + \
"MHz<br> CPU1:" + cpu_freq[1] + \
"MHz<br> CPU2:" + cpu_freq[2] + \
"MHz<br> CPU3:" + cpu_freq[3] + "MHz"
volts = []
for j in range(len(volt_labels) - 1):
volts.append(open('/sys/class/hwmon/hwmon0/in' + str(j + 1) + '_input', 'r').read())
volts = [j.replace('\n', '') for j in volts]
volts.append(int((open('/sys/class/hwmon/hwmon1/in1_input', 'r').read()).replace('\n', '')))
for j in range(len(volt_labels)):
if sample_size_actual >= sample_size:
volt_data[volt_labels[j]].popleft()
volt_read = int(volts[j])
volt_data[volt_labels[j]].append(volt_read)
if (volt_read < min_volt[j]) or (volt_read > max_volt[j]):
min_volt[j] = min(min_volt[j], int(volts[j]))
max_volt[j] = max(max_volt[j], int(volts[j]))
volt_data_table.source.trigger('data', volt_data_table.source, volt_data_table.source)
temperatures = []
for j in range(len(temp_labels)):
temperatures.append(open('/sys/class/hwmon/hwmon0/temp' + str(j + 1) + '_input', 'r').read())
temperatures = [j.replace('\n', '') for j in temperatures]
for j in range(len(temp_labels)):
if sample_size_actual >= sample_size:
temp_data[temp_labels[j]].popleft()
temperature_read = (float(temperatures[j])) / 1000
temp_data[temp_labels[j]].append(temperature_read)
if (temperature_read < min_temp[j]) or (temperature_read > max_temp[j]):
min_temp[j] = min(min_temp[j], temperature_read)
max_temp[j] = max(max_temp[j], temperature_read)
temp_data_table.source.trigger('data', temp_data_table.source, temp_data_table.source)
temp_ds[0].trigger('data', x, temp_data[temp_labels[0]])
ina260_current = (open('/sys/class/hwmon/hwmon1/curr1_input', 'r').read()).replace('\n', '')
if sample_size_actual >= sample_size:
current_data.popleft()
current_data.append(int(ina260_current))
current_ds.trigger('data', x, current_data)
ina260_power = int((open('/sys/class/hwmon/hwmon1/power1_input', 'r').read()).replace('\n', '')) / 1000000
if sample_size_actual >= sample_size:
power_data.popleft()
power_data.append(ina260_power)
power_ds.trigger('data', x, power_data)
# Mem line chart
mem_num = get_mem("MemFree")
if sample_size_actual >= sample_size:
mem_data["MemFree"].popleft()
mem_data["MemFree"].append(mem_num)
mem_ds.trigger('data', x, mem_data["MemFree"])
# Memory usage Horizontal bar chart
mem_bar_total[0] = get_mem('MemTotal')
mem_bar_available[0] = get_mem('MemAvailable')
mem_bar_used[0] = mem_bar_total[0] - mem_bar_available[0]
mem_bar_percent[0] = 100 * mem_bar_used[0] / max(mem_bar_total[0], 1)
mem_bar_total[1] = get_mem('SwapTotal')
mem_bar_available[1] = get_mem('SwapFree')
mem_bar_used[1] = mem_bar_total[1] - mem_bar_available[1]
mem_bar_percent[1] = 100 * mem_bar_used[1] / max(mem_bar_total[1], 1)
mem_bar_total[2] = get_mem('CmaTotal')
mem_bar_available[2] = get_mem('CmaFree')
mem_bar_used[2] = mem_bar_total[2] - mem_bar_available[2]
mem_bar_percent[2] = 100 * mem_bar_used[2] / max(mem_bar_total[2], 1)
mem_percent_ds.trigger('data', mem_bar_label, mem_bar_percent)
if sample_size_actual < sample_size:
sample_size_actual = sample_size_actual + 1
# margin: Margin-Top, Margin-Right, Margin-Bottom and Margin-Left
user_interface = column(reset_button, input_sample_size, input_interval, #checkbox_group,
background=bg_color,
margin=(50, 50, 50, 100))
cpu_freq_block = column(cpu_freq_display,
background=bg_color,
margin=(0, 0, 0, 100))
layout1 = layout(column(row(title1, align='center'),
average_cpu_display,
row(cpu_plot, user_interface, cpu_freq_block, background=bg_color),
row(mem_plot, mem_plot_hbar, background=bg_color),
row(power_plot, current_plot, temp_plot, background=bg_color),
row(volt_data_table, temp_data_table, background=bg_color),
background=bg_color))
# Add a periodic callback to be run every 1000 milliseconds
callback = curdoc().add_periodic_callback(update, interval * 1000)
##################################################
##### Application Cockpit Tab ####################
##################################################
title2 = Div(
text="""<h1 style="color :""" + text_color + """; text-align :center">Kria™ SOM: Application Cockpit</h1>""",
width=500)
def xmutil_unloadapp():
if current_command:
terminate_app()
subprocess.run(["sudo", "xmutil", "unloadapp"])
draw_apps()
#draw_app_run_buttons()
layout2.children[4] = column(load_buttons, margin=(0, 0, 0, 50))
layout2.children[1] = active_app_print
#layout2.children[2] = row(run_buttons)
unload_button = Button(label="Unloadapp", width=600, button_type='primary')
unload_button.on_click(xmutil_unloadapp)
# Apps!!!!!###########################################################################################################
def xmutil_loadapp(app_name):
if current_command:
print("\nError: unexpected command:", current_command, "\n")
command = str('sudo xmutil loadapp ' + app_name)
subprocess.run(command, shell=True, capture_output=True)
draw_apps()
#draw_app_run_buttons()
layout2.children[4] = column(load_buttons, margin=(0, 0, 0, 50))
layout2.children[1] = active_app_print
#layout2.children[2] = row(run_buttons)
# list out applications - currently listPackage doesn't return stdout correctly; temporarily use a fixed string for dev
# listapp_output = subprocess.run(['sudo dfx-mgr-client -listPackage | grep kv260'], shell=True, stdout=subprocess.PIPE)
# print("list app output", listapp_output.stdout)
load_buttons = []
active_app_print = Div(
text="""<h2 style="color :""" + text_color + """; text-align :center">Active Accelerator: None</h2>""",
width=600)
active_app = "None"
def draw_apps():
global load_buttons
global active_app_print
global active_app
active_app = "None"
listapp_output = subprocess.run(['sudo dfx-mgr-client -listPackage'], shell=True,
stdout=subprocess.PIPE).stdout.decode("utf-8")
print("\n", listapp_output, "\n")
listapp = listapp_output.split("\n")
apps = []
load_buttons = []
for i in range(len(listapp) - 1):
x = listapp[i].split()
print("\n x is ", x, " i is ", i, "\n")
if x and x[0] != "Accelerator":
apps.append(x[0])
if x[4] != "-1":
active_app = x[0]
active_app_print = Div(
text="""<h2 style="color :""" + text_color + """; text-align :center">Active Accelerator: """ + active_app + """</h2>""",
width=600)
for i in range(len(apps)):
load_buttons.append(Button(label=apps[i], width=300, button_type='primary'))
if active_app != "None":
if apps[i] == active_app:
load_buttons[i].button_type = 'success'
load_buttons[i].js_on_click(
CustomJS(code='alert("This Accelerator is already loaded, Unloadapp first!");'))
else:
load_buttons[i].button_type = 'default'
load_buttons[i].js_on_click(CustomJS(code='alert("Unloadapp First!");'))
else:
load_buttons[i].on_click(partial(xmutil_loadapp, app_name=apps[i]))
app_print = Div(
text="""<h2 style="color :""" + text_color + """; text-align :left">Available Accelerated Applications on
target to load</h2><h4 style="color :""" + text_color + """; text-align :left">  Blue - click
to load, Green - Loaded Accelerator, White - available to load after unloading</h4>""", width=1600)
draw_apps()
current_command = None
def terminate_app():
global current_command
current_command.terminate()
current_command = None
def run_app(run_command):
global current_command
if current_command:
terminate_app()
print("run_command:", run_command, "\n\n")
current_command = subprocess.Popen(run_command, shell=True)
print("\n\ncurrent command: ", current_command, "\n\n")
# run_buttons = []
#
#
# def draw_app_run_buttons():
# global run_buttons
# global active_app
# run_buttons = []
# if active_app == "None":
# return
# less_cmd = 'less som_dashboard/commands/' + active_app + '_cmds.txt'
# print(less_cmd)
# less_return = subprocess.run(less_cmd, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
# run_commands_txt = less_return.stdout.decode("utf-8")
# if "No such file" in run_commands_txt:
# return
# run_commands = run_commands_txt.split('\n')
# for commands in run_commands:
# x = commands.split(',')
# button = Button(label=x[0], width=300, button_type='primary')
# button.on_click(partial(run_app, run_command=x[1]))
# run_buttons.append(button)
#
#
# draw_app_run_buttons()
# packages!!###########################################################################################################
package_print = Div(
text="""<h2 style="color :""" + text_color + """; text-align :center">Available Accelerated Application
Packages, click button below to download and DNF install the chosen package</h2>""", width=1600)
def dnf_install(app_name):
command = str('sudo dnf install ' + app_name + " -y")
print("execute command: ", command)
subprocess.call(command, shell=True)
print("finished command: ", command)
draw_pkgs()
layout2.children[6] = column(pkgs_buttons, margin=(0, 0, 0, 50))
draw_apps()
layout2.children[4] = column(load_buttons, margin=(0, 0, 0, 50))
pkgs_buttons = []
def draw_pkgs():
global pkgs_buttons
# subprocess.run(['sudo dnf update'], shell=True)
# subprocess.run(['sudo dnf clean all'], shell=True)
getpkgs_output = subprocess.run(['sudo xmutil getpkgs | grep packagegroup-kv260'], shell=True,
stdout=subprocess.PIPE).stdout.decode("utf-8")
print("getpkgs_output", getpkgs_output)
list_pkgs = getpkgs_output.split("\n")
pkgs_buttons = []
for i in range(len(list_pkgs) - 1):
x = list_pkgs[i].split()
pkgs_buttons.append(Button(label=x[0], width=300, button_type='primary'))
pkgs_buttons[i].on_click(partial(dnf_install, app_name=x[0]))
draw_pkgs()
app_print2 = Div(
text="""<h3 style="color :""" + text_color + """; text-align :center">To execute application, use command
line or start Jupyter lab and use Jupyter notebooks. </h3>""", width=1600)
layout2 = layout([
row(title2, align='center'), # 0
[active_app_print], # 1
# row(run_buttons), # 2
column(unload_button, margin=(0, 0, 0, 50)), # 2
[app_print], # 3
column(load_buttons, margin=(0, 0, 0, 50)), # 4
[package_print], # 5
column(pkgs_buttons, margin=(0, 0, 0, 50)), # 6
row(app_print2, margin=(100, 0, 400, 0))
])
layout2.background = bg_color
##################################################
##### Group Tabs ##########################
##################################################
curdoc().theme = 'dark_minimal'
tab1 = Panel(child=layout1, title="Platform Statistic Dashboard")
tab2 = Panel(child=layout2, title="Application Cockpit")
tabs = Tabs(tabs=[tab1, tab2])
curdoc().add_root(tabs)
| 37.718553
| 129
| 0.640502
|
4530bfcf9f3e2c1b4f368752d7a94f8b54246fbc
| 96
|
py
|
Python
|
server/api_modules/apps.py
|
impatrq/safe
|
e7ffd6a052304aceb4dd582df44032a29564f555
|
[
"MIT"
] | 1
|
2021-07-13T23:01:56.000Z
|
2021-07-13T23:01:56.000Z
|
server/api_modules/apps.py
|
impatrq/safe
|
e7ffd6a052304aceb4dd582df44032a29564f555
|
[
"MIT"
] | null | null | null |
server/api_modules/apps.py
|
impatrq/safe
|
e7ffd6a052304aceb4dd582df44032a29564f555
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ApiModulesConfig(AppConfig):
name = 'api_modules'
| 16
| 34
| 0.770833
|
f5547fb16dc023570f333aabfc5d0a60fae75a6c
| 1,231
|
py
|
Python
|
release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasSelectionHitResult.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasSelectionHitResult.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasSelectionHitResult.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class InkCanvasSelectionHitResult(Enum,IComparable,IFormattable,IConvertible):
"""
 Identifies the various parts of a selection adorner on a System.Windows.Controls.InkCanvas.
enum InkCanvasSelectionHitResult,values: Bottom (6),BottomLeft (7),BottomRight (5),Left (8),None (0),Right (4),Selection (9),Top (2),TopLeft (1),TopRight (3)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Bottom=None
BottomLeft=None
BottomRight=None
Left=None
None=None
Right=None
Selection=None
Top=None
TopLeft=None
TopRight=None
value__=None
| 27.977273
| 215
| 0.68156
|
33d297f3ca80b9b0e80af23ac92b8bb2a1d79d89
| 87,833
|
py
|
Python
|
sdk/python/feast/feature_store.py
|
aurobindoc/feast
|
72f155882c95f21573b31a613edf066bdb55f630
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/feature_store.py
|
aurobindoc/feast
|
72f155882c95f21573b31a613edf066bdb55f630
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/feature_store.py
|
aurobindoc/feast
|
72f155882c95f21573b31a613edf066bdb55f630
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import os
import warnings
from collections import Counter, defaultdict
from datetime import datetime
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import pandas as pd
from colorama import Fore, Style
from google.protobuf.timestamp_pb2 import Timestamp
from tqdm import tqdm
from feast import feature_server, flags, flags_helper, utils
from feast.base_feature_view import BaseFeatureView
from feast.data_source import DataSource
from feast.diff.infra_diff import InfraDiff, diff_infra_protos
from feast.diff.registry_diff import RegistryDiff, apply_diff_to_registry, diff_between
from feast.entity import Entity
from feast.errors import (
EntityNotFoundException,
ExperimentalFeatureNotEnabled,
FeatureNameCollisionError,
FeatureViewNotFoundException,
RequestDataNotFoundInEntityDfException,
RequestDataNotFoundInEntityRowsException,
)
from feast.feast_object import FeastObject
from feast.feature_service import FeatureService
from feast.feature_view import (
DUMMY_ENTITY,
DUMMY_ENTITY_ID,
DUMMY_ENTITY_NAME,
DUMMY_ENTITY_VAL,
FeatureView,
)
from feast.inference import (
update_data_sources_with_inferred_event_timestamp_col,
update_entities_with_inferred_types_from_feature_views,
update_feature_views_with_inferred_features,
)
from feast.infra.infra_object import Infra
from feast.infra.provider import Provider, RetrievalJob, get_provider
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.online_response import OnlineResponse
from feast.protos.feast.core.InfraObject_pb2 import Infra as InfraProto
from feast.protos.feast.serving.ServingService_pb2 import (
FieldStatus,
GetOnlineFeaturesResponse,
)
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import RepeatedValue, Value
from feast.registry import Registry
from feast.repo_config import RepoConfig, load_repo_config
from feast.repo_contents import RepoContents
from feast.request_feature_view import RequestFeatureView
from feast.saved_dataset import SavedDataset, SavedDatasetStorage
from feast.type_map import (
feast_value_type_to_python_type,
python_values_to_proto_values,
)
from feast.usage import log_exceptions, log_exceptions_and_usage, set_usage_attribute
from feast.value_type import ValueType
from feast.version import get_version
warnings.simplefilter("once", DeprecationWarning)
if TYPE_CHECKING:
from feast.embedded_go.online_features_service import EmbeddedOnlineFeatureServer
class FeatureStore:
"""
A FeatureStore object is used to define, create, and retrieve features.
Args:
repo_path (optional): Path to a `feature_store.yaml` used to configure the
feature store.
config (optional): Configuration object used to configure the feature store.
"""
config: RepoConfig
repo_path: Path
_registry: Registry
_provider: Provider
_go_server: Optional["EmbeddedOnlineFeatureServer"]
@log_exceptions
def __init__(
self, repo_path: Optional[str] = None, config: Optional[RepoConfig] = None,
):
"""
Creates a FeatureStore object.
Raises:
ValueError: If both or neither of repo_path and config are specified.
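        Examples:
            Illustrative sketches; "feature_repo" is a placeholder directory containing a
            feature_store.yaml, and the RepoConfig field values are placeholders as well.
            >>> fs = FeatureStore(repo_path="feature_repo")
            >>> fs = FeatureStore( # doctest: +SKIP
            ...     config=RepoConfig(registry="data/registry.db", project="my_project", provider="local")
            ... )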
"""
if repo_path is not None and config is not None:
raise ValueError("You cannot specify both repo_path and config.")
if config is not None:
self.repo_path = Path(os.getcwd())
self.config = config
elif repo_path is not None:
self.repo_path = Path(repo_path)
self.config = load_repo_config(Path(repo_path))
else:
raise ValueError("Please specify one of repo_path or config.")
registry_config = self.config.get_registry_config()
self._registry = Registry(registry_config, repo_path=self.repo_path)
self._registry._initialize_registry()
self._provider = get_provider(self.config, self.repo_path)
self._go_server = None
@log_exceptions
def version(self) -> str:
"""Returns the version of the current Feast SDK/CLI."""
return get_version()
@property
def registry(self) -> Registry:
"""Gets the registry of this feature store."""
return self._registry
@property
def project(self) -> str:
"""Gets the project of this feature store."""
return self.config.project
def _get_provider(self) -> Provider:
        # TODO: Bake self.repo_path into self.config so that we only have one interface to paths
return self._provider
@log_exceptions_and_usage
def refresh_registry(self):
"""Fetches and caches a copy of the feature registry in memory.
Explicitly calling this method allows for direct control of the state of the registry cache. Every time this
method is called the complete registry state will be retrieved from the remote registry store backend
(e.g., GCS, S3), and the cache timer will be reset. If refresh_registry() is run before get_online_features()
is called, then get_online_features() will use the cached registry instead of retrieving (and caching) the
registry itself.
Additionally, the TTL for the registry cache can be set to infinity (by setting it to 0), which means that
refresh_registry() will become the only way to update the cached registry. If the TTL is set to a value
greater than 0, then once the cache becomes stale (more time than the TTL has passed), a new cache will be
downloaded synchronously, which may increase latencies if the triggering method is get_online_features().
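        Examples:
            Illustrative sketch; assumes a "feature_repo" directory with a configured registry,
            as in the other examples in this class.
            >>> fs = FeatureStore(repo_path="feature_repo")
            >>> fs.refresh_registry()
            >>> feature_views = fs.list_feature_views(allow_cache=True)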
"""
registry_config = self.config.get_registry_config()
registry = Registry(registry_config, repo_path=self.repo_path)
registry.refresh()
self._registry = registry
@log_exceptions_and_usage
def list_entities(self, allow_cache: bool = False) -> List[Entity]:
"""
Retrieves the list of entities from the registry.
Args:
allow_cache: Whether to allow returning entities from a cached registry.
Returns:
A list of entities.
"""
return self._list_entities(allow_cache)
def _list_entities(
self, allow_cache: bool = False, hide_dummy_entity: bool = True
) -> List[Entity]:
all_entities = self._registry.list_entities(
self.project, allow_cache=allow_cache
)
return [
entity
for entity in all_entities
if entity.name != DUMMY_ENTITY_NAME or not hide_dummy_entity
]
@log_exceptions_and_usage
def list_feature_services(self) -> List[FeatureService]:
"""
Retrieves the list of feature services from the registry.
Returns:
A list of feature services.
"""
return self._registry.list_feature_services(self.project)
@log_exceptions_and_usage
def list_feature_views(self, allow_cache: bool = False) -> List[FeatureView]:
"""
Retrieves the list of feature views from the registry.
Args:
            allow_cache: Whether to allow returning feature views from a cached registry.
Returns:
A list of feature views.
"""
return self._list_feature_views(allow_cache)
@log_exceptions_and_usage
def list_request_feature_views(
self, allow_cache: bool = False
) -> List[RequestFeatureView]:
"""
        Retrieves the list of request feature views from the registry.
        Args:
            allow_cache: Whether to allow returning request feature views from a cached registry.
        Returns:
            A list of request feature views.
"""
return self._registry.list_request_feature_views(
self.project, allow_cache=allow_cache
)
def _list_feature_views(
self, allow_cache: bool = False, hide_dummy_entity: bool = True,
) -> List[FeatureView]:
feature_views = []
for fv in self._registry.list_feature_views(
self.project, allow_cache=allow_cache
):
if hide_dummy_entity and fv.entities[0] == DUMMY_ENTITY_NAME:
fv.entities = []
feature_views.append(fv)
return feature_views
@log_exceptions_and_usage
def list_on_demand_feature_views(
self, allow_cache: bool = False
) -> List[OnDemandFeatureView]:
"""
Retrieves the list of on demand feature views from the registry.
Returns:
A list of on demand feature views.
"""
return self._registry.list_on_demand_feature_views(
self.project, allow_cache=allow_cache
)
@log_exceptions_and_usage
def list_data_sources(self, allow_cache: bool = False) -> List[DataSource]:
"""
Retrieves the list of data sources from the registry.
Args:
allow_cache: Whether to allow returning data sources from a cached registry.
Returns:
A list of data sources.
"""
return self._registry.list_data_sources(self.project, allow_cache=allow_cache)
@log_exceptions_and_usage
def get_entity(self, name: str, allow_registry_cache: bool = False) -> Entity:
"""
Retrieves an entity.
Args:
name: Name of entity.
allow_registry_cache: (Optional) Whether to allow returning this entity from a cached registry
Returns:
The specified entity.
Raises:
EntityNotFoundException: The entity could not be found.
"""
return self._registry.get_entity(
name, self.project, allow_cache=allow_registry_cache
)
@log_exceptions_and_usage
def get_feature_service(
self, name: str, allow_cache: bool = False
) -> FeatureService:
"""
Retrieves a feature service.
Args:
name: Name of feature service.
allow_cache: Whether to allow returning feature services from a cached registry.
Returns:
The specified feature service.
Raises:
FeatureServiceNotFoundException: The feature service could not be found.
"""
return self._registry.get_feature_service(name, self.project, allow_cache)
@log_exceptions_and_usage
def get_feature_view(
self, name: str, allow_registry_cache: bool = False
) -> FeatureView:
"""
Retrieves a feature view.
Args:
name: Name of feature view.
            allow_registry_cache: (Optional) Whether to allow returning this feature view from a cached registry
Returns:
The specified feature view.
Raises:
FeatureViewNotFoundException: The feature view could not be found.
"""
return self._get_feature_view(name, allow_registry_cache=allow_registry_cache)
def _get_feature_view(
self,
name: str,
hide_dummy_entity: bool = True,
allow_registry_cache: bool = False,
) -> FeatureView:
feature_view = self._registry.get_feature_view(
name, self.project, allow_cache=allow_registry_cache
)
if hide_dummy_entity and feature_view.entities[0] == DUMMY_ENTITY_NAME:
feature_view.entities = []
return feature_view
@log_exceptions_and_usage
def get_on_demand_feature_view(self, name: str) -> OnDemandFeatureView:
"""
        Retrieves an on demand feature view.
        Args:
            name: Name of on demand feature view.
        Returns:
            The specified on demand feature view.
Raises:
FeatureViewNotFoundException: The feature view could not be found.
"""
return self._registry.get_on_demand_feature_view(name, self.project)
@log_exceptions_and_usage
def get_data_source(self, name: str) -> DataSource:
"""
        Retrieves a single data source from the registry.
Args:
name: Name of the data source.
Returns:
The specified data source.
Raises:
DataSourceObjectNotFoundException: The data source could not be found.
"""
return self._registry.get_data_source(name, self.project)
@log_exceptions_and_usage
def delete_feature_view(self, name: str):
"""
Deletes a feature view.
Args:
name: Name of feature view.
Raises:
FeatureViewNotFoundException: The feature view could not be found.
"""
return self._registry.delete_feature_view(name, self.project)
@log_exceptions_and_usage
def delete_feature_service(self, name: str):
"""
Deletes a feature service.
Args:
name: Name of feature service.
Raises:
            FeatureServiceNotFoundException: The feature service could not be found.
"""
return self._registry.delete_feature_service(name, self.project)
def _get_features(
self, features: Union[List[str], FeatureService], allow_cache: bool = False,
) -> List[str]:
_features = features
if not _features:
raise ValueError("No features specified for retrieval")
_feature_refs = []
if isinstance(_features, FeatureService):
feature_service_from_registry = self.get_feature_service(
_features.name, allow_cache
)
if feature_service_from_registry != _features:
warnings.warn(
"The FeatureService object that has been passed in as an argument is"
"inconsistent with the version from Registry. Potentially a newer version"
"of the FeatureService has been applied to the registry."
)
for projection in feature_service_from_registry.feature_view_projections:
_feature_refs.extend(
[
f"{projection.name_to_use()}:{f.name}"
for f in projection.features
]
)
else:
assert isinstance(_features, list)
_feature_refs = _features
return _feature_refs
def _should_use_plan(self):
"""Returns True if _plan and _apply_diffs should be used, False otherwise."""
# Currently only the local provider with sqlite online store supports _plan and _apply_diffs.
return self.config.provider == "local" and (
self.config.online_store and self.config.online_store.type == "sqlite"
)
def _validate_all_feature_views(
self,
views_to_update: List[FeatureView],
odfvs_to_update: List[OnDemandFeatureView],
request_views_to_update: List[RequestFeatureView],
):
"""Validates all feature views."""
if (
not flags_helper.enable_on_demand_feature_views(self.config)
and len(odfvs_to_update) > 0
):
raise ExperimentalFeatureNotEnabled(flags.FLAG_ON_DEMAND_TRANSFORM_NAME)
set_usage_attribute("odfv", bool(odfvs_to_update))
_validate_feature_views(
[*views_to_update, *odfvs_to_update, *request_views_to_update]
)
def _make_inferences(
self,
data_sources_to_update: List[DataSource],
entities_to_update: List[Entity],
views_to_update: List[FeatureView],
odfvs_to_update: List[OnDemandFeatureView],
):
"""Makes inferences for entities, feature views, and odfvs."""
update_entities_with_inferred_types_from_feature_views(
entities_to_update, views_to_update, self.config
)
update_data_sources_with_inferred_event_timestamp_col(
data_sources_to_update, self.config
)
update_data_sources_with_inferred_event_timestamp_col(
[view.batch_source for view in views_to_update], self.config
)
# New feature views may reference previously applied entities.
entities = self._list_entities()
update_feature_views_with_inferred_features(
views_to_update, entities + entities_to_update, self.config
)
for odfv in odfvs_to_update:
odfv.infer_features()
@log_exceptions_and_usage
def _plan(
self, desired_repo_contents: RepoContents
) -> Tuple[RegistryDiff, InfraDiff, Infra]:
"""Dry-run registering objects to metadata store.
        The plan method dry-runs registering one or more definitions (e.g., Entity, FeatureView) and produces
        a list of all the changes that would be introduced in the feature repo. The changes computed by the plan
        command are for informational purposes only, and are not actually applied to the registry.
Args:
desired_repo_contents: The desired repo state.
Raises:
ValueError: The 'objects' parameter could not be parsed properly.
Examples:
Generate a plan adding an Entity and a FeatureView.
>>> from feast import FeatureStore, Entity, FeatureView, Feature, ValueType, FileSource, RepoConfig
>>> from feast.feature_store import RepoContents
>>> from datetime import timedelta
>>> fs = FeatureStore(repo_path="feature_repo")
>>> driver = Entity(name="driver_id", value_type=ValueType.INT64, description="driver id")
>>> driver_hourly_stats = FileSource(
... path="feature_repo/data/driver_stats.parquet",
... timestamp_field="event_timestamp",
... created_timestamp_column="created",
... )
>>> driver_hourly_stats_view = FeatureView(
... name="driver_hourly_stats",
... entities=["driver_id"],
... ttl=timedelta(seconds=86400 * 1),
... batch_source=driver_hourly_stats,
... )
>>> registry_diff, infra_diff, new_infra = fs._plan(RepoContents(
... data_sources={driver_hourly_stats},
... feature_views={driver_hourly_stats_view},
... on_demand_feature_views=set(),
... request_feature_views=set(),
... entities={driver},
... feature_services=set())) # register entity and feature view
"""
# Validate and run inference on all the objects to be registered.
self._validate_all_feature_views(
list(desired_repo_contents.feature_views),
list(desired_repo_contents.on_demand_feature_views),
list(desired_repo_contents.request_feature_views),
)
_validate_data_sources(list(desired_repo_contents.data_sources))
self._make_inferences(
list(desired_repo_contents.data_sources),
list(desired_repo_contents.entities),
list(desired_repo_contents.feature_views),
list(desired_repo_contents.on_demand_feature_views),
)
# Compute the desired difference between the current objects in the registry and
# the desired repo state.
registry_diff = diff_between(
self._registry, self.project, desired_repo_contents
)
# Compute the desired difference between the current infra, as stored in the registry,
# and the desired infra.
self._registry.refresh()
current_infra_proto = (
self._registry.cached_registry_proto.infra.__deepcopy__()
if self._registry.cached_registry_proto
else InfraProto()
)
desired_registry_proto = desired_repo_contents.to_registry_proto()
new_infra = self._provider.plan_infra(self.config, desired_registry_proto)
new_infra_proto = new_infra.to_proto()
infra_diff = diff_infra_protos(current_infra_proto, new_infra_proto)
return registry_diff, infra_diff, new_infra
@log_exceptions_and_usage
def _apply_diffs(
self, registry_diff: RegistryDiff, infra_diff: InfraDiff, new_infra: Infra
):
"""Applies the given diffs to the metadata store and infrastructure.
Args:
registry_diff: The diff between the current registry and the desired registry.
infra_diff: The diff between the current infra and the desired infra.
new_infra: The desired infra.
"""
infra_diff.update()
apply_diff_to_registry(
self._registry, registry_diff, self.project, commit=False
)
self._registry.update_infra(new_infra, self.project, commit=True)
@log_exceptions_and_usage
def apply(
self,
objects: Union[
DataSource,
Entity,
FeatureView,
OnDemandFeatureView,
RequestFeatureView,
FeatureService,
List[FeastObject],
],
objects_to_delete: Optional[List[FeastObject]] = None,
partial: bool = True,
):
"""Register objects to metadata store and update related infrastructure.
        The apply method takes one or more definitions (e.g., Entity, FeatureView) and registers or updates these
        objects in the Feast registry. Once the apply method has updated the infrastructure (e.g., created tables in
an online store), it will commit the updated registry. All operations are idempotent, meaning they can safely
be rerun.
Args:
objects: A single object, or a list of objects that should be registered with the Feature Store.
objects_to_delete: A list of objects to be deleted from the registry and removed from the
provider's infrastructure. This deletion will only be performed if partial is set to False.
partial: If True, apply will only handle the specified objects; if False, apply will also delete
all the objects in objects_to_delete, and tear down any associated cloud resources.
Raises:
ValueError: The 'objects' parameter could not be parsed properly.
Examples:
Register an Entity and a FeatureView.
>>> from feast import FeatureStore, Entity, FeatureView, Feature, ValueType, FileSource, RepoConfig
>>> from datetime import timedelta
>>> fs = FeatureStore(repo_path="feature_repo")
>>> driver = Entity(name="driver_id", value_type=ValueType.INT64, description="driver id")
>>> driver_hourly_stats = FileSource(
... path="feature_repo/data/driver_stats.parquet",
... timestamp_field="event_timestamp",
... created_timestamp_column="created",
... )
>>> driver_hourly_stats_view = FeatureView(
... name="driver_hourly_stats",
... entities=["driver_id"],
... ttl=timedelta(seconds=86400 * 1),
... batch_source=driver_hourly_stats,
... )
>>> fs.apply([driver_hourly_stats_view, driver]) # register entity and feature view
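            Delete a previously applied feature view while keeping the entity (illustrative; with
            partial=False the provided objects become the full desired state and the deleted view's
            infrastructure is torn down).
            >>> fs.apply(objects=[driver], objects_to_delete=[driver_hourly_stats_view], partial=False) # doctest: +SKIP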
"""
# TODO: Add locking
if not isinstance(objects, Iterable):
objects = [objects]
assert isinstance(objects, list)
if not objects_to_delete:
objects_to_delete = []
# Separate all objects into entities, feature services, and different feature view types.
entities_to_update = [ob for ob in objects if isinstance(ob, Entity)]
views_to_update = [ob for ob in objects if isinstance(ob, FeatureView)]
request_views_to_update = [
ob for ob in objects if isinstance(ob, RequestFeatureView)
]
odfvs_to_update = [ob for ob in objects if isinstance(ob, OnDemandFeatureView)]
services_to_update = [ob for ob in objects if isinstance(ob, FeatureService)]
data_sources_set_to_update = {
ob for ob in objects if isinstance(ob, DataSource)
}
for fv in views_to_update:
data_sources_set_to_update.add(fv.batch_source)
if fv.stream_source:
data_sources_set_to_update.add(fv.stream_source)
if request_views_to_update:
warnings.warn(
"Request feature view is deprecated. "
"Please use request data source instead",
DeprecationWarning,
)
for rfv in request_views_to_update:
data_sources_set_to_update.add(rfv.request_data_source)
for odfv in odfvs_to_update:
for v in odfv.source_request_sources.values():
data_sources_set_to_update.add(v)
data_sources_to_update = list(data_sources_set_to_update)
# Validate all feature views and make inferences.
self._validate_all_feature_views(
views_to_update, odfvs_to_update, request_views_to_update
)
self._make_inferences(
data_sources_to_update, entities_to_update, views_to_update, odfvs_to_update
)
# Handle all entityless feature views by using DUMMY_ENTITY as a placeholder entity.
entities_to_update.append(DUMMY_ENTITY)
# Add all objects to the registry and update the provider's infrastructure.
for ds in data_sources_to_update:
self._registry.apply_data_source(ds, project=self.project, commit=False)
for view in itertools.chain(
views_to_update, odfvs_to_update, request_views_to_update
):
self._registry.apply_feature_view(view, project=self.project, commit=False)
for ent in entities_to_update:
self._registry.apply_entity(ent, project=self.project, commit=False)
for feature_service in services_to_update:
self._registry.apply_feature_service(
feature_service, project=self.project, commit=False
)
if not partial:
# Delete all registry objects that should not exist.
entities_to_delete = [
ob for ob in objects_to_delete if isinstance(ob, Entity)
]
views_to_delete = [
ob for ob in objects_to_delete if isinstance(ob, FeatureView)
]
request_views_to_delete = [
ob for ob in objects_to_delete if isinstance(ob, RequestFeatureView)
]
odfvs_to_delete = [
ob for ob in objects_to_delete if isinstance(ob, OnDemandFeatureView)
]
services_to_delete = [
ob for ob in objects_to_delete if isinstance(ob, FeatureService)
]
data_sources_to_delete = [
ob for ob in objects_to_delete if isinstance(ob, DataSource)
]
for data_source in data_sources_to_delete:
self._registry.delete_data_source(
data_source.name, project=self.project, commit=False
)
for entity in entities_to_delete:
self._registry.delete_entity(
entity.name, project=self.project, commit=False
)
for view in views_to_delete:
self._registry.delete_feature_view(
view.name, project=self.project, commit=False
)
for request_view in request_views_to_delete:
self._registry.delete_feature_view(
request_view.name, project=self.project, commit=False
)
for odfv in odfvs_to_delete:
self._registry.delete_feature_view(
odfv.name, project=self.project, commit=False
)
for service in services_to_delete:
self._registry.delete_feature_service(
service.name, project=self.project, commit=False
)
self._get_provider().update_infra(
project=self.project,
tables_to_delete=views_to_delete if not partial else [],
tables_to_keep=views_to_update,
entities_to_delete=entities_to_delete if not partial else [],
entities_to_keep=entities_to_update,
partial=partial,
)
self._registry.commit()
# go server needs to be reloaded to apply new configuration.
# we're stopping it here
# new server will be instantiated on the next online request
self._teardown_go_server()
@log_exceptions_and_usage
def teardown(self):
"""Tears down all local and cloud resources for the feature store."""
tables: List[FeatureView] = []
feature_views = self.list_feature_views()
tables.extend(feature_views)
entities = self.list_entities()
self._get_provider().teardown_infra(self.project, tables, entities)
self._registry.teardown()
self._teardown_go_server()
@log_exceptions_and_usage
def get_historical_features(
self,
entity_df: Union[pd.DataFrame, str],
features: Union[List[str], FeatureService],
full_feature_names: bool = False,
) -> RetrievalJob:
"""Enrich an entity dataframe with historical feature values for either training or batch scoring.
This method joins historical feature data from one or more feature views to an entity dataframe by using a time
travel join.
Each feature view is joined to the entity dataframe using all entities configured for the respective feature
view. All configured entities must be available in the entity dataframe. Therefore, the entity dataframe must
contain all entities found in all feature views, but the individual feature views can have different entities.
Time travel is based on the configured TTL for each feature view. A shorter TTL will limit the
amount of scanning that will be done in order to find feature data for a specific entity key. Setting a short
TTL may result in null values being returned.
Args:
entity_df (Union[pd.DataFrame, str]): An entity dataframe is a collection of rows containing all entity
                columns (e.g., customer_id, driver_id) on which features need to be joined, as well as an event_timestamp
column used to ensure point-in-time correctness. Either a Pandas DataFrame can be provided or a string
SQL query. The query must be of a format supported by the configured offline store (e.g., BigQuery)
features: The list of features that should be retrieved from the offline store. These features can be
specified either as a list of string feature references or as a feature service. String feature
references must have format "feature_view:feature", e.g. "customer_fv:daily_transactions".
full_feature_names: If True, feature names will be prefixed with the corresponding feature view name,
changing them from the format "feature" to "feature_view__feature" (e.g. "daily_transactions"
changes to "customer_fv__daily_transactions").
Returns:
RetrievalJob which can be used to materialize the results.
Raises:
ValueError: Both or neither of features and feature_refs are specified.
Examples:
Retrieve historical features from a local offline store.
>>> from feast import FeatureStore, RepoConfig
>>> import pandas as pd
>>> fs = FeatureStore(repo_path="feature_repo")
>>> entity_df = pd.DataFrame.from_dict(
... {
... "driver_id": [1001, 1002],
... "event_timestamp": [
... datetime(2021, 4, 12, 10, 59, 42),
... datetime(2021, 4, 12, 8, 12, 10),
... ],
... }
... )
>>> retrieval_job = fs.get_historical_features(
... entity_df=entity_df,
... features=[
... "driver_hourly_stats:conv_rate",
... "driver_hourly_stats:acc_rate",
... "driver_hourly_stats:avg_daily_trips",
... ],
... )
>>> feature_data = retrieval_job.to_df()
"""
_feature_refs = self._get_features(features)
(
all_feature_views,
all_request_feature_views,
all_on_demand_feature_views,
) = self._get_feature_views_to_use(features)
if all_request_feature_views:
warnings.warn(
"Request feature view is deprecated. "
"Please use request data source instead",
DeprecationWarning,
)
        # TODO(achal): _group_feature_refs returns the on demand feature views, but it's not passed into the provider.
# This is a weird interface quirk - we should revisit the `get_historical_features` to
# pass in the on demand feature views as well.
fvs, odfvs, request_fvs, request_fv_refs = _group_feature_refs(
_feature_refs,
all_feature_views,
all_request_feature_views,
all_on_demand_feature_views,
)
feature_views = list(view for view, _ in fvs)
on_demand_feature_views = list(view for view, _ in odfvs)
request_feature_views = list(view for view, _ in request_fvs)
set_usage_attribute("odfv", bool(on_demand_feature_views))
set_usage_attribute("request_fv", bool(request_feature_views))
# Check that the right request data is present in the entity_df
if type(entity_df) == pd.DataFrame:
entity_pd_df = cast(pd.DataFrame, entity_df)
for fv in request_feature_views:
for feature in fv.features:
if feature.name not in entity_pd_df.columns:
raise RequestDataNotFoundInEntityDfException(
feature_name=feature.name, feature_view_name=fv.name
)
for odfv in on_demand_feature_views:
odfv_request_data_schema = odfv.get_request_data_schema()
for feature_name in odfv_request_data_schema.keys():
if feature_name not in entity_pd_df.columns:
raise RequestDataNotFoundInEntityDfException(
feature_name=feature_name, feature_view_name=odfv.name,
)
_validate_feature_refs(_feature_refs, full_feature_names)
# Drop refs that refer to RequestFeatureViews since they don't need to be fetched and
# already exist in the entity_df
_feature_refs = [ref for ref in _feature_refs if ref not in request_fv_refs]
provider = self._get_provider()
job = provider.get_historical_features(
self.config,
feature_views,
_feature_refs,
entity_df,
self._registry,
self.project,
full_feature_names,
)
return job
@log_exceptions_and_usage
def create_saved_dataset(
self,
from_: RetrievalJob,
name: str,
storage: SavedDatasetStorage,
tags: Optional[Dict[str, str]] = None,
feature_service: Optional[FeatureService] = None,
) -> SavedDataset:
"""
        Executes the provided retrieval job and persists its outcome in the given storage.
        The storage type (e.g., BigQuery or Redshift) must be the same as the globally configured offline store.
        After the data is successfully persisted, a saved dataset object with the dataset metadata is committed to the registry.
        The name of the saved dataset should be unique within a project, since it is possible to overwrite a previously
        stored dataset with the same name.
Returns:
SavedDataset object with attached RetrievalJob
Raises:
            ValueError: The given retrieval job doesn't have metadata.
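        Examples:
            Illustrative sketch; the storage class, its import path and the output path are assumptions,
            so use the SavedDatasetStorage implementation that matches your configured offline store.
            >>> from feast.infra.offline_stores.file_source import SavedDatasetFileStorage # doctest: +SKIP
            >>> job = fs.get_historical_features(entity_df=entity_df, features=["driver_hourly_stats:conv_rate"]) # doctest: +SKIP
            >>> dataset = fs.create_saved_dataset( # doctest: +SKIP
            ...     from_=job,
            ...     name="driver_training_dataset",
            ...     storage=SavedDatasetFileStorage(path="data/driver_training_dataset.parquet"),
            ... )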
"""
warnings.warn(
"Saving dataset is an experimental feature. "
"This API is unstable and it could and most probably will be changed in the future. "
"We do not guarantee that future changes will maintain backward compatibility.",
RuntimeWarning,
)
if not from_.metadata:
raise ValueError(
"RetrievalJob must contains metadata. "
"Use RetrievalJob produced by get_historical_features"
)
dataset = SavedDataset(
name=name,
features=from_.metadata.features,
join_keys=from_.metadata.keys,
full_feature_names=from_.full_feature_names,
storage=storage,
tags=tags,
feature_service_name=feature_service.name if feature_service else None,
)
dataset.min_event_timestamp = from_.metadata.min_event_timestamp
dataset.max_event_timestamp = from_.metadata.max_event_timestamp
from_.persist(storage)
dataset = dataset.with_retrieval_job(
self._get_provider().retrieve_saved_dataset(
config=self.config, dataset=dataset
)
)
self._registry.apply_saved_dataset(dataset, self.project, commit=True)
return dataset
@log_exceptions_and_usage
def get_saved_dataset(self, name: str) -> SavedDataset:
"""
        Finds a saved dataset in the registry by the provided name and
        creates a retrieval job to pull the whole dataset from storage (the offline store).
        If the dataset couldn't be found by the provided name, a SavedDatasetNotFound exception will be raised.
        Data will be retrieved from the globally configured offline store.
Returns:
SavedDataset with RetrievalJob attached
Raises:
SavedDatasetNotFound
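        Examples:
            Illustrative sketch; assumes a dataset named "driver_training_dataset" was previously
            created with create_saved_dataset().
            >>> dataset = fs.get_saved_dataset("driver_training_dataset") # doctest: +SKIP
            >>> training_df = dataset.to_df() # doctest: +SKIP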
"""
warnings.warn(
"Retrieving datasets is an experimental feature. "
"This API is unstable and it could and most probably will be changed in the future. "
"We do not guarantee that future changes will maintain backward compatibility.",
RuntimeWarning,
)
dataset = self._registry.get_saved_dataset(name, self.project)
provider = self._get_provider()
retrieval_job = provider.retrieve_saved_dataset(
config=self.config, dataset=dataset
)
return dataset.with_retrieval_job(retrieval_job)
@log_exceptions_and_usage
def materialize_incremental(
self, end_date: datetime, feature_views: Optional[List[str]] = None,
) -> None:
"""
Materialize incremental new data from the offline store into the online store.
This method loads incremental new feature data up to the specified end time from either
the specified feature views, or all feature views if none are specified,
into the online store where it is available for online serving. The start time of
the interval materialized is either the most recent end time of a prior materialization or
(now - ttl) if no such prior materialization exists.
Args:
end_date (datetime): End date for time range of data to materialize into the online store
feature_views (List[str]): Optional list of feature view names. If selected, will only run
materialization for the specified feature views.
Raises:
Exception: A feature view being materialized does not have a TTL set.
Examples:
Materialize all features into the online store up to 5 minutes ago.
>>> from feast import FeatureStore, RepoConfig
>>> from datetime import datetime, timedelta
>>> fs = FeatureStore(repo_path="feature_repo")
>>> fs.materialize_incremental(end_date=datetime.utcnow() - timedelta(minutes=5))
Materializing...
<BLANKLINE>
...
"""
feature_views_to_materialize: List[FeatureView] = []
if feature_views is None:
feature_views_to_materialize = self._list_feature_views(
hide_dummy_entity=False
)
feature_views_to_materialize = [
fv for fv in feature_views_to_materialize if fv.online
]
else:
for name in feature_views:
feature_view = self._get_feature_view(name, hide_dummy_entity=False)
if not feature_view.online:
raise ValueError(
f"FeatureView {feature_view.name} is not configured to be served online."
)
feature_views_to_materialize.append(feature_view)
_print_materialization_log(
None,
end_date,
len(feature_views_to_materialize),
self.config.online_store.type,
)
# TODO paging large loads
for feature_view in feature_views_to_materialize:
start_date = feature_view.most_recent_end_time
if start_date is None:
if feature_view.ttl is None:
raise Exception(
f"No start time found for feature view {feature_view.name}. materialize_incremental() requires"
f" either a ttl to be set or for materialize() to have been run at least once."
)
start_date = datetime.utcnow() - feature_view.ttl
provider = self._get_provider()
print(
f"{Style.BRIGHT + Fore.GREEN}{feature_view.name}{Style.RESET_ALL}"
f" from {Style.BRIGHT + Fore.GREEN}{start_date.replace(microsecond=0).astimezone()}{Style.RESET_ALL}"
f" to {Style.BRIGHT + Fore.GREEN}{end_date.replace(microsecond=0).astimezone()}{Style.RESET_ALL}:"
)
def tqdm_builder(length):
return tqdm(total=length, ncols=100)
start_date = utils.make_tzaware(start_date)
end_date = utils.make_tzaware(end_date)
provider.materialize_single_feature_view(
config=self.config,
feature_view=feature_view,
start_date=start_date,
end_date=end_date,
registry=self._registry,
project=self.project,
tqdm_builder=tqdm_builder,
)
self._registry.apply_materialization(
feature_view, self.project, start_date, end_date,
)
@log_exceptions_and_usage
def materialize(
self,
start_date: datetime,
end_date: datetime,
feature_views: Optional[List[str]] = None,
) -> None:
"""
Materialize data from the offline store into the online store.
This method loads feature data in the specified interval from either
the specified feature views, or all feature views if none are specified,
into the online store where it is available for online serving.
Args:
start_date (datetime): Start date for time range of data to materialize into the online store
end_date (datetime): End date for time range of data to materialize into the online store
feature_views (List[str]): Optional list of feature view names. If selected, will only run
materialization for the specified feature views.
Examples:
Materialize all features into the online store over the interval
from 3 hours ago to 10 minutes ago.
>>> from feast import FeatureStore, RepoConfig
>>> from datetime import datetime, timedelta
>>> fs = FeatureStore(repo_path="feature_repo")
>>> fs.materialize(
... start_date=datetime.utcnow() - timedelta(hours=3), end_date=datetime.utcnow() - timedelta(minutes=10)
... )
Materializing...
<BLANKLINE>
...
"""
if utils.make_tzaware(start_date) > utils.make_tzaware(end_date):
raise ValueError(
f"The given start_date {start_date} is greater than the given end_date {end_date}."
)
feature_views_to_materialize: List[FeatureView] = []
if feature_views is None:
feature_views_to_materialize = self._list_feature_views(
hide_dummy_entity=False
)
feature_views_to_materialize = [
fv for fv in feature_views_to_materialize if fv.online
]
else:
for name in feature_views:
feature_view = self._get_feature_view(name, hide_dummy_entity=False)
if not feature_view.online:
raise ValueError(
f"FeatureView {feature_view.name} is not configured to be served online."
)
feature_views_to_materialize.append(feature_view)
_print_materialization_log(
start_date,
end_date,
len(feature_views_to_materialize),
self.config.online_store.type,
)
# TODO paging large loads
for feature_view in feature_views_to_materialize:
provider = self._get_provider()
print(f"{Style.BRIGHT + Fore.GREEN}{feature_view.name}{Style.RESET_ALL}:")
def tqdm_builder(length):
return tqdm(total=length, ncols=100)
start_date = utils.make_tzaware(start_date)
end_date = utils.make_tzaware(end_date)
provider.materialize_single_feature_view(
config=self.config,
feature_view=feature_view,
start_date=start_date,
end_date=end_date,
registry=self._registry,
project=self.project,
tqdm_builder=tqdm_builder,
)
self._registry.apply_materialization(
feature_view, self.project, start_date, end_date,
)
@log_exceptions_and_usage
def push(self, push_source_name: str, df: pd.DataFrame):
"""
Push features to a push source. This updates all the feature views that have the push source as stream source.
Args:
push_source_name: The name of the push source we want to push data to.
df: the data being pushed.
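        Examples:
            Illustrative sketch; "driver_stats_push_source" and the dataframe columns are placeholders
            that must match a PushSource defined in your feature repo.
            >>> from datetime import datetime
            >>> import pandas as pd
            >>> event_df = pd.DataFrame.from_dict(
            ...     {
            ...         "driver_id": [1001],
            ...         "event_timestamp": [datetime(2021, 5, 13, 10, 59, 42)],
            ...         "conv_rate": [0.91],
            ...     }
            ... )
            >>> fs.push("driver_stats_push_source", event_df) # doctest: +SKIP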
"""
from feast.data_source import PushSource
all_fvs = self.list_feature_views(allow_cache=True)
fvs_with_push_sources = {
fv
for fv in all_fvs
if (
fv.stream_source is not None
and isinstance(fv.stream_source, PushSource)
and fv.stream_source.name == push_source_name
)
}
for fv in fvs_with_push_sources:
self.write_to_online_store(fv.name, df, allow_registry_cache=True)
@log_exceptions_and_usage
def write_to_online_store(
self,
feature_view_name: str,
df: pd.DataFrame,
allow_registry_cache: bool = True,
):
"""
        Ingests data directly into the online store.
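        Example (illustrative sketch; "driver_hourly_stats" and df are placeholders whose columns must
        match the feature view's schema and entity join keys):
            >>> fs.write_to_online_store(feature_view_name="driver_hourly_stats", df=df) # doctest: +SKIP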
"""
# TODO: restrict this to work with online StreamFeatureViews and validate the FeatureView type
feature_view = self.get_feature_view(
feature_view_name, allow_registry_cache=allow_registry_cache
)
entities = []
for entity_name in feature_view.entities:
entities.append(
self.get_entity(entity_name, allow_registry_cache=allow_registry_cache)
)
provider = self._get_provider()
provider.ingest_df(feature_view, entities, df)
@log_exceptions_and_usage
def get_online_features(
self,
features: Union[List[str], FeatureService],
entity_rows: List[Dict[str, Any]],
full_feature_names: bool = False,
) -> OnlineResponse:
"""
Retrieves the latest online feature data.
Note: This method will download the full feature registry the first time it is run. If you are using a
remote registry like GCS or S3 then that may take a few seconds. The registry remains cached up to a TTL
duration (which can be set to infinity). If the cached registry is stale (more time than the TTL has
passed), then a new registry will be downloaded synchronously by this method. This download may
introduce latency to online feature retrieval. In order to avoid synchronous downloads, please call
refresh_registry() prior to the TTL being reached. Remember it is possible to set the cache TTL to
infinity (cache forever).
Args:
features: The list of features that should be retrieved from the online store. These features can be
specified either as a list of string feature references or as a feature service. String feature
references must have format "feature_view:feature", e.g. "customer_fv:daily_transactions".
entity_rows: A list of dictionaries where each key-value is an entity-name, entity-value pair.
full_feature_names: If True, feature names will be prefixed with the corresponding feature view name,
changing them from the format "feature" to "feature_view__feature" (e.g. "daily_transactions"
changes to "customer_fv__daily_transactions").
Returns:
OnlineResponse containing the feature data in records.
Raises:
Exception: No entity with the specified name exists.
Examples:
Retrieve online features from an online store.
>>> from feast import FeatureStore, RepoConfig
>>> fs = FeatureStore(repo_path="feature_repo")
>>> online_response = fs.get_online_features(
... features=[
... "driver_hourly_stats:conv_rate",
... "driver_hourly_stats:acc_rate",
... "driver_hourly_stats:avg_daily_trips",
... ],
... entity_rows=[{"driver_id": 1001}, {"driver_id": 1002}, {"driver_id": 1003}, {"driver_id": 1004}],
... )
>>> online_response_dict = online_response.to_dict()
"""
columnar: Dict[str, List[Any]] = {k: [] for k in entity_rows[0].keys()}
for entity_row in entity_rows:
for key, value in entity_row.items():
try:
columnar[key].append(value)
except KeyError as e:
raise ValueError("All entity_rows must have the same keys.") from e
return self._get_online_features(
features=features,
entity_values=columnar,
full_feature_names=full_feature_names,
native_entity_values=True,
)
def _get_online_features(
self,
features: Union[List[str], FeatureService],
entity_values: Mapping[
str, Union[Sequence[Any], Sequence[Value], RepeatedValue]
],
full_feature_names: bool = False,
native_entity_values: bool = True,
):
# Extract Sequence from RepeatedValue Protobuf.
entity_value_lists: Dict[str, Union[List[Any], List[Value]]] = {
k: list(v) if isinstance(v, Sequence) else list(v.val)
for k, v in entity_values.items()
}
# If Go feature server is enabled, send request to it instead of going through regular Python logic
if self.config.go_feature_retrieval:
from feast.embedded_go.online_features_service import (
EmbeddedOnlineFeatureServer,
)
# Lazily start the go server on the first request
if self._go_server is None:
self._go_server = EmbeddedOnlineFeatureServer(
str(self.repo_path.absolute()), self.config, self
)
entity_native_values: Dict[str, List[Any]]
if not native_entity_values:
# Convert proto types to native types since Go feature server currently
# only handles native types.
# TODO(felixwang9817): Remove this logic once native types are supported.
entity_native_values = {
k: [
feast_value_type_to_python_type(proto_value)
for proto_value in v
]
for k, v in entity_value_lists.items()
}
else:
entity_native_values = entity_value_lists
return self._go_server.get_online_features(
features_refs=features if isinstance(features, list) else [],
feature_service=features
if isinstance(features, FeatureService)
else None,
entities=entity_native_values,
request_data={}, # TODO: add request data parameter to public API
full_feature_names=full_feature_names,
)
_feature_refs = self._get_features(features, allow_cache=True)
(
requested_feature_views,
requested_request_feature_views,
requested_on_demand_feature_views,
) = self._get_feature_views_to_use(
features=features, allow_cache=True, hide_dummy_entity=False
)
if requested_request_feature_views:
warnings.warn(
"Request feature view is deprecated. "
"Please use request data source instead",
DeprecationWarning,
)
(
entity_name_to_join_key_map,
entity_type_map,
join_keys_set,
) = self._get_entity_maps(requested_feature_views)
entity_proto_values: Dict[str, List[Value]]
if native_entity_values:
# Convert values to Protobuf once.
entity_proto_values = {
k: python_values_to_proto_values(
v, entity_type_map.get(k, ValueType.UNKNOWN)
)
for k, v in entity_value_lists.items()
}
else:
entity_proto_values = entity_value_lists
num_rows = _validate_entity_values(entity_proto_values)
_validate_feature_refs(_feature_refs, full_feature_names)
(
grouped_refs,
grouped_odfv_refs,
grouped_request_fv_refs,
_,
) = _group_feature_refs(
_feature_refs,
requested_feature_views,
requested_request_feature_views,
requested_on_demand_feature_views,
)
set_usage_attribute("odfv", bool(grouped_odfv_refs))
set_usage_attribute("request_fv", bool(grouped_request_fv_refs))
# All requested features should be present in the result.
requested_result_row_names = {
feat_ref.replace(":", "__") for feat_ref in _feature_refs
}
if not full_feature_names:
requested_result_row_names = {
name.rpartition("__")[-1] for name in requested_result_row_names
}
feature_views = list(view for view, _ in grouped_refs)
needed_request_data, needed_request_fv_features = self.get_needed_request_data(
grouped_odfv_refs, grouped_request_fv_refs
)
join_key_values: Dict[str, List[Value]] = {}
request_data_features: Dict[str, List[Value]] = {}
# Entity rows may be either entities or request data.
for join_key_or_entity_name, values in entity_proto_values.items():
# Found request data
if (
join_key_or_entity_name in needed_request_data
or join_key_or_entity_name in needed_request_fv_features
):
if join_key_or_entity_name in needed_request_fv_features:
# If the data was requested as a feature then
# make sure it appears in the result.
requested_result_row_names.add(join_key_or_entity_name)
request_data_features[join_key_or_entity_name] = values
else:
if join_key_or_entity_name in join_keys_set:
join_key = join_key_or_entity_name
else:
try:
join_key = entity_name_to_join_key_map[join_key_or_entity_name]
except KeyError:
raise EntityNotFoundException(
join_key_or_entity_name, self.project
)
else:
warnings.warn(
"Using entity name is deprecated. Use join_key instead."
)
# All join keys should be returned in the result.
requested_result_row_names.add(join_key)
join_key_values[join_key] = values
self.ensure_request_data_values_exist(
needed_request_data, needed_request_fv_features, request_data_features
)
# Populate online features response proto with join keys and request data features
online_features_response = GetOnlineFeaturesResponse(results=[])
self._populate_result_rows_from_columnar(
online_features_response=online_features_response,
data=dict(**join_key_values, **request_data_features),
)
# Add the Entityless case after populating result rows to avoid having to remove
# it later.
entityless_case = DUMMY_ENTITY_NAME in [
entity_name
for feature_view in feature_views
for entity_name in feature_view.entities
]
if entityless_case:
join_key_values[DUMMY_ENTITY_ID] = python_values_to_proto_values(
[DUMMY_ENTITY_VAL] * num_rows, DUMMY_ENTITY.value_type
)
provider = self._get_provider()
for table, requested_features in grouped_refs:
# Get the correct set of entity values with the correct join keys.
table_entity_values, idxs = self._get_unique_entities(
table, join_key_values, entity_name_to_join_key_map,
)
# Fetch feature data for the minimum set of Entities.
feature_data = self._read_from_online_store(
table_entity_values, provider, requested_features, table,
)
# Populate the result_rows with the Features from the OnlineStore inplace.
self._populate_response_from_feature_data(
feature_data,
idxs,
online_features_response,
full_feature_names,
requested_features,
table,
)
if grouped_odfv_refs:
self._augment_response_with_on_demand_transforms(
online_features_response,
_feature_refs,
requested_on_demand_feature_views,
full_feature_names,
)
self._drop_unneeded_columns(
online_features_response, requested_result_row_names
)
return OnlineResponse(online_features_response)
@staticmethod
def _get_columnar_entity_values(
rowise: Optional[List[Dict[str, Any]]], columnar: Optional[Dict[str, List[Any]]]
) -> Dict[str, List[Any]]:
if (rowise is None and columnar is None) or (
rowise is not None and columnar is not None
):
raise ValueError(
"Exactly one of `columnar_entity_values` and `rowise_entity_values` must be set."
)
if rowise is not None:
# Convert entity_rows from rowise to columnar.
res = defaultdict(list)
for entity_row in rowise:
for key, value in entity_row.items():
res[key].append(value)
return res
return cast(Dict[str, List[Any]], columnar)
def _get_entity_maps(
self, feature_views
) -> Tuple[Dict[str, str], Dict[str, ValueType], Set[str]]:
entities = self._list_entities(allow_cache=True, hide_dummy_entity=False)
entity_name_to_join_key_map: Dict[str, str] = {}
entity_type_map: Dict[str, ValueType] = {}
for entity in entities:
entity_name_to_join_key_map[entity.name] = entity.join_key
entity_type_map[entity.name] = entity.value_type
for feature_view in feature_views:
for entity_name in feature_view.entities:
entity = self._registry.get_entity(
entity_name, self.project, allow_cache=True
)
# User directly uses join_key as the entity reference in the entity_rows for the
# entity mapping case.
entity_name = feature_view.projection.join_key_map.get(
entity.join_key, entity.name
)
join_key = feature_view.projection.join_key_map.get(
entity.join_key, entity.join_key
)
entity_name_to_join_key_map[entity_name] = join_key
entity_type_map[join_key] = entity.value_type
return (
entity_name_to_join_key_map,
entity_type_map,
set(entity_name_to_join_key_map.values()),
)
@staticmethod
def _get_table_entity_values(
table: FeatureView,
entity_name_to_join_key_map: Dict[str, str],
join_key_proto_values: Dict[str, List[Value]],
) -> Dict[str, List[Value]]:
# The correct join_keys expected by the OnlineStore for this Feature View.
table_join_keys = [
entity_name_to_join_key_map[entity_name] for entity_name in table.entities
]
# If the FeatureView has a Projection then the join keys may be aliased.
alias_to_join_key_map = {v: k for k, v in table.projection.join_key_map.items()}
# Subset to columns which are relevant to this FeatureView and
# give them the correct names.
entity_values = {
alias_to_join_key_map.get(k, k): v
for k, v in join_key_proto_values.items()
if alias_to_join_key_map.get(k, k) in table_join_keys
}
return entity_values
@staticmethod
def _populate_result_rows_from_columnar(
online_features_response: GetOnlineFeaturesResponse,
data: Dict[str, List[Value]],
):
timestamp = Timestamp() # Only initialize this timestamp once.
# Add more values to the existing result rows
for feature_name, feature_values in data.items():
online_features_response.metadata.feature_names.val.append(feature_name)
online_features_response.results.append(
GetOnlineFeaturesResponse.FeatureVector(
values=feature_values,
statuses=[FieldStatus.PRESENT] * len(feature_values),
event_timestamps=[timestamp] * len(feature_values),
)
)
@staticmethod
def get_needed_request_data(
grouped_odfv_refs: List[Tuple[OnDemandFeatureView, List[str]]],
grouped_request_fv_refs: List[Tuple[RequestFeatureView, List[str]]],
) -> Tuple[Set[str], Set[str]]:
needed_request_data: Set[str] = set()
needed_request_fv_features: Set[str] = set()
for odfv, _ in grouped_odfv_refs:
odfv_request_data_schema = odfv.get_request_data_schema()
needed_request_data.update(odfv_request_data_schema.keys())
for request_fv, _ in grouped_request_fv_refs:
for feature in request_fv.features:
needed_request_fv_features.add(feature.name)
return needed_request_data, needed_request_fv_features
@staticmethod
def ensure_request_data_values_exist(
needed_request_data: Set[str],
needed_request_fv_features: Set[str],
request_data_features: Dict[str, List[Any]],
):
if len(needed_request_data) + len(needed_request_fv_features) != len(
request_data_features.keys()
):
missing_features = [
x
for x in itertools.chain(
needed_request_data, needed_request_fv_features
)
if x not in request_data_features
]
raise RequestDataNotFoundInEntityRowsException(
feature_names=missing_features
)
def _get_unique_entities(
self,
table: FeatureView,
join_key_values: Dict[str, List[Value]],
entity_name_to_join_key_map: Dict[str, str],
) -> Tuple[Tuple[Dict[str, Value], ...], Tuple[List[int], ...]]:
"""Return the set of unique composite Entities for a Feature View and the indexes at which they appear.
This method allows us to query the OnlineStore for data we need only once
rather than requesting and processing data for the same combination of
Entities multiple times.
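        For example (illustrative), join-key values {"driver_id": [1001, 1002, 1001]} produce the unique
        entities ({"driver_id": 1001}, {"driver_id": 1002}) with the index lists ([0, 2], [1]).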
"""
# Get the correct set of entity values with the correct join keys.
table_entity_values = self._get_table_entity_values(
table, entity_name_to_join_key_map, join_key_values,
)
# Convert back to rowise.
keys = table_entity_values.keys()
# Sort the rowise data to allow for grouping but keep original index. This lambda is
        # sufficient as Entity types cannot be complex (i.e., lists).
rowise = list(enumerate(zip(*table_entity_values.values())))
rowise.sort(
key=lambda row: tuple(getattr(x, x.WhichOneof("val")) for x in row[1])
)
# Identify unique entities and the indexes at which they occur.
unique_entities: Tuple[Dict[str, Value], ...]
indexes: Tuple[List[int], ...]
unique_entities, indexes = tuple(
zip(
*[
(dict(zip(keys, k)), [_[0] for _ in g])
for k, g in itertools.groupby(rowise, key=lambda x: x[1])
]
)
)
return unique_entities, indexes
def _read_from_online_store(
self,
entity_rows: Iterable[Mapping[str, Value]],
provider: Provider,
requested_features: List[str],
table: FeatureView,
) -> List[Tuple[List[Timestamp], List["FieldStatus.ValueType"], List[Value]]]:
"""Read and process data from the OnlineStore for a given FeatureView.
This method guarantees that the order of the data in each element of the
List returned is the same as the order of `requested_features`.
This method assumes that `provider.online_read` returns data for each
combination of Entities in `entity_rows` in the same order as they
are provided.
"""
# Instantiate one EntityKeyProto per Entity.
entity_key_protos = [
EntityKeyProto(join_keys=row.keys(), entity_values=row.values())
for row in entity_rows
]
# Fetch data for Entities.
read_rows = provider.online_read(
config=self.config,
table=table,
entity_keys=entity_key_protos,
requested_features=requested_features,
)
# Each row is a set of features for a given entity key. We only need to convert
# the data to Protobuf once.
null_value = Value()
read_row_protos = []
for read_row in read_rows:
row_ts_proto = Timestamp()
row_ts, feature_data = read_row
# TODO (Ly): reuse whatever timestamp if row_ts is None?
if row_ts is not None:
row_ts_proto.FromDatetime(row_ts)
event_timestamps = [row_ts_proto] * len(requested_features)
if feature_data is None:
statuses = [FieldStatus.NOT_FOUND] * len(requested_features)
values = [null_value] * len(requested_features)
else:
statuses = []
values = []
for feature_name in requested_features:
# Make sure order of data is the same as requested_features.
if feature_name not in feature_data:
statuses.append(FieldStatus.NOT_FOUND)
values.append(null_value)
else:
statuses.append(FieldStatus.PRESENT)
values.append(feature_data[feature_name])
read_row_protos.append((event_timestamps, statuses, values))
return read_row_protos
@staticmethod
def _populate_response_from_feature_data(
feature_data: Iterable[
Tuple[
Iterable[Timestamp], Iterable["FieldStatus.ValueType"], Iterable[Value]
]
],
indexes: Iterable[List[int]],
online_features_response: GetOnlineFeaturesResponse,
full_feature_names: bool,
requested_features: Iterable[str],
table: FeatureView,
):
"""Populate the GetOnlineFeaturesResponse with feature data.
This method assumes that `_read_from_online_store` returns data for each
combination of Entities in `entity_rows` in the same order as they
are provided.
Args:
feature_data: A list of data in Protobuf form which was retrieved from the OnlineStore.
indexes: A list of indexes which should be the same length as `feature_data`. Each list
of indexes corresponds to a set of result rows in `online_features_response`.
online_features_response: The object to populate.
full_feature_names: A boolean that provides the option to add the feature view prefixes to the feature names,
changing them from the format "feature" to "feature_view__feature" (e.g., "daily_transactions" changes to
"customer_fv__daily_transactions").
requested_features: The names of the features in `feature_data`. This should be ordered in the same way as the
data in `feature_data`.
table: The FeatureView that `feature_data` was retrieved from.
"""
# Add the feature names to the response.
requested_feature_refs = [
f"{table.projection.name_to_use()}__{feature_name}"
if full_feature_names
else feature_name
for feature_name in requested_features
]
online_features_response.metadata.feature_names.val.extend(
requested_feature_refs
)
timestamps, statuses, values = zip(*feature_data)
# Populate the result with data fetched from the OnlineStore
# which is guaranteed to be aligned with `requested_features`.
for (
feature_idx,
(timestamp_vector, statuses_vector, values_vector),
) in enumerate(zip(zip(*timestamps), zip(*statuses), zip(*values))):
online_features_response.results.append(
GetOnlineFeaturesResponse.FeatureVector(
values=apply_list_mapping(values_vector, indexes),
statuses=apply_list_mapping(statuses_vector, indexes),
event_timestamps=apply_list_mapping(timestamp_vector, indexes),
)
)
@staticmethod
def _augment_response_with_on_demand_transforms(
online_features_response: GetOnlineFeaturesResponse,
feature_refs: List[str],
requested_on_demand_feature_views: List[OnDemandFeatureView],
full_feature_names: bool,
):
"""Computes on demand feature values and adds them to the result rows.
Assumes that 'online_features_response' already contains the necessary request data and input feature
views for the on demand feature views. Unneeded feature values such as request data and
unrequested input feature views will be removed from 'online_features_response'.
Args:
online_features_response: Protobuf object to populate
feature_refs: List of all feature references to be returned.
requested_on_demand_feature_views: List of all odfvs that have been requested.
full_feature_names: A boolean that provides the option to add the feature view prefixes to the feature names,
changing them from the format "feature" to "feature_view__feature" (e.g., "daily_transactions" changes to
"customer_fv__daily_transactions").
"""
requested_odfv_map = {
odfv.name: odfv for odfv in requested_on_demand_feature_views
}
requested_odfv_feature_names = requested_odfv_map.keys()
odfv_feature_refs = defaultdict(list)
for feature_ref in feature_refs:
view_name, feature_name = feature_ref.split(":")
if view_name in requested_odfv_feature_names:
odfv_feature_refs[view_name].append(
f"{requested_odfv_map[view_name].projection.name_to_use()}__{feature_name}"
if full_feature_names
else feature_name
)
initial_response = OnlineResponse(online_features_response)
initial_response_df = initial_response.to_df()
# Apply on demand transformations and augment the result rows
odfv_result_names = set()
for odfv_name, _feature_refs in odfv_feature_refs.items():
odfv = requested_odfv_map[odfv_name]
transformed_features_df = odfv.get_transformed_features_df(
initial_response_df, full_feature_names,
)
selected_subset = [
f for f in transformed_features_df.columns if f in _feature_refs
]
proto_values = [
python_values_to_proto_values(
transformed_features_df[feature].values, ValueType.UNKNOWN
)
for feature in selected_subset
]
odfv_result_names |= set(selected_subset)
online_features_response.metadata.feature_names.val.extend(selected_subset)
for feature_idx in range(len(selected_subset)):
online_features_response.results.append(
GetOnlineFeaturesResponse.FeatureVector(
values=proto_values[feature_idx],
statuses=[FieldStatus.PRESENT] * len(proto_values[feature_idx]),
event_timestamps=[Timestamp()] * len(proto_values[feature_idx]),
)
)
@staticmethod
def _drop_unneeded_columns(
online_features_response: GetOnlineFeaturesResponse,
requested_result_row_names: Set[str],
):
"""
Unneeded feature values such as request data and unrequested input feature views will
be removed from 'online_features_response'.
Args:
online_features_response: Protobuf object to populate
requested_result_row_names: Fields from 'result_rows' that have been requested, and
therefore should not be dropped.
"""
# Drop values that aren't needed
unneeded_feature_indices = [
idx
for idx, val in enumerate(
online_features_response.metadata.feature_names.val
)
if val not in requested_result_row_names
]
for idx in reversed(unneeded_feature_indices):
del online_features_response.metadata.feature_names.val[idx]
del online_features_response.results[idx]
def _get_feature_views_to_use(
self,
features: Optional[Union[List[str], FeatureService]],
allow_cache=False,
hide_dummy_entity: bool = True,
) -> Tuple[List[FeatureView], List[RequestFeatureView], List[OnDemandFeatureView]]:
fvs = {
fv.name: fv
for fv in self._list_feature_views(allow_cache, hide_dummy_entity)
}
request_fvs = {
fv.name: fv
for fv in self._registry.list_request_feature_views(
project=self.project, allow_cache=allow_cache
)
}
od_fvs = {
fv.name: fv
for fv in self._registry.list_on_demand_feature_views(
project=self.project, allow_cache=allow_cache
)
}
if isinstance(features, FeatureService):
fvs_to_use, request_fvs_to_use, od_fvs_to_use = [], [], []
for fv_name, projection in [
(projection.name, projection)
for projection in features.feature_view_projections
]:
if fv_name in fvs:
fvs_to_use.append(
fvs[fv_name].with_projection(copy.copy(projection))
)
elif fv_name in request_fvs:
request_fvs_to_use.append(
request_fvs[fv_name].with_projection(copy.copy(projection))
)
elif fv_name in od_fvs:
odfv = od_fvs[fv_name].with_projection(copy.copy(projection))
od_fvs_to_use.append(odfv)
                    # Make sure to include any FVs that the ODFV requires features from.
for projection in odfv.source_feature_view_projections.values():
fv = fvs[projection.name].with_projection(copy.copy(projection))
if fv not in fvs_to_use:
fvs_to_use.append(fv)
else:
raise ValueError(
f"The provided feature service {features.name} contains a reference to a feature view"
f"{fv_name} which doesn't exist. Please make sure that you have created the feature view"
f'{fv_name} and that you have registered it by running "apply".'
)
views_to_use = (fvs_to_use, request_fvs_to_use, od_fvs_to_use)
else:
views_to_use = (
[*fvs.values()],
[*request_fvs.values()],
[*od_fvs.values()],
)
return views_to_use
@log_exceptions_and_usage
def serve(self, host: str, port: int, no_access_log: bool) -> None:
"""Start the feature consumption server locally on a given port."""
feature_server.start_server(self, host, port, no_access_log)
@log_exceptions_and_usage
def get_feature_server_endpoint(self) -> Optional[str]:
"""Returns endpoint for the feature server, if it exists."""
return self._provider.get_feature_server_endpoint()
@log_exceptions_and_usage
def serve_transformations(self, port: int) -> None:
"""Start the feature transformation server locally on a given port."""
if not flags_helper.enable_on_demand_feature_views(self.config):
raise ExperimentalFeatureNotEnabled(flags.FLAG_ON_DEMAND_TRANSFORM_NAME)
from feast import transformation_server
transformation_server.start_server(self, port)
def _teardown_go_server(self):
self._go_server = None
def _validate_entity_values(join_key_values: Dict[str, List[Value]]):
set_of_row_lengths = {len(v) for v in join_key_values.values()}
if len(set_of_row_lengths) > 1:
raise ValueError("All entity rows must have the same columns.")
return set_of_row_lengths.pop()
def _validate_feature_refs(feature_refs: List[str], full_feature_names: bool = False):
"""
Validates that there are no collisions among the feature references.
Args:
feature_refs: List of feature references to validate. Feature references must have format
"feature_view:feature", e.g. "customer_fv:daily_transactions".
full_feature_names: If True, the full feature references are compared for collisions; if False,
only the feature names are compared.
Raises:
FeatureNameCollisionError: There is a collision among the feature references.
"""
collided_feature_refs = []
if full_feature_names:
collided_feature_refs = [
ref for ref, occurrences in Counter(feature_refs).items() if occurrences > 1
]
else:
feature_names = [ref.split(":")[1] for ref in feature_refs]
collided_feature_names = [
ref
for ref, occurrences in Counter(feature_names).items()
if occurrences > 1
]
for feature_name in collided_feature_names:
collided_feature_refs.extend(
[ref for ref in feature_refs if ref.endswith(":" + feature_name)]
)
if len(collided_feature_refs) > 0:
raise FeatureNameCollisionError(collided_feature_refs, full_feature_names)
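# Illustrative sketch (comment-only, hypothetical view names): with full_feature_names=False
# the refs ["driver_stats:trips", "rider_stats:trips"] collide on the bare feature name
# "trips" and raise FeatureNameCollisionError, whereas with full_feature_names=True the
# fully qualified references differ and validation passes.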
def _group_feature_refs(
features: List[str],
all_feature_views: List[FeatureView],
all_request_feature_views: List[RequestFeatureView],
all_on_demand_feature_views: List[OnDemandFeatureView],
) -> Tuple[
List[Tuple[FeatureView, List[str]]],
List[Tuple[OnDemandFeatureView, List[str]]],
List[Tuple[RequestFeatureView, List[str]]],
Set[str],
]:
"""Get list of feature views and corresponding feature names based on feature references"""
# view name to view proto
view_index = {view.projection.name_to_use(): view for view in all_feature_views}
# request view name to proto
request_view_index = {
view.projection.name_to_use(): view for view in all_request_feature_views
}
# on demand view to on demand view proto
on_demand_view_index = {
view.projection.name_to_use(): view for view in all_on_demand_feature_views
}
# view name to feature names
views_features = defaultdict(set)
request_views_features = defaultdict(set)
request_view_refs = set()
# on demand view name to feature names
on_demand_view_features = defaultdict(set)
for ref in features:
view_name, feat_name = ref.split(":")
if view_name in view_index:
views_features[view_name].add(feat_name)
elif view_name in on_demand_view_index:
on_demand_view_features[view_name].add(feat_name)
# Let's also add in any FV Feature dependencies here.
for input_fv_projection in on_demand_view_index[
view_name
].source_feature_view_projections.values():
for input_feat in input_fv_projection.features:
views_features[input_fv_projection.name].add(input_feat.name)
elif view_name in request_view_index:
request_views_features[view_name].add(feat_name)
request_view_refs.add(ref)
else:
raise FeatureViewNotFoundException(view_name)
fvs_result: List[Tuple[FeatureView, List[str]]] = []
odfvs_result: List[Tuple[OnDemandFeatureView, List[str]]] = []
request_fvs_result: List[Tuple[RequestFeatureView, List[str]]] = []
for view_name, feature_names in views_features.items():
fvs_result.append((view_index[view_name], list(feature_names)))
for view_name, feature_names in request_views_features.items():
request_fvs_result.append((request_view_index[view_name], list(feature_names)))
for view_name, feature_names in on_demand_view_features.items():
odfvs_result.append((on_demand_view_index[view_name], list(feature_names)))
return fvs_result, odfvs_result, request_fvs_result, request_view_refs
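# Illustrative sketch (comment-only, hypothetical view names): a request for
#     ["driver_stats:avg_trips", "conv_rate_plus_100:conv_rate_plus_100_value"]
# where conv_rate_plus_100 is an on demand feature view sourced from driver_stats would
# group avg_trips under driver_stats, group the ODFV output under conv_rate_plus_100,
# and also add the ODFV's source features to the driver_stats group so the
# transformation has its inputs available.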
def _print_materialization_log(
start_date, end_date, num_feature_views: int, online_store: str
):
if start_date:
print(
f"Materializing {Style.BRIGHT + Fore.GREEN}{num_feature_views}{Style.RESET_ALL} feature views"
f" from {Style.BRIGHT + Fore.GREEN}{start_date.replace(microsecond=0).astimezone()}{Style.RESET_ALL}"
f" to {Style.BRIGHT + Fore.GREEN}{end_date.replace(microsecond=0).astimezone()}{Style.RESET_ALL}"
f" into the {Style.BRIGHT + Fore.GREEN}{online_store}{Style.RESET_ALL} online store.\n"
)
else:
print(
f"Materializing {Style.BRIGHT + Fore.GREEN}{num_feature_views}{Style.RESET_ALL} feature views"
f" to {Style.BRIGHT + Fore.GREEN}{end_date.replace(microsecond=0).astimezone()}{Style.RESET_ALL}"
f" into the {Style.BRIGHT + Fore.GREEN}{online_store}{Style.RESET_ALL} online store.\n"
)
def _validate_feature_views(feature_views: List[BaseFeatureView]):
"""Verify feature views have case-insensitively unique names"""
fv_names = set()
for fv in feature_views:
case_insensitive_fv_name = fv.name.lower()
if case_insensitive_fv_name in fv_names:
raise ValueError(
f"More than one feature view with name {case_insensitive_fv_name} found. "
f"Please ensure that all feature view names are case-insensitively unique. "
f"It may be necessary to ignore certain files in your feature repository by using a .feastignore file."
)
else:
fv_names.add(case_insensitive_fv_name)
def _validate_data_sources(data_sources: List[DataSource]):
"""Verify data sources have case-insensitively unique names"""
ds_names = set()
for ds in data_sources:
case_insensitive_ds_name = ds.name.lower()
if case_insensitive_ds_name in ds_names:
if case_insensitive_ds_name.strip():
warnings.warn(
f"More than one data source with name {case_insensitive_ds_name} found. "
f"Please ensure that all data source names are case-insensitively unique. "
f"It may be necessary to ignore certain files in your feature repository by using a .feastignore "
f"file. Starting in Feast 0.21, unique names (perhaps inferred from the table name) will be "
f"required in data sources to encourage data source discovery"
)
else:
ds_names.add(case_insensitive_ds_name)
def apply_list_mapping(
lst: Iterable[Any], mapping_indexes: Iterable[List[int]]
) -> Iterable[Any]:
output_len = sum(len(item) for item in mapping_indexes)
output = [None] * output_len
for elem, destinations in zip(lst, mapping_indexes):
for idx in destinations:
output[idx] = elem
return output
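# Illustrative sketch (comment-only): apply_list_mapping scatters each element to every
# index it maps to, e.g.
#     apply_list_mapping(["a", "b"], [[0, 2], [1]])  ->  ["a", "b", "a"]
# which is how one value fetched per unique entity key is copied back onto all of the
# result rows that share that key.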
| 41.062646
| 126
| 0.633999
|
a15908086a6f14f7b81f60915f21d1a400325c4b
| 3,917
|
py
|
Python
|
py/vtproto/vtworkerdata_pb2.py
|
msolo/vitess
|
04736af822e2143f7b60e993d5bfe6b37343ac0b
|
[
"Apache-2.0"
] | 1
|
2017-06-29T04:57:43.000Z
|
2017-06-29T04:57:43.000Z
|
py/vtproto/vtworkerdata_pb2.py
|
msolo/vitess
|
04736af822e2143f7b60e993d5bfe6b37343ac0b
|
[
"Apache-2.0"
] | 2
|
2017-09-25T08:35:22.000Z
|
2018-03-29T15:21:54.000Z
|
py/vtproto/vtworkerdata_pb2.py
|
msolo/vitess
|
04736af822e2143f7b60e993d5bfe6b37343ac0b
|
[
"Apache-2.0"
] | 3
|
2017-04-19T22:29:59.000Z
|
2018-03-29T14:28:09.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vtworkerdata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import logutil_pb2 as logutil__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='vtworkerdata.proto',
package='vtworkerdata',
syntax='proto3',
serialized_pb=_b('\n\x12vtworkerdata.proto\x12\x0cvtworkerdata\x1a\rlogutil.proto\"-\n\x1d\x45xecuteVtworkerCommandRequest\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\t\"?\n\x1e\x45xecuteVtworkerCommandResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.Eventb\x06proto3')
,
dependencies=[logutil__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXECUTEVTWORKERCOMMANDREQUEST = _descriptor.Descriptor(
name='ExecuteVtworkerCommandRequest',
full_name='vtworkerdata.ExecuteVtworkerCommandRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='args', full_name='vtworkerdata.ExecuteVtworkerCommandRequest.args', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=96,
)
_EXECUTEVTWORKERCOMMANDRESPONSE = _descriptor.Descriptor(
name='ExecuteVtworkerCommandResponse',
full_name='vtworkerdata.ExecuteVtworkerCommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='event', full_name='vtworkerdata.ExecuteVtworkerCommandResponse.event', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=161,
)
_EXECUTEVTWORKERCOMMANDRESPONSE.fields_by_name['event'].message_type = logutil__pb2._EVENT
DESCRIPTOR.message_types_by_name['ExecuteVtworkerCommandRequest'] = _EXECUTEVTWORKERCOMMANDREQUEST
DESCRIPTOR.message_types_by_name['ExecuteVtworkerCommandResponse'] = _EXECUTEVTWORKERCOMMANDRESPONSE
ExecuteVtworkerCommandRequest = _reflection.GeneratedProtocolMessageType('ExecuteVtworkerCommandRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEVTWORKERCOMMANDREQUEST,
__module__ = 'vtworkerdata_pb2'
# @@protoc_insertion_point(class_scope:vtworkerdata.ExecuteVtworkerCommandRequest)
))
_sym_db.RegisterMessage(ExecuteVtworkerCommandRequest)
ExecuteVtworkerCommandResponse = _reflection.GeneratedProtocolMessageType('ExecuteVtworkerCommandResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEVTWORKERCOMMANDRESPONSE,
__module__ = 'vtworkerdata_pb2'
# @@protoc_insertion_point(class_scope:vtworkerdata.ExecuteVtworkerCommandResponse)
))
_sym_db.RegisterMessage(ExecuteVtworkerCommandResponse)
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
| 33.478632
| 278
| 0.792188
|
4f3b94490fb65ed90c1c3598fa8c33629568bb39
| 4,880
|
py
|
Python
|
torchslim/quantizing/qat.py
|
Ocean-627/torch-model-compression
|
e317c10a92503f2793d0c4c06bb91ddeb512f4ac
|
[
"MIT"
] | 1
|
2021-11-22T19:32:51.000Z
|
2021-11-22T19:32:51.000Z
|
torchslim/quantizing/qat.py
|
Ocean-627/torch-model-compression
|
e317c10a92503f2793d0c4c06bb91ddeb512f4ac
|
[
"MIT"
] | null | null | null |
torchslim/quantizing/qat.py
|
Ocean-627/torch-model-compression
|
e317c10a92503f2793d0c4c06bb91ddeb512f4ac
|
[
"MIT"
] | null | null | null |
import os
import sys
import torch
import copy
import torchslim
import torchslim.slim_solver as slim_solver
import torch.nn as nn
import numpy as np
import torchpruner as pruner
import torchpruner.model_tools as model_tools
import onnx
from . import qat_tools
def optimizer_generator(params, config):
return torch.optim.SGD(params, lr=0.0)
def scheduler_generator(optimizer, config):
return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config["epoch"])
def init_hook_function(self):
sample_inputs, _ = iter(self.trainloader).__next__()
input_list = []
if not isinstance(sample_inputs, tuple):
input_list.append(sample_inputs[:1])
else:
        for member in sample_inputs:
            input_list.append(member[:1])
graph_inputs = tuple(input_list)
self.variable_dict["graph_inputs"] = graph_inputs
pruner.activate_function_module()
self.model = qat_tools.prepare_qat(
self.model, graph_inputs, qat_tools.tensorrt_qconfig
)
def end_hook_function(self):
pruner.deactivate_function_module()
class QATSolver(slim_solver.CommonSlimSolver):
__config_setting__ = [
("task_name", str, "defualt", False, "The task name"),
("lr", float, 0.001, False, "The learning rate of the optimizer"),
("epoch", int, 360, False, "The total epoch to train the model"),
("batch_size", int, 128, False, "The batch size per step"),
("test_batch_size", int, 128, False, "The evaluation batch size per step"),
("momentum", float, 0.9, False, "The momentum for the optimizer"),
("weight_decay", float, 1e-4, False, "The wegith decay for the parameters"),
("save_keyword", str, "acc", False, "The keyword for save"),
("save_dir", str, "checkpoints", False, "The model save dir"),
("num_workers", int, 0, False, "The number of workers to read data"),
("devices", list, None, False, "The device to be used in training"),
("log_interval", int, 200, False, "The interval to report the log"),
# generate the optimizer
(
"optimizer_generator",
"function",
optimizer_generator,
False,
"The optimizer generator (params,config)->optimizer",
),
# generate the scheduler
(
"scheduler_generator",
"function",
scheduler_generator,
True,
"the scheduler generator for the task (optmizer,config)->scheduler",
),
# predict the result
(
"predict_function",
"function",
None,
False,
"get the prediction of the data (model,batch_data)->predict",
),
# calculate the loss for one iteration
(
"calculate_loss_function",
"function",
None,
False,
"(predict,batch_data)->loss",
),
# get the evaluate result for one iteration
(
"evaluate_function",
"function",
None,
True,
"(predict,batch_data)->evaluate_dict",
),
# get the dataset
(
"dataset_generator",
"function",
None,
True,
"()->dataset_train,dataset_validation",
),
]
def __init__(self, model, config):
super(QATSolver, self).__init__(model, config)
self.regist_init_hook(init_hook_function)
self.regist_end_hook(end_hook_function)
def generate_params_setting(self):
model = self.model
if isinstance(model, nn.DataParallel):
model = model.module
base_lr = self.config["lr"]
weight_decay = self.config["weight_decay"]
momentum = self.config["momentum"]
params = []
for key, value in model.named_parameters():
apply_weight_decay = weight_decay
apply_momentum = momentum
apply_lr = base_lr
params += [
{
"params": [value],
"lr": apply_lr,
"weight_decay": apply_weight_decay,
"momentum": apply_momentum,
}
]
return params
def save_model(self):
save_path = os.path.join(self.config["save_dir"], self.config["task_name"])
save_path = os.path.join(save_path, "model.trt")
model = copy.deepcopy(self.model)
if isinstance(model, nn.DataParallel):
model = model.module
model = model.cpu()
model = qat_tools.merge_convbn2d(model)
qat_tools.export_onnx(model, self.variable_dict["graph_inputs"], "tmp.onnx")
trt_engin = qat_tools.export_trt("tmp.onnx")
with open(save_path, "wb") as file:
file.write(trt_engin.serialize())
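# Illustrative sketch (not part of the original torchslim API; the helper name and the
# warning-level logger are assumptions): one way to load back the engine file that
# QATSolver.save_model() serializes, using the standard TensorRT runtime.
def _load_trt_engine(path):
    import tensorrt as trt  # deferred import; only needed when deserializing
    logger = trt.Logger(trt.Logger.WARNING)
    with open(path, "rb") as f, trt.Runtime(logger) as runtime:
        # deserialize_cuda_engine returns an ICudaEngine ready to build execution contexts
        return runtime.deserialize_cuda_engine(f.read())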
| 32.751678
| 84
| 0.590574
|
23daecd50f756f2b2d2f76fefd71cc84a004e4fd
| 3,917
|
py
|
Python
|
blockchain-workbench/rest-api-samples/python/swagger_client/models/user_list.py
|
chaosmail/blockchain
|
c78799d548c0d5deb86e03d16bf919df508d09fd
|
[
"MIT"
] | 738
|
2018-05-07T15:37:38.000Z
|
2022-03-30T08:16:04.000Z
|
blockchain-workbench/rest-api-samples/python/swagger_client/models/user_list.py
|
chaosmail/blockchain
|
c78799d548c0d5deb86e03d16bf919df508d09fd
|
[
"MIT"
] | 156
|
2018-05-08T14:01:25.000Z
|
2022-01-31T22:03:32.000Z
|
blockchain-workbench/rest-api-samples/python/swagger_client/models/user_list.py
|
cocoytech/blockchain
|
4a64a41275cf149c0ad66b7fd9864498ec6a7ed9
|
[
"MIT"
] | 682
|
2018-05-07T16:45:10.000Z
|
2022-03-31T16:50:13.000Z
|
# coding: utf-8
"""
Azure Blockchain Workbench REST API
The Azure Blockchain Workbench REST API is a Workbench extensibility point, which allows developers to create and manage blockchain applications, manage users and organizations within a consortium, integrate blockchain applications into services and platforms, perform transactions on a blockchain, and retrieve transactional and contract data from a blockchain. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.user import User # noqa: F401,E501
class UserList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'next_link': 'str',
'users': 'list[User]'
}
attribute_map = {
'next_link': 'nextLink',
'users': 'users'
}
def __init__(self, next_link=None, users=None): # noqa: E501
"""UserList - a model defined in Swagger""" # noqa: E501
self._next_link = None
self._users = None
self.discriminator = None
if next_link is not None:
self.next_link = next_link
if users is not None:
self.users = users
@property
def next_link(self):
"""Gets the next_link of this UserList. # noqa: E501
:return: The next_link of this UserList. # noqa: E501
:rtype: str
"""
return self._next_link
@next_link.setter
def next_link(self, next_link):
"""Sets the next_link of this UserList.
:param next_link: The next_link of this UserList. # noqa: E501
:type: str
"""
self._next_link = next_link
@property
def users(self):
"""Gets the users of this UserList. # noqa: E501
:return: The users of this UserList. # noqa: E501
:rtype: list[User]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this UserList.
:param users: The users of this UserList. # noqa: E501
:type: list[User]
"""
self._users = users
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
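# Illustrative usage sketch (comment-only, not part of the generated module; URL is a placeholder):
#     user_list = UserList(next_link="https://example.test/api/users?skip=2", users=[])
#     user_list.to_dict()  # {'next_link': 'https://example.test/api/users?skip=2', 'users': []}
#     print(user_list)     # pprint-formatted via to_str()/__repr__()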
| 27.780142
| 380
| 0.577993
|
e8ae18d0ce0952a3a9974405ead8fc00eeeeaf8e
| 3,386
|
py
|
Python
|
disvis/pyclfft.py
|
haddocking/disvis
|
a922bd079b41ad5ef3ac33f4e68968f8978626d2
|
[
"Apache-2.0"
] | 4
|
2017-12-27T13:09:43.000Z
|
2021-07-20T09:45:58.000Z
|
disvis/pyclfft.py
|
haddocking/disvis
|
a922bd079b41ad5ef3ac33f4e68968f8978626d2
|
[
"Apache-2.0"
] | 17
|
2015-06-08T03:43:00.000Z
|
2022-03-09T13:12:16.000Z
|
disvis/pyclfft.py
|
haddocking/disvis
|
a922bd079b41ad5ef3ac33f4e68968f8978626d2
|
[
"Apache-2.0"
] | 11
|
2015-07-06T18:25:27.000Z
|
2020-10-08T13:14:16.000Z
|
from gpyfft import GpyFFT
G = GpyFFT()
IMAGINARY = 3
REAL = 5
def builder(context, shape, direction_forward=True):
pass
class RFFTn:
def __init__(self, context, shape):
# The maximum number of elements of the transform is 2^24
# in clFFT (single precision)
elements = 1
for i in shape:
elements *= i
if (elements > (2.0**24.0)):
from math import log
power = log(elements, 2)
raise ValueError('The maximum number of elements for clFFT is 2^24, currently you want 2^{:.2f}'.format(power))
ndim = len(shape)
if ndim > 3:
            raise ValueError('clFFT can only work up to 3 dimensions')
self.context = context
self.shape = tuple(shape)
self.ft_shape = (shape[0]//2 + 1, shape[1], shape[2])
self.ndim = ndim
self.plan = G.create_plan(context, self.shape)
self.plan.inplace = False
self.plan.layouts = (REAL, IMAGINARY)
if ndim == 3:
self.plan.strides_in = (shape[1]*shape[2], shape[2], 1)
elif ndim == 2:
self.plan.strides_in = (shape[1], 1)
elif ndim == 1:
self.plan.strides_in = (1,)
if ndim == 3:
self.plan.strides_out = (shape[1]*shape[2], shape[2], 1)
elif ndim == 2:
self.plan.strides_out = (shape[1], 1)
elif ndim == 1:
self.plan.strides_out = (1,)
self.baked = False
def bake(self, queues):
if not self.baked:
self.plan.bake(queues)
self.baked = True
def __call__(self, queue, inarray, outarray):
self.plan.enqueue_transform(queue, inarray.data, outarray.data)
self.baked = True
class iRFFTn:
def __init__(self, context, shape):
# The maximum number of elements of the transform is 2^24
# in clFFT (single precision)
elements = 1
for i in shape:
elements *= i
if elements > 2**24:
from math import log
power = log(elements, 2)
raise ValueError('The maximum number of elements for clFFT is 2^24, currently you want 2^{:.2f}'.format(power))
ndim = len(shape)
if ndim > 3:
            raise ValueError('clFFT can only work up to 3 dimensions')
self.context = context
self.shape = tuple(shape)
self.ndim = ndim
self.ft_shape = (shape[0]//2 + 1, shape[1], shape[2])
self.plan = G.create_plan(context, self.shape)
self.plan.inplace = False
self.plan.layouts = (IMAGINARY, REAL)
if ndim == 3:
self.plan.strides_out = (shape[1]*shape[2], shape[2], 1)
elif ndim == 2:
self.plan.strides_out = (shape[1], 1)
elif ndim == 1:
self.plan.strides_out = (1,)
if ndim == 3:
self.plan.strides_in = (shape[1]*shape[2], shape[2], 1)
elif ndim == 2:
self.plan.strides_in = (shape[1], 1)
elif ndim == 1:
self.plan.strides_in = (1,)
self.baked = False
def bake(self, queues):
if not self.baked:
self.plan.bake(queues)
self.baked = True
def __call__(self, queue, inarray, outarray):
self.plan.enqueue_transform(queue, inarray.data, outarray.data,
direction_forward=False)
self.baked = True
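# Illustrative usage sketch (comment-only; assumes pyopencl and numpy are installed,
# which this module does not import itself):
#     import numpy as np
#     import pyopencl as cl
#     import pyopencl.array as cl_array
#     ctx = cl.create_some_context()
#     queue = cl.CommandQueue(ctx)
#     shape = (64, 64, 64)
#     rfft = RFFTn(ctx, shape)
#     real_in = cl_array.to_device(queue, np.random.rand(*shape).astype(np.float32))
#     complex_out = cl_array.zeros(queue, rfft.ft_shape, dtype=np.complex64)
#     rfft.bake((queue,))
#     rfft(queue, real_in, complex_out)   # forward real-to-complex transform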
| 29.189655
| 123
| 0.55759
|
52f85fd7de4ef6f33882e790320fb6db1d47fbb8
| 13,793
|
py
|
Python
|
FPL/Transfers/views.py
|
kamrul-s/FPL
|
b05176d0178bec3684e19709e265faba8375c29d
|
[
"MIT"
] | null | null | null |
FPL/Transfers/views.py
|
kamrul-s/FPL
|
b05176d0178bec3684e19709e265faba8375c29d
|
[
"MIT"
] | null | null | null |
FPL/Transfers/views.py
|
kamrul-s/FPL
|
b05176d0178bec3684e19709e265faba8375c29d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.db import connection
from django.http import HttpResponse
from django.contrib import messages
def transfers(request):
if 'user' not in request.session:
return render(request,'index.html')
id = request.session['user']
cursor = connection.cursor()
query = "SELECT USER_TEAM_ID FROM USER_TEAM WHERE USER_ID = %s"
cursor.execute(query,[id])
t_id = cursor.fetchone()
cursor.close()
team_id = (''.join(map(str,t_id)))
cursor = connection.cursor()
args = [0]
result_args = cursor.callproc('GET_GAME_WEEK',args)
cursor.close()
gweek = int(result_args[0])
cursor = connection.cursor()
query = "SELECT * FROM TRANSFER WHERE USER_TEAM_ID=%s and GAMEWEEK_ID=%s"
cursor.execute(query,[team_id,gweek])
tans = cursor.fetchall()
cursor.close()
if(len(tans)>0):
messages.warning(request,f"Can't make more transfers in this upcoming week")
cursor = connection.cursor()
sql = "SELECT PLAYER_ID FROM STARTING_TEAM WHERE USER_TEAM_ID = %s AND GAMEWEEK_ID = %s"
cursor.execute(sql,[team_id,gweek])
res = cursor.fetchall()
cursor.close()
starting_players = []
for r in res:
pl_id = r[0]
cursor = connection.cursor()
query3 = 'SELECT LAST_NAME,PLAYING_POSITION,TEAM_NAME FROM PLAYER_INFO WHERE PLAYER_ID = %s'
cursor.execute(query3,[pl_id])
result = cursor.fetchall()
cursor.close()
for qr in result:
Name = qr[0]
Position = qr[1]
Team_name = qr[2]
row = {'Id':pl_id,'Name':Name,'Position':Position,'Team_name':Team_name}
starting_players.append(row)
cursor = connection.cursor()
sql = "SELECT PLAYER_ID FROM BENCHES WHERE USER_TEAM_ID = %s AND GAMEWEEK_ID = %s"
cursor.execute(sql,[team_id,gweek])
res = cursor.fetchall()
cursor.close()
bench_players = []
for r in res:
pl_id = r[0]
cursor = connection.cursor()
query3 = 'SELECT LAST_NAME,PLAYING_POSITION,TEAM_NAME FROM PLAYER_INFO WHERE PLAYER_ID = %s'
cursor.execute(query3,[pl_id])
result = cursor.fetchall()
cursor.close()
for qr in result:
Name = qr[0]
Position = qr[1]
Team_name = qr[2]
row = {'Id':pl_id,'Name':Name,'Position':Position,'Team_name':Team_name}
bench_players.append(row)
cursor = connection.cursor()
query = "SELECT PLAYER_ID FROM CAPTAINS WHERE USER_TEAM_ID = %s AND GAMEWEEK_ID = %s"
cursor.execute(query,[team_id,gweek])
cap = cursor.fetchone()
cap_id = int((''.join(map(str,cap))))
cursor.close()
return render(request,'myteam.html',{'Starting_players':starting_players,'Bench':bench_players,'Captain':cap_id,'Gameweek':gweek})
cursor = connection.cursor()
query = "SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s"
cursor.execute(query,[id])
result = cursor.fetchall()
cursor.close()
players = []
for r in result:
pl_id = r[0]
cursor = connection.cursor()
query = "SELECT LAST_NAME,PLAYING_POSITION,TEAM_NAME,PRICE FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[pl_id])
qr = cursor.fetchall()
cursor.close()
for s in qr:
Name = s[0]
Position = s[1]
Team_name = s[2]
Price = s[3]
row = {'Id':pl_id,'Name':Name,'Position':Position,'Team_name':Team_name,'Price':Price}
players.append(row)
cursor = connection.cursor()
query = 'SELECT PLAYER_ID,LAST_NAME,PLAYING_POSITION,TEAM_NAME,PRICE FROM PLAYER_INFO WHERE PLAYER_ID NOT IN(SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s) ORDER BY PLAYER_ID'
cursor.execute(query,[id])
result = cursor.fetchall()
cursor.close()
all_players = []
for r in result:
Id = r[0]
Name = r[1]
Position = r[2]
Team_name = r[3]
Price = r[4]
row = {'Id':Id,'Name':Name,'Position':Position,'Team_name':Team_name,'Price':Price}
all_players.append(row)
return render(request,'transfer.html',{'Players':players,'All_players':all_players})
def make_transfers(request):
if 'user' not in request.session:
return render(request,'index.html')
id = request.session['user']
cursor = connection.cursor()
query = "SELECT USER_TEAM_ID FROM USER_TEAM WHERE USER_ID = %s"
cursor.execute(query,[id])
t_id = cursor.fetchone()
cursor.close()
team_id = (''.join(map(str,t_id)))
args = [0]
cursor = connection.cursor()
result_args = cursor.callproc('GET_GAME_WEEK',args)
cursor.close()
gw = int(result_args[0])
out = []
inp = []
out_gkp = request.POST.getlist('out_GKP')
if len(out_gkp) != 0:
for pl in out_gkp:
out.append(pl)
out_def = request.POST.getlist('out_DEF')
if len(out_def) != 0:
for pl in out_def:
out.append(pl)
out_mid = request.POST.getlist('out_MID')
if len(out_mid) != 0:
for pl in out_mid:
out.append(pl)
out_fwd = request.POST.getlist('out_FWD')
if len(out_fwd) != 0:
for pl in out_fwd:
out.append(pl)
in_gkp = request.POST.getlist('in_GKP')
if len(in_gkp) != 0:
for pl in in_gkp:
inp.append(pl)
in_def = request.POST.getlist('in_DEF')
if len(in_def) != 0:
for pl in in_def:
inp.append(pl)
in_mid = request.POST.getlist('in_MID')
if len(in_mid) != 0:
for pl in in_mid:
inp.append(pl)
in_fwd = request.POST.getlist('in_FWD')
if len(in_fwd) != 0:
for pl in in_fwd:
inp.append(pl)
cost = 0
cursor = connection.cursor()
sql = "SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s"
cursor.execute(sql,[id])
result = cursor.fetchall()
cursor.close()
for r in result:
pl_id = r[0]
cursor = connection.cursor()
query = "SELECT PRICE FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[pl_id])
qr = cursor.fetchall()
cursor.close()
for s in qr:
Price = s[0]
cost=cost+Price
for p in out:
cursor = connection.cursor()
query = "SELECT PRICE FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[p])
qr = cursor.fetchall()
cursor.close()
for s in qr:
outcost = s[0]
cost=cost-outcost
for p in inp:
cursor = connection.cursor()
query = "SELECT PRICE FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[p])
qr = cursor.fetchall()
cursor.close()
for s in qr:
incost = s[0]
cost=cost+incost
if cost <= 100:
for p in out:
out_id = p
cursor = connection.cursor()
sql = "DELETE FROM USER_PLAYERS WHERE USER_ID = %s AND PLAYER_ID = %s"
cursor.execute(sql,[id,out_id])
connection.commit()
cursor.close()
for p in inp:
in_id = p
cursor = connection.cursor()
query = "SELECT PLAYING_POSITION FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[in_id])
pos = cursor.fetchone()
cursor.close()
pos = (''.join(map(str,pos)))
cursor = connection.cursor()
query = "INSERT INTO USER_PLAYERS VALUES(%s,%s,%s)"
cursor.execute(query,[id,in_id,pos])
connection.commit()
cursor.close()
cursor = connection.cursor()
query1="INSERT INTO TRANSFER(USER_TEAM_ID,IN_PLAYER_ID,OUT_PLAYER_ID,GAMEWEEK_ID) VALUES(%s,%s,%s,%s)"
cursor.execute(query1,[team_id,in_id,out_id,gw])
connection.commit()
cursor.close()
cursor = connection.cursor()
query1="DELETE FROM STARTING_TEAM WHERE USER_TEAM_ID=%s AND GAMEWEEK_ID = %s"
cursor.execute(query1,[team_id,gw])
cursor.close()
cursor = connection.cursor()
query2="DELETE FROM BENCHES WHERE USER_TEAM_ID=%s AND GAMEWEEK_ID = %s"
cursor.execute(query2,[team_id,gw])
cursor.close()
cursor = connection.cursor()
query3="DELETE FROM CAPTAINS WHERE USER_TEAM_ID=%s AND GAMEWEEK_ID = %s"
cursor.execute(query3,[team_id,gw])
cursor.close()
cursor = connection.cursor()
query = "SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s"
cursor.execute(query,[id])
res = cursor.fetchall()
cursor.close()
players = []
for r in res:
Id = r[0]
cursor = connection.cursor()
query = "SELECT LAST_NAME,PLAYING_POSITION,TEAM_NAME FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[Id])
user_playerss = cursor.fetchall()
cursor.close()
for s in user_playerss:
Name = s[0]
Position = s[1]
Team_name = s[2]
row = {'Id':Id,'Name':Name,'Position':Position,'Team_name':Team_name}
players.append(row)
return render(request,'pick_team.html',{'Players':players})
else:
cursor = connection.cursor()
query = "SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s"
cursor.execute(query,[id])
result = cursor.fetchall()
cursor.close()
players = []
for r in result:
pl_id = r[0]
cursor = connection.cursor()
query = "SELECT LAST_NAME,PLAYING_POSITION,TEAM_NAME,PRICE FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[pl_id])
qr = cursor.fetchall()
cursor.close()
for s in qr:
Name = s[0]
Position = s[1]
Team_name = s[2]
Price = s[3]
row = {'Name':Name,'Position':Position,'Team_name':Team_name,'Price':Price}
players.append(row)
cursor = connection.cursor()
query = 'SELECT PLAYER_ID,LAST_NAME,PLAYING_POSITION,TEAM_NAME,PRICE FROM PLAYER_INFO WHERE PLAYER_ID NOT IN(SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s) ORDER BY PLAYER_ID'
cursor.execute(query,[id])
result = cursor.fetchall()
cursor.close()
all_players = []
for r in result:
Id = r[0]
Name = r[1]
Position = r[2]
Team_name = r[3]
Price = r[4]
row = {'Id':Id,'Name':Name,'Position':Position,'Team_name':Team_name,'Price':Price}
all_players.append(row)
return render(request,'transfer.html',{'Players':players,'All_players':all_players})
def viewhistory(request):
if 'user' not in request.session:
return render(request,'index.html')
id = request.session['user']
cursor = connection.cursor()
query = "SELECT USER_TEAM_ID FROM USER_TEAM WHERE USER_ID = %s"
cursor.execute(query,[id])
t_id = cursor.fetchone()
cursor.close()
team_id = (''.join(map(str,t_id)))
trans_history = []
cursor = connection.cursor()
sql = "SELECT GAMEWEEK_ID,IN_PLAYER_ID,OUT_PLAYER_ID FROM TRANSFER WHERE USER_TEAM_ID = %s"
cursor.execute(sql,[team_id])
result = cursor.fetchall()
cursor.close()
    if not result:  # fetchall() returns an empty list (never None) when there are no rows
messages.warning(request,f"You have not made any transfers yet.")
cursor = connection.cursor()
query = "SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s"
cursor.execute(query,[id])
result = cursor.fetchall()
cursor.close()
players = []
for r in result:
pl_id = r[0]
cursor = connection.cursor()
query = "SELECT LAST_NAME,PLAYING_POSITION,TEAM_NAME,PRICE FROM PLAYER_INFO WHERE PLAYER_ID = %s"
cursor.execute(query,[pl_id])
qr = cursor.fetchall()
cursor.close()
for s in qr:
Name = s[0]
Position = s[1]
Team_name = s[2]
Price = s[3]
row = {'Id':pl_id,'Name':Name,'Position':Position,'Team_name':Team_name,'Price':Price}
players.append(row)
cursor = connection.cursor()
query = 'SELECT PLAYER_ID,LAST_NAME,PLAYING_POSITION,TEAM_NAME,PRICE FROM PLAYER_INFO WHERE PLAYER_ID NOT IN(SELECT PLAYER_ID FROM USER_PLAYERS WHERE USER_ID = %s) ORDER BY PLAYER_ID'
cursor.execute(query,[id])
result = cursor.fetchall()
cursor.close()
all_players = []
for r in result:
Id = r[0]
Name = r[1]
Position = r[2]
Team_name = r[3]
Price = r[4]
row = {'Id':Id,'Name':Name,'Position':Position,'Team_name':Team_name,'Price':Price}
all_players.append(row)
return render(request,'transfer.html',{'Players':players,'All_players':all_players})
for r in result:
gw = r[0]
in_id = r[1]
out_id = r[2]
row = {'GW':gw,'In':in_id,'Out':out_id}
trans_history.append(row)
return render(request,'viewtransfers.html',{'Trans_history':trans_history})
| 36.879679
| 191
| 0.570434
|
3f9307a5116d2a931202a2a6a786fe1832e86dd7
| 10,043
|
py
|
Python
|
stable_baselines/run_exp.py
|
shanlior/OAL
|
39c9eb24f64a27d3da09e92b6da9bf60326baabe
|
[
"MIT"
] | 3
|
2021-04-08T12:49:16.000Z
|
2022-03-11T00:53:47.000Z
|
stable_baselines/run_exp.py
|
shanlior/OAL
|
39c9eb24f64a27d3da09e92b6da9bf60326baabe
|
[
"MIT"
] | null | null | null |
stable_baselines/run_exp.py
|
shanlior/OAL
|
39c9eb24f64a27d3da09e92b6da9bf60326baabe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# noinspection PyUnresolvedReferences
# from mpi4py import MPI
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common.vec_env.vec_normalize import VecNormalize
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines.common.mujoco_wrappers import wrap_mujoco
import gym
from stable_baselines.common.cmd_util import make_mujoco_env, mujoco_arg_parser
from stable_baselines import bench, logger
from stable_baselines.mdal import MDAL_MDPO_OFF, MDAL_MDPO_ON, MDAL_TRPO
from stable_baselines.gail import ExpertDataset, generate_expert_traj
import os
def train(env_id, algo, num_timesteps, seed, sgd_steps, t_pi, t_c, lam, log, expert_path, pretrain, pretrain_epochs,
mdpo_update_steps, num_trajectories, expert_model, exploration_bonus, bonus_coef, random_action_len,
is_action_features, dir_name, neural, lipschitz, args):
"""
    Train an imitation learning model (MDAL / GAIL variants) or an expert SAC policy on the given mujoco environment
:param env_id: (str) Environment ID
:param num_timesteps: (int) The total number of samples
:param seed: (int) The initial seed for training
"""
with tf_util.single_threaded_session():
# from mpi4py import MPI
# rank = MPI.COMM_WORLD.Get_rank()
rank = 0
env_name = env_id[:-3].lower()
log_dir = './experiments/' + env_name + '/' + str(algo).lower() + '/'\
+ 'tpi' + str(t_pi) + '_tc' + str(t_c) + '_lam' + str(lam)
log_dir += '_' + dir_name + '/'
log_name = str(algo) + '_updateSteps' + str(mdpo_update_steps)
# log_name += '_randLen' + str(random_action_len)
if exploration_bonus:
log_name += '_exploration' + str(bonus_coef)
if pretrain:
log_name += '_pretrain' + str(pretrain_epochs)
if not is_action_features:
log_name += "_states_only"
log_name+= '_s' + str(seed)
log_path = log_dir + log_name
expert_path = './experts/' + expert_path
num_timesteps = int(num_timesteps)
args = args.__dict__
dir_path = os.getcwd() + log_dir[1:]
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(os.getcwd() + log_dir[1:] + 'args.txt', 'w') as file:
file.write("Experiment Arguments:")
for key, val in args.items():
print(key, ": ", val, file=file)
if log:
if rank == 0:
logger.configure(log_path)
else:
logger.configure(log_path, format_strs=[])
logger.set_level(logger.DISABLED)
else:
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
logger.set_level(logger.DISABLED)
# workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
# env = make_mujoco_env(env_id, workerseed)
def make_env():
# env_out = gym.make(env_id, reset_noise_scale=1.0)
env_out = gym.make(env_id)
env_out = bench.Monitor(env_out, logger.get_dir(), allow_early_resets=True)
env_out.seed(seed)
env_out = wrap_mujoco(env_out, random_action_len=random_action_len)
return env_out
#
env = DummyVecEnv([make_env])
# env = VecNormalize(env)
if algo == 'Train':
train = True
else:
train = False
if algo == 'Evaluate':
eval = True
else:
eval = False
if train:
from stable_baselines import SAC
env = VecNormalize(env, norm_reward=False, norm_obs=False)
if num_timesteps > 0:
model = SAC('MlpPolicy', env_id, verbose=1, buffer_size=1000000, batch_size=256, ent_coef='auto',
train_freq=1, tau=0.01, gradient_steps=1, learning_starts=10000)
else:
model = SAC.load(expert_model, env)
generate_expert_traj(model, expert_path, n_timesteps=num_timesteps, n_episodes=num_trajectories)
if num_timesteps > 0:
model.save('sac_' + env_name + '_' + str(num_timesteps))
elif eval:
from stable_baselines import SAC
env = VecNormalize(env, norm_reward=False, norm_obs=False)
model = SAC.load(expert_model, env)
generate_expert_traj(model, expert_path, n_timesteps=num_timesteps, n_episodes=10, evaluate=True)
else:
expert_path = expert_path + '.npz'
dataset = ExpertDataset(expert_path=expert_path, traj_limitation=10, verbose=1)
if algo == 'MDAL':
model = MDAL_MDPO_OFF('MlpPolicy', env, dataset, verbose=1,
tensorboard_log="./experiments/" + env_name + "/mdal/", seed=seed,
buffer_size=1000000, ent_coef=0.0, learning_starts=10000, batch_size=256, tau=0.01,
gamma=0.99, gradient_steps=sgd_steps, mdpo_update_steps=mdpo_update_steps,
lam=0.0, train_freq=1, d_step=10, tsallis_q=1, reparameterize=True, t_pi=t_pi, t_c=t_c,
exploration_bonus=exploration_bonus, bonus_coef=bonus_coef,
is_action_features=is_action_features,
neural=neural, lipschitz=lipschitz)
elif algo == 'MDAL_ON_POLICY':
model = MDAL_MDPO_ON('MlpPolicy', env, dataset, verbose=1, timesteps_per_batch=2048,
tensorboard_log="./experiments/" + env_name + "/mdal_mdpo_on/", seed=seed,
max_kl=0.01, cg_iters=10, cg_damping=0.1, entcoeff=0.0, adversary_entcoeff=0.001,
gamma=0.99, lam=0.95, vf_iters=5, vf_stepsize=1e-3, sgd_steps=sgd_steps,
klcoeff=1.0, method="multistep-SGD", tsallis_q=1.0,
t_pi=t_pi, t_c=t_c,
exploration_bonus=exploration_bonus, bonus_coef=bonus_coef,
is_action_features=is_action_features, neural=neural)
elif algo == 'MDAL_TRPO':
model = MDAL_TRPO('MlpPolicy', env, dataset, verbose=1,
tensorboard_log="./experiments/" + env_name + "/mdal_trpo/", seed=seed,
gamma=0.99, g_step=3, d_step=5, sgd_steps=1, d_stepsize=9e-5,
entcoeff=0.0, adversary_entcoeff=0.001, max_kl=t_pi, t_pi=t_pi, t_c=t_c,
exploration_bonus=exploration_bonus, bonus_coef=bonus_coef,
is_action_features=is_action_features, neural=neural, lam=0.98,
timesteps_per_batch=2000, lipschitz=lipschitz)
elif algo == 'GAIL':
from mpi4py import MPI
from stable_baselines import GAIL
model = GAIL('MlpPolicy', env, dataset, verbose=1,
tensorboard_log="./experiments/" + env_name + "/gail/", seed=seed,
entcoeff=0.0, adversary_entcoeff=0.001, lipschitz=lipschitz)
elif algo == 'GAIL_MDPO_OFF':
# from mpi4py import MPI
from stable_baselines import GAIL_MDPO_OFF
model = GAIL_MDPO_OFF('MlpPolicy', env, dataset, verbose=1,
tensorboard_log="./experiments/" + env_name + "/gail_mdpo_off/", seed=seed,
ent_coef=0.0, adversary_entcoeff=0.001,
buffer_size=1000000, learning_starts=10000, batch_size=256,
tau=0.01,
gamma=0.99, gradient_steps=sgd_steps, mdpo_update_steps=mdpo_update_steps,
lam=0.0, train_freq=1, tsallis_q=1, reparameterize=True, t_pi=t_pi, t_c=t_c,
exploration_bonus=exploration_bonus, bonus_coef=bonus_coef,
is_action_features=is_action_features, lipschitz=lipschitz)
else:
raise ValueError("Not a valid algorithm.")
if pretrain:
model.pretrain(dataset, n_epochs=pretrain_epochs)
model.learn(total_timesteps=num_timesteps, tb_log_name=log_name)
env.close()
def main():
"""
    Runs the experiments for each seed using the parsed command-line arguments
"""
args = mujoco_arg_parser().parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
log = not args.no_log
is_action_features = not args.states
# for t_c in t_cs:
# for t_pi in t_pis:
# for lam in lams:
# args.lam = lam
# args.t_c = t_c
# args.t_pi = t_pi
for seed in range(args.num_seeds):
train(args.env, algo=args.algo, num_timesteps=args.num_timesteps, seed=(seed+args.seed_offset),
expert_model=args.expert_model, expert_path=args.expert_path, num_trajectories=args.num_trajectories,
is_action_features=is_action_features,
sgd_steps=args.sgd_steps, mdpo_update_steps=args.mdpo_update_steps, lipschitz=args.lipschitz,
t_pi=args.t_pi, t_c=args.t_c, lam=args.lam, log=log,
pretrain=args.pretrain, pretrain_epochs=args.pretrain_epochs,
exploration_bonus=args.exploration, bonus_coef=args.bonus_coef,
random_action_len=args.random_action_len, dir_name=args.dir_name, neural=args.neural,
args=args)
if __name__ == '__main__':
main()
| 46.068807
| 125
| 0.574629
|
62a5f3b9fd24a1c03101cc43184b1db945254c73
| 9,752
|
py
|
Python
|
nertivia/http.py
|
FluxedScript/Nertivia.py
|
b54470951d47269906f7162ef7d5e828cfaf3030
|
[
"MIT"
] | 2
|
2021-07-03T13:38:45.000Z
|
2021-07-03T13:45:00.000Z
|
nertivia/http.py
|
FluxedScript/Nertivia.py
|
b54470951d47269906f7162ef7d5e828cfaf3030
|
[
"MIT"
] | 1
|
2021-05-31T17:13:28.000Z
|
2021-05-31T18:25:06.000Z
|
nertivia/http.py
|
FluxedScript/Nertivia.py
|
b54470951d47269906f7162ef7d5e828cfaf3030
|
[
"MIT"
] | 2
|
2021-05-31T17:14:56.000Z
|
2021-07-12T08:23:42.000Z
|
import asyncio
import gc
import json
from nertivia import cache_nertivia_data
import aiohttp
import nest_asyncio
import requests
import nertivia.message
nest_asyncio.apply()
# Set API endpoints
MAIN_URL = "https://nertivia.net/"
URL = "https://nertivia.net/api/channels/"
URL_MSG = "https://nertivia.net/api/messages/"
URL_STA = "https://nertivia.net/api/settings/status"
headers = {}
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession()
def get_sid(token):
"""
    Obtain the session id (sid) for a given token; returns None on a failed connection or any other error preventing success
Do not use manually
"""
r = requests.get(url=str(URL + "app"), headers={'Accept': 'text/plain',
'authorization': token,
'Content-Type': 'application/json;charset=utf-8'})
cookie = r.headers.get('set-cookie')
# If successful, then the cookie was set
if cookie:
return cookie.split("connect.sid=", 1)[1].strip("; Path=/; HttpOnly")
return None
def generate_headers(token):
"""
Generates a header using a provided token and sets it as a global variable
Do not use manually
"""
global headers
headers = {'Accept': 'text/plain',
'authorization': token,
'Content-Type': 'application/json;charset=utf-8',
'Cookie': f'connect.sid={get_sid(token)}'}
async def fetch_server(server_id):
"""
Asynchronous function which perform an API call to retrieve a server from its ID
"""
session = aiohttp.ClientSession()
res = await session.get(url=str(f'{MAIN_URL}/api/servers/{server_id}'),
headers=headers)
await session.close()
# Reminder : 2XX is a success
# If unsuccessful we return the error message
if res.status != 200:
return res.content
    # However, if successful, return the json data that was returned and transform it into its python equivalent
return await res.json()
async def fetch_channel(channel_id):
"""
Asynchronous function that will perform an API call to retrieve a channel from its ID
"""
session = aiohttp.ClientSession()
res = await session.get(url=str(f'{URL}{channel_id}'), headers=headers)
await session.close()
# Reminder : 2XX is a success
# If unsuccessful we return the error message
if res.status != 200:
return res.content
    # However, if successful, return the json data that was returned and transform it into its python equivalent
return await res.json()
async def fetch_user(user_id):
"""
Asynchronous function which performs an API call to retrieve a user from their ID
"""
session = aiohttp.ClientSession()
res = await session.get(url=str(f'{MAIN_URL}/api/user/{user_id}'),
headers=headers)
await session.close()
# Reminder : 2XX is a success
# If unsuccessful we return the error message
if res.status != 200:
return res.content
# However, if successful return the json data that was returned and transform it into its python equivalent
return await res.json()
class HTTPClient:
"""
Object representing the HTTPClient, do not instantiate manually
Handles the bridge between Nertivia.py and the Nertivia servers
"""
def __init__(self, **kwargs):
"""
Prepare the HTTPClient instance
"""
self.token = None
self.user = {}
self._servers = {}
self._users = {}
        # If a token was passed as a named argument then set it as a self value for ease of access
if kwargs.get("token"):
self.token = kwargs.get("token")
# If a socket ip was given, use it to ready all of the endpoints
if kwargs.get("socket_ip"):
global MAIN_URL, URL, URL_MSG, URL_STA
socket_ip = kwargs.get("socket_ip")
MAIN_URL = f"{socket_ip}"
URL = f"{socket_ip}/api/channels/"
URL_MSG = f"{socket_ip}/api/messages/"
URL_STA = f"{socket_ip}/api/settings/status"
def set_token(self, token):
"""
Set the token to the one provided, regardless of whether or not one was already set
After which, generate new headers to fit with the new token
"""
self.token = token
generate_headers(token)
def clear(self):
"""
Clean up the memory, reset the dictionary entries for users, servers and the bot itself
"""
self.user = {}
self._servers = {}
self._users = {}
gc.collect() # make sure it's memory efficient
@property
def servers(self):
"""
Synchronously returns a list of all cached servers
"""
return list(self._servers.values())
def _get_server(self, server_id):
"""
Returns a cached server using its ID
If the server isn't cached, then `None` is returned
"""
return self._servers.get(server_id)
def _add_server(self, server):
"""
Add a server to the cache, using its ID as the dictionary key and the server object being the value
"""
self._servers[server.id] = server
def _remove_server(self, server):
"""
Remove from the cache a server using its ID
For some reason server object is expected and not id (Fix if no dependencies)
"""
# Remove the server from cache, then delete the argument despite no apparent further reference (Useless line ?)
self._servers.pop(server.id, None)
del server
gc.collect()
async def delete_message(self, message_id, channel_id):
"""
Asynchronously delete a message using its ID, and the channel ID in which it is located
"""
res = await session.delete(url=str(URL_MSG + str(message_id) + '/channels/' + str(channel_id)),
headers=headers)
# Reminder : if 2XX then it is a success
if res.status != 200:
return res.content
async def edit_message(self, message_id, channel_id, content: str):
"""
Edit a message using its ID, and the channel ID in which it is located
Third argument is the content to replace it with
"""
res = await session.patch(url=str(URL_MSG + str(message_id) + '/channels/' + str(channel_id)),
headers=headers,
data=json.dumps({'message': content}))
if res.status != 200:
return res.content
async def send_message(self, channel_id, content: str):
"""
Send a message in a channel which is found by its ID
Second argument is a string containing the message to send
"""
res = await session.post(url=str(URL_MSG + '/channels/' + str(channel_id)),
data=json.dumps({"message": content}),
headers=headers)
# Reminder : if 2XX then it is a success
if res.status != 200:
return res
# If successful return...Success message ? Not sure
return res
async def get_message(self, message_id, channel_id):
"""
Asynchronously retrieve a message using its ID and the ID of the channel in which it is located
"""
res = await session.get(url=str(f'{MAIN_URL}/api/messages/{message_id}/channels/{channel_id}'),
headers=headers)
# Reminder : if 2XX then it is a success
if res.status != 200:
# If the response is unsuccessful and has a content attribute then we return it
# Needs doc reading but a content attribute should always be present to specify success/error message
try:
if "content" in res:
return res.content
# Error shouldn't be possible unless res is None
except Exception:
pass
return None
# Return a message object for easy manipulation
return nertivia.message.Message({'message': await res.json()})
def get_channel(self, channel_id):
"""
Synchronously fetch a channel using its ID with an API call
Channels are not cached ?
Returns a nertivia.Channel object
"""
try:
res = asyncio.run(fetch_channel(channel_id))
except asyncio.TimeoutError:
res = asyncio.run(fetch_channel(channel_id))
return nertivia.Channel(res)
def get_user(self, user_id, force_cache: bool = False):
"""
        Synchronously obtain a user from the cache; if the user is not cached, perform an API call to retrieve them
`force_cache` may be set to True to have the function return None rather than perform an API call
"""
if str(user_id) in cache_nertivia_data.users:
return cache_nertivia_data.users[str(user_id)]
elif not force_cache:
return nertivia.User(asyncio.run(fetch_user(user_id)))
return None
def get_server(self, server_id, force_cache: bool = False):
"""
        Synchronously obtain a server from the cache; if the server is not cached, perform an API call to retrieve it
`force_cache` may be set to True to have the function return None rather than perform an API call
"""
if str(server_id) in cache_nertivia_data.guilds:
return cache_nertivia_data.guilds[str(server_id)]
elif not force_cache:
return nertivia.Server(asyncio.run(fetch_server(server_id)))
return None
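# --- Illustrative usage sketch (not part of the original module) ---
# The enclosing class is defined above; its name is not visible in this excerpt, so
# `Bot` below is a hypothetical alias for it, and the IDs and host are placeholders:
#
#     bot = Bot(token="...", socket_ip="https://nertivia.example")
#     await bot.send_message(channel_id="1234", content="hello")
#     msg = await bot.get_message(message_id="5678", channel_id="1234")
#     if msg is not None:
#         await bot.edit_message("5678", "1234", "hello (edited)")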
| 35.079137
| 119
| 0.616489
|
713d0f5ae67e37579c15e10069991deede478c19
| 12,018
|
py
|
Python
|
tests/components/tasmota/test_discovery.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/tasmota/test_discovery.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/tasmota/test_discovery.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""The tests for the MQTT discovery."""
import copy
import json
from unittest.mock import patch
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.components.tasmota.discovery import ALREADY_DISCOVERED
from homeassistant.helpers import device_registry as dr
from .conftest import setup_tasmota_helper
from .test_common import DEFAULT_CONFIG, DEFAULT_CONFIG_9_0_0_3
from tests.common import async_fire_mqtt_message
async def test_subscribing_config_topic(hass, mqtt_mock, setup_tasmota):
"""Test setting up discovery."""
discovery_topic = DEFAULT_PREFIX
assert mqtt_mock.async_subscribe.called
call_args = mqtt_mock.async_subscribe.mock_calls[0][1]
assert call_args[0] == discovery_topic + "/#"
assert call_args[2] == 0
async def test_future_discovery_message(hass, mqtt_mock, caplog):
"""Test we handle backwards compatible discovery messages."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["future_option"] = "BEST_SINCE_SLICED_BREAD"
config["so"]["another_future_option"] = "EVEN_BETTER"
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config",
return_value={},
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/00000049A3BC/config", json.dumps(config)
)
await hass.async_block_till_done()
assert mock_tasmota_get_device_config.called
async def test_valid_discovery_message(hass, mqtt_mock, caplog):
"""Test discovery callback called."""
config = copy.deepcopy(DEFAULT_CONFIG)
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config",
return_value={},
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/00000049A3BC/config", json.dumps(config)
)
await hass.async_block_till_done()
assert mock_tasmota_get_device_config.called
async def test_invalid_topic(hass, mqtt_mock):
"""Test receiving discovery message on wrong topic."""
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config"
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/123456/configuration", "{}")
await hass.async_block_till_done()
assert not mock_tasmota_get_device_config.called
async def test_invalid_message(hass, mqtt_mock, caplog):
"""Test receiving an invalid message."""
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config"
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/123456/config", "asd")
await hass.async_block_till_done()
assert "Invalid discovery message" in caplog.text
assert not mock_tasmota_get_device_config.called
async def test_invalid_mac(hass, mqtt_mock, caplog):
"""Test topic is not matching device MAC."""
config = copy.deepcopy(DEFAULT_CONFIG)
with patch(
"homeassistant.components.tasmota.discovery.tasmota_get_device_config"
) as mock_tasmota_get_device_config:
await setup_tasmota_helper(hass)
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/00000049A3BA/config", json.dumps(config)
)
await hass.async_block_till_done()
assert "MAC mismatch" in caplog.text
assert not mock_tasmota_get_device_config.called
async def test_correct_config_discovery(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test receiving valid discovery message."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device and registry entries are created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
entity_entry = entity_reg.async_get("switch.test")
assert entity_entry is not None
state = hass.states.get("switch.test")
assert state is not None
assert state.name == "Test"
assert (mac, "switch", "relay", 0) in hass.data[ALREADY_DISCOVERED]
async def test_device_discover(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test setting up a device."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device and registry entries are created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
assert device_entry.configuration_url == f"http://{config['ip']}/"
assert device_entry.manufacturer == "Tasmota"
assert device_entry.model == config["md"]
assert device_entry.name == config["dn"]
assert device_entry.sw_version == config["sw"]
async def test_device_discover_deprecated(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test setting up a device with deprecated discovery message."""
config = copy.deepcopy(DEFAULT_CONFIG_9_0_0_3)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device and registry entries are created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
assert device_entry.manufacturer == "Tasmota"
assert device_entry.model == config["md"]
assert device_entry.name == config["dn"]
assert device_entry.sw_version == config["sw"]
async def test_device_update(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test updating a device."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["md"] = "Model 1"
config["dn"] = "Name 1"
config["sw"] = "v1.2.3.4"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
# Update device parameters
config["md"] = "Another model"
config["dn"] = "Another name"
config["sw"] = "v6.6.6"
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is updated
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
assert device_entry.model == "Another model"
assert device_entry.name == "Another name"
assert device_entry.sw_version == "v6.6.6"
async def test_device_remove(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test removing a discovered device."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
"",
)
await hass.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
async def test_device_remove_stale(hass, mqtt_mock, caplog, device_reg, setup_tasmota):
"""Test removing a stale (undiscovered) device does not throw."""
mac = "00000049A3BC"
config_entry = hass.config_entries.async_entries("tasmota")[0]
# Create a device
device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, mac)},
)
# Verify device entry was created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
# Remove the device
device_reg.async_remove_device(device_entry.id)
# Verify device entry is removed
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
async def test_device_rediscover(
hass, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
    """Test rediscovering a removed device."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created
device_entry1 = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry1 is not None
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
"",
)
await hass.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
# Verify device entry is created, and id is reused
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
assert device_entry1.id == device_entry.id
async def test_entity_duplicate_discovery(hass, mqtt_mock, caplog, setup_tasmota):
"""Test entities are not duplicated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
state_duplicate = hass.states.get("binary_sensor.beer1")
assert state is not None
assert state.name == "Test"
assert state_duplicate is None
assert (
f"Entity already added, sending update: switch ('{mac}', 'switch', 'relay', 0)"
in caplog.text
)
async def test_entity_duplicate_removal(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removing entity twice."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
config["rl"][0] = 0
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
assert f"Removing entity: switch ('{mac}', 'switch', 'relay', 0)" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
assert "Removing entity: switch" not in caplog.text
| 30.736573
| 87
| 0.687968
|
e50679fe2bbc7fcb5d22c7b27e6ade62025ae4ff
| 10,376
|
py
|
Python
|
tests/bugs/core_4889_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_4889_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_4889_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_4889
# title: FBSVCMGR with `action_trace_start` prevents in 3.0 SuperServer from connecting using local protocol
# decription:
# Confirmed failing to create embedded attach on build 31948.
# Confirmed successful work on build 32268, architectures: SS, SC and CS.
# 10.12.2019. Additional check:
# 4.0.0.1685 SS: 11.439s.
# 4.0.0.1685 CS: 12.078s.
# 3.0.5.33206 SS: 10.827s.
# 3.0.5.33206 CS: 11.793s.
#
# 13.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 3.0.8.33445, 4.0.0.2416
# Linux: 3.0.8.33426, 4.0.0.2416
#
# tracker_id: CORE-4889
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
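# How the test below works: the active implementation replaces the legacy script that
# is preserved in the large comment block further down. It starts a trace session via
# act_1.trace(db_events=trace_1), makes an embedded ISQL attachment as a non-existing
# user (no credentials are passed), and then checks the trace log for an
# EXECUTE_STATEMENT_FINISH entry followed by "1 records fetched" as well as the ISQL
# output against expected_stdout_1.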
import pytest
from firebird.qa import db_factory, python_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import subprocess
# from subprocess import Popen
# import time
#
# fdb_file='$(DATABASE_LOCATION)bugs.core_4889.fdb'
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
#
# # Prepare config for trace session that will be launched by call of FBSVCMGR:
# ################
# txt = '''database= %[\\\\\\\\/]bugs.core_4889.fdb
# {
# enabled = true
# time_threshold = 0
# log_errors = true
# log_statement_finish = true
# }
# '''
# trc_cfg=open( os.path.join(context['temp_directory'],'tmp_trace_4889.cfg'), 'w')
# trc_cfg.write(txt)
# flush_and_close( trc_cfg )
#
# #####################################################################
# # Async. launch of trace session using FBSVCMGR action_trace_start:
#
# trc_log = open( os.path.join(context['temp_directory'],'tmp_trace_4889.log'), 'w')
#
# # Execute a child program in a new process, redirecting STDERR to the same target as of STDOUT:
# p_svcmgr = Popen( [context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_start","trc_cfg", trc_cfg.name],
# stdout=trc_log, stderr=subprocess.STDOUT)
#
# # Wait! Trace session is initialized not instantly!
# time.sleep(2)
#
# #####################################################################
#
# # Determine active trace session ID (for further stop):
#
# trc_lst = open( os.path.join(context['temp_directory'],'tmp_trace_4889.lst'), 'w')
# subprocess.call([context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_list"],
# stdout=trc_lst, stderr=subprocess.STDOUT
# )
# flush_and_close( trc_lst )
#
# # Session ID: 5
# # user:
# # date: 2015-08-27 15:24:14
# # flags: active, trace
#
# trcssn=0
# with open( trc_lst.name,'r') as f:
# for line in f:
# i=1
# if 'Session ID' in line:
# for word in line.split():
# if i==3:
# trcssn=word
# i=i+1
# break
#
# # Result: `trcssn` is ID of active trace session.
# # We have to terminate the trace session that is running on the server BEFORE we terminate the process `p_svcmgr`
# if trcssn==0:
# print("Error parsing trace session ID.")
# flush_and_close( trc_log )
#
# else:
# #####################################################################
#
# # Preparing script for ISQL:
#
# sql_cmd='''
# set list on;
# set count on;
# select
# iif(a.mon$remote_protocol is null, 'internal', 'remote') as connection_protocol,
# iif(a.mon$remote_process is null, 'internal', 'remote') as connection_process,
# iif(a.mon$remote_pid is null, 'internal', 'remote') as connection_remote_pid,
# a.mon$auth_method as auth_method -- should be: 'User name in DPB'
# from rdb$database r
# left join mon$attachments a on a.mon$attachment_id = current_connection and a.mon$system_flag = 0;
# commit;
# '''
#
# isql_cmd=open( os.path.join(context['temp_directory'],'tmp_isql_4889.sql'), 'w')
# isql_cmd.write(sql_cmd)
# flush_and_close( isql_cmd )
#
# #######################################################################
#
# # Async. launch ISQL process with EMBEDDED connect.
# # ::::: NB :::::
# # Confirmed that this action:
# # works fine on WI-V3.0.0.31940, build 14-jul-2015
# # **HANGS** on WI-V3.0.0.31948, build 16-jul-2015
#
# isql_log=open( os.path.join(context['temp_directory'],'tmp_isql_4889.log'), 'w')
# p_isql = Popen( [ context['isql_path'] , fdb_file,
# "-user", "tmp$no$such$user$4889",
# "-n", "-i", isql_cmd.name ],
# stdout=isql_log,
# stderr=subprocess.STDOUT
# )
#
# # do NOT remove this delay:
# time.sleep(5)
#
# p_isql.terminate()
# flush_and_close( isql_log )
#
# #####################################################################
#
# # Stop trace session:
#
# trc_lst=open(trc_lst.name, "a")
# trc_lst.seek(0,2)
# subprocess.call([ context['fbsvcmgr_path'], "localhost:service_mgr",
# "action_trace_stop","trc_id",trcssn],
# stdout=trc_lst, stderr=subprocess.STDOUT
# )
# flush_and_close( trc_lst )
#
# p_svcmgr.terminate()
# flush_and_close( trc_log )
#
# # do NOT remove this delay:
# time.sleep(2)
#
# #####################################################################
#
# # Output logs:
#
# i=0
# with open( trc_log.name,'r') as f:
# for line in f:
# if ') EXECUTE_STATEMENT_FINISH' in line:
# i=1
# if i==1 and '1 records fetched' in line:
# i=2
# print("OK: found text in trace related to EMBEDDED connect.")
# break
#
# if not i==2:
# print("FAILED to found text in trace related to EMBEDDED connect.")
#
# if os.path.getsize(isql_log.name) == 0:
# print("FAILED to print log from EMBEDDED connect: log is EMPTY.")
# else:
# with open( isql_log.name,'r') as f:
# print(f.read())
# f.close()
#
#
# # do NOT remove this pause otherwise log of trace will not be enabled for deletion and test will finish with
# # Exception raised while executing Python test script. exception: WindowsError: 32
#
# # On WI-V3.0.0.31948 final output was:
# # FAILED to found text in trace related to EMBEDDED connect.
# # FAILED to print log from EMBEDDED connect: log is EMPTY.
#
# #####################################################################
#
# # Cleanup:
# time.sleep(1)
# cleanup( (trc_lst, trc_cfg, trc_log,isql_cmd, isql_log) )
#
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)
expected_stdout_1 = """
OK: found text in trace related to EMBEDDED connect.
CONNECTION_PROTOCOL internal
CONNECTION_PROCESS internal
CONNECTION_REMOTE_PID internal
AUTH_METHOD User name in DPB
Records affected: 1
"""
trace_1 = ['time_threshold = 0',
'log_initfini = false',
'log_errors = true',
'log_statement_finish = true',
]
@pytest.mark.version('>=3.0')
def test_1(act_1: Action, capsys):
with act_1.trace(db_events=trace_1):
isq_script = """
set list on;
set count on;
select
iif(a.mon$remote_protocol is null, 'internal', 'remote') as connection_protocol,
iif(a.mon$remote_process is null, 'internal', 'remote') as connection_process,
iif(a.mon$remote_pid is null, 'internal', 'remote') as connection_remote_pid,
a.mon$auth_method as auth_method -- should be: 'User name in DPB'
from rdb$database r
left join mon$attachments a on a.mon$attachment_id = current_connection and a.mon$system_flag = 0;
commit;
"""
act_1.isql(switches=['-n', '-user', 'tmp$no$such$user$4889', str(act_1.db.db_path)],
connect_db=False, credentials=False, input=isq_script)
# Process trace log
i = 0
for line in act_1.trace_log:
if ') EXECUTE_STATEMENT_FINISH' in line:
i = 1
if i == 1 and '1 records fetched' in line:
i = 2
print("OK: found text in trace related to EMBEDDED connect.")
break
if not i == 2:
print("FAILED to find text in trace related to EMBEDDED connect.")
print(act_1.stdout if act_1.stdout else "FAILED to print log from EMBEDDED connect: log is EMPTY.")
# Check
act_1.expected_stdout = expected_stdout_1
act_1.stdout = capsys.readouterr().out
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 34.818792
| 115
| 0.550405
|
32b5c9371710fac50598384758a8ddc5c01a84ae
| 5,570
|
py
|
Python
|
venv/Lib/site-packages/gym/spaces/box.py
|
RealBrandonChen/AirSim
|
c43b1e821454668e5c9f7181acd8f9334b83f7c0
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/gym/spaces/box.py
|
RealBrandonChen/AirSim
|
c43b1e821454668e5c9f7181acd8f9334b83f7c0
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/gym/spaces/box.py
|
RealBrandonChen/AirSim
|
c43b1e821454668e5c9f7181acd8f9334b83f7c0
|
[
"MIT"
] | null | null | null |
import numpy as np
from .space import Space
from gym import logger
class Box(Space):
"""
A (possibly unbounded) box in R^n. Specifically, a Box represents the
Cartesian product of n closed intervals. Each interval has the form of one
of [a, b], (-oo, b], [a, oo), or (-oo, oo).
There are two common use cases:
* Identical bound for each dimension::
>>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
Box(3, 4)
* Independent bound for each dimension::
>>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
Box(2,)
"""
def __init__(self, low, high, shape=None, dtype=np.float32):
assert dtype is not None, "dtype must be explicitly provided. "
self.dtype = np.dtype(dtype)
# determine shape if it isn't provided directly
if shape is not None:
shape = tuple(shape)
assert (
np.isscalar(low) or low.shape == shape
), "low.shape doesn't match provided shape"
assert (
np.isscalar(high) or high.shape == shape
), "high.shape doesn't match provided shape"
elif not np.isscalar(low):
shape = low.shape
assert (
np.isscalar(high) or high.shape == shape
), "high.shape doesn't match low.shape"
elif not np.isscalar(high):
shape = high.shape
assert (
np.isscalar(low) or low.shape == shape
), "low.shape doesn't match high.shape"
else:
raise ValueError(
"shape must be provided or inferred from the shapes of low or high"
)
if np.isscalar(low):
low = np.full(shape, low, dtype=dtype)
if np.isscalar(high):
high = np.full(shape, high, dtype=dtype)
self.shape = shape
self.low = low
self.high = high
def _get_precision(dtype):
if np.issubdtype(dtype, np.floating):
return np.finfo(dtype).precision
else:
return np.inf
low_precision = _get_precision(self.low.dtype)
high_precision = _get_precision(self.high.dtype)
dtype_precision = _get_precision(self.dtype)
if min(low_precision, high_precision) > dtype_precision:
logger.warn(
"Box bound precision lowered by casting to {}".format(self.dtype)
)
self.low = self.low.astype(self.dtype)
self.high = self.high.astype(self.dtype)
# Boolean arrays which indicate the interval type for each coordinate
self.bounded_below = -np.inf < self.low
self.bounded_above = np.inf > self.high
super(Box, self).__init__(self.shape, self.dtype)
def is_bounded(self, manner="both"):
below = np.all(self.bounded_below)
above = np.all(self.bounded_above)
if manner == "both":
return below and above
elif manner == "below":
return below
elif manner == "above":
return above
else:
raise ValueError("manner is not in {'below', 'above', 'both'}")
def sample(self):
"""
Generates a single random sample inside of the Box.
In creating a sample of the box, each coordinate is sampled according to
the form of the interval:
* [a, b] : uniform distribution
* [a, oo) : shifted exponential distribution
* (-oo, b] : shifted negative exponential distribution
* (-oo, oo) : normal distribution
"""
high = self.high if self.dtype.kind == "f" else self.high.astype("int64") + 1
sample = np.empty(self.shape)
# Masking arrays which classify the coordinates according to interval
# type
unbounded = ~self.bounded_below & ~self.bounded_above
upp_bounded = ~self.bounded_below & self.bounded_above
low_bounded = self.bounded_below & ~self.bounded_above
bounded = self.bounded_below & self.bounded_above
# Vectorized sampling by interval type
sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)
sample[low_bounded] = (
self.np_random.exponential(size=low_bounded[low_bounded].shape)
+ self.low[low_bounded]
)
sample[upp_bounded] = (
-self.np_random.exponential(size=upp_bounded[upp_bounded].shape)
+ self.high[upp_bounded]
)
sample[bounded] = self.np_random.uniform(
low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape
)
if self.dtype.kind == "i":
sample = np.floor(sample)
return sample.astype(self.dtype)
def contains(self, x):
if isinstance(x, list):
x = np.array(x) # Promote list to array for contains check
return (
x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
)
def to_jsonable(self, sample_n):
return np.array(sample_n).tolist()
def from_jsonable(self, sample_n):
return [np.asarray(sample) for sample in sample_n]
def __repr__(self):
return f"Box({self.low}, {self.high}, {self.shape}, {self.dtype})"
def __eq__(self, other):
return (
isinstance(other, Box)
and (self.shape == other.shape)
and np.allclose(self.low, other.low)
and np.allclose(self.high, other.high)
)
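if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: build a box whose three
    # coordinates use different interval types and exercise the public API. It relies
    # only on what this module already imports.
    demo = Box(
        low=np.array([-1.0, 0.0, -np.inf]),
        high=np.array([1.0, np.inf, np.inf]),
        dtype=np.float32,
    )
    print(demo)                            # repr shows low, high, shape and dtype
    print(demo.is_bounded())               # False: not every coordinate is bounded on both sides
    print(demo.sample())                   # one draw; each coordinate sampled per its interval type
    print(demo.contains([0.0, 1.0, 2.0]))  # True: the point lies inside all three intervals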
| 34.171779
| 88
| 0.582226
|
6fdbcd2770503231411f87bc6492ab78443b3c0a
| 12,828
|
py
|
Python
|
src/kraken/krakenclient.py
|
pacanada/crypto-trading-agent
|
8f46c9d9e87d05af2624cc7db59250f0a3a37f15
|
[
"MIT"
] | 1
|
2021-11-18T12:58:34.000Z
|
2021-11-18T12:58:34.000Z
|
src/kraken/krakenclient.py
|
pacanada/crypto-trading-agent
|
8f46c9d9e87d05af2624cc7db59250f0a3a37f15
|
[
"MIT"
] | null | null | null |
src/kraken/krakenclient.py
|
pacanada/crypto-trading-agent
|
8f46c9d9e87d05af2624cc7db59250f0a3a37f15
|
[
"MIT"
] | null | null | null |
import requests
import pandas as pd
import json
import time
import sys
import platform
import time
import base64
import hashlib
import hmac
from src.base.platformclient import PlatformClient
# TODO: Remove assert, only Exceptions
class KrakenClient(PlatformClient):
def __init__(self, api_private_key=None, api_public_key=None):
self.api_private_key=api_private_key
self.api_public_key=api_public_key
    def get_last_historical_data(self, pair_name: str, interval: int):
        """Fetch OHLC candle data for the given pair and interval (in minutes) and return it as a pandas DataFrame."""
if interval not in [1, 5, 15, 30, 60, 240, 1440, 10080, 21600]:
raise ValueError("Interval is not supported")
data = self._get_historical_data_from_crypto(pair_name=pair_name, interval=interval)
df = self.from_dict_to_df(data=data)
#df = self.fix_columns_type(df)
#df = self.set_datetime_as_index(df)
return df
    def _get_historical_data_from_crypto(self, pair_name: str, interval: int):
        """Call Kraken's public OHLC endpoint for the given pair and interval and return the parsed response as a dict."""
output=self.krakenapi_func(
["","OHLC", f"pair={pair_name.lower()}", f"interval={interval}"],
api_private_key=None,
api_public_key=None
)
output_dict = eval(output)
return output_dict
    def from_dict_to_df(self, data: dict):
        """Convert a Kraken OHLC response dict into a DataFrame with time/open/high/low/close/vwap/volume/count columns."""
result_name = list(data["result"].keys())[0]
df_raw = pd.DataFrame.from_dict(data=data["result"][result_name])
df = pd.DataFrame()
df[["time", "open", "high", "low", "close", "vwap", "volume", "count"]] = df_raw.copy()
return df
    def fix_columns_type(self, df: pd.DataFrame):
        """Cast all columns to float, then cast the time and count columns to int."""
df = df.astype(float).copy()
df[["time", "count"]] = df[["time", "count"]].astype(int).copy()
return df
def execute_order(self, order_type, volume, pair_name):
assert order_type in ["sell", "buy"], "Unknown order_type"
output=self.krakenapi_func(
sysargs=[
"",
"AddOrder",
f"pair={pair_name.lower()}",
f"type={order_type.lower()}",
"ordertype=market",
f"volume={volume}"
],
api_private_key=self.api_private_key,
api_public_key=self.api_public_key
)
print(output)
id_order = self.get_id_order(output)
finalized_order = self.wait_until_fulfilled(id_order, order_type, pair_name)
assert finalized_order["Id"] == id_order
return finalized_order
def _get_order_book_response(self, pair_name, since=None, count=10):
resp = requests.get(f"https://api.kraken.com/0/public/Depth?pair={pair_name}&since={since}&count={count}")
return resp.json()
def _get_best_limit_price(self, order_type, pair_name):
order_book = self._get_order_book_response(pair_name)
if order_type=="buy":
# {'error': [],
#'result': {'XXLMZEUR': {'asks': [['0.28805700', '3104.111', 1637781170],
output = order_book["result"][list(order_book["result"].keys())[0]]["asks"][0][0]
elif order_type=="sell":
output = order_book["result"][list(order_book["result"].keys())[0]]["bids"][0][0]
return float(output)
    def execute_limit_market_order(self, order_type, volume, pair_name):
        """TODO: This will break when the trade cannot be filled at the specified limit price.
        That is probably not a problem for crypto pairs with a low volume of trades, but
        the wait_until_fulfilled function should still be changed to handle that case."""
assert order_type in ["sell", "buy"], "Unknown order_type"
# get best posible limit price
limit_price = self._get_best_limit_price(order_type=order_type, pair_name=pair_name)
output=self.krakenapi_func(
sysargs=[
"",
"AddOrder",
f"pair={pair_name.lower()}",
f"type={order_type.lower()}",
"ordertype=limit",
f"volume={volume}",
f"price={limit_price}"
],
api_private_key=self.api_private_key,
api_public_key=self.api_public_key
)
print(output)
id_order = self.get_id_order(output)
finalized_order = self.wait_until_fulfilled(id_order, order_type, pair_name)
assert finalized_order["Id"] == id_order
return finalized_order
def execute_order_leverage(self, trade_type, volume, pair_name, order_type, leverage, price=None ):
assert trade_type in ["Sell", "Buy"], "Unknown order_type"
sysargs = [
"",
"AddOrder",
f"pair={pair_name.lower()}",
f"type={trade_type.lower()}",
f"ordertype={order_type}",
f"volume={volume}",
f"leverage={leverage}"
]
if price is not None:
# for market type limit or stoploss or takeprofit
sysargs = sysargs + [f"price={price}"]
output=self.krakenapi_func(
sysargs=sysargs,
api_private_key=self.api_private_key,
api_public_key=self.api_public_key
)
print(output)
id_order = self.get_id_order(output)
if order_type not in ["take-profit", "stop-loss"]:
finalized_order = self.wait_until_fulfilled(id_order, order_type, pair_name)
else:
print("We dont have to wait until fulfilled")
finalized_order = {"Id": id_order,
"Price": price}
assert finalized_order["Id"] == id_order
return finalized_order
def cancel_order(self, txid):
output=self.krakenapi_func(
sysargs=[
"",
"CancelOrder",
f"txid={txid}",
],
api_private_key=self.api_private_key,
api_public_key=self.api_public_key
)
return output
def get_closed_order(self,):
output=self.krakenapi_func(
sysargs=[
"",
"ClosedOrders",
],
api_private_key=self.api_private_key,
api_public_key=self.api_public_key
)
output = eval(output.replace("null", "None"))
return output
def get_closed_order_from_start(self, start_id):
output=self.krakenapi_func(
sysargs=[
"",
"ClosedOrders",
f"start={start_id}",
],
api_private_key=self.api_private_key,
api_public_key=self.api_public_key
)
output = eval(output.replace("null", "None"))
return output
def execute_mock_order(self, order_type, volume, pair_name):
"""mock order to simulate and not have to make a trade"""
assert order_type in ["Sell", "Buy"], "Unknown order_type"
finalized_order = {'Id': 'OPJZJL-V76CB-ZWOYJK', 'Price': 0.331511, 'Action': 'Sell'}
return finalized_order
def get_id_order(self, output):
id_order = eval(output)["result"]["txid"][0]
return id_order
def wait_until_fulfilled(self, id_order, order_type, pair_name ):
while True:
# Get last closed order
output_closedorders = self.krakenapi_func(
sysargs=[" ","ClosedOrders"],
api_private_key=self.api_private_key,
api_public_key=self.api_public_key)
output_closeorders_json=json.loads(output_closedorders)
finalized_id = list(output_closeorders_json["result"]["closed"].keys())[0]
finalized_price = eval(output_closeorders_json["result"]["closed"][finalized_id]["price"])
print("checking id",id_order,finalized_id)
# Wait until id of last trade is recognized
time.sleep(3)
if id_order==finalized_id:
finalized_order = {
"Id": id_order,
"Price": finalized_price,
"Action": order_type,
"Pair_Name": pair_name
}
#send_slack_message(text=str(finalized_order), channel=CHANNEL_NAME)
#save_order(finalized_order, dir_data=dir_finalized_order, name_file=finalized_orders_file)
break
return finalized_order
def krakenapi_func(self, sysargs: list, api_public_key: str, api_private_key: str ):
"""
# Kraken Rest API
#
# Usage: ./krakenapi.py method [parameters]
# Example: ./krakenapi.py Time
# Example: ./krakenapi.py OHLC pair=xbtusd interval=1440
# Example: ./krakenapi.py Balance
# Example: ./krakenapi.py OpenPositions
# Example: ./krakenapi.py AddOrder pair=xxbtzusd type=buy ordertype=market volume=0.003 leverage=5
"""
if int(platform.python_version_tuple()[0]) > 2:
import urllib.request as urllib2
else:
import urllib2
api_public = {"Time", "Assets", "AssetPairs", "Ticker", "OHLC", "Depth", "Trades", "Spread"}
api_private = {"Balance", "BalanceEx", "TradeBalance", "OpenOrders", "ClosedOrders", "QueryOrders", "TradesHistory", "QueryTrades", "OpenPositions", "Ledgers", "QueryLedgers", "TradeVolume", "AddExport", "ExportStatus", "RetrieveExport", "RemoveExport", "GetWebSocketsToken"}
api_trading = {"AddOrder", "CancelOrder", "CancelAll"}
api_funding = {"DepositMethods", "DepositAddresses", "DepositStatus", "WithdrawInfo", "Withdraw", "WithdrawStatus", "WithdrawCancel", "WalletTransfer"}
api_domain = "https://api.kraken.com"
api_data = ""
if len(sysargs) < 2:
api_method = "Time"
elif len(sysargs) == 2:
api_method = sysargs[1]
else:
api_method = sysargs[1]
for count in range(2, len(sysargs)):
if count == 2:
api_data = sysargs[count]
else:
api_data = api_data + "&" + sysargs[count]
if api_method in api_private or api_method in api_trading or api_method in api_funding:
api_path = "/0/private/"
api_nonce = str(int(time.time()*1000))
try:
api_key = api_public_key#open("API_Public_Key").read().strip()
api_secret = base64.b64decode(api_private_key)#open("API_Private_Key").read().strip())
except:
print("API public key and API private (secret) key must be in text files called API_Public_Key and API_Private_Key")
#sys.exit(1)
api_postdata = api_data + "&nonce=" + api_nonce
api_postdata = api_postdata.encode('utf-8')
api_sha256 = hashlib.sha256(api_nonce.encode('utf-8') + api_postdata).digest()
api_hmacsha512 = hmac.new(api_secret, api_path.encode('utf-8') + api_method.encode('utf-8') + api_sha256, hashlib.sha512)
api_request = urllib2.Request(api_domain + api_path + api_method, api_postdata)
api_request.add_header("API-Key", api_key)
api_request.add_header("API-Sign", base64.b64encode(api_hmacsha512.digest()))
api_request.add_header("User-Agent", "Kraken REST API")
elif api_method in api_public:
api_path = "/0/public/"
api_request = urllib2.Request(api_domain + api_path + api_method + '?' + api_data)
print(api_domain + api_path + api_method + '?' + api_data)
api_request.add_header("User-Agent", "Kraken REST API")
else:
print("Usage: %s method [parameters]" % sysargs[0])
print("Example: %s OHLC pair=xbtusd interval=1440" % sysargs[0])
#sys.exit(1)
try:
api_reply = urllib2.urlopen(api_request).read()
except Exception as error:
print("API call failed (%s)" % error)
try:
api_reply = api_reply.decode()
except Exception as error:
if api_method == 'RetrieveExport':
sys.stdout.buffer.write(api_reply)
print("API response invalid (%s)" % error)
#sys.exit(1)
        if '"error":[]' in api_reply:
            output = api_reply
        else:
            # The API reported an error: log the payload and return None instead of
            # raising UnboundLocalError on the return below
            print(api_reply)
            output = None
        return output
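# --- Illustrative usage sketch (not part of the original module) ---
# The pair name below is a placeholder; public OHLC data needs no API keys, so the
# client can be constructed without credentials:
#
#     client = KrakenClient()
#     candles = client.get_last_historical_data(pair_name="XXBTZEUR", interval=60)
#     print(candles[["time", "open", "close", "volume"]].tail())
#
# Private calls (orders, closed orders, ...) additionally require
# KrakenClient(api_private_key=..., api_public_key=...).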
| 41.514563
| 283
| 0.57203
|
0ecd3e81b96ab05cf75cb01972153372a59f3806
| 7,638
|
py
|
Python
|
Lapchat.py
|
Kalebu/Desktop-chatitng-GUI-app-with-python
|
e149d956674ef04a9955e836842a293dca645b1c
|
[
"MIT"
] | 3
|
2020-04-22T12:25:35.000Z
|
2021-09-27T22:46:33.000Z
|
Lapchat.py
|
Alfaxad/Desktop-chatitng-GUI-app-with-python
|
e149d956674ef04a9955e836842a293dca645b1c
|
[
"MIT"
] | null | null | null |
Lapchat.py
|
Alfaxad/Desktop-chatitng-GUI-app-with-python
|
e149d956674ef04a9955e836842a293dca645b1c
|
[
"MIT"
] | 3
|
2020-05-26T11:16:30.000Z
|
2022-02-23T08:25:59.000Z
|
from tkinter import Tk, Label, Button, StringVar, Text, Scrollbar, PhotoImage,Entry
from tkinter import Frame, filedialog, messagebox
import socket
import threading
import os
class Lapchat:
def __init__(self, master):
master.title("Lapchat")
master.resizable(False, False)
master.geometry("400x450")
#master.iconbitmap("4.ico")
master.configure(bg="#3D9970")
#==============Initializing communication==============
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.host = socket.gethostname()
self.port = 12345
self.client = (self.host, self.port)
self.top_frame = Frame(master, width=400, height=45, bg="#6666ff")
self.top_frame.place(x=0, y=0)
self.message_count = 1.5
self.filename = None
self.message_session = Text(master, bd=3, relief="flat", font=("consolas", 12, "italic"), undo=True, wrap="word")
self.message_session.config(width=35, height=15,bg="#AAAAAA", fg="blue")
self.overscroll = Scrollbar(master, command=self.message_session.yview)
self.overscroll.config(width=20)
self.message_session["yscrollcommand"] = self.overscroll.set
self.message = Entry(bg="#ffffff", width=30, bd=5, relief="flat")
self.message.bind("<Return>", self.send_msg)
self.send_message = Button(master, text="send", fg="blue", width=10, height=1, relief="flat")
self.send_message.configure(command=self.send_msg)
self.attachment = Button(master, text="File", fg="red", width=5, height=1, relief="flat")
self.attachment.configure(command=self.select_file)
self.file_label = Label(master, fg="#008080", font=("verdana", 7), width=50 )
self.message_session.place(x=40, y=50)
self.overscroll.place(x=360, y=50)
self.message.place(x=40, y = 345)
self.send_message.place(x=240, y = 345)
self.attachment.place(x=325, y=345)
def get_filename(self, folder):
self.temp_filename = folder.split("/")
self.temp_filename = self.temp_filename[-1]
return self.temp_filename
def select_file(self, event=None):
self.select_file = filedialog.askopenfilename()
self.filename = self.select_file
self.temp_filename = self.get_filename(self.select_file)
self.file_label.config(text=self.temp_filename)
self.file_label.place(x=40, y=380)
def receive_sms_txt(self, receive_txt=None):
print("Receiving sms again")
print(self.received_message)
if receive_txt:
self.sm = receive_txt
else:
self.sm = self.received_message
self.message.delete(0, "end")
self.sm ="client:"+self.sm+"\n"
self.message_session.insert(self.message_count, self.sm)
self.message_count+=1.5
self.received_message=None
def receive_file(self, size, name):
with open(name, "wb") as rec_file:
print(size)
print(name)
while size>0:
received_buffer = self.server.recv(1024)
rec_file.write(received_buffer)
size = size-len(received_buffer)
print(size)
        print("File received successfully")
self.server.send(("ready_received").encode())
self.received_message = None
def try_sample1(self):
self.receive_sms_thread= threading.Thread(target=self.receive_file, args=(self.received_size, self.received_name))
self.receive_sms_thread.start()
self.receive_sms_thread.join()
def receive_sms(self):
while True:
try:
self.server.connect(self.client)
while True:
try:
print("receiving messages")
self.received_message = self.server.recv(1024).decode()
print(self.received_message)
if "&&&" in self.received_message:
self.received_message = self.received_message.split("&&&")
self.received_size = self.received_message[0]
self.received_name = self.received_message[1]
self.received_size = int(self.received_size)
self.receive_sms_txt(receive_txt="File Received")
self.try_sample1()
else:
if self.received_message:
self.receive_sms_txt()
except:
continue
except:
continue
def send_sms_txt(self, file_message=None):
if file_message:
self.sms = file_message
else:
self.sms= self.message.get()
self.server.send(self.sms.encode())
self.message.delete(0, "end")
self.sms = "you:"+self.sms+"\n"
self.message_session.insert(self.message_count, self.sms)
self.message_count+=1.5
        print("Message sent successfully")
def send_file(self, size):
print(size)
with open(self.filename, "rb") as file:
size = int(size)
while size>0:
buffer = file.read()
self.server.send(buffer)
buffer_size = len(buffer)
break
        print("File sent successfully")
def receive_sms_txt(self, receive_txt=None):
print("Receiving sms again")
print(self.received_message)
if receive_txt:
self.sm = receive_txt
else:
self.sm = self.received_message
self.message.delete(0, "end")
self.sm ="client:"+self.sm+"\n"
self.message_session.insert(self.message_count, self.sm)
self.message_count+=1.5
self.received_message=None
def try_sample(self):
sendfile_thread = threading.Thread(target=self.send_file, args=(self.filesize,))
sendfile_thread.start()
sendfile_thread.join()
self.filename = None
self.file_label.place_forget()
print("Thread stopped")
def send_msg(self, event=None):
try:
if self.filename:
                self.ask_send = messagebox.askyesno("Confirm", "Do you want to send the message with a file?")
print(self.ask_send)
if self.ask_send:
self.file_name = self.get_filename(self.filename)
self.filesize = str(os.stat(self.filename).st_size)
print("file size is : {}".format(self.filesize))
self.embedded_filename = self.filesize+"&&&"+self.file_name
self.send_sms_txt()
self.send_sms_txt(file_message="File has been sent")
self.server.send(self.embedded_filename.encode())
self.try_sample()
else:
self.filename = None
self.file_label.place_forget()
self.send_sms_txt()
else:
self.send_sms_txt()
except:
            self.show_error = messagebox.showerror("No connection", "Timed out, no connection found")
root = Tk()
app = Lapchat(root)
receive_thread = threading.Thread(target=app.receive_sms)
receive_thread.start()
root.mainloop()
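# Note on the ad-hoc transfer convention used above (inferred from the code): plain
# chat messages travel as raw text over the socket. To attach a file, the sender first
# transmits a header of the form "<size>&&&<filename>"; the receiver, on seeing "&&&"
# in an incoming message, switches to receive_file() and reads exactly <size> bytes in
# 1024-byte chunks before replying "ready_received".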
| 37.441176
| 123
| 0.564153
|
89236aa87f6fd469c533f9616ab6c8426107a57d
| 3,656
|
py
|
Python
|
datacity_ckan_dgp/operators/db_fetcher.py
|
hasadna/datacity-ckan-dgp
|
4a61be492e5c72e56d3aa9caadee5315cd48f90b
|
[
"MIT"
] | null | null | null |
datacity_ckan_dgp/operators/db_fetcher.py
|
hasadna/datacity-ckan-dgp
|
4a61be492e5c72e56d3aa9caadee5315cd48f90b
|
[
"MIT"
] | null | null | null |
datacity_ckan_dgp/operators/db_fetcher.py
|
hasadna/datacity-ckan-dgp
|
4a61be492e5c72e56d3aa9caadee5315cd48f90b
|
[
"MIT"
] | null | null | null |
import os
import tempfile
from decimal import Decimal
import dataflows as DF
from datacity_ckan_dgp import ckan
def update_package(instance_name, org_id, package_name, title, resources):
print("Creating/updating package {}@{} {}".format(package_name, org_id, title))
package = ckan.package_show(instance_name, package_name)
if not package or package['state'] == 'deleted':
res = ckan.package_create(instance_name, {
'name': package_name,
'title': title,
'private': False,
'owner_org': org_id
})
if res['success']:
package = ckan.package_show(instance_name, package_name)
else:
print('Failed to create package', res)
print(package)
if package:
existing_resources = package.get('resources', [])
existing_resources = dict((r['format'], r['id']) for r in existing_resources)
print(existing_resources)
for format, filename in resources:
print(format, filename)
with open(filename, 'rb') as f:
resource = {
'package_id': package['id'],
'description': '{} - {}'.format(title, format),
'format': format,
'name': format,
}
if format in existing_resources:
print('Updating resource', existing_resources[format])
resource['id'] = existing_resources[format]
res = ckan.resource_update(instance_name, resource, files=[('upload', f)])
if not res['success']:
print('update resource failed: {}'.format(res))
else:
print('updated resource {} {}: {}'.format(package_name, format, res))
else:
print('Creating resource', resource)
res = ckan.resource_create(instance_name, resource, files=[('upload', f)])
if not res['success']:
print('create resource failed: {}'.format(res))
else:
print('created resource {} {}: {}'.format(package_name, format, res))
def operator(name, params):
connection_string = params['db_url']
source_table = params['db_table']
target_instance_name = params['target_instance_name']
target_package_id = params['target_package_id']
target_organization_id = params['target_organization_id']
print('starting db_fetcher operator')
print('source_table={} target_instance_name={} target_package_id={} target_organization_id={}'.format(
source_table, target_instance_name, target_package_id, target_organization_id))
with tempfile.TemporaryDirectory() as tempdir:
csv_filename = target_package_id + '.csv'
DF.Flow(
DF.load(connection_string, table=source_table, name=target_package_id,
infer_strategy=DF.load.INFER_PYTHON_TYPES),
DF.update_resource(-1, path=csv_filename),
DF.delete_fields(['_source']),
DF.dump_to_path(tempdir)
).process()
csv_filename = os.path.join(tempdir, csv_filename)
print('{}, {:,} bytes'.format(csv_filename, os.stat(csv_filename).st_size))
update_package(
target_instance_name,
target_organization_id,
target_package_id,
target_package_id,
[('CSV', csv_filename)]
)
if __name__ == '__main__':
import sys
import json
exit(0 if operator('_', json.loads(sys.argv[1])) else 1)
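# --- Illustrative invocation sketch (all values are placeholders, not from the original repo) ---
# The __main__ block above expects a single JSON argument with the keys read by operator():
#
#     python -m datacity_ckan_dgp.operators.db_fetcher '{
#         "db_url": "postgresql://user:pass@host:5432/db",
#         "db_table": "source_table",
#         "target_instance_name": "my_ckan_instance",
#         "target_package_id": "my-dataset",
#         "target_organization_id": "my-org"
#     }'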
| 41.545455
| 106
| 0.584792
|
ac8e15a8e5b28222088b7dc4bbee5cca04480e3b
| 460
|
py
|
Python
|
deploy/urls.py
|
chenbin743/OpsSystem
|
7427627aef8c3886b7d5bd8f2d8fc9215c0a1a19
|
[
"MIT"
] | 1
|
2018-11-14T08:33:57.000Z
|
2018-11-14T08:33:57.000Z
|
deploy/urls.py
|
chenbin743/OpsSystem
|
7427627aef8c3886b7d5bd8f2d8fc9215c0a1a19
|
[
"MIT"
] | null | null | null |
deploy/urls.py
|
chenbin743/OpsSystem
|
7427627aef8c3886b7d5bd8f2d8fc9215c0a1a19
|
[
"MIT"
] | 1
|
2018-11-30T09:06:34.000Z
|
2018-11-30T09:06:34.000Z
|
from django.conf.urls import url
from views import *
urlpatterns=[
url(r'^add_svn_repo/$',add_svn_repo,name='add_svn_repo'),
url(r'^add_git_repo/$',add_git_repo,name='add_git_repo'),
url(r'^list_repo_info/$',list_repo_info,name='list_repo_info'),
url(r'^list_deploy_info/$',list_deploy_info,name='list_deploy_info'),
url(r'^deploy_project/$',deploy_project,name='deploy_project'),
url(r'^rollback_project/$',rollback_project,name='rollback_project')
]
| 41.818182
| 70
| 0.767391
|
755b6629775cf2d24f8f33581f8eaaf43d76149c
| 5,984
|
py
|
Python
|
2017/16_PermutationPromenade/aoc_16.py
|
deanearlwright/AdventOfCode
|
ca4cf6315c0efa38bd7748fb6f4bc99e7934871d
|
[
"MIT"
] | 1
|
2021-01-03T23:09:28.000Z
|
2021-01-03T23:09:28.000Z
|
2017/16_PermutationPromenade/aoc_16.py
|
deanearlwright/AdventOfCode
|
ca4cf6315c0efa38bd7748fb6f4bc99e7934871d
|
[
"MIT"
] | 6
|
2020-12-26T21:02:42.000Z
|
2020-12-26T21:02:52.000Z
|
2017/16_PermutationPromenade/aoc_16.py
|
deanearlwright/AdventOfCode
|
ca4cf6315c0efa38bd7748fb6f4bc99e7934871d
|
[
"MIT"
] | null | null | null |
# ======================================================================
# Permutation Promenade
# Advent of Code 2017 Day 16 -- Eric Wastl -- https://adventofcode.com
#
# Computer simulation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# a o c _ 1 6 . p y
# ======================================================================
"Solve the Permutation Promenade problem for Advent of Code 2017 day 16"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import argparse
import sys
import promenade
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# parse_command_line
# ----------------------------------------------------------------------
def parse_command_line():
"Parse the command line options"
# 1. Create the command line parser
desc = 'Permutation Promenade - Day 16 of Advent of Code 2017'
sample = 'sample: python aoc_16.py input.txt'
parser = argparse.ArgumentParser(description=desc,
epilog=sample)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
dest='verbose', help='Print status messages to stdout')
parser.add_argument('-p', '--part', action='store', default=1, type=int,
dest='part', help='Puzzle Part (1 or 2)')
parser.add_argument('-l', '--limit', action='store', default=0, type=int,
dest='limit',
help='Maximum limit (e.g., time, size, recursion) before stopping')
parser.add_argument('filepath', metavar='FILENAME', action='store', type=str,
help="Location of puzzle input")
# 2. Get the options and arguments
return parser.parse_args()
# ----------------------------------------------------------------------
# part_one
# ----------------------------------------------------------------------
def part_one(args, input_lines):
"Process part one of the puzzle"
# 1. Create the puzzle solver
solver = promenade.Promenade(part2=False, text=input_lines[0])
# 2. Determine the dancer's position at the end of the dance
solution = solver.part_one(verbose=args.verbose)
if solution is None:
print("There is no solution")
else:
print("The programs are in order %s after their dance" % (solution))
# 3. Return result
return solution is not None
# ----------------------------------------------------------------------
# part_two
# ----------------------------------------------------------------------
def part_two(args, input_lines):
"Process part two of the puzzle"
# 1. Create the puzzle solver
solver = promenade.Promenade(part2=True, text=input_lines[0])
# 2. Determine the dancer's position at the end of the dance
solution = solver.part_two(verbose=args.verbose, limit=args.limit)
if solution is None:
print("There is no solution")
else:
print("The programs are in order %s after their dance" % (solution))
# 3. Return result
return solution is not None
# ----------------------------------------------------------------------
# from_file
# ----------------------------------------------------------------------
def from_file(filepath):
"Read the file"
return from_text(open(filepath).read())
# ----------------------------------------------------------------------
# from_text
# ----------------------------------------------------------------------
def from_text(text):
    "Break the text into trimmed, non-comment lines"
# 1. We start with no lines
lines = []
# 2. Loop for lines in the text
for line in text.split('\n'):
        # 3. But ignore blank lines and comment lines
line = line.rstrip(' \r')
if not line:
continue
if line.startswith('!'):
continue
# 4. Add the line
lines.append(line)
# 5. Return a list of clean lines
return lines
# ----------------------------------------------------------------------
# main
# ----------------------------------------------------------------------
def main():
"""Read the Advent of Code problem and solve it"""
# 1. Get the command line options
args = parse_command_line()
# 2. Read the puzzle file
input_text = from_file(args.filepath)
    # 3. Process the appropriate part of the puzzle
if args.part == 1:
result = part_one(args, input_text)
else:
result = part_two(args, input_text)
# 5. Set return code (0 if solution found, 2 if not)
if result:
sys.exit(0)
sys.exit(2)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
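# Example invocations (a sketch; the file name and limit value are placeholders). The
# puzzle input is a single line of comma-separated dance moves, e.g. "s1,x3/4,pe/b"
# from the published day 16 example:
#
#     python aoc_16.py -p 1 input.txt
#     python aoc_16.py -p 2 -v -l 100 input.txt   # -l is forwarded to the solver as a stopping limit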
# ======================================================================
# end a o c _ 1 6 . p y end
# ======================================================================
| 35.832335
| 91
| 0.38135
|
351a036a3cf78143cbe43f8d9629ba99242c650e
| 8,061
|
py
|
Python
|
release/scripts/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_animation_sampler_keyframes.py
|
noorbeast/BlenderSource
|
65ebecc5108388965678b04b43463b85f6c69c1d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2019-03-20T13:10:46.000Z
|
2019-05-15T20:00:31.000Z
|
engine/2.80/scripts/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_animation_sampler_keyframes.py
|
byteinc/Phasor
|
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
|
[
"Unlicense"
] | null | null | null |
engine/2.80/scripts/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_animation_sampler_keyframes.py
|
byteinc/Phasor
|
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
|
[
"Unlicense"
] | null | null | null |
# Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import mathutils
import typing
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.blender.com import gltf2_blender_math
from . import gltf2_blender_export_keys
from io_scene_gltf2.io.com import gltf2_io_debug
class Keyframe:
def __init__(self, channels: typing.Tuple[bpy.types.FCurve], time: float):
self.seconds = time / bpy.context.scene.render.fps
self.__target = channels[0].data_path.split('.')[-1]
self.__indices = [c.array_index for c in channels]
# Data holders for virtual properties
self.__value = None
self.__in_tangent = None
self.__out_tangent = None
def __get_target_len(self):
length = {
"delta_location": 3,
"delta_rotation_euler": 3,
"location": 3,
"rotation_axis_angle": 4,
"rotation_euler": 3,
"rotation_quaternion": 4,
"scale": 3,
"value": 1
}.get(self.__target)
if length is None:
raise RuntimeError("Animations with target type '{}' are not supported.".format(self.__target))
return length
def __set_indexed(self, value):
# 'value' targets don't use keyframe.array_index
if self.__target == "value":
return value
# Sometimes blender animations only reference a subset of components of a data target. Keyframe should always
# contain a complete Vector/ Quaternion --> use the array_index value of the keyframe to set components in such
# structures
result = [0.0] * self.__get_target_len()
for i, v in zip(self.__indices, value):
result[i] = v
result = gltf2_blender_math.list_to_mathutils(result, self.__target)
return result
@property
def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
return self.__value
@value.setter
def value(self, value: typing.List[float]):
self.__value = self.__set_indexed(value)
@property
def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
return self.__in_tangent
@in_tangent.setter
def in_tangent(self, value: typing.List[float]):
self.__in_tangent = self.__set_indexed(value)
@property
def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        return self.__out_tangent
@out_tangent.setter
def out_tangent(self, value: typing.List[float]):
self.__out_tangent = self.__set_indexed(value)
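# Worked example for the indexed assignment above (a sketch, not part of the original
# module): if an action only animates the X and Z components of "location", the two
# channels carry array_index 0 and 2, so a raw value pair like [1.0, 3.0] is expanded
# into the complete three-component target (1.0, 0.0, 3.0) before conversion to a
# mathutils type, ensuring every exported keyframe carries a full value.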
# cache for performance reasons
@cached
def gather_keyframes(channels: typing.Tuple[bpy.types.FCurve], export_settings) \
-> typing.List[Keyframe]:
"""Convert the blender action groups' fcurves to keyframes for use in glTF."""
# Find the start and end of the whole action group
ranges = [channel.range() for channel in channels]
start = min([channel.range()[0] for channel in channels])
end = max([channel.range()[1] for channel in channels])
keyframes = []
if needs_baking(channels, export_settings):
# Bake the animation, by evaluating it at a high frequency
# TODO: maybe baking can also be done with FCurve.convert_to_samples
time = start
# TODO: make user controllable
step = 1.0 / bpy.context.scene.render.fps
while time <= end:
key = Keyframe(channels, time)
key.value = [c.evaluate(time) for c in channels]
keyframes.append(key)
time += step
else:
# Just use the keyframes as they are specified in blender
times = [keyframe.co[0] for keyframe in channels[0].keyframe_points]
for i, time in enumerate(times):
key = Keyframe(channels, time)
# key.value = [c.keyframe_points[i].co[0] for c in action_group.channels]
key.value = [c.evaluate(time) for c in channels]
# compute tangents for cubic spline interpolation
if channels[0].keyframe_points[0].interpolation == "BEZIER":
# Construct the in tangent
if time == times[0]:
# start in-tangent has zero length
key.in_tangent = [0.0 for _ in channels]
else:
# otherwise construct an in tangent from the keyframes control points
key.in_tangent = [
3.0 * (c.keyframe_points[i].co[1] - c.keyframe_points[i].handle_left[1]
) / (time - times[i - 1])
for c in channels
]
# Construct the out tangent
if time == times[-1]:
# end out-tangent has zero length
key.out_tangent = [0.0 for _ in channels]
else:
# otherwise construct an out tangent from the keyframes control points
key.out_tangent = [
3.0 * (c.keyframe_points[i].handle_right[1] - c.keyframe_points[i].co[1]
) / (times[i + 1] - time)
for c in channels
]
keyframes.append(key)
return keyframes
def needs_baking(channels: typing.Tuple[bpy.types.FCurve],
export_settings
) -> bool:
"""
Check if baking is needed.
Some blender animations need to be baked as they can not directly be expressed in glTF.
"""
def all_equal(lst):
return lst[1:] == lst[:-1]
if export_settings[gltf2_blender_export_keys.FORCE_SAMPLING]:
return True
interpolation = channels[0].keyframe_points[0].interpolation
if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]:
gltf2_io_debug.print_console("WARNING",
"Baking animation because of an unsupported interpolation method: {}".format(
interpolation)
)
return True
if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in channels):
# There are different interpolation methods in one action group
gltf2_io_debug.print_console("WARNING",
"Baking animation because there are different "
"interpolation methods in one channel"
)
return True
if not all_equal([len(c.keyframe_points) for c in channels]):
gltf2_io_debug.print_console("WARNING",
"Baking animation because the number of keyframes is not "
"equal for all channel tracks")
return True
if len(channels[0].keyframe_points) <= 1:
# we need to bake to 'STEP', as at least two keyframes are required to interpolate
return True
    if not all(all_equal(key_times) for key_times in zip(*[[k.co[0] for k in c.keyframe_points] for c in channels])):
# The channels have differently located keyframes
gltf2_io_debug.print_console("WARNING",
"Baking animation because of differently located keyframes in one channel")
return True
return False
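The tangent construction above scales the offset between a keyframe value and its Bezier handle by 3 over the time gap to the neighbouring keyframe. A standalone numeric sketch of that formula (illustrative made-up values, no Blender required):
# Illustration of the cubic-spline tangent formula used in gather_keyframes.
t_prev, t_cur = 10.0, 20.0    # keyframe times in frames (made-up values)
co_y = 2.0                    # keyframe value
handle_left_y = 1.5           # left Bezier handle value
in_tangent = 3.0 * (co_y - handle_left_y) / (t_cur - t_prev)
print(in_tangent)             # 0.15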
| 40.104478
| 119
| 0.612579
|
e8a5bdd762b935e7d42106231722730cbb3e7399
| 1,263
|
py
|
Python
|
report/bazel.py
|
agasparovic-sabre/bazel-coverage-report
|
7ca9e5775a69cc0115caf58974c2a7bc55bcf17b
|
[
"Apache-2.0"
] | 19
|
2018-08-01T07:54:32.000Z
|
2021-11-17T23:06:10.000Z
|
report/bazel.py
|
Marcus-Rosti/bazel-coverage-report
|
c6098ee67130670ab950156777d795627d61481c
|
[
"Apache-2.0"
] | 3
|
2018-10-22T17:14:54.000Z
|
2020-03-05T08:09:25.000Z
|
report/bazel.py
|
Marcus-Rosti/bazel-coverage-report
|
c6098ee67130670ab950156777d795627d61481c
|
[
"Apache-2.0"
] | 34
|
2018-10-22T20:48:45.000Z
|
2022-02-14T16:17:35.000Z
|
# Copyright 2018 The Bazel Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interacts with Bazel's directory layout."""
import os
_bazel = {}
def runfiles():
"""Returns the runfiles manifest entries.
Returns: A dictionary of runfiles to their real absolute paths.
"""
  if "_runfiles" not in _bazel:
_bazel["_runfiles"] = {}
with open(os.path.join(os.getenv("RUNFILES_MANIFEST_FILE")), 'r') as f:
for l in f:
tokens = l.strip().split(' ')
if len(tokens) == 2:
_bazel["_runfiles"][tokens[0]] = tokens[1]
return _bazel["_runfiles"]
def runfile(relative_path):
"""Returns the real absolute path of a runfile relative path.
The runfiles manifest is used."""
return runfiles()[relative_path]
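A minimal usage sketch (assumptions: the script is launched by Bazel so that RUNFILES_MANIFEST_FILE is set, the module is importable as `report.bazel` per the path above, and the manifest-relative path shown is a made-up example):
from report import bazel

# Resolve a manifest-relative path to its real location on disk.
template_path = bazel.runfile("my_workspace/report/templates/index.html")  # hypothetical entry
with open(template_path) as f:
    html = f.read()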
| 30.804878
| 75
| 0.704671
|
400d9940e7e8f264daa0b9ad04e2c9eb24130f24
| 3,496
|
py
|
Python
|
data/dataset/dataset.py
|
AUT-Data-Group/NeurIPS2021-traffic4cast
|
2575ce1b98cebeadba645d3826ac962f4d0f3721
|
[
"Apache-2.0"
] | 19
|
2021-06-20T21:35:49.000Z
|
2022-03-01T14:49:19.000Z
|
data/dataset/dataset.py
|
ejhill24/NeurIPS2021-traffic4cast
|
5421223d1f43f43a6bf5bcc8659847b1335492f3
|
[
"Apache-2.0"
] | 3
|
2021-06-15T19:57:39.000Z
|
2021-09-09T15:52:56.000Z
|
data/dataset/dataset.py
|
ejhill24/NeurIPS2021-traffic4cast
|
5421223d1f43f43a6bf5bcc8659847b1335492f3
|
[
"Apache-2.0"
] | 9
|
2021-06-20T21:37:19.000Z
|
2021-12-15T15:30:05.000Z
|
# Copyright 2021 Institute of Advanced Research in Artificial Intelligence (IARAI) GmbH.
# IARAI licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Optional
from typing import Tuple
import numpy as np
import torch
from torch.utils.data import Dataset
from competition.competition_constants import MAX_TEST_SLOT_INDEX
from competition.prepare_test_data.prepare_test_data import prepare_test
from util.h5_util import load_h5_file
class T4CDataset(Dataset):
def __init__(
self,
root_dir: str,
file_filter: str = None,
limit: Optional[int] = None,
transform: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
use_npy: bool = False,
):
"""torch dataset from training data.
Parameters
----------
root_dir
data root folder, by convention should be `data/raw`, see `data/README.md`. All `**/training/*8ch.h5` will be added to the dataset.
file_filter: str
            filter files under `root_dir`, defaults to `"**/training/*8ch.h5"`
limit
truncate dataset size
transform
            transform applied to both the input and label
        use_npy
            if True, load pre-converted `.npy` arrays (matching `**/training_npy/*.npy`)
            instead of the h5 files
        """
self.root_dir = root_dir
self.limit = limit
self.files = []
self.file_filter = file_filter
self.use_npy = use_npy
if self.file_filter is None:
self.file_filter = "**/training/*8ch.h5"
if self.use_npy:
self.file_filter = "**/training_npy/*.npy"
self.transform = transform
self._load_dataset()
def _load_dataset(self):
self.files = list(Path(self.root_dir).rglob(self.file_filter))
def _load_h5_file(self, fn, sl: Optional[slice]):
if self.use_npy:
return np.load(fn)
else:
return load_h5_file(fn, sl=sl)
def __len__(self):
size_240_slots_a_day = len(self.files) * MAX_TEST_SLOT_INDEX
if self.limit is not None:
return min(size_240_slots_a_day, self.limit)
return size_240_slots_a_day
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        if idx >= self.__len__():
raise IndexError("Index out of bounds")
file_idx = idx // MAX_TEST_SLOT_INDEX
start_hour = idx % MAX_TEST_SLOT_INDEX
two_hours = self._load_h5_file(self.files[file_idx], sl=slice(start_hour, start_hour + 12 * 2 + 1))
input_data, output_data = prepare_test(two_hours)
input_data = self._to_torch(input_data)
output_data = self._to_torch(output_data)
if self.transform is not None:
input_data = self.transform(input_data)
output_data = self.transform(output_data)
return input_data, output_data
def _to_torch(self, data):
data = torch.from_numpy(data)
data = data.to(dtype=torch.float)
return data
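A usage sketch (not part of the project source; it assumes the raw competition data sits under `data/raw` as the docstring describes, and that the repository root is on PYTHONPATH so the module imports as `data.dataset.dataset`):
from torch.utils.data import DataLoader

from data.dataset.dataset import T4CDataset

dataset = T4CDataset(root_dir="data/raw", limit=240)   # cap at one file's worth of slots
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

inputs, labels = next(iter(loader))                    # float tensors produced by _to_torch
print(inputs.shape, labels.shape)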
| 34.96
| 143
| 0.662185
|
2d4bfb700d11fdc010f40ad5030ad709b81e0f23
| 3,360
|
py
|
Python
|
script.module.nanscrapers/lib/nanscrapers/scraperplugins/twoddl.py
|
TheWardoctor/BadWolf
|
a86184a0bd21ffbd255ae264e3f682eff58d2eef
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:38:10.000Z
|
2019-03-05T09:38:10.000Z
|
script.module.nanscrapers/lib/nanscrapers/scraperplugins/twoddl.py
|
TheWardoctor/BadWolf
|
a86184a0bd21ffbd255ae264e3f682eff58d2eef
|
[
"Apache-2.0"
] | null | null | null |
script.module.nanscrapers/lib/nanscrapers/scraperplugins/twoddl.py
|
TheWardoctor/BadWolf
|
a86184a0bd21ffbd255ae264e3f682eff58d2eef
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
import re,xbmc,urllib,urlparse
from ..scraper import Scraper
import requests
from ..common import clean_title,clean_search, filter_host, get_rd_domains
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
# kept movies off
class twoddl(Scraper):
domains = ['http://twoddl.co']
name = "TwoDDL"
sources = []
def __init__(self):
self.base_link = 'http://twoddl.co'
self.sources = []
# def scrape_movie(self, title, year, imdb, debrid=False):
# try:
# start_url = "%s/search/%s+%s/" % (self.base_link, title.replace(' ','+').lower(),year)
# headers = {'User_Agent':User_Agent}
# OPEN = open_url(start_url,headers=headers,timeout=5).content
# content = re.compile('<h2><a href="([^"]+)"',re.DOTALL).findall(OPEN)
# for url in content:
# self.get_source(url)
# return self.sources
# except Exception, argument:
# return self.sources
def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
if not debrid:
return []
season_url = "0%s"%season if len(season)<2 else season
episode_url = "0%s"%episode if len(episode)<2 else episode
sea_epi ='s%se%s'%(season_url,episode_url)
start_url = "%s/?s=%s+%s" % (self.base_link, title.replace(' ','+').lower(),sea_epi)
headers = {'User_Agent':User_Agent}
OPEN = requests.get(start_url,headers=headers,timeout=5).content
content = re.compile('<h2><a href="([^"]+)"',re.DOTALL).findall(OPEN)
for url in content:
if clean_title(title).lower() in clean_title(url).lower():
self.get_source(url)
return self.sources
except Exception, argument:
return self.sources
def get_source(self,url):
try:
headers = {'User_Agent':User_Agent}
links = requests.get(url,headers=headers,timeout=3).content
LINK = re.compile('href="([^"]+)" rel="nofollow"',re.DOTALL).findall(links)
for url in LINK:
if '.rar' not in url:
if '.srt' not in url:
if '1080' in url:
res = '1080p'
elif '720' in url:
res = '720p'
elif 'HDTV' in url:
res = 'HD'
                        else:
                            res = 'SD'  # no resolution marker found; fall back to standard definition
host = url.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
# if not filter_host(host):
# continue
# if debrid == "true":
rd_domains = get_rd_domains()
if host in rd_domains:
self.sources.append({'source': host,'quality': res,'scraper': self.name,'url': url,'direct': False, 'debridonly': True})
except:pass
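A standalone illustration of the link extraction that get_source performs (the HTML snippet and host names are invented; only the scraper's regular expression and host parsing are exercised):
import re

sample_html = '''
<a href="http://examplehost.com/show.s01e01.720p.mkv" rel="nofollow">mirror 1</a>
<a href="http://otherhost.net/show.s01e01.1080p.mkv" rel="nofollow">mirror 2</a>
'''

links = re.compile('href="([^"]+)" rel="nofollow"', re.DOTALL).findall(sample_html)
for url in links:
    host = url.split('//')[1].replace('www.', '').split('/')[0].lower()
    print(host, url)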
| 40.481928
| 148
| 0.482143
|
38ae84bc920c343b27497d0aa03bb3ebf29f085b
| 216
|
py
|
Python
|
src/aspire/image/__init__.py
|
PrincetonUniversity/ASPIRE-Python
|
1bff8d3884183203bd77695a76bccb1efc909fd3
|
[
"MIT"
] | 7
|
2018-11-07T16:45:35.000Z
|
2020-01-10T16:54:26.000Z
|
src/aspire/image/__init__.py
|
PrincetonUniversity/ASPIRE-Python
|
1bff8d3884183203bd77695a76bccb1efc909fd3
|
[
"MIT"
] | 1
|
2019-04-05T18:41:39.000Z
|
2019-04-05T18:41:39.000Z
|
src/aspire/image/__init__.py
|
PrincetonUniversity/ASPIRE-Python
|
1bff8d3884183203bd77695a76bccb1efc909fd3
|
[
"MIT"
] | 2
|
2019-06-04T17:01:53.000Z
|
2019-07-08T19:01:40.000Z
|
from .image import (
BasisImage,
BispecImage,
CartesianImage,
FBBasisImage,
Image,
PolarImage,
_im_translate2,
normalize_bg,
)
from .preprocess import crop_pad, downsample, fuzzy_mask
| 18
| 56
| 0.699074
|
fe20f9a28012b6a5cfbbfbb95c9fab52911638bd
| 296
|
py
|
Python
|
pacos2/discr_impulse.py
|
jadnohra/PaCoS
|
0b97cf566d89d99f85c55581beb9ad701395d4fe
|
[
"MIT"
] | null | null | null |
pacos2/discr_impulse.py
|
jadnohra/PaCoS
|
0b97cf566d89d99f85c55581beb9ad701395d4fe
|
[
"MIT"
] | null | null | null |
pacos2/discr_impulse.py
|
jadnohra/PaCoS
|
0b97cf566d89d99f85c55581beb9ad701395d4fe
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from .interfaces import TimeInterval, IClock
from .discr_evt_engine import DiscreteEventEngine
class IDiscreteImpulse(ABC):
@abstractmethod
def generate(self, engine: DiscreteEventEngine, clock: IClock
) -> TimeInterval:
pass
| 29.6
| 65
| 0.736486
|
ec2835854183a388605513a174d1d60252ab466f
| 34,088
|
py
|
Python
|
obswebsocket/events.py
|
a1rwulf/obs-websocket-py
|
76cdd5b69d1eda312bce8887cfe386fc2350985c
|
[
"MIT"
] | null | null | null |
obswebsocket/events.py
|
a1rwulf/obs-websocket-py
|
76cdd5b69d1eda312bce8887cfe386fc2350985c
|
[
"MIT"
] | null | null | null |
obswebsocket/events.py
|
a1rwulf/obs-websocket-py
|
76cdd5b69d1eda312bce8887cfe386fc2350985c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #
# (Generated on 2020-05-16 15:31:50.833979) #
from .base_classes import Baseevents
class SourceCreated(Baseevents):
"""A source has been created. A source can be an input, a scene or a transition.
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Source type. Can be "input", "scene", "transition" or "filter".
*sourceKind*
type: String
Source kind.
*sourceSettings*
type: Object
Source settings
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceCreated'
self.datain['sourceName'] = None
self.datain['sourceKind'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
def getSourcename(self):
return self.datain['sourceName']
def getSourcekind(self):
return self.datain['sourceKind']
def getSourcetype(self):
return self.datain['sourceType']
def getSourcesettings(self):
return self.datain['sourceSettings']
class SourceDestroyed(Baseevents):
"""A source has been destroyed/removed. A source can be an input, a scene or a transition.
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Source type. Can be "input", "scene", "transition" or "filter".
*sourceKind*
type: String
Source kind.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceDestroyed'
self.datain['sourceName'] = None
self.datain['sourceKind'] = None
self.datain['sourceType'] = None
def getSourcename(self):
return self.datain['sourceName']
def getSourcekind(self):
return self.datain['sourceKind']
def getSourcetype(self):
return self.datain['sourceType']
class SourceVolumeChanged(Baseevents):
"""The volume of a source has changed.
:Returns:
*sourceName*
type: String
Source name
*volume*
type: float
Source volume
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceVolumeChanged'
self.datain['sourceName'] = None
self.datain['volume'] = None
def getSourcename(self):
return self.datain['sourceName']
def getVolume(self):
return self.datain['volume']
class SourceMuteStateChanged(Baseevents):
"""A source has been muted or unmuted.
:Returns:
*sourceName*
type: String
Source name
*muted*
type: boolean
Mute status of the source
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceMuteStateChanged'
self.datain['sourceName'] = None
self.datain['muted'] = None
def getSourcename(self):
return self.datain['sourceName']
def getMuted(self):
return self.datain['muted']
class SourceAudioSyncOffsetChanged(Baseevents):
"""The audio sync offset of a source has changed.
:Returns:
*sourceName*
type: String
Source name
*syncOffset*
type: int
Audio sync offset of the source (in nanoseconds)
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceAudioSyncOffsetChanged'
self.datain['sourceName'] = None
self.datain['syncOffset'] = None
def getSourcename(self):
return self.datain['sourceName']
def getSyncoffset(self):
return self.datain['syncOffset']
class SourceAudioMixersChanged(Baseevents):
"""Audio mixer routing changed on a source.
:Returns:
*sourceName*
type: String
Source name
*mixers*
type: Array<Object>
Routing status of the source for each audio mixer (array of 6 values)
*mixers.*.id*
type: int
Mixer number
*mixers.*.enabled*
type: boolean
Routing status
*hexMixersValue*
type: String
Raw mixer flags (little-endian, one bit per mixer) as an hexadecimal value
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceAudioMixersChanged'
self.datain['sourceName'] = None
self.datain['mixers'] = None
self.datain['hexMixersValue'] = None
def getSourcename(self):
return self.datain['sourceName']
def getMixers(self):
return self.datain['mixers']
def getHexmixersvalue(self):
return self.datain['hexMixersValue']
class SourceRenamed(Baseevents):
"""A source has been renamed.
:Returns:
*previousName*
type: String
Previous source name
*newName*
type: String
New source name
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceRenamed'
self.datain['newName'] = None
self.datain['previousName'] = None
def getNewname(self):
return self.datain['newName']
def getPreviousname(self):
return self.datain['previousName']
class SourceFilterAdded(Baseevents):
"""A filter was added to a source.
:Returns:
*sourceName*
type: String
Source name
*filterName*
type: String
Filter name
*filterType*
type: String
Filter type
*filterSettings*
type: Object
Filter settings
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceFilterAdded'
self.datain['sourceName'] = None
self.datain['filterName'] = None
self.datain['filterSettings'] = None
self.datain['filterType'] = None
def getSourcename(self):
return self.datain['sourceName']
def getFiltername(self):
return self.datain['filterName']
def getFiltersettings(self):
return self.datain['filterSettings']
def getFiltertype(self):
return self.datain['filterType']
class SourceFilterRemoved(Baseevents):
"""A filter was removed from a source.
:Returns:
*sourceName*
type: String
Source name
*filterName*
type: String
Filter name
*filterType*
type: String
Filter type
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceFilterRemoved'
self.datain['sourceName'] = None
self.datain['filterName'] = None
self.datain['filterType'] = None
def getSourcename(self):
return self.datain['sourceName']
def getFiltername(self):
return self.datain['filterName']
def getFiltertype(self):
return self.datain['filterType']
class SourceFilterVisibilityChanged(Baseevents):
"""The visibility/enabled state of a filter changed
:Returns:
*sourceName*
type: String
Source name
*filterName*
type: String
Filter name
*filterEnabled*
type: Boolean
New filter state
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceFilterVisibilityChanged'
self.datain['sourceName'] = None
self.datain['filterName'] = None
self.datain['filterEnabled'] = None
def getSourcename(self):
return self.datain['sourceName']
def getFiltername(self):
return self.datain['filterName']
def getFilterenabled(self):
return self.datain['filterEnabled']
class SourceFiltersReordered(Baseevents):
"""Filters in a source have been reordered.
:Returns:
*sourceName*
type: String
Source name
*filters*
type: Array<Object>
Ordered Filters list
*filters.*.name*
type: String
Filter name
*filters.*.type*
type: String
Filter type
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceFiltersReordered'
self.datain['sourceName'] = None
self.datain['filters'] = None
def getSourcename(self):
return self.datain['sourceName']
def getFilters(self):
return self.datain['filters']
class PreviewSceneChanged(Baseevents):
"""The selected preview scene has changed (only available in Studio Mode).
:Returns:
*scene_name*
type: String
Name of the scene being previewed.
*sources*
type: Array<SceneItem>
List of sources composing the scene. Same specification as [`GetCurrentScene`](#getcurrentscene).
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'PreviewSceneChanged'
self.datain['sources'] = None
self.datain['scene-name'] = None
def getSources(self):
return self.datain['sources']
def getSceneName(self):
return self.datain['scene-name']
class StudioModeSwitched(Baseevents):
"""Studio Mode has been enabled or disabled.
:Returns:
*new_state*
type: boolean
The new enabled state of Studio Mode.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'StudioModeSwitched'
self.datain['new-state'] = None
def getNewState(self):
return self.datain['new-state']
class ReplayStarting(Baseevents):
"""A request to start the replay buffer has been issued.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ReplayStarting'
class ReplayStarted(Baseevents):
"""Replay Buffer started successfully
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ReplayStarted'
class ReplayStopping(Baseevents):
"""A request to stop the replay buffer has been issued.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ReplayStopping'
class ReplayStopped(Baseevents):
"""Replay Buffer stopped successfully
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ReplayStopped'
class SwitchScenes(Baseevents):
"""Indicates a scene change.
:Returns:
*scene_name*
type: String
The new scene.
*sources*
type: Array<SceneItem>
List of scene items in the new scene. Same specification as [`GetCurrentScene`](#getcurrentscene).
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SwitchScenes'
self.datain['sources'] = None
self.datain['scene-name'] = None
def getSources(self):
return self.datain['sources']
def getSceneName(self):
return self.datain['scene-name']
class ScenesChanged(Baseevents):
"""The scene list has been modified.
Scenes have been added, removed, or renamed.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ScenesChanged'
class SceneCollectionChanged(Baseevents):
"""Triggered when switching to another scene collection or when renaming the current scene collection.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneCollectionChanged'
class SceneCollectionListChanged(Baseevents):
"""Triggered when a scene collection is created, added, renamed, or removed.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneCollectionListChanged'
class ProfileChanged(Baseevents):
"""Triggered when switching to another profile or when renaming the current profile.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ProfileChanged'
class ProfileListChanged(Baseevents):
"""Triggered when a profile is created, added, renamed, or removed.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'ProfileListChanged'
class Heartbeat(Baseevents):
"""Emitted every 2 seconds after enabling it by calling SetHeartbeat.
:Returns:
*pulse*
type: boolean
Toggles between every JSON message as an "I am alive" indicator.
*current_profile*
type: string (optional)
Current active profile.
*current_scene*
type: string (optional)
Current active scene.
*streaming*
type: boolean (optional)
Current streaming state.
*total_stream_time*
type: int (optional)
Total time (in seconds) since the stream started.
*total_stream_bytes*
type: int (optional)
Total bytes sent since the stream started.
*total_stream_frames*
type: int (optional)
Total frames streamed since the stream started.
*recording*
type: boolean (optional)
Current recording state.
*total_record_time*
type: int (optional)
Total time (in seconds) since recording started.
*total_record_bytes*
type: int (optional)
Total bytes recorded since the recording started.
*total_record_frames*
type: int (optional)
Total frames recorded since the recording started.
*stats*
type: OBSStats
OBS Stats
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'Heartbeat'
self.datain['current-profile'] = None
self.datain['total-record-bytes'] = None
self.datain['total-stream-bytes'] = None
self.datain['stats'] = None
self.datain['total-stream-frames'] = None
self.datain['total-stream-time'] = None
self.datain['pulse'] = None
self.datain['total-record-time'] = None
self.datain['recording'] = None
self.datain['streaming'] = None
self.datain['total-record-frames'] = None
self.datain['current-scene'] = None
def getCurrentProfile(self):
return self.datain['current-profile']
def getTotalRecordBytes(self):
return self.datain['total-record-bytes']
def getTotalStreamBytes(self):
return self.datain['total-stream-bytes']
def getStats(self):
return self.datain['stats']
def getTotalStreamFrames(self):
return self.datain['total-stream-frames']
def getTotalStreamTime(self):
return self.datain['total-stream-time']
def getPulse(self):
return self.datain['pulse']
def getTotalRecordTime(self):
return self.datain['total-record-time']
def getRecording(self):
return self.datain['recording']
def getStreaming(self):
return self.datain['streaming']
def getTotalRecordFrames(self):
return self.datain['total-record-frames']
def getCurrentScene(self):
return self.datain['current-scene']
class BroadcastCustomMessage(Baseevents):
"""A custom broadcast message was received
:Returns:
*realm*
type: String
Identifier provided by the sender
*data*
type: Object
User-defined data
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'BroadcastCustomMessage'
self.datain['data'] = None
self.datain['realm'] = None
def getData(self):
return self.datain['data']
def getRealm(self):
return self.datain['realm']
class RecordingStarting(Baseevents):
"""A request to start recording has been issued.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'RecordingStarting'
class RecordingStarted(Baseevents):
"""Recording started successfully.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'RecordingStarted'
class RecordingStopping(Baseevents):
"""A request to stop recording has been issued.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'RecordingStopping'
class RecordingStopped(Baseevents):
"""Recording stopped successfully.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'RecordingStopped'
class RecordingPaused(Baseevents):
"""Current recording paused
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'RecordingPaused'
class RecordingResumed(Baseevents):
"""Current recording resumed
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'RecordingResumed'
class StreamStarting(Baseevents):
"""A request to start streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'StreamStarting'
self.datain['preview-only'] = None
def getPreviewOnly(self):
return self.datain['preview-only']
class StreamStarted(Baseevents):
"""Streaming started successfully.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'StreamStarted'
class StreamStopping(Baseevents):
"""A request to stop streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'StreamStopping'
self.datain['preview-only'] = None
def getPreviewOnly(self):
return self.datain['preview-only']
class StreamStopped(Baseevents):
"""Streaming stopped successfully.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'StreamStopped'
class StreamStatus(Baseevents):
"""Emit every 2 seconds.
:Returns:
*streaming*
type: boolean
Current streaming state.
*recording*
type: boolean
Current recording state.
*replay_buffer_active*
type: boolean
Replay Buffer status
*bytes_per_sec*
type: int
Amount of data per second (in bytes) transmitted by the stream encoder.
*kbits_per_sec*
type: int
Amount of data per second (in kilobits) transmitted by the stream encoder.
*strain*
type: double
Percentage of dropped frames.
*total_stream_time*
type: int
Total time (in seconds) since the stream started.
*num_total_frames*
type: int
Total number of frames transmitted since the stream started.
*num_dropped_frames*
type: int
Number of frames dropped by the encoder since the stream started.
*fps*
type: double
Current framerate.
*render_total_frames*
type: int
Number of frames rendered
*render_missed_frames*
type: int
Number of frames missed due to rendering lag
*output_total_frames*
type: int
Number of frames outputted
*output_skipped_frames*
type: int
Number of frames skipped due to encoding lag
*average_frame_time*
type: double
Average frame time (in milliseconds)
*cpu_usage*
type: double
Current CPU usage (percentage)
*memory_usage*
type: double
Current RAM usage (in megabytes)
*free_disk_space*
type: double
Free recording disk space (in megabytes)
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'StreamStatus'
self.datain['num-dropped-frames'] = None
self.datain['memory-usage'] = None
self.datain['num-total-frames'] = None
self.datain['output-total-frames'] = None
self.datain['strain'] = None
self.datain['kbits-per-sec'] = None
self.datain['render-missed-frames'] = None
self.datain['total-stream-time'] = None
self.datain['replay-buffer-active'] = None
self.datain['preview-only'] = None
self.datain['bytes-per-sec'] = None
self.datain['recording'] = None
self.datain['streaming'] = None
self.datain['output-skipped-frames'] = None
self.datain['fps'] = None
self.datain['render-total-frames'] = None
self.datain['average-frame-time'] = None
self.datain['cpu-usage'] = None
self.datain['free-disk-space'] = None
def getNumDroppedFrames(self):
return self.datain['num-dropped-frames']
def getMemoryUsage(self):
return self.datain['memory-usage']
def getNumTotalFrames(self):
return self.datain['num-total-frames']
def getOutputTotalFrames(self):
return self.datain['output-total-frames']
def getStrain(self):
return self.datain['strain']
def getKbitsPerSec(self):
return self.datain['kbits-per-sec']
def getRenderMissedFrames(self):
return self.datain['render-missed-frames']
def getTotalStreamTime(self):
return self.datain['total-stream-time']
def getReplayBufferActive(self):
return self.datain['replay-buffer-active']
def getPreviewOnly(self):
return self.datain['preview-only']
def getBytesPerSec(self):
return self.datain['bytes-per-sec']
def getRecording(self):
return self.datain['recording']
def getStreaming(self):
return self.datain['streaming']
def getOutputSkippedFrames(self):
return self.datain['output-skipped-frames']
def getFps(self):
return self.datain['fps']
def getRenderTotalFrames(self):
return self.datain['render-total-frames']
def getAverageFrameTime(self):
return self.datain['average-frame-time']
def getCpuUsage(self):
return self.datain['cpu-usage']
def getFreeDiskSpace(self):
return self.datain['free-disk-space']
class Exiting(Baseevents):
"""OBS is exiting.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'Exiting'
class SwitchTransition(Baseevents):
"""The active transition has been changed.
:Returns:
*transition_name*
type: String
The name of the new active transition.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SwitchTransition'
self.datain['transition-name'] = None
def getTransitionName(self):
return self.datain['transition-name']
class TransitionListChanged(Baseevents):
"""The list of available transitions has been modified.
Transitions have been added, removed, or renamed.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'TransitionListChanged'
class TransitionDurationChanged(Baseevents):
"""The active transition duration has been changed.
:Returns:
*new_duration*
type: int
New transition duration.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'TransitionDurationChanged'
self.datain['new-duration'] = None
def getNewDuration(self):
return self.datain['new-duration']
class TransitionBegin(Baseevents):
"""A transition (other than "cut") has begun.
:Returns:
*name*
type: String
Transition name.
*type*
type: String
Transition type.
*duration*
type: int
Transition duration (in milliseconds). Will be -1 for any transition with a fixed duration, such as a Stinger, due to limitations of the OBS API.
*from_scene*
type: String
Source scene of the transition
*to_scene*
type: String
Destination scene of the transition
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'TransitionBegin'
self.datain['duration'] = None
self.datain['to-scene'] = None
self.datain['type'] = None
self.datain['name'] = None
self.datain['from-scene'] = None
def getDuration(self):
return self.datain['duration']
def getToScene(self):
return self.datain['to-scene']
def getType(self):
return self.datain['type']
def getName(self):
return self.datain['name']
def getFromScene(self):
return self.datain['from-scene']
class TransitionEnd(Baseevents):
"""A transition (other than "cut") has ended.
Please note that the `from-scene` field is not available in TransitionEnd.
:Returns:
*name*
type: String
Transition name.
*type*
type: String
Transition type.
*duration*
type: int
Transition duration (in milliseconds).
*to_scene*
type: String
Destination scene of the transition
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'TransitionEnd'
self.datain['duration'] = None
self.datain['to-scene'] = None
self.datain['type'] = None
self.datain['name'] = None
def getDuration(self):
return self.datain['duration']
def getToScene(self):
return self.datain['to-scene']
def getType(self):
return self.datain['type']
def getName(self):
return self.datain['name']
class TransitionVideoEnd(Baseevents):
"""A stinger transition has finished playing its video.
:Returns:
*name*
type: String
Transition name.
*type*
type: String
Transition type.
*duration*
type: int
Transition duration (in milliseconds).
*from_scene*
type: String
Source scene of the transition
*to_scene*
type: String
Destination scene of the transition
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'TransitionVideoEnd'
self.datain['duration'] = None
self.datain['to-scene'] = None
self.datain['type'] = None
self.datain['name'] = None
self.datain['from-scene'] = None
def getDuration(self):
return self.datain['duration']
def getToScene(self):
return self.datain['to-scene']
def getType(self):
return self.datain['type']
def getName(self):
return self.datain['name']
def getFromScene(self):
return self.datain['from-scene']
class SourceOrderChanged(Baseevents):
"""Scene items have been reordered.
:Returns:
*scene_name*
type: String
Name of the scene where items have been reordered.
*scene_items*
type: Array<Object>
Ordered list of scene items
*scene_items.*.source_name*
type: String
Item source name
*scene_items.*.item_id*
type: int
Scene item unique ID
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SourceOrderChanged'
self.datain['scene-items'] = None
self.datain['scene-name'] = None
def getSceneItems(self):
return self.datain['scene-items']
def getSceneName(self):
return self.datain['scene-name']
class SceneItemAdded(Baseevents):
"""An item has been added to the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item added to the scene.
*item_id*
type: int
Scene item ID
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemAdded'
self.datain['item-id'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
def getItemId(self):
return self.datain['item-id']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
class SceneItemRemoved(Baseevents):
"""An item has been removed from the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item removed from the scene.
*item_id*
type: int
Scene item ID
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemRemoved'
self.datain['item-id'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
def getItemId(self):
return self.datain['item-id']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
class SceneItemVisibilityChanged(Baseevents):
"""An item's visibility has been toggled.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Scene item ID
*item_visible*
type: boolean
New visibility state of the item.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemVisibilityChanged'
self.datain['item-id'] = None
self.datain['item-visible'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
def getItemId(self):
return self.datain['item-id']
def getItemVisible(self):
return self.datain['item-visible']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
class SceneItemLockChanged(Baseevents):
"""An item's locked status has been toggled.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Scene item ID
*item_locked*
type: boolean
New locked state of the item.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemLockChanged'
self.datain['item-id'] = None
self.datain['item-locked'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
def getItemId(self):
return self.datain['item-id']
def getItemLocked(self):
return self.datain['item-locked']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
class SceneItemTransformChanged(Baseevents):
"""An item's transform has been changed.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Scene item ID
*transform*
type: SceneItemTransform
Scene item transform properties
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemTransformChanged'
self.datain['item-id'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
self.datain['transform'] = None
def getItemId(self):
return self.datain['item-id']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
def getTransform(self):
return self.datain['transform']
class SceneItemSelected(Baseevents):
"""A scene item is selected.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Name of the item in the scene.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemSelected'
self.datain['item-id'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
def getItemId(self):
return self.datain['item-id']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
class SceneItemDeselected(Baseevents):
"""A scene item is deselected.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Name of the item in the scene.
"""
def __init__(self):
Baseevents.__init__(self)
self.name = 'SceneItemDeselected'
self.datain['item-id'] = None
self.datain['item-name'] = None
self.datain['scene-name'] = None
def getItemId(self):
return self.datain['item-id']
def getItemName(self):
return self.datain['item-name']
def getSceneName(self):
return self.datain['scene-name']
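A usage sketch showing how these generated event classes are typically consumed through the package's obsws client (host, port and password are placeholders):
import time

from obswebsocket import obsws, events

def on_switch(event):
    # 'event' is an instance of the SwitchScenes class defined above
    print("Switched to scene:", event.getSceneName())

ws = obsws("localhost", 4444, "secret")        # placeholder connection details
ws.register(on_switch, events.SwitchScenes)    # only fire the callback for SwitchScenes
ws.connect()
try:
    time.sleep(10)                             # let events arrive for a while
finally:
    ws.disconnect()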
| 25.120118
| 157
| 0.594608
|
a9b3a0f06d3eee3f5ce121fffa7018e60cbb1f97
| 25,277
|
py
|
Python
|
openquake/hmtk/tests/seismicity/max_magnitude/test_maximum_magnitude.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 1
|
2019-08-01T00:28:24.000Z
|
2019-08-01T00:28:24.000Z
|
openquake/hmtk/tests/seismicity/max_magnitude/test_maximum_magnitude.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 4
|
2018-08-31T14:14:35.000Z
|
2021-10-11T12:53:13.000Z
|
openquake/hmtk/tests/seismicity/max_magnitude/test_maximum_magnitude.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 3
|
2018-08-31T14:11:00.000Z
|
2019-07-17T10:06:02.000Z
|
#!/usr/bin/env python
# LICENSE
#
# Copyright (C) 2010-2018 GEM Foundation, G. Weatherill, M. Pagani, D. Monelli
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein is released as
# a prototype implementation on behalf of scientists and engineers working
# within the GEM Foundation (Global Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the hope that
# it will be useful to the scientific, engineering, disaster risk and software
# design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software developers,
# as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be directed to
# the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# The GEM Foundation, and the authors of the software, assume no liability for
# use of the software.
'''Prototype unittest code for mmax module'''
import os
import warnings
import unittest
import numpy as np
from openquake.hmtk.parsers.catalogue import CsvCatalogueParser
from openquake.hmtk.seismicity.max_magnitude.base import (_get_observed_mmax,
_get_magnitude_vector_properties)
from openquake.hmtk.seismicity.max_magnitude.cumulative_moment_release import \
CumulativeMoment
from openquake.hmtk.seismicity.max_magnitude.kijko_sellevol_fixed_b import \
KijkoSellevolFixedb
from openquake.hmtk.seismicity.max_magnitude.kijko_sellevol_bayes import \
KijkoSellevolBayes
from openquake.hmtk.seismicity.max_magnitude.kijko_nonparametric_gaussian import \
KijkoNonParametricGaussian, _get_exponential_spaced_values
BASE_DATA_PATH = os.path.join(os.path.dirname(__file__),
'./../completeness/data')
class MmaxTestCase(unittest.TestCase):
'''Testing class for Mmax functions'''
def setUp(self):
'''Set up the test class'''
self.config = {'algorithm': None,
'input_mmax': None,
'input_mmax_uncertainty': None,
'maximum_iterations': None,
'tolerance': None,
'input_mmin': None,
'b-value': 1.0,
'sigma-b': 0.1,
'number_samples': 51,
'number_earthquakes': 100,
'number_bootstraps': None}
#self.completeness = np.array([])
def test_get_observed_mmax_good_data(self):
# Asserts that the observed Mmax and corresponding sigma MMax are
        # returned when data are available
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': np.array([0.1, 0.2, 0.3, 0.2, 0.1])
}
# Test 1: Finds the mmax from the catalogue with defined sigma
mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
self.assertAlmostEqual(mmax, 7.6)
self.assertAlmostEqual(mmax_sig, 0.3)
def test_get_observed_mmax_from_input(self):
# Tests that the input mmax and its uncertainty are returned when
# specified in the config
# Test 3: Finds the mmax from the input file
self.config['input_mmax'] = 8.5
self.config['input_mmax_uncertainty'] = 0.35
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': None
}
mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
self.assertAlmostEqual(mmax, 8.5)
self.assertAlmostEqual(mmax_sig, 0.35)
def test_get_observed_max_no_sigma_error(self):
"""
When an input mmax is given in the config, but no uncertainty is
specified assert that this raises an error
"""
self.config['input_mmax'] = 8.5
self.config['input_mmax_uncertainty'] = None
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': None
}
self._get_observed_mmax_error(test_catalogue, self.config)
def test_bad_sigma_magnitude_mmax_error(self):
"""
If reading mmax from the catalogue, three criteria must be met
        in order to retrieve the uncertainty. sigmaMagnitude must be a
numpy.ndarray, have the same length as magnitude and not consist
entirely of NaNs
"""
self.config['input_mmax'] = None
self.config['input_mmax_uncertainty'] = None
# 1st case - sigmaMagnitude is not an np.ndarray
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': None
}
self._get_observed_mmax_error(test_catalogue, self.config)
        # 2nd case - sigmaMagnitude has a different length from magnitude
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': np.array([])
}
self._get_observed_mmax_error(test_catalogue, self.config)
# 3rd case, is np.ndarray of equal length to magnitude but entirely
# NaNs
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': np.array([np.nan] * 5)
}
self._get_observed_mmax_error(test_catalogue, self.config)
def test_observed_mmax_catalogue_uncertainty_config(self):
# Tests the case when the observed Mmax must be read from the catalogue
# but the uncertainty is specified in the config
self.config['input_mmax'] = None
self.config['input_mmax_uncertainty'] = 0.5
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': np.array([])
}
mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
self.assertAlmostEqual(mmax, 7.6)
self.assertAlmostEqual(mmax_sig, 0.5)
def test_mmax_uncertainty_largest_in_catalogue(self):
# When largest mmax has a NaN sigmaMagnitude, take the largest
# sigmaMagnitude found in catalogue
self.config['input_mmax'] = None
self.config['input_mmax_uncertainty'] = None
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
'sigmaMagnitude': np.array([0.1, 0.4, np.nan, 0.2, 0.1])
}
mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
self.assertAlmostEqual(mmax, 7.6)
self.assertAlmostEqual(mmax_sig, 0.4)
def _get_observed_mmax_error(self, test_catalogue, test_config):
# Tests the get_observed_mmax exceptions are raised
with self.assertRaises(ValueError) as ae:
mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
self.assertEqual(str(ae.exception),
'Input mmax uncertainty must be specified!')
# def test_get_observed_mmax(self):
# test_catalogue = {
# 'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 4.3]),
# 'sigmaMagnitude': np.array([0.1, 0.2, 0.3, 0.2, 0.1])
# }
# # Test 1: Finds the mmax from the catalogue with defined sigma
# mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
# self.assertAlmostEqual(mmax, 7.6)
# self.assertAlmostEqual(mmax_sig, 0.3)
#
# # Test 2: Finds the mmax from the catalogue with default sigma (0.2)
# test_catalogue['sigmaMagnitude'] = None
# mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
# self.assertAlmostEqual(mmax, 7.6)
# self.assertAlmostEqual(mmax_sig, 0.2)
#
# # Test 3: Finds the mmax from the input file
# self.config['input_mmax'] = 8.5
# self.config['input_mmax_uncertainty'] = 0.35
# mmax, mmax_sig = _get_observed_mmax(test_catalogue, self.config)
# self.assertAlmostEqual(mmax, 8.5)
# self.assertAlmostEqual(mmax_sig, 0.35)
def test_get_magnitude_vector_properties(self):
        # Tests the function to retrieve mmin and number of earthquakes if
# required for certain functions
test_catalogue = {
'magnitude': np.array([3.4, 4.5, 7.6, 5.4, 3.8]),
'sigmaMagnitude': np.array([0.1, 0.2, 0.3, 0.2, 0.1])
}
self.config['input_mmin'] = 4.0
# Test 1: Finds the number of events from the catalogue with defined
# minimum magnitude
neq, mmin = _get_magnitude_vector_properties(test_catalogue,
self.config)
self.assertAlmostEqual(neq, 3.0)
self.assertAlmostEqual(mmin, 4.0)
# Test 2 Finds the number of events from the catalogue with an
# unspecified minimum magnitude
del self.config['input_mmin']
neq, mmin = _get_magnitude_vector_properties(test_catalogue,
self.config)
self.assertAlmostEqual(neq, 5.0)
self.assertAlmostEqual(mmin, 3.4)
class TestCumulativeMoment(unittest.TestCase):
'''
Test suite for the
:class: openquake.hmtk.seismicity.max_magnitude.cumulative_moment_release
module
'''
def setUp(self):
filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
parser0 = CsvCatalogueParser(filename)
self.catalogue = parser0.read_file()
self.config = {'algorithm': None,
'number_bootstraps': None}
self.model = CumulativeMoment()
def test_check_config(self):
# Tests the configuration checker
# Test 1: No bootstraps specified
self.config['number_bootstraps'] = None
fixed_config = self.model.check_config(self.config)
self.assertEqual(1, fixed_config['number_bootstraps'])
# Test 2: Invalid number of bootstraps specified
self.config['number_bootstraps'] = 0
fixed_config = self.model.check_config(self.config)
self.assertEqual(1, fixed_config['number_bootstraps'])
# Test 3: Valid number of bootstraps
self.config['number_bootstraps'] = 1000
fixed_config = self.model.check_config(self.config)
self.assertEqual(1000, fixed_config['number_bootstraps'])
def test_cumulative_moment(self):
# Tests the cumulative moment function
# Test 1: Ordinary behaviour using the completeness_test_cat.csv
self.assertAlmostEqual(7.5, self.model.cumulative_moment(
self.catalogue.data['year'],
self.catalogue.data['magnitude']), 1)
# Test 2: If catalogue is less than or equal to 1 year duration
id0 = self.catalogue.data['year'].astype(int) == 1990
self.assertTrue(np.isinf(self.model.cumulative_moment(
self.catalogue.data['year'][id0],
self.catalogue.data['magnitude'][id0])))
def test_get_mmax_cumulative_moment(self):
# Tests the cumulative moment function sampled with uncertainty
# Test 1: Case when no sigma is found on magnitude
self.catalogue.data['backup'] = np.copy(
self.catalogue.data['sigmaMagnitude'])
self.catalogue.data['sigmaMagnitude'] = None
mmax, sigma_mmax = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(7.4847335589, mmax, 1)
self.assertAlmostEqual(0.0, sigma_mmax)
# Test 2: Case when one or no bootstraps are specified
self.catalogue.data['sigmaMagnitude'] = self.catalogue.data['backup']
self.config['number_bootstraps'] = 0
mmax, sigma_mmax = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(7.4847335589, mmax, 1)
self.assertAlmostEqual(0.0, sigma_mmax)
# Test 3: Ordinary test case with uncertainty - seeded random generator
self.config['number_bootstraps'] = 1000
# Can fix the seed (used for testing!)
np.random.seed(123456)
mmax, sigma_mmax = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(7.518906927, mmax)
self.assertAlmostEqual(0.058204597, sigma_mmax)
class TestKijkoSellevolFixedb(unittest.TestCase):
'''
Test suite for the Kijko & Sellevol fixed b-value estimator of Mmax
'''
def setUp(self):
'''
Set up test class
'''
filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
parser0 = CsvCatalogueParser(filename)
self.catalogue = parser0.read_file()
self.config = {'b-value': 1.0,
'input_mmin': 5.0,
'input_mmax': None,
'tolerance': 0.001,
'maximum_iterations': 1000}
self.model = KijkoSellevolFixedb()
def test_integral_function(self):
# Tests the integral of the Kijko & Sellevol fixed-b estimator
# define in Equation 6 of Kijko (2004)
# Simple test case 1 - all good parameters
mmax = 8.5
mmin = 5.0
mval = 6.5
beta = np.log(10.)
neq = 100.
self.assertAlmostEqual(self.model._ks_intfunc(mval, neq, mmax, mmin,
beta), 0.04151379)
# Test case 4 - Number of earthquakes is 0
mmax = 8.5
mmin = 5.0
neq = 0.
self.assertAlmostEqual(1.0, self.model._ks_intfunc(mval, neq, mmax,
mmin, beta))
# Test case 5 - beta is negative
neq = 100.
self.assertAlmostEqual(
0.0, self.model._ks_intfunc(mval, neq, mmax, mmin, -0.5))
def test_get_mmin(self):
# Tests the main method to calculate Mmax
# BEHAVIOUR NOTE 1: the estimator of mmax is dependent on the mmin
# If mmin < mmin_observed then the integral will not reach stability
        # (or will give spurious results); therefore, if the mmin specified in
        # the config is less than mmin_obs it will be overwritten by mmin_observed
# BEHAVIOUR NOTE 2: Negative or very small b-values (< 1E-16) will
# result in immediate stability of the integral, thus giving
# mmax == mmax_obs.
# If b-value == 0 then will give a divide by zero warning
# Test good working case b = 1, mmin = 5.0
mmax, sigma_mmax = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax, 7.6994981)
self.assertAlmostEqual(sigma_mmax, 0.31575163)
# Test case with mmin < min(magnitude) (b = 1)
# Gives very high value of mmax
self.config['input_mmin'] = 4.0
mmax_1, sigma_mmax_1 = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax_1, 8.35453605)
self.assertAlmostEqual(sigma_mmax_1, 0.959759906)
self.config['input_mmin'] = 3.0
mmax_2, sigma_mmax_2 = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax_1, mmax_2)
# Case where the maximum magnitude is overriden
# self.config['input_mmax'] = 7.6
# self.config['b-value'] = 1.0
# self.config['input_mmax_uncertainty'] = 0.2
# mmax_1, sigma_mmax_1 = self.model.get_mmax(self.catalogue, self.config)
# self.assertAlmostEqual(mmax_1, 8.1380422)
# self.assertAlmostEqual(sigma_mmax_1, 0.57401164)
def test_raise_runTimeWarning(self):
"""Test case with b-value = 0
"""
self.config['input_mmin'] = 5.0
self.config['b-value'] = 0.0
with warnings.catch_warnings(record=True) as cm:
self.model.get_mmax(self.catalogue, self.config)
assert len(cm) > 0
def test_raise_valueError(self):
# Simple test case 2 - Mmin == Mmax (returns inf)
mmin = 6.0
mmax = 6.0
mval = 6.5
beta = np.log(10.)
neq = 100.
with self.assertRaises(ValueError) as cm:
self.model._ks_intfunc(mval, neq, mmax, mmin, beta)
self.assertEqual(str(cm.exception),
'Maximum magnitude smaller than minimum magnitude'
' in Kijko & Sellevol (Fixed-b) integral')
def test_raise_valueError_1(self):
# Test case 3 - Mmin > MMax (raises value Error)
mmin = 6.2
mmax = 6.0
mval = 6.5
beta = np.log(10.)
neq = 100.
with self.assertRaises(ValueError) as ae:
self.model._ks_intfunc(mval, neq, mmax, mmin, beta)
exception = ae.exception
self.assertEqual(str(exception),
'Maximum magnitude smaller than minimum magnitude'
' in Kijko & Sellevol (Fixed-b) integral')
class TestKijkoSellevolBayes(unittest.TestCase):
'''
Test the openquake.hmtk.seismicity.max_magnitude.KijkoSellevolBayes module
'''
def setUp(self):
filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
parser0 = CsvCatalogueParser(filename)
self.catalogue = parser0.read_file()
self.config = {'b-value': 1.0,
'sigma-b': 0.05,
'input_mmin': 5.0,
'input_mmax': None,
'input_mmax_uncertainty': None,
'tolerance': 0.001,
'maximum_iterations': 1000}
self.model = KijkoSellevolBayes()
def test_ksb_intfunc(self):
# Tests the integral function of the Kijko-Sellevol-Bayes estimator
# of mmax
neq = 100.
mval = 6.0
mmin = 5.0
# Good case b-value is 1.0, sigma-b is 0.05
pval, qval = self._get_pval_qval(1.0, 0.05)
self.assertAlmostEqual(
self.model._ksb_intfunc(mval, neq, mmin, pval, qval),
2.4676049E-5)
# Bad case b-value is 0.0, sigma-b is 0,05
pval0, qval0 = self._get_pval_qval(0.0, 0.05)
self.assertAlmostEqual(
self.model._ksb_intfunc(mval, neq, mmin, pval0, qval0),
0.0)
# Bad case neq = 0.
self.assertAlmostEqual(
self.model._ksb_intfunc(mval, 0., mmin, pval0, qval0),
1.0)
# Bad case mval < mmin
mmin = 6.0
mval = 5.0
self.assertAlmostEqual(
np.log10(self.model._ksb_intfunc(mval, neq, mmin, pval, qval)),
95.7451687)
def test_get_mmax(self):
# Tests the function to calculate mmax using the Kijko-Sellevol-Bayes
        # estimator
# Good case - b = 1., sigma_b = 0.05, mmin = 5.0
mmax, mmax_sigma = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax, 7.6902450)
self.assertAlmostEqual(mmax_sigma, 0.30698886)
# Bad case 1 - input mmin < catalogue mmin
self.config['input_mmin'] = 3.5
mmax, mmax_sigma = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax, 8.2371167)
self.assertAlmostEqual(mmax_sigma, 0.84306841)
self.config['input_mmin'] = 4.0
mmax_check, _ = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax, mmax_check)
# Good case 1 - input mmax
self.config['input_mmin'] = 5.0
self.config['input_mmax'] = 7.8
self.config['input_mmax_uncertainty'] = 0.2
mmax, mmax_sigma = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax, 8.9427386)
self.assertAlmostEqual(mmax_sigma, 1.16010841)
# Bad case 1 - negative b-value (should return nan)
self.config = {'b-value': -0.5,
'sigma-b': 0.05,
'input_mmin': 5.0,
'input_mmax': None,
'input_mmax_uncertainty': None,
'tolerance': 0.001,
'maximum_iterations': 1000}
mmax, mmax_sigma = self.model.get_mmax(self.catalogue, self.config)
self.assertTrue(np.isnan(mmax))
self.assertTrue(np.isnan(mmax_sigma))
def _get_pval_qval(self, bval, sigma_b):
'''
Get the p-value and q-value from b and sigma b
'''
beta = bval * np.log(10.)
sigma_beta = sigma_b * np.log(10.)
pval = beta / (sigma_beta ** 2.)
qval = (beta / sigma_beta) ** 2.
return pval, qval
class TestKijkoNPG(unittest.TestCase):
'''
Class to test the Kijko Nonparametric Gaussian function
'''
def setUp(self):
filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
parser0 = CsvCatalogueParser(filename)
self.catalogue = parser0.read_file()
self.config = {'maximum_iterations': 1000,
'number_earthquakes': 100,
'number_samples': 51,
'tolerance': 0.05}
self.model = KijkoNonParametricGaussian()
def test_get_exponential_values(self):
# Tests the function to derive an exponentially spaced set of values.
# Tested against Kijko implementation
min_mag = 5.8
max_mag = 7.4
expected_output = np.array(
[5.8, 5.87609089, 5.94679912, 6.01283617, 6.07478116, 6.13311177,
6.18822664, 6.24046187, 6.29010351, 6.33739696, 6.38255438,
6.42576041, 6.46717674, 6.50694576, 6.5451935, 6.58203209,
6.61756168, 6.65187211, 6.68504428, 6.71715129, 6.74825943,
6.77842898, 6.80771492, 6.83616754, 6.86383296, 6.89075356,
6.9169684, 6.94251354, 6.96742235, 6.99172576, 7.0154525,
7.0386293, 7.06128109, 7.08343111, 7.10510113, 7.1263115,
7.14708132, 7.16742851, 7.18736994, 7.20692147, 7.22609806,
7.24491382, 7.26338207, 7.28151542, 7.2993258, 7.31682451,
7.33402228, 7.35092928, 7.36755517, 7.38390916, 7.4])
np.testing.assert_almost_equal(
expected_output,
_get_exponential_spaced_values(min_mag, max_mag, 51))
def test_h_smooth(self):
        # Function to test the smoothing factor function h_smooth
        # Test 1: Good magnitude range (4.5 - 8.0)
mag = np.arange(4.5, 8.1, 0.1)
self.assertAlmostEqual(self.model.h_smooth(mag), 0.46)
# Test 2: Bad magnitude
mag = np.array([6.5])
self.assertAlmostEqual(self.model.h_smooth(mag), 0.0)
def test_gauss_cdf(self):
# Tests the Gaussian cumulative distribution function
        # Simple case where x = -7 to 7 with a step of 1.
xvals = np.arange(-7., 8., 1.)
yvals_expected = np.array(
[0.00000000e+00, 0.00000000e+00, 3.01100756e-05, 8.22638484e-04,
2.36281702e-02, 2.06039365e-01, 3.39612064e-01, 5.00000000e-01,
6.60387936e-01, 7.93960635e-01, 9.76371830e-01, 9.99177362e-01,
9.99969890e-01, 1.00000000e+00, 1.00000000e+00])
self.assertTrue(np.allclose(yvals_expected,
self.model._gauss_cdf_hastings(xvals)))
def test_kijko_npg_intfunc_simps(self):
'''
Tests the integration function using Simpson's rule
'''
# Simple test using test catalogue data - verified against
# implementation in Kijko's own code
# Get the largest 100 events from the catalogue
idx = np.flipud(np.argsort(self.catalogue.data['magnitude']))
test_mag = self.catalogue.data['magnitude'][idx[:100]]
h_fact = self.model.h_smooth(test_mag)
mvals = _get_exponential_spaced_values(
np.min(test_mag), np.max(test_mag), 51)
self.assertAlmostEqual(
0.11026752,
self.model._kijko_npg_intfunc_simps(
mvals, test_mag, np.max(test_mag), h_fact, 100.))
def test_get_mmax(self):
# Tests the main get_mmax function. These test results are derived by
# applying Kijko's implementation to the top 100 events in the test
# catalogue
mmax, mmax_sig = self.model.get_mmax(self.catalogue, self.config)
self.assertAlmostEqual(mmax, 7.5434318)
self.assertAlmostEqual(mmax_sig, 0.17485045)
| 41.988372
| 91
| 0.622977
|
7b75a22579d40e745850341adafd905b87855811
| 611
|
py
|
Python
|
deform/tests/test_api.py
|
krysros/deform
|
5ba96e43234521d9deecfe85a45accd08565c4d8
|
[
"CC-BY-3.0"
] | 266
|
2015-01-12T23:51:50.000Z
|
2022-03-30T02:49:53.000Z
|
deform/tests/test_api.py
|
krysros/deform
|
5ba96e43234521d9deecfe85a45accd08565c4d8
|
[
"CC-BY-3.0"
] | 234
|
2015-01-01T20:21:09.000Z
|
2021-12-19T06:51:19.000Z
|
deform/tests/test_api.py
|
krysros/deform
|
5ba96e43234521d9deecfe85a45accd08565c4d8
|
[
"CC-BY-3.0"
] | 100
|
2015-01-01T20:08:50.000Z
|
2021-10-02T12:14:37.000Z
|
"""API tests."""
# Standard Library
import unittest
class TestAPI(unittest.TestCase):
def test_it(self):
"""
none of these imports should fail
"""
# Deform
from deform import Button # noQA
from deform import Field # noQA
from deform import FileData # noQA
from deform import Form # noQA
from deform import TemplateError # noQA
from deform import ValidationFailure # noQA
from deform import ZPTRendererFactory # noQA
from deform import default_renderer # noQA
from deform import widget # noQA
| 29.095238
| 53
| 0.631751
|
2867913e4d5aebf9db4540c52a787e384f0fd314
| 1,040
|
py
|
Python
|
cleff/messaging/migrations/0004_auto_20150801_2121.py
|
lancekrogers/music-network
|
e8b21f3f1bbeb1ee46fb41c2b25f3b3e26e97097
|
[
"Apache-2.0"
] | null | null | null |
cleff/messaging/migrations/0004_auto_20150801_2121.py
|
lancekrogers/music-network
|
e8b21f3f1bbeb1ee46fb41c2b25f3b3e26e97097
|
[
"Apache-2.0"
] | null | null | null |
cleff/messaging/migrations/0004_auto_20150801_2121.py
|
lancekrogers/music-network
|
e8b21f3f1bbeb1ee46fb41c2b25f3b3e26e97097
|
[
"Apache-2.0"
] | 1
|
2015-08-12T20:51:04.000Z
|
2015-08-12T20:51:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('messaging', '0003_musicianmusicianconversation_initializer'),
]
operations = [
migrations.AlterModelOptions(
name='musicianmusicianconversation',
options={'ordering': ['-timestamp']},
),
migrations.AddField(
model_name='musicianmusicianconversation',
name='messages',
field=models.ManyToManyField(blank=True, to='messaging.MusMusMessage'),
),
migrations.AlterField(
model_name='musicianmusicianconversation',
name='initializer',
field=models.ForeignKey(to='profiles.Musician', related_name='initializer'),
),
migrations.AlterField(
model_name='musmusmessage',
name='sender',
field=models.ForeignKey(to='profiles.Musician', related_name='sender'),
),
]
| 30.588235
| 88
| 0.619231
|
d802710486b7a2c4c2b659b24fb0dce7b460973d
| 16,547
|
py
|
Python
|
Old MRI segmentation code/Hist-seg-WES_010.py
|
akac0297/PETLAB
|
950cc153ce230d12d752ad0d11111e7fc22d9e7d
|
[
"MIT"
] | null | null | null |
Old MRI segmentation code/Hist-seg-WES_010.py
|
akac0297/PETLAB
|
950cc153ce230d12d752ad0d11111e7fc22d9e7d
|
[
"MIT"
] | null | null | null |
Old MRI segmentation code/Hist-seg-WES_010.py
|
akac0297/PETLAB
|
950cc153ce230d12d752ad0d11111e7fc22d9e7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#import modules
import SimpleITK as sitk
from platipy.imaging.visualisation.tools import ImageVisualiser
from platipy.imaging.utils.tools import get_com
import matplotlib.pyplot as plt
import numpy as np
get_ipython().run_line_magic('matplotlib', 'notebook')
# In[4]:
#add segs tp4
seg_B50T=sitk.ReadImage("test_label_threshold_010_4_B50T_hist.nii.gz")
seg_B800T=sitk.ReadImage("test_label_threshold_010_4_B800T_hist.nii.gz")
seg_T2=sitk.ReadImage("test_label_threshold_010_4_T2w_hist.nii.gz")
seg_MPE=sitk.ReadImage("test_label_threshold_010_4_MPE_hist.nii.gz")
seg_B50T=sitk.Resample(seg_B50T,seg_T2)
seg_B800T=sitk.Resample(seg_B800T,seg_T2)
seg_MPE=sitk.Resample(seg_MPE,seg_T2)
new_seg_T2=sitk.LabelMapToBinary(sitk.Cast(seg_T2, sitk.sitkLabelUInt8))
new_seg_B50T=sitk.LabelMapToBinary(sitk.Cast(seg_B50T, sitk.sitkLabelUInt8))
new_seg_B800T=sitk.LabelMapToBinary(sitk.Cast(seg_B800T, sitk.sitkLabelUInt8))
new_seg_MPE=sitk.LabelMapToBinary(sitk.Cast(seg_MPE, sitk.sitkLabelUInt8))
new_TRACE_seg=(new_seg_B50T+new_seg_B800T)/2#sitk.Cast((new_seg_B50T+new_seg_B800T)/2,sitk.sitkUInt8)
new_seg_1=(sitk.Cast(new_seg_T2,sitk.sitkFloat64)+new_TRACE_seg+sitk.Cast(new_seg_MPE,sitk.sitkFloat64)) #need to threshold this somehow
vis=ImageVisualiser(new_seg_1, cut=get_com(new_seg_1), window=[0,3])
fig=vis.show()
# In[5]:
new_seg_1_1=sitk.BinaryThreshold(new_seg_1, lowerThreshold=2)
vis=ImageVisualiser(new_seg_1_1, cut=get_com(new_seg_1), window=[0,1])
fig=vis.show()
# In[6]:
sitk.WriteImage(new_seg_1_1,"new_seg_010_4_mri.nii.gz")
# In[7]:
R_breast=sitk.ReadImage("/home/alicja/Downloads/Segmentation.nii.gz")
# In[8]:
WES_010_4_B50T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz")
WES_010_4_B800T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz")
# In[9]:
from platipy.imaging.visualisation.tools import ImageVisualiser
from platipy.imaging.registration.registration import (
initial_registration,
fast_symmetric_forces_demons_registration,
transform_propagation,
apply_field
)
# In[10]:
#DIR to tp5
WES_010_5_B50T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_5_20181010_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_6.nii.gz")
image_to_0_rigid, tfm_to_0_rigid = initial_registration(
WES_010_5_B50T,
WES_010_4_B50T,
options={
'shrink_factors': [8,4],
'smooth_sigmas': [0,0],
'sampling_rate': 0.5,
'final_interp': 2,
'metric': 'mean_squares',
'optimiser': 'gradient_descent_line_search',
'number_of_iterations': 25},
reg_method='Rigid')
image_to_0_dir, tfm_to_0_dir = fast_symmetric_forces_demons_registration(
WES_010_5_B50T,
image_to_0_rigid,
resolution_staging=[4,2],
iteration_staging=[10,10]
)
R_breast_to_0_rigid = transform_propagation(
WES_010_5_B50T,
R_breast,
tfm_to_0_rigid,
structure=True
)
R_breast_to_0_dir = apply_field(
R_breast_to_0_rigid,
tfm_to_0_dir,
structure=True
)
# In[11]:
vis = ImageVisualiser(WES_010_5_B50T, axis='z', cut=get_com(R_breast_to_0_dir), window=[-250, 500])
vis.add_contour(R_breast_to_0_dir, name='BREAST', color='g')
fig = vis.show()
# In[12]:
breast_contour_dilate=sitk.BinaryDilate(R_breast_to_0_dir, (2,2,2))
# In[14]:
vis = ImageVisualiser(WES_010_5_B50T, axis='z', cut=get_com(R_breast_to_0_dir), window=[-250, 500])
vis.add_contour(breast_contour_dilate, name='BREAST', color='g')
fig = vis.show()
# In[15]:
masked_R_breast = sitk.Mask(WES_010_5_B50T, breast_contour_dilate)
# In[20]:
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(500,3000,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[22]:
image_mri=WES_010_5_B50T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=950, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_5_B50T_hist.nii.gz")
# In[18]:
def estimate_tumour_vol(img_mri, lowerthreshold=300, upperthreshold=3000, hole_size=1):
label_threshold = sitk.BinaryThreshold(img_mri, lowerThreshold=lowerthreshold, upperThreshold=upperthreshold)
label_threshold_cc = sitk.RelabelComponent(sitk.ConnectedComponent(label_threshold))
label_threshold_cc_x = (label_threshold_cc==1)
label_threshold_cc_x_f = sitk.BinaryMorphologicalClosing(label_threshold_cc_x, (hole_size,hole_size,hole_size))
return(label_threshold_cc_x_f)
# In[23]:
WES_010_5_B800T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_5_20181010_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_6.nii.gz")
WES_010_5_T2w=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_5_20181010_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz")
WES_010_5_MPE=sitk.ReadImage("MPE_sub_WES_010_5.nii.gz")
masked_R_breast = sitk.Mask(WES_010_5_B800T, breast_contour_dilate)
# In[31]:
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(200,750,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[33]:
image_mri=WES_010_5_B800T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=400, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_5_B800T_hist.nii.gz") #ok but picks up fibro
# In[49]:
WES_010_5_T2w=sitk.Resample(WES_010_5_T2w,WES_010_5_B50T)
masked_R_breast = sitk.Mask(WES_010_5_T2w, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(200,750,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[51]:
image_mri=WES_010_5_T2w
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=440, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_5_T2w_hist.nii.gz") #picks up fibro
# In[38]:
WES_010_5_MPE=sitk.Resample(WES_010_5_MPE,WES_010_5_B50T)
masked_R_breast = sitk.Mask(WES_010_5_MPE, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,750,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[42]:
image_mri=WES_010_5_MPE
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=640, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_5_MPE_hist.nii.gz") #okay but not ideal
# In[52]:
#add segs tp5
seg_B50T=sitk.ReadImage("test_label_threshold_010_5_B50T_hist.nii.gz")
seg_B800T=sitk.ReadImage("test_label_threshold_010_5_B800T_hist.nii.gz")
seg_T2=sitk.ReadImage("test_label_threshold_010_5_T2w_hist.nii.gz")
seg_MPE=sitk.ReadImage("test_label_threshold_010_5_MPE_hist.nii.gz")
seg_B50T=sitk.Resample(seg_B50T,seg_T2)
seg_B800T=sitk.Resample(seg_B800T,seg_T2)
seg_MPE=sitk.Resample(seg_MPE,seg_T2)
new_seg_T2=sitk.LabelMapToBinary(sitk.Cast(seg_T2, sitk.sitkLabelUInt8))
new_seg_B50T=sitk.LabelMapToBinary(sitk.Cast(seg_B50T, sitk.sitkLabelUInt8))
new_seg_B800T=sitk.LabelMapToBinary(sitk.Cast(seg_B800T, sitk.sitkLabelUInt8))
new_seg_MPE=sitk.LabelMapToBinary(sitk.Cast(seg_MPE, sitk.sitkLabelUInt8))
new_TRACE_seg=(new_seg_B50T+new_seg_B800T)/2#sitk.Cast((new_seg_B50T+new_seg_B800T)/2,sitk.sitkUInt8)
new_seg_1=(sitk.Cast(new_seg_T2,sitk.sitkFloat64)+new_TRACE_seg+sitk.Cast(new_seg_MPE,sitk.sitkFloat64)) #need to threshold this somehow
vis=ImageVisualiser(new_seg_1, cut=get_com(new_seg_1), window=[0,3])
fig=vis.show()
# In[53]:
new_seg_1_1=sitk.BinaryThreshold(new_seg_1, lowerThreshold=2)
vis=ImageVisualiser(new_seg_1_1, cut=get_com(new_seg_1), window=[0,1])
fig=vis.show()
# In[54]:
sitk.WriteImage(new_seg_1_1,"new_seg_010_5_mri.nii.gz") #not good but okay
# In[62]:
#DIR to tp6
WES_010_6_B50T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_6_20190301_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_5.nii.gz")
WES_010_6_B50T=sitk.Resample(WES_010_6_B50T,WES_010_5_B50T)
image_to_0_rigid, tfm_to_0_rigid = initial_registration(
WES_010_6_B50T,
WES_010_4_B50T,
options={
'shrink_factors': [8,4],
'smooth_sigmas': [0,0],
'sampling_rate': 0.5,
'final_interp': 2,
'metric': 'mean_squares',
'optimiser': 'gradient_descent_line_search',
'number_of_iterations': 25},
reg_method='Rigid')
image_to_0_dir, tfm_to_0_dir = fast_symmetric_forces_demons_registration(
WES_010_6_B50T,
image_to_0_rigid,
resolution_staging=[4,2],
iteration_staging=[10,10]
)
R_breast_to_0_rigid = transform_propagation(
WES_010_6_B50T,
R_breast,
tfm_to_0_rigid,
structure=True
)
R_breast_to_0_dir = apply_field(
R_breast_to_0_rigid,
tfm_to_0_dir,
structure=True
)
# In[63]:
vis = ImageVisualiser(WES_010_6_B50T, axis='z', cut=get_com(R_breast_to_0_dir), window=[-250, 500])
vis.add_contour(R_breast_to_0_dir, name='BREAST', color='g')
fig = vis.show()
# In[64]:
breast_contour_dilate=sitk.BinaryDilate(R_breast_to_0_dir, (2,2,2))
# In[65]:
vis = ImageVisualiser(WES_010_5_B50T, axis='z', cut=get_com(R_breast_to_0_dir), window=[-250, 500])
vis.add_contour(breast_contour_dilate, name='BREAST', color='g')
fig = vis.show()
# In[66]:
masked_R_breast = sitk.Mask(WES_010_6_B50T, breast_contour_dilate)
# In[72]:
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,600,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[73]:
image_mri=WES_010_6_B50T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=405, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_6_B50T_hist.nii.gz") #is okay
# In[79]:
WES_010_6_B800T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_6_20190301_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_5.nii.gz")
WES_010_6_B800T=sitk.Resample(WES_010_6_B800T,WES_010_6_B50T)
masked_R_breast = sitk.Mask(WES_010_6_B800T, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(100,400,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[82]:
image_mri=WES_010_6_B800T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=330, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_6_B800T_hist.nii.gz") #okay but no time
# In[105]:
WES_010_6_T2w=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_6_20190301_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz")
WES_010_6_T2w=sitk.Resample(WES_010_6_T2w,WES_010_6_B50T)
masked_R_breast = sitk.Mask(WES_010_6_T2w, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,400,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[109]:
image_mri=WES_010_6_T2w
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
arr_mri[:,:,:177] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
image_mri_masked=sitk.Mask(image_mri_masked, breast_contour_dilate)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=100, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_6_T2w_hist.nii.gz")#this one doesnt work
# In[111]:
WES_010_6_MPE=sitk.ReadImage("MPE_sub_WES_010_6.nii.gz")
WES_010_6_MPE=sitk.Resample(WES_010_6_MPE,WES_010_6_B50T)
masked_R_breast = sitk.Mask(WES_010_6_MPE, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,400,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[123]:
image_mri=WES_010_6_MPE
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
arr_mri[:,:,:100] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
image_mri_masked=sitk.Mask(image_mri_masked, breast_contour_dilate)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=85, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_6_MPE_hist.nii.gz") #doesnt work
# In[126]:
#add segs tp6
seg_B50T=sitk.ReadImage("test_label_threshold_010_6_B50T_hist.nii.gz")
seg_B800T=sitk.ReadImage("test_label_threshold_010_6_B800T_hist.nii.gz")
seg_B800T=sitk.Resample(seg_B800T,seg_B50T)
new_seg_B50T=sitk.LabelMapToBinary(sitk.Cast(seg_B50T, sitk.sitkLabelUInt8))
new_seg_B800T=sitk.LabelMapToBinary(sitk.Cast(seg_B800T, sitk.sitkLabelUInt8))
new_TRACE_seg=(new_seg_B50T+new_seg_B800T)/2#sitk.Cast((new_seg_B50T+new_seg_B800T)/2,sitk.sitkUInt8)
new_seg_1=(sitk.Cast(new_TRACE_seg,sitk.sitkFloat64)) #need to threshold this somehow
vis=ImageVisualiser(new_seg_1, cut=get_com(new_seg_1), window=[0,3])
fig=vis.show()
# In[127]:
new_seg_1_1=sitk.BinaryThreshold(new_seg_1, lowerThreshold=1)
vis=ImageVisualiser(new_seg_1_1, cut=get_com(new_seg_1), window=[0,1])
fig=vis.show()
# In[128]:
sitk.WriteImage(new_seg_1_1,"new_seg_010_6_mri.nii.gz") #very bad
# In[130]:
image_mri_masked=sitk.Mask(WES_010_6_MPE,new_seg_1_1)
arr_mri_masked=sitk.GetArrayFromImage(image_mri_masked)
arr_mri_masked[arr_mri_masked<120]=0
tum_MPE=sitk.GetImageFromArray(arr_mri_masked)
tum_MPE.CopyInformation(image_mri_masked)
# In[131]:
label_threshold_cc_x_f=estimate_tumour_vol(tum_MPE, lowerthreshold=150, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_6_MPE_hist_new.nii.gz") #doesnt work either
# In[2]:
#date order: 29/08, 10/10, 01/03 (next year)
#volumes
img1=sitk.ReadImage("new_seg_010_4_mri.nii.gz")
img2=sitk.ReadImage("new_seg_010_5_mri.nii.gz")
img3=sitk.ReadImage("new_seg_010_6_mri.nii.gz")
arr1=sitk.GetArrayFromImage(img1)
arr2=sitk.GetArrayFromImage(img2)
arr3=sitk.GetArrayFromImage(img3)
vol1=np.sum(arr1==1)
vol2=np.sum(arr2==1)
vol3=np.sum(arr3==1)
# In[3]:
print(vol1, vol2, vol3)
# In[ ]:
| 26.949511
| 180
| 0.794223
|
d41df2117deb9f7bf02e0430fc2af1dd29a16904
| 2,098
|
py
|
Python
|
service/song_service.py
|
janvillarosa/jammer-song-selector
|
aa93c5b1f18b1c8ae82de3bb611db1cf3c3e09fc
|
[
"MIT"
] | null | null | null |
service/song_service.py
|
janvillarosa/jammer-song-selector
|
aa93c5b1f18b1c8ae82de3bb611db1cf3c3e09fc
|
[
"MIT"
] | null | null | null |
service/song_service.py
|
janvillarosa/jammer-song-selector
|
aa93c5b1f18b1c8ae82de3bb611db1cf3c3e09fc
|
[
"MIT"
] | null | null | null |
import os
import spotipy
import random
from spotipy.oauth2 import SpotifyClientCredentials
from service.playlist_seed import PLAYLIST_SEED
CLIENT_ID = "2cc3f44677c74b45a0813be25575c494"
CLIENT_SECRET = os.environ["SPOTIFY_SECRET"]
SP = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET))
def _get_random_playlist():
return random.choice(list(PLAYLIST_SEED.values()))
def _get_playlist_size(playlist_id):
playlist_data = SP.playlist_items(playlist_id)
size = playlist_data["total"]
return size
def _call_spotify_for_track():
random_playlist_id = _get_random_playlist()
playlist_size = _get_playlist_size(random_playlist_id)
random_offset = random.randrange(1, playlist_size)
return SP.playlist_items(random_playlist_id, fields=None, limit=1, offset=random_offset, market=None, additional_types=('track',))
def _parse_spotify_results(results):
track_obj = {}
for item in results["items"]:
track = item["track"]
album = track["album"]
artists = track["artists"]
track_obj["id"] = track["id"]
track_obj["song"] = track["name"]
track_obj["album"] = album["name"]
track_obj["preview_url"] = None
track_obj["artists"] = []
for artist in artists:
track_obj["artists"].append(artist["name"])
if track["preview_url"]:
track_obj["preview_url"] = track["preview_url"]
album_art_highest_res = 0
for album_art in album["images"]:
#Get the highest res album art available
if album_art["height"] > album_art_highest_res:
track_obj["album_art_url"] = album_art["url"]
album_art_highest_res = album_art["height"]
return track_obj
def get_random_song():
random_song = _parse_spotify_results(_call_spotify_for_track())
while not random_song["preview_url"]:
random_song = _parse_spotify_results(_call_spotify_for_track())
return random_song
| 36.807018
| 134
| 0.677788
|
f2fb3ea34ff9cb3cd67b572a3658f84a8a40c3da
| 1,132
|
py
|
Python
|
core/utils.py
|
hulingfeng211/weixin
|
e3b869ffa2bf54366d561535c84e118db0e9827e
|
[
"Apache-2.0"
] | null | null | null |
core/utils.py
|
hulingfeng211/weixin
|
e3b869ffa2bf54366d561535c84e118db0e9827e
|
[
"Apache-2.0"
] | null | null | null |
core/utils.py
|
hulingfeng211/weixin
|
e3b869ffa2bf54366d561535c84e118db0e9827e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
Description: utility helpers, including httputil
"""
import hashlib
from tornado.gen import coroutine, Return
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
import constant
from core import settings
__author__ = 'george'
@coroutine
def send_request(url, method="GET", data=None, headers=None, **kwargs):
"""发送HTTP请求的封装
:param url 目标资源
:param method HTTP请求方法,默认GET
:param data 需要发送的数据,如果是GET请求,默认忽略该参数
:param headers 请求需要携带的HTTP头部信息
:return 返回请求后的response对象 """
# todo 待实现
if not kwargs:
kwargs = {}
kwargs[constant.PROXY_HOST] = settings[constant.PROXY_SETTINGS][constant.PROXY_HOST]
kwargs[constant.PROXY_PORT] = settings[constant.PROXY_SETTINGS][constant.PROXY_PORT]
else:
kwargs.update(settings[constant.PROXY_SETTINGS])
request = HTTPRequest(url=url,method=method,body=data,headers=headers,**kwargs)
response=yield AsyncHTTPClient().fetch(request)
raise Return(response)
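# Minimal usage sketch for send_request: calling it from another coroutine and
# yielding the resulting HTTPResponse. The URL, payload and header values below
# are placeholders, and the proxy settings are assumed to be configured in
# `settings` as elsewhere in this module.
@coroutine
def fetch_example():
    response = yield send_request("http://example.com/api",
                                  method="POST",
                                  data="a=1&b=2",
                                  headers={"Content-Type": "application/x-www-form-urlencoded"})
    raise Return(response.body)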
def make_password(password):
"""生成用户的加密后的密码,默认采用md5算法
:param password 明文的密码
:return md5加密后的密文密码"""
return hashlib.md5(password).hexdigest()
| 27.609756
| 92
| 0.723498
|
9d7411dc7ccce761b5d1303bd7844c42380288e6
| 3,556
|
py
|
Python
|
py_algo/graphs_2/competition/monk_and_hops.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | 1
|
2021-07-05T15:39:04.000Z
|
2021-07-05T15:39:04.000Z
|
py_algo/graphs_2/competition/monk_and_hops.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | null | null | null |
py_algo/graphs_2/competition/monk_and_hops.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | 1
|
2021-09-02T21:31:34.000Z
|
2021-09-02T21:31:34.000Z
|
"""
Codemonk link: https://www.hackerearth.com/problem/algorithm/monk-and-hops-357a2ca6/
Monk has a connected graph with N nodes and M weighted undirected edges, and he gave you a task on it. You need to
travel from node 1 to node N using a path with the minimum cost. If many such paths with minimum cost exist, you need
the one with the minimum hops. A path is a sequence of vertices u1, u2, ..., uk such that:
1) All ui are distinct.
2) ui and ui+1 are connected by an edge.
The cost of a path is the sum of the weights of its edges. The hops of a path are the number of pairs of adjacent edges
in the path such that the weight of one edge is even and the weight of the other is odd.
NOTE: The graph may contain multiple edges between the same pair of nodes. It does not contain self loops.
Input - Output:
First line contains the two integers N and M, denoting the number of nodes and the number of edges in the graph.
M lines follow each containing three integers U, V and W, denoting that there exists an edge between nodes U and V with
weight W.
Output, on a single line, two space separated integers, the cost and the number of hops of the path.
Sample input:
5 5
1 2 4
2 4 6
1 3 5
3 4 5
4 5 5
Sample Output:
15 0
"""
"""
We solve the problem with Dijkstra's algorithm plus a simple alteration. We keep track of the weight of the edge used
to reach a node and, more importantly, whether that weight was even or odd. That way we can compute the new path weight
to a vertex as well as its number of hops. We must not use a visited array, because two paths to a vertex can have the
same weight and the same hops while the parity of the edge used to reach it differs, and that parity is what determines
the hops of the next step. This makes the complexity worse, though.
Final complexity: O(E*logE), which is O(E*logV), because E <= V*(V-1)/2
and thus O(E*logE) becomes O(E*logV^2), which is O(2*E*logV).
"""
import heapq
from sys import stdin
def decide(a, b):
if a[0] and b[0]:
return 0
if a[1] and b[1]:
return 0
return 1
def dijkstra(graph):
distances = [float("inf")] * len(graph)
hops = [float("inf")] * len(graph)
distances[0] = 0
hops[0] = 0
priority = []
for vertex, weight in graph[0]:
parent = [0, 0]
parent[weight % 2] = 1
distances[vertex] = weight
hops[vertex] = 0
heapq.heappush(priority, (weight, vertex, parent))
while priority:
path_weight, vertex, parent = heapq.heappop(priority)
for new_vertex, weight in graph[vertex]:
new_parent = [0, 0]
new_parent[weight % 2] = 1
if path_weight + weight < distances[new_vertex]:
distances[new_vertex] = path_weight + weight
hops[new_vertex] = hops[vertex] + decide(parent, new_parent)
heapq.heappush(priority, (distances[new_vertex], new_vertex, new_parent))
elif path_weight + weight == distances[new_vertex]:
hop_value = hops[vertex] + decide(parent, new_parent)
if hop_value <= hops[new_vertex]:
hops[new_vertex] = hop_value
heapq.heappush(priority, (distances[new_vertex], new_vertex, new_parent))
return distances, hops
n, m = map(int, stdin.readline().split())
graph = [[] for _ in range(n)]
for _ in range(m):
u, v, w = map(int, stdin.readline().split())
graph[u-1].append((v-1, w))
graph[v-1].append((u-1, w))
ans1, ans2 = dijkstra(graph)
print(ans1[-1], ans2[-1])
| 37.431579
| 120
| 0.667323
|
73b4076c4e5697c596df2652a9b4bbd06cab8cde
| 3,147
|
py
|
Python
|
setup.py
|
shaklev/Pyrez
|
a9cc5bc0c62fbd56235c06b39683f697fd77e8a8
|
[
"MIT"
] | null | null | null |
setup.py
|
shaklev/Pyrez
|
a9cc5bc0c62fbd56235c06b39683f697fd77e8a8
|
[
"MIT"
] | null | null | null |
setup.py
|
shaklev/Pyrez
|
a9cc5bc0c62fbd56235c06b39683f697fd77e8a8
|
[
"MIT"
] | null | null | null |
import re as Regex
import os
from setuptools import find_packages, setup
import sys
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) # allow setup.py to be run from any path
if sys.version_info [:2] < (3, 4):
raise RuntimeError("Unsupported Python version")
def readFile(filename):
with open(os.path.join(os.path.dirname(__file__), filename), 'r') as file:
return file.read()
def readMe(filename = "README.rst"):
try:
return readFile(filename)
except Exception:
raise RuntimeError("File not found!")
def requeriments(filename = "requirements.txt"):
try:
return readFile(filename).splitlines()
except Exception:
raise RuntimeError("File not found!")
def regexFunc(pattern):
stringFile = readFile("pyrez/__init__.py")
return Regex.search(r'^__{}__\s*=\s*[\'"]([^\'"]*)[\'"]'.format(pattern), stringFile, Regex.MULTILINE).group(1)
VERSION = regexFunc("version")
AUTHOR = regexFunc ("author")
LICENSE = regexFunc("license")
NAME = regexFunc("name")
setup(
author=AUTHOR,
author_email="luis.silva.1044894@sga.pucminas.br",
classifiers=[#https://pypi.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Games/Entertainment",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
],
description="An open-source wrapper for Hi-Rez API (Paladins, Realm Royale, and Smite), written in Python",
download_url="https://pypi.org/project/pyrez/#files",
include_package_data=True,
install_requires=requeriments(),
keywords=["hirez hi-rez smite paladins realmapi open-source api wrapper library python api-wrapper paladins-api smitegame smiteapi realm-api python3 python-3 python-3-6"],
license=LICENSE,
long_description=readMe(), # long_description=open ('README.rst').read () + '\n\n' + open ('HISTORY.rst').read (),
long_description_content_type="text/x-rst",
name=NAME,
packages=find_packages(), # packages=[name] # find_packages (exclude=['docs', 'tests*']),
url="https://github.com/luissilva1044894/PyRez",
version=VERSION,
zip_safe=True,
project_urls={
"Documentation": "http://pyrez.readthedocs.io/en/latest/?badge=latest",
"Source": "https://github.com/luissilva1044894/PyRez",
},
)
if __name__ == "main":
os.system ("python setup.py sdist")
sys.exit()
| 40.346154
| 175
| 0.656816
|
152de7135840076066a497f2c41d5110ab39b5df
| 773
|
py
|
Python
|
setup.py
|
alading241/python-wechaty
|
514c83529b028a3262595b52125406cdce83c80a
|
[
"Apache-2.0"
] | 1
|
2020-03-03T07:46:09.000Z
|
2020-03-03T07:46:09.000Z
|
setup.py
|
alading241/python-wechaty
|
514c83529b028a3262595b52125406cdce83c80a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
alading241/python-wechaty
|
514c83529b028a3262595b52125406cdce83c80a
|
[
"Apache-2.0"
] | null | null | null |
''' setup '''
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
version = '0.0.0'
with open('VERSION', 'r') as fh:
version = fh.readline()
setuptools.setup(
name='wechaty',
version=version,
author='Huan LI (李卓桓)',
author_email='zixia@zixia.net',
description='Wechaty is a Bot SDK for Wechat Personal Account',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/Chatie/python-wechaty',
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
)
| 26.655172
| 67
| 0.650712
|
6844ad1f37cae99528cbc63c9a014d2e44f64753
| 1,419
|
py
|
Python
|
ui/combobox.py
|
jonaslindemann/compute-course-public
|
b8f55595ebbd790d79b525efdff17b8517154796
|
[
"MIT"
] | 4
|
2021-09-12T12:07:01.000Z
|
2021-09-29T17:38:34.000Z
|
ui/combobox.py
|
jonaslindemann/compute-course-public
|
b8f55595ebbd790d79b525efdff17b8517154796
|
[
"MIT"
] | null | null | null |
ui/combobox.py
|
jonaslindemann/compute-course-public
|
b8f55595ebbd790d79b525efdff17b8517154796
|
[
"MIT"
] | 5
|
2020-10-24T16:02:31.000Z
|
2021-09-28T20:57:46.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 11 09:44:29 2016
@author: lindemann
"""
import sys
from PyQt5.QtWidgets import *
class MyWindow(QWidget):
"""Huvudklass för vårt fönster"""
def __init__(self):
"""Klass constructor"""
super().__init__()
self.init_gui()
def init_gui(self):
        # Configure the window
self.resize(400, 200)
self.move(50, 50)
self.setWindowTitle("MyWindow")
        # Create the combobox control
self.combo_box = QComboBox(self)
self.combo_box.move(20, 20)
        # Add options
self.combo_box.addItem("Alternativ 1")
self.combo_box.addItem("Alternativ 2")
self.combo_box.addItem("Alternativ 3")
self.combo_box.addItem("Alternativ 4")
        # Set the default selection
self.combo_box.setCurrentIndex(3)
        # Connect the event handler method to the signal
self.combo_box.currentIndexChanged.connect(self.on_current_index_changed)
def on_current_index_changed(self, index):
"""Hantera signalen currentIndexChanged"""
QMessageBox.information(self, "Meddelande", "Du valde: " + str(index))
QMessageBox.information(self, "Meddelande", "Texten var: " + self.combo_box.currentText())
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MyWindow()
window.show()
sys.exit(app.exec_())
| 22.52381
| 98
| 0.627907
|
ccf84d47fcf9437a5a04bb716ccd69852a7ca849
| 3,092
|
py
|
Python
|
Advance/jwitter/jwitter/jwitter/settings.py
|
AMZEnterprise/Python_Course_Jadi
|
4c1b3512ae0292f897d3ae2aa6449be6a5adb514
|
[
"MIT"
] | null | null | null |
Advance/jwitter/jwitter/jwitter/settings.py
|
AMZEnterprise/Python_Course_Jadi
|
4c1b3512ae0292f897d3ae2aa6449be6a5adb514
|
[
"MIT"
] | null | null | null |
Advance/jwitter/jwitter/jwitter/settings.py
|
AMZEnterprise/Python_Course_Jadi
|
4c1b3512ae0292f897d3ae2aa6449be6a5adb514
|
[
"MIT"
] | null | null | null |
"""
Django settings for jwitter project.
Generated by 'django-admin startproject' using Django 3.0.14.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^!vo^^f#ba(vn-2ggaev_54gzbikx4v6@8ctu$fc&!az3ep_n9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jwitter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jwitter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.553719
| 91
| 0.696636
|
f634814519811e866bdaf62718e29797392b9a96
| 733
|
py
|
Python
|
gram/forms.py
|
gumato/instagram
|
2f96d0d66e5a621823a3e23aa20ba8cc8d09b26c
|
[
"Unlicense",
"MIT"
] | null | null | null |
gram/forms.py
|
gumato/instagram
|
2f96d0d66e5a621823a3e23aa20ba8cc8d09b26c
|
[
"Unlicense",
"MIT"
] | null | null | null |
gram/forms.py
|
gumato/instagram
|
2f96d0d66e5a621823a3e23aa20ba8cc8d09b26c
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django import forms
from .models import Image,Profile, Comments
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class ImageForm(forms.ModelForm):
class Meta:
model = Image
        exclude = ['likes','profile','posted_time']
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
exclude = ['image','user']
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user']
class SignupForm(UserCreationForm):
email = forms.EmailField(max_length=250, help_text='Required')
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
| 29.32
| 66
| 0.671214
|
b41e2e723c36703cb5ab1ca47738f2060af82ceb
| 2,605
|
py
|
Python
|
sciunit/unit_test/base_tests.py
|
lakesare/sciunit
|
7e8635e40d3155a8f8118be6494c67c6c6370899
|
[
"MIT"
] | null | null | null |
sciunit/unit_test/base_tests.py
|
lakesare/sciunit
|
7e8635e40d3155a8f8118be6494c67c6c6370899
|
[
"MIT"
] | null | null | null |
sciunit/unit_test/base_tests.py
|
lakesare/sciunit
|
7e8635e40d3155a8f8118be6494c67c6c6370899
|
[
"MIT"
] | null | null | null |
import unittest
from pathlib import Path
tmp_folder_path = Path(__file__).parent / "delete_after_tests"
class BaseCase(unittest.TestCase):
"""Unit tests for config files"""
@classmethod
def setUpClass(cls):
Path(tmp_folder_path).mkdir(parents=True, exist_ok=True)
@classmethod
def tearDownClass(cls):
import shutil
if tmp_folder_path.exists() and tmp_folder_path.is_dir():
shutil.rmtree(tmp_folder_path)
def test_deep_exclude(self):
from sciunit.base import deep_exclude
test_state = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
test_exclude = [("a", "b"), ("c", "d")]
deep_exclude(test_state, test_exclude)
def test_default(self):
# TODO
pass
def test_SciUnit(self):
from sciunit.base import SciUnit
sciunitObj = SciUnit()
self.assertIsInstance(sciunitObj.properties(), dict)
self.assertIsInstance(sciunitObj.__getstate__(), dict)
self.assertIsInstance(sciunitObj.json(), str)
sciunitObj.json(string=False)
self.assertIsInstance(sciunitObj._class, dict)
sciunitObj.testState = "testState"
SciUnit.state_hide.append("testState")
self.assertFalse("testState" in sciunitObj.__getstate__())
def test_Versioned(self):
from git import Repo
from sciunit.base import Versioned
ver = Versioned()
# Testing .get_remote()
# 1. Checking our sciunit .git repo
# (to make sure .get_remote() works with real repos too!)
self.assertEqual("origin", ver.get_remote("I am not a remote").name)
self.assertEqual("origin", ver.get_remote().name)
# 2. Checking NO .git repo
self.assertEqual(None, ver.get_remote(repo=None))
# 3. Checking a .git repo without remotes
git_repo = Repo.init(tmp_folder_path / "git_repo")
self.assertEqual(None, ver.get_remote(repo=git_repo))
# 4. Checking a .git repo with remotes
origin = git_repo.create_remote("origin", "https://origin.com")
beta = git_repo.create_remote('beta', "https://beta.com")
self.assertEqual(origin, ver.get_remote(repo=git_repo))
self.assertEqual(origin, ver.get_remote("not a remote", repo=git_repo))
self.assertEqual(beta, ver.get_remote("beta", repo=git_repo))
# Testing .get_repo()
self.assertIsInstance(ver.get_repo(), Repo)
# Testing .get_remote_url()
self.assertIsInstance(ver.get_remote_url("I am not a remote"), str)
if __name__ == "__main__":
unittest.main()
| 34.733333
| 79
| 0.649136
|
53fa0cec55822e260f0ab264bade1a4b9130c92e
| 1,101
|
py
|
Python
|
ask/qa/models.py
|
takzhanov/stepic-web-tech
|
248b45e61a053e1ef9cb246da0c509fbf0cac182
|
[
"MIT"
] | null | null | null |
ask/qa/models.py
|
takzhanov/stepic-web-tech
|
248b45e61a053e1ef9cb246da0c509fbf0cac182
|
[
"MIT"
] | null | null | null |
ask/qa/models.py
|
takzhanov/stepic-web-tech
|
248b45e61a053e1ef9cb246da0c509fbf0cac182
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Model, Count
from django.utils import timezone
class QuestionManager(models.Manager):
def new(self):
return (self.order_by('-added_at'))
def popular(self):
        return (self.annotate(num_likes=Count('likes')).order_by('-num_likes'))
class Question(Model):
title = models.CharField(max_length=255)
text = models.TextField()
added_at = models.DateTimeField(default=timezone.now, null=True)
rating = models.IntegerField(default=0)
author = models.ForeignKey(User, related_name='author', null=True)
likes = models.ManyToManyField(User)
objects = QuestionManager()
def get_url(self):
return '/question/' + str(self.id) + '/'
def __str__(self):
return '(id=' + str(self.id) + ', added_at=' + str(self.added_at)
class Answer(Model):
text = models.TextField()
added_at = models.DateTimeField(null=True)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
author = models.ForeignKey(User, null=True)
| 30.583333
| 78
| 0.698456
|
a2e283089641adead43c505697afc7ff6d076c09
| 448
|
py
|
Python
|
chpass/dal/models/base.py
|
bengabay11/chromesy
|
03328f7330c4e119e7f6a5bc2b75575eb47e4e61
|
[
"MIT"
] | null | null | null |
chpass/dal/models/base.py
|
bengabay11/chromesy
|
03328f7330c4e119e7f6a5bc2b75575eb47e4e61
|
[
"MIT"
] | 23
|
2021-08-05T17:38:02.000Z
|
2022-02-14T08:28:15.000Z
|
chpass/dal/models/base.py
|
bengabay11/chromesy
|
03328f7330c4e119e7f6a5bc2b75575eb47e4e61
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from sqlalchemy.ext.declarative import as_declarative, declared_attr
@dataclass
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower()
def json(self) -> dict:
dict_base = self.__dict__.copy()
for key in dict_base.copy():
if key.startswith("_"):
del dict_base[key]
return dict_base
| 23.578947
| 68
| 0.654018
|
60bb5dac384352c9fc9ce9d6baf907b60bc6fb09
| 8,032
|
py
|
Python
|
release/scripts/addons/io_mesh_uv_layout/__init__.py
|
vic3t3chn0/Bforartists
|
7c54a60dd7aa568e20ae7e3778dfef993b61b7b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-01-18T22:13:24.000Z
|
2020-01-18T22:13:24.000Z
|
release/scripts/addons/io_mesh_uv_layout/__init__.py
|
vic3t3chn0/Bforartists
|
7c54a60dd7aa568e20ae7e3778dfef993b61b7b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/addons/io_mesh_uv_layout/__init__.py
|
vic3t3chn0/Bforartists
|
7c54a60dd7aa568e20ae7e3778dfef993b61b7b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "UV Layout",
"author": "Campbell Barton, Matt Ebb",
"version": (1, 1, 1),
"blender": (2, 80, 0),
"location": "Image-Window > UVs > Export UV Layout",
"description": "Export the UV layout as a 2D graphic",
"warning": "",
"wiki_url": "https://docs.blender.org/manual/en/latest/addons/io_mesh_uv_layout.html",
"support": 'OFFICIAL',
"category": "Import-Export",
}
# @todo write the wiki page
if "bpy" in locals():
import importlib
if "export_uv_eps" in locals():
importlib.reload(export_uv_eps)
if "export_uv_png" in locals():
importlib.reload(export_uv_png)
if "export_uv_svg" in locals():
importlib.reload(export_uv_svg)
import os
import bpy
from bpy.props import (
StringProperty,
BoolProperty,
EnumProperty,
IntVectorProperty,
FloatProperty,
)
class ExportUVLayout(bpy.types.Operator):
"""Export UV layout to file"""
bl_idname = "uv.export_layout"
bl_label = "Export UV Layout"
bl_options = {'REGISTER', 'UNDO'}
filepath: StringProperty(
subtype='FILE_PATH',
)
export_all: BoolProperty(
name="All UVs",
description="Export all UVs in this mesh (not just visible ones)",
default=False,
)
modified: BoolProperty(
name="Modified",
description="Exports UVs from the modified mesh",
default=False,
)
mode: EnumProperty(
items=(
('SVG', "Scalable Vector Graphic (.svg)",
"Export the UV layout to a vector SVG file"),
('EPS', "Encapsulate PostScript (.eps)",
"Export the UV layout to a vector EPS file"),
('PNG', "PNG Image (.png)",
"Export the UV layout to a bitmap image"),
),
name="Format",
description="File format to export the UV layout to",
default='PNG',
)
size: IntVectorProperty(
size=2,
default=(1024, 1024),
min=8, max=32768,
description="Dimensions of the exported file",
)
opacity: FloatProperty(
name="Fill Opacity",
min=0.0, max=1.0,
default=0.25,
description="Set amount of opacity for exported UV layout",
)
@classmethod
def poll(cls, context):
obj = context.active_object
return obj is not None and obj.type == 'MESH' and obj.data.uv_layers
def invoke(self, context, event):
self.size = self.get_image_size(context)
self.filepath = self.get_default_file_name(context) + "." + self.mode.lower()
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def get_default_file_name(self, context):
AMOUNT = 3
objects = list(self.iter_objects_to_export(context))
name = " ".join(sorted([obj.name for obj in objects[:AMOUNT]]))
if len(objects) > AMOUNT:
name += " and more"
return name
def check(self, context):
if any(self.filepath.endswith(ext) for ext in (".png", ".eps", ".svg")):
self.filepath = self.filepath[:-4]
ext = "." + self.mode.lower()
self.filepath = bpy.path.ensure_ext(self.filepath, ext)
return True
def execute(self, context):
obj = context.active_object
is_editmode = (obj.mode == 'EDIT')
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
filepath = self.filepath
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())
meshes = list(self.iter_meshes_to_export(context))
polygon_data = list(self.iter_polygon_data_to_draw(context, meshes))
different_colors = set(color for _, color in polygon_data)
if self.modified:
depsgraph = context.evaluated_depsgraph_get()
for obj in self.iter_objects_to_export(context):
obj_eval = obj.evaluated_get(depsgraph)
obj_eval.to_mesh_clear()
export = self.get_exporter()
export(filepath, polygon_data, different_colors, self.size[0], self.size[1], self.opacity)
if is_editmode:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
return {'FINISHED'}
def iter_meshes_to_export(self, context):
depsgraph = context.evaluated_depsgraph_get()
for obj in self.iter_objects_to_export(context):
if self.modified:
yield obj.evaluated_get(depsgraph).to_mesh()
else:
yield obj.data
@staticmethod
def iter_objects_to_export(context):
for obj in {*context.selected_objects, context.active_object}:
if obj.type != 'MESH':
continue
mesh = obj.data
if mesh.uv_layers.active is None:
continue
yield obj
@staticmethod
def currently_image_image_editor(context):
return isinstance(context.space_data, bpy.types.SpaceImageEditor)
def get_currently_opened_image(self, context):
if not self.currently_image_image_editor(context):
return None
return context.space_data.image
def get_image_size(self, context):
# fallback if not in image context
image_width = self.size[0]
image_height = self.size[1]
# get size of "active" image if some exist
image = self.get_currently_opened_image(context)
if image is not None:
width, height = image.size
if width and height:
image_width = width
image_height = height
return image_width, image_height
def iter_polygon_data_to_draw(self, context, meshes):
for mesh in meshes:
uv_layer = mesh.uv_layers.active.data
for polygon in mesh.polygons:
if self.export_all or polygon.select:
start = polygon.loop_start
end = start + polygon.loop_total
uvs = tuple(tuple(uv.uv) for uv in uv_layer[start:end])
yield (uvs, self.get_polygon_color(mesh, polygon))
@staticmethod
def get_polygon_color(mesh, polygon, default=(0.8, 0.8, 0.8)):
if polygon.material_index < len(mesh.materials):
material = mesh.materials[polygon.material_index]
if material is not None:
return tuple(material.diffuse_color)[:3]
return default
def get_exporter(self):
if self.mode == 'PNG':
from . import export_uv_png
return export_uv_png.export
elif self.mode == 'EPS':
from . import export_uv_eps
return export_uv_eps.export
elif self.mode == 'SVG':
from . import export_uv_svg
return export_uv_svg.export
else:
assert False
def menu_func(self, context):
self.layout.operator(ExportUVLayout.bl_idname, icon = "FILE_TICK")
def register():
bpy.utils.register_class(ExportUVLayout)
bpy.types.IMAGE_MT_uvs.append(menu_func)
def unregister():
bpy.utils.unregister_class(ExportUVLayout)
bpy.types.IMAGE_MT_uvs.remove(menu_func)
if __name__ == "__main__":
register()
| 32.518219
| 98
| 0.623506
|
3ee1f46c666a1cb9a317a6f2687ecbcc1bb7ab92
| 18,605
|
py
|
Python
|
test/test_tokenizer_tok.py
|
vthorsteinsson/Tokenizer
|
19ade7dc63c131e1942fba31159bc8fc771aec3e
|
[
"MIT"
] | 10
|
2018-01-17T19:17:17.000Z
|
2019-02-19T02:25:36.000Z
|
test/test_tokenizer_tok.py
|
vthorsteinsson/Tokenizer
|
19ade7dc63c131e1942fba31159bc8fc771aec3e
|
[
"MIT"
] | 4
|
2018-04-20T08:45:39.000Z
|
2018-11-28T18:13:15.000Z
|
test/test_tokenizer_tok.py
|
vthorsteinsson/Tokenizer
|
19ade7dc63c131e1942fba31159bc8fc771aec3e
|
[
"MIT"
] | 3
|
2018-04-20T08:36:12.000Z
|
2018-11-20T16:31:55.000Z
|
# type: ignore
"""
Tests for Tokenizer module
Copyright (C) 2022 by Miðeind ehf.
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import tokenizer
Tok = tokenizer.Tok
TOK = tokenizer.TOK
ACCENT = chr(769)
UMLAUT = chr(776)
SOFT_HYPHEN = chr(173)
ZEROWIDTH_SPACE = chr(8203)
ZEROWIDTH_NBSP = chr(65279)
def test_split_simple() -> None:
t = Tok(TOK.RAW, "boat", None)
l, r = t.split(2)
assert l == Tok(TOK.RAW, "bo", None)
assert r == Tok(TOK.RAW, "at", None)
def test_split_simple_original() -> None:
t = Tok(TOK.RAW, "boat", None, "boat", [0, 1, 2, 3])
l, r = t.split(2)
assert l == Tok(TOK.RAW, "bo", None, "bo", [0, 1])
assert r == Tok(TOK.RAW, "at", None, "at", [0, 1])
def test_split_with_substitutions() -> None:
# original: "a&123b". replace "&123" with "x" and end up with "axb"
t = Tok(TOK.RAW, "axb", None, "a&123b", [0, 1, 5])
l1, r1 = t.split(1)
assert l1 == Tok(TOK.RAW, "a", None, "a", [0])
assert r1 == Tok(TOK.RAW, "xb", None, "&123b", [0, 4])
l2, r2 = t.split(2)
assert l2 == Tok(TOK.RAW, "ax", None, "a&123", [0, 1])
assert r2 == Tok(TOK.RAW, "b", None, "b", [0])
def test_split_with_substitutions_with_whitespace_prefix() -> None:
# original: " a&123b". strip whitespace and replace "&123" with "x" and end up with "axb"
t = Tok(TOK.RAW, "axb", None, " a&123b", [2, 3, 7])
l1, r1 = t.split(1)
assert l1 == Tok(TOK.RAW, "a", None, " a", [2])
assert r1 == Tok(TOK.RAW, "xb", None, "&123b", [0, 4])
l2, r2 = t.split(2)
assert l2 == Tok(TOK.RAW, "ax", None, " a&123", [2, 3])
assert r2 == Tok(TOK.RAW, "b", None, "b", [0])
def test_split_with_whitespace_prefix() -> None:
t = Tok(TOK.RAW, "boat", None, " boat", [3, 4, 5, 6])
l, r = t.split(2)
assert l == Tok(TOK.RAW, "bo", None, " bo", [3, 4])
assert r == Tok(TOK.RAW, "at", None, "at", [0, 1])
def test_split_at_ends() -> None:
t = Tok(TOK.RAW, "ab", None, "ab", [0, 1])
l, r = t.split(0)
assert l == Tok(TOK.RAW, "", None, "", [])
assert r == Tok(TOK.RAW, "ab", None, "ab", [0, 1])
t = Tok(TOK.RAW, "ab", None, "ab", [0, 1])
l, r = t.split(2)
assert l == Tok(TOK.RAW, "ab", None, "ab", [0, 1])
assert r == Tok(TOK.RAW, "", None, "", [])
t = Tok(TOK.RAW, "ab", None)
l, r = t.split(0)
assert l == Tok(TOK.RAW, "", None)
assert r == Tok(TOK.RAW, "ab", None)
t = Tok(TOK.RAW, "ab", None)
l, r = t.split(2)
assert l == Tok(TOK.RAW, "ab", None)
assert r == Tok(TOK.RAW, "", None)
def test_split_with_negative_index() -> None:
test_string = "abcde"
t = Tok(TOK.RAW, test_string, None, test_string, list(range(len(test_string))))
l, r = t.split(-2)
assert l == Tok(TOK.RAW, "abc", None, "abc", [0, 1, 2])
assert r == Tok(TOK.RAW, "de", None, "de", [0, 1])
"""
TODO: Haven't decided what's the correct behavior.
def test_split_on_empty_txt():
t = Tok(TOK.RAW, "", None, "this got removed", [])
l, r = t.split(0)
assert l == Tok(TOK.RAW, "", None, "", [])
assert r == Tok(TOK.RAW, "", None, "this got removed", [])
l, r = t.split(1)
assert l == Tok(TOK.RAW, "", None, "this got removed", [])
assert r == Tok(TOK.RAW, "", None, "", [])
"""
def test_substitute() -> None:
t = Tok(TOK.RAW, "a&123b", None, "a&123b", [0, 1, 2, 3, 4, 5])
t.substitute((1, 5), "x")
assert t == Tok(TOK.RAW, "axb", None, "a&123b", [0, 1, 5])
t = Tok(TOK.RAW, "ab&123", None, "ab&123", [0, 1, 2, 3, 4, 5])
t.substitute((2, 6), "x")
assert t == Tok(TOK.RAW, "abx", None, "ab&123", [0, 1, 2])
t = Tok(TOK.RAW, "&123ab", None, "&123ab", [0, 1, 2, 3, 4, 5])
t.substitute((0, 4), "x")
assert t == Tok(TOK.RAW, "xab", None, "&123ab", [0, 4, 5])
def test_substitute_bugfix_1() -> None:
test_string = "xya" + ACCENT + "zu" + ACCENT + "wáo" + UMLAUT + "b"
# 012 3 45 6 7890123456 7 8
# 0123456789012345
t = Tok(
kind=-1,
txt=test_string,
val=None,
original=test_string,
origin_spans=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
)
t.substitute((2, 4), "á")
assert t == Tok(
kind=-1,
txt="xyázu" + ACCENT + "wáo" + UMLAUT + "b",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
)
t.substitute((4, 6), "ú")
assert t == Tok(
kind=-1,
txt="xyázúwáo" + UMLAUT + "b",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
)
t.substitute((14, 16), "ö")
assert t == Tok(
kind=-1,
txt="xyázúwáöb",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18],
)
# bug was here
t.substitute((6, 14), "á")
assert t == Tok(
kind=-1,
txt="xyázúwáöb",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 4, 5, 7, 8, 16, 18],
)
def test_multiple_substitutions() -> None:
t = Tok(
TOK.RAW,
"a&123b&456&789c",
None,
"a&123b&456&789c",
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
)
t.substitute((1, 5), "x")
assert t == Tok(
TOK.RAW,
"axb&456&789c",
None,
"a&123b&456&789c",
[0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
)
t.substitute((3, 7), "y")
assert t == Tok(
TOK.RAW, "axby&789c", None, "a&123b&456&789c", [0, 1, 5, 6, 10, 11, 12, 13, 14]
)
t.substitute((4, 8), "z")
assert t == Tok(TOK.RAW, "axbyzc", None, "a&123b&456&789c", [0, 1, 5, 6, 10, 14])
def test_substitute_without_origin_tracking() -> None:
t = Tok(TOK.RAW, "a&123b", None)
t.substitute((1, 5), "x")
assert t == Tok(TOK.RAW, "axb", None)
t = Tok(TOK.RAW, "ab&123", None)
t.substitute((2, 6), "x")
assert t == Tok(TOK.RAW, "abx", None)
t = Tok(TOK.RAW, "&123ab", None)
t.substitute((0, 4), "x")
assert t == Tok(TOK.RAW, "xab", None)
t = Tok(TOK.RAW, "a&123b&456c", None)
t.substitute((1, 5), "x")
assert t == Tok(TOK.RAW, "axb&456c", None)
t.substitute((3, 7), "y")
assert t == Tok(TOK.RAW, "axbyc", None)
def test_substitute_that_removes() -> None:
t = Tok(TOK.RAW, "a&123b", None, "a&123b", [0, 1, 2, 3, 4, 5])
t.substitute((1, 5), "")
assert t == Tok(TOK.RAW, "ab", None, "a&123b", [0, 5])
t = Tok(TOK.RAW, "&123ab", None, "&123ab", [0, 1, 2, 3, 4, 5])
t.substitute((0, 4), "")
assert t == Tok(TOK.RAW, "ab", None, "&123ab", [4, 5])
t = Tok(TOK.RAW, "ab&123", None, "ab&123", [0, 1, 2, 3, 4, 5])
t.substitute((2, 6), "")
assert t == Tok(TOK.RAW, "ab", None, "ab&123", [0, 1])
def test_split_without_origin_tracking() -> None:
t = Tok(TOK.RAW, "boat", None)
l, r = t.split(2)
assert l == Tok(TOK.RAW, "bo", None)
assert r == Tok(TOK.RAW, "at", None)
###
# original: "a&123b". replace "&123" with "x" and end up with "axb"
t = Tok(TOK.RAW, "axb", None)
l1, r1 = t.split(1)
assert l1 == Tok(TOK.RAW, "a", None)
assert r1 == Tok(TOK.RAW, "xb", None)
l2, r2 = t.split(2)
assert l2 == Tok(TOK.RAW, "ax", None)
assert r2 == Tok(TOK.RAW, "b", None)
###
# original: " a&123b". strip whitespace and replace "&123" with "x" and end up with "axb"
t = Tok(TOK.RAW, "axb", None)
l1, r1 = t.split(1)
assert l1 == Tok(TOK.RAW, "a", None)
assert r1 == Tok(TOK.RAW, "xb", None)
l2, r2 = t.split(2)
assert l2 == Tok(TOK.RAW, "ax", None)
assert r2 == Tok(TOK.RAW, "b", None)
###
t = Tok(TOK.RAW, "boat", None)
l, r = t.split(2)
assert l == Tok(TOK.RAW, "bo", None)
assert r == Tok(TOK.RAW, "at", None)
def test_html_escapes_with_origin_tracking() -> None:
test_string = "xyazáwab"
tokens = list(tokenizer.generate_raw_tokens(test_string, replace_html_escapes=True))
assert len(tokens) == 1
assert tokens[0] == Tok(
kind=TOK.RAW,
txt="xyazáwab",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 8, 9, 17, 18, 23],
)
# Note the space after
test_string = "Ég fór út."
# Here we show in comments when a new token starts in the string with "|" (inclusive).
# | | |
# We also show the character indices for each token.
# 0101234567890123
# And where the 'txt' is.
# ^^ ^^^ ^^^
tokens = list(tokenizer.generate_raw_tokens(test_string, replace_html_escapes=True))
assert len(tokens) == 3
assert tokens == [
Tok(kind=TOK.RAW, txt="Ég", val=None, original="Ég", origin_spans=[0, 1]),
Tok(
kind=TOK.RAW,
txt="fór",
val=None,
original=" fór",
origin_spans=[7, 8, 9],
),
Tok(kind=TOK.RAW, txt="út.", val=None, original=" út.", origin_spans=[1, 2, 3]),
]
test_string = "Ég fór út."
# | | |
# 010123456789012340123
# ^^ ^^^ ^^^
tokens = list(tokenizer.generate_raw_tokens(test_string, replace_html_escapes=True))
assert len(tokens) == 3
assert tokens == [
Tok(kind=TOK.RAW, txt="Ég", val=None, original="Ég", origin_spans=[0, 1]),
Tok(
kind=TOK.RAW,
txt="fór",
val=None,
original=" fór",
origin_spans=[12, 13, 14],
),
Tok(kind=TOK.RAW, txt="út.", val=None, original=" út.", origin_spans=[1, 2, 3]),
]
test_string = "Ég fór út."
# | | |
# 01012345678012345678
# ^^ ^^^ ^^^
tokens = list(tokenizer.generate_raw_tokens(test_string, replace_html_escapes=True))
assert len(tokens) == 3
assert tokens == [
Tok(kind=TOK.RAW, txt="Ég", val=None, original="Ég", origin_spans=[0, 1]),
Tok(
kind=TOK.RAW,
txt="fór",
val=None,
original=" fór",
origin_spans=[6, 7, 8],
),
Tok(
kind=TOK.RAW,
txt="út.",
val=None,
original=" út.",
origin_spans=[6, 7, 8],
),
]
test_string = "Ég fór út. "
# | | |
# 0101230123012345
# ^^ ^^^ ^^^
tokens = list(tokenizer.generate_raw_tokens(test_string, replace_html_escapes=True))
assert len(tokens) == 4
assert tokens == [
Tok(kind=TOK.RAW, txt="Ég", val=None, original="Ég", origin_spans=[0, 1]),
Tok(kind=TOK.RAW, txt="fór", val=None, original=" fór", origin_spans=[1, 2, 3]),
Tok(kind=TOK.RAW, txt="út.", val=None, original=" út.", origin_spans=[1, 2, 3]),
Tok(kind=TOK.S_SPLIT, txt="", val=None, original=" ", origin_spans=[]),
]
test_string = " Ég fór út."
# | | |
# 0123456701230123
# ^^ ^^^ ^^^
tokens = list(tokenizer.generate_raw_tokens(test_string, replace_html_escapes=True))
assert len(tokens) == 3
assert tokens == [
Tok(kind=TOK.RAW, txt="Ég", val=None, original=" Ég", origin_spans=[6, 7]),
Tok(kind=TOK.RAW, txt="fór", val=None, original=" fór", origin_spans=[1, 2, 3]),
Tok(kind=TOK.RAW, txt="út.", val=None, original=" út.", origin_spans=[1, 2, 3]),
]
def test_unicode_escapes_with_origin_tracking() -> None:
test_string = "xya" + ACCENT + "zu" + ACCENT + "wo" + UMLAUT + "b"
tokens = list(
tokenizer.generate_raw_tokens(test_string, replace_composite_glyphs=True)
)
assert len(tokens) == 1
assert tokens[0] == Tok(
kind=TOK.RAW,
txt="xyázúwöb",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 4, 5, 7, 8, 10],
)
test_string = (
"þetta" + ZEROWIDTH_SPACE + "er" + ZEROWIDTH_NBSP + "eitt" + SOFT_HYPHEN + "orð"
)
tokens = list(
tokenizer.generate_raw_tokens(test_string, replace_composite_glyphs=True)
)
assert len(tokens) == 1
assert tokens[0] == Tok(
kind=TOK.RAW,
txt="þettaereittorð",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 14, 15, 16],
)
def test_unicode_escapes_that_are_removed() -> None:
test_string = "a\xadb\xadc"
tokens = list(
tokenizer.generate_raw_tokens(test_string, replace_composite_glyphs=True)
)
assert len(tokens) == 1
assert tokens[0] == Tok(
kind=TOK.RAW, txt="abc", val=None, original=test_string, origin_spans=[0, 2, 4]
)
def test_html_unicode_mix() -> None:
test_string = "xya" + ACCENT + "zu" + ACCENT + "wáo" + UMLAUT + "b"
# 012 3 45 6 7890123456 7 8
tokens = list(
tokenizer.generate_raw_tokens(
test_string, replace_composite_glyphs=True, replace_html_escapes=True
)
)
assert len(tokens) == 1
assert tokens[0] == Tok(
kind=TOK.RAW,
txt="xyázúwáöb",
val=None,
original=test_string,
origin_spans=[0, 1, 2, 4, 5, 7, 8, 16, 18],
)
def test_tok_concatenation() -> None:
str1 = "asdf"
tok1 = Tok(TOK.RAW, str1, None, str1, list(range(len(str1))))
str2 = "jklæ"
    tok2 = Tok(TOK.RAW, str2, None, str2, list(range(len(str2))))
assert tok1.concatenate(tok2) == Tok(
TOK.RAW, str1 + str2, None, str1 + str2, list(range(len(str1 + str2)))
)
str1 = "abc"
or1 = "&123&456&789"
str2 = "xyz"
or2 = "&xx&yy&zz"
tok1 = Tok(TOK.RAW, str1, None, or1, [0, 4, 8])
tok2 = Tok(TOK.RAW, str2, None, or2, [0, 2, 4])
assert tok1.concatenate(tok2) == Tok(
TOK.RAW, str1 + str2, None, or1 + or2, [0, 4, 8, 12, 14, 16]
)
def test_tok_concatenation_with_separator() -> None:
str1 = "asdf"
tok1 = Tok(TOK.RAW, str1, None, str1, list(range(len(str1))))
str2 = "jklæ"
    tok2 = Tok(TOK.RAW, str2, None, str2, list(range(len(str2))))
sep = "WOLOLO"
assert tok1.concatenate(tok2, separator=sep) == Tok(
TOK.RAW,
str1 + sep + str2,
None,
str1 + str2,
[0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 5, 6, 7],
)
str1 = "abc"
or1 = "&123&456&789"
str2 = "xyz"
or2 = "&xx&yy&zz"
tok1 = Tok(TOK.RAW, str1, None, or1, [0, 4, 8])
tok2 = Tok(TOK.RAW, str2, None, or2, [0, 2, 4])
sep = "WOLOLO"
assert tok1.concatenate(tok2, separator=sep) == Tok(
TOK.RAW,
str1 + sep + str2,
None,
or1 + or2,
[0, 4, 8, 12, 12, 12, 12, 12, 12, 12, 14, 16],
)
def test_tok_substitute_all() -> None:
s = "asdf"
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_all("d", "x")
assert t == Tok(TOK.RAW, "asxf", None, s, [0, 1, 2, 3])
s = "Þetta er lengri strengur."
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_all("e", "x")
assert t == Tok(TOK.RAW, "Þxtta xr lxngri strxngur.", None, s, list(range(len(s))))
s = "asdf"
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_all("d", "")
assert t == Tok(TOK.RAW, "asf", None, s, [0, 1, 3])
s = "Þessi verður lengri."
# 01234567890123456789
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_all("r", "")
assert t == Tok(
TOK.RAW,
"Þessi veðu lengi.",
None,
s,
[0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14, 15, 16, 18, 19],
)
def test_tok_substitute_longer() -> None:
s = "asdf"
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_longer((1, 2), "xyz")
assert t == Tok(TOK.RAW, "axyzdf", None, s, [0, 2, 2, 2, 2, 3])
s = "asdf"
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_longer((3, 4), "xyz")
assert t == Tok(TOK.RAW, "asdxyz", None, s, [0, 1, 2, 4, 4, 4])
s = "asdf"
t = Tok(TOK.RAW, s, None, s, list(range(len(s))))
t.substitute_longer((0, 1), "xyz")
assert t == Tok(TOK.RAW, "xyzsdf", None, s, [1, 1, 1, 1, 2, 3])
def test_tok_from_txt() -> None:
s = "asdf"
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
s = " asdf"
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
s = "asdf "
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
s = " asdf "
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
s = "Tok getur alveg verið heil setning."
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
s = "HTML & er líka óbreytt"
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
s = (
"unicode"
+ ZEROWIDTH_SPACE
+ "er"
+ ZEROWIDTH_NBSP
+ "líka"
+ SOFT_HYPHEN
+ "óbreytt"
)
t = Tok.from_txt(s)
assert t == Tok(TOK.RAW, s, None, s, list(range(len(s))))
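# Illustrative end-to-end sketch, not from the upstream suite: the primitives tested
# above compose. Every expected value below is copied from the individual tests
# (test_tok_from_txt, test_substitute, test_split_with_substitutions).
def example_from_txt_substitute_split() -> None:
    t = Tok.from_txt("a&123b")
    t.substitute((1, 5), "x")  # "a&123b" -> "axb", keeping the original text and spans
    assert t == Tok(TOK.RAW, "axb", None, "a&123b", [0, 1, 5])
    left, right = t.split(1)
    assert left == Tok(TOK.RAW, "a", None, "a", [0])
    assert right == Tok(TOK.RAW, "xb", None, "&123b", [0, 4])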
| 31.587436
| 94
| 0.532599
|
70cae66f086460608bebb6ff5372734787d5cd56
| 56,125
|
py
|
Python
|
tests/tape/test_qnode_old.py
|
QDaria/pennylane
|
5a28983fc7bd950cde8a4014e54261fef4b54293
|
[
"Apache-2.0"
] | null | null | null |
tests/tape/test_qnode_old.py
|
QDaria/pennylane
|
5a28983fc7bd950cde8a4014e54261fef4b54293
|
[
"Apache-2.0"
] | null | null | null |
tests/tape/test_qnode_old.py
|
QDaria/pennylane
|
5a28983fc7bd950cde8a4014e54261fef4b54293
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the QNode"""
import pytest
import numpy as np
from collections import defaultdict
import pennylane as qml
from pennylane import numpy as pnp
from pennylane import QNodeCollection
from pennylane.qnode_old import qnode, QNode
from pennylane.transforms import draw
from pennylane.tape import JacobianTape, QubitParamShiftTape, CVParamShiftTape
def dummyfunc():
return None
class TestValidation:
"""Tests for QNode creation and validation"""
def test_invalid_interface(self):
"""Test that an exception is raised for an invalid interface"""
dev = qml.device("default.qubit", wires=1)
test_interface = "something"
expected_error = (
rf"Unknown interface {test_interface}\. Interface must be "
r"one of \['autograd', 'torch', 'tf', 'jax'\]\."
)
with pytest.raises(qml.QuantumFunctionError, match=expected_error):
QNode(dummyfunc, dev, interface="something")
def test_invalid_device(self):
"""Test that an exception is raised for an invalid device"""
with pytest.raises(qml.QuantumFunctionError, match="Invalid device"):
QNode(dummyfunc, None)
def test_validate_device_method(self, monkeypatch):
"""Test that the method for validating the device diff method
tape works as expected"""
dev = qml.device("default.qubit", wires=1)
with pytest.raises(
qml.QuantumFunctionError,
match="does not provide a native method for computing the jacobian",
):
QNode._validate_device_method(dev, None)
monkeypatch.setitem(dev._capabilities, "provides_jacobian", True)
tape_class, interface, device, diff_options = QNode._validate_device_method(
dev, "interface"
)
method = diff_options["method"]
assert tape_class is JacobianTape
assert method == "device"
assert interface == "interface"
assert device is dev
@pytest.mark.parametrize("interface", ("autograd", "torch", "tensorflow", "jax"))
def test_validate_backprop_method_finite_shots(self, interface):
"""Tests that an error is raised for backpropagation with finite shots."""
dev = qml.device("default.qubit", wires=1, shots=3)
with pytest.raises(qml.QuantumFunctionError, match="Devices with finite shots"):
QNode._validate_backprop_method(dev, interface)
def test_validate_backprop_method_invalid_device(self):
"""Test that the method for validating the backprop diff method
tape raises an exception if the device does not support backprop."""
dev = qml.device("default.gaussian", wires=1)
with pytest.raises(qml.QuantumFunctionError, match="does not support native computations"):
QNode._validate_backprop_method(dev, None)
def test_validate_backprop_method_invalid_interface(self, monkeypatch):
"""Test that the method for validating the backprop diff method
tape raises an exception if the wrong interface is provided"""
dev = qml.device("default.qubit", wires=1)
test_interface = "something"
monkeypatch.setitem(dev._capabilities, "passthru_interface", test_interface)
with pytest.raises(qml.QuantumFunctionError, match=f"when using the {test_interface}"):
QNode._validate_backprop_method(dev, None)
def test_validate_backprop_method(self, monkeypatch):
"""Test that the method for validating the backprop diff method
tape works as expected"""
dev = qml.device("default.qubit", wires=1)
test_interface = "something"
monkeypatch.setitem(dev._capabilities, "passthru_interface", test_interface)
tape_class, interface, device, diff_options = QNode._validate_backprop_method(
dev, test_interface
)
method = diff_options["method"]
assert tape_class is JacobianTape
assert method == "backprop"
assert interface == "something"
assert device is dev
def test_validate_backprop_child_method(self, monkeypatch):
"""Test that the method for validating the backprop diff method
tape works as expected if a child device supports backprop"""
dev = qml.device("default.qubit", wires=1)
test_interface = "something"
orig_capabilities = dev.capabilities().copy()
orig_capabilities["passthru_devices"] = {test_interface: "default.gaussian"}
monkeypatch.setattr(dev, "capabilities", lambda: orig_capabilities)
tape_class, interface, device, diff_options = QNode._validate_backprop_method(
dev, test_interface
)
method = diff_options["method"]
assert tape_class is JacobianTape
assert method == "backprop"
assert interface == "something"
assert isinstance(device, qml.devices.DefaultGaussian)
def test_validate_backprop_child_method_wrong_interface(self, monkeypatch):
"""Test that the method for validating the backprop diff method
tape raises an error if a child device supports backprop but using a different interface"""
dev = qml.device("default.qubit", wires=1)
test_interface = "something"
orig_capabilities = dev.capabilities().copy()
orig_capabilities["passthru_devices"] = {test_interface: "default.gaussian"}
monkeypatch.setattr(dev, "capabilities", lambda: orig_capabilities)
with pytest.raises(
qml.QuantumFunctionError, match=r"when using the \['something'\] interface"
):
QNode._validate_backprop_method(dev, "another_interface")
def test_parameter_shift_tape_qubit_device(self):
"""Test that the get_parameter_shift_method method correctly and
returns the correct tape for qubit devices."""
dev = qml.device("default.qubit", wires=1)
tape_class = QNode._get_parameter_shift_tape(dev)
assert tape_class is QubitParamShiftTape
def test_parameter_shift_tape_cv_device(self):
"""Test that the get_parameter_shift_method method correctly and
returns the correct tape for qubit devices."""
dev = qml.device("default.gaussian", wires=1)
tape_class = QNode._get_parameter_shift_tape(dev)
assert tape_class is CVParamShiftTape
def test_parameter_shift_tape_unknown_model(self, monkeypatch):
"""test that an unknown model raises an exception"""
def capabilities(cls):
capabilities = cls._capabilities
capabilities.update(model="None")
return capabilities
monkeypatch.setattr(qml.devices.DefaultQubit, "capabilities", capabilities)
dev = qml.device("default.qubit", wires=1)
with pytest.raises(
qml.QuantumFunctionError, match="does not support the parameter-shift rule"
):
QNode._get_parameter_shift_tape(dev)
def test_best_method(self, monkeypatch):
"""Test that the method for determining the best diff method
for a given device and interface works correctly"""
dev = qml.device("default.qubit", wires=1)
monkeypatch.setitem(dev._capabilities, "passthru_interface", "some_interface")
monkeypatch.setitem(dev._capabilities, "provides_jacobian", True)
# device is top priority
res = QNode.get_best_method(dev, "another_interface")
assert res == (JacobianTape, "another_interface", dev, {"method": "device"})
# backprop is next priority
monkeypatch.setitem(dev._capabilities, "provides_jacobian", False)
res = QNode.get_best_method(dev, "some_interface")
assert res == (JacobianTape, "some_interface", dev, {"method": "backprop"})
# The next fallback is parameter-shift.
res = QNode.get_best_method(dev, "another_interface")
assert res == (QubitParamShiftTape, "another_interface", dev, {"method": "best"})
# finally, if both fail, finite differences is the fallback
def capabilities(cls):
capabilities = cls._capabilities
capabilities.update(model="None")
return capabilities
monkeypatch.setattr(qml.devices.DefaultQubit, "capabilities", capabilities)
res = QNode.get_best_method(dev, "another_interface")
assert res == (JacobianTape, "another_interface", dev, {"method": "numeric"})
def test_diff_method(self, mocker):
"""Test that a user-supplied diff-method correctly returns the right
quantum tape, interface, and diff method."""
dev = qml.device("default.qubit", wires=1)
mock_best = mocker.patch("pennylane.qnode_old.QNode.get_best_method")
mock_best.return_value = 1, 2, 3, {"method": "best"}
mock_backprop = mocker.patch("pennylane.qnode_old.QNode._validate_backprop_method")
mock_backprop.return_value = 4, 5, 6, {"method": "backprop"}
mock_device = mocker.patch("pennylane.qnode_old.QNode._validate_device_method")
mock_device.return_value = 7, 8, 9, {"method": "device"}
qn = QNode(dummyfunc, dev, diff_method="best")
assert qn._tape == mock_best.return_value[0]
assert qn.interface == mock_best.return_value[1]
assert qn.diff_options["method"] == mock_best.return_value[3]["method"]
qn = QNode(dummyfunc, dev, diff_method="backprop")
assert qn._tape == mock_backprop.return_value[0]
assert qn.interface == mock_backprop.return_value[1]
assert qn.diff_options["method"] == mock_backprop.return_value[3]["method"]
mock_backprop.assert_called_once()
qn = QNode(dummyfunc, dev, diff_method="device")
assert qn._tape == mock_device.return_value[0]
assert qn.interface == mock_device.return_value[1]
assert qn.diff_options["method"] == mock_device.return_value[3]["method"]
mock_device.assert_called_once()
qn = QNode(dummyfunc, dev, diff_method="finite-diff")
assert qn._tape == JacobianTape
assert qn.diff_options["method"] == "numeric"
qn = QNode(dummyfunc, dev, diff_method="parameter-shift")
assert qn._tape == QubitParamShiftTape
assert qn.diff_options["method"] == "analytic"
# check that get_best_method was only ever called once
mock_best.assert_called_once()
def test_unknown_diff_method(self):
"""Test that an exception is raised for an unknown differentiation method"""
dev = qml.device("default.qubit", wires=1)
with pytest.raises(
qml.QuantumFunctionError, match="Differentiation method hello not recognized"
):
QNode(dummyfunc, dev, diff_method="hello")
def test_validate_adjoint_invalid_device(self):
"""Test if a ValueError is raised when an invalid device is provided to
_validate_adjoint_method"""
dev = qml.device("default.gaussian", wires=1)
with pytest.raises(ValueError, match="The default.gaussian device does not"):
QNode._validate_adjoint_method(dev, "tf")
def test_validate_adjoint_finite_shots(self):
"""Test that a UserWarning is raised when device has finite shots"""
dev = qml.device("default.qubit", wires=1, shots=1)
with pytest.warns(
UserWarning, match="Requested adjoint differentiation to be computed with finite shots."
):
QNode._validate_adjoint_method(dev, "autograd")
def test_adjoint_finite_shots(self):
"""Tests that UserWarning is raised with the adjoint differentiation method
on QNode construction when the device has finite shots
"""
dev = qml.device("default.qubit", wires=1, shots=1)
with pytest.warns(
UserWarning, match="Requested adjoint differentiation to be computed with finite shots."
):
@qml.qnode_old.qnode(dev, diff_method="adjoint")
def circ():
return qml.expval(qml.PauliZ(0))
def test_validate_reversible_finite_shots(self):
"""Test that a UserWarning is raised when validating the reversible differentiation method
and using a device that has finite shots
"""
dev = qml.device("default.qubit", wires=1, shots=1)
with pytest.warns(
UserWarning,
match="Requested reversible differentiation to be computed with finite shots.",
):
QNode._validate_reversible_method(dev, "autograd")
def test_reversible_finite_shots(self):
"""Tests that UserWarning is raised with the reversible differentiation method
on QNode construction when the device has finite shots
"""
dev = qml.device("default.qubit", wires=1, shots=1)
with pytest.warns(
UserWarning,
match="Requested reversible differentiation to be computed with finite shots.",
):
@qml.qnode_old.qnode(dev, diff_method="reversible")
def circ():
return qml.expval(qml.PauliZ(0))
def test_qnode_print(self):
"""Test that printing a QNode object yields the right information."""
dev = qml.device("default.qubit", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
qn = qml.qnode_old.QNode(func, dev, diff_method="finite-diff")
assert (
qn.__repr__()
== "<QNode: wires=1, device='default.qubit', interface='autograd', diff_method='finite-diff'>"
)
assert qn.diff_method_change == False
def test_qnode_best_diff_method_backprop(self):
"""Test that selected "best" diff_method is correctly set to 'backprop'."""
dev = qml.device("default.qubit", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
qn = qml.qnode_old.QNode(func, dev)
assert qn.diff_method == "backprop"
assert qn.diff_method_change
def test_qnode_best_diff_method_parameter_shift(self):
"""Test that selected "best" diff_method is correctly set to 'parameter-shift'."""
dev = qml.device("default.mixed", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
qn = qml.qnode_old.QNode(func, dev)
assert qn.diff_method == "parameter-shift"
assert qn.diff_method_change
def test_qnode_best_diff_method_device(self, monkeypatch):
"""Test that selected "best" diff_method is correctly set to 'device'."""
dev = qml.device("default.qubit", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
# Force the "best" method to be "device"
monkeypatch.setitem(dev._capabilities, "passthru_interface", "some_interface")
monkeypatch.setitem(dev._capabilities, "provides_jacobian", True)
qn = qml.qnode_old.QNode(func, dev)
assert qn.diff_method == "device"
assert qn.diff_method_change
def test_qnode_best_diff_method_finite_diff(self, monkeypatch):
"""Test that selected "best" diff_method is correctly set to 'finite-diff'."""
dev = qml.device("default.qubit", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
def capabilities(cls):
capabilities = cls._capabilities
capabilities.update(model="None")
return capabilities
# Force the "best" method to be "finite-diff"
monkeypatch.setitem(dev._capabilities, "provides_jacobian", False)
monkeypatch.setattr(qml.devices.DefaultQubit, "capabilities", capabilities)
qn = qml.qnode_old.QNode(func, dev)
assert qn.diff_method == "finite-diff"
assert qn.diff_method_change
def test_qnode_best_diff_method_finite_fallback(self):
"""Test that selected "best" diff_method is correctly set to 'finite-diff'
in cases where other methods are not available."""
# Custom operation which has grad_method="finite_diff"
class MyRX(qml.operation.Operation):
num_wires = 1
is_composable_rotation = True
basis = "X"
grad_method = "F"
@classmethod
def _matrix(cls, *params):
return qml.RX._matrix(*params)
dev = qml.device("default.mixed", wires=3, shots=None)
dev.operations.add("MyRX")
def circuit(x):
MyRX(x, wires=1)
return qml.expval(qml.PauliZ(1))
qnode = qml.qnode_old.QNode(circuit, dev, diff_method="best")
# Before execution correctly show 'parameter-shift'
assert qnode.diff_method == "parameter-shift"
par = qml.numpy.array(0.3)
qml.grad(qnode)(par)
# After execution correctly show 'finite-diff'
assert qnode.diff_method == "finite-diff"
@pytest.mark.parametrize(
"method",
[
"best",
"parameter-shift",
"finite-diff",
"reversible",
"adjoint",
"backprop",
],
)
def test_to_tf(self, method, mocker):
"""Test if interface change is working"""
tf = pytest.importorskip("tensorflow")
dev = qml.device("default.qubit", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
# Test if interface change works with different diff_methods
qn = qml.qnode_old.QNode(func, dev, interface="autograd", diff_method=method)
spy = mocker.spy(qn, "_get_best_diff_method")
qn.to_tf()
if method == "best":
spy.assert_called_once()
@pytest.mark.parametrize(
"method",
[
"best",
"parameter-shift",
"finite-diff",
"reversible",
"adjoint",
"backprop",
],
)
def test_to_autograd(self, method, mocker):
"""Test if interface change is working"""
dev = qml.device("default.qubit", wires=1)
tf = pytest.importorskip("tensorflow")
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
# Test if interface change works with different diff_methods
qn = qml.qnode_old.QNode(func, dev, interface="tf", diff_method=method)
spy = mocker.spy(qn, "_get_best_diff_method")
qn.to_autograd()
if method == "best":
spy.assert_called_once()
@pytest.mark.parametrize(
"method",
[
"best",
"parameter-shift",
"finite-diff",
"reversible",
"adjoint",
"backprop",
],
)
def test_to_torch(self, method, mocker):
"""Test if interface change is working"""
dev = qml.device("default.qubit", wires=1)
torch = pytest.importorskip("torch")
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
# Test if interface change works with different diff_methods
qn = qml.qnode_old.QNode(func, dev, interface="autograd", diff_method=method)
spy = mocker.spy(qn, "_get_best_diff_method")
qn.to_torch()
if method == "best":
spy.assert_called_once()
@pytest.mark.parametrize(
"method",
[
"best",
"parameter-shift",
"finite-diff",
"reversible",
"adjoint",
"backprop",
],
)
def test_to_jax(self, method, mocker):
"""Test if interface change is working"""
dev = qml.device("default.qubit", wires=1)
jax = pytest.importorskip("jax")
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
# Test if interface change works with different diff_methods
qn = qml.qnode_old.QNode(func, dev, interface="autograd", diff_method=method)
spy = mocker.spy(qn, "_get_best_diff_method")
qn.to_jax()
if method == "best":
spy.assert_called_once()
@pytest.mark.parametrize(
"par", [None, 1, 1.1, np.array(1.2), pnp.array(1.3, requires_grad=True)]
)
def test_diff_method_none(self, par):
"""Test if diff_method=None works as intended."""
dev = qml.device("default.qubit", wires=1)
def func(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
qn = qml.qnode_old.QNode(func, dev, diff_method=None)
assert qn.interface is None
grad = qml.grad(qn)
# Raise error in cases 1 and 5, as non-trainable parameters do not trigger differentiation
# Case 1: No input
# Case 2: int input
# Case 3: float input
# Case 4: numpy input
# Case 5: differentiable tensor input
if par is None or hasattr(par, "requires_grad"):
with pytest.raises(TypeError) as exp:
grad() if par is None else grad(par)
else:
grad(par)
def test_diff_method_none_no_qnode_param(self):
"""Test if diff_method=None works as intended."""
dev = qml.device("default.qubit", wires=1)
def func():
qml.PauliX(wires=0)
return qml.expval(qml.PauliZ(0))
qn = qml.qnode_old.QNode(func, dev, diff_method=None)
assert qn.interface is None
grad = qml.grad(qn)
# No differentiation required. No error raised.
grad()
def test_unrecognized_keyword_arguments_validation(self):
"""Tests that a UserWarning is raised when unrecognized keyword arguments are provided."""
# use two unrecognized methods, to confirm that multiple warnings are raised
unrecognized_one = "test_method_one"
unrecognized_two = "test_method_two"
warning_text = (
" is unrecognized, and will not be included in your computation. "
"Please review the QNode class or qnode decorator for the list of available "
"keyword variables."
)
expected_warnings = {
(UserWarning, f"'{unrecognized_one}'{warning_text}"),
(UserWarning, f"'{unrecognized_two}'{warning_text}"),
}
dev = qml.device("default.qubit", wires=1, shots=1)
with pytest.warns(UserWarning) as warning_list:
QNode(dummyfunc, dev, test_method_one=1, test_method_two=2)
warnings = {(warning.category, warning.message.args[0]) for warning in warning_list}
assert warnings == expected_warnings
def test_unrecognized_keyword_arguments_validation_decorator(self):
"""Tests that a UserWarning is raised when unrecognized keyword arguments are provided."""
# use two unrecognized methods, to confirm that multiple warnings are raised
unrecognized_one = "test_method_one"
unrecognized_two = "test_method_two"
warning_text = (
" is unrecognized, and will not be included in your computation. "
"Please review the QNode class or qnode decorator for the list of available "
"keyword variables."
)
expected_warnings = {
(UserWarning, f"'{unrecognized_one}'{warning_text}"),
(UserWarning, f"'{unrecognized_two}'{warning_text}"),
}
dev = qml.device("default.qubit", wires=1, shots=1)
with pytest.warns(UserWarning) as warning_list:
@qml.qnode_old.qnode(dev, test_method_one=1, test_method_two=2)
def circ():
return qml.expval(qml.PauliZ(0))
warnings = {(warning.category, warning.message.args[0]) for warning in warning_list}
assert warnings == expected_warnings
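# Illustrative sketch, not from the upstream suite: selecting a differentiation method
# explicitly on the legacy QNode. The expected attributes mirror
# TestValidation.test_diff_method above; nothing beyond that is assumed.
def example_explicit_diff_method() -> None:
    dev = qml.device("default.qubit", wires=1)
    qn = QNode(dummyfunc, dev, diff_method="parameter-shift")
    assert qn._tape == QubitParamShiftTape
    assert qn.diff_options["method"] == "analytic"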
class TestTapeConstruction:
"""Tests for the tape construction"""
def test_basic_tape_construction(self, tol):
"""Test that a quantum tape is properly constructed"""
dev = qml.device("default.qubit", wires=2)
def func(x, y):
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
qn = QNode(func, dev)
x = 0.12
y = 0.54
res = qn(x, y)
assert isinstance(qn.qtape, JacobianTape)
assert len(qn.qtape.operations) == 3
assert len(qn.qtape.observables) == 1
assert qn.qtape.num_params == 2
expected = qn.qtape.execute(dev)
assert np.allclose(res, expected, atol=tol, rtol=0)
# when called, a new quantum tape is constructed
old_tape = qn.qtape
res2 = qn(x, y)
assert np.allclose(res, res2, atol=tol, rtol=0)
assert qn.qtape is not old_tape
def test_jacobian(self, tol):
"""Test the jacobian computation"""
dev = qml.device("default.qubit", wires=2)
def func(x, y):
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
return qml.probs(wires=0), qml.probs(wires=1)
qn = QNode(func, dev, h=1e-8, order=2)
assert qn.diff_options["h"] == 1e-8
assert qn.diff_options["order"] == 2
x = 0.12
y = 0.54
res = qn(x, y)
jac = qn.qtape.jacobian(dev, params=[0.45, 0.1])
assert jac.shape == (4, 2)
def test_diff_method_expansion(self, monkeypatch, mocker):
"""Test that a QNode with tape expansion during construction
preserves the differentiation method."""
class MyDev(qml.devices.DefaultQubit):
"""Dummy device that supports device Jacobians"""
@classmethod
def capabilities(cls):
capabilities = super().capabilities().copy()
capabilities.update(
provides_jacobian=True,
)
return capabilities
def jacobian(self, *args, **kwargs):
return np.zeros((2, 4))
dev = MyDev(wires=2)
def func(x, y):
# the U2 operation is not supported on default.qubit
# and is decomposed.
qml.U2(x, y, wires=0)
qml.CNOT(wires=[0, 1])
return qml.probs(wires=0)
qn = QNode(func, dev, diff_method="device", h=1e-8, order=2)
assert qn.diff_options["method"] == "device"
assert qn.diff_options["h"] == 1e-8
assert qn.diff_options["order"] == 2
x = pnp.array(0.12, requires_grad=True)
y = pnp.array(0.54, requires_grad=True)
spy = mocker.spy(JacobianTape, "expand")
res = qn(x, y)
spy.assert_called_once()
assert qn.qtape.jacobian_options["method"] == "device"
assert qn.qtape.jacobian_options["h"] == 1e-8
assert qn.qtape.jacobian_options["order"] == 2
spy = mocker.spy(JacobianTape, "jacobian")
jac = qml.jacobian(qn)(x, y)
assert spy.call_args_list[0][1]["method"] == "device"
def test_returning_non_measurements(self):
"""Test that an exception is raised if a non-measurement
is returned from the QNode."""
dev = qml.device("default.qubit", wires=2)
def func(x, y):
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
return 5
qn = QNode(func, dev)
with pytest.raises(
qml.QuantumFunctionError, match="must return either a single measurement"
):
qn(5, 1)
def func(x, y):
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), 5
qn = QNode(func, dev)
with pytest.raises(
qml.QuantumFunctionError, match="must return either a single measurement"
):
qn(5, 1)
def test_inconsistent_measurement_order(self):
"""Test that an exception is raised if measurements are returned in an
order different to how they were queued on the tape"""
dev = qml.device("default.qubit", wires=2)
def func(x, y):
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
m = qml.expval(qml.PauliZ(0))
return qml.expval(qml.PauliX(1)), m
qn = QNode(func, dev)
with pytest.raises(
qml.QuantumFunctionError,
match="measurements must be returned in the order they are measured",
):
qn(5, 1)
def test_consistent_measurement_order(self):
"""Test evaluation exceeds as expected if measurements are returned in the
same order to how they were queued on the tape"""
dev = qml.device("default.qubit", wires=2)
def func(x, y):
global op1, op2, op3, m1, m2
op1 = qml.RX(x, wires=0)
op2 = qml.RY(y, wires=1)
op3 = qml.CNOT(wires=[0, 1])
m1 = qml.expval(qml.PauliZ(0))
m2 = qml.expval(qml.PauliX(1))
return [m1, m2]
qn = QNode(func, dev)
qn(5, 1) # evaluate the QNode
assert qn.qtape.operations == [op1, op2, op3]
assert qn.qtape.measurements == [m1, m2]
def test_draw_transform(self):
"""Test circuit drawing"""
from pennylane import numpy as anp
x = anp.array(0.1, requires_grad=True)
y = anp.array([0.2, 0.3], requires_grad=True)
z = anp.array(0.4, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, interface="autograd")
def circuit(p1, p2=y, **kwargs):
qml.RX(p1, wires=0)
qml.RY(p2[0] * p2[1], wires=1)
qml.RX(kwargs["p3"], wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
result = draw(circuit)(p1=x, p3=z)
expected = """\
0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩
1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩
"""
assert result == expected
def test_draw_transform_ascii(self):
"""Test circuit drawing when using ASCII characters"""
from pennylane import numpy as anp
x = anp.array(0.1, requires_grad=True)
y = anp.array([0.2, 0.3], requires_grad=True)
z = anp.array(0.4, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, interface="autograd")
def circuit(p1, p2, **kwargs):
qml.RX(p1, wires=0)
qml.RY(p2[0] * p2[1], wires=1)
qml.RX(kwargs["p3"], wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
result = draw(circuit, charset="ascii")(p1=x, p2=y, p3=z)
expected = """\
0: --RX(0.1)---RX(0.4)--+C--+| <Z @ X>
1: --RY(0.06)-----------+X--+| <Z @ X>
"""
assert result == expected
def test_drawing(self):
"""Test circuit drawing"""
from pennylane import numpy as anp
x = anp.array(0.1, requires_grad=True)
y = anp.array([0.2, 0.3], requires_grad=True)
z = anp.array(0.4, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, interface="autograd")
def circuit(p1, p2=y, **kwargs):
qml.RX(p1, wires=0)
qml.RY(p2[0] * p2[1], wires=1)
qml.RX(kwargs["p3"], wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
circuit(p1=x, p3=z)
result = circuit.draw()
expected = """\
0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩
1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩
"""
assert result == expected
def test_drawing_ascii(self):
"""Test circuit drawing when using ASCII characters"""
from pennylane import numpy as anp
x = anp.array(0.1, requires_grad=True)
y = anp.array([0.2, 0.3], requires_grad=True)
z = anp.array(0.4, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, interface="autograd")
def circuit(p1, p2=y, **kwargs):
qml.RX(p1, wires=0)
qml.RY(p2[0] * p2[1], wires=1)
qml.RX(kwargs["p3"], wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
circuit(p1=x, p3=z)
result = circuit.draw(charset="ascii")
expected = """\
0: --RX(0.1)---RX(0.4)--+C--+| <Z @ X>
1: --RY(0.06)-----------+X--+| <Z @ X>
"""
assert result == expected
def test_drawing_exception(self):
"""Test that an error is raised if a QNode is drawn prior to
construction."""
from pennylane import numpy as anp
x = anp.array(0.1, requires_grad=True)
y = anp.array([0.2, 0.3], requires_grad=True)
z = anp.array(0.4, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
@qnode(dev, interface="autograd")
def circuit(p1, p2=y, **kwargs):
qml.RX(p1, wires=0)
qml.RY(p2[0] * p2[1], wires=1)
qml.RX(kwargs["p3"], wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
with pytest.raises(qml.QuantumFunctionError, match="can only be drawn after"):
circuit.draw()
def test_multiple_observables_same_wire_expval(self, mocker):
"""Test that the QNode supports returning expectation values of observables that are on the
same wire (provided that they are Pauli words and qubit-wise commuting)"""
dev = qml.device("default.qubit", wires=3)
w = np.random.random((2, 3, 3))
@qnode(dev)
def f(w):
qml.templates.StronglyEntanglingLayers(w, wires=range(3))
return (
qml.expval(qml.PauliX(0)),
qml.expval(qml.PauliX(0) @ qml.PauliZ(1)),
qml.expval(qml.PauliX(2)),
)
spy = mocker.spy(qml.devices.DefaultQubit, "apply")
res = f(w)
spy.assert_called_once()
obs = [qml.PauliX(0), qml.PauliX(0) @ qml.PauliZ(1), qml.PauliX(2)]
qnodes = qml.map(qml.templates.StronglyEntanglingLayers, obs, dev)
res_2 = qnodes(w)
assert np.allclose(res, res_2)
def test_multiple_observables_same_wire_mixed(self, mocker):
"""Test that the QNode supports returning observables that are on the
same wire but with different return types (provided that the observables are Pauli words and
qubit-wise commuting)"""
dev = qml.device("default.qubit", wires=3)
w = np.random.random((2, 3, 3))
@qnode(dev)
def f(w):
qml.templates.StronglyEntanglingLayers(w, wires=range(3))
return qml.expval(qml.PauliX(0)), qml.var(qml.PauliX(0) @ qml.PauliZ(1))
spy = mocker.spy(qml.devices.DefaultQubit, "apply")
res = f(w)
spy.assert_called_once()
q1 = qml.map(qml.templates.StronglyEntanglingLayers, [qml.PauliX(0)], dev, measure="expval")
q2 = qml.map(
qml.templates.StronglyEntanglingLayers,
[qml.PauliX(0) @ qml.PauliZ(1)],
dev,
measure="var",
)
res_2 = np.array([q1(w), q2(w)]).squeeze()
assert np.allclose(res, res_2)
class TestDecorator:
"""Unittests for the decorator"""
def test_decorator(self, tol):
"""Test that the decorator correctly creates a QNode."""
dev = qml.device("default.qubit", wires=2)
@qnode(dev)
def func(x, y):
"""My function docstring"""
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
assert isinstance(func, QNode)
assert func.__doc__ == "My function docstring"
x = 0.12
y = 0.54
res = func(x, y)
assert isinstance(func.qtape, JacobianTape)
assert len(func.qtape.operations) == 3
assert len(func.qtape.observables) == 1
assert func.qtape.num_params == 2
expected = func.qtape.execute(dev)
assert np.allclose(res, expected, atol=tol, rtol=0)
# when called, a new quantum tape is constructed
old_tape = func.qtape
res2 = func(x, y)
assert np.allclose(res, res2, atol=tol, rtol=0)
assert func.qtape is not old_tape
@pytest.mark.usefixtures("skip_if_no_dask_support")
class TestQNodeCollection:
"""Unittests for the QNodeCollection"""
def test_multi_thread(self):
"""Test that multi-threaded queuing works correctly"""
n_qubits = 4
n_batches = 5
dev = qml.device("default.qubit", wires=n_qubits)
def circuit(inputs, weights):
for index, input in enumerate(inputs):
qml.RY(input, wires=index)
for index in range(n_qubits - 1):
qml.CNOT(wires=(index, index + 1))
for index, weight in enumerate(weights):
qml.RX(weight, wires=index)
return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]
weight_shapes = {"weights": (n_qubits)}
try:
qnode = QNodeCollection([QNode(circuit, dev) for _ in range(n_batches)])
except Exception as e:
pytest.fail("QNodeCollection cannot be instantiated")
x = np.random.rand(n_qubits).astype(np.float64)
p = np.random.rand(weight_shapes["weights"]).astype(np.float64)
try:
for _ in range(10):
qnode(x, p, parallel=True)
except:
pytest.fail("Multi-threading on QuantumTape failed")
class TestIntegration:
"""Integration tests."""
def test_correct_number_of_executions_autograd(self):
"""Test that number of executions are tracked in the autograd interface."""
def func():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit", wires=2)
qn = QNode(func, dev, interface="autograd")
for i in range(2):
qn()
assert dev.num_executions == 2
qn2 = QNode(func, dev, interface="autograd")
for i in range(3):
qn2()
assert dev.num_executions == 5
def test_correct_number_of_executions_tf(self):
"""Test that number of executions are tracked in the tf interface."""
tf = pytest.importorskip("tf")
def func():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit", wires=2)
qn = QNode(func, dev, interface="tf")
for i in range(2):
qn()
assert dev.num_executions == 2
qn2 = QNode(func, dev, interface="tf")
for i in range(3):
qn2()
assert dev.num_executions == 5
# qubit of different interface
qn3 = QNode(func, dev, interface="autograd")
qn3()
assert dev.num_executions == 6
def test_correct_number_of_executions_torch(self):
"""Test that number of executions are tracked in the torch interface."""
torch = pytest.importorskip("torch")
def func():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit", wires=2)
qn = QNode(func, dev, interface="torch")
for i in range(2):
qn()
assert dev.num_executions == 2
qn2 = QNode(func, dev, interface="torch")
for i in range(3):
qn2()
assert dev.num_executions == 5
# qubit of different interface
qn3 = QNode(func, dev, interface="autograd")
qn3()
assert dev.num_executions == 6
@pytest.mark.parametrize("diff_method", ["parameter-shift", "finite-diff", "reversible"])
def test_single_expectation_value_with_argnum_one(self, diff_method, tol):
"""Tests correct output shape and evaluation for a QNode
with a single expval output where only one parameter is chosen to
estimate the jacobian.
This test relies on the fact that exactly one term of the estimated
jacobian will match the expected analytical value.
"""
from pennylane import numpy as anp
dev = qml.device("default.qubit", wires=2)
x = anp.array(0.543, requires_grad=True)
y = anp.array(-0.654, requires_grad=True)
@qml.qnode_old.qnode(
dev, diff_method=diff_method, argnum=[1]
) # <--- we only choose one trainable parameter
def circuit(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
res = qml.grad(circuit)(x, y)
assert len(res) == 2
expected = (0, np.cos(y) * np.cos(x))
assert np.allclose(res, expected, atol=tol, rtol=0)
class TestMutability:
"""Test for QNode immutability"""
def test_mutable(self, mocker, tol):
"""Test that a QNode which has structure dependent
on trainable arguments is reconstructed with
every call, and remains differentiable"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode_old.qnode(dev, mutable=True)
def circuit(x):
if x < 0:
qml.RY(x, wires=0)
else:
qml.RZ(x, wires=0)
return qml.expval(qml.PauliZ(0))
x = 0.5
spy = mocker.spy(circuit, "construct")
res = circuit(x)
spy.assert_called_once_with((x,), {})
assert len(spy.call_args_list) == 1
assert circuit.qtape.operations[0].name == "RZ"
assert circuit.qtape.operations[0].data == [x]
np.testing.assert_allclose(res, 1, atol=tol, rtol=0)
# calling the qnode with new arguments reconstructs the tape
x = -0.5
res = circuit(x)
spy.assert_called_with((x,), {})
assert len(spy.call_args_list) == 2
assert circuit.qtape.operations[0].name == "RY"
assert circuit.qtape.operations[0].data == [x]
np.testing.assert_allclose(res, np.cos(x), atol=tol, rtol=0)
# test differentiability
grad = qml.grad(circuit)(0.5)
np.testing.assert_allclose(grad, 0, atol=tol, rtol=0)
grad = qml.grad(circuit)(-0.5)
np.testing.assert_allclose(grad, -np.sin(-0.5), atol=tol, rtol=0)
def test_immutable(self, mocker, tol):
"""Test that a QNode which has structure dependent
on trainable arguments is *not* reconstructed with
every call when mutable=False"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode_old.qnode(dev, mutable=False)
def circuit(x):
if x < 0:
qml.RY(x, wires=0)
else:
qml.RZ(x, wires=0)
return qml.expval(qml.PauliZ(0))
x = 0.5
spy = mocker.spy(circuit, "construct")
res = circuit(x)
spy.assert_called_once_with((x,), {})
assert len(spy.call_args_list) == 1
assert circuit.qtape.operations[0].name == "RZ"
assert circuit.qtape.operations[0].data == [x]
np.testing.assert_allclose(res, 1, atol=tol, rtol=0)
# calling the qnode with new arguments does not reconstruct the tape
x = -0.5
res = circuit(x)
spy.assert_called_once_with((0.5,), {})
assert len(spy.call_args_list) == 1
assert circuit.qtape.operations[0].name == "RZ"
assert circuit.qtape.operations[0].data == [0.5]
np.testing.assert_allclose(res, 1, atol=tol, rtol=0)
# test differentiability. The circuit will assume an RZ gate
grad = qml.grad(circuit)(-0.5)
np.testing.assert_allclose(grad, 0, atol=tol, rtol=0)
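# Illustrative sketch, not from the upstream suite: the practical difference the two
# tests above exercise. With mutable=True the tape is rebuilt on every call, so
# argument-dependent branching is re-evaluated; with mutable=False the first tape is reused.
def example_mutable_qnode() -> None:
    dev = qml.device("default.qubit", wires=1)

    @qml.qnode_old.qnode(dev, mutable=True)
    def circuit(x):
        if x < 0:
            qml.RY(x, wires=0)
        else:
            qml.RZ(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    circuit(0.5)
    assert circuit.qtape.operations[0].name == "RZ"
    circuit(-0.5)
    assert circuit.qtape.operations[0].name == "RY"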
class TestShots:
"""Unittests for specifying shots per call."""
def test_specify_shots_per_call_sample(self):
"""Tests that shots can be set per call for a sample return type."""
dev = qml.device("default.qubit", wires=1, shots=10)
@qml.qnode_old.qnode(dev)
def circuit(a):
qml.RX(a, wires=0)
return qml.sample(qml.PauliZ(wires=0))
assert len(circuit(0.8)) == 10
assert len(circuit(0.8, shots=2)) == 2
assert len(circuit(0.8, shots=3178)) == 3178
assert len(circuit(0.8)) == 10
def test_specify_shots_per_call_expval(self):
"""Tests that shots can be set per call for an expectation value.
        Note: this test has a vanishingly small probability of failing."""
dev = qml.device("default.qubit", wires=1, shots=None)
@qml.qnode_old.qnode(dev)
def circuit():
qml.Hadamard(wires=0)
return qml.expval(qml.PauliZ(wires=0))
# check that the circuit is analytic
res1 = [circuit() for _ in range(100)]
assert np.std(res1) == 0.0
assert circuit.device._shots is None
# check that the circuit is temporary non-analytic
res1 = [circuit(shots=1) for _ in range(100)]
assert np.std(res1) != 0.0
# check that the circuit is analytic again
res1 = [circuit() for _ in range(100)]
assert np.std(res1) == 0.0
assert circuit.device._shots is None
def test_no_shots_per_call_if_user_has_shots_qfunc_kwarg(self):
"""Tests that the per-call shots overwriting is suspended if user
has a shots keyword argument, but a warning is raised."""
dev = qml.device("default.qubit", wires=2, shots=10)
def circuit(a, shots=0):
qml.RX(a, wires=shots)
return qml.sample(qml.PauliZ(wires=0))
with pytest.warns(
UserWarning, match="The 'shots' argument name is reserved for overriding"
):
circuit = qml.qnode_old.QNode(circuit, dev)
assert len(circuit(0.8)) == 10
assert circuit.qtape.operations[0].wires.labels == (0,)
assert len(circuit(0.8, shots=1)) == 10
assert circuit.qtape.operations[0].wires.labels == (1,)
assert len(circuit(0.8, shots=0)) == 10
assert circuit.qtape.operations[0].wires.labels == (0,)
def test_no_shots_per_call_if_user_has_shots_qfunc_arg(self):
"""Tests that the per-call shots overwriting is suspended
if user has a shots argument, but a warning is raised."""
# Todo: use standard creation of qnode below for both asserts once we do not parse args to tensors any more
dev = qml.device("default.qubit", wires=[qml.numpy.array(0), qml.numpy.array(1)], shots=10)
def circuit(a, shots):
qml.RX(a, wires=shots)
return qml.sample(qml.PauliZ(wires=qml.numpy.array(0)))
# assert that warning is still raised
with pytest.warns(
UserWarning, match="The 'shots' argument name is reserved for overriding"
):
circuit = qml.qnode_old.QNode(circuit, dev)
assert len(circuit(0.8, 1)) == 10
assert circuit.qtape.operations[0].wires.labels == (1,)
dev = qml.device("default.qubit", wires=2, shots=10)
@qml.qnode_old.qnode(dev)
def circuit(a, shots):
qml.RX(a, wires=shots)
return qml.sample(qml.PauliZ(wires=0))
assert len(circuit(0.8, shots=0)) == 10
assert circuit.qtape.operations[0].wires.labels == (0,)
@pytest.mark.parametrize("diff_method", ["backprop", "parameter-shift"])
def test_shots_setting_does_not_mutate_device(self, diff_method):
"""Tests that per-call shots setting does not change the number of shots in the device."""
dev = qml.device("default.qubit", wires=1, shots=3)
@qml.qnode_old.qnode(dev)
def circuit(a):
qml.RX(a, wires=0)
return qml.sample(qml.PauliZ(wires=0))
assert dev.shots == 3
res = circuit(0.8, shots=2)
assert len(res) == 2
assert dev.shots == 3
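# Illustrative sketch, not from the upstream suite: per-call shot overriding in day-to-day
# use. Passing shots=... at call time affects only that evaluation; the device keeps its
# configured shot count, as test_shots_setting_does_not_mutate_device verifies above.
def example_shots_per_call() -> None:
    dev = qml.device("default.qubit", wires=1, shots=10)

    @qml.qnode_old.qnode(dev)
    def circuit(a):
        qml.RX(a, wires=0)
        return qml.sample(qml.PauliZ(wires=0))

    assert len(circuit(0.8, shots=2)) == 2  # overridden for this call only
    assert len(circuit(0.8)) == 10  # the device default still applies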
class TestSpecs:
"""Tests for the qnode property specs"""
def test_specs_error(self):
"""Tests an error is raised if the tape is not constructed."""
dev = qml.device("default.qubit", wires=4)
@qml.qnode_old.qnode(dev)
def circuit():
return qml.expval(qml.PauliZ(0))
with pytest.raises(qml.QuantumFunctionError, match=r"The QNode specifications"):
circuit.specs
@pytest.mark.parametrize(
"diff_method, len_info", [("backprop", 10), ("parameter-shift", 12), ("adjoint", 11)]
)
def test_specs(self, diff_method, len_info):
"""Tests the specs property with backprop"""
dev = qml.device("default.qubit", wires=4)
@qml.qnode_old.qnode(dev, diff_method=diff_method)
def circuit(x, y):
qml.RX(x[0], wires=0)
qml.Toffoli(wires=(0, 1, 2))
qml.CRY(x[1], wires=(0, 1))
qml.Rot(x[2], x[3], y, wires=2)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliX(1))
x = pnp.array([0.05, 0.1, 0.2, 0.3], requires_grad=True)
y = pnp.array(0.1, requires_grad=False)
res = circuit(x, y)
info = circuit.specs
assert len(info) == len_info
assert info["gate_sizes"] == defaultdict(int, {1: 2, 3: 1, 2: 1})
assert info["gate_types"] == defaultdict(int, {"RX": 1, "Toffoli": 1, "CRY": 1, "Rot": 1})
assert info["num_operations"] == 4
assert info["num_observables"] == 2
assert info["num_diagonalizing_gates"] == 1
assert info["num_used_wires"] == 3
assert info["depth"] == 3
assert info["num_device_wires"] == 4
assert info["diff_method"] == diff_method
if diff_method == "parameter-shift":
assert info["num_parameter_shift_executions"] == 7
if diff_method != "backprop":
assert info["device_name"] == "default.qubit"
assert info["num_trainable_params"] == 4
else:
assert info["device_name"] == "default.qubit.autograd"
def test_finitediff_float32(tol):
"""Tests that float32 parameters do not effect order 1 finite-diff results.
Checks bugfix. Problem occured with StronglyEntanglingLayers, but not simpler circuits.
"""
n_wires = 2
n_layers = 2
shape = qml.templates.StronglyEntanglingLayers.shape(n_wires=n_wires, n_layers=n_layers)
rng = np.random.default_rng(seed=42)
params = rng.random(shape)
params_f32 = np.array(params, dtype=np.float32)
dev = qml.device("default.qubit", n_wires)
@qml.qnode_old.qnode(dev, diff_method="finite-diff", order=1)
def circuit(params):
qml.templates.StronglyEntanglingLayers(params, wires=range(n_wires))
return qml.expval(qml.PauliZ(0))
grad64 = qml.grad(circuit)(params)
grad32 = qml.grad(circuit)(params_f32)
assert np.allclose(grad64, grad32, atol=tol, rtol=0)
class TestDrawMethod:
"""Tests for the deprecated qnode.draw() method"""
def test_method_deprecation(self):
"""Test that the qnode.draw() method raises a deprecation warning"""
x = np.array(0.1)
y = np.array([0.2, 0.3])
z = np.array(0.4)
dev = qml.device("default.qubit", wires=2)
@qml.qnode_old.qnode(dev, interface="autograd")
def circuit(p1, p2=y, **kwargs):
qml.RX(p1, wires=0)
qml.RY(p2[0] * p2[1], wires=1)
qml.RX(kwargs["p3"], wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
circuit(p1=x, p3=z)
with pytest.warns(UserWarning, match=r"The QNode\.draw method has been deprecated"):
result = circuit.draw()
expected = """\
0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩
1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩
"""
assert result == expected
def test_invalid_wires(self):
"""Test that an exception is raised if a wire in the wire
ordering does not exist on the device"""
dev = qml.device("default.qubit", wires=["a", -1, "q2"])
@qml.qnode_old.qnode(dev)
def circuit():
qml.Hadamard(wires=-1)
qml.CNOT(wires=["a", "q2"])
qml.RX(0.2, wires="a")
return qml.expval(qml.PauliX(wires="q2"))
circuit()
with pytest.raises(ValueError, match="contains wires not contained on the device"):
res = circuit.draw(wire_order=["q2", 5])
def test_tape_not_constructed(self):
"""Test that an exception is raised if the tape has not been constructed"""
dev = qml.device("default.qubit", wires=1)
@qml.qnode_old.qnode(dev)
def circuit():
return qml.expval(qml.PauliX(wires=0))
with pytest.raises(
qml.QuantumFunctionError, match="after its quantum tape has been constructed"
):
res = circuit.draw()
def test_show_all_wires_error(self):
"""Test that show_all_wires will raise an error if the provided wire
order does not contain all wires on the device"""
dev = qml.device("default.qubit", wires=[-1, "a", "q2", 0])
@qml.qnode_old.qnode(dev)
def circuit():
qml.Hadamard(wires=-1)
qml.CNOT(wires=[-1, "q2"])
return qml.expval(qml.PauliX(wires="q2"))
circuit()
with pytest.raises(ValueError, match="must contain all wires"):
circuit.draw(show_all_wires=True, wire_order=[-1, "a"])
| 36.13973
| 116
| 0.588151
|
729d1cab5eac412fe0b2bd93731d6cd65e16b952
| 2,116
|
py
|
Python
|
books/migrations/0001_initial.py
|
vis7/librery_management
|
704e69e8c78fb79bc567464df02213c59bc6051b
|
[
"MIT"
] | null | null | null |
books/migrations/0001_initial.py
|
vis7/librery_management
|
704e69e8c78fb79bc567464df02213c59bc6051b
|
[
"MIT"
] | 2
|
2021-10-31T12:14:29.000Z
|
2021-10-31T12:16:54.000Z
|
books/migrations/0001_initial.py
|
vis7/librery_management
|
704e69e8c78fb79bc567464df02213c59bc6051b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-06-07 09:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('age', models.IntegerField()),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='enter name of book', max_length=32)),
('price', models.DecimalField(decimal_places=2, max_digits=4)),
('is_available', models.BooleanField(default=True)),
('pub_date', models.DateField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.author')),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('mobile', models.IntegerField()),
('num_books_take', models.IntegerField()),
('books', models.ManyToManyField(to='books.Book')),
],
),
migrations.AddField(
model_name='book',
name='genre',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.genre'),
),
]
| 37.122807
| 114
| 0.551985
|
38aec332d5eaee70a2ebf97dbd1cbca6156ddce1
| 2,873
|
py
|
Python
|
src/backend/common/queries/tests/team_events_query_test.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | 266
|
2015-01-04T00:10:48.000Z
|
2022-03-28T18:42:05.000Z
|
src/backend/common/queries/tests/team_events_query_test.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | 2,673
|
2015-01-01T20:14:33.000Z
|
2022-03-31T18:17:16.000Z
|
src/backend/common/queries/tests/team_events_query_test.py
|
ZachOrr/the-blue-alliance
|
b9a2e6e07374fb12c70f8fae1948bfe90e34adfe
|
[
"MIT"
] | 230
|
2015-01-04T00:10:48.000Z
|
2022-03-26T18:12:04.000Z
|
from typing import List, Optional
from google.appengine.ext import ndb
from backend.common.consts.event_type import EventType
from backend.common.models.event import Event
from backend.common.models.event_team import EventTeam
from backend.common.models.keys import EventKey, TeamKey, Year
from backend.common.models.team import Team
from backend.common.queries.event_query import (
TeamEventsQuery,
TeamYearEventsQuery,
TeamYearEventTeamsQuery,
)
def preseed_events(year: Year, n: int) -> List[str]:
stored = ndb.put_multi(
[
Event(
id=f"{year}test{i}",
event_short=f"test{i}",
year=year,
event_type_enum=EventType.OFFSEASON,
)
for i in range(n)
]
)
assert len(stored) == n
return [k.id() for k in stored]
def preseed_teams(start_team: int, end_team: Optional[int] = None) -> None:
end_team = end_team or start_team
stored = ndb.put_multi(
[
Team(
id=f"frc{i}",
team_number=i,
)
for i in range(start_team, end_team + 1)
]
)
assert len(stored) == (end_team - start_team + 1)
def preseed_event_team(team_key: TeamKey, event_keys: List[EventKey]) -> None:
[
EventTeam(
id=f"{event_key}_{team_key}",
event=ndb.Key(Event, event_key),
team=ndb.Key(Team, team_key),
year=int(event_key[:4]),
).put()
for event_key in event_keys
]
def test_team_events_no_data() -> None:
events = TeamEventsQuery(team_key="frc254").fetch()
assert events == []
def test_team_year_events_no_data() -> None:
events = TeamYearEventsQuery(team_key="frc254", year=2020).fetch()
assert events == []
def test_team_year_event_teams_no_data() -> None:
event_teams = TeamYearEventTeamsQuery(team_key="frc254", year=2020).fetch()
assert event_teams == []
def test_team_events() -> None:
events1 = preseed_events(2020, 1)
events2 = preseed_events(2019, 1)
preseed_teams(254)
preseed_event_team("frc254", events1 + events2)
event_teams = TeamEventsQuery(team_key="frc254").fetch()
assert len(event_teams) == 2
def test_team_year_events() -> None:
events1 = preseed_events(2020, 1)
events2 = preseed_events(2019, 1)
preseed_teams(254)
preseed_event_team("frc254", events1 + events2)
event_teams = TeamYearEventTeamsQuery(team_key="frc254", year=2020).fetch()
assert len(event_teams) == 1
def test_team_year_event_teams() -> None:
events1 = preseed_events(2020, 1)
events2 = preseed_events(2019, 1)
preseed_teams(254)
preseed_event_team("frc254", events1 + events2)
event_teams = TeamYearEventTeamsQuery(team_key="frc254", year=2020).fetch()
assert len(event_teams) == 1
| 28.166667
| 79
| 0.649147
|
d4f39da5226d1b0d7f3d255b844dd64c18ecb3cb
| 5,013
|
py
|
Python
|
new.py
|
truongcntn2017/FingerHandDetector
|
ed6d79ae140a6650ff1f20c72967e9d549acf69f
|
[
"MIT"
] | null | null | null |
new.py
|
truongcntn2017/FingerHandDetector
|
ed6d79ae140a6650ff1f20c72967e9d549acf69f
|
[
"MIT"
] | null | null | null |
new.py
|
truongcntn2017/FingerHandDetector
|
ed6d79ae140a6650ff1f20c72967e9d549acf69f
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import copy
import math
#from appscript import app
# Environment:
# OS : Mac OS EL Capitan
# python: 3.5
# opencv: 2.4.13
# parameters
cap_region_x_begin=0.5 # start point/total width
cap_region_y_end=0.8  # start point/total height
threshold = 60 # BINARY threshold
blurValue = 41 # GaussianBlur parameter
bgSubThreshold = 50
learningRate = 0
# variables
isBgCaptured = 0   # bool, whether the background has been captured
triggerSwitch = False  # if true, keyboard simulator works
def printThreshold(thr):
print("! Changed threshold to "+str(thr))
def removeBG(frame):
fgmask = bgModel.apply(frame,learningRate=learningRate)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
kernel = np.ones((3, 3), np.uint8)
fgmask = cv2.erode(fgmask, kernel, iterations=1)
res = cv2.bitwise_and(frame, frame, mask=fgmask)
return res
def calculateFingers(res,drawing): # -> finished bool, cnt: finger count
# convexity defect
hull = cv2.convexHull(res, returnPoints=False)
if len(hull) > 3:
defects = cv2.convexityDefects(res, hull)
        if defects is not None:  # avoid crashing. (BUG not found)
cnt = 0
for i in range(defects.shape[0]): # calculate the angle
s, e, f, d = defects[i][0]
start = tuple(res[s][0])
end = tuple(res[e][0])
far = tuple(res[f][0])
a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) # cosine theorem
if angle <= math.pi / 2: # angle less than 90 degree, treat as fingers
cnt += 1
cv2.circle(drawing, far, 8, [211, 84, 0], -1)
return True, cnt
return False, 0
# Camera
camera = cv2.VideoCapture(0)
camera.set(10,200)
cv2.namedWindow('trackbar')
cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)
while camera.isOpened():
ret, frame = camera.read()
threshold = cv2.getTrackbarPos('trh1', 'trackbar')
frame = cv2.bilateralFilter(frame, 5, 50, 100) # smoothing filter
frame = cv2.flip(frame, 1) # flip the frame horizontally
cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
(frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
cv2.imshow('original', frame)
# Main operation
if isBgCaptured == 1: # this part wont run until background captured
img = removeBG(frame)
img = img[0:int(cap_region_y_end * frame.shape[0]),
int(cap_region_x_begin * frame.shape[1]):frame.shape[1]] # clip the ROI
cv2.imshow('mask', img)
# convert the image into binary image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
cv2.imshow('blur', blur)
ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY)
cv2.imshow('ori', thresh)
# get the coutours
thresh1 = copy.deepcopy(thresh)
contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
length = len(contours)
maxArea = -1
if length > 0:
for i in range(length): # find the biggest contour (according to area)
temp = contours[i]
area = cv2.contourArea(temp)
if area > maxArea:
maxArea = area
ci = i
res = contours[ci]
hull = cv2.convexHull(res)
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
isFinishCal,cnt = calculateFingers(res,drawing)
if triggerSwitch is True:
if isFinishCal is True and cnt <= 2:
print (cnt)
#app('System Events').keystroke(' ') # simulate pressing blank space
cv2.imshow('output', drawing)
# Keyboard OP
k = cv2.waitKey(10)
if k == 27: # press ESC to exit
camera.release()
cv2.destroyAllWindows()
break
elif k == ord('b'): # press 'b' to capture the background
bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)
isBgCaptured = 1
print( '!!!Background Captured!!!')
elif k == ord('r'): # press 'r' to reset the background
bgModel = None
triggerSwitch = False
isBgCaptured = 0
print ('!!!Reset BackGround!!!')
elif k == ord('n'):
triggerSwitch = True
print ('!!!Trigger On!!!')
| 35.807143
| 95
| 0.579094
|
e63544db49c0ced6a81d3bbec65ebeb398a58e11
| 11,001
|
py
|
Python
|
frontend/amundsen_application/api/utils/metadata_utils.py
|
ohayak/amundsen
|
32a196c611c8ca623129984d8d0252f00202b73c
|
[
"Apache-2.0"
] | null | null | null |
frontend/amundsen_application/api/utils/metadata_utils.py
|
ohayak/amundsen
|
32a196c611c8ca623129984d8d0252f00202b73c
|
[
"Apache-2.0"
] | null | null | null |
frontend/amundsen_application/api/utils/metadata_utils.py
|
ohayak/amundsen
|
32a196c611c8ca623129984d8d0252f00202b73c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from dataclasses import dataclass
from marshmallow import EXCLUDE
from typing import Any, Dict, List
from amundsen_common.models.dashboard import DashboardSummary, DashboardSummarySchema
from amundsen_common.models.feature import Feature, FeatureSchema
from amundsen_common.models.popular_table import PopularTable, PopularTableSchema
from amundsen_common.models.table import Table, TableSchema
from amundsen_application.models.user import load_user, dump_user
from amundsen_application.config import MatchRuleObject
from flask import current_app as app
import re
@dataclass
class TableUri:
database: str
cluster: str
schema: str
table: str
def __str__(self) -> str:
return f"{self.database}://{self.cluster}.{self.schema}/{self.table}"
@classmethod
def from_uri(cls, uri: str) -> 'TableUri':
"""
TABLE_KEY_FORMAT = '{db}://{cluster}.{schema}/{tbl}'
"""
pattern = re.compile(r'^(?P<database>.*?)://(?P<cluster>.*)\.(?P<schema>.*?)/(?P<table>.*?)$', re.X)
groups = pattern.match(uri)
spec = groups.groupdict() if groups else {}
return TableUri(**spec)
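# Illustrative only (added example, not from the original file): given the key format above,
# TableUri.from_uri("hive://gold.core_schema/users") yields
# TableUri(database="hive", cluster="gold", schema="core_schema", table="users"),
# and str() of that object reproduces "hive://gold.core_schema/users".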
def marshall_table_partial(table_dict: Dict) -> Dict:
"""
Forms a short version of a table Dict, with selected fields and an added 'key'
:param table_dict: Dict of partial table object
:return: partial table Dict
TODO - Unify data format returned by search and metadata.
"""
schema = PopularTableSchema()
table: PopularTable = schema.load(table_dict, unknown=EXCLUDE)
results = schema.dump(table)
# TODO: fix popular tables to provide these? remove if we're not using them?
# TODO: Add the 'key' or 'id' to the base PopularTableSchema
results['key'] = f'{table.database}://{table.cluster}.{table.schema}/{table.name}'
results['last_updated_timestamp'] = None
results['type'] = 'table'
return results
def _parse_editable_rule(rule: MatchRuleObject,
schema: str,
table: str) -> bool:
"""
Matches table name and schema with corresponding regex in matching rule
    :param rule: MatchRuleObject defined in the UNEDITABLE_TABLE_DESCRIPTION_MATCH_RULES list in the config file
    :param schema: schema name from the Table Dict received from the metadata service
    :param table: table name from the Table Dict received from the metadata service
    :return: boolean determining whether the table description is editable for the given table under this matching rule
"""
if rule.schema_regex and rule.table_name_regex:
match_schema = re.match(rule.schema_regex, schema)
match_table = re.match(rule.table_name_regex, table)
return not (match_schema and match_table)
if rule.schema_regex:
return not re.match(rule.schema_regex, schema)
if rule.table_name_regex:
return not re.match(rule.table_name_regex, table)
return True
def is_table_editable(schema_name: str, table_name: str, cfg: Any = None) -> bool:
if cfg is None:
cfg = app.config
if schema_name in cfg['UNEDITABLE_SCHEMAS']:
return False
for rule in cfg['UNEDITABLE_TABLE_DESCRIPTION_MATCH_RULES']:
if not _parse_editable_rule(rule, schema_name, table_name):
return False
return True
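# Sketch only (assumed config shape, not from the original file): with
#   UNEDITABLE_SCHEMAS = {"staging"}
#   UNEDITABLE_TABLE_DESCRIPTION_MATCH_RULES = [MatchRuleObject(schema_regex=r"^tmp.*", table_name_regex=r".*_bak$")]
# is_table_editable("core", "users", cfg) returns True, while any table in a schema starting
# with "tmp" whose name ends in "_bak" is treated as not editable.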
def marshall_table_full(table_dict: Dict) -> Dict:
"""
Forms the full version of a table Dict, with additional and sanitized fields
:param table_dict: Table Dict from metadata service
:return: Table Dict with sanitized fields
"""
schema = TableSchema()
table: Table = schema.load(table_dict)
results: Dict[str, Any] = schema.dump(table)
is_editable = is_table_editable(results['schema'], results['name'])
results['is_editable'] = is_editable
# TODO - Cleanup https://github.com/lyft/amundsen/issues/296
# This code will try to supplement some missing data since the data here is incomplete.
# Once the metadata service response provides complete user objects we can remove this.
results['owners'] = [_map_user_object_to_schema(owner) for owner in results['owners']]
readers = results['table_readers']
for reader_object in readers:
reader_object['user'] = _map_user_object_to_schema(reader_object['user'])
columns = results['columns']
for col in columns:
# Set editable state
col['is_editable'] = is_editable
# If order is provided, we sort the column based on the pre-defined order
if app.config['COLUMN_STAT_ORDER']:
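            # COLUMN_STAT_ORDER is assumed to map stat_type -> sort index,
            # e.g. {"count": 0, "count_null": 1, "avg": 2} (illustrative values, not from this repo)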
            # if a stat_type is not defined in COLUMN_STAT_ORDER, fall back to the max index so it sorts last
col['stats'].sort(key=lambda x: app.config['COLUMN_STAT_ORDER'].
get(x['stat_type'], len(app.config['COLUMN_STAT_ORDER'])))
# TODO: Add the 'key' or 'id' to the base TableSchema
results['key'] = f'{table.database}://{table.cluster}.{table.schema}/{table.name}'
# Temp code to make 'partition_key' and 'partition_value' part of the table
results['partition'] = _get_partition_data(results['watermarks'])
# We follow same style as column stat order for arranging the programmatic descriptions
prog_descriptions = results['programmatic_descriptions']
results['programmatic_descriptions'] = _convert_prog_descriptions(prog_descriptions)
return results
def marshall_dashboard_partial(dashboard_dict: Dict) -> Dict:
"""
Forms a short version of dashboard metadata, with selected fields and an added 'key'
and 'type'
:param dashboard_dict: Dict of partial dashboard metadata
:return: partial dashboard Dict
"""
schema = DashboardSummarySchema(unknown=EXCLUDE)
dashboard: DashboardSummary = schema.load(dashboard_dict)
results = schema.dump(dashboard)
results['type'] = 'dashboard'
# TODO: Bookmark logic relies on key, opting to add this here to avoid messy logic in
# React app and we have to clean up later.
results['key'] = results.get('uri', '')
return results
def marshall_dashboard_full(dashboard_dict: Dict) -> Dict:
"""
Cleanup some fields in the dashboard response
:param dashboard_dict: Dashboard response from metadata service.
:return: Dashboard dictionary with sanitized fields, particularly the tables and owners.
"""
# TODO - Cleanup https://github.com/lyft/amundsen/issues/296
# This code will try to supplement some missing data since the data here is incomplete.
# Once the metadata service response provides complete user objects we can remove this.
dashboard_dict['owners'] = [_map_user_object_to_schema(owner) for owner in dashboard_dict['owners']]
dashboard_dict['tables'] = [marshall_table_partial(table) for table in dashboard_dict['tables']]
return dashboard_dict
def marshall_lineage_table(table_dict: Dict) -> Dict:
"""
Decorate lineage entries with database, schema, cluster, and table
:param table_dict:
:return: table entry with additional fields
"""
table_key = str(table_dict.get('key'))
table_uri = TableUri.from_uri(table_key)
table_dict['database'] = table_uri.database
table_dict['schema'] = table_uri.schema
table_dict['cluster'] = table_uri.cluster
table_dict['name'] = table_uri.table
return table_dict
def _convert_prog_descriptions(prog_descriptions: List = None) -> Dict:
"""
    Apply the PROGRAMMATIC_DISPLAY configuration to organize the descriptions into the expected structure.
:param prog_descriptions: A list of objects representing programmatic descriptions
:return: A dictionary with organized programmatic_descriptions
"""
left = [] # type: List
right = [] # type: List
other = prog_descriptions or [] # type: List
updated_descriptions = {}
if prog_descriptions:
# We want to make sure there is a display title that is just source
for desc in prog_descriptions:
source = desc.get('source')
if not source:
logging.warning("no source found in: " + str(desc))
        # If a config is defined for programmatic display, we organize and sort the descriptions based on it
prog_display_config = app.config['PROGRAMMATIC_DISPLAY']
if prog_display_config:
left_config = prog_display_config.get('LEFT', {})
left = [x for x in prog_descriptions if x.get('source') in left_config]
left.sort(key=lambda x: _sort_prog_descriptions(left_config, x))
right_config = prog_display_config.get('RIGHT', {})
right = [x for x in prog_descriptions if x.get('source') in right_config]
right.sort(key=lambda x: _sort_prog_descriptions(right_config, x))
other_config = dict(filter(lambda x: x not in ['LEFT', 'RIGHT'], prog_display_config.items()))
other = list(filter(lambda x: x.get('source') not in left_config and x.get('source')
not in right_config, prog_descriptions))
other.sort(key=lambda x: _sort_prog_descriptions(other_config, x))
updated_descriptions['left'] = left
updated_descriptions['right'] = right
updated_descriptions['other'] = other
return updated_descriptions
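# Illustrative only (assumed config shape, not from the original file): PROGRAMMATIC_DISPLAY is
# read as a dict keyed by 'LEFT', 'RIGHT' and other buckets, where each bucket maps a description
# source to {'display_order': <int>}, e.g.
#   PROGRAMMATIC_DISPLAY = {'LEFT': {'quality_report': {'display_order': 0}},
#                           'RIGHT': {'cost_report': {'display_order': 0}}}
# Sources not listed under 'LEFT' or 'RIGHT' fall into the 'other' bucket.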
def _sort_prog_descriptions(base_config: Dict, prog_description: Dict) -> int:
default_order = len(base_config)
prog_description_source = prog_description.get('source')
config_dict = base_config.get(prog_description_source)
if config_dict:
return config_dict.get('display_order', default_order)
return default_order
def _map_user_object_to_schema(u: Dict) -> Dict:
return dump_user(load_user(u))
def _get_partition_data(watermarks: Dict) -> Dict:
if watermarks:
high_watermark = next(filter(lambda x: x['watermark_type'] == 'high_watermark', watermarks))
if high_watermark:
return {
'is_partitioned': True,
'key': high_watermark['partition_key'],
'value': high_watermark['partition_value']
}
return {
'is_partitioned': False
}
def marshall_feature_full(feature_dict: Dict) -> Dict:
"""
    Forms the full version of a feature Dict, with additional and sanitized fields
    :param feature_dict: Feature Dict from metadata service
    :return: Feature Dict with sanitized fields
"""
schema = FeatureSchema()
feature: Feature = schema.load(feature_dict)
results: Dict[str, Any] = schema.dump(feature)
# TODO do we need this for Features?
# is_editable = is_table_editable(results['schema'], results['name'])
# results['is_editable'] = is_editable
results['owners'] = [_map_user_object_to_schema(owner) for owner in results['owners']]
prog_descriptions = results['programmatic_descriptions']
results['programmatic_descriptions'] = _convert_prog_descriptions(prog_descriptions)
return results
| 39.289286
| 113
| 0.696937
|
6a52ad7779d03cca1d204aa73f76095e510c424a
| 721
|
py
|
Python
|
dashboard/dashboard/__init__.py
|
vbessonov/quicksight-embedding-test
|
2f61cfba3a526e3a012ef4e81cb464347117dd27
|
[
"MIT"
] | null | null | null |
dashboard/dashboard/__init__.py
|
vbessonov/quicksight-embedding-test
|
2f61cfba3a526e3a012ef4e81cb464347117dd27
|
[
"MIT"
] | null | null | null |
dashboard/dashboard/__init__.py
|
vbessonov/quicksight-embedding-test
|
2f61cfba3a526e3a012ef4e81cb464347117dd27
|
[
"MIT"
] | null | null | null |
import logging
import os
from flask import Flask
from dashboard.blueprints import home, api
def create_app():
app = Flask(__name__, instance_relative_config=True)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
application_settings_file = os.getenv('APPLICATION_SETTINGS_FILE')
logger.info(f"Application settings file={application_settings_file}")
if application_settings_file:
app.config.from_envvar('APPLICATION_SETTINGS_FILE')
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
app.register_blueprint(home.blueprint)
app.register_blueprint(api.blueprint)
return app
| 24.033333
| 73
| 0.746186
|
680bf3fd8ac522455f7c2fa82db8abb480212f8a
| 2,728
|
py
|
Python
|
bot.py
|
blazecus/BlazesBot
|
0d197101dcdc28ef1bc7566c6355d75939baae0e
|
[
"MIT"
] | null | null | null |
bot.py
|
blazecus/BlazesBot
|
0d197101dcdc28ef1bc7566c6355d75939baae0e
|
[
"MIT"
] | null | null | null |
bot.py
|
blazecus/BlazesBot
|
0d197101dcdc28ef1bc7566c6355d75939baae0e
|
[
"MIT"
] | null | null | null |
import os
import re
import discord
import dotsandboxes
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
client.state_per_server = dict()
client.game_per_server = dict()
for i in client.guilds:
client.state_per_server[i] = "NO GAME"
client.game_per_server[i] = None
@client.event
async def on_message(message):
guild = message.guild.id
if(guild not in client.state_per_server):
client.state_per_server[guild] = "NO GAME"
client.game_per_server[guild] = None
if(client.state_per_server[guild] == "NO GAME"):
print("asdkflja;lskdjfa;lskjdf;laskjdf;lakj")
if(bool(re.match("!playgame dotsandboxes [0-9]*x[0-9]* <@.*>", message.content))):
message_content = message.content.split(" ")
(width, height) = [int(i) for i in message_content[2].split("x")]
if(len(message.mentions) == 1):
player2 = message.mentions[0].id
client.state_per_server[guild] = "dotsandboxes"
client.game_per_server[guild] = dotsandboxes.dotsandboxes(message.author.id, player2, width, height)
game = client.game_per_server[guild]
await message.channel.send(game.print_board())
await message.channel.send("%s's turn!" % ("<@" + str(game.current_player) + ">"))
if(client.state_per_server[guild] == "dotsandboxes"):
game = client.game_per_server[guild]
if(message.author.id == game.current_player):
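            # a move command supplies two "x,y" dot coordinates, e.g. "!move 0,0 0,1" (format inferred from the regex below)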
if(bool(re.match("!move [0-9]*,[0-9]* [0-9]*,[0-9]*", message.content))):
coords = message.content.split(" ")
(x1,y1,x2,y2) = [int(i) for i in coords[1].split(",")] + [int(i) for i in coords[2].split(",")]
if game.check_valid(x1,y1,x2,y2):
game.place_line(x1,y1,x2,y2)
if(game.tie):
client.state_per_server[message.guild.id] = "NO GAME"
await message.channel.send("TIE!!!")
elif(game.done):
client.state_per_server[message.guild.id] = "NO GAME"
await message.channel.send(game.print_board())
await message.channel.send("GAME OVER! %s WINS WITH " % ("<@" + str(game.current_player) + ">") + str(game.winning_score) + " POINTS!")
else:
await message.channel.send(game.print_board())
await message.channel.send("%s's turn!" % ("<@" + str(game.current_player) + ">"))
else:
await message.channel.send("Invalid choice, try again.")
client.run(TOKEN)
| 43.301587
| 159
| 0.576246
|
52ffdf37252cc9cd888baee7f368b73dcbcd1115
| 1,437
|
py
|
Python
|
alignment/urls.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 21
|
2016-01-20T09:33:14.000Z
|
2021-12-20T19:19:45.000Z
|
alignment/urls.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 75
|
2016-02-26T16:29:58.000Z
|
2022-03-21T12:35:13.000Z
|
alignment/urls.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 77
|
2016-01-22T08:44:26.000Z
|
2022-02-01T15:54:56.000Z
|
from django.conf.urls import url
from django.views.generic import TemplateView
from django.views.decorators.cache import cache_page
from alignment import views
urlpatterns = [
url(r'^targetselection', (views.TargetSelection.as_view()), name='targetselection'),
url(r'^gproteinselection', (views.TargetSelectionGprotein.as_view()), name='targetselectiongprot'),
url(r'^arrestinselection', (views.TargetSelectionArrestin.as_view()), name='arrestinselectionprot'),
url(r'^segmentselectiongprot', views.SegmentSelectionGprotein.as_view(), name='segmentselectiongprot'),
url(r'^segmentselectionarrestin', views.SegmentSelectionArrestin.as_view(), name='segmentselectionarrestin'),
url(r'^segmentselection', views.SegmentSelection.as_view(), name='segmentselection'),
url(r'^render_alignment_excel', views.render_alignment_excel, name='render_alignment_excel'),
url(r'^render/(?P<slug>[^/]+)/$', views.render_family_alignment, name='render-family'),
url(r'^render', views.render_alignment, name='render'),
url(r'^fasta/(?P<slug>[^/]+)/$', views.render_fasta_family_alignment, name='fasta-family'),
url(r'^fasta', views.render_fasta_alignment, name='fasta'),
url(r'^csv', views.render_csv_alignment, name='csv'),
url(r'blastsearch$', views.BlastSearchInput.as_view(), name='blastsearch'),
url(r'blastsearchresults', views.BlastSearchResults.as_view(), name='blastsearch_results'),
]
| 57.48
| 113
| 0.75087
|
806fe167ae8223f4bcc7bf8c0003345081c7539f
| 2,339
|
py
|
Python
|
test/test_condica.py
|
hugorichard/augfmri
|
1c69929261931e8e852407aa0dc09923d569b5e4
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_condica.py
|
hugorichard/augfmri
|
1c69929261931e8e852407aa0dc09923d569b5e4
|
[
"BSD-3-Clause"
] | 2
|
2021-09-01T01:40:25.000Z
|
2021-10-13T06:28:51.000Z
|
test/test_condica.py
|
hugorichard/augfmri
|
1c69929261931e8e852407aa0dc09923d569b5e4
|
[
"BSD-3-Clause"
] | 2
|
2021-09-01T08:23:13.000Z
|
2022-02-09T16:17:55.000Z
|
# Authors: Hugo Richard, Badr Tajini
# License: BSD 3 clause
from condica.main import condica
from picard import picard
from sklearn.preprocessing import QuantileTransformer
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
def test_condica():
m_scores = []
m_scores_aug = []
n_components = 2
n_features = 10
n_samples = 1000
n_classes = 2
indexes = np.split(np.arange(n_samples), n_classes)
seed = 0
rng = np.random.RandomState(seed)
S = rng.laplace(size=(n_components, n_samples))
A = rng.randn(n_features, n_components)
mu = rng.randn(n_classes, n_components)
Y = np.zeros(n_samples)
for i in range(n_classes):
S[:, indexes[i]] = S[:, indexes[i]] + mu[i][:, None]
Y[indexes[i]] = i
S = S.T
X = S.dot(A.T)
clf = LinearDiscriminantAnalysis()
cv = ShuffleSplit(random_state=rng, train_size=0.1, n_splits=20)
scores_noaug = []
scores_withaug = []
for train, test in cv.split(X):
X_train, X_test = X[train], X[test]
Y_train, Y_test = Y[train], Y[test]
X_fakes, Y_fakes = condica(
A, X_train, Y_train, len(X[train]), n_quantiles=len(X[train])
)
scores_noaug.append(clf.fit(X_train, Y_train).score(X_test, Y_test))
scores_withaug.append(
clf.fit(
np.concatenate([X_train, X_fakes]),
np.concatenate([Y_train, Y_fakes]),
).score(X_test, Y_test)
)
print(np.mean(scores_noaug), np.mean(scores_withaug))
assert np.mean(scores_noaug) < np.mean(scores_withaug)
def test_condica_noY():
m_scores = []
m_scores_aug = []
n_components = 2
n_features = 10
n_samples = 1000
seed = 0
rng = np.random.RandomState(seed)
S = rng.laplace(size=(n_components, n_samples)).T
A = rng.randn(n_features, n_components)
X = S.dot(A.T)
clf = LinearDiscriminantAnalysis()
rng = np.random.RandomState()
X_fake = condica(A, X, nb_fakes=n_samples)
X = np.row_stack([X, X_fake])
Y = np.array([0] * n_samples + [1] * n_samples)
scores = cross_val_score(clf, X, Y, cv=10)
assert np.max(scores) > 0.5
assert np.min(scores) < 0.5
| 32.041096
| 76
| 0.645147
|
1538ebe9867ef5afcff924a15755685130b2f9d4
| 6,076
|
py
|
Python
|
prediction.py
|
baraschi/Twitter_Sentiment_Analysis
|
5bfc21fd712dc158505063c9810b46d4b5a26e85
|
[
"MIT"
] | null | null | null |
prediction.py
|
baraschi/Twitter_Sentiment_Analysis
|
5bfc21fd712dc158505063c9810b46d4b5a26e85
|
[
"MIT"
] | null | null | null |
prediction.py
|
baraschi/Twitter_Sentiment_Analysis
|
5bfc21fd712dc158505063c9810b46d4b5a26e85
|
[
"MIT"
] | null | null | null |
import csv, os, collections
import numpy as np
import pandas as pd  # pd.DataFrame is used below; presumably re-exported by a wildcard import in the original
import fasttext  # used by classify_fasttext; presumably provided by a wildcard import in the original
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_predict
from data_loading import *
from constants import *
from utils import *
def format_submission(labels):
# returns a df ready to input to submission_to_csv
if isinstance(labels[0], collections.Iterable):
labels = [y for x in labels for y in x]
pred_df= pd.DataFrame({'Prediction': labels})
pred_df.index.name = 'Id' # rename id column
pred_df.index += 1 #shift to correspond to sample submission
pred_df['Prediction'] = pred_df['Prediction'].replace("0","-1").replace(0,-1)
return pred_df
def submission_to_csv(predictions, filename):
if not os.path.exists(PREDS_FOLDER):
os.makedirs(PREDS_FOLDER)
predictions.to_csv(PREDS_FOLDER + filename + ".csv", index_label="Id")
def predict_and_submit(classifier, test_texts, filename):
labels = classifier.predict(test_texts)
submission_to_csv(format_submission(labels), filename)
def classify_bow(train, test = None, tweets_col = CLEAN_TWEET, filename = "bow", max_features=1000, ngram_range=(1,1)):
xtrain, xvalid, ytrain, yvalid = train_test_split(train[tweets_col], train['label'], random_state=2000, test_size=0.02)
bow_vectorizer = CountVectorizer(max_features=max_features, ngram_range=ngram_range)
# bag-of-words feature matrix
xtrain_bow = bow_vectorizer.fit_transform(xtrain)
xvalid_bow = bow_vectorizer.transform(xvalid)
if test is not None:
test_bow = bow_vectorizer.transform(test[tweets_col])
lreg = LogisticRegression()
lreg.fit(xtrain_bow, ytrain)
prediction_validation = lreg.predict(xvalid_bow) # predicting on the validation set
accuracy = accuracy_score(prediction_validation,yvalid)
cross_pred = cross_val_predict(lreg, xtrain_bow, ytrain, cv=5, n_jobs = 4)
print(classification_report(ytrain, cross_pred))
# regression using test set
if test is not None:
# regression using test set
prediction_test = lreg.predict(test_bow)
submission_to_csv(format_submission(prediction_test.tolist()), filename + "_" + tweets_col)
return accuracy
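# Illustrative usage (argument values are made up, not from the original file):
#   acc = classify_bow(train_df, test_df, tweets_col=CLEAN_TWEET, max_features=5000, ngram_range=(1, 2))
# classify_tfidf below follows the same call pattern, using a TF-IDF feature matrix instead of raw counts.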
def classify_tfidf(train, test = None, tweets_col = CLEAN_TWEET, filename = "tfidf", max_features=1000, ngram_range=(1,1)):
# splitting data into training and validation set
xtrain, xvalid, ytrain, yvalid = train_test_split(train[tweets_col], train['label'], random_state=2000, test_size=0.02)
tfidf_vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=ngram_range)
# TF-IDF feature matrix
xtrain_tfidf = tfidf_vectorizer.fit_transform(xtrain)
xvalid_tfidf = tfidf_vectorizer.transform(xvalid)
if test is not None:
test_tfidf = tfidf_vectorizer.transform(test[tweets_col])
lreg = LogisticRegression()
lreg.fit(xtrain_tfidf, ytrain)
prediction_validation = lreg.predict(xvalid_tfidf) # predicting on the validation set
accuracy = accuracy_score(prediction_validation,yvalid)
cross_pred = cross_val_predict(lreg, xtrain_tfidf, ytrain, cv=5, n_jobs = 4)
print(classification_report(ytrain, cross_pred))
# regression using test set
if test is not None:
# regression using test set
prediction_test = lreg.predict(test_tfidf)
submission_to_csv(format_submission(prediction_test.tolist()), filename + "_" + tweets_col)
return accuracy
def classify_fasttext(train, test, tweets_col = "clean_tweet", filename = "fasttext", max_iter = 200):
filename = "fasttext_labeled"
best_accuracy = 0;
i_best = -1;
for i in range(0,max_iter):
# create column with correct label format for fasttext: '__label__0 '
train['label_prefixed'] = train['label'].apply(lambda s: '__label__' + str(s) + ' ')
train_fasttext, validation_fasttext = train_test_split(train[['label_prefixed',tweets_col]], random_state=42, test_size=0.3)
train_validation_name = "data/" + filename + "_train_validation.txt"
#train set
train_fasttext.to_csv(train_validation_name, columns = ['label_prefixed',tweets_col], index=False)
classifier_validation = fasttext.supervised(train_validation_name, 'model_supervised', label_prefix='__label__')
#here we append a ' ' char at the end to avoid an IndexOutOfBound exception
labels_validation = classifier_validation.predict(validation_fasttext[tweets_col].apply(lambda s: str(s) + ' '))
#formatting
validation_fasttext['label'] = validation_fasttext['label_prefixed'].apply(lambda s: int(s.replace("__label__", "").strip()))
labels_validation = [int(y) for x in labels_validation for y in x]
accuracy = accuracy_score(validation_fasttext['label'].tolist(), labels_validation)
if accuracy > best_accuracy:
best_accuracy = accuracy
i_best = i
#we have a better result so we predict on test set
train_name = "data/" + filename + "_train.txt"
train[['label_prefixed',tweets_col]].to_csv(train_name, columns = ['label_prefixed',tweets_col], index=False)
classifier_test = fasttext.supervised(train_name, 'model_supervised', label_prefix='__label__')
labels_test = classifier_test.predict(test[tweets_col].apply(lambda s: str(s) + ' '))
submission_to_csv(format_submission(labels_test), filename + "_" + tweets_col)
#labels = classifier.predict(load_txt(TEST_DATA))
#submission_to_csv(format_submission(labels), csv_name)
printOver('\033[1mclassifying:\033[0m '+ str( (i+1)/ max_iter * 100) + '%, best_acc=' + str(best_accuracy))
print("\n")
return best_accuracy
| 46.030303
| 133
| 0.714944
|
bf30dedfda96b907fed5932e2e2fb2918ad8654d
| 692
|
py
|
Python
|
pymore/__init__.py
|
dabacon/pymore
|
2a7bea5669d50d814ae89de3919c5544f9c7fd29
|
[
"Apache-2.0"
] | 3
|
2021-02-13T22:58:06.000Z
|
2021-11-17T10:52:42.000Z
|
pymore/__init__.py
|
dabacon/pymore
|
2a7bea5669d50d814ae89de3919c5544f9c7fd29
|
[
"Apache-2.0"
] | null | null | null |
pymore/__init__.py
|
dabacon/pymore
|
2a7bea5669d50d814ae89de3919c5544f9c7fd29
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The pymore Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1.3"
from pymore.equals_tester import EqualsTester
from pymore.iterators import first
| 34.6
| 74
| 0.771676
|
dc364558e6475efa0f2ece550acd996b7cd117c9
| 3,112
|
py
|
Python
|
pypy/module/_stackless/test/test_coroutine.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/module/_stackless/test/test_coroutine.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/module/_stackless/test/test_coroutine.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
from pypy.conftest import gettestobjspace
from py.test import skip
class AppTest_Coroutine:
def setup_class(cls):
space = gettestobjspace(usemodules=('_stackless',))
cls.space = space
def test_pickle_coroutine_empty(self):
# this test is limited to basic pickling.
# real stacks can only tested with a stackless pypy build.
import _stackless as stackless
co = stackless.coroutine()
import pickle
pckl = pickle.dumps(co)
co2 = pickle.loads(pckl)
# the empty unpickled coroutine can still be used:
result = []
co2.bind(result.append, 42)
co2.switch()
assert result == [42]
def test_pickle_coroutine_bound(self):
import pickle
import _stackless
lst = [4]
co = _stackless.coroutine()
co.bind(lst.append, 2)
pckl = pickle.dumps((co, lst))
(co2, lst2) = pickle.loads(pckl)
assert lst2 == [4]
co2.switch()
assert lst2 == [4, 2]
def test_raise_propagate(self):
import _stackless as stackless
co = stackless.coroutine()
def f():
return 1/0
co.bind(f)
try:
co.switch()
except ZeroDivisionError:
pass
else:
raise AssertionError("exception not propagated")
def test_strange_test(self):
from _stackless import coroutine
def f():
print "in new coro"
return 42
def create():
b = coroutine()
b.bind(f)
print "bound"
b.switch()
print "switched"
return b
a = coroutine()
a.bind(create)
b = a.switch()
# now b.parent = a
def nothing():
pass
a.bind(nothing)
def kill():
# this sets a.parent = b
a.kill()
b.bind(kill)
b.switch()
def test_kill(self):
import _stackless as stackless
co = stackless.coroutine()
def f():
pass
co.bind(f)
assert co.is_alive
co.kill()
assert not co.is_alive
def test_kill_running(self):
skip("kill is not really working (there is only CoroutineExit, "
"which is not an app-level exception)")
import _stackless as stackless
main = stackless.coroutine.getcurrent()
result = []
co = stackless.coroutine()
def f():
x = 2
try:
result.append(1)
main.switch()
x = 3
finally:
result.append(x)
result.append(4)
co.bind(f)
assert co.is_alive
co.switch()
assert co.is_alive
assert result == [1]
co.kill()
assert not co.is_alive
assert result == [1, 2]
def test_bogus_bind(self):
import _stackless as stackless
co = stackless.coroutine()
def f():
pass
co.bind(f)
raises(ValueError, co.bind, f)
| 26.372881
| 72
| 0.523458
|
6c87191eb385b51d9cff11faac371fd22ddec7ca
| 588
|
py
|
Python
|
examples/03-remote-system/diff.py
|
chadell/diffsync
|
b59a362f75144661897762dc47486f2bc0680bdd
|
[
"Apache-2.0"
] | 67
|
2020-10-26T14:57:53.000Z
|
2022-03-28T20:38:03.000Z
|
examples/03-remote-system/diff.py
|
chadell/diffsync
|
b59a362f75144661897762dc47486f2bc0680bdd
|
[
"Apache-2.0"
] | 47
|
2020-10-26T14:49:37.000Z
|
2022-03-04T11:32:10.000Z
|
examples/03-remote-system/diff.py
|
chadell/diffsync
|
b59a362f75144661897762dc47486f2bc0680bdd
|
[
"Apache-2.0"
] | 13
|
2020-12-06T02:32:34.000Z
|
2022-03-28T16:10:02.000Z
|
"""Custom Diff class for DiffSync to influence the behavior of the core Engine."""
from diffsync.diff import Diff
class AlphabeticalOrderDiff(Diff):
"""Simple diff to return all children country in alphabetical order."""
@classmethod
def order_children_default(cls, children):
"""Simple diff to return all children in alphabetical order."""
for child in sorted(children.values()):
# it's possible to access additional information about the object
# like child.action can be "update", "create" or "delete"
yield child
| 34.588235
| 82
| 0.685374
|
7bd33b2ae126b761756bac709635fe06952d34d8
| 191
|
py
|
Python
|
EventFilter/EcalTBRawToDigi/python/ecalTBunpack_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
EventFilter/EcalTBRawToDigi/python/ecalTBunpack_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
EventFilter/EcalTBRawToDigi/python/ecalTBunpack_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
ecalTBunpack = cms.EDProducer("EcalDCCTBUnpackingModule",
fedRawDataCollectionTag = cms.InputTag('rawDataCollector')
)
| 31.833333
| 88
| 0.701571
|
acd1c0f7d3969b86d2c400548546baf9a80fc78e
| 37
|
py
|
Python
|
Prac/p5.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | 1
|
2021-05-29T03:09:24.000Z
|
2021-05-29T03:09:24.000Z
|
Prac/p5.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
Prac/p5.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
lst = [3,4,6,1,8,9]
print(6 in lst)
| 9.25
| 19
| 0.540541
|
2e91db1c4257e090f5f6abfa0d10e16735517b2c
| 729
|
py
|
Python
|
setup.py
|
SergioLaRosa/pydevrant
|
5582e1407efd1704c50b50c5ed96c0aaa876d429
|
[
"MIT"
] | 8
|
2018-04-06T21:47:18.000Z
|
2019-05-09T19:22:04.000Z
|
setup.py
|
SergioLaRosa/pydevrant
|
5582e1407efd1704c50b50c5ed96c0aaa876d429
|
[
"MIT"
] | 3
|
2018-04-06T23:16:36.000Z
|
2018-12-01T15:54:12.000Z
|
setup.py
|
SergioLaRosa/pydevrant
|
5582e1407efd1704c50b50c5ed96c0aaa876d429
|
[
"MIT"
] | 2
|
2018-04-06T21:47:22.000Z
|
2018-04-06T22:28:09.000Z
|
# Written by Sergio La Rosa (sergio.larosa89@gmail.com)
# and John Leonardo (hey@jdleo.me)
# Part of "pydevrant" package
# https://github.com/SergioLaRosa/pydevrant
from setuptools import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
long_description = ''
setup(
name='pydevrant',
version='0.21',
description='Unofficial Python wrapper for the public devRant API',
long_description=long_description,
url='https://github.com/SergioLaRosa/pydevrant',
author='Sergio La Rosa',
author_email='sergio.larosa89@gmail.com',
license='MIT',
packages=['pydevrant'],
install_requires=['requests'],
zip_safe=False)
| 28.038462
| 71
| 0.711934
|
123fae63fa459cf77393abeda0f80c6ae2a57b42
| 570
|
py
|
Python
|
Most Asked DSA By Companies/Meta/41-15.py
|
neelaadityakumar/leetcode
|
e78e0b8dc0113bdc1721bf7d025a463bea04847f
|
[
"MIT"
] | null | null | null |
Most Asked DSA By Companies/Meta/41-15.py
|
neelaadityakumar/leetcode
|
e78e0b8dc0113bdc1721bf7d025a463bea04847f
|
[
"MIT"
] | null | null | null |
Most Asked DSA By Companies/Meta/41-15.py
|
neelaadityakumar/leetcode
|
e78e0b8dc0113bdc1721bf7d025a463bea04847f
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/3sum/
# 15. 3Sum
# Medium
# Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.
# Notice that the solution set must not contain duplicate triplets.
# Example 1:
# Input: nums = [-1,0,1,2,-1,-4]
# Output: [[-1,-1,2],[-1,0,1]]
# Example 2:
# Input: nums = []
# Output: []
# Example 3:
# Input: nums = [0]
# Output: []
# Constraints:
# 0 <= nums.length <= 3000
# -105 <= nums[i] <= 105
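# The original file stops at the problem statement. The sketch below is an illustrative
# sorted two-pointer solution added by the editor, not code from the source repository.
def threeSum(nums):
    nums.sort()
    res = []
    for i in range(len(nums) - 2):
        if i > 0 and nums[i] == nums[i - 1]:
            continue  # skip duplicate anchor values
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            s = nums[i] + nums[lo] + nums[hi]
            if s < 0:
                lo += 1
            elif s > 0:
                hi -= 1
            else:
                res.append([nums[i], nums[lo], nums[hi]])
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1  # skip duplicate second values
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1  # skip duplicate third values
                lo += 1
                hi -= 1
    return res
# e.g. threeSum([-1, 0, 1, 2, -1, -4]) -> [[-1, -1, 2], [-1, 0, 1]]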
| 16.285714
| 158
| 0.580702
|
01bf92f52537b8b80ebd50a7c4b39a7a7545dcd3
| 8,873
|
py
|
Python
|
project5/data/make_shuffling_integrations_wma.py
|
HoliestCow/ece692_deeplearning
|
638c27e0d9c01ec9b0a8be8a85e54937645a897e
|
[
"MIT"
] | null | null | null |
project5/data/make_shuffling_integrations_wma.py
|
HoliestCow/ece692_deeplearning
|
638c27e0d9c01ec9b0a8be8a85e54937645a897e
|
[
"MIT"
] | null | null | null |
project5/data/make_shuffling_integrations_wma.py
|
HoliestCow/ece692_deeplearning
|
638c27e0d9c01ec9b0a8be8a85e54937645a897e
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os.path
from rebin import rebin
import glob
from random import shuffle
from joblib import Parallel, delayed
# import time
import h5py
from itertools import islice
from copy import deepcopy
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
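# e.g. list(window([1, 2, 3, 4], n=2)) -> [(1, 2), (2, 3), (3, 4)]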
def label_datasets():
targetfile = '/home/holiestcow/Documents/zephyr/datasets/muse/trainingData/answers.csv'
head, tail = os.path.split(targetfile)
# filename = []
source_labels = {}
id2string = {0: 'Background',
1: 'HEU',
2: 'WGPu',
3: 'I131',
4: 'Co60',
5: 'Tc99',
6: 'HEUandTc99'}
f = open(targetfile, 'r')
a = f.readlines()
for i in range(len(a)):
line = a[i].strip()
if line[0] == 'R':
continue
parsed = line.split(',')
filename = parsed[0]
source = parsed[1]
source_time = parsed[2]
source_labels[filename] = {'source': id2string[int(source)],
'time': float(source_time)}
f.close()
return source_labels
def parse_datafiles(targetfile, binno, outdir):
item = targetfile
# for item in filelist:
f = open(item, 'r')
a = f.readlines()
binnumber = 1024
counter = 0
spectra = np.zeros((0, binnumber))
timetracker = 0
energy_deposited = []
for i in range(len(a)):
b = a[i].strip()
b_parsed = b.split(',')
event_time = int(b_parsed[0])
energy_deposited += [float(b_parsed[1])]
timetracker += event_time
# print(timetracker)
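        # event times appear to be microsecond deltas, so 1E6 corresponds to a 1-second
        # integration window per spectrum (assumption inferred from the threshold below)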
if timetracker >= 1E6:
timetracker = 0
source_id = 0
counts, energy_edges = np.histogram(energy_deposited, bins=binnumber, range=(0.0, 3000.0))
spectra = np.vstack((spectra, counts))
counter += 1
# print(max(energy_deposited))
energy_deposited = []
# if counter >= 100:
# break
# print(np.sum(spectra[0, :]))
time = np.linspace(0, counter, counter)
time = time.reshape((time.shape[0], 1))
# print(time.shape, spectra.shape)
tosave = np.hstack((time, spectra))
f.close()
head, tail = os.path.split(item)
print(tail, spectra.shape)
# f = open(os.path.join('./integrations', tail), 'w')
# np.savetxt(f, tosave, delimiter=',')
# f.close()
np.save(os.path.join(outdir, tail[:-4] + '.npy'), tosave)
return
def main():
# only need to do this once.
binnumber = 1024
ncores = 4
nsamples = 50000
# nsamples = 100
# nsamples = 0
filename = 'naive_dataset_wma'
id2string = {0: 'Background',
1: 'HEU',
2: 'WGPu',
3: 'I131',
4: 'Co60',
5: 'Tc99',
6: 'HEUandTc99'}
string2id = {'Background': 0,
'HEU': 1,
'WGPu': 2,
'I131': 3,
'Co60': 4,
'Tc99': 5,
'HEUandTc99': 6}
# sequence_length = 30 # 30 seconds used to guess the next one
filelist = glob.glob('/home/holiestcow/Documents/zephyr/datasets/muse/trainingData/1*.csv')
# shuffle(filelist)
# Parallel(n_jobs=ncores)(delayed(parse_datafiles)(item, binnumber, './integrations') for item in filelist)
# test_filelist = glob.glob('/home/holiestcow/Documents/zephyr/datasets/muse/testData/2*.csv')
# HACK: RIGHT HERE
test_filelist = glob.glob('./test_integrations/2*.npy')
# Parallel(n_jobs=ncores)(delayed(parse_datafiles)(item, binnumber, './test_integrations') for item in test_filelist)
labels = label_datasets()
# NOTE: Slice for background segments
f = h5py.File(filename + '.h5', 'w')
train = f.create_group('training')
test = f.create_group('testing')
validation = f.create_group('validation')
number_of_testing_files = 4800
number_of_training_files = len(labels.keys()) - number_of_testing_files # The last 10000 are for testing
test2train_ratio = number_of_testing_files / number_of_training_files
tostore_spectra = np.zeros((nsamples, 1024))
tostore_labels = np.zeros((nsamples, 1))
filelist = list(labels.keys())
for i in range(nsamples):
# create training dataset
random_file = filelist[np.random.randint(number_of_training_files)]
if i % 100 == 0:
print('training sample: {}'.format(i))
x = np.load('./integrations/' + random_file + '.npy')
# time = x[:, 0]
start = np.random.randint(x.shape[0] - 15)
source = 'Background'
if labels[random_file]['source'] != 'Background' and start >= 30:
start = int(labels[random_file]['time']) - 15
source = labels[random_file]['source']
spectra = x[start + 15, 1:]
# From here, get the moving average estimate of the background from the past 15 seconds
background = x[start:start + 15, 1:]
background = np.mean(background, axis=0)
# background subtraction
spectra = np.subtract(spectra, background)
# zero out the negatives
spectra[spectra < 0] = 0
tostore_spectra[i, :] = spectra
tostore_labels[i] = int(string2id[source])
# g = train.create_group('sample_' + str(i))
# g.create_dataset('spectra', data=spectra, compression='gzip')
# g.create_dataset('spectra', data=spectra)
# g.create_dataset('label', data=int(string2id[source]))
train.create_dataset('spectra', data=tostore_spectra, compression='gzip')
train.create_dataset('labels', data=tostore_labels, compression='gzip')
tostore_spectra = np.zeros((int(nsamples * test2train_ratio), 1024))
tostore_labels = np.zeros((int(nsamples * test2train_ratio), 1))
for i in range(int(nsamples * test2train_ratio)):
# create training dataset
random_file = filelist[number_of_training_files + np.random.randint(number_of_testing_files)]
if i % 100 == 0:
print('testing sample: {}'.format(i))
x = np.load('./integrations/' + random_file + '.npy')
# time = x[:, 0]
start = np.random.randint(x.shape[0] - 15)
source = 'Background'
if labels[random_file]['source'] != 'Background' and start >= 30:
start = int(labels[random_file]['time']) - 15
source = labels[random_file]['source']
spectra = x[start + 15, 1:]
# From here, get the moving average estimate of the background from the past 15 seconds
background = x[start:start + 15, 1:]
background = np.mean(background, axis=0)
# background subtraction
spectra = np.subtract(spectra, background)
# zero out the negatives
spectra[spectra < 0] = 0
tostore_spectra[i, :] = spectra
tostore_labels[i] = int(string2id[source])
# g = test.create_group('sample_' + str(i))
# g.create_dataset('spectra', data=spectra, compression='gzip')
# g.create_dataset('label', data=int(string2id[source]))
test.create_dataset('spectra', data=tostore_spectra, compression='gzip')
test.create_dataset('labels', data=tostore_labels, compression='gzip')
# this is for the validation set, where i have to analyze
# each file individual
for i in range(len(test_filelist)):
if i % 100 == 0:
print('validation sample {}'.format(i))
filename = test_filelist[i]
head, tail = os.path.split(filename)
dataname = tail[:-4]
x = np.load(os.path.join('./test_integrations', dataname + '.npy'))
t = x[:, 0]
spectra = deepcopy(x[:, 1:])
tostore_spectra = deepcopy(spectra[16:, :])
index = np.arange(spectra.shape[0])
index_generator = window(index, n=15)
counter = 0
for index_list in index_generator:
background = np.mean(spectra[index_list, :], axis=0)
tostore_spectra[counter, :] = np.subtract(tostore_spectra[counter, :], background)
tostore_spectra[counter, tostore_spectra[counter, :] < 0] = 0
counter += 1
if counter >= tostore_spectra.shape[0]:
break
file_sample = validation.create_group(dataname)
file_sample.create_dataset('time', data=t, compression='gzip')
file_sample.create_dataset('spectra', data=tostore_spectra, compression='gzip')
f.close()
return
main()
| 33.483019
| 121
| 0.589654
|
584160335ab5521087d80809ad08be9cbf489217
| 497
|
py
|
Python
|
wunderlist/migrations/0006_connection_list_title.py
|
passuf/WunderHabit
|
97c16bf491a1eb5fa9eb3a41a59e149a1b0f1a9b
|
[
"MIT"
] | 43
|
2016-01-04T10:24:42.000Z
|
2021-03-10T22:55:21.000Z
|
wunderlist/migrations/0006_connection_list_title.py
|
passuf/WunderHabit
|
97c16bf491a1eb5fa9eb3a41a59e149a1b0f1a9b
|
[
"MIT"
] | 27
|
2016-01-07T01:59:12.000Z
|
2019-05-23T09:06:44.000Z
|
wunderlist/migrations/0006_connection_list_title.py
|
passuf/WunderHabit
|
97c16bf491a1eb5fa9eb3a41a59e149a1b0f1a9b
|
[
"MIT"
] | 9
|
2016-01-07T14:26:19.000Z
|
2020-01-10T20:20:30.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-26 18:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wunderlist', '0005_auto_20151226_1750'),
]
operations = [
migrations.AddField(
model_name='connection',
name='list_title',
field=models.CharField(blank=True, max_length=255, verbose_name='List Title'),
),
]
| 23.666667
| 90
| 0.635815
|
b05b677348572ae4e676469ed72946c42ad86f41
| 130
|
py
|
Python
|
tests/utils.py
|
sturmianseq/django-hug
|
778b16f568ce9fdb01f360b7bff89c9d8b6c2ef7
|
[
"MIT"
] | 3
|
2019-03-11T16:40:37.000Z
|
2020-08-06T18:27:48.000Z
|
tests/utils.py
|
sturmianseq/django-hug
|
778b16f568ce9fdb01f360b7bff89c9d8b6c2ef7
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
sturmianseq/django-hug
|
778b16f568ce9fdb01f360b7bff89c9d8b6c2ef7
|
[
"MIT"
] | 1
|
2021-08-18T12:54:51.000Z
|
2021-08-18T12:54:51.000Z
|
from django.http import JsonResponse
def json_response(locals):
locals.pop("request", None)
return JsonResponse(locals)
| 18.571429
| 36
| 0.753846
|
99cd89db7889ecd20617b1ac5060b60f8b313c45
| 9,934
|
py
|
Python
|
heat/tests/openstack/zaqar/test_queue.py
|
jasondunsmore/heat
|
6bd7352dc4838b8ef782f2345a4dfdf57ba3e356
|
[
"Apache-2.0"
] | 1
|
2015-12-18T21:46:55.000Z
|
2015-12-18T21:46:55.000Z
|
heat/tests/openstack/zaqar/test_queue.py
|
jasondunsmore/heat
|
6bd7352dc4838b8ef782f2345a4dfdf57ba3e356
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/openstack/zaqar/test_queue.py
|
jasondunsmore/heat
|
6bd7352dc4838b8ef782f2345a4dfdf57ba3e356
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients import client_plugin
from heat.engine.resources.openstack.zaqar import queue
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
try:
from zaqarclient.transport.errors import ResourceNotFound # noqa
except ImportError:
ResourceNotFound = Exception
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "openstack Zaqar queue service as a resource",
"Resources" : {
"MyQueue2" : {
"Type" : "OS::Zaqar::Queue",
"Properties" : {
"name": "myqueue",
"metadata": { "key1": { "key2": "value", "key3": [1, 2] } }
}
}
},
"Outputs" : {
"queue_id": {
"Value": { "Ref" : "MyQueue2" },
"Description": "queue name"
},
"queue_href": {
"Value": { "Fn::GetAtt" : [ "MyQueue2", "href" ]},
"Description": "queue href"
}
}
}
'''
class FakeQueue(object):
def __init__(self, queue_name, auto_create=True):
self._id = queue_name
self._auto_create = auto_create
self._exists = False
def metadata(self, new_meta=None):
pass
def delete(self):
pass
class ZaqarMessageQueueTest(common.HeatTestCase):
def setUp(self):
super(ZaqarMessageQueueTest, self).setUp()
self.fc = self.m.CreateMockAnything()
self.ctx = utils.dummy_context()
def parse_stack(self, t):
stack_name = 'test_stack'
tmpl = template.Template(t)
self.stack = stack.Stack(self.ctx, stack_name, tmpl)
self.stack.validate()
self.stack.store()
def test_create(self):
t = template_format.parse(wp_template)
self.parse_stack(t)
queue = self.stack['MyQueue2']
self.m.StubOutWithMock(queue, 'client')
queue.client().MultipleTimes().AndReturn(self.fc)
fake_q = FakeQueue(queue.physical_resource_name(), auto_create=False)
self.m.StubOutWithMock(self.fc, 'queue')
self.fc.queue(queue.physical_resource_name(),
auto_create=False).AndReturn(fake_q)
self.m.StubOutWithMock(fake_q, 'metadata')
fake_q.metadata(new_meta=queue.properties.get('metadata'))
self.m.ReplayAll()
scheduler.TaskRunner(queue.create)()
self.fc.api_url = 'http://127.0.0.1:8888/v1'
self.assertEqual('http://127.0.0.1:8888/v1/queues/myqueue',
queue.FnGetAtt('href'))
self.m.VerifyAll()
def test_delete(self):
t = template_format.parse(wp_template)
self.parse_stack(t)
queue = self.stack['MyQueue2']
queue.resource_id_set(queue.properties.get('name'))
self.m.StubOutWithMock(queue, 'client')
queue.client().MultipleTimes().AndReturn(self.fc)
fake_q = FakeQueue("myqueue", auto_create=False)
self.m.StubOutWithMock(self.fc, 'queue')
self.fc.queue("myqueue",
auto_create=False).MultipleTimes().AndReturn(fake_q)
self.m.StubOutWithMock(fake_q, 'delete')
fake_q.delete()
self.m.ReplayAll()
scheduler.TaskRunner(queue.create)()
scheduler.TaskRunner(queue.delete)()
self.m.VerifyAll()
@mock.patch.object(queue.ZaqarQueue, "client")
def test_delete_not_found(self, mockclient):
class ZaqarClientPlugin(client_plugin.ClientPlugin):
def _create(self):
return mockclient()
mock_def = mock.Mock(spec=rsrc_defn.ResourceDefinition)
mock_stack = mock.Mock()
mock_stack.db_resource_get.return_value = None
mock_stack.has_cache_data.return_value = False
mockplugin = ZaqarClientPlugin(self.ctx)
mock_stack.clients = mock.Mock()
mock_stack.clients.client_plugin.return_value = mockplugin
mockplugin.is_not_found = mock.Mock()
mockplugin.is_not_found.return_value = True
zaqar_q = mock.Mock()
zaqar_q.delete.side_effect = ResourceNotFound()
mockclient.return_value.queue.return_value = zaqar_q
zplugin = queue.ZaqarQueue("test_delete_not_found", mock_def,
mock_stack)
zplugin.resource_id = "test_delete_not_found"
zplugin.handle_delete()
mock_stack.clients.client_plugin.assert_called_once_with('zaqar')
mockplugin.is_not_found.assert_called_once_with(
zaqar_q.delete.side_effect)
mockclient.return_value.queue.assert_called_once_with(
"test_delete_not_found", auto_create=False)
def test_update_in_place(self):
t = template_format.parse(wp_template)
self.parse_stack(t)
queue = self.stack['MyQueue2']
queue.resource_id_set(queue.properties.get('name'))
self.m.StubOutWithMock(queue, 'client')
queue.client().MultipleTimes().AndReturn(self.fc)
fake_q = FakeQueue('myqueue', auto_create=False)
self.m.StubOutWithMock(self.fc, 'queue')
self.fc.queue('myqueue',
auto_create=False).MultipleTimes().AndReturn(fake_q)
self.m.StubOutWithMock(fake_q, 'metadata')
fake_q.metadata(new_meta={"key1": {"key2": "value", "key3": [1, 2]}})
# Expected to be called during update
fake_q.metadata(new_meta={'key1': 'value'})
self.m.ReplayAll()
t = template_format.parse(wp_template)
new_queue = t['Resources']['MyQueue2']
new_queue['Properties']['metadata'] = {'key1': 'value'}
resource_defns = template.Template(t).resource_definitions(self.stack)
scheduler.TaskRunner(queue.create)()
scheduler.TaskRunner(queue.update, resource_defns['MyQueue2'])()
self.m.VerifyAll()
def test_update_replace(self):
t = template_format.parse(wp_template)
self.parse_stack(t)
queue = self.stack['MyQueue2']
queue.resource_id_set(queue.properties.get('name'))
self.m.StubOutWithMock(queue, 'client')
queue.client().MultipleTimes().AndReturn(self.fc)
fake_q = FakeQueue('myqueue', auto_create=False)
self.m.StubOutWithMock(self.fc, 'queue')
self.fc.queue('myqueue',
auto_create=False).MultipleTimes().AndReturn(fake_q)
self.m.ReplayAll()
t = template_format.parse(wp_template)
t['Resources']['MyQueue2']['Properties']['name'] = 'new_queue'
resource_defns = template.Template(t).resource_definitions(self.stack)
new_queue = resource_defns['MyQueue2']
scheduler.TaskRunner(queue.create)()
err = self.assertRaises(exception.UpdateReplace,
scheduler.TaskRunner(queue.update,
new_queue))
msg = 'The Resource MyQueue2 requires replacement.'
self.assertEqual(msg, six.text_type(err))
self.m.VerifyAll()
def test_show_resource(self):
t = template_format.parse(wp_template)
self.parse_stack(t)
queue = self.stack['MyQueue2']
self.m.StubOutWithMock(queue, 'client')
queue.client().MultipleTimes().AndReturn(self.fc)
fake_q = FakeQueue(queue.physical_resource_name(), auto_create=False)
self.m.StubOutWithMock(self.fc, 'queue')
self.fc.queue(queue.physical_resource_name(),
auto_create=False).AndReturn(fake_q)
self.m.StubOutWithMock(fake_q, 'metadata')
fake_q.metadata(new_meta=queue.properties.get('metadata'))
self.fc.queue(queue.physical_resource_name(),
auto_create=False).AndReturn(fake_q)
fake_q.metadata().AndReturn(
{"key1": {"key2": "value", "key3": [1, 2]}})
self.m.ReplayAll()
scheduler.TaskRunner(queue.create)()
self.assertEqual(
{'metadata': {"key1": {"key2": "value", "key3": [1, 2]}}},
queue._show_resource())
self.m.VerifyAll()
def test_parse_live_resource_data(self):
t = template_format.parse(wp_template)
self.parse_stack(t)
queue = self.stack['MyQueue2']
self.m.StubOutWithMock(queue, 'client')
queue.client().MultipleTimes().AndReturn(self.fc)
fake_q = FakeQueue(queue.physical_resource_name(), auto_create=False)
self.m.StubOutWithMock(self.fc, 'queue')
self.fc.queue(queue.physical_resource_name(),
auto_create=False).AndReturn(fake_q)
self.m.StubOutWithMock(fake_q, 'metadata')
fake_q.metadata(new_meta=queue.properties.get('metadata'))
self.fc.queue(queue.physical_resource_name(),
auto_create=False).AndReturn(fake_q)
fake_q.metadata().AndReturn(
{"key1": {"key2": "value", "key3": [1, 2]}})
self.m.ReplayAll()
scheduler.TaskRunner(queue.create)()
self.assertEqual(
{'metadata': {"key1": {"key2": "value", "key3": [1, 2]}},
'name': queue.resource_id},
queue.parse_live_resource_data(queue.properties,
queue._show_resource()))
self.m.VerifyAll()
| 36.388278
| 78
| 0.63338
|
9e3616a7868f983a191758cc02111f780c1a68b8
| 10,494
|
py
|
Python
|
slim-inceptionV3-visual-finetune.py
|
Alex-Fun/distracted_driver_detection
|
aaec8316934acbbccd9eae66adaa1970adea4fed
|
[
"Apache-2.0"
] | null | null | null |
slim-inceptionV3-visual-finetune.py
|
Alex-Fun/distracted_driver_detection
|
aaec8316934acbbccd9eae66adaa1970adea4fed
|
[
"Apache-2.0"
] | null | null | null |
slim-inceptionV3-visual-finetune.py
|
Alex-Fun/distracted_driver_detection
|
aaec8316934acbbccd9eae66adaa1970adea4fed
|
[
"Apache-2.0"
] | null | null | null |
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
import logging
import time
import utils
import vgg_preprocessing
import inception_preprocessing
import model
def main(a):
# config param
# base_dir = r'E:\tmp\data\state-farm-distracted-driver-detection'
# out_put_dir = r'E:\tmp\data\state-farm-distracted-driver-detection'
base_dir = "/data/oHongMenYan/distracted-driver-detection-dataset"
out_put_dir = "/output"
init_global_step = 0
# init_global_step= 19487
train_data_file_path = os.path.join(base_dir, 'new_train.record')
val_data_file_path = os.path.join(base_dir, 'new_val.record')
ckpt_path = os.path.join(base_dir, 'model_inceptionv3_adam.ckpt-19487')
ckpt_path = os.path.join(base_dir, 'ckpt')
ckpt_path = os.path.join(base_dir, 'inception_v3.ckpt')
input_image_size = (480, 640)
model_image_size = (360, 480)
# model_image_size = (299, 299)
batch_size = 64
batch_size = 32
num_classes = 10
epochs_num = 30
# epochs_num = 1
train_examples_num = 20787
# train_examples_num = batch_size
num_steps = int(epochs_num * train_examples_num / batch_size)
img_dir = os.path.join(out_put_dir, 'img')
if not os.path.exists(img_dir):
os.makedirs(img_dir)
logs_dir = os.path.join(out_put_dir, 'logs')
if not os.path.exists(logs_dir):
os.makedirs(logs_dir)
    # Load the data
# train_images, train_labels = utils.read_TFRecord(
# train_data_file_path, model_image_size, num_epochs=10 * epochs_num, batch_size=batch_size)
# val_images, val_labels = utils.read_TFRecord(
# val_data_file_path, model_image_size, num_epochs=10 * epochs_num, batch_size=batch_size)
# shuffle_buffer = 100
train_images, train_labels = utils.read_TFRecord2(train_data_file_path, batch_size=batch_size)
val_images, val_labels = utils.read_TFRecord2(val_data_file_path, batch_size=batch_size)
    # Configure the model
# inception_model = model.Model(num_classes=num_classes, is_training=True,
# fixed_resize_side_min=model_image_size[0],
inception_model = model.Model(
num_classes=num_classes,
is_training=True,
fixed_resize_side_min=299,
default_image_height=model_image_size[0],
default_image_width=model_image_size[1])
    # Image preprocessing
# rotation_range = 10.,
# width_shift_range = 0.05,
# height_shift_range = 0.05,
# shear_range = 0.1,
# zoom_range = 0.1,
# preprocessed_inputs = preprocessing.preprocess_image(images, output_height=model_image_size[0],
# output_width=model_image_size[1], is_training=True)
# preprocessed_inputs = inception_preprocessing.preprocess_image(images, height=model_image_size[0],
# width=model_image_size[1], is_training=True)
images = tf.placeholder(
tf.float32, [None, input_image_size[0], input_image_size[1], 3], name='input_images')
labels = tf.placeholder(tf.int64, [None, 1], name='labels')
# images = train_images
# labels = train_labels
processed_images = inception_model.preprocess(images)
prediction_dict = inception_model.predict(processed_images)
loss_dict = inception_model.loss(prediction_dict, labels)
loss = loss_dict['loss']
postprocess_dict = inception_model.postprocess(prediction_dict)
accuracy = inception_model.accuracy(postprocess_dict, labels)
# add loss & accuracy to summary
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
# global_step
global_step = tf.train.create_global_step()
if global_step is None:
print('global_step is none')
global_step = tf.Variable(
initial_value=init_global_step,
dtype=tf.int64,
trainable=False,
name='global_step')
    # Load the pretrained checkpoint
    # Parameters that should not be restored from the Google checkpoint: the final fully connected
    # layers, whose weights differ here because the number of output classes is different.
CHECKPOINT_EXCLUDE_SCOPES = None
CHECKPOINT_EXCLUDE_SCOPES = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']
print('before get_init_fn')
init_fn = utils.get_init_fn(
checkpoint_path=ckpt_path,
checkpoint_exclude_scopes=CHECKPOINT_EXCLUDE_SCOPES)
print('after get_init_fn')
# # 验证集
# val_images = inception_model.preprocess(val_images)
# val_prediction_dict = inception_model.predict(val_images)
# val_loss_dict = inception_model.loss(val_prediction_dict, val_labels)
# val_loss = val_loss_dict['loss']
# val_postprocess_dict = inception_model.postprocess(val_prediction_dict)
# val_accuracy = inception_model.accuracy(val_postprocess_dict, val_labels)
# add loss & accuracy to summary
# tf.summary.scalar('val_loss', val_loss)
# tf.summary.scalar('val_accuracy', val_accuracy)
# 配置优化器
with tf.variable_scope('adam_vars'):
        learning_rate = 1e-3  # initial learning rate
        decay_rate = 0.96  # decay rate
        # global_steps = 1000  # total number of iterations
        decay_steps = 100  # number of steps between decays
        num_epochs_per_decay = 0.5  # after 10 epochs the lr becomes 0.96^(10/0.5) of its original value
decay_steps = int(
train_examples_num /
batch_size *
num_epochs_per_decay)
decay_steps = 100
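        # With staircase=False the decayed rate is learning_rate * decay_rate ** (global_step / decay_steps)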
learning_rate = tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False)
# learning_rate = tf.Variable(initial_value=1e-3, dtype=tf.float32, trainable=False, name='learning_rate')
# adam_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
adam_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
adam_train_step = adam_opt.minimize(loss, global_step=global_step)
opt_lr_t = adam_opt._lr_t
beta1_power, beta2_power = adam_opt._get_beta_accumulators()
current_lr = (adam_opt._lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))
tf.summary.scalar('the_learning_rate', current_lr)
# merge all summary
merged_summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(logs_dir)
# initial config &run
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
global_init = tf.global_variables_initializer()
local_init = tf.local_variables_initializer()
with sess:
sess.run(global_init)
sess.run(local_init)
saver = tf.train.Saver(max_to_keep=5)
init_fn(sess)
# build thread coordinator
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
begin_time = time.time()
logging.debug("train begin, time:%d", begin_time)
for i in range(num_steps):
step_time = time.time()
            # Run the session to fetch a real batch of image data
train_img_batch, train_label_batch = sess.run([train_images, train_labels])
gs, _ = sess.run([global_step, adam_train_step]
, feed_dict={
images: train_img_batch,
labels: train_label_batch
}
)
logging.debug("step_num i:%d, global_step: %d", i, gs)
print("step_num i:%d, global_step: %d" % (i, gs))
# loss_result, accuracy_result, lr, summary_string = sess.run([loss, accuracy, lr_t, merged_summary_op])
loss_result, accuracy_result, lr_o_t, lr_ct, summary_string = sess.run(
[loss, accuracy, opt_lr_t, current_lr, merged_summary_op]
, feed_dict={
images: train_img_batch,
labels: train_label_batch
}
)
step_time = time.time() - step_time
time_per_img = float(batch_size / step_time)
logging.debug("step_num i:%d, global_step: %d, loss:%f, acc:%f, lr_o_t:%f, current_lr:%f, step_time:%d, imgs_per_time:%f", i, gs, loss_result, accuracy_result, lr_o_t, lr_ct, step_time, time_per_img)
print("step_num i:%d, global_step: %d, loss:%f, acc:%f, lr_o_t:%f, current_lr:%f, step_time:%d, imgs_per_time:%f" % (i, gs, loss_result, accuracy_result, lr_o_t, lr_ct, step_time, time_per_img))
# logging.debug("step_num i:%d, global_step: %d, loss:%f, acc:%f, learning_rate:%f, step_time:%d, imgs_per_time:%d",
# i, gs, loss_result, accuracy_result, lr, step_time, batch_size/step_time)
# print("step_num i:%d, global_step: %d, loss:%f, acc:%f, learning_rate:%f, step_time:%d, imgs_per_time:%d" %
# (i, gs, loss_result, accuracy_result, lr, step_time, batch_size/step_time))
            # Evaluate metrics on the validation set
if (i + 1) % 1000 == 0:
val_img_batch, val_label_batch = sess.run([val_images, val_labels])
val_loss_result, val_accuracy_result = sess.run(
[loss, accuracy]
, feed_dict={
images: val_img_batch,
labels: val_label_batch
}
)
# [val_loss, val_accuracy])
step_time = time.time() - step_time
time_per_img = float(batch_size / step_time)
logging.debug(
"val---step_num i:%d, global_step: %d, loss:%f, acc:%f, lr_o_t:%f, step_time:%d, time_per_img:%f", i, gs, val_loss_result, val_accuracy_result, lr_o_t, step_time, time_per_img)
print("val---step_num i:%d, global_step: %d, loss:%f, acc:%f, lr_o_t:%f, step_time:%d, time_per_img:%f" % (i, gs, val_loss_result, val_accuracy_result, lr_o_t, step_time, time_per_img))
summary_writer.add_summary(summary_string, global_step=gs)
if (i + 1) % 1000 == 0:
                save_path_name = 'model_inceptionv3_adam_%d.ckpt' % gs
save_path = saver.save(sess, os.path.join(logs_dir, save_path_name), global_step=gs)
logging.debug("model---saved, save_path:%s, cost_time:%d", save_path, time.time() - begin_time)
print("model---saved, save_path:%s, cost_time:%d" % (save_path, time.time() - begin_time))
save_path = saver.save(sess, os.path.join(logs_dir, 'model_inceptionv3_adam.ckpt'), global_step=gs)
logging.debug("model saved, save_path:%s, cost_time:%d", save_path, time.time() - begin_time)
print("model saved, save_path:%s, cost_time:%d" %
(save_path, time.time() - begin_time))
summary_writer.close()
if __name__ == '__main__':
tf.app.run()
| 43.725
| 211
| 0.65628
|
1c4b17155ce1c194a7b8f5676a046ae0ba85cb53
| 2,036
|
py
|
Python
|
spark-1.3.0/python/build/py4j/tests/java_array_test.py
|
iflink/spark
|
3b6b0c2cbdc1f939fb60ef5717ffbe232ebceee5
|
[
"Apache-2.0"
] | null | null | null |
spark-1.3.0/python/build/py4j/tests/java_array_test.py
|
iflink/spark
|
3b6b0c2cbdc1f939fb60ef5717ffbe232ebceee5
|
[
"Apache-2.0"
] | null | null | null |
spark-1.3.0/python/build/py4j/tests/java_array_test.py
|
iflink/spark
|
3b6b0c2cbdc1f939fb60ef5717ffbe232ebceee5
|
[
"Apache-2.0"
] | 1
|
2020-07-23T22:14:41.000Z
|
2020-07-23T22:14:41.000Z
|
'''
Created on Mar 24, 2010
@author: Barthelemy Dagenais
'''
from __future__ import unicode_literals, absolute_import
from multiprocessing import Process
import subprocess
import time
import unittest
from py4j.java_gateway import JavaGateway
from py4j.tests.java_gateway_test import PY4J_JAVA_PATH
def start_example_server():
subprocess.call(["java", "-cp", PY4J_JAVA_PATH,
"py4j.examples.ExampleApplication"])
def start_example_app_process():
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
p = Process(target=start_example_server)
p.start()
return p
class Test(unittest.TestCase):
def setUp(self):
# logger = logging.getLogger("py4j")
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
self.p = start_example_app_process()
time.sleep(0.5)
self.gateway = JavaGateway()
def tearDown(self):
self.p.terminate()
self.gateway.shutdown()
time.sleep(0.5)
def testArray(self):
example = self.gateway.entry_point.getNewExample()
array1 = example.getStringArray()
array2 = example.getIntArray()
self.assertEqual(3, len(array1))
self.assertEqual(4, len(array2))
self.assertEqual('333', array1[2])
self.assertEqual(5, array2[1])
array1[2] = 'aaa'
array2[1] = 6
self.assertEqual('aaa', array1[2])
self.assertEqual(6, array2[1])
new_array = array2[1:3]
self.assertEqual(2, len(new_array))
self.assertEqual(1, new_array[1])
def testCreateArray(self):
int_class = self.gateway.jvm.int
string_class = self.gateway.jvm.java.lang.String
int_array = self.gateway.new_array(int_class, 2)
string_array = self.gateway.new_array(string_class, 3, 5)
self.assertEqual(2, len(int_array))
self.assertEqual(3, len(string_array))
self.assertEqual(5, len(string_array[0]))
if __name__ == "__main__":
unittest.main()
| 27.513514
| 72
| 0.664047
|
d7447fe47428e682889270d28cf612a92d2c7ec2
| 1,117
|
py
|
Python
|
bce/logic/balancer/merger.py
|
bce-toolkit/BCE
|
9e4d168cab18132bbe2867d13c629510b86d350a
|
[
"BSD-3-Clause"
] | 12
|
2017-12-18T10:37:52.000Z
|
2021-11-28T07:08:46.000Z
|
bce/logic/balancer/merger.py
|
bce-toolkit/BCE
|
9e4d168cab18132bbe2867d13c629510b86d350a
|
[
"BSD-3-Clause"
] | null | null | null |
bce/logic/balancer/merger.py
|
bce-toolkit/BCE
|
9e4d168cab18132bbe2867d13c629510b86d350a
|
[
"BSD-3-Clause"
] | 4
|
2018-04-29T10:32:26.000Z
|
2021-07-13T08:17:36.000Z
|
#!/usr/bin/env python
#
# Copyright 2014 - 2018 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
def merge_coefficients_with_cexp_object(cexp_object, coefficients):
"""Merge balanced coefficients with a chemical equation.
:type cexp_object: bce.parser.interface.cexp_parser.ChemicalEquation
:type coefficients: list
:param cexp_object: The chemical equation object.
:param coefficients: The balanced coefficients list.
"""
# Check the size.
assert len(coefficients) == len(cexp_object)
# Process left items.
for idx in range(0, cexp_object.get_left_item_count()):
item = cexp_object.get_left_item(idx)
item.set_coefficient(coefficients[idx])
cexp_object.set_left_item(idx, item)
# Process right items.
for idx in range(0, cexp_object.get_right_item_count()):
item = cexp_object.get_right_item(idx)
item.set_coefficient(coefficients[cexp_object.get_left_item_count() + idx])
cexp_object.set_right_item(idx, item)
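# Illustrative usage sketch (not part of the original module). The _FakeItem and
# _FakeEquation classes below are hypothetical stand-ins for the ChemicalEquation
# interface used above; they exist only to show how the coefficient merge behaves.
class _FakeItem(object):
    def __init__(self):
        self.coefficient = None
    def set_coefficient(self, value):
        self.coefficient = value
class _FakeEquation(object):
    def __init__(self, left, right):
        self._left, self._right = left, right
    def __len__(self):
        return len(self._left) + len(self._right)
    def get_left_item_count(self):
        return len(self._left)
    def get_right_item_count(self):
        return len(self._right)
    def get_left_item(self, idx):
        return self._left[idx]
    def get_right_item(self, idx):
        return self._right[idx]
    def set_left_item(self, idx, item):
        self._left[idx] = item
    def set_right_item(self, idx, item):
        self._right[idx] = item
if __name__ == "__main__":
    # Two left-hand items and one right-hand item receive the balanced coefficients in order.
    eq = _FakeEquation([_FakeItem(), _FakeItem()], [_FakeItem()])
    merge_coefficients_with_cexp_object(eq, [2, 1, 2])
    assert [item.coefficient for item in eq._left + eq._right] == [2, 1, 2]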
| 34.90625
| 83
| 0.722471
|
a59d6b573d55f7569db2bab07a33d2e954f570b6
| 260
|
py
|
Python
|
PasswordGenerator.py
|
gabe-martins/PythonCodes
|
1ec11636559f61890f3ec52a60f13695337999a1
|
[
"MIT"
] | null | null | null |
PasswordGenerator.py
|
gabe-martins/PythonCodes
|
1ec11636559f61890f3ec52a60f13695337999a1
|
[
"MIT"
] | null | null | null |
PasswordGenerator.py
|
gabe-martins/PythonCodes
|
1ec11636559f61890f3ec52a60f13695337999a1
|
[
"MIT"
] | null | null | null |
import random
import string
length = 16
lower = string.ascii_lowercase
upper = string.ascii_uppercase
number = string.digits
special = string.punctuation
all_chars = lower + upper + number + special
password = "".join(random.sample(all_chars, length))
print(password)
| 18.571429
| 46
| 0.769231
|
ed667ae6cbe4ea8220de358d0001ee33ad9e0910
| 7,319
|
py
|
Python
|
fmcapi/api_objects/device_services/devicerecords.py
|
dadelowo367/fmcapi
|
cd6ac6d118383a06063ead563ca98c7994238715
|
[
"BSD-3-Clause"
] | 57
|
2017-06-13T17:06:20.000Z
|
2021-07-27T08:53:25.000Z
|
fmcapi/api_objects/device_services/devicerecords.py
|
dadelowo367/fmcapi
|
cd6ac6d118383a06063ead563ca98c7994238715
|
[
"BSD-3-Clause"
] | 66
|
2017-11-09T16:02:45.000Z
|
2021-08-04T20:52:41.000Z
|
fmcapi/api_objects/device_services/devicerecords.py
|
dadelowo367/fmcapi
|
cd6ac6d118383a06063ead563ca98c7994238715
|
[
"BSD-3-Clause"
] | 56
|
2017-06-08T07:53:12.000Z
|
2021-07-30T13:26:47.000Z
|
"""Device Records Classes."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.policy_services.accesspolicies import AccessPolicies
from fmcapi.api_objects.status_services import TaskStatuses
import time
import logging
class DeviceRecords(APIClassTemplate):
"""The DeviceRecords Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"hostName",
"natID",
"regKey",
"license_caps",
"accessPolicy",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + [
"acp_name",
"acp_id",
"model",
"modelId",
"modelNumber",
"modelType",
"healthStatus",
"healthPolicy",
"type",
"version",
"sw_version",
"deviceGroup",
"prohibitPacketTransfer",
"keepLocalEvents",
"ftdMode",
"keepLocalEvents",
]
URL_SUFFIX = "/devices/devicerecords"
REQUIRED_FOR_POST = ["accessPolicy", "hostName", "regKey"]
REQUIRED_FOR_PUT = ["id"]
LICENSES = [
"BASE",
"THREAT",
"URLFilter",
"MALWARE",
"APEX",
"PLUS",
"VPNOnly",
"INSTANCE",
]
def __init__(self, fmc, **kwargs):
"""
Initialize DeviceRecords object.
:param fmc (object): FMC object
:param **kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for DeviceRecords class.")
self.parse_kwargs(**kwargs)
def parse_kwargs(self, **kwargs):
"""
Parse the kwargs and set self variables to match.
:return: None
"""
super().parse_kwargs(**kwargs)
logging.debug("In parse_kwargs() for DeviceRecords class.")
if "acp_name" in kwargs:
self.acp(name=kwargs["acp_name"])
def licensing(self, action, name="BASE"):
"""
Associate licenses with this device record.
:param action: (str) 'add', 'remove', 'clear'
:param name: (str) Value from LICENSES constant.
:return: None
"""
logging.debug("In licensing() for DeviceRecords class.")
if action == "add":
if name in self.LICENSES:
if "license_caps" in self.__dict__:
self.license_caps.append(name)
self.license_caps = list(set(self.license_caps))
else:
self.license_caps = [name]
logging.info(f'License "{name}" added to this DeviceRecords object.')
else:
logging.warning(
f"{name} not found in {self.LICENSES}. Cannot add license to DeviceRecords."
)
elif action == "remove":
if name in self.LICENSES:
if "license_caps" in self.__dict__:
try:
self.license_caps.remove(name)
except ValueError:
logging.warning(
f"{name} is not assigned to this devicerecord thus cannot be removed."
)
logging.info(
f'License "{name}" removed from this DeviceRecords object.'
)
else:
logging.warning(
f"{name} is not assigned to this devicerecord thus cannot be removed."
)
else:
logging.warning(
f"{name} not found in {self.LICENSES}. Cannot remove license from DeviceRecords."
)
elif action == "clear":
if "license_caps" in self.__dict__:
del self.license_caps
logging.info("All licensing removed from this DeviceRecords object.")
def acp(self, name=""):
"""
Associate AccessPolicy with this device.
:param name: (str) Name of ACP.
:return: None
"""
logging.debug("In acp() for DeviceRecords class.")
acp = AccessPolicies(fmc=self.fmc)
acp.get(name=name)
if "id" in acp.__dict__:
self.accessPolicy = {"id": acp.id, "type": acp.type}
else:
logging.warning(
f"Access Control Policy {name} not found. Cannot set up accessPolicy for DeviceRecords."
)
def wait_for_task(self, task, wait_time=10):
"""
Pause configuration script and wait for device registration to complete.
:param task: (dict) task["id": (str)]
:param wait_time: (int) Seconds to wait before rechecking.
:return: None
"""
task_completed_states = ["Success", "SUCCESS", "COMPLETED"]
try:
status = TaskStatuses(fmc=self.fmc, id=task["id"])
current_status = status.get()
"""
Task Status for new device registration behaves differently than other tasks
On new device registration, a task is sent for the initial registration. After completion
the UUID is deleted without any change in task status. So we check to see if the object no longer exists
to assume the registration is complete. After registration, discovery of the device begins, but there is
no way to check for this with a task status. The device can't be modified during this time, but a new
device registration can begin.
OTOH, a device HA operation will update its status to "Success" on completion. Hence the two different
checks.
"""
while (
current_status["status"] is not None
and current_status["status"] not in task_completed_states
):
# Lot of inconsistencies with the type of data a task can return
if "taskType" in current_status.keys():
logging.info(
f"Task: {current_status['taskType']} {current_status['status']} {current_status['id']}"
)
time.sleep(wait_time)
current_status = status.get()
else:
logging.info(
f"Task: {current_status['status']} {current_status['id']}"
)
time.sleep(wait_time)
current_status = status.get()
logging.info(f"Task: {current_status['status']} {current_status['id']}")
except Exception as e:
logging.info(type(e), e)
def post(self, **kwargs):
"""POST to FMC API."""
logging.debug("In post() for DeviceRecords class.")
response = super().post(**kwargs)
# self.wait_for_task(task=response["metadata"]["task"], wait_time=30) # Doesn't work yet.
if "post_wait_time" in kwargs:
self.post_wait_time = kwargs["post_wait_time"]
else:
self.post_wait_time = 300
logging.info(
f"DeviceRecords registration task submitted. "
f"Waiting {self.post_wait_time} seconds for it to complete."
)
time.sleep(self.post_wait_time)
return response
| 36.232673
| 117
| 0.547343
|
8c95cb443bcd582ed955e61c1effb0de8cac48a1
| 726
|
py
|
Python
|
credit_calculator/choice.py
|
blanketastronomer/credit_calculator
|
195cb56934f354543c8d71787577695986bc9340
|
[
"MIT"
] | null | null | null |
credit_calculator/choice.py
|
blanketastronomer/credit_calculator
|
195cb56934f354543c8d71787577695986bc9340
|
[
"MIT"
] | null | null | null |
credit_calculator/choice.py
|
blanketastronomer/credit_calculator
|
195cb56934f354543c8d71787577695986bc9340
|
[
"MIT"
] | null | null | null |
class Choice:
def __init__(self, choice: str, description: str, default: bool = False):
"""
A menu choice to add to a prompt in interactive mode.
:param choice: Letter to respond to
:param description: Description of the choice
        :param default: If true, this will be the default choice for the parent prompt.
"""
self.choice = choice
self.description = description
self.default = default
def show(self) -> str:
"""
Show the choice to the user.
:return: Choice info
"""
output = f"'{self.choice}' - {self.description}"
if self.default:
output += " (default)"
return output
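# Example usage (illustrative only, not part of the original module):
if __name__ == "__main__":
    for c in [Choice("y", "Accept the result", default=True), Choice("n", "Reject and retry")]:
        print(c.show())
    # Output:
    # 'y' - Accept the result (default)
    # 'n' - Reject and retry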
| 27.923077
| 89
| 0.575758
|
f28d14dacc349043edf42ebc7ae300069a6aad46
| 983
|
py
|
Python
|
notifiers/pushbullet_notifier.py
|
cfsmp3/kimsufi-crawler
|
acefd900694579f85b3478275e27e93a54ed9079
|
[
"MIT"
] | null | null | null |
notifiers/pushbullet_notifier.py
|
cfsmp3/kimsufi-crawler
|
acefd900694579f85b3478275e27e93a54ed9079
|
[
"MIT"
] | null | null | null |
notifiers/pushbullet_notifier.py
|
cfsmp3/kimsufi-crawler
|
acefd900694579f85b3478275e27e93a54ed9079
|
[
"MIT"
] | null | null | null |
"""Notifier that sends messages through Pushbullet"""
import logging
from pushbullet import Pushbullet
from notifiers.base_notifier import Notifier
_logger = logging.getLogger(__name__)
class PushbulletNotifier(Notifier):
"""Notifier class to work with Pushbullet"""
def __init__(self, config):
"""Override init to check settings"""
self.pushbullet_apikey = config['pushbullet_apikey']
super(PushbulletNotifier, self).__init__(config)
def check_requirements(self):
try:
Pushbullet(self.pushbullet_apikey)
except Exception as ex:
_logger.error("Cannot connect to your Pushbullet account. "
"Correct your config and try again. Error details:")
_logger.error(ex)
raise
_logger.info("Pushbullet server check passed")
def notify(self, title, text, url=None):
pb = Pushbullet(self.pushbullet_apikey)
_ = pb.push_link(text, url)
| 31.709677
| 78
| 0.668362
|
6eac28978ceb7382f7586ac4290d3e25e2459120
| 3,797
|
py
|
Python
|
doajtest/UI/pages/metadata_form.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
doajtest/UI/pages/metadata_form.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
doajtest/UI/pages/metadata_form.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class PublisherMetadataForm:
URL = 'localhost:5004/publisher/metadata'
SEARCH_ADD_ARTICLE_BUTTON = (By.XPATH, "//button[@type='submit' and contains(., 'Add Article')]")
SEARCH_ERRORS = (By.XPATH, "//ul[@class='errors']//li")
SEARCH_AUTHOR_FIELDS = (By.XPATH, "//input[contains(@id,'authors-')]")
SEARCH_REMOVE_BUTTON = (By.XPATH, "//button[contains(@id, 'remove_authors-')]")
SEARCH_ADD_AUTHOR_BUTTON = (By.XPATH, "//input[@type='submit' and @name='more_authors']")
SEARCH_ISSN_SELECT = (By.ID, "select2-chosen-2")
SEARCH_SUCCESS_BANNER = (By.CLASS_NAME, "alert-success")
def __init__(self, browser):
self.browser = browser
def load(self):
self.browser.get(self.URL)
def add_article(self):
add_article_btn = self.browser.find_element(*self.SEARCH_ADD_ARTICLE_BUTTON)
add_article_btn.click()
def focus_on_element(self, elem_id):
return self.browser.find_element_by_id(elem_id) == self.browser.switch_to.active_element
def add_title(self, title):
title_input = self.browser.find_element_by_id("title")
title_input.send_keys(title)
def errors(self):
errors = self.browser.find_elements(*self.SEARCH_ERRORS)
result = []
for e in errors:
if e.text == 'Invalid DOI. A DOI can optionally start with a prefix (such as "doi:"), followed by "10." and the remainder of the identifier':
result.append('no_doi')
elif e.text == 'Please provide at least one author':
result.append('not_enough_authors')
elif e.text == 'Invalid URL.':
result.append('invalid_url')
elif e.text == 'Either this field or Journal ISSN (online version) is required':
result.append('no_pissn_or_eissn')
return result
def remove_author(self):
remove_button = self.browser.find_element(*self.SEARCH_REMOVE_BUTTON)
remove_button.click()
def count_author_fields(self):
authors_fields = self.browser.find_elements(*self.SEARCH_AUTHOR_FIELDS)
return len(authors_fields) // 2
def add_author_field(self):
add_author_btn = self.browser.find_element(*self.SEARCH_ADD_AUTHOR_BUTTON)
add_author_btn.click()
def add_author(self, author):
author_name_input = self.browser.find_element_by_id("authors-0-name")
author_name_input.send_keys(author["name"])
author_aff_input = self.browser.find_element_by_id("authors-0-affiliation")
author_aff_input.send_keys(author["affiliation"])
def add_url(self, fulltext):
fulltext_input = self.browser.find_element_by_id("fulltext")
fulltext_input.send_keys(fulltext)
def _open_issn_dropdown(self):
issn_dropdown = self.browser.find_element(*self.SEARCH_ISSN_SELECT)
webdriver.ActionChains(self.browser).send_keys(Keys.ESCAPE).perform()
issn_dropdown.click()
return issn_dropdown
def confirm_issn_dropdown(self, issns):
issn_dropdown = self._open_issn_dropdown()
options_elements = issn_dropdown.find_elements_by_xpath("//ul[contains(@id,'select2-results')]//li")
options = [x.text for x in options_elements]
issns.append("Select an ISSN")
return len(options) == len(issns) and sorted(options) == sorted(issns)
def choose_pissn(self, pissn):
issn_dropdown = self._open_issn_dropdown()
input = issn_dropdown.find_element_by_xpath("//input")
input.send_keys(pissn + Keys.RETURN)
def success(self):
banner = self.browser.find_element(*self.SEARCH_SUCCESS_BANNER)
return banner
| 42.188889
| 153
| 0.683171
|
7e7cade4d330763d078f23b26262c1a4447f59f3
| 4,951
|
py
|
Python
|
mmtbx/regression/tst_clashes.py
|
mphancock/cctbx_project
|
ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tst_clashes.py
|
mphancock/cctbx_project
|
ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tst_clashes.py
|
mphancock/cctbx_project
|
ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import mmtbx.clashes
from libtbx.utils import null_out
pdb_good = """
CRYST1 23.341 28.568 19.164 90.00 90.00 90.00 P 1
ATOM 1 N ARG A 58 9.158 17.337 8.990 1.00 7.73 N
ATOM 2 CA ARG A 58 10.275 17.092 9.895 1.00 8.65 C
ATOM 3 C ARG A 58 9.831 16.274 11.105 1.00 9.84 C
ATOM 4 O ARG A 58 9.166 16.793 12.002 1.00 8.58 O
ATOM 5 CB ARG A 58 10.895 18.414 10.352 1.00 20.00 C
ATOM 6 CG ARG A 58 11.449 19.263 9.219 1.00 20.00 C
ATOM 7 CD ARG A 58 12.052 20.557 9.743 1.00 20.00 C
ATOM 8 NE ARG A 58 12.585 21.387 8.667 1.00 20.00 N
ATOM 9 CZ ARG A 58 13.157 22.572 8.852 1.00 20.00 C
ATOM 10 NH1 ARG A 58 13.273 23.072 10.075 1.00 20.00 N
ATOM 11 NH2 ARG A 58 13.614 23.259 7.813 1.00 20.00 N
ATOM 12 N GLU A 59 10.199 14.996 11.127 1.00 9.39 N
ATOM 13 CA GLU A 59 10.987 14.395 10.056 1.00 11.89 C
ATOM 14 C GLU A 59 10.157 13.393 9.261 1.00 9.81 C
ATOM 15 O GLU A 59 9.832 12.313 9.753 1.00 8.97 O
ATOM 16 CB GLU A 59 12.233 13.714 10.624 1.00 20.00 C
ATOM 17 CG GLU A 59 13.173 14.654 11.361 1.00 20.00 C
ATOM 18 CD GLU A 59 14.393 13.943 11.915 1.00 20.00 C
ATOM 19 OE1 GLU A 59 14.461 12.701 11.801 1.00 20.00 O
ATOM 20 OE2 GLU A 59 15.283 14.626 12.464 1.00 20.00 O
TER
"""
pdb_poor = """
CRYST1 23.341 28.568 19.164 90.00 90.00 90.00 P 1
ATOM 1 N ARG A 58 9.158 17.337 8.990 1.00 7.73 N
ATOM 2 CA ARG A 58 10.275 17.092 9.895 1.00 8.65 C
ATOM 3 C ARG A 58 9.831 16.274 11.105 1.00 9.84 C
ATOM 4 O ARG A 58 9.166 16.793 12.002 1.00 8.58 O
ATOM 5 CB ARG A 58 10.895 18.414 10.352 1.00 20.00 C
ATOM 6 CG ARG A 58 12.359 18.576 9.974 1.00 20.00 C
ATOM 7 CD ARG A 58 13.136 17.290 10.213 1.00 20.00 C
ATOM 8 NE ARG A 58 14.545 17.429 9.859 1.00 20.00 N
ATOM 9 CZ ARG A 58 15.444 16.459 9.982 1.00 20.00 C
ATOM 10 NH1 ARG A 58 15.084 15.272 10.451 1.00 20.00 N
ATOM 11 NH2 ARG A 58 16.707 16.675 9.635 1.00 20.00 N
ATOM 12 N GLU A 59 10.199 14.996 11.127 1.00 9.39 N
ATOM 13 CA GLU A 59 10.987 14.395 10.056 1.00 11.89 C
ATOM 14 C GLU A 59 10.157 13.393 9.261 1.00 9.81 C
ATOM 15 O GLU A 59 9.832 12.313 9.753 1.00 8.97 O
ATOM 16 CB GLU A 59 12.233 13.714 10.624 1.00 20.00 C
ATOM 17 CG GLU A 59 13.155 14.647 11.392 1.00 20.00 C
ATOM 18 CD GLU A 59 14.055 15.459 10.480 1.00 20.00 C
ATOM 19 OE1 GLU A 59 14.651 16.447 10.957 1.00 20.00 O
ATOM 20 OE2 GLU A 59 14.167 15.108 9.286 1.00 20.00 O
TER 21 GLU A 59
END
"""
def tst_01():
o = mmtbx.clashes.from_pdb(pdb_str=pdb_good, clash_threshold=2.3)
o.show()
assert o.clashing_pairs() == [(9, 10), (18, 19), (3, 11)]
print()
o = mmtbx.clashes.from_pdb(pdb_str=pdb_poor, clash_threshold=1.5)
o.show()
assert o.clashing_pairs() == [(9, 18), (7, 19), (8, 19), (9, 19), (9, 17)]
def tst_02():
# Test for remove_clashes
import mmtbx.model
from mmtbx.clashes import remove_clashes
import iotbx.pdb
import sys
pdb_inp = iotbx.pdb.input(lines=pdb_poor.splitlines(),source_info='None')
model= mmtbx.model.manager( model_input= pdb_inp,)
model.set_log(log = null_out())
print("\n","-"*79)
print(" Summary of input model statistics ")
print("-"*79)
model.get_restraints_manager()
geometry = model.geometry_statistics()
geometry.show(log = sys.stdout)
rc=remove_clashes(model=model)
print("\n","-"*79)
print("Starting residues: %d " % (
rc.model.get_hierarchy().overall_counts().n_residues))
print("Side-chains removed: %d Residues removed: %d" %(
rc.side_chains_removed,
rc.residues_removed))
print("Final residues: %d " % (
rc.new_model.get_hierarchy().overall_counts().n_residues))
rc.new_model.set_log(log = null_out())
rc.new_model.get_restraints_manager()
new_geometry = rc.new_model.geometry_statistics()
new_geometry.show(log = sys.stdout)
assert rc.side_chains_removed==1
assert rc.residues_removed==0
if (__name__ == "__main__"):
tst_01()
tst_02()
| 46.707547
| 78
| 0.533226
|
a77aa728e307eb7279c3bc8712995ee28f87c099
| 1,381
|
py
|
Python
|
cogs/modules/aes_angou.py
|
thiqxis/bottest
|
a874ed3fc08f0271ad0f8957d422775ba2b07359
|
[
"MIT"
] | 7
|
2021-11-03T00:53:40.000Z
|
2022-03-17T23:50:08.000Z
|
cogs/modules/aes_angou.py
|
p-yon-kity/discord-reminderbot
|
aae56d3606c6a61da6447de3c81f818e6b787e7c
|
[
"MIT"
] | 27
|
2021-03-21T14:35:27.000Z
|
2022-03-23T12:21:00.000Z
|
cogs/modules/aes_angou.py
|
p-yon-kity/discord-reminderbot
|
aae56d3606c6a61da6447de3c81f818e6b787e7c
|
[
"MIT"
] | 7
|
2021-07-31T21:43:56.000Z
|
2022-03-18T00:15:03.000Z
|
# https://pycryptodome.readthedocs.io/en/latest/src/examples.html
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from os.path import join, dirname
import hashlib, os
class Aes_angou:
ENC_FILE = 'encrypted.bin'
DEC_FILE = 'reminder.db'
ENC_FILE_PATH = join(dirname(__file__), 'files' + os.sep + ENC_FILE)
DEC_FILE_PATH = join(dirname(__file__), 'files' + os.sep + DEC_FILE)
def __init__(self, password:str=''):
self.password = password
def encode(self):
with open(self.DEC_FILE_PATH, mode='rb') as file:
data = file.read()
secret_key = hashlib.sha256(self.password.encode("utf8")).digest()
cipher = AES.new(secret_key, AES.MODE_EAX)
ciphertext, tag = cipher.encrypt_and_digest(data)
with open(self.ENC_FILE_PATH, 'wb') as file_out:
[ file_out.write(x) for x in (cipher.nonce, tag, ciphertext) ]
def decode(self):
secret_key = hashlib.sha256(self.password.encode("utf8")).digest()
with open(self.ENC_FILE_PATH, 'rb') as file_in:
nonce, tag, ciphertext = [ file_in.read(x) for x in (16, 16, -1) ]
cipher = AES.new(secret_key, AES.MODE_EAX, nonce)
data = cipher.decrypt_and_verify(ciphertext, tag)
with open(self.DEC_FILE_PATH, 'wb') as dec_file:
dec_file.write(data)
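# Example round trip (illustrative only): encrypts the database file at DEC_FILE_PATH into
# ENC_FILE_PATH (nonce + tag + ciphertext) and then decrypts it back, verifying the EAX tag.
# It assumes files/reminder.db already exists next to this module.
if __name__ == '__main__':
    angou = Aes_angou(password='my secret passphrase')
    angou.encode()
    angou.decode()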
| 37.324324
| 78
| 0.650253
|
16cf26a4151458a83edd1058747d79f80994759b
| 5,065
|
py
|
Python
|
petastorm/weighted_sampling_reader.py
|
rizalgowandy/petastorm
|
f7aad8cf5ef6878231b335911e3e95541b388d40
|
[
"Apache-2.0"
] | 1,393
|
2018-08-17T19:01:12.000Z
|
2022-03-31T22:33:45.000Z
|
petastorm/weighted_sampling_reader.py
|
rizalgowandy/petastorm
|
f7aad8cf5ef6878231b335911e3e95541b388d40
|
[
"Apache-2.0"
] | 554
|
2018-08-17T19:59:42.000Z
|
2022-03-31T23:15:07.000Z
|
petastorm/weighted_sampling_reader.py
|
rizalgowandy/petastorm
|
f7aad8cf5ef6878231b335911e3e95541b388d40
|
[
"Apache-2.0"
] | 241
|
2018-08-17T20:03:10.000Z
|
2022-03-22T11:18:47.000Z
|
# Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
class WeightedSamplingReader(object):
"""Allows to combine outputs of two or more Reader objects, sampling them with a configurable probability.
Complies to the same interfaces as :class:`~petastorm.reader.Reader`, hence
:class:`~petastorm.weighted_sampling_reader.WeightedSamplingReader` can be used anywhere the
:class:`~petastorm.reader.Reader` can be used."""
def __init__(self, readers, probabilities):
"""Creates an instance WeightedSamplingReader.
The constructor gets a list of readers and probabilities as its parameters. The lists must be the same length.
:class:`~petastorm.weighted_sampling_reader.WeightedSamplingReader` implements an iterator interface. Each time
a new element is requested, one of the readers is selected, weighted by the matching probability. An element
produced by the selected reader is returned.
The iterator raises StopIteration exception once one of the embedded readers has no more data left.
The following example shows how a :class:`~petastorm.weighted_sampling_reader.WeightedSamplingReader` can be
instantiated with two readers which are sampled with 10% and 90% probabilities respectively.
>>> from petastorm.weighted_sampling_reader import WeightedSamplingReader
>>> from petastorm.reader import Reader
>>>
        >>> with WeightedSamplingReader([Reader('file:///dataset1'), Reader('file:///dataset2')], [0.1, 0.9]) as reader:
>>> new_sample = next(reader)
:param readers: A list of readers. The length of the list must be the same as the length of the
``probabilities`` list.
:param probabilities: A list of probabilities. The length of the list must be the same as the length
of ``readers`` argument. If the sum of all probability values is not 1.0, it will be automatically
normalized.
"""
if len(readers) <= 1:
raise ValueError('Two or more readers must be specified. Got {}.'.format(len(readers)))
if len(readers) != len(probabilities):
raise ValueError('readers and probabilities are expected to be lists of the same length')
self._readers = readers
# Normalize probabilities
self._cum_prob = np.cumsum(np.asarray(probabilities, dtype=np.float) / np.sum(probabilities))
for other_idx in range(1, len(readers)):
if readers[0].batched_output != readers[other_idx].batched_output:
raise ValueError('All readers passed to WeightedSamplingReader should have the same value of '
'"batched_output" attribute')
if set(readers[0].schema.fields.keys()) != set(readers[other_idx].schema.fields.keys()):
raise ValueError('All readers passed to WeightedSamplingReader should have the same schema')
# If either of ngram attribute is not None, or the ngrams are different, then we can not mix
both_have_ngram = (readers[0].ngram is not None) and (readers[other_idx].ngram is not None)
ngram_differ = both_have_ngram and readers[0].ngram != readers[other_idx].ngram
only_one_have_ngram = (readers[0].ngram is None) != (readers[other_idx].ngram is None)
if only_one_have_ngram or ngram_differ:
raise ValueError('All readers passed to WeightedSamplingReader should have the same ngram spec')
self.batched_output = readers[0].batched_output
self.ngram = readers[0].ngram
self.schema = readers[0].schema
def __len__(self):
return sum(len(reader) for reader in self._readers)
def __iter__(self):
return self
def __next__(self):
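        # Draw a uniform sample and pick the first reader whose cumulative probability exceeds it.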
r = np.random.random()
reader_index = np.where(r < self._cum_prob)[0][0]
return next(self._readers[reader_index])
def next(self):
return self.__next__()
@property
def last_row_consumed(self):
return any(map(lambda r: r.last_row_consumed, self._readers))
# Functions needed to treat reader as a context manager
def __enter__(self):
return self
def stop(self):
for reader in self._readers:
reader.stop()
def join(self):
for reader in self._readers:
reader.join()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.join()
| 43.663793
| 120
| 0.684501
|
c4c2cbcc641234e4b1afe42aca1f160c1028302f
| 8,544
|
py
|
Python
|
releasenotes/source/conf.py
|
openstack/puppet-glance
|
16d60d2f12e136ece361bb7e56c4fc17475ca696
|
[
"Apache-2.0"
] | 26
|
2015-06-23T08:06:41.000Z
|
2020-06-01T06:23:06.000Z
|
releasenotes/source/conf.py
|
openstack/puppet-glance
|
16d60d2f12e136ece361bb7e56c4fc17475ca696
|
[
"Apache-2.0"
] | null | null | null |
releasenotes/source/conf.py
|
openstack/puppet-glance
|
16d60d2f12e136ece361bb7e56c4fc17475ca696
|
[
"Apache-2.0"
] | 29
|
2015-06-22T13:20:41.000Z
|
2020-02-03T06:07:55.000Z
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2017, Puppet OpenStack Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'puppet-glanceReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'puppet-glanceReleaseNotes.tex', u'puppet-glance Release Notes Documentation',
u'2017, Puppet OpenStack Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'puppet-glancereleasenotes', u'puppet-glance Release Notes Documentation',
[u'2017, Puppet OpenStack Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'puppet-glanceReleaseNotes', u'puppet-glance Release Notes Documentation',
u'2017, Puppet OpenStack Developers', 'puppet-glanceReleaseNotes', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/puppet-glance'
openstackdocs_bug_project = 'puppet-glance'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
| 33.245136
| 105
| 0.724368
|
f7704efa3c66d5373f3d0e73beecd42d9a15ead6
| 221
|
py
|
Python
|
gluoncv/utils/metrics/__init__.py
|
islinwh/gluon-cv
|
669b1ac169ad26c6df06ab26592de2baf109fe44
|
[
"Apache-2.0"
] | 36
|
2019-12-25T04:59:49.000Z
|
2022-03-17T07:24:49.000Z
|
gluoncv/utils/metrics/__init__.py
|
islinwh/gluon-cv
|
669b1ac169ad26c6df06ab26592de2baf109fe44
|
[
"Apache-2.0"
] | 1
|
2019-01-18T06:42:59.000Z
|
2019-01-18T12:26:11.000Z
|
gluoncv/utils/metrics/__init__.py
|
islinwh/gluon-cv
|
669b1ac169ad26c6df06ab26592de2baf109fe44
|
[
"Apache-2.0"
] | 9
|
2019-12-25T05:00:33.000Z
|
2021-10-01T14:23:51.000Z
|
"""Custom evaluation metrics"""
from __future__ import absolute_import
from .coco_detection import COCODetectionMetric
from .voc_detection import VOCMApMetric, VOC07MApMetric
from .segmentation import SegmentationMetric
| 31.571429
| 55
| 0.859729
|
996b553956a3726b75ea893b8ae187ed984846cb
| 611
|
py
|
Python
|
Python3/0514-Freedom-Trail/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0514-Freedom-Trail/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0514-Freedom-Trail/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
import collections
class Solution:
def findRotateSteps(self, ring: str, key: str) -> int:
idxes = collections.defaultdict(list)
for i, ch in enumerate(ring):
idxes[ch].append(i)
n = len(ring)
dp = [0] * n
for i in idxes[key[0]]:
dp[i] = min(i, n - i) + 1
pre = key[0]
for i in range(1, len(key)):
ch = key[i]
for j in idxes[ch]:
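                # Moving from position k to j costs the shorter arc on the ring: min(|j - k|, n - |j - k|).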
dp[j] = min(dp[k] + (min(j - k, k - j + n) if j >= k else min(k - j, j - k + n)) for k in idxes[pre]) + 1
pre = ch
return min(dp[i] for i in idxes[key[-1]])
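# Quick check (illustrative only): the classic example ring="godding", key="gd" needs 4 steps.
if __name__ == "__main__":
    print(Solution().findRotateSteps("godding", "gd"))  # expected: 4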
| 35.941176
| 121
| 0.454992
|