| hexsha (string, 40 chars) | size (int64, 4-1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4-209 chars) | max_stars_repo_name (string, 5-121 chars) | max_stars_repo_head_hexsha (string, 40 chars) | max_stars_repo_licenses (list, 1-10 items) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 4-209 chars) | max_issues_repo_name (string, 5-121 chars) | max_issues_repo_head_hexsha (string, 40 chars) | max_issues_repo_licenses (list, 1-10 items) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 4-209 chars) | max_forks_repo_name (string, 5-121 chars) | max_forks_repo_head_hexsha (string, 40 chars) | max_forks_repo_licenses (list, 1-10 items) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 4-1.02M chars) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a353b8353b6d8c3a4d205a391dce022568d9247 | 576 | py | Python | Medium/Container With Most Water.py | lukemshannonhill/LeetCode_Daily_Problem_Solutions | ead0f38316761c2533c879289b8128a14feaf020 | ["MIT"] | null | null | null | Medium/Container With Most Water.py | lukemshannonhill/LeetCode_Daily_Problem_Solutions | ead0f38316761c2533c879289b8128a14feaf020 | ["MIT"] | null | null | null | Medium/Container With Most Water.py | lukemshannonhill/LeetCode_Daily_Problem_Solutions | ead0f38316761c2533c879289b8128a14feaf020 | ["MIT"] | null | null | null |
# https://leetcode.com/problems/container-with-most-water/
from typing import List
class Solution:
def maxArea(self, height: List[int]) -> int:
area = 0
size = len(height)
max_width = size - 1
l = 0
r = size - 1
for width in range(max_width, 0, -1):
if height[l] < height[r]:
area = max(area, width*height[l])
l += 1
else:
area = max(area, width*height[r])
r -= 1
return area
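A quick sanity check of the two-pointer solution above, using the classic example input; this block is an added illustration, not part of the original file. The expected answer is 49: the widest container between the heights 8 (index 1) and 7 (index 8) has width 7 and depth min(8, 7) = 7.

```python
solver = Solution()
heights = [1, 8, 6, 2, 5, 4, 8, 3, 7]
assert solver.maxArea(heights) == 49
print(solver.maxArea(heights))  # 49
```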
| 23.04 | 58 | 0.407986 |
c7036fe2e71deecc28c439ca567e164e0f938896 | 268 | py | Python | pineboolib/interfaces/__init__.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | ["MIT"] | 2 | 2017-12-10T23:06:16.000Z | 2017-12-10T23:06:23.000Z | pineboolib/interfaces/__init__.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | ["MIT"] | 36 | 2017-11-05T21:13:47.000Z | 2020-08-26T15:56:15.000Z | pineboolib/interfaces/__init__.py | deavid/pineboo | acc96ab6d5b8bb182990af6dea4bf0986af15549 | ["MIT"] | 9 | 2015-01-15T18:15:42.000Z | 2019-05-05T18:53:00.000Z |
"""
Interfaces module.
This module contains some common class interfaces, so they can be interchanged.
"""
from .ifieldmetadata import IFieldMetaData # noqa: F401
from .itablemetadata import ITableMetaData # noqa: F401
from .imanager import IManager # noqa: F401
| 26.8 | 79 | 0.776119 |
675a8e5b588a44734b130d6a3b989106cc9e111f | 2,694 | py | Python | Examples/Modules/space_charge_initialization/analysis.py | mrowan137/amrex | cafcb6bd5902fc72a4d6fa51b99fe837f5eb5381 | ["BSD-3-Clause-LBNL"] | 1 | 2020-09-16T04:01:09.000Z | 2020-09-16T04:01:09.000Z | Examples/Modules/space_charge_initialization/analysis.py | mrowan137/amrex | cafcb6bd5902fc72a4d6fa51b99fe837f5eb5381 | ["BSD-3-Clause-LBNL"] | null | null | null | Examples/Modules/space_charge_initialization/analysis.py | mrowan137/amrex | cafcb6bd5902fc72a4d6fa51b99fe837f5eb5381 | ["BSD-3-Clause-LBNL"] | 1 | 2019-09-16T18:20:16.000Z | 2019-09-16T18:20:16.000Z |
#!/usr/bin/env python
# Copyright 2019-2020 Axel Huebl, Remi Lehe
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
"""
This script checks the space-charge initialization routine, by
verifying that the space-charge field of a Gaussian beam corresponds to
the expected theoretical field.
"""
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import yt
import numpy as np
import scipy.constants as scc
from scipy.special import gammainc
yt.funcs.mylog.setLevel(0)
# Parameters from the Simulation
Qtot = -1.e-20
r0 = 2.e-6
# Open data file
filename = sys.argv[1]
ds = yt.load( filename )
# Extract data
ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
Ex_array = ad0['Ex'].to_ndarray().squeeze()
if ds.dimensionality == 2:
# Rename the z dimension as y, so as to make this script work for 2d and 3d
Ey_array = ad0['Ez'].to_ndarray().squeeze()
elif ds.dimensionality == 3:
Ey_array = ad0['Ey'].to_ndarray()
Ez_array = ad0['Ez'].to_ndarray()
# Extract grid coordinates
Nx, Ny, Nz = ds.domain_dimensions
xmin, ymin, zmin = ds.domain_left_edge.v
Lx, Ly, Lz = ds.domain_width.v
x = xmin + Lx/Nx*(0.5+np.arange(Nx))
y = ymin + Ly/Ny*(0.5+np.arange(Ny))
z = zmin + Lz/Nz*(0.5+np.arange(Nz))
# Compute theoretical field
if ds.dimensionality == 2:
x_2d, y_2d = np.meshgrid(x, y, indexing='ij')
r2 = x_2d**2 + y_2d**2
factor = (Qtot/r0)/(2*np.pi*scc.epsilon_0*r2) * (1-np.exp(-r2/(2*r0**2)))
Ex_th = x_2d * factor
Ey_th = y_2d * factor
elif ds.dimensionality == 3:
x_2d, y_2d, z_2d = np.meshgrid(x, y, z, indexing='ij')
r2 = x_2d**2 + y_2d**2 + z_2d**2
factor = Qtot/(4*np.pi*scc.epsilon_0*r2**1.5) * gammainc(3./2, r2/(2.*r0**2))
Ex_th = factor*x_2d
Ey_th = factor*y_2d
Ez_th = factor*z_2d
# Plot theory and data
def make_2d(arr):
if arr.ndim == 3:
return arr[:,:,Nz//2]
else:
return arr
plt.figure(figsize=(10,10))
plt.subplot(221)
plt.title('Ex: Theory')
plt.imshow(make_2d(Ex_th))
plt.colorbar()
plt.subplot(222)
plt.title('Ex: Simulation')
plt.imshow(make_2d(Ex_array))
plt.colorbar()
plt.subplot(223)
plt.title('Ey: Theory')
plt.imshow(make_2d(Ey_th))
plt.colorbar()
plt.subplot(224)
plt.title('Ey: Simulation')
plt.imshow(make_2d(Ey_array))
plt.colorbar()
plt.savefig('Comparison.png')
# Automatically check the results
def check(E, E_th, label):
print( 'Relative error in %s: %.3f'%(
label, abs(E-E_th).max()/E_th.max()))
assert np.allclose( E, E_th, atol=0.1*E_th.max() )
check( Ex_array, Ex_th, 'Ex' )
check( Ey_array, Ey_th, 'Ey' )
if ds.dimensionality == 3:
check( Ez_array, Ez_th, 'Ez' )
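For reference, the closed-form radial fields that the `factor` expressions above implement, written out from the code, with Q_tot the total charge, r_0 the Gaussian RMS size, and P the regularized lower incomplete gamma function used by `scipy.special.gammainc`:

```latex
% 2D, with r^2 = x^2 + y^2:
E_r(r) = \frac{Q_\mathrm{tot}/r_0}{2\pi\epsilon_0\, r}\,\left(1 - e^{-r^2/(2 r_0^2)}\right)

% 3D, with r^2 = x^2 + y^2 + z^2:
E_r(r) = \frac{Q_\mathrm{tot}}{4\pi\epsilon_0\, r^2}\, P\!\left(\tfrac{3}{2},\, \frac{r^2}{2 r_0^2}\right)
```

The Cartesian components in the script follow by multiplying `factor` by x, y (and z in 3D), since `factor` carries one extra power of 1/r relative to the radial magnitude above.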
| 27.212121 | 89 | 0.67706 |
391be150d454a5192621ad3b9d32f75eeda6e4e0 | 1,976 | py | Python | dthm4kaiako/resources/migrations/0010_auto_20220406_1005.py | uccser/cs4teachers | e2dc6278a8ca75093cbab987b4c54d80799dd35b | ["MIT"] | 2 | 2018-04-25T09:04:09.000Z | 2018-07-06T08:51:33.000Z | dthm4kaiako/resources/migrations/0010_auto_20220406_1005.py | uccser/cs4teachers | e2dc6278a8ca75093cbab987b4c54d80799dd35b | ["MIT"] | 137 | 2017-06-13T10:28:18.000Z | 2018-09-11T11:14:59.000Z | dthm4kaiako/resources/migrations/0010_auto_20220406_1005.py | uccser/cs4teachers | e2dc6278a8ca75093cbab987b4c54d80799dd35b | ["MIT"] | 1 | 2018-04-08T08:53:11.000Z | 2018-04-08T08:53:11.000Z |
# Generated by Django 3.2.12 on 2022-04-05 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20200309_2200'),
]
operations = [
migrations.AlterField(
model_name='curriculumlearningarea',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='language',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='nzqastandard',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='progressoutcome',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='resource',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='resourcecomponent',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='technologicalarea',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='yearlevel',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 36.592593 | 111 | 0.611336 |
e882e682e9a2b404fb695eea79d709fc7d7ab497 | 103 | py | Python | wsgi.py | KMoszczyc/Sentiment-Predictor-Deep-L | 86535a66d03197f61ce128b8cd10645efbc74b70 | ["MIT"] | 1 | 2021-07-23T02:26:16.000Z | 2021-07-23T02:26:16.000Z | wsgi.py | KMoszczyc/Sentiment-Predictor-Deep-L | 86535a66d03197f61ce128b8cd10645efbc74b70 | ["MIT"] | null | null | null | wsgi.py | KMoszczyc/Sentiment-Predictor-Deep-L | 86535a66d03197f61ce128b8cd10645efbc74b70 | ["MIT"] | null | null | null |
from api import app
from content import train
if __name__ == "__main__":
app.run()
# train()
| 12.875 | 26 | 0.650485 |
4156828de0955fb8c8d24058f9963802aec969f0 | 107 | py | Python | test.py | sagorbrur/itranslit | 2ec27b86e334e982e43909042dc3f22597ee4f2a | ["MIT"] | null | null | null | test.py | sagorbrur/itranslit | 2ec27b86e334e982e43909042dc3f22597ee4f2a | ["MIT"] | null | null | null | test.py | sagorbrur/itranslit | 2ec27b86e334e982e43909042dc3f22597ee4f2a | ["MIT"] | null | null | null |
from itranslit.transliterate import Translit
trans = Translit('ta')
res = trans.predict('ami')
print(res)
| 17.833333 | 44 | 0.757009 |
c0d58244b9cd98c5e0f76b2d322ca89a17801749 | 3,628 | py | Python | .history/Missions_to_Mars/scrape_mars_20200809103224.py | ermiasgelaye/web-scraping-challenge | f99c3436dfb0169595c46dae7733d90e21385cc6 | ["ADSL"] | null | null | null | .history/Missions_to_Mars/scrape_mars_20200809103224.py | ermiasgelaye/web-scraping-challenge | f99c3436dfb0169595c46dae7733d90e21385cc6 | ["ADSL"] | null | null | null | .history/Missions_to_Mars/scrape_mars_20200809103224.py | ermiasgelaye/web-scraping-challenge | f99c3436dfb0169595c46dae7733d90e21385cc6 | ["ADSL"] | 2 | 2020-11-02T08:12:16.000Z | 2021-05-17T21:45:42.000Z |
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
import re
# This is for debugging
def savetofile(contents):
file = open('_temporary.txt',"w",encoding="utf-8")
file.write(contents)
file.close()
def scrape():
executable_path = {"executable_path": "chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
# NASA Mars News
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
time.sleep(3)
html = browser.html
soup = bs(html, 'html.parser')
slides = soup.find_all('li', class_='slide')
html = browser.html
soup = bs(html, "html.parser")
content_title = slides[0].find('div', class_='content_title')
news_title = content_title.text.strip()
article_teaser_body = slides[0].find('div', class_='article_teaser_body')
news_p = article_teaser_body.text.strip()
# JPL Mars Space Images
base_url = 'https://www.jpl.nasa.gov'
url = base_url + '/spaceimages/?search=&category=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
featured_image_url = base_url + soup.find('a',class_='button fancybox')['data-fancybox-href']
# Mars Weather
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
mars_tweets = soup.find_all('p', class_="TweetTextSize"), soup.find_all(
'span', class_="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0")
    mars_weather = None
    for tweets in mars_tweets:
        mars_tweet = tweets
        for tweet in mars_tweet:
            try:
                if 'InSight' in tweet.text:
                    mars_weather = tweet.text
                    if tweet.a in tweet:
                        mars_weather = mars_weather.strip(tweet.a.text)
                    break
            except:
                continue
# Mars facts
url = 'https://space-facts.com/mars/'
browser.visit(url) # not necessary, but added for checking the operation
time.sleep(1)
dfs = pd.read_html(url)
for df in dfs:
try:
df = df.rename(columns={0: "Description", 1: "Value"})
df = df.set_index("Description")
marsfacts_html = df.to_html().replace('\n', '')
# df.to_html('marsfacts.html') # to save to a file to test
break
except:
continue
# Mars Hemispheres
base_url = 'https://astrogeology.usgs.gov'
url = base_url + '/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
items = soup.find_all('div', class_='item')
urls = []
titles = []
for item in items:
urls.append(base_url + item.find('a')['href'])
titles.append(item.find('h3').text.strip())
img_urls = []
for oneurl in urls:
browser.visit(oneurl)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
oneurl = base_url+soup.find('img',class_='wide-image')['src']
img_urls.append(oneurl)
hemisphere_image_urls = []
for i in range(len(titles)):
hemisphere_image_urls.append({'title':titles[i],'img_url':img_urls[i]})
# Assigning scraped data to a page
marspage = {}
marspage["news_title"] = news_title
marspage["news_p"] = news_p
marspage["featured_image_url"] = featured_image_url
    marspage["mars_weather"] = mars_weather
marspage["marsfacts_html"] = marsfacts_html
marspage["hemisphere_image_urls"] = hemisphere_image_urls
return marspage
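A minimal way to exercise `scrape()` by hand, assuming `chromedriver` is available on the PATH and the scraped sites are reachable; this driver block is an added illustration, not part of the original snapshot:

```python
if __name__ == "__main__":
    # Running the full scrape opens a Chrome window via Splinter and takes a while.
    mars_data = scrape()
    for key in ("news_title", "news_p", "featured_image_url",
                "mars_weather", "hemisphere_image_urls"):
        print(key, "->", mars_data.get(key))
```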
| 27.484848 | 101 | 0.62817 |
4dd53a6467ee4228228d98a517efb5052ce64bd1 | 3,620 | py | Python | camera_stream_recording/Utils.py | Dontoronto/TrafficMonitoring | 3da106694a78400453d4728959e545a62632f0fc | ["Apache-2.0"] | 5 | 2022-01-11T22:59:50.000Z | 2022-02-18T21:11:12.000Z | camera_stream_recording/Utils.py | Dontoronto/TrafficMonitoring | 3da106694a78400453d4728959e545a62632f0fc | ["Apache-2.0"] | 1 | 2022-02-16T12:32:23.000Z | 2022-02-16T12:32:23.000Z | camera_stream_recording/Utils.py | Dontoronto/TrafficMonitoring | 3da106694a78400453d4728959e545a62632f0fc | ["Apache-2.0"] | 1 | 2021-10-25T00:15:37.000Z | 2021-10-25T00:15:37.000Z |
# ****************************************************************************
# @Utils.py
#
#
#
#
# @copyright (c) 2021 Elektronische Fahrwerksysteme GmbH. All rights reserved.
# Dr.-Ludwig-Kraus-Straße 6, 85080 Gaimersheim, DE, https://www.efs-auto.com
# ****************************************************************************
import os
import subprocess
from datetime import datetime, timedelta
def delete_file(filename):
"""Delete a specific file"""
if os.path.exists(filename):
os.remove(filename)
else:
print("The file " + filename + " does not exist")
def generate_unique_name():
"""Returns a 'unique' name as string by using the current timestamp.\n
Format of generated name: yyyy-mm-dd_hh-mm-ss
"""
cur_datetime = datetime.now()
return cur_datetime.strftime("%Y-%m-%d_%H-%M-%S")
def get_length(filename):
"""
    Get the length of a specific file with ffprobe from the ffmpeg library
:param filename: this param is used for the file
:type filename: str
:return: length of the given video file
:rtype: float
"""
    # use ffprobe because it is faster than alternatives (for example moviepy)
result = subprocess.run([
"ffprobe", "-v", "error", "-show_entries", "format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", filename
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return float(result.stdout)
def is_video_consistent(filename, video_duration):
"""
Check if the Video is consistent with the length criteria
:param filename: filename
:type filename: str
:param video_duration: expected video duration of video file in seconds
:type video_duration: float
:return: if the video is consistent due to a threshold
:rtype: bool
"""
video_length = get_length(filename)
    # Check if the video is more than 10 percent shorter than expected.
    # A shorter video could indicate a "jump" in the recording -> delete the video
    expected_length = video_duration
    tolerance = expected_length - (expected_length * 0.1)
    if video_length < tolerance:
        print('video is more than 10 percent shorter than expected:')
print('video_length: ' + str(video_length))
print('expected_length: ' + str(expected_length))
print('tolerance: ' + str(tolerance))
return False
else:
print('video_length: ' + str(video_length))
print('expected_length: ' + str(expected_length))
return True
def upload_video(filename, uploads_list, storage_addr, sas_token):
"""
    Prepare the upload of files to an azure file storage
:param filename: filename
:type filename: str
:param uploads_list: list of processes to track the process of uploading
:type uploads_list: list
:param storage_addr: address for the azure storage path
:type storage_addr: str
    :param sas_token: token for azure authentication
:type sas_token: str
:return: -
:rtype: void
"""
print("Upload file " + filename)
dest = storage_addr + filename + sas_token
sub_p_id = subprocess.Popen(["./azcopy", "copy", filename, dest],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
uploads_list.append([sub_p_id, filename])
return
def get_end_timestamp_from_minutes_duration(video_max_duration):
return datetime.now() + timedelta(minutes=video_max_duration)
def get_end_timestamp_from_seconds_duration(video_max_duration):
return datetime.now() + timedelta(seconds=video_max_duration)
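A small usage sketch for the helpers above, assuming `ffprobe` (ffmpeg) is installed; the file name `recording.mp4` and the 600-second target duration are hypothetical values for illustration only:

```python
if __name__ == "__main__":
    print("new clip name:", generate_unique_name() + ".mp4")
    expected_seconds = 600.0                      # hypothetical expected duration
    if not is_video_consistent("recording.mp4", expected_seconds):
        delete_file("recording.mp4")              # drop clips that came out too short
    print("record until:", get_end_timestamp_from_minutes_duration(10))
```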
| 33.518519 | 113 | 0.65 |
100ee3a2ea11c9573c859cc362bbf01b5c2489e9 | 7,119 | py | Python | compiler/modules/write_driver_array.py | ajaymr12/openram | b46fb724b603c79445bd5601db230468684ad233 | ["BSD-3-Clause"] | 43 | 2016-11-06T20:53:46.000Z | 2021-09-03T18:57:39.000Z | compiler/modules/write_driver_array.py | ajaymr12/openram | b46fb724b603c79445bd5601db230468684ad233 | ["BSD-3-Clause"] | 27 | 2016-11-15T19:28:25.000Z | 2018-02-20T19:23:52.000Z | compiler/modules/write_driver_array.py | ajaymr12/openram | b46fb724b603c79445bd5601db230468684ad233 | ["BSD-3-Clause"] | 30 | 2016-11-09T16:02:45.000Z | 2018-02-23T17:07:59.000Z |
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
from math import log
import design
from tech import drc
import debug
from sram_factory import factory
from vector import vector
from globals import OPTS
class write_driver_array(design.design):
"""
Array of tristate drivers to write to the bitlines through the column mux.
Dynamically generated write driver array of all bitlines.
"""
def __init__(self, name, columns, word_size,write_size=None):
design.design.__init__(self, name)
debug.info(1, "Creating {0}".format(self.name))
self.add_comment("columns: {0}".format(columns))
self.add_comment("word_size {0}".format(word_size))
self.columns = columns
self.word_size = word_size
self.write_size = write_size
self.words_per_row = int(columns / word_size)
if self.write_size:
self.num_wmasks = int(self.word_size/self.write_size)
self.create_netlist()
if not OPTS.netlist_only:
self.create_layout()
def create_netlist(self):
self.add_modules()
self.add_pins()
self.create_write_array()
def create_layout(self):
if self.bitcell.width > self.driver.width:
self.width = self.columns * self.bitcell.width
else:
self.width = self.columns * self.driver.width
self.height = self.driver.height
self.place_write_array()
self.add_layout_pins()
self.add_boundary()
self.DRC_LVS()
def add_pins(self):
for i in range(self.word_size):
self.add_pin("data_{0}".format(i), "INPUT")
for i in range(self.word_size):
self.add_pin("bl_{0}".format(i), "OUTPUT")
self.add_pin("br_{0}".format(i), "OUTPUT")
if self.write_size:
for i in range(self.num_wmasks):
self.add_pin("en_{0}".format(i), "INPUT")
else:
self.add_pin("en", "INPUT")
self.add_pin("vdd", "POWER")
self.add_pin("gnd", "GROUND")
def add_modules(self):
self.driver = factory.create(module_type="write_driver")
self.add_mod(self.driver)
# This is just used for measurements,
# so don't add the module
self.bitcell = factory.create(module_type="bitcell")
def create_write_array(self):
self.driver_insts = {}
w = 0
windex=0
for i in range(0,self.columns,self.words_per_row):
name = "write_driver{}".format(i)
index = int(i/self.words_per_row)
self.driver_insts[index]=self.add_inst(name=name,
mod=self.driver)
if self.write_size:
self.connect_inst(["data_{0}".format(index),
"bl_{0}".format(index),
"br_{0}".format(index),
"en_{0}".format(windex), "vdd", "gnd"])
w+=1
# when w equals write size, the next en pin can be connected since we are now at the next wmask bit
if w == self.write_size:
w = 0
windex+=1
else:
self.connect_inst(["data_{0}".format(index),
"bl_{0}".format(index),
"br_{0}".format(index),
"en", "vdd", "gnd"])
def place_write_array(self):
if self.bitcell.width > self.driver.width:
self.driver_spacing = self.bitcell.width
else:
self.driver_spacing = self.driver.width
for i in range(0,self.columns,self.words_per_row):
index = int(i/self.words_per_row)
base = vector(i * self.driver_spacing, 0)
self.driver_insts[index].place(base)
def add_layout_pins(self):
for i in range(self.word_size):
din_pin = self.driver_insts[i].get_pin("din")
self.add_layout_pin(text="data_{0}".format(i),
layer="metal2",
offset=din_pin.ll(),
width=din_pin.width(),
height=din_pin.height())
bl_pin = self.driver_insts[i].get_pin("bl")
self.add_layout_pin(text="bl_{0}".format(i),
layer="metal2",
offset=bl_pin.ll(),
width=bl_pin.width(),
height=bl_pin.height())
br_pin = self.driver_insts[i].get_pin("br")
self.add_layout_pin(text="br_{0}".format(i),
layer="metal2",
offset=br_pin.ll(),
width=br_pin.width(),
height=br_pin.height())
for n in ["vdd", "gnd"]:
pin_list = self.driver_insts[i].get_pins(n)
for pin in pin_list:
pin_pos = pin.center()
# Add the M2->M3 stack
self.add_via_center(layers=("metal2", "via2", "metal3"),
offset=pin_pos)
self.add_layout_pin_rect_center(text=n,
layer="metal3",
offset=pin_pos)
if self.write_size:
for bit in range(self.num_wmasks):
en_pin = self.driver_insts[bit*self.write_size].get_pin("en")
# Determine width of wmask modified en_pin with/without col mux
wmask_en_len = self.words_per_row*(self.write_size * self.driver_spacing)
if (self.words_per_row == 1):
en_gap = self.driver_spacing - en_pin.width()
else:
en_gap = self.driver_spacing
self.add_layout_pin(text="en_{0}".format(bit),
layer=en_pin.layer,
offset=en_pin.ll(),
width=wmask_en_len-en_gap,
height=en_pin.height())
else:
self.add_layout_pin(text="en",
layer="metal1",
offset=self.driver_insts[0].get_pin("en").ll().scale(0,1),
width=self.width)
def get_w_en_cin(self):
"""Get the relative capacitance of all the enable connections in the bank"""
#The enable is connected to a nand2 for every row.
return self.driver.get_w_en_cin() * len(self.driver_insts)
| 39.77095 | 115 | 0.511167 |
4f278aeb8e8c576c496f082f75a850d65989f983 | 249 | py | Python | __init__.py | AgapovDP/ghost_images | 44ea500d9f6f51628571b6f5f704dfbcfd3d51b8 | ["MIT"] | 5 | 2021-11-02T12:13:05.000Z | 2022-01-14T07:20:56.000Z | __init__.py | AgapovDP/ghost_images | 44ea500d9f6f51628571b6f5f704dfbcfd3d51b8 | ["MIT"] | 1 | 2021-12-24T12:32:34.000Z | 2021-12-24T12:32:34.000Z | __init__.py | AgapovDP/ghost_images | 44ea500d9f6f51628571b6f5f704dfbcfd3d51b8 | ["MIT"] | 1 | 2021-09-23T10:39:27.000Z | 2021-09-23T10:39:27.000Z |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 17:49:32 2021
@author: vonGostev
"""
import os
import sys
sys.path.append(os.path.abspath('../lightprop2d'))
sys.path.append(os.path.abspath('../pyMMF'))
sys.path.append(os.path.abspath('../..'))
| 17.785714 | 50 | 0.650602 |
3347c9bd383b3d666f124684d422a01a7b463e08 | 3,378 | py | Python | quspin/basis/basis_general/_basis_general_core/setup.py | marcotav/QuSpin | ba3dbb9c8b4440ad7deba6412444489d45cb7349 | ["BSD-3-Clause"] | 1 | 2020-05-10T08:32:34.000Z | 2020-05-10T08:32:34.000Z | quspin/basis/basis_general/_basis_general_core/setup.py | marcotav/QuSpin | ba3dbb9c8b4440ad7deba6412444489d45cb7349 | ["BSD-3-Clause"] | null | null | null | quspin/basis/basis_general/_basis_general_core/setup.py | marcotav/QuSpin | ba3dbb9c8b4440ad7deba6412444489d45cb7349 | ["BSD-3-Clause"] | null | null | null |
def cython_files():
import os,glob
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
package_dir = os.path.dirname(os.path.realpath(__file__))
cython_src = glob.glob(os.path.join(package_dir,"*.pyx"))
include_dirs = os.path.join(package_dir,"source")
if USE_CYTHON:
cythonize(cython_src,language="c++",include_path=[include_dirs])
def configuration(parent_package='', top_path=None):
import numpy,os,sys
from numpy.distutils.misc_util import Configuration
config = Configuration('_basis_general_core',parent_package, top_path)
if sys.platform == "win32":
extra_compile_args=["/openmp"]
extra_link_args=["/openmp"]
elif sys.platform == "darwin":
extra_compile_args = []
extra_link_args = []
else:
extra_compile_args = ["-fopenmp"]
extra_link_args = ["-lgomp"]
package_dir = os.path.dirname(os.path.realpath(__file__))
include_dirs = os.path.join(package_dir,"source")
hcp_src = os.path.join(package_dir,"hcb_core.cpp")
config.add_extension('hcb_core',sources=hcp_src,include_dirs=[numpy.get_include(),include_dirs],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
language="c++")
boson_src = os.path.join(package_dir,"boson_core.cpp")
config.add_extension('boson_core',sources=boson_src,include_dirs=[numpy.get_include(),include_dirs],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
language="c++")
higher_spin_src = os.path.join(package_dir,"higher_spin_core.cpp")
config.add_extension('higher_spin_core',sources=higher_spin_src,include_dirs=[numpy.get_include(),include_dirs],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
language="c++")
# spinless_fermion_src = os.path.join(package_dir,"spinless_fermion_core.cpp")
# config.add_extension('spinless_fermion_core',sources=spinless_fermion_src,include_dirs=[numpy.get_include(),include_dirs],
# extra_compile_args=extra_compile_args,
# extra_link_args=extra_link_args,
# language="c++")
# spinful_fermion_src = os.path.join(package_dir,"spinful_fermion_core.cpp")
# config.add_extension('spinful_fermion_core',sources=spinful_fermion_src,include_dirs=[numpy.get_include(),include_dirs],
# extra_compile_args=extra_compile_args,
# extra_link_args=extra_link_args,
# language="c++")
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
import sys
try:
instr = sys.argv[1]
if instr == "build_templates":
cython_files()
else:
setup(**configuration(top_path='').todict())
except IndexError: pass
| 41.703704 | 132 | 0.593546 |
d043323768ef9e3ddab29946d57bcf2ee3f2f0de | 2,375 | py | Python | openpype/hosts/photoshop/plugins/load/load_image.py | yosuperdope/OpenPype | 0c90df97ddb8cda291a4f66d35da58b3deb94a71 | ["MIT"] | 1 | 2020-09-21T14:55:33.000Z | 2020-09-21T14:55:33.000Z | openpype/hosts/photoshop/plugins/load/load_image.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | ["MIT"] | null | null | null | openpype/hosts/photoshop/plugins/load/load_image.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | ["MIT"] | null | null | null |
import re
from avalon import api, photoshop
from openpype.hosts.photoshop.plugins.lib import get_unique_layer_name
stub = photoshop.stub()
class ImageLoader(api.Loader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
families = ["image", "render"]
representations = ["*"]
def load(self, context, name=None, namespace=None, data=None):
layer_name = get_unique_layer_name(stub.get_layers(),
context["asset"]["name"],
name)
with photoshop.maintained_selection():
layer = stub.import_smart_object(self.fname, layer_name)
self[:] = [layer]
namespace = namespace or layer_name
return photoshop.containerise(
name,
namespace,
layer,
context,
self.__class__.__name__
)
def update(self, container, representation):
""" Switch asset or change version """
layer = container.pop("layer")
context = representation.get("context", {})
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
layer_name = "{}_{}".format(context["asset"], context["subset"])
# switching assets
if namespace_from_container != layer_name:
            layer_name = get_unique_layer_name(stub.get_layers(),
                                               context["asset"],
                                               context["subset"])
else: # switching version - keep same name
layer_name = container["namespace"]
path = api.get_representation_path(representation)
with photoshop.maintained_selection():
stub.replace_smart_object(
layer, path, layer_name
)
stub.imprint(
layer, {"representation": str(representation["_id"])}
)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from Headline
Args:
container (dict): container to be removed - used to get layer_id
"""
layer = container.pop("layer")
stub.imprint(layer, {})
stub.delete_layer(layer.id)
def switch(self, container, representation):
self.update(container, representation)
| 31.666667 | 77 | 0.568842 |
11c05d1b7d7194159ededad5c6e28d0139732997 | 3,538 | py | Python | pokemongo_bot/tests/navigation/fort_navigator_test.py | trongtan/pokemonGo | 670df159c5c5facd14dd9e79ea0db5d7e8dcc2e1 | ["MIT"] | null | null | null | pokemongo_bot/tests/navigation/fort_navigator_test.py | trongtan/pokemonGo | 670df159c5c5facd14dd9e79ea0db5d7e8dcc2e1 | ["MIT"] | null | null | null | pokemongo_bot/tests/navigation/fort_navigator_test.py | trongtan/pokemonGo | 670df159c5c5facd14dd9e79ea0db5d7e8dcc2e1 | ["MIT"] | null | null | null |
import unittest
from mock import MagicMock
from api.worldmap import Cell
from pokemongo_bot import FortNavigator
from pokemongo_bot.navigation.destination import Destination
from pokemongo_bot.tests import create_mock_bot
class FortNavigatorTest(unittest.TestCase):
def test_navigate_pokestops_known(self):
bot = create_mock_bot({
"walk": 5,
"max_steps": 2
})
api_wrapper = bot.api_wrapper
pgoapi = api_wrapper._api # pylint: disable=protected-access
pgoapi.set_response('fort_details', self._create_pokestop("Test Stop", 51.5043872, -0.0741802))
pgoapi.set_response('fort_details', self._create_pokestop("Test Stop 2", 51.5060435, -0.073983))
navigator = FortNavigator(bot)
map_cells = self._create_map_cells()
destinations = list()
for destination in navigator.navigate(map_cells):
assert isinstance(destination, Destination)
if len(destinations) == 0:
assert destination.target_lat == 51.5043872
assert destination.target_lng == -0.0741802
assert destination.name == "PokeStop \"Test Stop\""
elif len(destinations) == 1:
assert destination.target_lat == 51.5060435
assert destination.target_lng == -0.073983
assert destination.name == "PokeStop \"Test Stop 2\""
destinations.append(destination)
assert len(destinations) == 2
assert pgoapi.call_stack_size() == 0
def test_navigate_pokestops_unknown(self):
bot = create_mock_bot({
"walk": 5,
"max_steps": 2
})
api_wrapper = bot.api_wrapper
api_wrapper.call = MagicMock(return_value=None)
pgoapi = api_wrapper._api # pylint: disable=protected-access
navigator = FortNavigator(bot)
map_cells = self._create_map_cells()
destinations = list()
for destination in navigator.navigate(map_cells):
assert isinstance(destination, Destination)
if len(destinations) == 0:
assert destination.target_lat == 51.5043872
assert destination.target_lng == -0.0741802
assert destination.name == "PokeStop \"fort_unknown1\""
elif len(destinations) == 1:
assert destination.target_lat == 51.5060435
assert destination.target_lng == -0.073983
assert destination.name == "PokeStop \"fort_unknown2\""
destinations.append(destination)
assert len(destinations) == 2
assert pgoapi.call_stack_size() == 0
def _create_map_cells(self):
return [
Cell({
"s2_cell_id": 1,
"spawn_points": [
{
"latitude": 0,
"longitude": 0
}
],
"forts": [
self._create_pokestop("unknown1", 51.5043872, -0.0741802),
self._create_pokestop("unknown2", 51.5060435, -0.073983),
]
})
]
@staticmethod
def _create_pokestop(name, lat, lng):
return {
"id": "fort_" + str(name),
"name": str(name),
"latitude": lat,
"longitude": lng,
"enabled": 1,
"last_modified_timestamp_ms": 0,
"cooldown_complete_timestamp_ms": 0,
"type": 1
}
| 34.019231 | 104 | 0.572357 |
3b390e0f290512df65d2cc688ee6a87e1f8e49be | 4,838 | py | Python | wallpaper/tools/file_cache.py | gabbpuy/wallpaper | ec5fdeeb92d1a6285e1ac2ec3b0164929b7ea305 | ["BSD-2-Clause"] | 1 | 2021-06-01T21:24:42.000Z | 2021-06-01T21:24:42.000Z | wallpaper/tools/file_cache.py | gabbpuy/wallpaper | ec5fdeeb92d1a6285e1ac2ec3b0164929b7ea305 | ["BSD-2-Clause"] | null | null | null | wallpaper/tools/file_cache.py | gabbpuy/wallpaper | ec5fdeeb92d1a6285e1ac2ec3b0164929b7ea305 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import logging
import os
import queue
import sqlite3
import threading
from pickle import load, dump
from wallpaper.tools.dir_entry import DirEntry
from wallpaper.tools.image_history import ImageHistory
from wallpaper.tools.decorators import locked
osPath = os.path
class _Cache(metaclass=ABCMeta):
@abstractmethod
def get(self, path):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def update(self, path, files=None, **kwargs):
pass
@abstractmethod
def set(self, path, files):
pass
class SQL_Cache(_Cache):
CacheLock = threading.RLock()
CacheQueue = queue.Queue()
def __init__(self):
self.dir_cache = {}
self.history = ImageHistory()
self.changed = False
if not osPath.exists('dircache.db'):
db = sqlite3.connect('dircache.db')
cursor = db.cursor()
cursor.executescript(
"""
                CREATE TABLE dir_entries (
                    path TEXT NOT NULL,
                    modified_time REAL NOT NULL
                );
                -- "path" matches the column used by the SELECT/INSERT statements below;
                -- the UNIQUE index lets INSERT OR REPLACE update an existing entry.
                CREATE UNIQUE INDEX dir_path ON dir_entries(path);
"""
)
db.close()
self.queue_thread = threading.Thread(target=self._set_db)
self.queue_thread.start()
# self.count()
def count(self):
db = sqlite3.connect('dircache.db')
cursor = db.cursor()
cursor.execute('SELECT COUNT(*) FROM dir_entries')
logging.error("Cache Entries: %d", cursor.fetchone()[0])
def get(self, path):
if path not in self.dir_cache:
db = sqlite3.connect('dircache.db')
cursor = db.cursor()
cursor.execute('SELECT path, modified_time FROM dir_entries WHERE path = ?', (path,))
try:
r = cursor.fetchone()
if r:
self.dir_cache[path] = DirEntry(r[0], stat=r[1])
except:
logging.exception("QUERY FAILED %s", path)
return self.dir_cache.get(path)
def flush(self):
SQL_Cache.CacheQueue.put(None)
self.queue_thread.join()
def update(self, path, files=None, stat=os.stat):
if self.get(path) and self.dir_cache[path].stat == int(stat(path).st_mtime):
return path
return self.set(path, files)
@staticmethod
def _set_db():
db = sqlite3.connect('dircache.db')
while True:
entry = SQL_Cache.CacheQueue.get()
if entry is None:
db.commit()
db.close()
break
cursor = db.cursor()
cursor.execute(
"""
INSERT OR REPLACE INTO dir_entries (modified_time, path) VALUES(?, ?)
""", (entry.stat, entry.path)
)
def set(self, path, files):
entry = DirEntry(path, files)
self.dir_cache[path] = entry
self.CacheQueue.put(entry)
return path
class OnFileCache(_Cache):
CacheLock = threading.RLock()
def __init__(self):
self.directory_cache = {}
self.history = ImageHistory()
self.changed = False
if osPath.exists('dircache.txt'):
try:
self.directory_cache = load(open('dircache.txt', 'rb'))
except os.error:
pass
except AttributeError:
pass
def get(self, path):
return self.directory_cache.get(path)
@locked(CacheLock)
def set(self, dire, files):
self.changed = True
self.directory_cache[dire] = DirEntry(dire, files)
return dire
def update(self, path, files=None, stat=os.stat):
if path in self.directory_cache and self.directory_cache.get(path).stat == stat(path).st_mtime:
return path
return self.set(path, files)
@locked(CacheLock)
def flush(self):
if self.changed:
dump(self.directory_cache, open('dircache.txt', 'wb'), -1)
self.changed = False
class NullCache(_Cache):
CacheLock = threading.RLock()
def __init__(self):
self.directory_cache = {}
self.history = ImageHistory()
def get(self, path):
return self.directory_cache.get(path)
@locked(CacheLock)
def set(self, path, files):
self.directory_cache[path] = DirEntry(path, files)
return path
def update(self, path, files=None, stat=os.stat):
if path in self.directory_cache and self.directory_cache.get(path).stat == stat(path).st_mtime:
return path
return self.set(path, files)
def flush(self):
pass
DirectoryCache = NullCache
| 27.645714 | 103 | 0.574204 |
53a35b9eecd1b30f83fd635182d60e3fdf686fb4 | 12,779 | py | Python | nova/tests/functional/api_sample_tests/test_hypervisors.py | daespinel/nova | 67c2eaf83bcd613c8b56e470899c4562b900b867 | ["Apache-2.0"] | 2 | 2021-10-11T04:56:25.000Z | 2022-02-16T08:49:29.000Z | nova/tests/functional/api_sample_tests/test_hypervisors.py | sapcc/nova | ad71af7307365d6aabd122e140f56df4db1e6182 | ["Apache-2.0"] | 132 | 2017-03-27T11:31:52.000Z | 2022-03-30T08:45:02.000Z | nova/tests/functional/api_sample_tests/test_hypervisors.py | sapcc/nova | ad71af7307365d6aabd122e140f56df4db1e6182 | ["Apache-2.0"] | 8 | 2017-03-27T07:50:38.000Z | 2020-02-14T16:55:56.000Z |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.cells import utils as cells_utils
from nova import objects
from nova.tests.functional.api_sample_tests import api_sample_base
class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_search(self):
response = self._do_get('os-hypervisors/fake/search')
self._verify_response('hypervisors-search-resp', {}, response, 200)
def test_hypervisors_without_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-without-servers-resp',
{}, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_with_servers(self, mock_instance_get):
instance = [
{
"deleted": None,
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"deleted": None,
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
mock_instance_get.return_value = instance
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-with-servers-resp', {},
response, 200)
def test_hypervisors_detail(self):
hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id,
'service_id': '[0-9]+',
}
response = self._do_get('os-hypervisors/detail')
self._verify_response('hypervisors-detail-resp', subs, response, 200)
def test_hypervisors_show(self):
hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id,
'service_id': '[0-9]+',
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
self._verify_response('hypervisors-show-resp', subs, response, 200)
def test_hypervisors_statistics(self):
response = self._do_get('os-hypervisors/statistics')
self._verify_response('hypervisors-statistics-resp', {}, response, 200)
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stub_out('nova.compute.api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = '1'
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
def test_hypervisor_uptime(self, mocks):
fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini',
hypervisor_hostname='fake-mini')
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
def fake_compute_node_get(self, context, hyp):
return fake_hypervisor
def fake_service_get_by_compute_host(self, context, host):
return cells_utils.ServiceProxy(
objects.Service(id=1, host='fake-mini', disabled=False,
disabled_reason=None),
'cell1')
self.stub_out(
'nova.compute.cells_api.HostAPI.compute_node_get',
fake_compute_node_get)
self.stub_out(
'nova.compute.cells_api.HostAPI.service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stub_out(
'nova.compute.cells_api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = fake_hypervisor.id
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': str(hypervisor_id)}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class HypervisorsSampleJson228Tests(HypervisorsSampleJsonTests):
microversion = '2.28'
scenarios = [('v2_28', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson228Tests, self).setUp()
self.api.microversion = self.microversion
class HypervisorsSampleJson233Tests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
microversion = '2.33'
scenarios = [('v2_33', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson233Tests, self).setUp()
self.api.microversion = self.microversion
# Start a new compute service to fake a record with hypervisor id=2
# for pagination test.
host = 'host1'
self.start_service('compute', host=host)
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors?limit=1&marker=1')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_detail(self):
subs = {
'hypervisor_id': '2',
'host': 'host1',
'host_name': 'host1'
}
response = self._do_get('os-hypervisors/detail?limit=1&marker=1')
self._verify_response('hypervisors-detail-resp', subs, response, 200)
class HypervisorsSampleJson253Tests(HypervisorsSampleJson228Tests):
microversion = '2.53'
scenarios = [('v2_53', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson253Tests, self).setUp()
self.compute_node_1 = self.compute.service_ref.compute_node
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
"""
# When comparing the template to the sample we just care that the
# hypervisor id and service id are UUIDs.
subs['hypervisor_id'] = vanilla_regexes['uuid']
subs['service_id'] = vanilla_regexes['uuid']
return subs
def test_hypervisors_list(self):
# Start another compute service to get a 2nd compute for paging tests.
compute_node_2 = self.start_service(
'compute', host='host2').service_ref.compute_node
marker = self.compute_node_1.uuid
response = self._do_get('os-hypervisors?limit=1&marker=%s' % marker)
subs = {'hypervisor_id': compute_node_2.uuid}
self._verify_response('hypervisors-list-resp', subs, response, 200)
def test_hypervisors_detail(self):
# Start another compute service to get a 2nd compute for paging tests.
host = 'host2'
service_2 = self.start_service('compute', host=host).service_ref
compute_node_2 = service_2.compute_node
marker = self.compute_node_1.uuid
subs = {
'hypervisor_id': compute_node_2.uuid,
'service_id': service_2.uuid
}
response = self._do_get('os-hypervisors/detail?limit=1&marker=%s' %
marker)
self._verify_response('hypervisors-detail-resp', subs, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_detail_with_servers(self, instance_get_all_by_host):
"""List hypervisors with details and with hosted servers."""
instances = [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
instance_get_all_by_host.return_value = instances
response = self._do_get('os-hypervisors/detail?with_servers=1')
subs = {
'hypervisor_id': self.compute_node_1.uuid,
'service_id': self.compute.service_ref.uuid,
}
self._verify_response('hypervisors-detail-with-servers-resp',
subs, response, 200)
def test_hypervisors_search(self):
"""The search route is deprecated in 2.53 and is now a query parameter
on the GET /os-hypervisors API.
"""
response = self._do_get(
'os-hypervisors?hypervisor_hostname_pattern=fake')
subs = {'hypervisor_id': self.compute_node_1.uuid}
self._verify_response('hypervisors-search-resp', subs, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_with_servers(self, instance_get_all_by_host):
"""The servers route is deprecated in 2.53 and is now a query parameter
on the GET /os-hypervisors API.
"""
instances = [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
instance_get_all_by_host.return_value = instances
response = self._do_get('os-hypervisors?with_servers=true')
subs = {'hypervisor_id': self.compute_node_1.uuid}
self._verify_response('hypervisors-with-servers-resp', subs,
response, 200)
def test_hypervisors_without_servers(self):
# This is the same as GET /os-hypervisors in 2.53 which is covered by
# test_hypervisors_list already.
pass
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stub_out('nova.compute.api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = self.compute_node_1.uuid
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
def test_hypervisors_show(self):
hypervisor_id = self.compute_node_1.uuid
subs = {
'hypervisor_id': hypervisor_id,
'service_id': self.compute.service_ref.uuid,
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
self._verify_response('hypervisors-show-resp', subs, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_show_with_servers(self, instance_get_all_by_host):
"""Tests getting details for a specific hypervisor and including the
hosted servers in the response.
"""
instances = [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
instance_get_all_by_host.return_value = instances
hypervisor_id = self.compute_node_1.uuid
subs = {
'hypervisor_id': hypervisor_id,
'service_id': self.compute.service_ref.uuid,
}
response = self._do_get('os-hypervisors/%s?with_servers=1' %
hypervisor_id)
self._verify_response('hypervisors-show-with-servers-resp', subs,
response, 200)
| 40.185535 | 79 | 0.627983 |
8274e430252ec3ed9a134801f1d5c04f7261033e | 89 | py | Python | dictonaryloop.py | bjoffficial/Python | 73e6fdc19a1bec18488405c4a60c30ba68581ce5 | ["Apache-2.0"] | null | null | null | dictonaryloop.py | bjoffficial/Python | 73e6fdc19a1bec18488405c4a60c30ba68581ce5 | ["Apache-2.0"] | null | null | null | dictonaryloop.py | bjoffficial/Python | 73e6fdc19a1bec18488405c4a60c30ba68581ce5 | ["Apache-2.0"] | null | null | null |
n={"name":"balaji","no":2345,"address":"345/vbnjh"}
for i in n.values():
print(i)
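The same dictionary can also be walked key and value together; a small companion example, not part of the original file:

```python
for key, value in n.items():
    print(key, "->", value)
```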
| 22.25 | 52 | 0.573034 |
e3df392d50ff8fef6c8dc6f04ce633cbe2b1a636 | 2,543 | py | Python | flaskr/products.py | MatheusXCH/Python-Server | 08f9dcfdba659fa8eba2ebb33b2dc60a375f54f6 | ["MIT"] | null | null | null | flaskr/products.py | MatheusXCH/Python-Server | 08f9dcfdba659fa8eba2ebb33b2dc60a375f54f6 | ["MIT"] | null | null | null | flaskr/products.py | MatheusXCH/Python-Server | 08f9dcfdba659fa8eba2ebb33b2dc60a375f54f6 | ["MIT"] | null | null | null |
from flask import make_response, abort
from config import db
from models import Product, ProductSchema
# # "./api/products", GET
def read_all():
products = Product.query.order_by(Product.name).all()
product_schema = ProductSchema(many=True)
data = product_schema.dump(products)
return data
# # "./api/product/{id}", GET
def read_one(product_id):
product = Product.query.filter(Product.product_id == product_id).one_or_none()
if product is not None:
product_schema = ProductSchema()
data = product_schema.dump(product)
return data
else:
abort(404, f"Product not found for ID: {product_id}")
# # "./api/products", POST
def create(product):
name = product.get("name")
brand = product.get("brand")
existing_product = (
Product.query.filter(Product.name == name)
.filter(Product.brand == brand)
.one_or_none()
)
if existing_product is None:
schema = ProductSchema()
new_product = schema.load(product, session=db.session)
db.session.add(new_product)
db.session.commit()
data = schema.dump(new_product)
return data, 201
else:
        abort(409, f"Product {name} of Brand {brand} already exists.")
# # "./api/product/{id}", PUT
def update(product_id, product):
update_product = Product.query.filter(Product.product_id == product_id).one_or_none()
name = product.get("name")
brand = product.get("brand")
existing_product = (
Product.query.filter(Product.name == name)
.filter(Product.brand == brand)
.one_or_none()
)
if update_product is None:
abort(404, f"Product not found for Id: {product_id}")
elif (existing_product is not None and existing_product.product_id != product_id):
abort(409, f"Product {name} of Brand {brand} already exists.")
else:
schema = ProductSchema()
update = schema.load(product, session=db.session)
update.product_id = update_product.product_id
db.session.merge(update)
db.session.commit()
data = schema.dump(update_product)
return data, 200
# # "./api/product/{id}", DELETE
def delete(product_id):
product = Product.query.filter(Product.product_id == product_id).one_or_none()
if product is not None:
db.session.delete(product)
db.session.commit()
return make_response(f"Product {product_id} deleted", 200)
else:
abort(404, f"Product not found for Id: {product_id}")
| 26.216495 | 89 | 0.648447 |
13719efa4aa08fdd1b53fd041a15cd4248dd1e05 | 180 | py | Python | Models/QueueObject.py | Jack-Dane/clipboard-copy | 2853ce2540298a26a51bc0f627d9e965562dd86d | ["MIT"] | null | null | null | Models/QueueObject.py | Jack-Dane/clipboard-copy | 2853ce2540298a26a51bc0f627d9e965562dd86d | ["MIT"] | 1 | 2021-10-04T20:54:46.000Z | 2021-10-05T21:34:18.000Z | Models/QueueObject.py | Jack-Dane/clipboard-copy | 2853ce2540298a26a51bc0f627d9e965562dd86d | ["MIT"] | null | null | null |
from queue import Queue
class QueueObject(Queue):
def clear(self):
"""
Additional function to easily clear the queue object
"""
with self.mutex:
self.queue.clear()
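A quick illustration of the extra `clear()` helper (the standard `queue.Queue` exposes no public way to empty itself, which is what motivates the subclass); this snippet is an added example, not part of the module:

```python
q = QueueObject()
q.put("copied text")
q.put("more copied text")
q.clear()           # empties the underlying deque while holding the queue's mutex
assert q.empty()
```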
| 13.846154 | 54 | 0.694444 |
e9b046a024b41171e22509453cdf8dc0d9ed10a4 | 310 | py | Python | commands/quit_cmd.py | GhostHitThose/DexioBot | 7b7c31576f8946f38d0f5838ea393db20f015115 | ["MIT"] | null | null | null | commands/quit_cmd.py | GhostHitThose/DexioBot | 7b7c31576f8946f38d0f5838ea393db20f015115 | ["MIT"] | null | null | null | commands/quit_cmd.py | GhostHitThose/DexioBot | 7b7c31576f8946f38d0f5838ea393db20f015115 | ["MIT"] | null | null | null |
def run(chat_id, update, context):
message = "Stopping Bot for Development"
user_status = context.bot.get_chat_member(chat_id, update.message.from_user.id)['status']
if user_status == "administrator" or user_status == "creator":
        updater.stop()  # NOTE: relies on a module-level "updater" (telegram Updater) defined elsewhere in the bot
context.bot.send_message(chat_id=chat_id, text=message)
| 38.75 | 90 | 0.758065 |
48458cd64c1f4a409c36ef244fcc077a5bcd07a1 | 2,338 | py | Python | CURRENT BUILD/plugins/wii.py | Wamy-Dev/DiscordRomScraperBot | d7b7e524baa423adb0f5fae9c8293cc3c83f83e1 | ["MIT"] | 12 | 2021-02-01T00:20:29.000Z | 2022-03-28T17:42:39.000Z | CURRENT BUILD/plugins/wii.py | Wamy-Dev/DiscordRomScraperBot | d7b7e524baa423adb0f5fae9c8293cc3c83f83e1 | ["MIT"] | 4 | 2021-02-06T18:58:43.000Z | 2022-03-31T04:36:01.000Z | CURRENT BUILD/plugins/wii.py | Wamy-Dev/DiscordRomScraperBot | d7b7e524baa423adb0f5fae9c8293cc3c83f83e1 | ["MIT"] | 2 | 2021-02-06T18:44:41.000Z | 2021-03-19T23:29:01.000Z |
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import asyncio
import time
from bs4 import BeautifulSoup
import pyshorteners
modules=["wii"]
color=0xFFFFFA
async def get(driver,searchTerm,moduleName):
#start multiuser
driver.get("https://github.com/Wamy-Dev/Rezi")
driver.execute_script("window.open('');")
driver.switch_to.window(driver.window_handles[1])
time.sleep(2)
driver.get('https://roms-download.com/roms/nintendo-wii')
await asyncio.sleep(2)
#b = driver.find_element_by_class_name('btn btn-h')
ac = ActionChains(driver)
ac.move_by_offset(5,5).click().perform()
await asyncio.sleep(1)
driver.find_element_by_class_name('form-control').send_keys(searchTerm)
await asyncio.sleep(1)
driver.find_element_by_id('searh_btn').click()
await asyncio.sleep(1)
link = driver.find_element_by_partial_link_text(f'{searchTerm.title()}')
link.click()
html = driver.page_source
bSoup = BeautifulSoup(html,'html.parser')
links_list = bSoup.find_all('a')
games_list = []
n_l = []
for link in links_list:
if 'href' in link.attrs:
games_list.append(str(link.attrs['href']))
# print(str(link.attrs['href']))
for i in games_list:
if '/download/roms/nintendo-wii/' in i:
n_l.append(i)
#ends multiuser
driver.close()
driver.switch_to.window(driver.window_handles[0])
time.sleep(2)
return [searchTerm,f"https://roms-download.com{n_l[0]}"]
| 49.744681 | 96 | 0.461933 |
3356818af0d5695969b3ddf29ee0b853e5daef10 | 1,560 | py | Python | tools/anchors_calc_yolo_format_annotation/kmeans_anchors.py | chenyuqing/darknet_yolov3_hardcore | f976b90f63cff1291aee341914a75dfabec53004 | ["MIT"] | null | null | null | tools/anchors_calc_yolo_format_annotation/kmeans_anchors.py | chenyuqing/darknet_yolov3_hardcore | f976b90f63cff1291aee341914a75dfabec53004 | ["MIT"] | null | null | null | tools/anchors_calc_yolo_format_annotation/kmeans_anchors.py | chenyuqing/darknet_yolov3_hardcore | f976b90f63cff1291aee341914a75dfabec53004 | ["MIT"] | null | null | null |
# -*- coding=utf-8 -*-
import glob
import os
import sys
import xml.etree.ElementTree as ET
import numpy as np
from kmeans import kmeans, avg_iou
# label path
labels_txt = "./labels/"
# number of clusters (anchors) to compute
CLUSTERS = 6
# network input size from the cfg file; width and height are assumed equal
SIZE = 608
# load YOLO-format annotation data
def load_dataset(labels_txt):
if not os.path.exists(labels_txt):
print('no labels folders, program abort')
sys.exit(0)
label_file = os.listdir(labels_txt)
print('label count: {}'.format(len(label_file)))
dataset = []
for label in label_file:
with open(os.path.join(labels_txt, label), 'r') as f:
txt_content = f.readlines()
for line in txt_content:
line_split = line.split(' ')
                roi_width = float(line_split[len(line_split)-2])
                roi_height = float(line_split[len(line_split)-1])
                if roi_width == 0 or roi_height == 0:
                    continue
                dataset.append([roi_width, roi_height])
                # print([roi_width, roi_height])
return np.array(dataset)
data = load_dataset(labels_txt)
out = kmeans(data, k=CLUSTERS)
kmeans_anchors = ""
print(out)
print("Accuracy: {:.2f}%".format(avg_iou(data, out) * 100))
for x, y in zip(out[:, 0], out[:, 1]):
kmeans_anchors += str(int(x*SIZE))+","+str(int(y*SIZE)) + ","
print(kmeans_anchors[:-1])
print('-------------------------------------')
print("Boxes:\n {}-{}".format(out[:, 0] * SIZE, out[:, 1] * SIZE))
ratios = np.around(out[:, 0] / out[:, 1], decimals=2).tolist()
print("Ratios:\n {}".format(sorted(ratios)))
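The `kmeans` and `avg_iou` helpers are imported from a local `kmeans.py` that is not shown in this record. The sketch below is one common way such IoU-based anchor clustering is implemented; it is a hedged re-implementation for reference, not necessarily identical to the author's module:

```python
import numpy as np

def iou(box, clusters):
    """IoU between one (w, h) box and k cluster boxes, all anchored at the origin."""
    w = np.minimum(clusters[:, 0], box[0])
    h = np.minimum(clusters[:, 1], box[1])
    intersection = w * h
    union = box[0] * box[1] + clusters[:, 0] * clusters[:, 1] - intersection
    return intersection / union

def avg_iou_sketch(boxes, clusters):
    """Mean best IoU of every box against its closest cluster."""
    return np.mean([np.max(iou(b, clusters)) for b in boxes])

def kmeans_sketch(boxes, k, seed=0):
    """Cluster (w, h) pairs using 1 - IoU as the distance; returns k anchor shapes."""
    rng = np.random.default_rng(seed)
    clusters = boxes[rng.choice(len(boxes), k, replace=False)]
    while True:
        # assign each box to the cluster with the highest IoU (lowest 1 - IoU)
        nearest = np.argmax(np.stack([iou(b, clusters) for b in boxes]), axis=1)
        new_clusters = np.array([
            boxes[nearest == i].mean(axis=0) if np.any(nearest == i) else clusters[i]
            for i in range(k)
        ])
        if np.allclose(new_clusters, clusters):
            return clusters
        clusters = new_clusters
```

The resulting relative (w, h) pairs are then scaled by `SIZE`, exactly as the loop over `out[:, 0]` and `out[:, 1]` above does when it builds the anchor string.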
| 26.896552 | 66 | 0.605128 |
d253d79511c1a8095ab9e30a52f63e8ceb4a152d | 2,897 | py | Python | dusty/systems/nfs/client.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | ["MIT"] | 421 | 2015-06-02T16:29:59.000Z | 2021-06-03T18:44:42.000Z | dusty/systems/nfs/client.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | ["MIT"] | 404 | 2015-06-02T20:23:42.000Z | 2019-08-21T16:59:41.000Z | dusty/systems/nfs/client.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | ["MIT"] | 16 | 2015-06-16T17:21:02.000Z | 2020-03-27T02:27:09.000Z |
from __future__ import absolute_import
import logging
from subprocess import CalledProcessError
import time
from ... import constants
from ..virtualbox import get_host_ip, check_output_on_vm, check_call_on_vm, call_on_vm
from ...log import log_to_client
from ...subprocess import call_demoted, check_call_demoted, check_output_demoted
from ...compiler.spec_assembler import get_all_repos
def mount_active_repos():
remount_repos(get_all_repos(active_only=True, include_specs_repo=False))
def remount_repos(repos):
_start_nfs_client()
for i, repo in enumerate(repos):
_unmount_repo(repo)
_mount_repo(repo, wait_for_server=(i==0))
def unmount_all_repos():
mounts = check_output_on_vm('mount | {{ grep {} || true; }}'.format(constants.VM_REPOS_DIR))
mounted_dirs = []
for mount in mounts.splitlines():
for word in mount.split(' '):
if constants.VM_REPOS_DIR in word:
mounted_dirs.append(word)
for mounted_dir in mounted_dirs:
_unmount_vm_dir(mounted_dir)
def _start_nfs_client():
check_call_on_vm('sudo /usr/local/etc/init.d/nfs-client start')
def _unmount_repo(repo):
_unmount_vm_dir(repo.vm_path)
def _unmount_vm_dir(vm_dir):
call_on_vm('sudo umount -l {}'.format(vm_dir))
def _mount_repo(repo, wait_for_server=False):
"""
This function will create the VM directory where a repo will be mounted, if it
doesn't exist. If wait_for_server is set, it will wait up to 10 seconds for
the nfs server to start, by retrying mounts that fail with 'Connection Refused'.
If wait_for_server is not set, it will attempt to run the mount command once
"""
check_call_on_vm('sudo mkdir -p {}'.format(repo.vm_path))
if wait_for_server:
for i in range(0,10):
try:
_run_mount_command(repo)
return
except CalledProcessError as e:
if 'Connection refused' in e.output:
logging.info('Failed to mount repo; waiting for nfsd to restart')
time.sleep(1)
else:
logging.info(e.output)
raise e
log_to_client('Failed to mount repo {}'.format(repo.short_name))
raise RuntimeError('Unable to mount repo with NFS')
else:
_run_mount_command(repo)
def _run_mount_command(repo):
# Check output is used here so that if it raises an error, the output can be parsed
return check_output_on_vm('sudo mount {}'.format(_nfs_mount_args_string(repo)), redirect_stderr=True)
def _nfs_mount_args_string(repo):
mount_string = '-t nfs {} '.format(_nfs_options_string())
mount_string += '{}:{} '.format(get_host_ip(), repo.local_path)
mount_string += repo.vm_path
return mount_string
def _nfs_options_string():
return '-o async,udp,noatime,nfsvers=3'
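A minimal usage sketch for the module above (the call sites are an assumption; only the function names come from this file):
from dusty.systems.nfs import client

client.mount_active_repos()   # (re)mount every active repo over NFS into the VM
client.unmount_all_repos()    # or tear all Dusty NFS mounts back down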
| 36.670886
| 105
| 0.689334
|
972a284dc1acb4af1f35a9aa3ee2fb22ceb0b4cf
| 882
|
py
|
Python
|
web/__init__.py
|
marcelb98/pycroft
|
34cc59d9ab7fdc0c20b09b4851111048a9f64d90
|
[
"Apache-2.0"
] | null | null | null |
web/__init__.py
|
marcelb98/pycroft
|
34cc59d9ab7fdc0c20b09b4851111048a9f64d90
|
[
"Apache-2.0"
] | null | null | null |
web/__init__.py
|
marcelb98/pycroft
|
34cc59d9ab7fdc0c20b09b4851111048a9f64d90
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
"""
web
~~~~~~~~~~~~~~
This package contains the web interface based on flask
:copyright: (c) 2012 by AG DSN.
"""
from flask import Flask, redirect, url_for, request, flash, render_template, current_app
from flask_login import current_user
from flask_babel import Babel
from jinja2 import StrictUndefined
from werkzeug.datastructures import ImmutableDict
from pycroft.helpers.i18n import gettext
from pycroft.model import session
from .app import make_app, PycroftFlask
from .blueprints import (
finance, infrastructure, properties, user, facilities, login)
from .blueprints.login import login_manager
from .form import widgets
| 35.28
| 88
| 0.763039
|
1b3b9a72169a034125b39f238d5aa4e3c95d1e9b
| 1,332
|
py
|
Python
|
tests/integration/test_containers/src/test3_kafka_util.py
|
rexengineering/metaflow
|
fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_containers/src/test3_kafka_util.py
|
rexengineering/metaflow
|
fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_containers/src/test3_kafka_util.py
|
rexengineering/metaflow
|
fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8
|
[
"Apache-2.0"
] | null | null | null |
'''Test script to simulate someone from outside the REXFlow swimlane doing the following:
1. Reading an Event from Kafka
2. Doing some tasks in that agent's swimlane
3. Pushing another Event back into the REXFlow swimlane using Kafka.
'''
import json
from confluent_kafka import Consumer, Producer
KAFKA_HOST = "my-cluster-kafka-bootstrap.kafka:9092"
KAFKA_TOPIC_READ = "test3_throw"
KAFKA_TOPIC_WRITE = "test3_catch"
GROUP_ID = "test3_external_swimlane"
consumer = Consumer({
'bootstrap.servers': KAFKA_HOST,
'group.id': GROUP_ID,
'auto.offset.reset': 'earliest'
})
producer = Producer({'bootstrap.servers': KAFKA_HOST})
def process_message(msg):
headers = dict(msg.headers())
assert 'x-flow-id' in headers
assert 'x-rexflow-wf-id' in headers
payload = json.loads(msg.value().decode())
payload['val'] *= 2
producer.produce(KAFKA_TOPIC_WRITE, json.dumps(payload).encode(), headers=headers)
producer.poll(0.1)
consumer.subscribe([KAFKA_TOPIC_READ])
print("Test3 Event Processing daemon: starting to poll", flush=True)
msg = consumer.poll(15)
processed = 0
while msg is not None:
processed += 1
process_message(msg)
msg = consumer.poll(10)
producer.poll(0.1)
producer.flush()
print(f"Test3 Event Processing daemon: done processing {processed} messages!", flush=True)
| 28.340426
| 90
| 0.734234
|
33b29d85771ddc84f143142424dcf1b4577411e9
| 2,906
|
py
|
Python
|
test/functional/wallet_zapwallettxes.py
|
ckti-ion-v4/ion
|
9eb70a07464c8f07d3206372485437e6ce16f278
|
[
"MIT"
] | 1
|
2021-05-04T09:18:39.000Z
|
2021-05-04T09:18:39.000Z
|
test/functional/wallet_zapwallettxes.py
|
ckti-ion-v4/ion
|
9eb70a07464c8f07d3206372485437e6ce16f278
|
[
"MIT"
] | 22
|
2019-06-14T17:34:02.000Z
|
2019-06-14T20:57:00.000Z
|
test/functional/wallet_zapwallettxes.py
|
ckti-ion-v4/ion
|
9eb70a07464c8f07d3206372485437e6ce16f278
|
[
"MIT"
] | 2
|
2020-07-12T10:24:08.000Z
|
2021-07-24T20:21:31.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two bitcoind nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and verify that the confirmed
  transactions are still available, but that the unconfirmed transaction has
  been zapped.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(2)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
print("Node 0 Balance %s" % self.nodes[0].getbalance())
assert_equal(self.nodes[0].getbalance(), 250250)
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
        # Stop node0 and restart with zapwallettxes. The unconfirmed transaction is
        # zapped from the wallet and is not re-added, since the mempool is not persisted.
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
        # tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
| 40.361111
| 112
| 0.708534
|
bf9dcb9b8d08c1e7e1caceb35702f2a90d858d95
| 3,016
|
py
|
Python
|
Machine Learing/Sequential_Bayesian_Learning/Sequential_Bayesian_Learning.py
|
roger8587/-Portfolio
|
752824c50514d5a5f3e4a561be18b7dce7741d91
|
[
"MIT"
] | null | null | null |
Machine Learing/Sequential_Bayesian_Learning/Sequential_Bayesian_Learning.py
|
roger8587/-Portfolio
|
752824c50514d5a5f3e4a561be18b7dce7741d91
|
[
"MIT"
] | null | null | null |
Machine Learing/Sequential_Bayesian_Learning/Sequential_Bayesian_Learning.py
|
roger8587/-Portfolio
|
752824c50514d5a5f3e4a561be18b7dce7741d91
|
[
"MIT"
] | null | null | null |
import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import multivariate_normal as normal
import scipy.stats as stats
# =============================================================================
# 1.Sequential Bayesian Learning
# =============================================================================
#%%
data=sio.loadmat('1_data.mat')
x = data['x']
t = data['t']
j = np.arange(0,3,1)
mu = j*2/3
beta = 1
alpha = 10**-6
N = [5,10,30,80]
s = 0.1
a = []
for i in range(3):
a.append((x - mu[i])/0.1)
#%%
def sigmoid(x):
return (1 / (1 + np.exp(-x)))
a1 = np.concatenate((sigmoid(a[0]),sigmoid(a[1]),sigmoid(a[2])),axis=1)
#%%
for n in N:
# 1-1
plt.figure()
plt.plot(x[:n],t[:n],'o')
target = t[:n]
PHI = a1[:n]
s0_inv = (10**-6) * np.identity(3)
sn_inv = s0_inv + PHI.T.dot(PHI)
sn = np.linalg.inv(sn_inv)
mn = sn.dot(PHI.T.dot(target))
mn_ = np.reshape(mn, 3)
line = np.linspace(0., 2., 50)
ws = normal(mn_, sn, 5)
for w in ws:
line1 = []
for i in range(3):
line1.append((line - mu[i])/0.1)
line1 = np.concatenate((sigmoid(line1[0]).reshape(50,1),sigmoid(line1[1]).reshape(50,1),sigmoid(line1[2]).reshape(50,1)),axis=1)
value = []
for point in line1:
value += [point.T.dot(w)]
plt.plot(line, value, linestyle ='--', zorder = 1)
plt.xlabel('X')
plt.ylabel('Y')
plt.title('N='+str(n))
plt.savefig('%d.png'%n)
# 1-2
plt.figure()
plt.plot(x[:n],t[:n],'o')
line2 = []
for i in range(3):
line2.append((line - mu[i])/0.1)
line2 = np.concatenate((sigmoid(line2[0]).reshape(50,1),sigmoid(line2[1]).reshape(50,1),sigmoid(line2[2]).reshape(50,1)),axis=1)
mx = []
vx = []
for point in line2:
        mx += [mn.T.dot(point)] # mean of the predictive distribution
        vx += [1. + (point.T.dot(sn)).dot(point)] # variance of the predictive distribution
mx = np.reshape(np.asarray(mx), len(mx))
vx = np.reshape(np.asarray(vx), len(vx))
plt.plot(line, mx, linestyle = '-', zorder = 1, color = 'red')
    plt.fill_between(line, mx-vx, mx+vx, color = 'pink') # shade the band around the predictive mean
plt.xlabel('X')
plt.ylabel('Y')
plt.title('N='+str(n))
plt.savefig('pred_%d.png'%n)
#%%
# 1-3
j1 = np.arange(0,2,1)
mu = j1*2/3
beta = 1
alpha = 10**-6
N = [5,10,30,80]
s = 0.1
a = []
for i in range(2):
a.append((x - mu[i])/0.1)
def sigmoid(x):
return (1 / (1 + np.exp(-x)))
a1 = np.concatenate((sigmoid(a[0]),sigmoid(a[1])),axis=1)
for n in N:
target = t[:n]
PHI = a1[:n]
s0_inv = (10**-6) * np.identity(2)
sn_inv = s0_inv + PHI.T.dot(PHI)
sn = np.linalg.inv(sn_inv)
mn = sn.dot(PHI.T.dot(target))
mn_ = np.reshape(mn, 2)
w = np.linspace(-10, 10, 100)
W = np.dstack(np.meshgrid(w, w))
prior_vals = stats.multivariate_normal(mn_, sn).pdf(W)
plt.figure()
plt.contourf(w, w, prior_vals, 100)
plt.xlabel('w0')
plt.ylabel('w1')
plt.title('N='+str(n))
plt.savefig('a_%d.png'%n)
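For reference (an added note, not part of the script): the loops above implement the standard Bayesian linear regression update with prior precision alpha = 1e-6 and noise precision beta = 1, written out in LaTeX:
% posterior over the weights (sn_inv and mn in the code)
S_N^{-1} = \alpha I + \beta\,\Phi^\top \Phi, \qquad m_N = \beta\, S_N \Phi^\top \mathbf{t}
% predictive distribution at a new input x (mx and vx in the code)
m(x) = m_N^\top \phi(x), \qquad \sigma^2(x) = \frac{1}{\beta} + \phi(x)^\top S_N \phi(x)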
| 28.186916
| 136
| 0.524536
|
019e3035ebfcf526e2f17cb6d194475ad48eee47
| 992
|
py
|
Python
|
09WebFramework/day02/basic03.py
|
HaoZhang95/PythonAndMachineLearning
|
b897224b8a0e6a5734f408df8c24846a98c553bf
|
[
"MIT"
] | 937
|
2019-05-08T08:46:25.000Z
|
2022-03-31T12:56:07.000Z
|
09WebFramework/day02/basic03.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 47
|
2019-09-17T10:06:02.000Z
|
2022-03-11T23:46:52.000Z
|
09WebFramework/day02/basic03.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 354
|
2019-05-10T02:15:26.000Z
|
2022-03-30T05:52:57.000Z
|
"""
静态url:一般html结尾的都是静态的url,每个网页有真实的物理路径,存在服务器中
优点:对seo有加分影响,因为打开速度快
缺点:太多的静态网页,占用硬盘空间
动态url:类似php,asp,py等结尾的,或者带有?id=5类似的url,动态生成的url,每个url只是逻辑地址,并不是真正存在服务器中的
优点:修改网页方便,因为是逻辑地址,所以占用硬盘空间少
缺点:因为需要运算,所以打开速度稍慢
伪静态url:xxx/coure/95.html看起来是一个静态的url,但是并不存在真实的物理地址在服务器中
优点和缺点:处于静态url和动态url之间,是一个折中的方案
"""
"""
dynamic文件夹下存在隐藏的__pycache__文件夹,里面是pyc格式的文件,保存的是已经翻译好的被导入模块的二进制代码
python解释器把python程序编译为二进制供机器运行
无论导入的方式是from xxx还是import XXX,那么这个模块引入的时候都会被从头到尾执行一遍
使用if __name__ == '__main__':
当一个模块main.py导入了test.py那么test.py会被从头到尾执行一遍,只要执行,那么就需要解释器翻译为二进制
如果main.py中导入了大量的其他模块,那么python解释器需要大量的解释和翻译,根据实际情况,如果test.py如果没有被修改过
那么下一次被导入的时候,依然会被解释器花时间和资源进行翻译,为了提高下一次导入的效率,python解释器在第一次导入一个模块时
它会将翻译之后的字节码,保存在__pacache__文件夹下以test.py --> test.cpython-35.pyc(cpython表示c语言的python解释器, 35表示python3.5的解释器)
当下一次导入这个模块的时候,会先到__pycache__文件夹下寻找对应的文件名,如果找到了,根据一定的条件,判断是否需要重新翻译,加快了翻译速度
"""
if __name__ == '__main__':
pass
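A tiny sketch of the caching behaviour described in the docstring above, using the standard-library helper (this example is an addition, not part of the original file):
import importlib.util
# where CPython would place the cached bytecode for this module
print(importlib.util.cache_from_source('basic03.py'))
# e.g. __pycache__/basic03.cpython-35.pyc on a Python 3.5 interpreter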
| 35.428571
| 109
| 0.777218
|
f0a235ba1410c60a75a5ff98ea433c6882c8f7fa
| 2,877
|
py
|
Python
|
botoy/parser/group.py
|
First-frost/botoy
|
20a6f32c61beb045a9dbea11f0b1744fc6a40a60
|
[
"MIT"
] | 1
|
2021-06-17T10:20:45.000Z
|
2021-06-17T10:20:45.000Z
|
botoy/parser/group.py
|
First-frost/botoy
|
20a6f32c61beb045a9dbea11f0b1744fc6a40a60
|
[
"MIT"
] | null | null | null |
botoy/parser/group.py
|
First-frost/botoy
|
20a6f32c61beb045a9dbea11f0b1744fc6a40a60
|
[
"MIT"
] | null | null | null |
import json
import re
from typing import List, Optional
from pydantic import BaseModel
from ..collection import MsgTypes
from ..model import GroupMsg
# at
class UserExtItem(BaseModel):
QQNick: str
QQUid: int
class AT(BaseModel):
Content: str
UserExt: List[UserExtItem]
UserID: List[int]
def at(ctx: GroupMsg, clean=True) -> Optional[AT]:
"""艾特@ AtMsg
:param clean: 如果为``True``将会清除发送文字内容中包含的被AT用户的昵称
"""
try:
assert ctx.MsgType == MsgTypes.AtMsg
ret = AT(**json.loads(ctx.Content))
if clean:
ret_dict = ret.dict()
for user in ret.UserExt:
ret_dict["Content"] = re.sub(
f"@{user.QQNick}\\s+", "", ret_dict["Content"]
)
ret = AT(**ret_dict)
return ret
except Exception:
pass
return None
# reply
class Reply(BaseModel):
Content: str
SrcContent: str
MsgSeq: int
UserID: List[int]
Tips: str = "[回复]"
def reply(ctx: GroupMsg) -> Optional[Reply]:
"""回复 AtMsg"""
try:
assert ctx.MsgType == MsgTypes.AtMsg
data = json.loads(ctx.Content)
users = []
for user in data["UserID"]:
if user not in users:
users.append(user)
data["UserID"] = users
return Reply(**data)
except Exception:
pass
return None
# picture
class PicItem(BaseModel):
FileId: int
FileMd5: str
FileSize: int
ForwordBuf: str
ForwordField: int
Url: str
class Pic(BaseModel):
GroupPic: List[PicItem]
Content: str = ""
    Tips: str  # FIXME: handle this case; the message could not be reproduced [好友图片] (friend picture)
def pic(ctx: GroupMsg) -> Optional[Pic]:
"""图片 PicMsg"""
try:
assert ctx.MsgType == MsgTypes.PicMsg
return Pic(**json.loads(ctx.Content))
except Exception:
pass
return None
# voice
class Voice(BaseModel):
Url: str
Tips: str = "语音"
def voice(ctx: GroupMsg) -> Optional[Voice]:
"""语音 VoiceMsg"""
try:
assert ctx.MsgType == MsgTypes.VoiceMsg
return Voice(**json.loads(ctx.Content))
except Exception:
pass
return None
# video
class Video(BaseModel):
ForwordBuf: str
ForwordField: int
VideoMd5: str
VideoSize: str
VideoUrl: str
Tips: str
def video(ctx: GroupMsg) -> Optional[Video]:
"""视频 VideoMsg"""
try:
assert ctx.MsgType == MsgTypes.VideoMsg
return Video(**json.loads(ctx.Content))
except Exception:
pass
return None
# file
class File(BaseModel):
FileID: str
FileName: str
FileSize: int
Tips: str = "[群文件]"
def file(ctx: GroupMsg) -> Optional[File]:
"""文件 GroupFileMsg"""
try:
assert ctx.MsgType == MsgTypes.GroupFileMsg
return File(**json.loads(ctx.Content))
except Exception:
pass
return None
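A minimal sketch of how these parsers might be used from a botoy group-message receiver (the receiver wiring below is an assumption; only the parser call comes from this module):
from botoy.parser import group as gp

def receive_group_msg(ctx):  # ctx is a GroupMsg
    at_info = gp.at(ctx, clean=True)
    if at_info is not None:
        print(at_info.Content, at_info.UserID)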
| 18.927632
| 66
| 0.58846
|
0af454b329b141135a52f88511837cde180cf082
| 22,964
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_network_interface_tap_configurations_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_network_interface_tap_configurations_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_network_interface_tap_configurations_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceTapConfigurationsOperations(object):
"""NetworkInterfaceTapConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified tap configuration from the NetworkInterface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
tap_configuration_name=tap_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterfaceTapConfiguration"
"""Get the specified tap configuration on a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceTapConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.NetworkInterfaceTapConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
tap_configuration_parameters, # type: "_models.NetworkInterfaceTapConfiguration"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterfaceTapConfiguration"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tap_configuration_parameters, 'NetworkInterfaceTapConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
tap_configuration_parameters, # type: "_models.NetworkInterfaceTapConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkInterfaceTapConfiguration"]
"""Creates or updates a Tap configuration in the specified NetworkInterface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:param tap_configuration_parameters: Parameters supplied to the create or update tap
configuration operation.
:type tap_configuration_parameters: ~azure.mgmt.network.v2019_04_01.models.NetworkInterfaceTapConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkInterfaceTapConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_04_01.models.NetworkInterfaceTapConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
tap_configuration_name=tap_configuration_name,
tap_configuration_parameters=tap_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkInterfaceTapConfigurationListResult"]
"""Get all Tap configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceTapConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.NetworkInterfaceTapConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceTapConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations'} # type: ignore
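As the class docstring notes, this generated operations class is reached through the service client rather than instantiated directly. A hedged sketch of that calling pattern; the attribute name and credential setup follow the usual azure-mgmt-network layout and are assumptions, not taken from this file:
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
poller = client.network_interface_tap_configurations.begin_delete(
    "my-rg", "my-nic", "my-tap-config")
poller.result()  # block until the long-running delete completes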
| 52.190909
| 244
| 0.68198
|
276d7bc8c80d7688ecadb70836aa07eb3e405fa2
| 299
|
py
|
Python
|
website/maintenance/admin/__init__.py
|
JobDoesburg/landolfio
|
4cbf31c2e6f93745f5aa0d20893bf20f3acecc6e
|
[
"MIT"
] | 1
|
2021-02-24T14:33:09.000Z
|
2021-02-24T14:33:09.000Z
|
website/maintenance/admin/__init__.py
|
JobDoesburg/landolfio
|
4cbf31c2e6f93745f5aa0d20893bf20f3acecc6e
|
[
"MIT"
] | 2
|
2022-01-13T04:03:38.000Z
|
2022-03-12T01:03:10.000Z
|
website/maintenance/admin/__init__.py
|
JobDoesburg/landolfio
|
4cbf31c2e6f93745f5aa0d20893bf20f3acecc6e
|
[
"MIT"
] | null | null | null |
from maintenance.admin.asset_amortizations import *
from maintenance.admin.maintenance_assets import *
from maintenance.admin.maintenance_ticket import *
from maintenance.admin.reviews import *
from maintenance.admin.under_review_assets import *
from maintenance.admin.maintenance_provider import *
| 42.714286
| 52
| 0.859532
|
9c6d86d1f08db9656bb20e6504c7f79e68e3a228
| 1,598
|
py
|
Python
|
hailo_model_zoo/core/preprocessing/pose_preprocessing.py
|
markgrobman/hailo_model_zoo
|
2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf
|
[
"MIT"
] | 2
|
2021-07-20T15:09:51.000Z
|
2021-11-17T11:05:02.000Z
|
hailo_model_zoo/core/preprocessing/pose_preprocessing.py
|
markgrobman/hailo_model_zoo
|
2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf
|
[
"MIT"
] | null | null | null |
hailo_model_zoo/core/preprocessing/pose_preprocessing.py
|
markgrobman/hailo_model_zoo
|
2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import cv2
import numpy as np
import math
def _openpose_padding(img, desired_dims, pad_value=0):
h, w, _ = img.shape
assert h <= desired_dims[0] and w <= desired_dims[1]
pad = []
pad.append(int(math.floor((desired_dims[0] - h) / 2.0)))
pad.append(int(math.floor((desired_dims[1] - w) / 2.0)))
pad.append(int(desired_dims[0] - h - pad[0]))
pad.append(int(desired_dims[1] - w - pad[1]))
padded_img = cv2.copyMakeBorder(img, pad[0], pad[2], pad[1], pad[3],
cv2.BORDER_CONSTANT, value=pad_value) \
if pad != [0, 0, 0, 0] else img
return padded_img, pad
def _openpose_preproc(img, desired_height, desired_width):
height_in, width_in, _ = img.shape
ratio = min(desired_height / float(height_in), desired_width / float(width_in))
scaled_img = cv2.resize(img, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
desired_dims = [desired_height, desired_width]
padded_img, pad = _openpose_padding(scaled_img, desired_dims)
return padded_img, pad
def openpose_tf_preproc(img, image_info, desired_height, desired_width, **kwargs):
res_tens, pad = tf.py_func(_openpose_preproc,
[img, desired_height, desired_width], (tf.float32, tf.int64))
image_info["pad"] = pad
image_info["orig_shape"] = tf.shape(img)
return (tf.cast(res_tens, tf.float32), image_info)
def openpose_denormalize(img, img_mean=128.0, img_scale=1 / 256.0):
img = np.array(img, dtype=np.float32)
img = (img / img_scale) + img_mean
return img
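A quick numeric check of _openpose_padding, illustrative only and meant to be run alongside the module above: a 100x150 image padded up to 120x160 should receive 10 px on top/bottom and 5 px on left/right.
import numpy as np
img = np.zeros((100, 150, 3), dtype=np.uint8)
padded, pad = _openpose_padding(img, [120, 160])
print(padded.shape, pad)  # (120, 160, 3) [10, 5, 10, 5]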
| 37.162791
| 92
| 0.661452
|
af53be5e463c9b102ff19647284aba3c047bdf37
| 23,340
|
py
|
Python
|
music.py
|
Doominicprog/bot-discord-music
|
4dd6d573dd2cad414ec6d69cb666e7cde3d58530
|
[
"CC0-1.0"
] | 1
|
2020-08-31T01:52:23.000Z
|
2020-08-31T01:52:23.000Z
|
music.py
|
Doominicprog/bot-discord-music
|
4dd6d573dd2cad414ec6d69cb666e7cde3d58530
|
[
"CC0-1.0"
] | null | null | null |
music.py
|
Doominicprog/bot-discord-music
|
4dd6d573dd2cad414ec6d69cb666e7cde3d58530
|
[
"CC0-1.0"
] | 1
|
2020-08-31T01:52:41.000Z
|
2020-08-31T01:52:41.000Z
|
import discord,asyncio,random,youtube_dl,string,os
from discord.ext import commands
from googleapiclient.discovery import build
from discord.ext.commands import command
# import pymongo
#NOTE: Import pymongo if you are using the database function commands
#NOTE: Also add `pymongo` and `dnspython` inside the requirements.txt file if you are using pymongo
#TODO: CREATE PLAYLIST SUPPORT FOR MUSIC
#NOTE: Without database, the music bot will not save your volume
#flat-playlist:True?
#extract_flat:True
ytdl_format_options= {
'format': 'bestaudio/best',
'outtmpl': '{}',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': True,
'logtostderr': False,
"extractaudio":True,
"audioformat":"opus",
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' #bind to ipv4 since ipv6 addresses cause issues sometimes
}
stim= {
'default_search': 'auto',
"ignoreerrors":True,
'quiet': True,
"no_warnings": True,
"simulate": True, # do not keep the video files
"nooverwrites": True,
"keepvideo": False,
"noplaylist": True,
"skip_download": False,
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn',
# 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5'
}
class Downloader(discord.PCMVolumeTransformer):
def __init__(self,source,*,data,volume=0.5):
super().__init__(source,volume)
self.data=data
self.title=data.get('title')
self.url=data.get("url")
self.thumbnail=data.get('thumbnail')
self.duration=data.get('duration')
self.views=data.get('view_count')
self.playlist={}
@classmethod
async def yt_download(cls,url,ytdl,*,loop=None,stream=False):
"""
Download video directly with link
"""
API_KEY='API_KEY'
youtube=build('youtube','v3',developerKey=API_KEY)
        data=youtube.search().list(part='snippet',q=url).execute()
        song_url=f"https://www.youtube.com/watch?v={data['items'][0]['id']['videoId']}"  # first search result
        song_info=data['items'][0]
        download= await loop.run_in_executor(None,lambda: ytdl.extract_info(song_url,download=not stream))
        filename=download['url'] if stream else ytdl.prepare_filename(download)
return cls(discord.FFmpegPCMAudio(filename,**ffmpeg_options),data=download),song_info
async def yt_info(self,song):
"""
Get info from youtube
"""
API_KEY='API_KEY'
youtube=build('youtube','v3',developerKey=API_KEY)
        song_data=youtube.search().list(part='snippet',q=song).execute()
        return song_data['items'][0]
@classmethod
async def video_url(cls,url,ytdl,*,loop=None,stream=False):
"""
Download the song file and data
"""
loop=loop or asyncio.get_event_loop()
data= await loop.run_in_executor(None,lambda: ytdl.extract_info(url,download=not stream))
data1={'queue':[]}
if 'entries' in data:
if len(data['entries']) >1:
playlist_titles=[title['title'] for title in data['entries']]
data1={'title':data['title'],'queue':playlist_titles}
data1['queue'].pop(0)
data=data['entries'][0]
filename=data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename,**ffmpeg_options),data=data),data1
async def get_info(self,url):
"""
Get the info of the next song by not downloading the actual file but just the data of song/query
"""
yt=youtube_dl.YoutubeDL(stim)
down=yt.extract_info(url,download=False)
data1={'queue':[]}
if 'entries' in down:
if len(down['entries']) > 1:
playlist_titles=[title['title'] for title in down['entries']]
data1={'title':down['title'],'queue':playlist_titles}
down=down['entries'][0]['title']
return down,data1
class MusicPlayer(commands.Cog,name='Music'):
def __init__(self,client):
self.bot=client
# self.database = pymongo.MongoClient(os.getenv('MONGO'))['Discord-Bot-Database']['General']
# self.music=self.database.find_one('music')
self.player={
"audio_files":[]
}
@property
def random_color(self):
return discord.Color.from_rgb(random.randint(1,255),random.randint(1,255),random.randint(1,255))
# def cog_unload(self):
# """
# Update the database in mongodb to the latest changes when the bot is disconnecting
# """
# current=self.database.find_one('music')
# if current != self.voice:
# self.database.update_one({'_id':'music'},{'$set':self.music})
@commands.Cog.listener('on_voice_state_update')
async def music_voice(self,user,before,after):
"""
Clear the server's playlist after bot leave the voice channel
"""
if after.channel is None and user.id == self.bot.user.id:
try:
self.player[user.guild.id]['queue'].clear()
except KeyError:
#NOTE: server ID not in bot's local self.player dict
print(f"Failed to get guild id {user.guild.id}") #Server ID lost or was not in data before disconnecting
async def filename_generator(self):
"""
Generate a unique file name for the song file to be named as
"""
chars=list(string.ascii_letters+string.digits)
name=''
for i in range(random.randint(9,25)):
name+=random.choice(chars)
if name not in self.player['audio_files']:
return name
return await self.filename_generator()
async def playlist(self,data,msg):
"""
THIS FUNCTION IS FOR WHEN YOUTUBE LINK IS A PLAYLIST
Add song into the server's playlist inside the self.player dict
"""
for i in data['queue']:
self.player[msg.guild.id]['queue'].append({'title':i,'author':msg})
async def queue(self,msg,song):
"""
Add the query/song to the queue of the server
"""
title1=await Downloader.get_info(self,url=song)
title=title1[0]
data=title1[1]
#NOTE:needs fix here
if data['queue']:
await self.playlist(data,msg)
#NOTE: needs to be embeded to make it better output
return await msg.send(f"Added playlist {data['title']} to queue")
self.player[msg.guild.id]['queue'].append({'title':title,'author':msg})
return await msg.send(f"**{title} added to queue**".title())
async def voice_check(self,msg):
"""
        Make the bot leave the voice channel if no music has been playing for longer than 2 minutes
"""
if msg.voice_client is not None:
await asyncio.sleep(120)
if msg.voice_client is not None and msg.voice_client.is_playing() is False and msg.voice_client.is_paused() is False:
await msg.voice_client.disconnect()
async def clear_data(self,msg):
"""
Clear the local dict data
name - remove file name from dict
remove file and filename from directory
remove filename from global audio file names
"""
name=self.player[msg.guild.id]['name']
os.remove(name)
self.player['audio_files'].remove(name)
async def loop_song(self,msg):
"""
Loop the currently playing song by replaying the same audio file via `discord.PCMVolumeTransformer()`
"""
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(self.player[msg.guild.id]['name']))
loop=asyncio.get_event_loop()
try:
msg.voice_client.play(source, after=lambda a: loop.create_task(self.done(msg)))
msg.voice_client.source.volume=self.player[msg.guild.id]['volume']
# if str(msg.guild.id) in self.music:
# msg.voice_client.source.volume=self.music['vol']/100
except Exception as Error:
#Has no attribute play
print(Error) #NOTE: output back the error for later debugging
async def done(self,msg,msgId:int=None):
"""
Function to run once song completes
Delete the "Now playing" message via ID
"""
if msgId:
try:
message=await msg.channel.fetch_message(msgId)
await message.delete()
except Exception as Error:
print("Failed to get the message")
if self.player[msg.guild.id]['reset'] is True:
self.player[msg.guild.id]['reset']=False
return await self.loop_song(msg)
if msg.guild.id in self.player and self.player[msg.guild.id]['repeat'] is True:
return await self.loop_song(msg)
await self.clear_data(msg)
if self.player[msg.guild.id]['queue']:
queue_data=self.player[msg.guild.id]['queue'].pop(0)
return await self.start_song(msg=queue_data['author'],song=queue_data['title'])
else:
await self.voice_check(msg)
async def start_song(self,msg,song):
new_opts=ytdl_format_options.copy()
audio_name=await self.filename_generator()
self.player['audio_files'].append(audio_name)
new_opts['outtmpl']=new_opts['outtmpl'].format(audio_name)
ytdl=youtube_dl.YoutubeDL(new_opts)
download1=await Downloader.video_url(song,ytdl=ytdl,loop=self.bot.loop)
download=download1[0]
data=download1[1]
self.player[msg.guild.id]['name']=audio_name
emb=discord.Embed(colour=self.random_color, title='Now Playing',description=download.title,url=download.url)
emb.set_thumbnail(url=download.thumbnail)
emb.set_footer(text=f'Requested by {msg.author.display_name}',icon_url=msg.author.avatar_url)
loop=asyncio.get_event_loop()
if data['queue']:
await self.playlist(data,msg)
msgId=await msg.send(embed=emb)
self.player[msg.guild.id]['player']=download
self.player[msg.guild.id]['author']=msg
msg.voice_client.play(download,after=lambda a: loop.create_task(self.done(msg,msgId.id)))
# if str(msg.guild.id) in self.music: #NOTE adds user's default volume if in database
# msg.voice_client.source.volume=self.music[str(msg.guild.id)]['vol']/100
msg.voice_client.source.volume=self.player[msg.guild.id]['volume']
return msg.voice_client
@command()
async def play(self,msg,*,song):
"""
Play a song with given url or title from Youtube
`Ex:` s.play Titanium David Guetta
`Command:` play(song_name)
"""
if msg.guild.id in self.player:
if msg.voice_client.is_playing() is True:#NOTE: SONG CURRENTLY PLAYING
return await self.queue(msg,song)
if self.player[msg.guild.id]['queue']:
return await self.queue(msg,song)
if msg.voice_client.is_playing() is False and not self.player[msg.guild.id]['queue']:
return await self.start_song(msg,song)
else:
#IMPORTANT: THE ONLY PLACE WHERE NEW `self.player[msg.guild.id]={}` IS CREATED
self.player[msg.guild.id]={
'player':None,
'queue':[],
'author':msg,
'name':None,
"reset":False,
'repeat':False,
'volume': 0.5
}
return await self.start_song(msg,song)
@play.before_invoke
async def before_play(self,msg):
"""
Check voice_client
- User voice = None:
please join a voice channel
- bot voice == None:
joins the user's voice channel
- user and bot voice NOT SAME:
- music NOT Playing AND queue EMPTY
join user's voice channel
- items in queue:
please join the same voice channel as the bot to add song to queue
"""
if msg.author.voice is None:
return await msg.send('**Please join a voice channel to play music**'.title())
if msg.voice_client is None:
return await msg.author.voice.channel.connect()
if msg.voice_client.channel != msg.author.voice.channel:
#NOTE: Check player and queue
if msg.voice_client.is_playing() is False and not self.player[msg.guild.id]['queue']:
return await msg.voice_client.move_to(msg.author.voice.channel)
#NOTE: move bot to user's voice channel if queue does not exist
if self.player[msg.guild.id]['queue']:
#NOTE: user must join same voice channel if queue exist
return await msg.send("Please join the same voice channel as the bot to add song to queue")
@commands.has_permissions(manage_channels=True)
@command()
async def repeat(self,msg):
"""
        Repeat the currently playing song, or turn repeat off by using the command again
`Ex:` .repeat
`Command:` repeat()
"""
if msg.guild.id in self.player:
if msg.voice_client.is_playing() is True:
if self.player[msg.guild.id]['repeat'] is True:
self.player[msg.guild.id]['repeat']=False
return await msg.message.add_reaction(emoji='✅')
self.player[msg.guild.id]['repeat']=True
return await msg.message.add_reaction(emoji='✅')
return await msg.send("No audio currently playing")
return await msg.send("Bot not in voice channel or playing music")
@commands.has_permissions(manage_channels=True)
@command(aliases=['restart-loop'])
async def reset(self,msg):
"""
        Restart the currently playing song from the beginning
`Ex:` s.reset
`Command:` reset()
"""
if msg.voice_client is None:
return await msg.send(f"**{msg.author.display_name}, there is no audio currently playing from the bot.**")
if msg.author.voice is None or msg.author.voice.channel != msg.voice_client.channel:
return await msg.send(f"**{msg.author.display_name}, you must be in the same voice channel as the bot.**")
if self.player[msg.guild.id]['queue'] and msg.voice_client.is_playing() is False:
return await msg.send("**No audio currently playing or songs in queue**".title(),delete_after=25)
self.player[msg.guild.id]['reset']=True
msg.voice_client.stop()
@commands.has_permissions(manage_channels=True)
@command()
async def skip(self,msg):
"""
Skip the current playing song
`Ex:` s.skip
`Command:` skip()
"""
if msg.voice_client is None:
return await msg.send("**No music currently playing**".title(),delete_after=60)
if msg.author.voice is None or msg.author.voice.channel != msg.voice_client.channel:
return await msg.send("Please join the same voice channel as the bot")
if self.player[msg.guild.id]['queue'] and msg.voice_client.is_playing() is False:
return await msg.send("**No songs in queue to skip**".title(),delete_after=60)
self.player[msg.guild.id]['repeat']=False
msg.voice_client.stop()
return await msg.message.add_reaction(emoji='✅')
@commands.has_permissions(manage_channels=True)
@command()
async def stop(self,msg):
"""
Stop the current playing songs and clear the queue
`Ex:` s.stop
`Command:` stop()
"""
if msg.voice_client is None:
return await msg.send("Bot is not connect to a voice channel")
if msg.author.voice is None:
return await msg.send("You must be in the same voice channel as the bot")
if msg.author.voice is not None and msg.voice_client is not None:
if msg.voice_client.is_playing() is True or self.player[msg.guild.id]['queue']:
self.player[msg.guild.id]['queue'].clear()
self.player[msg.guild.id]['repeat']=False
msg.voice_client.stop()
return await msg.message.add_reaction(emoji='✅')
return await msg.send(f"**{msg.author.display_name}, there is no audio currently playing or songs in queue**")
@commands.has_permissions(manage_channels=True)
@command(aliases=['get-out','disconnect','leave-voice'])
async def leave(self,msg):
"""
Disconnect the bot from the voice channel
`Ex:` s.leave
`Command:` leave()
"""
if msg.author.voice is not None and msg.voice_client is not None:
if msg.voice_client.is_playing() is True or self.player[msg.guild.id]['queue']:
self.player[msg.guild.id]['queue'].clear()
msg.voice_client.stop()
return await msg.voice_client.disconnect(), await msg.message.add_reaction(emoji='✅')
return await msg.voice_client.disconnect(), await msg.message.add_reaction(emoji='✅')
if msg.author.voice is None:
return await msg.send("You must be in the same voice channel as bot to disconnect it via command")
@commands.has_permissions(manage_channels=True)
@command()
async def pause(self,msg):
"""
Pause the currently playing audio
`Ex:` s.pause
`Command:` pause()
"""
if msg.author.voice is not None and msg.voice_client is not None:
if msg.voice_client.is_paused() is True:
return await msg.send("Song is already paused")
if msg.voice_client.is_paused() is False:
msg.voice_client.pause()
await msg.message.add_reaction(emoji='✅')
@commands.has_permissions(manage_channels=True)
@command()
async def resume(self,msg):
"""
Resume the currently paused audio
`Ex:` s.resume
`Command:` resume()
"""
if msg.author.voice is not None and msg.voice_client is not None:
if msg.voice_client.is_paused() is False:
return await msg.send("Song is already playing")
if msg.voice_client.is_paused() is True:
msg.voice_client.resume()
return await msg.message.add_reaction(emoji='✅')
@command(name='queue',aliases=['song-list','q','current-songs'])
async def _queue(self,msg):
"""
Show the current songs in queue
`Ex:` s.queue
`Command:` queue()
"""
if msg.voice_client is not None:
if msg.guild.id in self.player:
if self.player[msg.guild.id]['queue']:
emb=discord.Embed(colour=self.random_color, title='queue')
emb.set_footer(text=f'Command used by {msg.author.name}',icon_url=msg.author.avatar_url)
for i in self.player[msg.guild.id]['queue']:
emb.add_field(name=f"**{i['author'].author.name}**",value=i['title'],inline=False)
return await msg.send(embed=emb,delete_after=120)
return await msg.send("No songs in queue")
@command(name='song-info',aliases=['song?','nowplaying','current-song'])
async def song_info(self,msg):
"""
Show information about the current playing song
`Ex:` s.song-info
        `Command:` song-info()
"""
if msg.voice_client is not None and msg.voice_client.is_playing() is True:
emb=discord.Embed(colour=self.random_color, title='Currently Playing',description=self.player[msg.guild.id]['player'].title)
emb.set_footer(text=f"{self.player[msg.guild.id]['author'].author.name}",icon_url=msg.author.avatar_url)
emb.set_thumbnail(url=self.player[msg.guild.id]['player'].thumbnail)
return await msg.send(embed=emb,delete_after=120)
return await msg.send(f"**No songs currently playing**".title(),delete_after=30)
@command(aliases=['move-bot','move-b','mb','mbot'])
async def join(self, msg, *, channel: discord.VoiceChannel=None):
"""
Make bot join a voice channel you are in if no channel is mentioned
`Ex:` .join (If voice channel name is entered, it'll join that one)
`Command:` join(channel:optional)
"""
if msg.voice_client is not None:
return await msg.send(f"Bot is already in a voice channel\nDid you mean to use {msg.prefix}moveTo")
if msg.voice_client is None:
if channel is None:
return await msg.author.voice.channel.connect(), await msg.message.add_reaction(emoji='✅')
return await channel.connect(), await msg.message.add_reaction(emoji='✅')
else:
if msg.voice_client.is_playing() is False and not self.player[msg.guild.id]['queue']:
return await msg.author.voice.channel.connect(), await msg.message.add_reaction(emoji='✅')
@join.before_invoke
async def before_join(self,msg):
if msg.author.voice is None:
return await msg.send("You are not in a voice channel")
@join.error
async def join_error(self,msg,error):
if isinstance(error,commands.BadArgument):
            return await msg.send(error)
if error.args[0] == 'Command raised an exception: Exception: playing':
return await msg.send("**Please join the same voice channel as the bot to add song to queue**".title())
@commands.has_permissions(manage_channels=True)
@command(aliases=['vol'])
async def volume(self,msg,vol:int):
"""
Change the volume of the bot
`Ex:` .vol 100 (200 is the max)
`Permission:` manage_channels
`Command:` volume(amount:integer)
"""
if vol > 200:
vol = 200
vol=vol/100
if msg.author.voice is not None:
if msg.voice_client is not None:
if msg.voice_client.channel == msg.author.voice.channel and msg.voice_client.is_playing() is True:
msg.voice_client.source.volume=vol
self.player[msg.guild.id]['volume']=vol
# if (msg.guild.id) in self.music:
# self.music[str(msg.guild.id)]['vol']=vol
return await msg.message.add_reaction(emoji='✅')
return await msg.send("**Please join the same voice channel as the bot to use the command**".title(),delete_after=30)
@volume.error
async def volume_error(self,msg,error):
if isinstance(error,commands.MissingPermissions):
return await msg.send("Manage channels or admin perms required to change volume",delete_after=30)
def setup(bot):
bot.add_cog(MusicPlayer(bot))
| 36.074189
| 136
| 0.605013
|
ac5911487cf39817e433760dd1bf09204a97409b
| 769
|
py
|
Python
|
EmeraldAI/Logic/Memory/STT.py
|
MaxMorgenstern/EmeraldAI
|
4bbb50f94b656904c428fc28cdbb61e7353e61aa
|
[
"Apache-2.0"
] | null | null | null |
EmeraldAI/Logic/Memory/STT.py
|
MaxMorgenstern/EmeraldAI
|
4bbb50f94b656904c428fc28cdbb61e7353e61aa
|
[
"Apache-2.0"
] | null | null | null |
EmeraldAI/Logic/Memory/STT.py
|
MaxMorgenstern/EmeraldAI
|
4bbb50f94b656904c428fc28cdbb61e7353e61aa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Config.Config import Config
if(Config().Get("Database", "ConversationDatabaseType").lower() == "sqlite"):
from EmeraldAI.Logic.Database.SQlite3 import SQlite3 as db
elif(Config().Get("Database", "ConversationDatabaseType").lower() == "mysql"):
from EmeraldAI.Logic.Database.MySQL import MySQL as db
from EmeraldAI.Logic.Memory.Base import Base
class STT(Base):
__metaclass__ = Singleton
def __init__(self):
query = """SELECT ID FROM Memory WHERE ParentID = '1' AND Key = 'STT'"""
sqlResult = db().Fetchall(query)
parentID = -1
for r in sqlResult:
parentID = r[0]
Base.__init__(self, parentID)
| 34.954545
| 80
| 0.677503
|
0c25bb030855d9f766c75949230b1e19bb8ff41c
| 2,022
|
py
|
Python
|
code/python3/index_facets.py
|
jaylett/xapian-docsprint
|
2e8fdffecf71f7042c0abe49924ba48c11818b7e
|
[
"MIT"
] | 47
|
2015-01-20T15:38:41.000Z
|
2022-02-15T21:03:50.000Z
|
code/python3/index_facets.py
|
jaylett/xapian-docsprint
|
2e8fdffecf71f7042c0abe49924ba48c11818b7e
|
[
"MIT"
] | 16
|
2015-06-09T16:12:50.000Z
|
2020-02-05T06:40:18.000Z
|
code/python3/index_facets.py
|
jaylett/xapian-docsprint
|
2e8fdffecf71f7042c0abe49924ba48c11818b7e
|
[
"MIT"
] | 56
|
2015-01-20T15:38:44.000Z
|
2022-03-03T18:13:39.000Z
|
#!/usr/bin/env python
import json
import sys
import xapian
from support import parse_csv_file
### Start of example code.
def index(datapath, dbpath):
# Create or open the database we're going to be writing to.
db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN)
# Set up a TermGenerator that we'll use in indexing.
termgenerator = xapian.TermGenerator()
termgenerator.set_stemmer(xapian.Stem("en"))
for fields in parse_csv_file(datapath):
# 'fields' is a dictionary mapping from field name to value.
# Pick out the fields we're going to index.
description = fields.get('DESCRIPTION', u'')
title = fields.get('TITLE', u'')
identifier = fields.get('id_NUMBER', u'')
collection = fields.get('COLLECTION', u'')
maker = fields.get('MAKER', u'')
# We make a document and tell the term generator to use this.
doc = xapian.Document()
termgenerator.set_document(doc)
# Index each field with a suitable prefix.
termgenerator.index_text(title, 1, 'S')
termgenerator.index_text(description, 1, 'XD')
# Index fields without prefixes for general search.
termgenerator.index_text(title)
termgenerator.increase_termpos()
termgenerator.index_text(description)
# Add the collection as a value in slot 0.
doc.add_value(0, collection)
# Add the maker as a value in slot 1.
doc.add_value(1, maker)
# Store all the fields for display purposes.
doc.set_data(json.dumps(fields))
# We use the identifier to ensure each object ends up in the
# database only once no matter how many times we run the
# indexer.
idterm = u"Q" + identifier
doc.add_boolean_term(idterm)
db.replace_document(idterm, doc)
### End of example code.
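# --- Hedged usage sketch (not part of the original indexer) ---
# The COLLECTION and MAKER strings stored in value slots 0 and 1 above are what
# make faceted search possible later. The helper below is only an illustrative
# sketch of counting the slot-0 facets with a ValueCountMatchSpy; the database
# path and page sizes here are assumptions, not part of this script.
def show_collection_facets(dbpath, checkatleast=10000):
    db = xapian.Database(dbpath)
    enquire = xapian.Enquire(db)
    enquire.set_query(xapian.Query.MatchAll)
    spy = xapian.ValueCountMatchSpy(0)  # count the values seen in slot 0 (COLLECTION)
    enquire.add_matchspy(spy)
    enquire.get_mset(0, 10, checkatleast)
    for facet in spy.values():
        print("%s: %d" % (facet.term.decode("utf-8"), facet.termfreq))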
if len(sys.argv) != 3:
print("Usage: %s DATAPATH DBPATH" % sys.argv[0])
sys.exit(1)
index(datapath = sys.argv[1], dbpath = sys.argv[2])
| 33.147541
| 69
| 0.653808
|
6407f293cf7c3b2865c84d3841937998f6e29564
| 3,869
|
py
|
Python
|
util/mosflm.py
|
toastisme/dials
|
6bc8ababc33bfe334513677f8adb65c0e90003f3
|
[
"BSD-3-Clause"
] | 58
|
2015-10-15T09:28:20.000Z
|
2022-03-28T20:09:38.000Z
|
util/mosflm.py
|
toastisme/dials
|
6bc8ababc33bfe334513677f8adb65c0e90003f3
|
[
"BSD-3-Clause"
] | 1,741
|
2015-11-24T08:17:02.000Z
|
2022-03-31T15:46:42.000Z
|
util/mosflm.py
|
toastisme/dials
|
6bc8ababc33bfe334513677f8adb65c0e90003f3
|
[
"BSD-3-Clause"
] | 45
|
2015-10-14T13:44:16.000Z
|
2022-03-22T14:45:56.000Z
|
import os
from dxtbx.model import Crystal
from rstbx.cftbx.coordinate_frame_helpers import align_reference_frame
from scitbx import matrix
def dump(experiments, directory):
"""
Dump the experiments in mosflm format
:param experiments: The experiments to dump
:param directory: The directory to write to
"""
for i, experiment in enumerate(experiments):
suffix = ""
if len(experiments) > 1:
suffix = "_%i" % (i + 1)
sub_dir = f"{directory}{suffix}"
if not os.path.isdir(sub_dir):
os.makedirs(sub_dir)
detector = experiment.detector
beam = experiment.beam
goniometer = experiment.goniometer
# XXX imageset is getting the experimental geometry from the image files
# rather than the input models.expt file
imageset = experiment.imageset
R_to_mosflm = align_reference_frame(
beam.get_s0(),
(1.0, 0.0, 0.0),
goniometer.get_rotation_axis(),
(0.0, 0.0, 1.0),
)
cryst = experiment.crystal
cryst = cryst.change_basis(
cryst.get_space_group().info().change_of_basis_op_to_reference_setting()
)
A = matrix.sqr(cryst.get_A())
A_inv = A.inverse()
real_space_a = R_to_mosflm * A_inv.elems[:3]
real_space_b = R_to_mosflm * A_inv.elems[3:6]
real_space_c = R_to_mosflm * A_inv.elems[6:9]
cryst_mosflm = Crystal(
real_space_a,
real_space_b,
real_space_c,
space_group=cryst.get_space_group(),
)
A_mosflm = matrix.sqr(cryst_mosflm.get_A())
U_mosflm = matrix.sqr(cryst_mosflm.get_U())
assert U_mosflm.is_r3_rotation_matrix(), U_mosflm
w = beam.get_wavelength()
index_mat = os.path.join(sub_dir, "index.mat")
mosflm_in = os.path.join(sub_dir, "mosflm.in")
print(f"Exporting experiment to {index_mat} and {mosflm_in}")
with open(index_mat, "w") as f:
f.write(format_mosflm_mat(w * A_mosflm, U_mosflm, cryst.get_unit_cell()))
img_dir, template = os.path.split(imageset.get_template())
symmetry = cryst_mosflm.get_space_group().type().number()
beam_centre = tuple(reversed(detector[0].get_beam_centre(beam.get_s0())))
distance = detector[0].get_directed_distance()
with open(mosflm_in, "w") as f:
f.write(
write_mosflm_input(
directory=img_dir,
template=template,
symmetry=symmetry,
beam_centre=beam_centre,
distance=distance,
mat_file="index.mat",
)
)
def format_mosflm_mat(A, U, unit_cell, missets=(0, 0, 0)):
lines = []
uc_params = unit_cell.parameters()
for i in range(3):
lines.append(("%12.8f" * 3) % A.elems[i * 3 : 3 * (i + 1)])
lines.append(("%12.3f" * 3) % missets)
for i in range(3):
lines.append("%12.8f" * 3 % U.elems[i * 3 : 3 * (i + 1)])
lines.append(("%12.4f" * 6) % uc_params)
lines.append(("%12.3f" * 3) % missets)
return "\n".join(lines)
def write_mosflm_input(
directory=None,
template=None,
symmetry=None,
beam_centre=None,
distance=None,
mat_file=None,
):
lines = []
if directory is not None:
lines.append(f"DIRECTORY {directory}")
if template is not None:
lines.append(f"TEMPLATE {template}")
if symmetry is not None:
lines.append(f"SYMMETRY {symmetry}")
if beam_centre is not None:
lines.append("BEAM %.3f %.3f" % beam_centre)
if distance is not None:
lines.append(f"DISTANCE {distance:.4f}")
if mat_file is not None:
lines.append(f"MATRIX {mat_file}")
return "\n".join(lines)
| 32.241667
| 85
| 0.59266
|
8fafc6e9b982566da2315e7246278aface4a1729
| 2,491
|
py
|
Python
|
equivalence/equivalence_node/equalization_Urals_EES.py
|
Shurik412/equivalence_rastr_win3
|
c37119f9cc59dcf96d236cd8f3ec090dc65a5db6
|
[
"MIT"
] | null | null | null |
equivalence/equivalence_node/equalization_Urals_EES.py
|
Shurik412/equivalence_rastr_win3
|
c37119f9cc59dcf96d236cd8f3ec090dc65a5db6
|
[
"MIT"
] | null | null | null |
equivalence/equivalence_node/equalization_Urals_EES.py
|
Shurik412/equivalence_rastr_win3
|
c37119f9cc59dcf96d236cd8f3ec090dc65a5db6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from equivalence.tables.Tables import Vetv, Node, Generator
def equalization_of_the_Urals_energy_system(
selection_of_the_area: str,
rastr_win: object) -> None:
"""
# ************************************************************
# Назначение: Эквиваленитрование ОЭС Урала
# Входные параметры: selection_of_the_area - выборка районов ОЭС Урала
# Возврат: Nothing
# ************************************************************
"""
table_node = rastr_win.Tables(Node.table)
table_vetv = rastr_win.Tables(Vetv.table)
table_generator = rastr_win.Tables(Generator.table)
table_node.SetSel(selection_of_the_area)
table_node.Cols(Node.sel).Calc(1)
j = table_node.FindNextSel(-1)
while j != (-1):
ny_node = table_node.Cols(Node.ny).Z(j)
tip_node = table_node.Cols(Node.tip).Z(j)
# uhom_node = tip_node.Cols(Node.uhom).Z(j)
if tip_node > 1:
table_generator.SetSel(f"Node.ny={ny_node}")
j_gen = table_generator.FindNextSel(-1)
if j_gen != (-1):
table_vetv.SetSel(f"(ip={ny_node})|(iq={ny_node})")
j_vetv = table_vetv.FindNextSel(-1)
while j_vetv != (-1):
tip_vetv = table_vetv.Cols(Vetv.tip).Z(j_vetv)
if tip_vetv == 1:
                        v_ip = table_vetv.Cols(Vetv.v_ip).Z(j_vetv)
                        v_iq = table_vetv.Cols(Vetv.v_iq).Z(j_vetv)
if (v_ip > 430 and v_iq < 580) or (v_ip < 430 and v_iq > 580):
table_node.Cols(Node.sel).SetZ(j, 0)
j_vetv = table_vetv.FindNextSel(j_vetv)
else:
table_vetv.SetSel(f"(ip={ny_node})|(iq={ny_node})")
j_vetv_2 = table_vetv.FindNextSel(-1)
while j_vetv_2 != (-1):
tip_vetv_2 = table_vetv.Cols(Vetv.tip).Z(j_vetv_2)
if tip_vetv_2 == 1:
                        v_ip_2 = table_vetv.Cols(Vetv.v_ip).Z(j_vetv_2)
                        v_iq_2 = table_vetv.Cols(Vetv.v_iq).Z(j_vetv_2)
if (v_ip_2 > 430 and v_iq_2 < 580) or (v_ip_2 < 430 and v_iq_2 > 580):
table_node.Cols(Node.sel).SetZ(j, 0)
j_vetv_2 = table_vetv.FindNextSel(j_vetv_2)
table_node.SetSel(selection_of_the_area)
j = table_node.FindNextSel(j)
print(" -> Завершено выделение района(-ов): " & selection_of_the_area)
| 44.482143
| 90
| 0.542754
|
c417bad994193562a30b1d652e379659434bc6aa
| 838
|
py
|
Python
|
acispy/utils.py
|
sjtuzyk/chandra-acis-analysis
|
8d194c8107ba2e91b35eba95c044ac238ce03229
|
[
"MIT"
] | 3
|
2016-05-28T00:32:55.000Z
|
2022-02-28T13:36:04.000Z
|
acispy/utils.py
|
sjtuzyk/chandra-acis-analysis
|
8d194c8107ba2e91b35eba95c044ac238ce03229
|
[
"MIT"
] | null | null | null |
acispy/utils.py
|
sjtuzyk/chandra-acis-analysis
|
8d194c8107ba2e91b35eba95c044ac238ce03229
|
[
"MIT"
] | 1
|
2018-10-09T16:42:18.000Z
|
2018-10-09T16:42:18.000Z
|
# Copyright (c) 2017-2018 Weitian LI <wt@liwt.net>
# MIT license
import re
# The base directory has the format: <name>_oi<obsid>
RE_BASEDIR = re.compile(r"^.*/(?P<name>[^/_]+)_oi(?P<obsid>\d+).*$")
def get_name_from_path(path):
"""
Extract the object name from the directory path.
Parameters
----------
path : str
Path to the base directory
Returns
-------
objname : str
The name part of the base directory
"""
return RE_BASEDIR.match(path).group("name")
def get_obsid_from_path(path):
"""
Extract the observation ID from the directory path.
Parameters
----------
path : str
Path to the base directory
Returns
-------
obsid : int
The observation ID of the data
"""
return int(RE_BASEDIR.match(path).group("obsid"))
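# Hedged, illustrative example (the path below is hypothetical):
#   >>> get_name_from_path("/data/chandra/abell2142_oi12345/repro")
#   'abell2142'
#   >>> get_obsid_from_path("/data/chandra/abell2142_oi12345/repro")
#   12345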
| 19.488372
| 68
| 0.596659
|
9271bc94dcca99afe3d1561028807348ee3c0bfc
| 323
|
py
|
Python
|
books/techno/python/programming_python_4_ed_m_lutz/code/chapter_3/04_fetching_shell_variables/main.py
|
ordinary-developer/lin_education
|
13d65b20cdbc3e5467b2383e5c09c73bbcdcb227
|
[
"MIT"
] | 1
|
2017-05-04T08:23:46.000Z
|
2017-05-04T08:23:46.000Z
|
books/techno/python/programming_python_4_ed_m_lutz/code/chapter_3/04_fetching_shell_variables/main.py
|
ordinary-developer/lin_education
|
13d65b20cdbc3e5467b2383e5c09c73bbcdcb227
|
[
"MIT"
] | null | null | null |
books/techno/python/programming_python_4_ed_m_lutz/code/chapter_3/04_fetching_shell_variables/main.py
|
ordinary-developer/lin_education
|
13d65b20cdbc3e5467b2383e5c09c73bbcdcb227
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
import os
print(os.environ.keys())
print(list(os.environ.keys()))
print(os.environ['PATH'])
print(os.environ['PATH'])
for src_dir in os.environ['PATH'].split(os.pathsep):
print(src_dir)
import sys
print(sys.path[:3])
print(sys.path)
| 20.1875
| 57
| 0.578947
|
e32a78f382bf17f2d1ed4086a7291885e317d2e9
| 1,946
|
py
|
Python
|
wiki/views.py
|
JericHunter/makewiki
|
fa5a45f25a47c2f404414bc8e81685e2f1be9206
|
[
"MIT"
] | null | null | null |
wiki/views.py
|
JericHunter/makewiki
|
fa5a45f25a47c2f404414bc8e81685e2f1be9206
|
[
"MIT"
] | 5
|
2020-06-06T00:37:31.000Z
|
2022-02-10T09:43:39.000Z
|
wiki/views.py
|
JericHunter/makewiki
|
fa5a45f25a47c2f404414bc8e81685e2f1be9206
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from wiki.models import Page
from django.views.generic import DetailView, ListView
from django.http import Http404, HttpResponse
# Create your views here.
class PageList(ListView):
"""
CHALLENGES:
1. On GET, display a homepage that shows all Pages in your wiki.
2. Replace this CHALLENGE text with a descriptive docstring for PageList.
3. Replace pass below with the code to render a template named `list.html`.
"""
model = Page
def get(self, request):
""" Returns a list of wiki pages. """
wiki_list = Page.objects.all()
context = {'wiki_list': wiki_list}
return render(request, 'list.html', context=context)
class PageDetailView(DetailView):
"""
CHALLENGES:
1. On GET, render a template named `page.html`.
    2. Replace this docstring with a description of what this accomplishes.
STRETCH CHALLENGES:
1. Import the PageForm class from forms.py.
- This ModelForm enables editing of an existing Page object in the database.
2. On GET, render an edit form below the page details.
3. On POST, check if the data in the form is valid.
- If True, save the data, and redirect back to the DetailsView.
- If False, display all the errors in the template, above the form fields.
4. Instead of hard-coding the path to redirect to, use the `reverse` function to return the path.
5. After successfully editing a Page, use Django Messages to "flash" the user a success message
- Message Content: REPLACE_WITH_PAGE_TITLE has been successfully updated.
"""
model = Page
def get(self, request, slug):
""" Returns a specific of wiki page by slug. """
try:
wiki = Page.objects.get(slug=slug)
except Page.DoesNotExist:
raise Http404("Page does not exist")
return render(request, 'page.html', {'wiki': wiki})
| 38.92
| 103
| 0.675231
|
64960f3c881c83577643cc5a00fc16eec2a9e13c
| 602
|
py
|
Python
|
src/syckIO.py
|
davix3f/SyCk
|
71a51876488b6f493502e1cefe352af758cf8d63
|
[
"Apache-2.0"
] | null | null | null |
src/syckIO.py
|
davix3f/SyCk
|
71a51876488b6f493502e1cefe352af758cf8d63
|
[
"Apache-2.0"
] | null | null | null |
src/syckIO.py
|
davix3f/SyCk
|
71a51876488b6f493502e1cefe352af758cf8d63
|
[
"Apache-2.0"
] | null | null | null |
import re
import linecache
lines={}
filename=""
def file_length(filename):
length=0
f=open(filename, "r")
for line in f:
length+=1
f.close()
return(length)
if filename!="":
file_length(filename)
class Basic:
def openfile():
Basic.f=open(filename,"r")
return(Basic.f)
def closefile():
Basic.f.close()
def reader():
line_number=1
Basic.openfile()
for line in Basic.f:
lines[line_number]=linecache.getline(filename, line_number).replace("\n","")
linecache.clearcache()
line_number+=1
def readlines():
for item in lines:
print(item, lines[item])
| 15.842105
| 78
| 0.666113
|
70878e149ae7a876335e76b7c02f5ea1ec79c01b
| 20,334
|
py
|
Python
|
custom_components/tuya_v2/sensor.py
|
MorrisTheHorris/tuya-home-assistant
|
cc9665ead3975288d66781f9bd95f271eb568725
|
[
"MIT"
] | null | null | null |
custom_components/tuya_v2/sensor.py
|
MorrisTheHorris/tuya-home-assistant
|
cc9665ead3975288d66781f9bd95f271eb568725
|
[
"MIT"
] | null | null | null |
custom_components/tuya_v2/sensor.py
|
MorrisTheHorris/tuya-home-assistant
|
cc9665ead3975288d66781f9bd95f271eb568725
|
[
"MIT"
] | null | null | null |
"""Support for Tuya sensors."""
import json
import logging
from homeassistant.components.sensor import (
DOMAIN as DEVICE_DOMAIN,
SensorEntity,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CO2,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
ENERGY_KILO_WATT_HOUR,
MASS_MILLIGRAMS,
PERCENTAGE,
TEMP_CELSIUS,
TIME_DAYS,
TIME_MINUTES,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from tuya_iot import TuyaDevice, TuyaDeviceManager
from .base import TuyaHaDevice
from .const import (
DOMAIN,
TUYA_DEVICE_MANAGER,
TUYA_DISCOVERY_NEW,
TUYA_HA_DEVICES,
TUYA_HA_TUYA_MAP,
)
_LOGGER = logging.getLogger(__name__)
TUYA_SUPPORT_TYPE = [
"wsdcg", # Temperature and Humidity Sensor
"mcs", # Door Window Sensor
"ywbj", # Somke Detector
"rqbj", # Gas Detector
"pir", # PIR Detector
"sj", # Water Detector
"pm2.5", # PM2.5 Sensor
"kg", # Switch
"cz", # Socket
"pc", # Power Strip
"wk", # Thermostat
"dlq", # Breaker
"ldcg", # Luminance Sensor
"ms", # Residential Lock
"dj", # Smart RGB Plug
"kj", # Air Purifier,
"xxj", # Diffuser
"zndb", # Smart Electricity Meter
"wnykq", # Smart IR
"szjqr", # Fingerbot
]
# Smoke Detector
# https://developer.tuya.com/en/docs/iot/s?id=K9gf48r5i2iiy
DPCODE_BATTERY = "va_battery"
DPCODE_BATTERY_PERCENTAGE = "battery_percentage"
DPCODE_BATTERY_CODE = "battery"
DPCODE_TEMPERATURE = "va_temperature"
DPCODE_HUMIDITY = "va_humidity"
DPCODE_PM100_VALUE = "pm100_value"
DPCODE_PM25_VALUE = "pm25_value"
DPCODE_PM10_VALUE = "pm10_value"
DPCODE_TEMP_CURRENT = "temp_current"
DPCODE_HUMIDITY_VALUE = "humidity_value"
DPCODE_CURRENT = "cur_current"
DPCODE_POWER = "cur_power"
DPCODE_VOLTAGE = "cur_voltage"
DPCODE_TOTAL_FORWARD_ENERGY = "total_forward_energy"
DPCODE_ADD_ELE = "add_ele"
DPCODE_BRIGHT_VALUE = "bright_value"
# Residential Lock
# https://developer.tuya.com/en/docs/iot/f?id=K9i5ql58frxa2
DPCODE_BATTERY_ZIGBEELOCK = "residual_electricity"
# Air Purifier
# https://developer.tuya.com/en/docs/iot/s?id=K9gf48r41mn81
DPCODE_AP_PM25 = "pm25" # PM25 - no units
DPCODE_AP_FILTER = "filter" # Filter cartridge utilization [%]
DPCODE_AP_TEMP = "temp" # Temperature [℃]
DPCODE_AP_HUMIDITY = "humidity" # Humidity [%]
DPCODE_AP_TVOC = "tvoc" # Total Volatile Organic Compounds [ppm]
DPCODE_AP_ECO2 = "eco2" # Carbon dioxide concentration [ppm]
DPCODE_AP_FDAYS = "filter_days" # Remaining days of the filter cartridge [day]
DPCODE_AP_TTIME = "total_time" # Total operating time [minute]
DPCODE_AP_TPM = "total_pm" # Total absorption of particles [mg]
DPCODE_AP_COUNTDOWN = "countdown_left" # Remaining time of countdown [minute]
# Smart Electricity Meter (zndb)
# https://developer.tuya.com/en/docs/iot/smart-meter?id=Kaiuz4gv6ack7
DPCODE_FORWARD_ENERGY_TOTAL = "forward_energy_total"
DPCODE_PHASE = ["phase_a", "phase_b", "phase_c"]
JSON_CODE_CURRENT = "electricCurrent"
JSON_CODE_POWER = "power"
JSON_CODE_VOLTAGE = "voltage"
# Door Window Sensor (mcs)
DPCODE_BATTERY_VALUE = "battery_value"
async def async_setup_entry(
hass: HomeAssistant, _entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up tuya sensors dynamically through tuya discovery."""
_LOGGER.info("sensor init")
hass.data[DOMAIN][TUYA_HA_TUYA_MAP].update({DEVICE_DOMAIN: TUYA_SUPPORT_TYPE})
async def async_discover_device(dev_ids):
"""Discover and add a discovered tuya sensor."""
_LOGGER.info(f"sensor add-> {dev_ids}")
if not dev_ids:
return
entities = await hass.async_add_executor_job(_setup_entities, hass, dev_ids)
hass.data[DOMAIN][TUYA_HA_DEVICES].extend(entities)
async_add_entities(entities)
async_dispatcher_connect(
hass, TUYA_DISCOVERY_NEW.format(DEVICE_DOMAIN), async_discover_device
)
device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]
device_ids = []
for (device_id, device) in device_manager.device_map.items():
if device.category in TUYA_SUPPORT_TYPE:
device_ids.append(device_id)
await async_discover_device(device_ids)
def _setup_entities(hass: HomeAssistant, device_ids: list):
"""Set up Tuya Switch device."""
device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]
entities = []
for device_id in device_ids:
device = device_manager.device_map[device_id]
if device is None:
continue
if device.category == "kj":
if DPCODE_AP_PM25 in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"PM25",
DPCODE_AP_PM25,
"",
STATE_CLASS_MEASUREMENT,
)
)
elif DPCODE_AP_FILTER in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"Filter",
DPCODE_AP_FILTER,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
elif DPCODE_AP_TEMP in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_TEMPERATURE,
DPCODE_AP_TEMP,
TEMP_CELSIUS,
STATE_CLASS_MEASUREMENT,
)
)
elif DPCODE_AP_HUMIDITY in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_HUMIDITY,
DPCODE_AP_HUMIDITY,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
elif DPCODE_AP_TVOC in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"TVOC",
DPCODE_AP_TVOC,
CONCENTRATION_PARTS_PER_MILLION,
STATE_CLASS_TOTAL_INCREASING,
)
)
elif DPCODE_AP_ECO2 in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_CO2,
DPCODE_AP_ECO2,
CONCENTRATION_PARTS_PER_MILLION,
STATE_CLASS_MEASUREMENT,
)
)
elif DPCODE_AP_FDAYS in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"FilterDays",
DPCODE_AP_FDAYS,
TIME_DAYS,
None,
)
)
elif DPCODE_AP_TTIME in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"TotalTime",
DPCODE_AP_TTIME,
TIME_MINUTES,
STATE_CLASS_TOTAL_INCREASING,
)
)
elif DPCODE_AP_TPM in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"TotalPM",
DPCODE_AP_TPM,
MASS_MILLIGRAMS,
STATE_CLASS_TOTAL_INCREASING,
)
)
elif DPCODE_AP_COUNTDOWN in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"Countdown",
DPCODE_AP_COUNTDOWN,
TIME_MINUTES,
None,
)
)
else:
if DPCODE_BATTERY_ZIGBEELOCK in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_BATTERY,
DPCODE_BATTERY_ZIGBEELOCK,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_BATTERY in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_BATTERY,
DPCODE_BATTERY,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_BATTERY_PERCENTAGE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_BATTERY,
DPCODE_BATTERY_PERCENTAGE,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_BATTERY_VALUE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_BATTERY,
DPCODE_BATTERY_VALUE,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_BATTERY_CODE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_BATTERY,
DPCODE_BATTERY_CODE,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_TEMPERATURE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_TEMPERATURE,
DPCODE_TEMPERATURE,
TEMP_CELSIUS,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_TEMP_CURRENT in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_TEMPERATURE,
DPCODE_TEMP_CURRENT,
TEMP_CELSIUS,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_HUMIDITY in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_HUMIDITY,
DPCODE_HUMIDITY,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_HUMIDITY_VALUE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_HUMIDITY,
DPCODE_HUMIDITY_VALUE,
PERCENTAGE,
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_PM100_VALUE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"PM10",
DPCODE_PM100_VALUE,
"ug/m³",
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_PM25_VALUE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"PM2.5",
DPCODE_PM25_VALUE,
"ug/m³",
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_PM10_VALUE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
"PM1.0",
DPCODE_PM10_VALUE,
"ug/m³",
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_CURRENT in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_CURRENT,
DPCODE_CURRENT,
json.loads(device.status_range.get(DPCODE_CURRENT).values).get(
"unit", 0
),
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_POWER in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_POWER,
DPCODE_POWER,
json.loads(device.status_range.get(DPCODE_POWER).values).get(
"unit", 0
),
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_TOTAL_FORWARD_ENERGY in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_ENERGY,
DPCODE_TOTAL_FORWARD_ENERGY,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_TOTAL_INCREASING,
)
)
if DPCODE_ADD_ELE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_ENERGY,
DPCODE_ADD_ELE,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_TOTAL_INCREASING,
)
)
if DPCODE_VOLTAGE in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_VOLTAGE,
DPCODE_VOLTAGE,
json.loads(device.status_range.get(DPCODE_VOLTAGE).values).get(
"unit", 0
),
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_BRIGHT_VALUE in device.status and device.category != "dj":
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_ILLUMINANCE,
DPCODE_BRIGHT_VALUE,
json.loads(
device.status_range.get(DPCODE_BRIGHT_VALUE).values
).get("unit", 0),
STATE_CLASS_MEASUREMENT,
)
)
if DPCODE_FORWARD_ENERGY_TOTAL in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_ENERGY,
DPCODE_FORWARD_ENERGY_TOTAL,
ENERGY_KILO_WATT_HOUR,
STATE_CLASS_TOTAL_INCREASING,
)
)
if device.category == "zndb":
for phase in DPCODE_PHASE:
if phase in device.status:
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_CURRENT,
phase + "_" + JSON_CODE_CURRENT,
"A",
STATE_CLASS_MEASUREMENT,
)
)
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_POWER,
phase + "_" + JSON_CODE_POWER,
"kW",
STATE_CLASS_MEASUREMENT,
)
)
entities.append(
TuyaHaSensor(
device,
device_manager,
DEVICE_CLASS_VOLTAGE,
phase + "_" + JSON_CODE_VOLTAGE,
"V",
STATE_CLASS_MEASUREMENT,
)
)
return entities
class TuyaHaSensor(TuyaHaDevice, SensorEntity):
"""Tuya Sensor Device."""
def __init__(
self,
device: TuyaDevice,
device_manager: TuyaDeviceManager,
sensor_type: str,
sensor_code: str,
sensor_unit: str,
sensor_state_class: str,
) -> None:
"""Init TuyaHaSensor."""
self._code = sensor_code
self._attr_device_class = sensor_type
self._attr_name = self.tuya_device.name + "_" + self._attr_device_class
self._attr_unique_id = f"{super().unique_id}{self._code}"
self._attr_unit_of_measurement = sensor_unit
self._attr_state_class = sensor_state_class
self._attr_available = True
super().__init__(device, device_manager)
@property
def state(self) -> StateType:
"""Return the state of the sensor."""
if self.tuya_device.category == "zndb" and self._code.startswith("phase_"):
__value = json.loads(self.tuya_device.status.get(self._code[:7])).get(
self._code[8:]
)
return __value
__value = self.tuya_device.status.get(self._code)
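        # Integer datapoints are reported pre-scaled: e.g. a raw value of 235
        # with "scale": 1 represents 23.5 (raw / 10 ** scale).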
if self.tuya_device.status_range.get(self._code).type == "Integer":
__value_range = json.loads(
self.tuya_device.status_range.get(self._code).values
)
__state = (__value) * 1.0 / (10 ** __value_range.get("scale"))
if __value_range.get("scale") == 0:
return int(__state)
return f"%.{__value_range.get('scale')}f" % __state
return ""
| 35.548951
| 87
| 0.470985
|
b273190765107f0f6dab81bce060a661b80e1143
| 3,484
|
py
|
Python
|
src/basic_crawler.py
|
yuju13488/homework
|
d6ff579d0fd0d03d51b780b0c062b3a8495a10a4
|
[
"CC-BY-4.0"
] | 332
|
2015-09-07T01:45:59.000Z
|
2022-03-01T02:39:57.000Z
|
src/basic_crawler.py
|
yuju13488/homework
|
d6ff579d0fd0d03d51b780b0c062b3a8495a10a4
|
[
"CC-BY-4.0"
] | 1
|
2019-12-17T22:49:05.000Z
|
2019-12-17T22:49:05.000Z
|
src/basic_crawler.py
|
yuju13488/homework
|
d6ff579d0fd0d03d51b780b0c062b3a8495a10a4
|
[
"CC-BY-4.0"
] | 90
|
2016-06-30T12:42:10.000Z
|
2022-03-25T02:37:37.000Z
|
import re
import time
import urllib
from multiprocessing import Pool
import requests
from requests_html import HTML
from utils import pretty_print # noqa
def fetch(url):
''' Step-1: send a request and fetch the web page.
'''
response = requests.get(url)
return response
def parse_article_entries(doc):
''' Step-2: parse the post entries on the source string.
'''
html = HTML(html=doc)
post_entries = html.find('div.r-ent')
return post_entries
def parse_article_meta(ent):
''' Step-3: parse the metadata in article entry
'''
meta = {
'title': ent.find('div.title', first=True).text,
'push': ent.find('div.nrec', first=True).text,
'date': ent.find('div.date', first=True).text,
}
try:
meta['author'] = ent.find('div.author', first=True).text
meta['link'] = ent.find('div.title > a', first=True).attrs['href']
    except AttributeError:
        # Deleted posts have no author/link; try to recover the author from the
        # title. '(本文已被刪除)' / '已被...刪除' are PTT's "this post has been
        # deleted (by ...)" markers, so they are kept verbatim for matching.
        if '(本文已被刪除)' in meta['title']:
            match_author = re.search(r'\[(\w*)\]', meta['title'])
            if match_author:
                meta['author'] = match_author.group(1)
        elif re.search(r'已被\w*刪除', meta['title']):
            match_author = re.search(r'\<(\w*)\>', meta['title'])
            if match_author:
                meta['author'] = match_author.group(1)
return meta
def get_metadata_from(url):
    ''' Step-4: collect the post metadata on a page and the link to the previous page.
'''
def parse_next_link(doc):
        ''' Step-4a: parse the link to the previous page.
'''
html = HTML(html=doc)
controls = html.find('.action-bar a.btn.wide')
link = controls[1].attrs.get('href')
return urllib.parse.urljoin(domain, link)
resp = fetch(url)
post_entries = parse_article_entries(resp.text)
next_link = parse_next_link(resp.text)
metadata = [parse_article_meta(entry) for entry in post_entries]
return metadata, next_link
def get_paged_meta(url, num_pages):
''' Step-4-ext: collect pages of metadata starting from url.
'''
collected_meta = []
for _ in range(num_pages):
posts, link = get_metadata_from(url)
collected_meta += posts
url = urllib.parse.urljoin(domain, link)
return collected_meta
def partA():
resp = fetch(start_url)
post_entries = parse_article_entries(resp.text)
for entry in post_entries:
meta = parse_article_meta(entry)
pretty_print(meta['push'], meta['title'], meta['date'], meta['author'])
def partB():
metadata = get_paged_meta(start_url, num_pages=5)
for meta in metadata:
pretty_print(meta['push'], meta['title'], meta['date'], meta['author'])
def partC():
def get_posts(metadata):
post_links = [
urllib.parse.urljoin(domain, meta['link'])
for meta in metadata if 'link' in meta]
with Pool(processes=8) as pool:
contents = pool.map(fetch, post_links)
return contents
start = time.time()
metadata = get_paged_meta(start_url, num_pages=2)
resps = get_posts(metadata)
    print('Elapsed: %f seconds' % (time.time() - start))
    print('%d results in total:' % len(resps))
for post, resps in zip(metadata, resps):
        print('{0} {1: <15} {2}, page content: {3} chars'.format(
            post['date'], post['author'], post['title'], len(resps.text)))
domain = 'https://www.ptt.cc/'
start_url = 'https://www.ptt.cc/bbs/movie/index.html'
if __name__ == '__main__':
partA()
partB()
partC()
| 26.8
| 79
| 0.610218
|
db5a28f24b8631a024f3f90c4c0d18ac8a563201
| 2,089
|
py
|
Python
|
src/python/bot/untrusted_runner/build_setup.py
|
ABHIsHEk122811/clusterfuzz
|
7cac0ee869787e6f547a4b3dac18196c60f03383
|
[
"Apache-2.0"
] | 4
|
2019-11-26T01:50:51.000Z
|
2021-08-14T20:32:43.000Z
|
src/python/bot/untrusted_runner/build_setup.py
|
ABHIsHEk122811/clusterfuzz
|
7cac0ee869787e6f547a4b3dac18196c60f03383
|
[
"Apache-2.0"
] | 22
|
2019-12-26T17:02:34.000Z
|
2022-03-21T22:16:52.000Z
|
src/python/bot/untrusted_runner/build_setup.py
|
ABHIsHEk122811/clusterfuzz
|
7cac0ee869787e6f547a4b3dac18196c60f03383
|
[
"Apache-2.0"
] | 2
|
2019-02-09T09:09:20.000Z
|
2019-02-15T05:25:13.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build setup (untrusted side)."""
from build_management import build_manager
from protos import untrusted_runner_pb2
from system import environment
def _build_response(result):
if not result:
return untrusted_runner_pb2.SetupBuildResponse(result=False)
return untrusted_runner_pb2.SetupBuildResponse(
result=True,
app_path=environment.get_value('APP_PATH'),
app_path_debug=environment.get_value('APP_PATH_DEBUG'),
app_dir=environment.get_value('APP_DIR'),
build_dir=environment.get_value('BUILD_DIR'),
build_url=environment.get_value('BUILD_URL'),
fuzz_target=environment.get_value('FUZZ_TARGET'))
def setup_regular_build(request):
"""Setup a regular build."""
build = build_manager.RegularBuild(request.base_build_dir, request.revision,
request.build_url, request.build_dir_name,
request.target_weights)
return _build_response(build.setup())
def setup_symbolized_build(request):
"""Setup a symbolized build."""
build = build_manager.SymbolizedBuild(
request.base_build_dir, request.revision, request.release_build_url,
request.debug_build_url)
return _build_response(build.setup())
def setup_production_build(request):
"""Setup a production build."""
build = build_manager.ProductionBuild(request.base_build_dir, request.version,
request.build_url, request.build_type)
return _build_response(build.setup())
| 37.303571
| 80
| 0.731929
|
0a550c1dc1665545da5c3b4d9ab32fd0394996b2
| 200
|
py
|
Python
|
app/routes/__init__.py
|
DzhonPetrus/Treatment-Management
|
6b08c59d2d4e79181bbae4e951b7a5fd2e3162f1
|
[
"MIT"
] | null | null | null |
app/routes/__init__.py
|
DzhonPetrus/Treatment-Management
|
6b08c59d2d4e79181bbae4e951b7a5fd2e3162f1
|
[
"MIT"
] | null | null | null |
app/routes/__init__.py
|
DzhonPetrus/Treatment-Management
|
6b08c59d2d4e79181bbae4e951b7a5fd2e3162f1
|
[
"MIT"
] | null | null | null |
from . import surgery, surgery_type, lab_result, lab_request, profile, lab_test, treatment, treatment_type, patient, user, authentication, index, me, sysadmin
from .public import landing, find_doctor
| 66.666667
| 158
| 0.81
|
55368289275a78cb0c0beed6ab2fae9c78ffd6a3
| 8,426
|
py
|
Python
|
mmdet/core/evaluation/coco_utils.py
|
atoaiari/Pedestron
|
635f291ad919a8ffe62e6a0530662500961e6d46
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/evaluation/coco_utils.py
|
atoaiari/Pedestron
|
635f291ad919a8ffe62e6a0530662500961e6d46
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/evaluation/coco_utils.py
|
atoaiari/Pedestron
|
635f291ad919a8ffe62e6a0530662500961e6d46
|
[
"Apache-2.0"
] | null | null | null |
import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from .recall import eval_recalls
def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
for res_type in result_types:
assert res_type in [
'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
]
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
if result_types == ['proposal_fast']:
ar = fast_eval_recall(result_files, coco, np.array(max_dets))
for i, num in enumerate(max_dets):
print('AR@{}\t= {:.4f}'.format(num, ar[i]))
return
for res_type in result_types:
result_file = result_files[res_type]
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
def fast_eval_recall(results,
coco,
max_dets,
iou_thrs=np.arange(0.5, 0.96, 0.05)):
if mmcv.is_str(results):
assert results.endswith('.pkl')
results = mmcv.load(results)
elif not isinstance(results, list):
raise TypeError(
'results must be a list of numpy arrays or a filename, not {}'.
format(type(results)))
gt_bboxes = []
img_ids = coco.getImgIds()
for i in range(len(img_ids)):
ann_ids = coco.getAnnIds(imgIds=img_ids[i])
ann_info = coco.loadAnns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def det2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
json_results.append(data)
return json_results
def segm2json(dataset, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different score for det and segm
if len(seg) == 2:
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['score'] = float(mask_score[i])
data['category_id'] = dataset.cat_ids[label]
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(dataset, results, out_file):
result_files = dict()
if isinstance(results[0], list):
json_results = det2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = segm2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = proposal2json(dataset, results)
result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def orientation_results2json(dataset, results, out_file):
result_files = dict()
json_results = orientation2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
# result_files['orientation'] = '{}.{}.json'.format(out_file, 'orientation')
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
# mmcv.dump(json_results[2], result_files['orientation'])
return result_files
def orientation2json(dataset, results):
bbox_json_results = []
segm_json_results = []
orientation_json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
det, seg, ori = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
data['orientation'] = int(np.argmax(ori[i]))
data['orientation_score'] = float(np.max(ori[i]))
bbox_json_results.append(data)
# segm results
# some detectors use different score for det and segm
if seg:
if len(seg) == 2:
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['score'] = float(mask_score[i])
data['category_id'] = dataset.cat_ids[label]
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
| 35.855319
| 80
| 0.558509
|
f66d569de307b2d35e66cb4f18b239cb2ce20f53
| 1,554
|
py
|
Python
|
util/levelGenerators.py
|
neuhier/inception2
|
b04b2d1499ad3510baed38b7349ae4399d3bc4a0
|
[
"CC-BY-4.0"
] | null | null | null |
util/levelGenerators.py
|
neuhier/inception2
|
b04b2d1499ad3510baed38b7349ae4399d3bc4a0
|
[
"CC-BY-4.0"
] | null | null | null |
util/levelGenerators.py
|
neuhier/inception2
|
b04b2d1499ad3510baed38b7349ae4399d3bc4a0
|
[
"CC-BY-4.0"
] | null | null | null |
# Contains functions to generate a random level, objects, etc.
import random
from gameObjects.level import Level
from gameObjects.villian import Villian
from itemGenerators import generateTree, generateSpeedBoostPanel, generateGoal
# --------------------------------------------------------------------------------------------------------------+
# Generate a random map - JUST FOR TESTING
# --------------------------------------------------------------------------------------------------------------+
def initRandomLevel(theme, width, height, game):
game.level = Level(theme, width, height) # New empty level
# -------------------------+
# Add some zombies
# -------------------------+
for i in range(0, 5):
game.level.chars.add(
Villian("zombie", game.imgMngr, [random.randint(1, width - 1), random.randint(1, height - 1)]))
# -------------------------+
# Generate Texture grid
# -------------------------+
n_tex = len(game.imgMngr.all_textures) # How many different textures are there
for i in range(width - 1):
for j in range(height - 1):
game.level.texture_grid[i, j] = random.randint(0, n_tex - 1)
# -------------------------+
# Add items
# -------------------------+
nitems = int(round(width * height / 150))
for i in range(0, nitems):
game.level.items.add(generateTree(game))
# Boosts
game.level.items.add(generateSpeedBoostPanel(game))
# Goal
game.level.items.add(generateGoal(game, [width - 10, height - 10]))
| 37.902439
| 113
| 0.494852
|
7952876c1c0dd5c3b60b0b613ba4ac4f86bc93d9
| 5,137
|
py
|
Python
|
lib/focaltouch.py
|
szczys/st7789_mpy
|
bc854ec453d7644ce1773f7ed4d41504f37d376b
|
[
"MIT"
] | 153
|
2020-02-02T11:03:14.000Z
|
2022-03-30T05:47:07.000Z
|
lib/focaltouch.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 58
|
2020-04-11T23:23:02.000Z
|
2022-03-26T20:45:23.000Z
|
lib/focaltouch.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 50
|
2020-02-02T11:05:23.000Z
|
2022-03-22T15:24:42.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2017 ladyada for adafruit industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_focaltouch`
====================================================
CircuitPython driver for common low-cost FocalTech capacitive touch chips.
Currently supports FT6206 & FT6236.
* Author(s): ladyada
* adopted for micropython => franz schaefer (mond)
Implementation Notes
--------------------
**Hardware:**
* Adafruit `2.8" TFT LCD with Cap Touch Breakout Board w/MicroSD Socket
<http://www.adafruit.com/product/2090>`_ (Product ID: 2090)
* Adafruit `2.8" TFT Touch Shield for Arduino w/Capacitive Touch
<http://www.adafruit.com/product/1947>`_ (Product ID: 1947)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library (when using I2C/SPI):
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# imports
try:
import struct
except ImportError:
import ustruct as struct
from machine import SoftI2C
from micropython import const
_FT6206_DEFAULT_I2C_ADDR = 0x38
_FT6XXX_REG_DATA = const(0x00)
_FT6XXX_REG_NUMTOUCHES = const(0x02)
_FT6XXX_REG_THRESHHOLD = const(0x80)
_FT6XXX_REG_POINTRATE = const(0x88)
_FT6XXX_REG_LIBH = const(0xA1)
_FT6XXX_REG_LIBL = const(0xA2)
_FT6XXX_REG_CHIPID = const(0xA3)
_FT6XXX_REG_FIRMVERS = const(0xA6)
_FT6XXX_REG_VENDID = const(0xA8)
_FT6XXX_REG_RELEASE = const(0xAF)
class FocalTouch:
"""
A driver for the FocalTech capacitive touch sensor.
"""
_debug = False
chip = None
def __init__(self, i2c, address=_FT6206_DEFAULT_I2C_ADDR, debug=False):
self.bus = i2c
self.address = address
self._debug = debug
chip_data = self._read(_FT6XXX_REG_LIBH, 8)
lib_ver, chip_id, _, _, firm_id, _, vend_id = struct.unpack(
">HBBBBBB", chip_data
)
if debug:
print("Vendor ID %02x" % vend_id)
self.vend_id=vend_id
if chip_id == 0x06:
self.chip = "FT6206"
elif chip_id == 0x64:
self.chip = "FT6236"
elif debug:
print("Chip Id: %02x" % chip_id)
if debug:
print("Library vers %04X" % lib_ver)
print("Firmware ID %02X" % firm_id)
print("Point rate %d Hz" % self._read(_FT6XXX_REG_POINTRATE, 1)[0])
print("Thresh %d" % self._read(_FT6XXX_REG_THRESHHOLD, 1)[0])
@property
def touched(self):
""" Returns the number of touches currently detected """
return self._read(_FT6XXX_REG_NUMTOUCHES, 1)[0]
# pylint: disable=unused-variable
@property
def touches(self):
"""
Returns a list of touchpoint dicts, with 'x' and 'y' containing the
touch coordinates, and 'id' as the touch # for multitouch tracking
"""
touchpoints = []
data = self._read(_FT6XXX_REG_DATA, 32)
for i in range(2):
point_data = data[i * 6 + 3 : i * 6 + 9]
if all([i == 0xFF for i in point_data]):
continue
# print([hex(i) for i in point_data])
x, y, weight, misc = struct.unpack(">HHBB", point_data)
# print(x, y, weight, misc)
touch_id = y >> 12
x &= 0xFFF
y &= 0xFFF
point = {"x": x, "y": y, "id": touch_id}
touchpoints.append(point)
return touchpoints
def _read(self, reg, length):
"""Returns an array of 'length' bytes from the 'register'"""
result = bytearray(length)
self.bus.readfrom_mem_into(self.address, reg, result)
if self._debug:
print("\t$%02X => %s" % (reg, [hex(i) for i in result]))
return result
def _write(self, reg, values):
"""Writes an array of 'length' bytes to the 'register'"""
values = [(v & 0xFF) for v in values]
self.bus.writeto_mem(self.address,reg,bytes(values))
if self._debug:
print("\t$%02X <= %s" % (reg, [hex(i) for i in values]))
| 32.929487
| 79
| 0.648628
|
20f77809485c74730bd60edd7bd1cbba8e1ac3c1
| 2,521
|
py
|
Python
|
grr/server/grr_response_server/gui/api_plugins/report_plugins/report_plugins_test_mocks.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/gui/api_plugins/report_plugins/report_plugins_test_mocks.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/gui/api_plugins/report_plugins/report_plugins_test_mocks.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""This module contains report plugin mocks used for testing."""
from grr.lib import rdfvalue
from grr.lib import utils
from grr.server.grr_response_server.gui.api_plugins.report_plugins import rdf_report_plugins
from grr.server.grr_response_server.gui.api_plugins.report_plugins import report_plugin_base
from grr.server.grr_response_server.gui.api_plugins.report_plugins import report_plugins
class FooReportPlugin(report_plugin_base.ReportPluginBase):
TYPE = rdf_report_plugins.ApiReportDescriptor.ReportType.CLIENT
TITLE = "Foo"
SUMMARY = "Reports all foos."
class BarReportPlugin(report_plugin_base.ReportPluginBase):
TYPE = rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER
TITLE = "Bar Activity"
SUMMARY = "Reports bars' activity in the given time range."
REQUIRES_TIME_RANGE = True
def GetReportData(self, get_report_args, token):
ret = rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.RepresentationType.
STACK_CHART)
database = {
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/11"): (1, 0),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/12"): (2, 1),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/13"): (3, 2),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14"): (5, 3),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/15"): (8, 4),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/16"): (13, 5),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/17"): (21, 6),
rdfvalue.RDFDatetime.FromHumanReadable("2012/12/18"): (34, 7)
}
ret.stack_chart.data = [
rdf_report_plugins.ApiReportDataSeries2D(
label="Bar",
points=[
rdf_report_plugins.ApiReportDataPoint2D(x=x, y=y)
for (t, (x, y)) in sorted(database.iteritems())
if get_report_args.start_time <= t and
t < get_report_args.start_time + get_report_args.duration
])
]
return ret
class MockedReportPlugins(object):
"""A context manager that swaps available reports with the mocked reports."""
def __init__(self):
self.stubber = utils.Stubber(report_plugins.REGISTRY, "plugins", {
"FooReportPlugin": FooReportPlugin,
"BarReportPlugin": BarReportPlugin
})
def __enter__(self):
self.Start()
def __exit__(self, *_):
self.Stop()
def Start(self):
self.stubber.Start()
def Stop(self):
self.stubber.Stop()
| 34.534247
| 92
| 0.704879
|
df5c50db78a143151997f87faa709ba93cb0f990
| 808
|
py
|
Python
|
python/translate-text/main.py
|
stnguyen90/demos-for-functions
|
2b6161c21c6bc8a7f38df4c518cedbbf8824ff03
|
[
"MIT"
] | null | null | null |
python/translate-text/main.py
|
stnguyen90/demos-for-functions
|
2b6161c21c6bc8a7f38df4c518cedbbf8824ff03
|
[
"MIT"
] | 2
|
2022-03-11T08:32:38.000Z
|
2022-03-11T14:42:33.000Z
|
python/translate-text/main.py
|
stnguyen90/demos-for-functions
|
2b6161c21c6bc8a7f38df4c518cedbbf8824ff03
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
from translate import Translator
if os.environ.get("APPWRITE_FUNCTION_DATA", None) is None:
sys.exit("Search query not provided")
query = os.environ["APPWRITE_FUNCTION_DATA"]
try:
query = json.loads(query)
except Exception as e:
sys.exit(e)
if query.get('text', None) is None:
sys.exit("Query not valid! Exiting..")
else:
in_text = query.get('text')
if query.get('dest', None) is None:
sys.exit("Query not valid! Exiting..")
else:
to_lang = query.get('dest')
if query.get('source', None) is None:
translator = Translator(to_lang=to_lang)
else:
translator = Translator(from_lang=query.get('source'), to_lang=to_lang)
try:
translation = translator.translate(in_text)
print(translation)
except Exception as e:
print(e)
| 21.263158
| 75
| 0.700495
|
e0d55f609651bdcecc4588fb05d14b61c0b036f1
| 13,807
|
py
|
Python
|
codes/traingradient.py
|
AlphaAtlas/BasicSR
|
e8fec2f11702e4928d3ce488caf4b897c8023efd
|
[
"Apache-2.0"
] | null | null | null |
codes/traingradient.py
|
AlphaAtlas/BasicSR
|
e8fec2f11702e4928d3ce488caf4b897c8023efd
|
[
"Apache-2.0"
] | null | null | null |
codes/traingradient.py
|
AlphaAtlas/BasicSR
|
e8fec2f11702e4928d3ce488caf4b897c8023efd
|
[
"Apache-2.0"
] | null | null | null |
import os.path
import sys
import math
import argparse
import time
import random
import numpy as np
from collections import OrderedDict
import logging
import glob
import shutil
import torch
import options.options as option
from utils import util
from data import create_dataloader, create_dataset
from models import create_model
from models.modules.LPIPS import compute_dists as lpips
drivebackup = "/storage/TrainingBackup/"
experiments = "/content/BasicSR/experiments/"
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
#Keeps gdrive from filling up, and writes the save state to the json
def movebackups(files, jsondir):
print("Backing up...")
allfiles = set(glob.glob(os.path.join(experiments, "**/*.*"), recursive = True))
logs = set(glob.glob(os.path.join(experiments, "**/*.log"), recursive = True)) | set(glob.glob(os.path.join(experiments, "**/*.png"), recursive = True))
newfiles = (allfiles - files) | logs
shutil.rmtree(drivebackup)
os.makedirs(drivebackup, exist_ok = True)
for d in newfiles:
dest = os.path.join(drivebackup, d.replace(experiments, ""))
os.makedirs(os.path.dirname(dest), exist_ok = True)
shutil.copy2(d, dest)
#Try to write .state dir to the json
with open(jsondir, "r") as f:
contents = f.readlines()
for i in range(len(contents)):
if "resume_state" in contents[i]:
rstate = None
for n in newfiles:
if ".state" in n:
rstate = n
break
if rstate is not None:
print("writing resume state to json...")
contents[i] = r''', "resume_state": "''' + rstate + '"\n'
else:
print("No backup state found!")
break
with open(jsondir, "w") as f:
contents = "".join(contents)
f.write(contents)
files = allfiles
print("Backed up!")
return files
def get_pytorch_ver():
#print(torch.__version__)
pytorch_ver = torch.__version__
if pytorch_ver == "0.4.0":
return "pre"
elif pytorch_ver == "0.4.1":
return "pre"
elif pytorch_ver == "1.0.0":
return "pre"
else: #"1.1.0", "1.1.1", "1.2.0", "1.2.1" and beyond
return "post"
def main():
# options
os.makedirs(drivebackup, exist_ok = True)
os.makedirs(experiments, exist_ok = True)
files = set(glob.glob(os.path.join(experiments, "**/*.*"), recursive = True))
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=True)
jsondir = parser.parse_args().opt
#json_directory = "/content/gdrive/My Drive/Avatar/train_test.json" #@param {type:"string"}
#opt = option.parse(json_directory, is_train=True)
opt = option.dict_to_nonedict(opt) # Convert to NoneDict, which return None for missing key.
pytorch_ver = get_pytorch_ver()
# train from scratch OR resume training
if opt['path']['resume_state']:
if os.path.isdir(opt['path']['resume_state']):
resume_state_path = util.sorted_nicely(glob.glob(os.path.normpath(opt['path']['resume_state']) + '/*.state'))[-1]
else:
resume_state_path = opt['path']['resume_state']
resume_state = torch.load(resume_state_path)
else: # training from scratch
resume_state = None
util.mkdir_and_rename(opt['path']['experiments_root']) # rename old folder if exists
util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root'
and 'pretrain_model' not in key and 'resume' not in key))
# config loggers. Before it, the log will not work
util.setup_logger(None, opt['path']['log'], 'train', level=logging.INFO, screen=True)
util.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)
logger = logging.getLogger('base')
if resume_state:
logger.info('Set [resume_state] to ' + resume_state_path)
logger.info('Resuming training from epoch: {}, iter: {}.'.format(
resume_state['epoch'], resume_state['iter']))
option.check_resume(opt) # check resume options
logger.info(option.dict2str(opt))
# tensorboard logger
if opt['use_tb_logger'] and 'debug' not in opt['name']:
from tensorboardX import SummaryWriter
try:
tb_logger = SummaryWriter(logdir='../tb_logger/' + opt['name']) #for version tensorboardX >= 1.7
except:
tb_logger = SummaryWriter(log_dir='../tb_logger/' + opt['name']) #for version tensorboardX < 1.6
# random seed
seed = opt['train']['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
logger.info('Random seed: {}'.format(seed))
util.set_random_seed(seed)
    torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# create train and val dataloader
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
train_set = create_dataset(dataset_opt)
train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
len(train_set), train_size))
total_iters = int(opt['train']['niter'])
total_epochs = int(math.ceil(total_iters / train_size))
logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
total_epochs, total_iters))
train_loader = create_dataloader(train_set, dataset_opt)
elif phase == 'val':
val_set = create_dataset(dataset_opt)
val_loader = create_dataloader(val_set, dataset_opt)
logger.info('Number of val images in [{:s}]: {:d}'.format(dataset_opt['name'],
len(val_set)))
else:
raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
assert train_loader is not None
# create model
model = create_model(opt)
# resume training
if resume_state:
start_epoch = resume_state['epoch']
current_step = resume_state['iter']
model.resume_training(resume_state) # handle optimizers and schedulers
model.update_schedulers(opt['train']) # updated schedulers in case JSON configuration has changed
else:
current_step = 0
start_epoch = 0
# training
logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
for epoch in range(start_epoch, total_epochs):
for n, train_data in enumerate(train_loader,start=1):
current_step += 1
if current_step > total_iters:
break
if pytorch_ver=="pre": #Order for PyTorch ver < 1.1.0
# update learning rate
model.update_learning_rate(current_step-1)
# training
model.feed_data(train_data)
model.optimize_parameters(current_step)
elif pytorch_ver=="post": #Order for PyTorch ver > 1.1.0
# training
model.feed_data(train_data)
model.optimize_parameters(current_step)
# update learning rate
model.update_learning_rate(current_step-1)
else:
print('Error identifying PyTorch version. ', torch.__version__)
break
# log
if current_step % opt['logger']['print_freq'] == 0:
logs = model.get_current_log()
message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
epoch, current_step, model.get_current_learning_rate())
for k, v in logs.items():
message += '{:s}: {:.4e} '.format(k, v)
# tensorboard logger
if opt['use_tb_logger'] and 'debug' not in opt['name']:
tb_logger.add_scalar(k, v, current_step)
logger.info(message)
# save models and training states (changed to save models before validation)
if current_step % opt['logger']['save_checkpoint_freq'] == 0:
model.save(current_step)
model.save_training_state(epoch + (n >= len(train_loader)), current_step)
logger.info('Models and training states saved.')
#files = movebackups(files, jsondir)
# validation
if current_step % opt['train']['val_freq'] == 0:
avg_psnr = 0.0
avg_ssim = 0.0
avg_lpips = 0.0
idx = 0
val_sr_imgs_list = []
val_gt_imgs_list = []
for val_data in val_loader:
idx += 1
img_name = os.path.splitext(os.path.basename(val_data['LR_path'][0]))[0]
img_dir = os.path.join(opt['path']['val_images'], img_name)
util.mkdir(img_dir)
model.feed_data(val_data)
model.test()
visuals = model.get_current_visuals()
if opt['datasets']['train']['znorm']: # If the image range is [-1,1]
sr_img = util.tensor2img(visuals['SR'],min_max=(-1, 1)) # uint8
gt_img = util.tensor2img(visuals['HR'],min_max=(-1, 1)) # uint8
else: # Default: Image range is [0,1]
sr_img = util.tensor2img(visuals['SR']) # uint8
gt_img = util.tensor2img(visuals['HR']) # uint8
# sr_img = util.tensor2img(visuals['SR']) # uint8
# gt_img = util.tensor2img(visuals['HR']) # uint8
# print("Min. SR value:",sr_img.min()) # Debug
# print("Max. SR value:",sr_img.max()) # Debug
# print("Min. GT value:",gt_img.min()) # Debug
# print("Max. GT value:",gt_img.max()) # Debug
# Save SR images for reference
save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(\
img_name, current_step))
util.save_img(sr_img, save_img_path)
# calculate PSNR, SSIM and LPIPS distance
crop_size = opt['scale']
gt_img = gt_img / 255.
sr_img = sr_img / 255.
# For training models with only one channel ndim==2, if RGB ndim==3, etc.
if gt_img.ndim == 2:
cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size]
else:
cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]
if sr_img.ndim == 2:
cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size]
else: # Default: RGB images
cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]
val_gt_imgs_list.append(cropped_gt_img) # If calculating only once for all images
val_sr_imgs_list.append(cropped_sr_img) # If calculating only once for all images
# LPIPS only works for RGB images
avg_psnr += util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)
avg_ssim += util.calculate_ssim(cropped_sr_img * 255, cropped_gt_img * 255)
#avg_lpips += lpips.calculate_lpips([cropped_sr_img], [cropped_gt_img]) # If calculating for each image
avg_psnr = avg_psnr / idx
avg_ssim = avg_ssim / idx
#avg_lpips = avg_lpips / idx # If calculating for each image
avg_lpips = lpips.calculate_lpips(val_sr_imgs_list,val_gt_imgs_list) # If calculating only once for all images
# log
# logger.info('# Validation # PSNR: {:.5g}, SSIM: {:.5g}'.format(avg_psnr, avg_ssim))
logger.info('# Validation # PSNR: {:.5g}, SSIM: {:.5g}, LPIPS: {:.5g}'.format(avg_psnr, avg_ssim, avg_lpips))
logger_val = logging.getLogger('val') # validation logger
# logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.5g}, ssim: {:.5g}'.format(
# epoch, current_step, avg_psnr, avg_ssim))
logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.5g}, ssim: {:.5g}, lpips: {:.5g}'.format(
epoch, current_step, avg_psnr, avg_ssim, avg_lpips))
# tensorboard logger
if opt['use_tb_logger'] and 'debug' not in opt['name']:
tb_logger.add_scalar('psnr', avg_psnr, current_step)
tb_logger.add_scalar('ssim', avg_ssim, current_step)
tb_logger.add_scalar('lpips', avg_lpips, current_step)
files = movebackups(files, jsondir)
logger.info('Saving the final model.')
model.save('latest')
logger.info('End of training.')
#os.mkdir("/content/gdrive/My Drive/FinalModel", exist_OK = True)
files = movebackups(files, jsondir)
if __name__ == '__main__':
main()
| 44.827922
| 156
| 0.567393
|
4700cd19f8d7d97309b3451a965a3725b2519d1c
| 1,303
|
py
|
Python
|
tests/unit_tests/conftest.py
|
asdf2014/superset
|
8e69b2db34bc45e14cf6a58153dabb66b7198e9c
|
[
"Apache-2.0"
] | 2
|
2020-03-24T08:44:10.000Z
|
2020-06-22T16:33:58.000Z
|
tests/unit_tests/conftest.py
|
asdf2014/superset
|
8e69b2db34bc45e14cf6a58153dabb66b7198e9c
|
[
"Apache-2.0"
] | 42
|
2021-04-14T08:17:46.000Z
|
2022-03-14T20:47:19.000Z
|
tests/unit_tests/conftest.py
|
asdf2014/superset
|
8e69b2db34bc45e14cf6a58153dabb66b7198e9c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from superset.app import SupersetApp
from superset.initialization import SupersetAppInitializer
@pytest.fixture
def app_context():
"""
A fixture for running the test inside an app context.
"""
app = SupersetApp(__name__)
app.config.from_object("superset.config")
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
app_initializer = app.config.get("APP_INITIALIZER", SupersetAppInitializer)(app)
app_initializer.init_app()
with app.app_context():
yield
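# A minimal sketch of a test that would consume the fixture above (the test
# name and assertion are hypothetical, not part of the actual suite):
#
#   def test_uses_in_memory_db(app_context):
#       from flask import current_app
#       assert current_app.config["SQLALCHEMY_DATABASE_URI"] == "sqlite://"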
| 33.410256
| 84
| 0.753645
|
df955af2395e422d9a65b434bb0516c345bced4b
| 241
|
py
|
Python
|
8/tests.py
|
remihuguet/aoc2020
|
c313c5b425dda92d949fd9ca4f18ff66f452794f
|
[
"MIT"
] | null | null | null |
8/tests.py
|
remihuguet/aoc2020
|
c313c5b425dda92d949fd9ca4f18ff66f452794f
|
[
"MIT"
] | null | null | null |
8/tests.py
|
remihuguet/aoc2020
|
c313c5b425dda92d949fd9ca4f18ff66f452794f
|
[
"MIT"
] | null | null | null |
import handeld
def test_accumulator_value_before_second_instruction():
    assert 5 == handeld.compute_accumulator_value('8/test_input.txt')
def test_compute_final_value():
    assert 8 == handeld.compute_final_value_debugged_code('8/test_input.txt')
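# These are pytest-style tests; assuming pytest is installed, they can be run
# from the repository root with, for example:
#
#   pytest 8/tests.py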
| 24.1
| 70
| 0.80083
|
a43b2101923d390246a725c366f87a5d5d44e15e
| 2,361
|
py
|
Python
|
train4.py
|
Alvinhech/resnet-autoencoder
|
cdcaab6c6c9792f76f46190c2b6407a28702f7af
|
[
"MIT"
] | 18
|
2019-01-18T20:18:33.000Z
|
2022-03-25T16:02:16.000Z
|
train4.py
|
Alvinhech/resnet-autoencoder
|
cdcaab6c6c9792f76f46190c2b6407a28702f7af
|
[
"MIT"
] | null | null | null |
train4.py
|
Alvinhech/resnet-autoencoder
|
cdcaab6c6c9792f76f46190c2b6407a28702f7af
|
[
"MIT"
] | 3
|
2018-12-12T23:44:42.000Z
|
2020-06-12T03:47:46.000Z
|
import torch
from torch import nn
from autoencoder4 import ResNet_autoencoder, Bottleneck, DeconvBottleneck
from coco import load_dataset
import matplotlib.pyplot as plt
from torch.autograd import Variable
EPOCH = 10
if __name__ == "__main__":
model = ResNet_autoencoder(Bottleneck, DeconvBottleneck, [
3, 4, 6, 3], 3).cuda()
# load data
print("start loading.")
dataloader = load_dataset('/home/achhe_ucdavis_edu/resnet-autoencoder/data')
print("load data success.")
'''
load pre_trained_model
'''
pretrained_dict = torch.load('./resnet50-19c8e357.pth')
print("load pretrained model success")
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
# fix encoder
fix_length = len(pretrained_dict.keys())
all_length = len(model.state_dict().keys())
for idx, k in enumerate(model_dict.keys()):
if idx < fix_length:
model.state_dict()[k].requires_grad = False
params = filter(lambda p: p.requires_grad, model.parameters())
# Loss and Optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(params, lr=1e-4)
model.train()
loss_list=[]
print("start training.")
for epoch in range(EPOCH):
for batch_idx, (image, target) in enumerate(dataloader):
image = Variable(image.cuda())
# Forward + Backward + Optimize
optimizer.zero_grad()
tmp1, tmp2, tmp3 = model(image)
loss1 = criterion(tmp2,image.detach())
loss2 = criterion(tmp3,tmp1.detach())
loss = loss1 + loss2
loss.backward()
optimizer.step()
if (batch_idx+1) % 10 == 0:
print ("Epoch [%d/%d], Iter [%d] Loss: %.4f" % (epoch+1, EPOCH, batch_idx+1, loss.data[0]))
'''
loss_list.append(loss)
plt.plot(loss_list)
                plt.ylabel('loss')
plt.show()
'''
if((batch_idx+1)%1000==0):
torch.save(model.state_dict(), './save4/resnet'+str(epoch+1)+'_'+str(batch_idx+1)+'.pkl')
| 29.148148
| 107
| 0.604828
|
c6d840d1d13354689350fc5c962e64ad137d5fe2
| 58
|
py
|
Python
|
template/license.py
|
ptomulik/numbo
|
571f6eff37f97147afa21577354028f99c141ec8
|
[
"Unlicense"
] | null | null | null |
template/license.py
|
ptomulik/numbo
|
571f6eff37f97147afa21577354028f99c141ec8
|
[
"Unlicense"
] | null | null | null |
template/license.py
|
ptomulik/numbo
|
571f6eff37f97147afa21577354028f99c141ec8
|
[
"Unlicense"
] | null | null | null |
# @COPYRIGHT@
# Licensed under MIT license (LICENSE.txt).
| 19.333333
| 43
| 0.724138
|
0262c126789326b9a47d1e3a807ec81798055bbd
| 7,727
|
py
|
Python
|
main.py
|
HashimMufti/conwayGOL
|
2358ace415fdbded99fd1d51a2d222d57a90b0d1
|
[
"MIT"
] | 1
|
2022-03-13T15:14:13.000Z
|
2022-03-13T15:14:13.000Z
|
main.py
|
HashimMufti/conwayGOL
|
2358ace415fdbded99fd1d51a2d222d57a90b0d1
|
[
"MIT"
] | null | null | null |
main.py
|
HashimMufti/conwayGOL
|
2358ace415fdbded99fd1d51a2d222d57a90b0d1
|
[
"MIT"
] | null | null | null |
import random as rand
import copy
import os
import sys
import getopt
def game_of_life(some_board):
"""Play a single iteration of Conway's Game of Life on a board.
Args:
some_board (List of lists of strings): List of lists containing the ALIVE/DEAD variable.
Returns:
[List of lists of strings]: List of lists containing the updated ALIVE/DEAD variables.
"""
r_index = -1
duplicate_board = copy.deepcopy(some_board)
for rows in some_board:
r_index += 1
c_index = -1
for columns in rows:
c_index += 1
neighbors = get_neighbors(r_index, c_index)
lives = get_lives(neighbors, some_board, len(
some_board), len(some_board[0]))
duplicate_board = rules(
r_index, c_index, lives, some_board, duplicate_board)
return duplicate_board
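# A quick illustration (not part of the original logic): a vertical "blinker"
# turns horizontal after one call to game_of_life, using the ALIVE/DEAD markers
# defined near the end of this module.
#
#   before = [[DEAD, ALIVE, DEAD],
#             [DEAD, ALIVE, DEAD],
#             [DEAD, ALIVE, DEAD]]
#   after = game_of_life(before)
#   # after == [[DEAD, DEAD, DEAD],
#   #           [ALIVE, ALIVE, ALIVE],
#   #           [DEAD, DEAD, DEAD]]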
def rules(r_index, c_index, lives, some_board, duplicate_board):
"""Apply Conway's Rules to a board
Args:
r_index (int): Current row index
c_index (int): Current column index
lives (int): Number of ALIVE cells around current position
some_board (List of lists of strings): Board used to determine rule
duplicate_board (List of lists of strings): Board used to apply rule
Returns:
[List of lists of strings]: Board used to apply rule (modified board)
"""
if some_board[r_index][c_index] == ALIVE:
if lives < 2 or lives > 3:
duplicate_board[r_index][c_index] = DEAD
else:
if lives == 3:
duplicate_board[r_index][c_index] = ALIVE
return duplicate_board
def get_lives(neighbors, some_board, rows, columns):
"""Get all the ALIVE cells around current position
Args:
        neighbors (List of lists of integers): List of [row, column] positions for each neighbor
some_board (List of lists of strings): Board used to find neighbors
rows (int): Current row
columns (int)): Current column
Returns:
[int]: Number of alive cells around current position
"""
alive = 0
for neighbor in neighbors:
if valid(neighbor, rows, columns) and some_board[neighbor[0]][neighbor[1]] == ALIVE:
alive += 1
return alive
def valid(neighbor, rows, columns):
"""Find out if neighbor cell is valid
Args:
neighbor (List of integers): Neighboring cell position
rows (int): Number of rows on the board
columns (int): Number of columns on the board
Returns:
[boolean]: True if valid, False otherwise
"""
if neighbor[0] < 0 or neighbor[1] < 0:
return False
if neighbor[0] >= rows or neighbor[1] >= columns:
return False
return True
def get_neighbors(r_index, c_index):
"""Get neighboring cell positions
Args:
r_index (int): Current row index
        c_index (int): Current column index
Returns:
[List of list of integers]: List of neighbors with each neighbor containing a list of their row and column index.
"""
neighbors = []
for x in range(-1, 2, 1):
for y in range(-1, 2, 1):
if x != 0 or y != 0:
neighbors.append([r_index + x, c_index + y])
return neighbors
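# For example (illustrative only), get_neighbors(1, 1) returns the eight
# surrounding positions; positions that fall outside the board are filtered
# out later by valid():
#
#   [[0, 0], [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1], [2, 2]]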
def generate_board(r_size, c_size):
"""Generate board of row size, column size
Args:
r_size (int): Number of rows to generate
c_size (int): Number of columns to generate
Returns:
[List of lists of string]: Board that can be used to play Conway's GOL
"""
board = []
for x in range(0, r_size):
row = []
for y in range(0, c_size):
row.append(DEAD)
board.append(row)
return board
def pretty_print(board):
"""Pretty print a board
Args:
board (List of lists of string): Board to be printed!
"""
for row in board:
for char in row:
print(*char, end='')
print("\n", end='')
def random_alive(board):
"""Randomly make a subset of the board ALIVE!
Args:
board (List of lists of string): Board to modify
Returns:
[List of lists of string]: Modified board
"""
r_index = -1
for rows in board:
c_index = -1
for columns in rows:
c_index += 1
if random_number(1) > 0:
board[r_index][c_index] = ALIVE
r_index += 1
return board
def random_number(n):
"""Generate a random number from 0 to n
Args:
n (int): Limit of number to be generated (inclusive)
Returns:
[int]: Random generated number
"""
return rand.randint(0, n)
def run_game(row, column, times):
"""Run the game a certain number of times for a board of size row, column,
Args:
row (int): Number of rows on the board
column (int): Number of columns on the board
times (int): Number of times to be run
"""
board = generate_board(row, column)
board = random_alive(board)
pretty_print(board)
for time in range(0, times):
print("Iteration: " + str(time))
board = game_of_life(board)
pretty_print(board)
def custom_game(board, times):
"""Run the game on your custom board
Args:
board (List of lists of lists of lists of strings): Board to run the game on
times (int): How many times to run the game
"""
pretty_print(board)
for time in range(0, times):
print("Iteration: " + str(time))
board = game_of_life(board)
pretty_print(board)
def clear():
"""Clear the terminal
"""
os.system('cls' if os.name ==
'nt' else 'clear') # Nasty way to do this, find alternative
def stream_game(row, column):
"""Play the game an infinite number of times for a row, column
Args:
row (int): Number of rows on the board
column (int): Number of columns on the board
"""
# Depending on game and size, can cause flashing, find other way to stream
clear()
board = generate_board(row, column)
board = random_alive(board)
pretty_print(board)
while True:
board = game_of_life(board)
clear()
pretty_print(board)
def main(argv):
"""Parse command line arguments and run the game accordingly
Args:
argv (Command line arguments): Passed command line arguments
"""
# This is janky, better ways to do this!
try:
opts, args = getopt.getopt(argv, "hi:r:c:", ["rows=", "columns="])
except getopt.GetoptError as err:
print(err)
print('Error! Invalid option, please use:')
print('main.py -h')
sys.exit(2)
if opts == []:
print('Error! No options selected, please use:')
print('main.py -h')
sys.exit(2)
row = None
column = None
for opt, arg in opts:
if opt == '-h':
print('Format:')
print('main.py -r <int rows> -c <int columns>')
sys.exit()
elif opt in ("-r", "--rows"):
row = arg
elif opt in ("-c", "--columns"):
column = arg
try:
stream_game(int(row), int(column))
except Exception: # TODO: Handle specific exception
print('Error! Invalid row/column, please use:')
print('main.py -h')
sys.exit()
# Change these to change board visual
ALIVE = u"\u2591"
DEAD = u"\u2588"
if __name__ == "__main__":
main(sys.argv[1:])
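# Example run (illustrative only): stream a random 8x8 board until interrupted:
#
#   python main.py -r 8 -c 8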
| 28.512915
| 122
| 0.579009
|
5a2f3cbfd0aa3dca3120cbe5e4077582e4d56b25
| 3,059
|
py
|
Python
|
temperature_image.py
|
SeanCline/UnicornHatWeather
|
76f555fededfe72537e8b94e7fad5bfcb4441ce9
|
[
"MIT"
] | 3
|
2020-12-12T19:14:08.000Z
|
2022-03-08T21:48:21.000Z
|
temperature_image.py
|
SeanCline/UnicornHatWeather
|
76f555fededfe72537e8b94e7fad5bfcb4441ce9
|
[
"MIT"
] | null | null | null |
temperature_image.py
|
SeanCline/UnicornHatWeather
|
76f555fededfe72537e8b94e7fad5bfcb4441ce9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import config
from PIL import Image, ImageChops
import colorsys
# Returns an image representing a given character.
def open_char_image(character : str):
if len(character) != 1:
raise RuntimeError('Not a character.')
return Image.open(f'characters/{ord(character)}.gif').convert('RGB')
# Draws a string of fixed width characters on to an image at the given location.
def draw_msg_on_image(base_img, msg : str, x : int, y : int, padding : int = 1):
img = base_img.copy()
for c in msg:
char_img = open_char_image(c)
img.paste(char_img, (x, y))
x += char_img.size[0] + padding
return img
# Saturate a value to a given min or max.
def clamp(val, low, high):
return max(min(val, high), low)
# Linear interpolate a value x from one range of values into another.
def interpolate(x : float, src_range, dest_range):
    frac = (x - src_range[0]) / (src_range[1] - src_range[0])
return dest_range[0] + frac * (dest_range[1] - dest_range[0])
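# For example (illustrative only), a value halfway through the source range
# maps to the midpoint of the destination range:
#
#   interpolate(50, (0, 100), (0.0, 0.68))  # -> 0.34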
# Converts a Fahrenheit temperature into an associated RGBA colour. Blue for cold. Red for hot.
def tempertature_to_color(temperature : float):
temp = clamp(temperature, config.cold_temperature, config.hot_tempertature) # Out of range should stay blue or red.
hue = interpolate(temp, (config.cold_temperature, config.hot_tempertature), (0, .68)) # Convert temp to hue.
color = colorsys.hsv_to_rgb(1-hue, 1.0, 1.0) # Convert hue to RGB.
return (int(color[0]*255), int(color[1]*255), int(color[2]*255), 255)
# Tints an image with a given colour value.
def apply_color_filter_to_image(base_img, color):
filter_img = Image.new('RGB', (base_img.width, base_img.height), color)
return ImageChops.multiply(base_img, filter_img)
# Creates an 8x8 image with the current temperature on it.
def create_temperature_image(temperature : int):
temp_str = str(temperature).zfill(2)
    # If the string is too long to draw, then show a different icon.
if len(temp_str) > 2:
img = Image.open('icons/cold.gif') if (temperature < 0) else Image.open('icons/hot.gif')
return img.convert('RGB')
# Draw the temperature on top of the base image.
img = Image.open('icons/degree_background.gif').convert('RGB')
img = draw_msg_on_image(img, temp_str, 0, 3)
img = apply_color_filter_to_image(img, tempertature_to_color(temperature))
return img
if __name__ == "__main__":
# Run through a demonstration of how various colors render.
create_temperature_image(-10).show()
create_temperature_image(-9).show()
create_temperature_image(0).show()
create_temperature_image(10).show()
create_temperature_image(20).show()
create_temperature_image(30).show()
create_temperature_image(40).show()
create_temperature_image(50).show()
create_temperature_image(60).show()
create_temperature_image(70).show()
create_temperature_image(80).show()
create_temperature_image(90).show()
create_temperature_image(99).show()
create_temperature_image(100).show()
| 37.765432
| 119
| 0.708728
|
6bcdb8688cf6a8012ddbffee38b54dc19f234299
| 1,606
|
py
|
Python
|
packages/python/plotly/plotly/validators/isosurface/colorbar/title/font/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 3
|
2020-02-04T21:39:20.000Z
|
2020-11-17T19:07:07.000Z
|
packages/python/plotly/plotly/validators/isosurface/colorbar/title/font/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 12
|
2020-06-06T01:22:26.000Z
|
2022-03-12T00:13:42.000Z
|
packages/python/plotly/plotly/validators/isosurface/colorbar/title/font/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 17
|
2019-11-21T14:11:29.000Z
|
2019-11-21T15:26:23.000Z
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="isosurface.colorbar.title.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="isosurface.colorbar.title.font",
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="isosurface.colorbar.title.font",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 28.678571
| 88
| 0.608966
|
5123fc417b32288f94de14279cb6f7e8966b00d0
| 35,537
|
py
|
Python
|
tensorflow/python/debug/lib/stepper.py
|
imdone/tensorflow
|
bb4d1ef3861c83627ee9586b85ac3070a7d38335
|
[
"Apache-2.0"
] | 1
|
2021-04-16T14:53:22.000Z
|
2021-04-16T14:53:22.000Z
|
tensorflow/python/debug/lib/stepper.py
|
imdone/tensorflow
|
bb4d1ef3861c83627ee9586b85ac3070a7d38335
|
[
"Apache-2.0"
] | 10
|
2018-02-04T18:41:52.000Z
|
2018-05-02T09:00:46.000Z
|
tensorflow/python/debug/lib/stepper.py
|
imdone/tensorflow
|
bb4d1ef3861c83627ee9586b85ac3070a7d38335
|
[
"Apache-2.0"
] | 4
|
2018-01-17T14:22:49.000Z
|
2018-02-27T15:06:41.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger (tfdbg) Stepper Module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
import tempfile
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
# TODO (cais): Use nest.flatten once it handles nest Dicts correctly. id:3120
# https://github.com/imdone/tensorflow/issues/3119
def _flatten_fetches(fetches):
"""Flatten list, tuple of fetches, or a single fetch into a list of fetches.
Args:
fetches: The fetches to flatten: Can be a single Tensor, Op, or a
potentially nested list, tuple or dict of such individual fetches.
Returns:
The fetches flattened to a list.
"""
flattened = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
flattened.extend(_flatten_fetches(fetch))
elif isinstance(fetches, dict):
for key in fetches:
flattened.extend(_flatten_fetches(fetches[key]))
else:
flattened.append(fetches)
return flattened
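# For example (illustrative only; a, b and c stand for arbitrary fetchable
# graph elements), nested fetch structures are flattened into a single list:
#
#   _flatten_fetches([a, (b, {"key": c})])  # -> [a, b, c]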
class NodeStepper(object):
"""TensorFlow Debugger (tfdbg) stepper.
  The stepper provides the ability to perform "continue to" actions on a graph,
given fetch and feeds. The stepper calculates the transitive closure of the
fetch. cont() (continue to) calls can only be performed on members of the
transitive closure.
On a cont() call, the stepper performs depth-first tracing of the input
tree of the target. When it reaches an input where one of the following is
available, it will supply the available value to the feed_dict of the cont()
call:
(1) Overriding (injected) values from the client.
(2) TensorHandles from previous cont() calls.
(3) Dumped intermediate Tensors from previous cont() calls.
(4) Feeds supplied during the construction of the stepper instance.
During the cont() call, intermediate Tensors are dumped to temporary
directories. The dumped Tensor values will be used in subsequent cont() calls
when they are required as data dependencies.
  The temporary directories are automatically cleaned up when the NodeStepper
instance exits as a context manager.
Once the tracing is complete, it will issue a run() call on the
underlying session, using the aforementioned feed_dict prepared by the input
tracing, to achieve the "continue-to" action. The above process takes into
account whether the transitive closure of an input contains Variables that
are updated during previous cont() calls on this stepper instance. If such
updates exist, we say the transitive closure is "dirty" and the stepper
can restore the "clean" state of the Variable and avoid using the
TensorHandle.
Example of basic usage:
a = tf.Variable(1.0, name="a")
  b = tf.Variable(2.0, name="b")
c = tf.add(a, b, name="c")
d = tf.multiply(a, c, name="d")
sess = tf.Session()
  sess.run(tf.initialize_all_variables())
stepper = NodeStepper(sess, d)
stepper.cont(c) # Caches the handle to Tensor c:0.
stepper.cont(d) # Uses handle to Tensor c:0, avoiding recomputing c.
"""
# Possible types of feed used during cont() calls.
FEED_TYPE_CLIENT = "client"
FEED_TYPE_HANDLE = "handle"
FEED_TYPE_OVERRIDE = "override"
FEED_TYPE_DUMPED_INTERMEDIATE = "dumped_intermediate"
def __init__(self, sess, fetches, feed_dict=None):
"""Constructor for Debugger.
Args:
sess: (Session) the TensorFlow Session to step in.
fetches: Same as the fetches input argument to `Session.run()`.
feed_dict: Same as the feed_dict input argument to `Session.run()`.
"""
self._sess = sess
self._fetches = fetches
flattened_fetches = _flatten_fetches(fetches)
self._fetch_names, self._fetch_list = self._get_fetch_and_name_lists(
flattened_fetches)
# A map from Variable name to initializer op.
self._variable_initializers = {}
# A map from Variable name to initial value, used when overriding or
# restoring Variable values.
self._variable_initial_values = {}
# Initialize the map for output recipients (targets).
self._output_targets = {}
# Sorted transitive closure of the fetched node.
# We also collect the list of the names of the reference-type Tensors,
# because we later need to avoid using intermediate dumps for such Tensors.
(self._sorted_nodes,
self._closure_elements,
self._ref_tensor_names) = self._dfs_visit(self._sess.graph,
self._fetch_list)
self._transitive_closure_set = set(self._sorted_nodes)
# A map from Variable name to the old values (before any cont() calls).
self._cached_variable_values = {}
# A cache map from tensor name to what variables may invalidate the tensor
self._cached_invalidation_path = {}
# Keep track of which variables are in a dirty state.
self._dirty_variables = set()
# Variables updated in the last cont() call.
self._last_updated = None
# Cached tensor handles: a dict with keys as tensor names and values as
# tensor handles.
self._tensor_handles = {}
# Cached intermediate tensor values: a dict mapping tensor names to
# DebugTensorDatum.
self._dumped_intermediate_tensors = {}
self._dump_session_root = tempfile.mkdtemp(prefix="tfdbg_stepper_")
# Feed dict from the client.
self._client_feed_dict = {}
if feed_dict:
for key in feed_dict:
if isinstance(key, ops.Tensor):
self._client_feed_dict[key.name] = feed_dict[key]
else:
self._client_feed_dict[key] = feed_dict[key]
# Overriding tensor values.
self._override_tensors = {}
# What the feed types were used by the last cont() call.
self._last_feed_types = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if os.path.isdir(self._dump_session_root):
shutil.rmtree(self._dump_session_root)
def _get_fetch_and_name_lists(self, flattened_fetches):
"""Get the lists of fetches and their names.
Args:
flattened_fetches: A list of fetches or their names. Can mix fetches and
names.
Returns:
(list of str): A list of the names of the fetches.
(list): A list of the fetches.
"""
fetch_names = []
fetch_list = []
for fetch in flattened_fetches:
if isinstance(fetch, six.string_types):
fetch_names.append(fetch)
fetch_list.append(self._sess.graph.as_graph_element(fetch))
else:
fetch_names.append(fetch.name)
fetch_list.append(fetch)
return fetch_names, fetch_list
def _dfs_visit(self, graph, elem_list):
"""Trace back the input of a graph element, using depth-first search.
Uses non-recursive implementation to prevent stack overflow for deep
graphs.
Also performs the following action(s):
1) When encountering a Variable, obtain its initializer op, to
facilitate possible subsequent restoration / overriding of variable
value.
Args:
graph: A TF graph instance.
elem_list: list of graph elements: a Tensor or an Operation.
Returns:
(list of str) A topologically-sorted list of all nodes (not tensors)
in the transitive closure of elem_list. Obviously, the topological sort
is not unique in general. The return value here is just an arbitrary
one of potentially many possible topological sorts.
(list of str) A list of all graph elements (nodes and/or tensors) in the
transitive closure.
"""
# These set should hold only strings, i.e, names of the nodes.
done = set() # Keep track of visited graph elements.
# A list of str: Names of the topologically-sorted graph elements.
node_inputs = dict() # New: Input map of nodes in the transitive closure.
elem_stack = copy.copy(elem_list)
# Graph elements in the transitive closure, including the nodes and tensors.
closure_elements = [elem.name for elem in elem_list]
ref_tensor_names = set()
for element in elem_list:
if isinstance(element, ops.Tensor) and element.dtype._is_ref_dtype: # pylint: disable=protected-access
ref_tensor_names.add(element.name)
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
if curr_node.name not in node_inputs:
all_input_nodes = set()
for inp in all_inputs:
all_input_nodes.add(self._get_node(inp).name)
node_inputs[curr_node.name] = all_input_nodes
# Iterate through the (non-control) inputs.
for inp in all_inputs:
# Set up the non-control output map.
# if is_non_control_input:
if inp.name not in self._output_targets:
self._output_targets[inp.name] = set([curr_elem.name])
else:
self._output_targets[inp.name].add(curr_elem.name)
if (isinstance(inp, ops.Tensor) and
inp.op.type in ["Variable", "VariableV2"] and
inp.name not in self._variable_initializers):
# Obtain the initializer op of the variable, in case the Variable's
# value needs to be restored later.
initializer = graph.as_graph_element(inp.op.name + "/Assign")
self._variable_initializers[inp.name] = initializer
self._variable_initial_values[inp.name] = initializer.inputs[1]
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
closure_elements.append(inp.name)
if isinstance(inp, ops.Tensor) and inp.dtype._is_ref_dtype: # pylint: disable=protected-access
ref_tensor_names.add(inp.name)
# Now that we have traversed the transitive closure and obtained the
# node-input map, we can topologically sort them.
sorted_nodes = []
stack = []
for node in node_inputs:
if not node_inputs[node]:
stack.append(node)
for node in stack:
del node_inputs[node]
while stack:
curr_node = stack.pop()
sorted_nodes.append(curr_node)
# Iterate through the node-input map and remove the child.
pushes = []
for node in node_inputs:
if curr_node in node_inputs[node]:
node_inputs[node].remove(curr_node)
if not node_inputs[node]:
pushes.append(node)
# Delete new pushes from node-input map.
for node in pushes:
del node_inputs[node]
stack.extend(pushes)
return sorted_nodes, closure_elements, ref_tensor_names
def sorted_nodes(self):
"""Get a topologically-sorted list of node names of the stepper.
These are the names of the nodes (i.e., not Tensors) in the transitive
closure of the stepper, in a topologically-sorted order.
Returns:
(list of str): Sorted transitive inputs to the fetch of the stepper
instance. The fetch itself is included in the list.
"""
return self._sorted_nodes
def closure_elements(self):
"""Get a name list of the graph elements of the stepper.
Returns:
(list of str): names of the graph elements (i.e., nodes and tensors) in
the transitive closure of the stepper, in a random order.
"""
return self._closure_elements
def output_slots_in_closure(self, node_name):
"""Get the output tensors in the transitive closure from node.
Args:
node_name: (str) Name of the node in question.
Returns:
(list of int) Output slots of the output tensors of the node that are in
the transitive closure of the stepper.
"""
node = self._sess.graph.as_graph_element(node_name)
tensor_slots = []
for i, _ in enumerate(node.outputs):
tensor_name = node_name + ":%d" % i
if tensor_name in self._closure_elements:
tensor_slots.append(i)
return tensor_slots
def is_feedable(self, name):
"""Determine if a graph element if feedable.
Args:
name: (str) name of the graph element (Tensor or Operation)
Returns:
(bool) whether the graph element is feedable.
"""
if not isinstance(name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(name))
elem = self._sess.graph.as_graph_element(name)
return self._sess.graph.is_feedable(elem)
def override_tensor(self, tensor_name, overriding_val):
"""Override the value of a tensor.
Args:
tensor_name: (str) Name of the tensor to override.
overriding_val: (numpy.ndarray) Overriding tensor value.
Raises:
ValueError: If tensor_name does not correspond to a tensor in the input
tree to the fetched graph element of this stepper instance.
"""
if not isinstance(tensor_name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(tensor_name))
node_name = self._get_node_name(tensor_name)
if node_name not in self._transitive_closure_set:
raise ValueError(
"Cannot override tensor \"%s\" because it does not exist in the "
"input tree to the fetch \"%s\"" %
(tensor_name, repr(self._fetch_names)))
self._override_tensors[tensor_name] = overriding_val
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def remove_override(self, tensor_name):
"""Remove the overriding value on a tensor.
Args:
tensor_name: (str) name of the tensor to remove the overriding value
from.
Raises:
ValueError: If no overriding value exists for tensor_name.
"""
if tensor_name not in self._override_tensors:
raise ValueError("No overriding value exists for tensor \"%s\"." %
tensor_name)
del self._override_tensors[tensor_name]
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def last_feed_types(self):
"""Obtain information about the feed in the last cont() call.
Returns:
(dict) A dict mapping tensor names to feed types.
"""
return self._last_feed_types
def cont(self,
target,
use_tensor_handles=True,
use_dumped_intermediates=True,
use_overrides=True,
invalidate_from_updated_variables=False,
restore_variable_values=False):
"""Continue till the completion of the specified target tensor.
Args:
target: A single fetched Tensor or Op, or a name (str) representing the
Tensor or Op. In the case of a name str, the graph will be searched
to find the corresponding Tensor or Op.
# TODO (cais): Support multiple fetches as in Session.run() interface. id:3614
# https://github.com/imdone/tensorflow/issues/3613
use_tensor_handles: (bool) Whether this cont() run will use cached tensor
handles to avoid recomputation. Default: True.
use_dumped_intermediates: (bool) Whether this cont() call will use dumped
intermediate tensors to avoid recomputation.
use_overrides: (bool) Whether the overriding tensor values supplied by
the client are to be used in this cont() call. Default: True.
invalidate_from_updated_variables: (bool) Whether to invalidate the
tensor handles and intermediate tensor handles affected by the
Variable updates that happen in this cont() call.
restore_variable_values: (bool) Whether the old values of the variables
(before any cont() calls in this object) are to be restored.
Returns:
Value from Session.run() of the target.
Raises:
ValueError: If the target is specified as a string and the string does
not correspond to any tensors in the Session graph.
Or if the target of this cont() is not in the input list of the Stepper
object's target.
Or if target is a Placeholder.
"""
self._last_feed_types = {}
if isinstance(target, six.string_types):
# Fetch target is a string. Assume it is the name of the Tensor or Op and
# will attempt to find it in the Session's graph.
target_name = target
else:
target_name = target.name
graph_element = self._sess.graph.as_graph_element(target_name)
# Any additional tensor handles to obtain in this cont() action.
additional_handle_requests = []
if (isinstance(graph_element, ops.Tensor) and
graph_element.op.type == "Placeholder"):
self._last_feed_types[graph_element.name] = self.FEED_TYPE_CLIENT
return self._client_feed_dict[graph_element.name]
elif (isinstance(graph_element, ops.Operation) and
graph_element.type == "Placeholder"):
tensor_name = graph_element.name + ":0"
self._last_feed_types[tensor_name] = self.FEED_TYPE_CLIENT
return self._client_feed_dict[tensor_name]
if isinstance(graph_element, ops.Operation) and graph_element.outputs:
# Check if this op has any output tensors that also fall into this
# stepper's transitive closure.
node_outputs = [
output.name for output in graph_element.outputs
if output.name in self._closure_elements
]
if node_outputs:
# The target is an op with at least one output within the transitive
# closure. The cont() action will amount to using the 0-th
# output Tensor as the target, as well as obtaining handles to it
# and to the rest of the outputs tensors in the transitive closure
# (if any).
target_name = node_outputs[0]
additional_handle_requests = node_outputs[1:]
# Verify that the target is in the transitive closure of the stepper's
# fetch.
target_node_name = self._get_node_name(target_name)
if target_node_name not in self._transitive_closure_set:
raise ValueError(
"Target \"%s\" is not in the transitive closure for the fetch of the "
"stepper: \"%s\"." % (target_name, repr(self._fetch_names)))
# Check if a cached tensor handle can be used on the fetch directly.
if use_tensor_handles and target_name in self._tensor_handles:
self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
return self._tensor_handles[target_name].eval()
# Check if a dumped intermediate tensor can be used on the fetch directly.
if (use_dumped_intermediates and
target_name in self._dumped_intermediate_tensors):
self._last_feed_types[target_name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
return self._dumped_intermediate_tensors[target_name].get_tensor()
# Check if an overriding tensor value can be used directly.
if use_overrides and target_name in self._override_tensors:
# Override is available. Return the value right away.
self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
return self._override_tensors[target_name]
# Keep track of which variables are restored in this cont() call.
restored_variables = set()
# Keep track of which variables are "touched" (i.e., possibly updated) in
# this cont() call.
self._last_updated = set()
# =========================================================================
# Use a non-recursive method to trace the inputs from the node and set up
# the feeds.
feeds = {} # The feeds to be used in the Session.run() call.
fetched = self._sess.graph.as_graph_element(target_name)
elem_stack = [fetched]
done = set()
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
# Iterate through the (non-control) inputs.
for inp in all_inputs:
# Determine whether the input is feedable. Reference-type tensors,
# e.g., Variables, should not be fed, because they can change.
if isinstance(inp, ops.Tensor):
is_inp_ref = inp.dtype._is_ref_dtype # pylint: disable=protected-access
can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
else:
is_inp_ref = False
can_feed = False
if (restore_variable_values and inp.name in self._dirty_variables and
inp.name not in restored_variables and
inp.name not in self._last_updated):
# Do not restore Variables touched or restored previously in this
# cont() call.
initializer_op = self._variable_initializers[inp.name]
initial_value_tensor = self._variable_initial_values[inp.name]
self._sess.run(initializer_op,
feed_dict={
initial_value_tensor:
self._cached_variable_values[inp.name]
})
# Mark the variable as restored.
restored_variables.add(inp.name)
# Determine if this is a reference-type input from a variable, and
# the recipient node is not Identity. In that case, the Variable
# needs to be marked as dirty and its current value recorded, due to
# the fact that the receiving op may mutate the value of the Variable.
if (is_inp_ref and inp.op.type in ["Variable", "VariableV2"] and
curr_node.type != "Identity"):
# Mark the variable as dirty.
self._last_updated.add(inp.name)
# Obtain the old value of the variable and cache it.
if inp.name not in self._cached_variable_values:
old_value = self._sess.run(inp)
self._cached_variable_values[inp.name] = old_value
# N.B.: The order of the logical branches matters. For example,
# _client_feed_dict comes after _tensor_handles, so that tensor
# handles stored in cont() calls can override the original client
# feeds. Also for example, _override_tensors comes the first, so
# the manual overriding, if exists, can always take effect.
if use_overrides and can_feed and inp.name in self._override_tensors:
# Use client-supplied overriding tensor value.
feeds[inp] = self._override_tensors[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
elif (can_feed and inp not in feeds and
use_tensor_handles and inp.name in self._tensor_handles):
# Tensor handle found in cache.
feeds[inp] = self._tensor_handles[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
elif (can_feed and inp not in feeds and
use_dumped_intermediates and
inp.name in self._dumped_intermediate_tensors):
# Dumped intermediate Tensor found.
feeds[inp] = self._dumped_intermediate_tensors[inp.name].get_tensor()
self._last_feed_types[inp.name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
elif inp.name in self._client_feed_dict:
# This input is available in the client feed_dict.
feeds[inp] = self._client_feed_dict[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
else:
# There is no feed available for this input. So keep tracing its
# input(s).
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
done.add(inp_node.name)
# =========================================================================
if self._last_updated:
self._dirty_variables.update(self._last_updated)
for variable in restored_variables:
self._dirty_variables.remove(variable)
(dump_path,
run_options) = self._prepare_cont_call_dump_path_and_run_options()
if isinstance(fetched, ops.Operation):
# The fetched is an Operation: Will not get tensor handle.
self._sess.run(fetched, feed_dict=feeds, options=run_options)
return_value = None
else:
# This is a Tensor: Will get tensor handle and cache it.
# Will also get the additional requested tensor handles (if any).
tensors_to_get_handles_for = [fetched]
handle_names = [target_name]
tensors_to_get_handles_for.extend([
self._sess.graph.as_graph_element(h)
for h in additional_handle_requests
])
handle_names.extend(additional_handle_requests)
handles = self._sess.run(
[session_ops.get_session_handle(tensor) for tensor in
tensors_to_get_handles_for],
feed_dict=feeds,
options=run_options)
for handle_name, handle in zip(handle_names, handles):
self._tensor_handles[handle_name] = handle
return_value = self._tensor_handles[target_name].eval()
self._load_dumped_intermediate_tensors(dump_path, target_name)
if invalidate_from_updated_variables:
# Invalidate caches at the end.
for last_updated_variable in self._last_updated:
self._invalidate_transitively_outgoing_cache(last_updated_variable)
return return_value
def _prepare_cont_call_dump_path_and_run_options(self):
"""Prepare the dump path and RunOptions for next cont() call.
Returns:
dump_path: (str) Directory path to which the intermediate tensor will be
dumped.
run_options: (config_pb2.RunOptions) The RunOptions containing the tensor
watch options for this graph.
"""
run_options = config_pb2.RunOptions()
dump_path = self._cont_call_dump_path()
for element_name in self._closure_elements:
if ":" in element_name:
debug_utils.add_debug_tensor_watch(
run_options,
debug_graphs.get_node_name(element_name),
output_slot=debug_graphs.get_output_slot(element_name),
debug_urls=["file://" + dump_path])
return dump_path, run_options
def _cont_call_dump_path(self):
return os.path.join(self._dump_session_root,
"cont_%d" % int(time.time() * 1e6))
def _load_dumped_intermediate_tensors(self, dump_path, target_name):
dump_dir = debug_data.DebugDumpDir(dump_path, validate=False)
for dump in dump_dir.dumped_tensor_data:
if (dump.tensor_name not in self._ref_tensor_names and
dump.tensor_name not in self._tensor_handles and
dump.tensor_name not in self._override_tensors and
dump.tensor_name != target_name):
self._dumped_intermediate_tensors[dump.tensor_name] = dump
def _get_node_name(self, graph_element_name):
return graph_element_name.split(":")[0]
def _invalidate_transitively_outgoing_cache(self, source_element):
"""Invalidate the cached tensor handles by tracing output.
This method is used to invalidate caches such as cached TensorHandles
and intermediate tensor values when Variable mutation happens or when
client overrides tensor values.
Uses non-recursive implementation to avoid stack overflow on deep networks.
Args:
source_element: The source graph element (e.g., a Variable output slot)
to trace the output from.
"""
if not self._tensor_handles and not self._dumped_intermediate_tensors:
return
# First, use cached invalidation paths to eliminate some cached tensor
# handles and intermediate tensors.
to_delete_handles = []
for handle_name in self._tensor_handles:
if (handle_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[handle_name]):
to_delete_handles.append(handle_name)
for handle_name in to_delete_handles:
del self._tensor_handles[handle_name]
to_delete_intermediates = []
for intm_tensor_name in self._dumped_intermediate_tensors:
if (intm_tensor_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[intm_tensor_name]):
to_delete_intermediates.append(intm_tensor_name)
for intermediate in to_delete_intermediates:
del self._dumped_intermediate_tensors[intermediate]
if not self._tensor_handles and not self._dumped_intermediate_tensors:
return
stack = [source_element]
done = set()
while stack:
curr_element = stack.pop()
done.add(curr_element)
if (curr_element in self._tensor_handles or
curr_element in self._dumped_intermediate_tensors):
# Cache the invalidation path for potential future use.
if curr_element not in self._cached_invalidation_path:
self._cached_invalidation_path[curr_element] = set([source_element])
else:
self._cached_invalidation_path[curr_element].add(source_element)
if curr_element in self._tensor_handles:
del self._tensor_handles[curr_element]
else:
del self._dumped_intermediate_tensors[curr_element]
targets = self._output_targets.get(curr_element, [])
for target in targets:
if target in done:
continue
else:
stack.append(target)
def finalize(self):
"""Run the final fetch(es).
Restore the dirty variables; ignore the client-supplied overriding tensor
values.
Returns:
The same return value as self.cont() as called on the final fetch.
"""
self.restore_variable_values()
return self._sess.run(self._fetches, feed_dict=self._client_feed_dict)
def restore_variable_values(self):
"""Restore variables to the initial values.
"Initial value" refers to the value when this NodeStepper instance was
first constructed.
"""
for var_name in self._dirty_variables:
self._sess.run(self._variable_initializers[var_name],
feed_dict={
self._variable_initial_values[var_name]:
self._cached_variable_values[var_name]
})
def handle_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensors for which TensorHandle is available.
"""
return [name for name in self._tensor_handles]
def handle_node_names(self):
"""Get list of names of the nodes for which handles are available.
Returns:
(set of str) List of names of the nodes.
"""
return set([self._get_node_name(name) for name in self._tensor_handles])
def intermediate_tensor_names(self):
"""Get list of the names of the Tensors for which dumps are available.
Returns:
(list of str) List of the names of the Tensors for which intermediate
dumps are available.
"""
return self._dumped_intermediate_tensors.keys()
def last_updated(self):
"""Get the names of the variables updated in the last cont() call.
Returns:
A set of the variable names updated in the previous cont() call.
If no cont() call has occurred before, returns None.
"""
return self._last_updated
def dirty_variables(self):
"""Get the set of variables that are currently "dirty".
"dirty" means:
previous cont() calls have updated the value of the Variable,
and the Variable's old value (the value before any cont() calls
happened) was not restored.
Returns:
(set) A set of dirty variables.
"""
return self._dirty_variables
def is_placeholder(self, graph_element_name):
"""Check whether a graph element is a Placeholder, by name.
Args:
graph_element_name: (str) Name of the tensor or op to be tested.
Returns:
(bool) Whether the graph element of the specified name is a Placeholder
op or the output Tensor of a Placeholder op.
Raises:
ValueError: If graph_element_name is not in the transitive closure of the
stepper instance.
"""
node_name = self._get_node_name(graph_element_name)
if node_name not in self.sorted_nodes():
raise ValueError(
"%s is not in the transitive closure of this NodeStepper "
"instance" % graph_element_name)
graph_element = self._sess.graph.as_graph_element(graph_element_name)
if not isinstance(graph_element, ops.Operation):
graph_element = graph_element.op
return graph_element.type == "Placeholder"
def placeholders(self):
"""Get the list of Placeholder Tensors in the transitive closure.
Returns:
(list of str) A list of Placeholder Tensors or ops in the transitive
closure.
"""
placeholders = []
for item in self.sorted_nodes():
if self.is_placeholder(item):
placeholders.append(item)
return placeholders
def get_tensor_value(self, tensor_name):
"""Get the value of a tensor that the stepper has access to.
Args:
tensor_name: (str) Name of the tensor.
Returns:
Value of the tensor, from overriding values or cached tensor handles.
Raises:
ValueError: If the value is not available as an overriding value
or through a TensorHandle.
"""
if self.is_placeholder(tensor_name):
if ":" not in tensor_name:
tensor_name += ":0"
return self._client_feed_dict[tensor_name]
elif tensor_name in self._override_tensors:
return self._override_tensors[tensor_name]
elif tensor_name in self._tensor_handles:
return self._tensor_handles[tensor_name].eval()
elif tensor_name in self._dumped_intermediate_tensors:
return self._dumped_intermediate_tensors[tensor_name].get_tensor()
else:
raise ValueError(
"This stepper instance does not have access to the value of "
"tensor \"%s\"" % tensor_name)
def override_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensor for which overriding tensor values are
available.
"""
return [name for name in self._override_tensors]
def _get_node(self, element):
"""Get the node of a graph element.
Args:
element: A graph element (Op, Tensor or Node)
Returns:
The node associated with element in the graph.
"""
node_name, _ = debug_graphs.parse_node_or_tensor_name(element.name)
return self._sess.graph.as_graph_element(node_name)
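# A minimal, hedged usage sketch of the accessors defined above. It assumes a
# `stepper` instance of this class has already been constructed elsewhere (the
# constructor is not shown here) and only calls methods visible in this class.
def _inspect_stepper(stepper):
  """Print a summary of the stepper's cached and overridden state."""
  print("cached tensor handles:", stepper.handle_names())
  print("dumped intermediates:", list(stepper.intermediate_tensor_names()))
  print("overridden tensors:", stepper.override_names())
  print("dirty variables:", stepper.dirty_variables())
  # Placeholders are served from the client feed dict rather than from handles.
  for name in stepper.placeholders():
    print(name, "->", stepper.get_tensor_value(name))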
| 36.673891
| 109
| 0.688494
|
f2f2dcd3839b86f3ae6a64a0c4319dee7cf1d8e6
| 706
|
py
|
Python
|
scripts/bcf2itol.py
|
jodyphelan/pathogenseq
|
2e04190f25063d722ef653e819b94eb66407ea8d
|
[
"MIT"
] | null | null | null |
scripts/bcf2itol.py
|
jodyphelan/pathogenseq
|
2e04190f25063d722ef653e819b94eb66407ea8d
|
[
"MIT"
] | null | null | null |
scripts/bcf2itol.py
|
jodyphelan/pathogenseq
|
2e04190f25063d722ef653e819b94eb66407ea8d
|
[
"MIT"
] | 1
|
2018-05-11T14:54:51.000Z
|
2018-05-11T14:54:51.000Z
|
#! /usr/bin/env python
import sys
import pathogenseq as ps
import argparse
def main(args):
bcf = ps.bcf(args.bcf_file)
bcf.itol_from_bcf(args.mutation_file,args.amino_acid,args.no_ref,args.no_missing)
parser = argparse.ArgumentParser(description='bcf2itol.py',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bcf_file', help='BCF file')
parser.add_argument('mutation_file',default=None,type=str, help='Mutation file')
parser.add_argument('--amino_acid',action='store_true')
parser.add_argument('--no_ref',action='store_true')
parser.add_argument('--no_missing',action='store_true')
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
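# Example invocation, derived from the argparse definitions above (the file
# names below are placeholders):
#
#   python bcf2itol.py variants.bcf mutations.txt --amino_acid --no_ref --no_missing
#
# The positional arguments are the BCF file and the mutation file; the three
# optional flags are forwarded to itol_from_bcf(), presumably toggling
# amino-acid annotation and the exclusion of reference/missing calls.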
| 35.3
| 116
| 0.791785
|
2ba7f0cf6206c943ed4baa897e06fd67bddcd6ea
| 4,085
|
py
|
Python
|
library/page_data/martindale_app/banner_module_data.py
|
tate138/Martindale_Automation
|
082169df9f88ddc96fa81365f9c5e8aa460f7d14
|
[
"Apache-2.0"
] | null | null | null |
library/page_data/martindale_app/banner_module_data.py
|
tate138/Martindale_Automation
|
082169df9f88ddc96fa81365f9c5e8aa460f7d14
|
[
"Apache-2.0"
] | null | null | null |
library/page_data/martindale_app/banner_module_data.py
|
tate138/Martindale_Automation
|
082169df9f88ddc96fa81365f9c5e8aa460f7d14
|
[
"Apache-2.0"
] | null | null | null |
class BannerModuleData:
####
# BANNER MODULE
####
BANNER_MODULE_HEADER = \
"//*[@id=\"ple_column-0\"]/div[2]"
CREATE_BANNER_RADIO_BUTTON = \
"//*[@id=\"ple_column-0-modal-gallery\"]/div/form/div[2]/div[2]/fieldset/div[1]/label"
CREATE_BANNER_MODULE_NAME_TEXT = \
"//*[@id=\"alias-picker-component-0-new-alias\"]"
GALLERY_BANNER_TITLE = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings\"]/div/h4"
BANNER_MODULE_CONTENT_TAB = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings\"]/form/div[1]/div/ul/li[1]/a"
BANNER_MODULE_CONENT_MANAGE_CONTENT = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-tab-content\"]/div/div[1]"
BANNER_MODULE_CONTENT_ADD_BANNER_BUTTON = \
"//*[@id=\"addItem\"]"
BANNER_LIST_ITEM_ONE = \
"/html/body/ul/li[1]/div[2]/form/div[1]/div/div[1]/div/div[4]/ul[1]/li[1]/div/div[3]/div[1]/div[1]/span"
BANNER_UNSAVED_CHANGES_MODAL = \
"//*[@id=\"se__body\"]/div[8]/div[2]/p[1]"
BANNER_MODULE_CONTENT_ADD_BANNER = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings-item\"]/div/h4"
BANNER_TITLE = \
"//*[@id=\"title\"]"
BANNER_TITLE_ERROR = \
"//*[@id=\"title-error\"]"
BANNER_CAPTION = \
"//*[@id=\"caption\"]"
BANNER_CAPTION_ERROR = \
"//*[@id=\"caption-error\"]"
BANNER_MODULE_CONTENT_ADD_LINK_APPEARANCE = \
"//*[@id=\"cta1_buttonStyle\"]"
BANNER_MODULE_CONTENT_ADD_LINK_APPEARANCE_TWO = \
"//*[@id=\"cta2_buttonStyle\"]"
BANNER_MODULE_CONTENT_ADD_LINK_TYPE = \
"//*[@id=\"cta1_linkType\"]"
BANNER_MODULE_CONTENT_ADD_LINK_TYPE_TWO = \
"//*[@id=\"cta2_linkType\"]"
BANNER_MODULE_CONTENT_ADD_LINK_PAGE_LABEL = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings-item\"]/form/div[1]/div/ul[1]/div[2]/div[6]/label"
BANNER_MODULE_CONTENT_ADD_LINK_PAGE_LABEL_TWO = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings-item\"]/form/div[1]/div/ul[2]/div[2]/div[6]/label"
BANNER_MODULE_CONTENT_ADD_LINK_PAGE_URL_LABEL = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings-item\"]/form/div[1]/div/ul[1]/div[2]/div[7]/label"
BANNER_MODULE_CONTENT_ADD_LINK_PAGE_URL_LABEL_TWO = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings-item\"]/form/div[1]/div/ul[2]/div[2]/div[7]/label"
CTA_URL_TEXT = \
"//*[@id=\"cta1_linkUrl\"]"
CTA_URL_TEXT_TWO = \
"//*[@id=\"cta2_linkUrl\"]"
ADD_BANNER_SAVE_BUTTON = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings-item\"]/form/div[2]/button[2]"
ADD_BANNER_CANCEL_BUTTON = \
"//*[@id=\"cancel-item\"]"
DELETE_BANNER_CONFIRMATION = \
"//*[@id=\"se__body\"]/div[8]/div[2]/h2"
DELETE_BANNER_CANCEL_BUTTON = \
"//*[@id=\"se__body\"]/div[8]/div[2]/p[2]/button[1]"
DELETE_BANNER_OK_BUTTON = \
"//*[@id=\"se__body\"]/div[8]/div[2]/p[2]/button[2]"
BANNER_MODULE_SETTINGS_TAB = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings\"]/form/div[1]/div/ul/li[2]/a"
BANNER_MODULE_SETTINGS_ELEMENT_VISIBILITY = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-tab-settings\"]/ul/li[1]/div[1]"
SETTINGS_SHOW_BANNER_TITLE = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-tab-settings\"]/ul/li[1]/div[2]/ul/li[1]/div/label/span"
BANNER_MODULE_LAYOUT_TAB = \
"//*[@id=\"BannerSettings-Smb__Banner__Widgets__BannerSettings___automation_only-settings\"]/form/div[1]/div/ul/li[3]/a"
BANNER_MODULE_LAYOUT_TYPE = \
"//*[@id=\"privew_options_container\"]/ul/li[1]/div[1]"
| 50.432099
| 148
| 0.668543
|
f9b3a70e0f698d272dca0e50e9dbd77756e8f50d
| 487
|
py
|
Python
|
tests/mock_logger.py
|
fossabot/mosec
|
b803cffbbdb92212a2810597f8ce59fe14c1f728
|
[
"Apache-2.0"
] | null | null | null |
tests/mock_logger.py
|
fossabot/mosec
|
b803cffbbdb92212a2810597f8ce59fe14c1f728
|
[
"Apache-2.0"
] | null | null | null |
tests/mock_logger.py
|
fossabot/mosec
|
b803cffbbdb92212a2810597f8ce59fe14c1f728
|
[
"Apache-2.0"
] | null | null | null |
class MockLogger:
@staticmethod
def info(content):
print(f"\nMockLogger: info - {content}")
@staticmethod
def debug(content):
print(f"\nMockLogger: debug - {content}")
def warn(self, content):
self.warning(content)
@staticmethod
def warning(content):
print(f"\nMockLogger: warning - {content}")
@staticmethod
def error(content):
print(f"\nMockLogger: error - {content}")
raise RuntimeError(content)
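# A short usage sketch: the mock stands in for a logger-like object in tests,
# and error() deliberately raises so failures surface immediately.
if __name__ == "__main__":
    log = MockLogger()
    log.info("starting")
    log.warn("warn() delegates to warning()")
    try:
        log.error("boom")
    except RuntimeError as exc:
        print(f"error() raised as expected: {exc}")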
| 23.190476
| 51
| 0.61807
|
0593d78b4bc4ff04edbae26b1143e7fe0bc84369
| 1,749
|
py
|
Python
|
cvpods/modeling/backbone/backbone.py
|
hanqiu-hq/cvpods
|
597fa669151fdad87c250fa118a9e3a555f4fb5e
|
[
"Apache-2.0"
] | null | null | null |
cvpods/modeling/backbone/backbone.py
|
hanqiu-hq/cvpods
|
597fa669151fdad87c250fa118a9e3a555f4fb5e
|
[
"Apache-2.0"
] | null | null | null |
cvpods/modeling/backbone/backbone.py
|
hanqiu-hq/cvpods
|
597fa669151fdad87c250fa118a9e3a555f4fb5e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn as nn
from cvpods.layers import ShapeSpec
__all__ = ["Backbone"]
class Backbone(nn.Module, metaclass=ABCMeta):
"""
Abstract base class for network backbones.
"""
def __init__(self):
"""
The `__init__` method of any subclass can specify its own set of arguments.
"""
super().__init__()
@abstractmethod
def forward(self):
"""
Subclasses must override this method, but adhere to the same return type.
Returns:
dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor
"""
pass
@property
def size_divisibility(self):
"""
Some backbones require the input height and width to be divisible by a
specific integer. This is typically true for encoder / decoder type networks
with lateral connection (e.g., FPN) for which feature maps need to match
dimension in the "bottom up" and "top down" paths. Set to 0 if no specific
input size divisibility is required.
"""
return 0
def output_shape(self):
"""
Returns:
dict[str->ShapeSpec]
"""
# this is a backward-compatible default
return {
name: ShapeSpec(channels=self._out_feature_channels[name],
stride=self._out_feature_strides[name])
for name in self._out_features
}
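# A minimal, hedged sketch of a concrete subclass (not part of cvpods itself).
# It only illustrates the contract: forward() returns a name -> Tensor mapping
# and the _out_feature_* attributes feed the default output_shape() above.
class _ToyBackbone(Backbone):
    def __init__(self):
        super().__init__()
        self.stem = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1)
        self._out_features = ["res2"]
        self._out_feature_channels = {"res2": 64}
        self._out_feature_strides = {"res2": 2}
    def forward(self, x):
        return {"res2": self.stem(x)}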
| 30.155172
| 87
| 0.6255
|
3efaebc1bfd6238655adda37641b4f717a1fd28c
| 13,935
|
py
|
Python
|
test/basetest/utils.py
|
taiyu-len/taskwarrior
|
bd221a5adc43e5c70e05eb4f7a48d1db3d18555d
|
[
"MIT"
] | null | null | null |
test/basetest/utils.py
|
taiyu-len/taskwarrior
|
bd221a5adc43e5c70e05eb4f7a48d1db3d18555d
|
[
"MIT"
] | 1
|
2022-03-29T02:41:58.000Z
|
2022-03-31T17:57:20.000Z
|
test/basetest/utils.py
|
taiyu-len/taskwarrior
|
bd221a5adc43e5c70e05eb4f7a48d1db3d18555d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
import errno
import os
import sys
import socket
import signal
import functools
import atexit
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from time import sleep
try:
import simplejson as json
except ImportError:
import json
from .exceptions import CommandError, TimeoutWaitingFor
USED_PORTS = set()
ON_POSIX = 'posix' in sys.builtin_module_names
# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "..", "src")
)
# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_certs")
)
# Default location of test hooks
DEFAULT_HOOK_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_hooks")
)
# Environment flags to control skipping of task and taskd tests
TASKW_SKIP = os.environ.get("TASKW_SKIP", False)
TASKD_SKIP = os.environ.get("TASKD_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
TASK_USE_PATH = os.environ.get("TASK_USE_PATH", False)
TASKD_USE_PATH = os.environ.get("TASKD_USE_PATH", False)
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def task_binary_location(cmd="task"):
"""If TASK_USE_PATH is set rely on PATH to look for task binaries.
Otherwise ../src/ is used by default.
"""
return binary_location(cmd, TASK_USE_PATH)
def taskd_binary_location(cmd="taskd"):
"""If TASKD_USE_PATH is set rely on PATH to look for taskd binaries.
Otherwise ../src/ is used by default.
"""
return binary_location(cmd, TASKD_USE_PATH)
def binary_location(cmd, USE_PATH=False):
"""If USE_PATH is True rely on PATH to look for taskd binaries.
Otherwise ../src/ is used by default.
"""
if USE_PATH:
return cmd
else:
return os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
"""Wait for condition to return anything other than None
"""
# NOTE Increasing sleeptime can dramatically increase testsuite runtime
# It also reduces CPU load significantly
if timeout is None:
timeout = 1
if timeout < sleeptime:
print("Warning, timeout cannot be smaller than", sleeptime)
timeout = sleeptime
# Max number of attempts until giving up
tries = int(timeout / sleeptime)
for i in range(tries):
val = cond()
if val is not None:
break
sleep(sleeptime)
return val
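# Example use of wait_condition: poll until a predicate returns something other
# than None or the timeout elapses (a sketch; the path argument is arbitrary).
def _wait_for_file(path, timeout=2):
    return wait_condition(lambda: path if os.path.exists(path) else None,
                          timeout=timeout)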
def wait_process(pid, timeout=None):
"""Wait for process to finish
"""
def process():
try:
os.kill(pid, 0)
except OSError:
# Process is dead
return True
else:
# Process is still ticking
return None
return wait_condition(process, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution of taskw: '{0}' . "
"If you are running out-of-tree tests set TASK_USE_PATH=1 or "
"TASKD_USE_PATH=1 in shell env before execution and add the "
"location of the task(d) binary to the PATH".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
if sys.version_info > (3,):
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from taskw subprocess queues
"""
# Try to join the thread on failure abort
thread.join(timeout)
    if thread.is_alive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from TaskWarrior")
return data
def _get_output(arguments, timeout=None):
"""Collect output from the subprocess without blocking the main process if
subprocess hangs.
"""
# NOTE Increase this value if tests fail with None being received as
# stdout/stderr instead of the expected content
output_timeout = 0.1 # seconds
pidq = Queue()
outputq = Queue()
t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
t.daemon = True
t.start()
try:
pid = pidq.get(timeout=timeout)
except Empty:
pid = None
# Process crashed or timed out for some reason
if pid is None:
return _retrieve_output(t, output_timeout, outputq,
"TaskWarrior to start")
# Wait for process to finish (normal execution)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"TaskWarrior thread to join")
# If we reach this point we assume the process got stuck or timed out
for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
# Start with lower signals and escalate if process ignores them
try:
            os.kill(pid, sig)
except OSError as e:
# ESRCH means the process finished/died between last check and now
if e.errno != errno.ESRCH:
raise
# Wait for process to finish (should die/exit after signal)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"TaskWarrior to die")
# This should never happen but in case something goes really bad
raise OSError("TaskWarrior stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
merge_streams=False, env=os.environ, timeout=None):
"Run a subprocess and wait for it to finish"
if input is None:
stdin = None
else:
stdin = PIPE
if merge_streams:
stderr = STDOUT
else:
stderr = PIPE
arguments = {
"process": {
"args": cmd,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"bufsize": 1,
"close_fds": ON_POSIX,
"env": env,
},
"input": input,
}
out, err, exit = _get_output(arguments, timeout)
if merge_streams:
if exit != 0:
raise CommandError(cmd, exit, out)
else:
return exit, out
else:
if exit != 0:
raise CommandError(cmd, exit, out, err)
else:
return exit, out, err
def run_cmd_wait_nofail(*args, **kwargs):
"Same as run_cmd_wait but silence the exception if it happens"
try:
return run_cmd_wait(*args, **kwargs)
except CommandError as e:
return e.code, e.out, e.err
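# Example: run the task binary and tolerate a non-zero exit code (a sketch; it
# assumes the binary resolved by task_binary_location() is actually present).
def _task_version():
    return run_cmd_wait_nofail([task_binary_location(), "--version"])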
def get_IPs(hostname):
output = {}
addrs = socket.getaddrinfo(hostname, 0, 0, 0, socket.IPPROTO_TCP)
for family, socktype, proto, canonname, sockaddr in addrs:
addr = sockaddr[0]
output[family] = addr
return output
def port_used(addr="localhost", port=None):
"Return True if port is in use, False otherwise"
if port is None:
raise TypeError("Argument 'port' may not be None")
# If we got an address name, resolve it both to IPv6 and IPv4.
IPs = get_IPs(addr)
# Taskd seems to prefer IPv6 so we do it first
for family in (socket.AF_INET6, socket.AF_INET):
try:
addr = IPs[family]
except KeyError:
continue
s = socket.socket(family, socket.SOCK_STREAM)
result = s.connect_ex((addr, port))
s.close()
if result == 0:
# connection was successful
return True
else:
return False
def find_unused_port(addr="localhost", start=53589, track=True):
"""Find an unused port starting at `start` port
If track=False the returned port will not be marked as in-use and the code
will rely entirely on the ability to connect to addr:port as detection
mechanism. Note this may cause problems if ports are assigned but not used
immediately
"""
maxport = 65535
unused = None
    for port in range(start, maxport):
if not port_used(addr, port):
if track and port in USED_PORTS:
continue
unused = port
break
if unused is None:
raise ValueError("No available port in the range {0}-{1}".format(
start, maxport))
if track:
USED_PORTS.add(unused)
return unused
def release_port(port):
"""Forget that given port was marked as'in-use
"""
try:
USED_PORTS.remove(port)
except KeyError:
pass
def memoize(obj):
"""Keep an in-memory cache of function results given it's inputs
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
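# Example: cache an expensive pure function. Repeated calls with the same
# arguments are served from obj.cache instead of being recomputed.
@memoize
def _slow_square(x):
    sleep(0.1)  # stand-in for real work
    return x * x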
try:
from shutil import which
which = memoize(which)
except ImportError:
# NOTE: This is shutil.which backported from python-3.3.3
@memoize
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def parse_datafile(file):
"""Parse .data files on the client and server treating files as JSON
"""
data = []
with open(file) as fh:
for line in fh:
line = line.rstrip("\n")
# Turn [] strings into {} to be treated properly as JSON hashes
if line.startswith('[') and line.endswith(']'):
line = '{' + line[1:-1] + '}'
if line.startswith("{"):
data.append(json.loads(line))
else:
data.append(line)
return data
def mkstemp(data):
"""
Create a temporary file that is removed at process exit
"""
def rmtemp(name):
try:
os.remove(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data)
f.close()
# Ensure removal at end of python session
atexit.register(rmtemp, f.name)
return f.name
def mkstemp_exec(data):
"""Create a temporary executable file that is removed at process exit
"""
name = mkstemp(data)
os.chmod(name, 0o755)
return name
# vim: ai sts=4 et sw=4
| 28.731959
| 79
| 0.610621
|
ecccdea0245ef4f362281e5eac26714853deb3ef
| 840
|
py
|
Python
|
students/k3342/laboratory_works/Kataeva_Veronika/laboratory_work_1/scoreboard/board/migrations/0005_race.py
|
KataevaVeronika/ITMO_ICT_WebProgramming_2020
|
d8cf1d0479519bbafd34d4678e9eda2eabaeb0cf
|
[
"MIT"
] | null | null | null |
students/k3342/laboratory_works/Kataeva_Veronika/laboratory_work_1/scoreboard/board/migrations/0005_race.py
|
KataevaVeronika/ITMO_ICT_WebProgramming_2020
|
d8cf1d0479519bbafd34d4678e9eda2eabaeb0cf
|
[
"MIT"
] | null | null | null |
students/k3342/laboratory_works/Kataeva_Veronika/laboratory_work_1/scoreboard/board/migrations/0005_race.py
|
KataevaVeronika/ITMO_ICT_WebProgramming_2020
|
d8cf1d0479519bbafd34d4678e9eda2eabaeb0cf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-04-17 18:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('board', '0004_auto_20200417_2050'),
]
operations = [
migrations.CreateModel(
name='Race',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('race_name', models.CharField(max_length=20)),
('series', models.CharField(max_length=20)),
('year', models.IntegerField()),
('winner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='board.Racer')),
],
options={
'db_table': 'Race',
},
),
]
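# For reference, a hedged sketch of the model this migration corresponds to in
# board/models.py (field names and types are read off the operations above; the
# Racer model referenced by the ForeignKey is created in an earlier migration):
#
#   class Race(models.Model):
#       race_name = models.CharField(max_length=20)
#       series = models.CharField(max_length=20)
#       year = models.IntegerField()
#       winner = models.ForeignKey('Racer', on_delete=models.CASCADE)
#
#       class Meta:
#           db_table = 'Race'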
| 30
| 114
| 0.561905
|
ff8f673eef62ea35b2c0e443ea94157f9cf7ce6b
| 31,378
|
py
|
Python
|
xsd-fu/python/genshi/output.py
|
manics/ome-model
|
835e6f2c6e3da3cb7f94f228458f707a83fd0b98
|
[
"BSD-2-Clause"
] | null | null | null |
xsd-fu/python/genshi/output.py
|
manics/ome-model
|
835e6f2c6e3da3cb7f94f228458f707a83fd0b98
|
[
"BSD-2-Clause"
] | null | null | null |
xsd-fu/python/genshi/output.py
|
manics/ome-model
|
835e6f2c6e3da3cb7f94f228458f707a83fd0b98
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
"""Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
"""
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
def get_serializer(method='xml', **kwargs):
"""Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
"""
if isinstance(method, str):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
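# Example: glue the two helpers above together to render a stream (a sketch;
# `stream` is assumed to be any Genshi event stream, e.g. from genshi.input.XML).
def _render(stream, method='html', encoding='utf-8'):
    serializer = get_serializer(method)
    return encode(serializer(stream), method=method, encoding=encoding)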
def _prepare_cache(use_cache=True):
"""Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
"""
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
class DocType(object):
"""Defines a number of commonly used DOCTYPE declarations as constants."""
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
"""Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
"""
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
'html-transitional': DocType.HTML_TRANSITIONAL,
'html-frameset': DocType.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
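# Example lookups against the table above:
#
#   DocType.get('html5')  -> ('html', None, None)
#   DocType.get('xhtml')  -> ('html', '-//W3C//DTD XHTML 1.0 Strict//EN',
#                             'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd')
#   DocType.get('bogus')  -> None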
class XMLSerializer(object):
"""Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
"""
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
"""Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
"""
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
"""Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap',
'autofocus', 'readonly', 'required',
'formnovalidate'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
"""Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
"""
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
"""Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
"""
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
"""Produces plain text from an event stream.
Only text events are included in the output. Unlike the other serializer,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
    You can use the ``strip_markup`` option to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
"""
def __init__(self, strip_markup=False):
"""Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
"""
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield str(data)
class EmptyTagFilter(object):
"""Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
"""
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
r"""Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
    START ('doc', Attrs([('xmlns', 'NS1'), ('xmlns:two', 'NS2')]))
    TEXT '\n '
    START ('two:item', Attrs())
    END 'two:item'
    TEXT '\n'
    END 'doc'
"""
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in list(self.prefixes.items())])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().__next__
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
"""
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
class DocTypeInserter(object):
"""A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
"""
def __init__(self, doctype):
"""Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
"""
if isinstance(doctype, str):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
| 37.177725
| 82
| 0.520683
|
f4a8ef704641041e5999edab8e9e05ef74b7033c
| 2,379
|
py
|
Python
|
elephant.py
|
danielvarga/Elephant
|
6c0ff018b1fe1b2acc246a142b5fe34a605e41b0
|
[
"Unlicense"
] | null | null | null |
elephant.py
|
danielvarga/Elephant
|
6c0ff018b1fe1b2acc246a142b5fe34a605e41b0
|
[
"Unlicense"
] | null | null | null |
elephant.py
|
danielvarga/Elephant
|
6c0ff018b1fe1b2acc246a142b5fe34a605e41b0
|
[
"Unlicense"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
if 0:
path = "points_elephant.npy"
# superfluous parameters will turn out to be zero
n_parameters = 10*4
else:
path = "points_fancy_elephant.npy"
# must be multiple of 4
n_parameters = 100*4
assert(n_parameters % 4 == 0)
points = np.load(path)
parameters = np.random.randn(n_parameters//4, 4)
parameters = tf.Variable(parameters, dtype=np.float32)
ax = parameters[:, 0:1]
bx = parameters[:, 1:2]
ay = parameters[:, 2:3]
by = parameters[:, 3:4]
tp = tf.placeholder(tf.float32, [1, len(points)])
# weird shapes so (k*t) broadcasts to shape (n_parameters/4, len(points))
t = np.linspace(0, 2*np.pi, len(points), endpoint=False).reshape(1, -1).astype(np.float32)
k = np.arange(1, n_parameters//4 + 1).reshape(-1, 1)
warp = False
if warp:
dense1 = tf.layers.dense(inputs=tf.transpose(tp), units=100, activation=tf.nn.relu)
# w as in warped t
wt = tf.layers.dense(inputs=dense1, units=1, activation=None)
wt = tf.transpose(wt)
else:
wt = tp
c = tf.cos(k*wt)
s = tf.sin(k*wt)
# sum over parameter axis
x = tf.reduce_sum(ax*c + bx*s, axis=0)
y = tf.reduce_sum(ay*c + by*s, axis=0)
approximated_points = tf.stack([x, y], axis=1)
difference = points - approximated_points
loss = tf.reduce_mean(tf.square(difference))
# for sparsity
# loss += tf.reduce_mean(tf.abs(parameters)) * 0.5
if warp:
# boundary condition as soft constraint
loss += 10 * tf.reduce_mean( tf.square(wt[0, 0]) + tf.square(wt[0, -1] - 2*np.pi) )
optimizer = tf.train.AdamOptimizer(0.01)
train_op = optimizer.minimize(loss)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
n_iterations = 1000
for iteration in range(1, n_iterations + 1):
train_loss, wt_np, _ = sess.run([loss, wt, train_op], feed_dict={tp: t})
if iteration % 10 == 0:
print("iteration %d/%d, loss: %f"%(iteration, n_iterations, train_loss))
print("number of non-zero parameters:")
print(np.sum(np.abs(sess.run(parameters)) > 0.01))
points2 = sess.run(approximated_points, feed_dict={tp: t})
# append first point to plot closed curve
points2 = np.concatenate([points2, points2[:1]])
if 1:
plt.plot(points2[:, 0], points2[:, 1])
# eye
if len(points2) == 129: plt.plot([0.1], [1], 'bo')
plt.show()
# plt.plot(t[0], wt_np[0])
# plt.show()
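# The curve being fitted above is a truncated Fourier series in the parameter t:
#
#   x(t) = sum_{k=1..n_parameters/4} ( ax_k * cos(k*t) + bx_k * sin(k*t) )
#   y(t) = sum_{k=1..n_parameters/4} ( ay_k * cos(k*t) + by_k * sin(k*t) )
#
# which is exactly what the broadcasted reduce_sum over ax*c + bx*s and
# ay*c + by*s computes; the optional warp network only reparametrises t.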
| 25.580645
| 90
| 0.67045
|
62022d14889d867127e13cb8bb9a7d6d4cc3e971
| 12,477
|
py
|
Python
|
timeSeriesVisualisation.py
|
karhunenloeve/Twirlflake
|
a5a71279cf6f1001f6c8c45e05b7f75d2830dec6
|
[
"MIT"
] | null | null | null |
timeSeriesVisualisation.py
|
karhunenloeve/Twirlflake
|
a5a71279cf6f1001f6c8c45e05b7f75d2830dec6
|
[
"MIT"
] | null | null | null |
timeSeriesVisualisation.py
|
karhunenloeve/Twirlflake
|
a5a71279cf6f1001f6c8c45e05b7f75d2830dec6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 10:42:29 2020
@author: Luciano Melodia
"""
import os
import numpy as np
import random
import timeSeriesConfig as cfg
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib
import chart_studio.plotly as py
import plotly.graph_objs as go
import plotly
from matplotlib.collections import LineCollection
from matplotlib import colors as mcolors
from scipy.interpolate import make_interp_spline, BSpline
from mpl_toolkits.mplot3d import Axes3D
from scipy import stats
from cycler import cycler
from gtda.diagrams import HeatKernel
from gtda.plotting import plot_heatmap, plot_betti_surfaces, plot_betti_curves
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import to_tree
matplotlib.use("WebAgg")
def export_cmap_to_cpt(
cmap,
vmin: float = 0,
vmax: float = 1,
N: int = 255,
filename: str = "test.cpt",
**kwargs
):
"""
**Exports a custom matplotlib color map to files.**
Generates a color map for matplotlib at a desired normalized interval of choice. The map is not returned, but
saved as a text file. The default name for this file is `test.cpt`. This file then contains the information
for the color maps in matplotlib and can then be loaded.
+ param **cmap**: Name of the color map, type `str`.
+ param **vmin**: lower limit of normalization, type `float`.
+ param **vmax**: upper limit of normalization, type `float`.
+ param **N**: highest color value in `RGB`, type `int`.
+ param **filename**: name of the color map file, type `str`.
+ param **kwargs**: additional arguments like `B`, `F` or `N` for color definition, type `str`.
"""
# Create string for upper, lower colors.
b = np.array(kwargs.get("B", cmap(0.0)))
f = np.array(kwargs.get("F", cmap(1.0)))
na = np.array(kwargs.get("N", (0, 0, 0))).astype(float)
ext = (np.c_[b[:3], f[:3], na[:3]].T * 255).astype(int)
extstr = "B {:3d} {:3d} {:3d}\nF {:3d} {:3d} {:3d}\nN {:3d} {:3d} {:3d}"
ex = extstr.format(*list(ext.flatten()))
# Create colormap.
cols = (cmap(np.linspace(0.0, 1.0, N))[:, :3] * 255).astype(int)
vals = np.linspace(vmin, vmax, N)
arr = np.c_[vals[:-1], cols[:-1], vals[1:], cols[1:]]
# Save to file.
fmt = "%e %3d %3d %3d %e %3d %3d %3d"
np.savetxt(
filename, arr, fmt=fmt, header="# COLOR_MODEL = RGB", footer=ex, comments=""
)
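# Example: dump matplotlib's "Reds" colormap to a GMT-style .cpt file (a sketch;
# the output filename is a placeholder).
#
#   export_cmap_to_cpt(plt.get_cmap("Reds"), vmin=0, vmax=1, N=255,
#                      filename="reds.cpt")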
def plot_embedding3D(path: str):
"""
    **Plots 3-1 embeddings iteratively within a directory.**
Plots a set of embeddings always with three dimensions and a time delay of 1.
This can be changed arbitrarily, according to the estimated parameters.
    + param **path**: Path to the directory containing `.npy` files, type `str`.
"""
files = os.listdir(path)
for i in files:
if "embedded_3-1" in i:
data = np.load(path + i)
fig = plt.figure()
ax = plt.axes(projection="3d")
zdata = data.transpose()[0]
xdata = data.transpose()[1]
ydata = data.transpose()[2]
ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap="Blues")
plt.show()
def mean_heatkernel2D(
directory: str,
homgroup: int,
limit: int = 0,
store: bool = False,
plot: bool = False,
filename: str = "figure",
filetype: str = "svg",
colormap: str = "Reds",
):
"""
    **Calculates a mean heat kernel over a large collection of files in a directory.**
    Calculates a mean heat kernel map from a folder full of `.npy` files with heat maps.
This can optionally be saved or displayed as a plot in the browser.
+ param **directory**: directory of `.npy` files for line plots, type `str`.
+ param **homgroup**: specify which homology group to plot, type `int`.
+ param **limit**: limit the number of files to display, type `int`.
+ param **store**: whether to store the file or not, type `bool`.
+ param **filename**: name of the file to be saved, type `str`.
+ param **colormap**: plot color scales, type `str`.
+ return **fig**: figure object, type `plotly.graph_objects.Figure`.
"""
files = []
count = 0
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
file = os.path.abspath(os.path.join(dirpath, f))
try:
data = np.load(file)
files.append(data[0][homgroup])
if limit > 0:
count += 1
if count == limit:
count = 0
break
except IndexError:
pass
data = np.mean(files, axis=0)
fig = plot_heatmap(data, colorscale=colormap)
if plot:
fig.show()
if store:
plotly.io.write_image(fig, filename, format=filetype)
return fig
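# Example call (a sketch; the directory path is a placeholder and is expected to
# hold .npy arrays of giotto-tda heat-kernel output indexed by homology group):
#
#   fig = mean_heatkernel2D("/path/to/heatkernels/", homgroup=1,
#                           limit=100, plot=True)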
def massive_surface_plot3D(
directory: str,
homgroup: int,
title: str = "Default",
limit: int = 45000,
store: bool = False,
plotting: bool = False,
filename: str = "figure",
filetype: str = "svg",
colormap: str = "Reds",
):
"""
**Calculates a solid surface from curves.**
Calculates a surface from a directory full of `npy` files of curves (intended for Betti curves
from persistence diagrams). For the `x` and `y` coordinates, the corresponding indices of the Betti
curves themselves and the filtration index are selected. The value of the function is then visible
on the 'z' axis. Optionally, these can be displayed as a graph in the browser or also saved.
Example:
``Python
massive_surface_plot3D(
"/home/lume/documents/siemens_samples/power_plant_silhouette/",
homgroup=1,
store=True,
plotting=True,
limit=1000
)
```
+ param **directory**: directory of `.npy` files for line plots, type `str`.
+ param **homgroup**: determines which homology group to plot, type `int`.
+ param **limit**: limit on the number of files to display, type `int`.
+ param **plotting**: whether the file should be plotted or not, type `bool`.
+ param **store**: whether the file should be stored or not, type `bool`.
+ param **filename**: name of the file to be saved, type `str`.
+ param **colormap**: plot color scales, type `str`.
+ return **fig**: figure object, type `plotly.graph_objects.Figure`.
"""
files = []
count = 0
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
file = os.path.abspath(os.path.join(dirpath, f))
try:
data = np.load(file)[0][homgroup]
files.append(data)
if limit > 0:
count += 1
if count == limit:
count = 0
break
except IndexError:
pass
files = np.array(files)
x = np.linspace(0, 1, files.shape[0])
fig = go.Figure(
data=[go.Surface(z=files, x=x, y=x, colorscale=colormap, reversescale=True)]
)
camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=1.5, y=1.5, z=0.25),
)
fig.update_layout(
title=title,
autosize=False,
width=1000,
height=1000,
margin=dict(l=65, r=50, b=90, t=90),
scene_camera=camera,
)
if plotting:
fig.show()
if store:
fig.write_image(filename + "." + filetype)
return fig
def massive_line_plot3D(
directory: str,
homgroup: int,
resolution: int = 300,
k: int = 3,
limit: int = 0,
elev: float = 20,
azim: int = 135,
KKS: str = "LBB",
fignum: int = 0,
plot: bool = False,
):
"""
**Function creates a massive line chart from a directory of `.npy` files that contain the data.**
This function creates a line graph from a set of `.npy` files. The line graph will be three dimensional
    and each line will be plotted along the `z` axis, while the other two axes represent the
    plot index and the time step. It is assumed that each `.npy` file stores a one-dimensional
    array. The method iterates over a directory of `.npy` files, each of which contains a one-dimensional time series.
Examples:
    ```python
massive_line_plot3D(
directory="/home/lume/documents/siemens_samples/kraftwerk_betticurve/", homgroup=0
)
```
Example of multiple plots of Betti curves / persistence silhouettes:
    ```python
number = 0
for i in cfg.pptcat:
massive_line_plot3D(
directory="/home/lume/documents/siemens_kraftwerk_samples/kraftwerk_bettikurve/",
homgroup=0,
KKS=i,
fignum=count,
)
count += 1
plt.show()
plt.close()
```
+ param **directory**: directory of `.npy` files for line plots, type `str`.
+ param **homgroup**: specify which homology group to plot, type `int`.
+ param **resolution**: number of points added between min and max for interpolation, type `int`.
+ param **k**: B-spline degree, type `int`.
+ param **limit**: limit of the number of files to be displayed, type `int`.
+ param **elev**: angle of horizontal shift, type `float`.
+ param **azim**: degree of counterclockwise rotation, type `int`.
+ param **plot**: whether to plot or not, type `bool`.
+ param **fignum**: figure number for multiple figures, type `int`.
+ return **True**: True if plot was successful, type `bool`.
"""
files = []
count = 0
fig = plt.figure(fignum)
ax = fig.add_subplot(111, projection="3d")
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
if KKS in f:
file = os.path.abspath(os.path.join(dirpath, f))
try:
data = np.load(file)[0][homgroup]
files.append(data)
if limit > 0:
count += 1
if count == limit:
count = 0
break
except IndexError:
pass
else:
pass
if not files:
return False
# files = sorted(files, key=sum)
for f in files:
x_array = np.full(len(f), count, dtype=int)
y_array = np.arange(start=0, stop=len(f), step=1)
evenly_spaced_interval = np.linspace(0, 1, len(files))
if "betticurve" in directory:
interval = evenly_spaced_interval[count]
color1 = cm.RdGy(interval)
color2 = cm.RdBu(interval)
else:
color1 = cm.RdGy(evenly_spaced_interval[count])
color2 = cm.RdBu(evenly_spaced_interval[count])
ax.plot(x_array, y_array, f, "x", markersize=2, color=color1)
ax.plot(x_array, y_array, f, color=color2)
ax.set_title(KKS + "_homgroup_" + str(homgroup), y=1.08)
ax.view_init(elev=elev, azim=azim)
ax.ticklabel_format(style="sci", useOffset=True)
ax.w_xaxis.set_pane_color((1, 0.921, 0.803, 0.1))
ax.w_yaxis.set_pane_color((1, 0.921, 0.803, 0.1))
ax.w_zaxis.set_pane_color((1, 0.921, 0.803, 0.1))
ax.w_xaxis.set_tick_params(
labelsize=12,
which="major",
pad=-5,
colors="black",
labelrotation=23,
direction="in",
)
ax.w_yaxis.set_tick_params(
labelsize=12,
which="major",
pad=-5,
colors="black",
labelrotation=-23,
direction="in",
)
ax.w_zaxis.set_tick_params(
labelsize=12, which="major", pad=4, colors="black", direction="in"
)
formatter = ticker.ScalarFormatter(useMathText=False)
formatter.set_scientific(False)
formatter.set_powerlimits((-2, 3))
ax.w_xaxis.set_major_formatter(formatter)
# ax.w_yaxis.set_major_formatter(formatter)
# ax.w_zaxis.set_major_formatter(formatter)
# ax.set_yticklabels([])
# ax.set_xticklabels([])
# ax.set_zticklabels([])
count += 1
if limit > 0:
if count == limit:
if plot:
plt.show()
return True
if plot:
plt.show()
return True
| 32.577023
| 113
| 0.588282
|
cc7dae20a71817480a5112fcd8fc75e6b87a72f8
| 28,753
|
py
|
Python
|
photutils/psf/epsf_stars.py
|
prajwel/photutils
|
f59bae804393be70131cb716d53207d3d56a83c7
|
[
"BSD-3-Clause"
] | 167
|
2015-05-17T15:03:58.000Z
|
2022-03-23T13:31:33.000Z
|
photutils/psf/epsf_stars.py
|
prajwel/photutils
|
f59bae804393be70131cb716d53207d3d56a83c7
|
[
"BSD-3-Clause"
] | 701
|
2015-01-05T11:47:12.000Z
|
2022-03-29T14:37:03.000Z
|
photutils/psf/epsf_stars.py
|
prajwel/photutils
|
f59bae804393be70131cb716d53207d3d56a83c7
|
[
"BSD-3-Clause"
] | 119
|
2015-02-04T21:43:02.000Z
|
2022-02-15T10:55:13.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools to extract cutouts of stars and data
structures to hold the cutouts for fitting and building ePSFs.
"""
import warnings
from astropy.nddata import NDData
from astropy.nddata.utils import (overlap_slices, NoOverlapError,
PartialOverlapError)
from astropy.table import Table
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import WCS
from astropy.wcs.utils import skycoord_to_pixel
import numpy as np
from ..aperture import BoundingBox
__all__ = ['EPSFStar', 'EPSFStars', 'LinkedEPSFStar', 'extract_stars']
class EPSFStar:
"""
A class to hold a 2D cutout image and associated metadata of a star
used to build an ePSF.
Parameters
----------
data : `~numpy.ndarray`
A 2D cutout image of a single star.
weights : `~numpy.ndarray` or `None`, optional
A 2D array of the weights associated with the input ``data``.
cutout_center : tuple of two floats or `None`, optional
The ``(x, y)`` position of the star's center with respect to the
        input cutout ``data`` array. If `None`, then the center of
the input cutout ``data`` array will be used.
origin : tuple of two int, optional
The ``(x, y)`` index of the origin (bottom-left corner) pixel of
the input cutout array with respect to the original array from
which the cutout was extracted. This can be used to convert
positions within the cutout image to positions in the original
image. ``origin`` and ``wcs_large`` must both be input for a
linked star (a single star extracted from different images).
wcs_large : `None` or WCS object, optional
A WCS object associated with the large image from which
the cutout array was extracted. It should not be the
WCS object of the input cutout ``data`` array. The WCS
object must support the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_ (e.g.,
`astropy.wcs.WCS`, `gwcs.wcs.WCS`). ``origin`` and ``wcs_large``
must both be input for a linked star (a single star extracted
from different images).
id_label : int, str, or `None`, optional
An optional identification number or label for the star.
"""
def __init__(self, data, weights=None, cutout_center=None, origin=(0, 0),
wcs_large=None, id_label=None):
self._data = np.asanyarray(data)
self.shape = self._data.shape
if weights is not None:
if weights.shape != data.shape:
raise ValueError('weights must have the same shape as the '
'input data array.')
self.weights = np.asanyarray(weights, dtype=float).copy()
else:
self.weights = np.ones_like(self._data, dtype=float)
self.mask = (self.weights <= 0.)
# mask out invalid image data
invalid_data = np.logical_not(np.isfinite(self._data))
if np.any(invalid_data):
self.weights[invalid_data] = 0.
self.mask[invalid_data] = True
        self.cutout_center = cutout_center
self.origin = np.asarray(origin)
self.wcs_large = wcs_large
self.id_label = id_label
self.flux = self.estimate_flux()
self._excluded_from_fit = False
self._fitinfo = None
def __array__(self):
"""
Array representation of the mask data array (e.g., for
matplotlib).
"""
return self._data
@property
def data(self):
"""The 2D cutout image."""
return self._data
@property
def cutout_center(self):
"""
A `~numpy.ndarray` of the ``(x, y)`` position of the star's
center with respect to the input cutout ``data`` array.
"""
return self._cutout_center
@cutout_center.setter
def cutout_center(self, value):
if value is None:
value = ((self.shape[1] - 1) / 2., (self.shape[0] - 1) / 2.)
else:
if len(value) != 2:
raise ValueError('The "cutout_center" attribute must have '
'two elements in (x, y) form.')
self._cutout_center = np.asarray(value)
@property
def center(self):
"""
A `~numpy.ndarray` of the ``(x, y)`` position of the star's
center in the original (large) image (not the cutout image).
"""
return self.cutout_center + self.origin
@lazyproperty
def slices(self):
"""
A tuple of two slices representing the cutout region with
respect to the original (large) image.
"""
        return (slice(self.origin[1], self.origin[1] + self.shape[0]),
                slice(self.origin[0], self.origin[0] + self.shape[1]))
@lazyproperty
def bbox(self):
"""
The minimal `~photutils.aperture.BoundingBox` for the cutout
region with respect to the original (large) image.
"""
return BoundingBox(self.slices[1].start, self.slices[1].stop,
self.slices[0].start, self.slices[0].stop)
def estimate_flux(self):
"""
Estimate the star's flux by summing values in the input cutout
array.
Missing data is filled in by interpolation to better estimate
the total flux.
"""
from .epsf import _interpolate_missing_data
if np.any(self.mask):
data_interp = _interpolate_missing_data(self.data, method='cubic',
mask=self.mask)
data_interp = _interpolate_missing_data(data_interp,
method='nearest',
mask=self.mask)
flux = np.sum(data_interp, dtype=float)
else:
flux = np.sum(self.data, dtype=float)
return flux
def register_epsf(self, epsf):
"""
Register and scale (in flux) the input ``epsf`` to the star.
Parameters
----------
epsf : `EPSFModel`
The ePSF to register.
Returns
-------
data : `~numpy.ndarray`
A 2D array of the registered/scaled ePSF.
"""
yy, xx = np.indices(self.shape, dtype=float)
xx = xx - self.cutout_center[0]
yy = yy - self.cutout_center[1]
return self.flux * epsf.evaluate(xx, yy, flux=1.0, x_0=0.0, y_0=0.0)
def compute_residual_image(self, epsf):
"""
Compute the residual image of the star data minus the
registered/scaled ePSF.
Parameters
----------
epsf : `EPSFModel`
The ePSF to subtract.
Returns
-------
data : `~numpy.ndarray`
A 2D array of the residual image.
"""
return self.data - self.register_epsf(epsf)
@lazyproperty
def _xy_idx(self):
"""
1D arrays of x and y indices of unmasked pixels in the cutout
reference frame.
"""
yidx, xidx = np.indices(self._data.shape)
return xidx[~self.mask].ravel(), yidx[~self.mask].ravel()
@lazyproperty
def _xidx(self):
"""
1D arrays of x indices of unmasked pixels in the cutout
reference frame.
"""
return self._xy_idx[0]
@lazyproperty
def _yidx(self):
"""
1D arrays of y indices of unmasked pixels in the cutout
reference frame.
"""
return self._xy_idx[1]
@property
def _xidx_centered(self):
"""
1D array of x indices of unmasked pixels, with respect to the
star center, in the cutout reference frame.
"""
return self._xy_idx[0] - self.cutout_center[0]
@property
def _yidx_centered(self):
"""
1D array of y indices of unmasked pixels, with respect to the
star center, in the cutout reference frame.
"""
return self._xy_idx[1] - self.cutout_center[1]
@lazyproperty
def _data_values(self):
"""1D array of unmasked cutout data values."""
return self.data[~self.mask].ravel()
@lazyproperty
def _data_values_normalized(self):
"""
1D array of unmasked cutout data values, normalized by the
star's total flux.
"""
return self._data_values / self.flux
@lazyproperty
def _weight_values(self):
"""
1D array of unmasked weight values.
"""
return self.weights[~self.mask].ravel()
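# --- Illustrative usage sketch, added for clarity; not part of the original
# module. It relies only on the EPSFStar API defined above, and the Gaussian
# cutout built here is a hypothetical stand-in for a real star image.
def _example_epsfstar_usage():
    """Build an EPSFStar from a synthetic 11x11 cutout and inspect it."""
    yy, xx = np.mgrid[0:11, 0:11]
    cutout = 100.0 * np.exp(-0.5 * (((xx - 5.2) / 1.5)**2
                                    + ((yy - 4.8) / 1.5)**2))
    # origin is the (x, y) pixel of the cutout's bottom-left corner in the
    # (hypothetical) large image it was extracted from.
    star = EPSFStar(cutout, cutout_center=(5.2, 4.8), origin=(120, 80))
    # flux sums the cutout; center maps the cutout-frame center back into
    # the large-image frame via origin; bbox covers the cutout region.
    return star.flux, star.center, star.bbox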
class EPSFStars:
"""
Class to hold a list of `EPSFStar` and/or `LinkedEPSFStar` objects.
Parameters
----------
    stars_list : list of `EPSFStar` or `LinkedEPSFStar` objects
A list of `EPSFStar` and/or `LinkedEPSFStar` objects.
"""
def __init__(self, stars_list):
if isinstance(stars_list, (EPSFStar, LinkedEPSFStar)):
self._data = [stars_list]
elif isinstance(stars_list, list):
self._data = stars_list
else:
raise ValueError('stars_list must be a list of EPSFStar and/or '
'LinkedEPSFStar objects.')
def __len__(self):
return len(self._data)
def __getitem__(self, index):
return self.__class__(self._data[index])
def __delitem__(self, index):
del self._data[index]
def __iter__(self):
for i in self._data:
yield i
# explicit set/getstate to avoid infinite recursion
# from pickler using __getattr__
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
def __getattr__(self, attr):
if attr in ['cutout_center', 'center', 'flux',
'_excluded_from_fit']:
result = np.array([getattr(star, attr) for star in self._data])
else:
result = [getattr(star, attr) for star in self._data]
if len(self._data) == 1:
result = result[0]
return result
def _getattr_flat(self, attr):
values = []
for item in self._data:
if isinstance(item, LinkedEPSFStar):
values.extend(getattr(item, attr))
else:
values.append(getattr(item, attr))
return np.array(values)
@property
def cutout_center_flat(self):
"""
A `~numpy.ndarray` of the ``(x, y)`` position of all the
stars' centers (including linked stars) with respect to the
input cutout ``data`` array, as a 2D array (``n_all_stars`` x
2).
Note that when `EPSFStars` contains any `LinkedEPSFStar`, the
``cutout_center`` attribute will be a nested 3D array.
"""
return self._getattr_flat('cutout_center')
@property
def center_flat(self):
"""
A `~numpy.ndarray` of the ``(x, y)`` position of all the stars'
centers (including linked stars) with respect to the original
(large) image (not the cutout image) as a 2D array
(``n_all_stars`` x 2).
Note that when `EPSFStars` contains any `LinkedEPSFStar`, the
``center`` attribute will be a nested 3D array.
"""
return self._getattr_flat('center')
@lazyproperty
def all_stars(self):
"""
A list of all `EPSFStar` objects stored in this object,
including those that comprise linked stars (i.e.,
`LinkedEPSFStar`), as a flat list.
"""
stars = []
for item in self._data:
if isinstance(item, LinkedEPSFStar):
stars.extend(item.all_stars)
else:
stars.append(item)
return stars
@property
def all_good_stars(self):
"""
A list of all `EPSFStar` objects stored in this object that have
not been excluded from fitting, including those that comprise
linked stars (i.e., `LinkedEPSFStar`), as a flat list.
"""
stars = []
for star in self.all_stars:
if star._excluded_from_fit:
continue
else:
stars.append(star)
return stars
@lazyproperty
def n_stars(self):
"""
The total number of stars.
A linked star is counted only once.
"""
return len(self._data)
@lazyproperty
def n_all_stars(self):
"""
The total number of `EPSFStar` objects, including all the linked
stars within `LinkedEPSFStar`. Each linked star is included in
the count.
"""
return len(self.all_stars)
@property
def n_good_stars(self):
"""
The total number of `EPSFStar` objects, including all the linked
stars within `LinkedEPSFStar`, that have not been excluded from
fitting. Each non-excluded linked star is included in the
count.
"""
return len(self.all_good_stars)
@lazyproperty
def _max_shape(self):
"""
The maximum x and y shapes of all the `EPSFStar` objects
(including linked stars).
"""
return np.max([star.shape for star in self.all_stars],
axis=0)
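# --- Illustrative sketch, added for clarity; not part of the original module.
# EPSFStars forwards attribute access to each star via __getattr__, so
# per-star quantities can be read in one call. The two synthetic cutouts
# below are hypothetical.
def _example_epsfstars_usage():
    """Collect two synthetic EPSFStar objects and query the container."""
    star1 = EPSFStar(np.ones((11, 11)), cutout_center=(5.0, 5.0))
    star2 = EPSFStar(2.0 * np.ones((13, 13)), cutout_center=(6.0, 6.0))
    stars = EPSFStars([star1, star2])
    # n_stars counts entries, flux is a vectorized per-star array, and
    # cutout_center_flat stacks the (x, y) centers into an (n, 2) array.
    return stars.n_stars, stars.flux, stars.cutout_center_flat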
class LinkedEPSFStar(EPSFStars):
"""
A class to hold a list of `EPSFStar` objects for linked stars.
Linked stars are `EPSFStar` cutouts from different images that
represent the same physical star. When building the ePSF, linked
stars are constrained to have the same sky coordinates.
Parameters
----------
    stars_list : list of `EPSFStar` objects
A list of `EPSFStar` objects for the same physical star. Each
`EPSFStar` object must have a valid ``wcs_large`` attribute to
convert between pixel and sky coordinates.
"""
def __init__(self, stars_list):
for star in stars_list:
if not isinstance(star, EPSFStar):
raise ValueError('stars_list must contain only EPSFStar '
'objects.')
if star.wcs_large is None:
raise ValueError('Each EPSFStar object must have a valid '
'wcs_large attribute.')
super().__init__(stars_list)
def constrain_centers(self):
"""
Constrain the centers of linked `EPSFStar` objects (i.e., the
same physical star) to have the same sky coordinate.
Only `EPSFStar` objects that have not been excluded during the
ePSF build process will be used to constrain the centers.
The single sky coordinate is calculated as the mean of sky
coordinates of the linked stars.
"""
if len(self._data) < 2: # no linked stars
return
idx = np.logical_not(self._excluded_from_fit).nonzero()[0]
if idx.size == 0:
warnings.warn('Cannot constrain centers of linked stars because '
'all the stars have been excluded during the ePSF '
'build process.', AstropyUserWarning)
return
good_stars = [self._data[i] for i in idx]
coords = []
for star in good_stars:
wcs = star.wcs_large
xposition = star.center[0]
yposition = star.center[1]
try:
coords.append(wcs.pixel_to_world_values(xposition, yposition))
except AttributeError:
if isinstance(wcs, WCS):
# for Astropy < 3.1 WCS support
coords.append(wcs.all_pix2world(xposition, yposition, 0))
else:
raise ValueError('Input wcs does not support the shared '
'WCS interface.')
# compute mean cartesian coordinates
lon, lat = np.transpose(coords)
lon *= np.pi / 180.
lat *= np.pi / 180.
x_mean = np.mean(np.cos(lat) * np.cos(lon))
y_mean = np.mean(np.cos(lat) * np.sin(lon))
z_mean = np.mean(np.sin(lat))
# convert mean cartesian coordinates back to spherical
hypot = np.hypot(x_mean, y_mean)
mean_lon = np.arctan2(y_mean, x_mean)
mean_lat = np.arctan2(z_mean, hypot)
mean_lon *= 180. / np.pi
mean_lat *= 180. / np.pi
# convert mean sky coordinates back to center pixel coordinates
# for each star
for star in good_stars:
try:
center = star.wcs_large.world_to_pixel_values(mean_lon,
mean_lat)
except AttributeError:
# for Astropy < 3.1 WCS support
center = star.wcs_large.all_world2pix(mean_lon, mean_lat, 0)
star.cutout_center = np.array(center) - star.origin
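# --- Worked sketch, added for clarity; not part of the original module. It
# mirrors the mean-sky-position computation in
# LinkedEPSFStar.constrain_centers: longitudes/latitudes are averaged on the
# unit sphere via Cartesian unit vectors instead of averaging the angles
# directly, which handles wrap-around at 0/360 degrees. The two coordinates
# are hypothetical.
def _example_mean_sky_position():
    """Average (lon, lat) pairs given in degrees on the unit sphere."""
    lon = np.radians([10.0, 350.0])   # straddles the 0-degree wrap
    lat = np.radians([20.0, 22.0])
    x = np.mean(np.cos(lat) * np.cos(lon))
    y = np.mean(np.cos(lat) * np.sin(lon))
    z = np.mean(np.sin(lat))
    mean_lon = np.degrees(np.arctan2(y, x))          # ~0 deg, not 180 deg
    mean_lat = np.degrees(np.arctan2(z, np.hypot(x, y)))
    return mean_lon, mean_lat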
def extract_stars(data, catalogs, size=(11, 11)):
"""
Extract cutout images centered on stars defined in the input
catalog(s).
Stars where the cutout array bounds partially or completely lie
outside of the input ``data`` image will not be extracted.
Parameters
----------
data : `~astropy.nddata.NDData` or list of `~astropy.nddata.NDData`
A `~astropy.nddata.NDData` object or a list of
`~astropy.nddata.NDData` objects containing the 2D image(s) from
which to extract the stars. If the input ``catalogs`` contain
only the sky coordinates (i.e., not the pixel coordinates) of
the stars then each of the `~astropy.nddata.NDData` objects must
have a valid ``wcs`` attribute.
catalogs : `~astropy.table.Table`, list of `~astropy.table.Table`
A catalog or list of catalogs of sources to be extracted from
the input ``data``. To link stars in multiple images as a
single source, you must use a single source catalog where the
positions defined in sky coordinates.
If a list of catalogs is input (or a single catalog with a
single `~astropy.nddata.NDData` object), they are assumed to
correspond to the list of `~astropy.nddata.NDData` objects
input in ``data`` (i.e., a separate source catalog for each
2D image). For this case, the center of each source can be
defined either in pixel coordinates (in ``x`` and ``y`` columns)
or sky coordinates (in a ``skycoord`` column containing a
`~astropy.coordinates.SkyCoord` object). If both are specified,
then the pixel coordinates will be used.
If a single source catalog is input with multiple
`~astropy.nddata.NDData` objects, then these sources will be
extracted from every 2D image in the input ``data``. In this
case, the sky coordinates for each source must be specified as a
`~astropy.coordinates.SkyCoord` object contained in a column
called ``skycoord``. Each `~astropy.nddata.NDData` object in
the input ``data`` must also have a valid ``wcs`` attribute.
Pixel coordinates (in ``x`` and ``y`` columns) will be ignored.
Optionally, each catalog may also contain an ``id`` column
representing the ID/name of stars. If this column is not
present then the extracted stars will be given an ``id`` number
        corresponding to the table row number (starting at 1). Any
other columns present in the input ``catalogs`` will be ignored.
size : int or array_like (int), optional
The extraction box size along each axis. If ``size`` is a
scalar then a square box of size ``size`` will be used. If
``size`` has two elements, they should be in ``(ny, nx)`` order.
        The size must be greater than or equal to 3 pixels for both axes.
Size must be odd in both axes; if either is even, it is padded
by one to force oddness.
Returns
-------
stars : `EPSFStars` instance
A `EPSFStars` instance containing the extracted stars.
"""
if isinstance(data, NDData):
data = [data]
if isinstance(catalogs, Table):
catalogs = [catalogs]
for img in data:
if not isinstance(img, NDData):
raise ValueError('data must be a single or list of NDData '
'objects.')
for cat in catalogs:
if not isinstance(cat, Table):
raise ValueError('catalogs must be a single or list of Table '
'objects.')
if len(catalogs) == 1 and len(data) > 1:
if 'skycoord' not in catalogs[0].colnames:
raise ValueError('When inputting a single catalog with multiple '
'NDData objects, the catalog must have a '
'"skycoord" column.')
if any([img.wcs is None for img in data]):
raise ValueError('When inputting a single catalog with multiple '
'NDData objects, each NDData object must have '
'a wcs attribute.')
else:
for cat in catalogs:
if 'x' not in cat.colnames or 'y' not in cat.colnames:
if 'skycoord' not in cat.colnames:
raise ValueError('When inputting multiple catalogs, '
'each one must have a "x" and "y" '
'column or a "skycoord" column.')
else:
if any([img.wcs is None for img in data]):
raise ValueError('When inputting catalog(s) with '
'only skycoord positions, each '
'NDData object must have a wcs '
'attribute.')
if len(data) != len(catalogs):
raise ValueError('When inputting multiple catalogs, the number '
'of catalogs must match the number of input '
'images.')
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
# Force size to odd numbers such that there is always a central pixel with
# even spacing either side of the pixel.
size = tuple(_size+1 if _size % 2 == 0 else _size for _size in size)
min_size = 3
if size[0] < min_size or size[1] < min_size:
raise ValueError(f'size must be >= {min_size} for x and y')
    if len(catalogs) == 1:  # may include linked stars
use_xy = True
if len(data) > 1:
use_xy = False # linked stars require skycoord positions
stars = []
# stars is a list of lists, one list of stars in each image
for img in data:
stars.append(_extract_stars(img, catalogs[0], size=size,
use_xy=use_xy))
# transpose the list of lists, to associate linked stars
stars = list(map(list, zip(*stars)))
# remove 'None' stars (i.e., no or partial overlap in one or
# more images) and handle the case of only one "linked" star
stars_out = []
n_input = len(catalogs[0]) * len(data)
n_extracted = 0
for star in stars:
good_stars = [i for i in star if i is not None]
n_extracted += len(good_stars)
if not good_stars:
continue # no overlap in any image
elif len(good_stars) == 1:
good_stars = good_stars[0] # only one star, cannot be linked
else:
good_stars = LinkedEPSFStar(good_stars)
stars_out.append(good_stars)
else: # no linked stars
stars_out = []
for img, cat in zip(data, catalogs):
stars_out.extend(_extract_stars(img, cat, size=size, use_xy=True))
n_input = len(stars_out)
stars_out = [star for star in stars_out if star is not None]
n_extracted = len(stars_out)
n_excluded = n_input - n_extracted
if n_excluded > 0:
warnings.warn('{} star(s) were not extracted because their cutout '
'region extended beyond the input image.'
.format(n_excluded), AstropyUserWarning)
return EPSFStars(stars_out)
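# --- Illustrative usage sketch, added for clarity; not part of the original
# module. The image and source positions are synthetic placeholders; only
# the extract_stars/EPSFStars API defined in this module is assumed.
def _example_extract_stars_usage():
    """Extract 11x11 cutouts around two hypothetical sources."""
    image = np.random.normal(100.0, 1.0, size=(200, 200))
    nddata = NDData(data=image)
    catalog = Table({'x': [50.0, 120.0], 'y': [60.0, 140.0]})
    # Sources whose cutouts would extend beyond the image are skipped with
    # a warning; the rest come back as EPSFStar objects in an EPSFStars.
    return extract_stars(nddata, catalog, size=11)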
def _extract_stars(data, catalog, size=(11, 11), use_xy=True):
"""
Extract cutout images from a single image centered on stars defined
in the single input catalog.
Parameters
----------
data : `~astropy.nddata.NDData`
A `~astropy.nddata.NDData` object containing the 2D image from
which to extract the stars. If the input ``catalog`` contains
only the sky coordinates (i.e., not the pixel coordinates) of
the stars then the `~astropy.nddata.NDData` object must have a
valid ``wcs`` attribute.
catalog : `~astropy.table.Table`
A single catalog of sources to be extracted from the input
``data``. The center of each source can be defined either in
pixel coordinates (in ``x`` and ``y`` columns) or sky
coordinates (in a ``skycoord`` column containing a
`~astropy.coordinates.SkyCoord` object). If both are specified,
then the value of the ``use_xy`` keyword determines which
coordinates will be used.
size : int or array_like (int), optional
The extraction box size along each axis. If ``size`` is a
scalar then a square box of size ``size`` will be used. If
``size`` has two elements, they should be in ``(ny, nx)`` order.
        The size must be greater than or equal to 3 pixels for both axes.
Size must be odd in both axes; if either is even, it is padded
by one to force oddness.
use_xy : bool, optional
Whether to use the ``x`` and ``y`` pixel positions when both
pixel and sky coordinates are present in the input catalog
table. If `False` then sky coordinates are used instead of pixel
coordinates (e.g., for linked stars). The default is `True`.
Returns
-------
stars : list of `EPSFStar` objects
A list of `EPSFStar` instances containing the extracted stars.
"""
# Force size to odd numbers such that there is always a central pixel with
# even spacing either side of the pixel.
if np.isscalar(size):
size = size+1 if size % 2 == 0 else size
else:
size = tuple(_size+1 if _size % 2 == 0 else _size for _size in size)
colnames = catalog.colnames
if ('x' not in colnames or 'y' not in colnames) or not use_xy:
try:
xcenters, ycenters = data.wcs.world_to_pixel(catalog['skycoord'])
except AttributeError:
# for Astropy < 3.1 WCS support
xcenters, ycenters = skycoord_to_pixel(catalog['skycoord'],
data.wcs, origin=0,
mode='all')
else:
xcenters = catalog['x'].data.astype(float)
ycenters = catalog['y'].data.astype(float)
if 'id' in colnames:
ids = catalog['id']
else:
ids = np.arange(len(catalog), dtype=int) + 1
if data.uncertainty is None:
weights = np.ones_like(data.data)
else:
if data.uncertainty.uncertainty_type == 'weights':
weights = np.asanyarray(data.uncertainty.array, dtype=float)
else:
warnings.warn('The data uncertainty attribute has an unsupported '
'type. Only uncertainty_type="weights" can be '
'used to set weights. Weights will be set to 1.',
AstropyUserWarning)
weights = np.ones_like(data.data)
if data.mask is not None:
weights[data.mask] = 0.
stars = []
for xcenter, ycenter, obj_id in zip(xcenters, ycenters, ids):
try:
large_slc, _ = overlap_slices(data.data.shape, size,
(ycenter, xcenter), mode='strict')
data_cutout = data.data[large_slc]
weights_cutout = weights[large_slc]
except (PartialOverlapError, NoOverlapError):
stars.append(None)
continue
origin = (large_slc[1].start, large_slc[0].start)
cutout_center = (xcenter - origin[0], ycenter - origin[1])
star = EPSFStar(data_cutout, weights_cutout,
cutout_center=cutout_center, origin=origin,
wcs_large=data.wcs, id_label=obj_id)
stars.append(star)
return stars
| 36.258512
| 78
| 0.586095
|
6a499f76c82b055790f0c809f2a664890889ddf1
| 33,775
|
py
|
Python
|
tracjsgantt/tracjsgantt.py
|
trac-hacks/trac-jsGantt
|
bea7edee90cf0769fba0a4b9b645c88b72f7e5fc
|
[
"BSD-3-Clause"
] | null | null | null |
tracjsgantt/tracjsgantt.py
|
trac-hacks/trac-jsGantt
|
bea7edee90cf0769fba0a4b9b645c88b72f7e5fc
|
[
"BSD-3-Clause"
] | null | null | null |
tracjsgantt/tracjsgantt.py
|
trac-hacks/trac-jsGantt
|
bea7edee90cf0769fba0a4b9b645c88b72f7e5fc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2014 Chris Nelson <Chris.Nelson@SIXNET.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import re
import time
from datetime import timedelta, datetime
from operator import itemgetter, attrgetter
from trac.util.datefmt import localtz
try:
from trac.util.datefmt import to_utimestamp
except ImportError:
from trac.util.datefmt import to_timestamp as to_utimestamp
from trac.util.text import to_unicode
from trac.util.html import Markup
from trac.wiki.macros import WikiMacroBase
from trac.web.chrome import Chrome
import copy
from trac.ticket.query import Query
from trac.config import IntOption, Option
from trac.core import implements, Component, TracError
from trac.web.api import IRequestFilter
from trac.web.chrome import ITemplateProvider, add_script, add_stylesheet
from pkg_resources import resource_filename
from trac.wiki.api import parse_args
from tracpm import TracPM
try:
from trac.util.text import javascript_quote
except ImportError:
# Fallback for Trac<0.11.3 - verbatim copy from Trac 1.0
_js_quote = {'\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f',
'\n': '\\n', '\r': '\\r', '\t': '\\t', "'": "\\'"}
for i in range(0x20) + [ord(c) for c in '&<>']:
_js_quote.setdefault(chr(i), '\\u%04x' % i)
_js_quote_re = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t\'&<>]')
def javascript_quote(text):
"""Quote strings for inclusion in javascript"""
if not text:
return ''
def replace(match):
return _js_quote[match.group(0)]
return _js_quote_re.sub(replace, text)
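# Illustrative check, added for clarity; not part of the original plugin.
# Whether Trac's javascript_quote or the verbatim fallback above is used,
# quotes, backslashes and control characters are escaped so ticket text can
# be embedded safely in the generated JavaScript.
def _example_javascript_quote():
    quoted = javascript_quote('say "hi"\n\\')
    # Each special character becomes a backslash escape in the output.
    assert quoted == 'say \\"hi\\"\\n\\\\'
    return quoted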
# ========================================================================
class TracJSGanttSupport(Component):
implements(IRequestFilter, ITemplateProvider)
Option('trac-jsgantt', 'option.format', 'day',
"""Initial format of Gantt chart""")
Option('trac-jsgantt', 'option.formats', 'day|week|month|quarter',
"""Formats to show for Gantt chart""")
IntOption('trac-jsgantt', 'option.sample', 0,
"""Show sample Gantt""")
IntOption('trac-jsgantt', 'option.res', 1,
"""Show resource column""")
IntOption('trac-jsgantt', 'option.dur', 1,
"""Show duration column""")
IntOption('trac-jsgantt', 'option.comp', 1,
"""Show percent complete column""")
Option('trac-jsgantt', 'option.caption', 'Resource',
"""Caption to follow task in Gantt""")
IntOption('trac-jsgantt', 'option.startDate', 1,
"""Show start date column""")
IntOption('trac-jsgantt', 'option.endDate', 1,
"""Show finish date column""")
Option('trac-jsgantt', 'option.dateDisplay', 'mm/dd/yyyy',
"""Format to display dates""")
IntOption('trac-jsgantt', 'option.openLevel', 999,
"""How many levels of task hierarchy to show open""")
IntOption('trac-jsgantt', 'option.expandClosedTickets', 1,
"""Show children of closed tasks in the task hierarchy""")
Option('trac-jsgantt', 'option.colorBy', 'priority',
"""Field to use to color tasks""")
IntOption('trac-jsgantt', 'option.lwidth', None,
"""Width (in pixels) of left table""")
IntOption('trac-jsgantt', 'option.showdep', 1,
"""Show dependencies in Gantt""")
IntOption('trac-jsgantt', 'option.userMap', 1,
"""Map user IDs to user names""")
IntOption('trac-jsgantt', 'option.omitMilestones', 0,
"""Omit milestones""")
Option('trac-jsgantt', 'option.schedule', 'alap',
"""Schedule algorithm: alap or asap""")
IntOption('trac-jsgantt', 'option.doResourceLeveling', 0,
"""Resource level (1) or not (0)""")
# This seems to be the first floating point option.
Option('trac-jsgantt', 'option.hoursPerDay', '8.0',
"""Hours worked per day""")
Option('trac-jsgantt', 'option.display', None,
"""Display filter for tickets in the form 'field1:value1|field2:value2' or 'field:value1|value2'; displays tickets where field1==value1, etc.""")
Option('trac-jsgantt', 'option.order', 'wbs',
"""Fields to sort tasks by before display. May include tickets fields (including custom fields) or 'wbs'.""")
Option('trac-jsgantt', 'option.scrollTo', None,
"""Date to scroll chart to (yyyy-mm--dd or 'today')""")
Option('trac-jsGantt', 'option.linkStyle', 'standard',
"""Style for ticket links; jsgantt (new window) or standard browser behavior like ticket links.""")
# ITemplateProvider methods
def get_htdocs_dirs(self):
return [('tracjsgantt', resource_filename(__name__, 'htdocs'))]
def get_templates_dirs(self):
return []
# IRequestFilter methods
def pre_process_request(self, req, handler):
# I think we should look for a TracJSGantt on the page and set
# a flag for the post_process_request handler if found
return handler
def post_process_request(self, req, template, data, content_type):
add_script(req, 'tracjsgantt/jsgantt.js')
add_stylesheet(req, 'tracjsgantt/jsgantt.css')
add_stylesheet(req, 'tracjsgantt/tracjsgantt.css')
return template, data, content_type
class TracJSGanttChart(WikiMacroBase):
"""
Displays a Gantt chart for the specified tickets.
The chart display can be controlled with a number of macro arguments:
||'''Argument'''||'''Description'''||'''Default'''||
|| `formats`||What to display in the format control. A pipe-separated list of `minute`, `hour`, `day`, `week`, `month`, and `quarter` (though `minute` may not be very useful). ||'day|week|month|quarter'||
|| `format`||Initial display format, one of those listed in `formats` || First format ||
|| `sample`||Display sample tasks (1) or not (0) || 0 ||
|| `res`||Show resource column (1) or not (0) || 1 ||
    || `dur`||Show duration column (1) or not (0) || 1 ||
|| `comp`||Show percent complete column (1) or not (0) || 1 ||
    || `caption`||Caption to place to the right of tasks: None, Caption, Resource, Duration, %Complete || Resource ||
|| `startDate`||Show start date column (1) or not (0) || 1 ||
|| `endDate`||Show end date column (1) or not (0) || 1 ||
|| `dateDisplay`||Date display format: 'mm/dd/yyyy', 'dd/mm/yyyy', or 'yyyy-mm-dd' || 'mm/dd/yyyy' ||
|| `openLevel`||Number of levels of tasks to show. 1 = only top level task. || 999 ||
|| `colorBy`||Field to use to choose task colors. Each unique value of the field will have a different color task. Other likely useful values are owner and milestone but any field can be used. || priority ||
|| `root`||When using something like Subtickets plugin to maintain a tree of tickets and subtickets, you may create a Gantt showing a ticket and all of its descendants with `root=<ticket#>`. The macro uses the configured `parent` field to find all descendant tasks and build an `id=` argument for Trac's native query handler.[[br]][[br]]Multiple roots may be provided like `root=1|12|32`.[[br]][[br]]When used in a ticket description or comment, `root=self` will display the current ticket's descendants.||None||
|| `goal`||When using something like MasterTickets plugin to maintain ticket dependencies, you may create a Gantt showing a ticket and all of its predecessors with `goal=<ticket#>`. The macro uses the configured `succ` field to find all predecessor tasks and build an `id=` argument for Trac's native query handler.[[br]][[br]]Multiple goals may be provided like `goal=1|12|32`.[[br]][[br]]When used in a ticket description or comment, `goal=self` will display the current ticket's predecessors.||None||
|| `lwidth`||The width, in pixels, of the table of task names, etc. on the left of the Gantt. || ||
|| `showdep`||Show dependencies (1) or not (0)||1||
|| `userMap`||Map user !IDs to full names (1) or not (0).||1||
|| `omitMilestones`||Show milestones for displayed tickets (0) or only those specified by `milestone=` (1)||0||
    || `schedule`||Schedule tasks based on dependencies and estimates. Either as soon as possible (asap) or as late as possible (alap)||alap||
||`doResourceLeveling`||Resolve resource conflicts (1) or not (0) when scheduling tickets.||0||
||`display`||Filter for limiting display of tickets. `owner:fred` shows only tickets owned by fred. `status:closed` shows only closed tickets.||None||
||`order`||Order of fields used to sort tickets before display. `order=milestone` sorts by milestone. May include ticket fields, including custom fields, or "wbs" (work breakdown structure).||wbs||
Site-wide defaults for macro arguments may be set in the `trac-jsgantt` section of `trac.ini`. `option.<opt>` overrides the built-in default for `<opt>` from the table above.
All other macro arguments are treated as TracQuery specification (e.g., milestone=ms1|ms2) to control which tickets are displayed.
"""
pm = None
options = {}
# The date part of these formats has to be in sync. Including
# hour and minute in the pyDateFormat makes the plugin easier to
# debug at times because that's how the date shows up in page
# source.
#
# jsDateFormat is the date format that the JavaScript expects
# dates in. It can be one of 'mm/dd/yyyy', 'dd/mm/yyyy', or
# 'yyyy-mm-dd'. pyDateFormat is a strptime() format that matches
# jsDateFormat. As long as they are in sync, there's no real
# reason to change them.
jsDateFormat = 'yyyy-mm-dd'
pyDateFormat = '%Y-%m-%d %H:%M'
# User map (login -> realname) is loaded on demand, once.
# Initialization to None means it is not yet initialized.
user_map = None
def __init__(self):
# Instantiate the PM component
self.pm = TracPM(self.env)
self.GanttID = 'g'
# All the macro's options with default values.
# Anything else passed to the macro is a TracQuery field.
options = ('format', 'formats', 'sample', 'res', 'dur', 'comp',
'caption', 'startDate', 'endDate', 'dateDisplay',
'openLevel', 'expandClosedTickets', 'colorBy', 'lwidth',
'showdep', 'userMap', 'omitMilestones',
'schedule', 'hoursPerDay', 'doResourceLeveling',
'display', 'order', 'scrollTo', 'linkStyle')
for opt in options:
self.options[opt] = self.config.get('trac-jsgantt',
'option.%s' % opt)
def _begin_gantt(self, options):
if options['format']:
defaultFormat = options['format']
else:
defaultFormat = options['formats'].split('|')[0]
showdep = options['showdep']
text = ''
text += '<div style="position:relative" class="gantt" ' + \
'id="GanttChartDIV_'+self.GanttID+'"></div>\n'
text += '<script language="javascript">\n'
text += 'var '+self.GanttID+' = new JSGantt.GanttChart("'+ \
self.GanttID+'",document.getElementById("GanttChartDIV_'+ \
self.GanttID+'"), "%s", "%s");\n' % \
(javascript_quote(defaultFormat), showdep)
text += 'var t;\n'
text += 'if (window.addEventListener){\n'
text += ' window.addEventListener("resize", ' + \
'function() { ' + self.GanttID+'.Draw(); '
if options['showdep']:
text += self.GanttID+'.DrawDependencies();'
text += '}, false);\n'
text += '} else {\n'
text += ' window.attachEvent("onresize", ' + \
'function() { '+self.GanttID+'.Draw(); '
if options['showdep']:
text += self.GanttID+'.DrawDependencies();'
text += '});\n'
text += '}\n'
return text
def _end_gantt(self, options):
chart = ''
chart += self.GanttID+'.Draw();\n'
if options['showdep']:
chart += self.GanttID+'.DrawDependencies();\n'
chart += '</script>\n'
return chart
def _gantt_options(self, options):
opt = ''
if (options['linkStyle']):
linkStyle = options['linkStyle']
else:
linkStyle = 'standard'
opt += self.GanttID+'.setLinkStyle("%s")\n' % linkStyle
opt += self.GanttID+'.setShowRes(%s);\n' % options['res']
opt += self.GanttID+'.setShowDur(%s);\n' % options['dur']
opt += self.GanttID+'.setShowComp(%s);\n' % options['comp']
if (options['scrollTo']):
opt += self.GanttID+'.setScrollDate("%s");\n' % options['scrollTo']
w = options['lwidth']
if w:
opt += self.GanttID+'.setLeftWidth(%s);\n' % w
opt += self.GanttID+'.setCaptionType("%s");\n' % \
javascript_quote(options['caption'])
opt += self.GanttID+'.setShowStartDate(%s);\n' % options['startDate']
opt += self.GanttID+'.setShowEndDate(%s);\n' % options['endDate']
opt += self.GanttID+'.setDateInputFormat("%s");\n' % \
javascript_quote(self.jsDateFormat)
opt += self.GanttID+'.setDateDisplayFormat("%s");\n' % \
javascript_quote(options['dateDisplay'])
opt += self.GanttID+'.setFormatArr(%s);\n' % ','.join(
'"%s"' % javascript_quote(f) for f in options['formats'].split('|'))
opt += self.GanttID+'.setPopupFeatures("location=1,scrollbars=1");\n'
return opt
# TODO - use ticket-classN styles instead of colors?
def _add_sample_tasks(self):
task= ''
tasks = self.GanttID+'.setDateInputFormat("mm/dd/yyyy");\n'
# ID Name Start End Display Link MS Res Pct Gr Par Open Dep Cap
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',1, "Define Chart API", "", "", "#ff0000", "http://help.com", 0, "Brian", 0, 1, 0, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',11, "Chart Object", "2/20/2011", "2/20/2011", "#ff00ff", "http://www.yahoo.com", 1, "Shlomy", 100, 0, 1, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',12, "Task Objects", "", "", "#00ff00", "", 0, "Shlomy", 40, 1, 1, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',121, "Constructor Proc", "2/21/2011", "3/9/2011", "#00ffff", "http://www.yahoo.com", 0, "Brian T.", 60, 0, 12, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',122, "Task Variables", "3/6/2011", "3/11/2011", "#ff0000", "http://help.com", 0, "", 60, 0, 12, 1,121));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',123, "Task Functions", "3/9/2011", "3/29/2011", "#ff0000", "http://help.com", 0, "Anyone", 60, 0, 12, 1, 0, "This is another caption"));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',2, "Create HTML Shell", "3/24/2011", "3/25/2011", "#ffff00", "http://help.com", 0, "Brian", 20, 0, 0, 1,122));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',3, "Code Javascript", "", "", "#ff0000", "http://help.com", 0, "Brian", 0, 1, 0, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',31, "Define Variables", "2/25/2011", "3/17/2011", "#ff00ff", "http://help.com", 0, "Brian", 30, 0, 3, 1, 0,"Caption 1"));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',32, "Calculate Chart Size", "3/15/2011", "3/24/2011", "#00ff00", "http://help.com", 0, "Shlomy", 40, 0, 3, 1));\n'
        tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',33, "Draw Task Items", "", "", "#00ff00", "http://help.com", 0, "Someone", 40, 1, 3, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',332, "Task Label Table", "3/6/2011", "3/11/2011", "#0000ff", "http://help.com", 0, "Brian", 60, 0, 33, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',333, "Task Scrolling Grid", "3/9/2011", "3/20/2011", "#0000ff", "http://help.com", 0, "Brian", 60, 0, 33, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',34, "Draw Task Bars", "", "", "#990000", "http://help.com", 0, "Anybody", 60, 1, 3, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',341, "Loop each Task", "3/26/2011", "4/11/2011", "#ff0000", "http://help.com", 0, "Brian", 60, 0, 34, 1, "332,333"));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',342, "Calculate Start/Stop", "4/12/2011", "5/18/2011", "#ff6666", "http://help.com", 0, "Brian", 60, 0, 34, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',343, "Draw Task Div", "5/13/2011", "5/17/2011", "#ff0000", "http://help.com", 0, "Brian", 60, 0, 34, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',344, "Draw Completion Div", "5/17/2011", "6/04/2011", "#ff0000", "http://help.com", 0, "Brian", 60, 0, 34, 1));\n'
tasks += self.GanttID+'.AddTaskItem(new JSGantt.TaskItem('+self.GanttID+',35, "Make Updates", "10/17/2011","12/04/2011","#f600f6", "http://help.com", 0, "Brian", 30, 0, 3, 1));\n'
return tasks
# Get the required columns for the tickets which match the
# criteria in options.
def _query_tickets(self, options):
query_options = {}
for key in options.keys():
if not key in self.options:
query_options[key] = options[key]
# The fields always needed by the Gantt
fields = set([
'description',
'owner',
'type',
'status',
'summary',
'milestone',
'priority'])
# Make sure the coloring field is included
if 'colorBy' in options:
fields.add(str(options['colorBy']))
rawtickets = self.pm.query(query_options, fields, self.req)
# Do permissions check on tickets
tickets = [t for t in rawtickets
if 'TICKET_VIEW' in self.req.perm('ticket', t['id'])]
return tickets
def _compare_tickets(self, t1, t2):
# If t2 depends on t1, t2 is first
if t1['id'] in self.pm.successors(t2):
return 1
# If t1 depends on t2, t1 is first
elif t2['id'] in self.pm.successors(t1):
return -1
# If t1 ends first, it's first
elif self.pm.finish(t1) < self.pm.finish(t2):
return -1
# If t2 ends first, it's first
elif self.pm.finish(t1) > self.pm.finish(t2):
return 1
# End dates are same. If t1 starts later, it's later
elif self.pm.start(t1) > self.pm.start(t2):
return 1
# Otherwise, preserve order (assume t1 is before t2 when called)
else:
return 0
# Compute WBS for sorting and figure out the tickets' levels for
# controlling how many levels are open.
#
# WBS is a list like [ 2, 4, 1] (the first child of the fourth
# child of the second top-level element).
def _compute_wbs(self):
# Set the ticket's level and wbs then recurse to children.
def _setLevel(tid, wbs, level):
# Update this node
self.ticketsByID[tid]['level'] = level
self.ticketsByID[tid]['wbs'] = copy.copy(wbs)
# Recurse to children
childIDs = self.pm.children(self.ticketsByID[tid])
if childIDs:
childTickets = [self.ticketsByID[cid] for cid in childIDs]
childTickets.sort(self._compare_tickets)
childIDs = [ct['id'] for ct in childTickets]
# Add another level
wbs.append(1)
for c in childIDs:
wbs = _setLevel(c, wbs, level+1)
# Remove the level we added
wbs.pop()
# Increment last element of wbs
wbs[len(wbs)-1] += 1
return wbs
# Set WBS and level on all top level tickets (and recurse) If
# a ticket's parent is not in the viewed tickets, consider it
# top-level
wbs = [ 1 ]
roots = self.pm.roots(self.ticketsByID)
for t in self.tickets:
if t['id'] in roots:
wbs = _setLevel(t['id'], wbs, 1)
def _task_display(self, t, options):
def _buildMap(field):
self.classMap = {}
i = 0
for t in self.tickets:
if t[field] not in self.classMap:
i = i + 1
self.classMap[t[field]] = i
def _buildEnumMap(field):
self.classMap = {}
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name," +
db.cast('value', 'int') +
" FROM enum WHERE type=%s", (field,))
for name, value in cursor:
self.classMap[name] = value
display = None
colorBy = options['colorBy']
# Build the map the first time we need it
if self.classMap == None:
# Enums (TODO: what others should I list?)
if options['colorBy'] in ['priority', 'severity']:
_buildEnumMap(colorBy)
else:
_buildMap(colorBy)
# Set display based on class map
if t[colorBy] in self.classMap:
display = 'class=ticket-class%d' % self.classMap[t[colorBy]]
# Add closed status for strike through
if t['status'] == 'closed':
if display == None:
display = 'class=ticket-closed'
else:
display += ' ticket-closed'
if display == None:
display = '#ff7f3f'
return display
# Format a ticket into JavaScript source to display the
# task. ticket is expected to have:
# children - child ticket IDs or None
# description - ticket description.
# id - ticket ID, an integer
# level - levels from root (0)
# link - What to link to
# owner - Used as resource name.
# percent - integer percent complete, 0..100 (or "act/est")
# priority - used to color the task
# calc_finish - end date (ignored if children is not None)
# self.fields[parent] - parent ticket ID
# self.fields[pred] - predecessor ticket IDs
# calc_start - start date (ignored if children is not None)
# status - string displayed in tool tip ; FIXME - not displayed yet
# summary - ticket summary
# type - string displayed in tool tip FIXME - not displayed yet
def _format_ticket(self, ticket, options):
# Translate owner to full name
def _owner(ticket):
if self.pm.isMilestone(ticket):
owner_name = ''
else:
owner_name = ticket['owner']
if options['userMap']:
# Build the map the first time we use it
if self.user_map is None:
self.user_map = {}
for username, name, email in self.env.get_known_users():
self.user_map[username] = name
# Map the user name
if self.user_map.get(owner_name):
owner_name = self.user_map[owner_name]
return owner_name
task = ''
# pID, pName
if self.pm.isMilestone(ticket):
if ticket['id'] > 0:
# Put ID number on inchpebbles
name = 'MS:%s (#%s)' % (ticket['summary'], ticket['id'])
else:
# Don't show bogus ID of milestone pseudo tickets.
name = 'MS:%s' % ticket['summary']
else:
name = "#%d:%s (%s %s)" % \
(ticket['id'], ticket['summary'],
ticket['status'], ticket['type'])
task += 't = new JSGantt.TaskItem(%s,%d,"%s",' % \
(self.GanttID, ticket['id'], javascript_quote(name))
# pStart, pEnd
task += '"%s",' % self.pm.start(ticket).strftime(self.pyDateFormat)
task += '"%s",' % self.pm.finish(ticket).strftime(self.pyDateFormat)
# pDisplay
task += '"%s",' % javascript_quote(self._task_display(ticket, options))
# pLink
task += '"%s",' % javascript_quote(ticket['link'])
# pMile
if self.pm.isMilestone(ticket):
task += '1,'
else:
task += '0,'
# pRes (owner)
task += '"%s",' % javascript_quote(_owner(ticket))
# pComp (percent complete); integer 0..100
task += '"%s",' % self.pm.percentComplete(ticket)
# pGroup (has children)
if self.pm.children(ticket):
task += '%s,' % 1
else:
task += '%s,' % 0
# pParent (parent task ID)
# If there's no parent, don't link to it
if self.pm.parent(ticket) == None:
task += '%s,' % 0
else:
task += '%s,' % self.pm.parent(ticket)
# open
if int(ticket['level']) < int(options['openLevel']) and \
((options['expandClosedTickets'] != 0) or \
(ticket['status'] != 'closed')):
openGroup = 1
else:
openGroup = 0
task += '%d,' % openGroup
# predecessors
pred = [str(s) for s in self.pm.predecessors(ticket)]
if len(pred):
task += '"%s",' % javascript_quote(','.join(pred))
else:
task += '"%s",' % javascript_quote(','.join(''))
# caption
# FIXME - if caption isn't set to caption, use "" because the
# description could be quite long and take a long time to make
# safe and display.
task += '"%s (%s %s)"' % (javascript_quote(ticket['description']),
javascript_quote(ticket['status']),
javascript_quote(ticket['type']))
task += ');\n'
task += self.GanttID+'.AddTaskItem(t);\n'
return task
def _filter_tickets(self, options, tickets):
# Build the list of display filters from the configured value
if not options.get('display') or options['display'] == '':
displayFilter = {}
else:
# The general form is
# 'display=field:value|field:value...'. Split on pipe to
# get each part
displayList = options['display'].split('|')
# Process each part into the display filter
displayFilter = {}
field = None
for f in displayList:
parts = f.split(':')
# Just one part, it's a value for the previous field
if len(parts) == 1:
if field == None:
raise TracError(('display option error in "%s".' +
' Should be "display=f1:v1|f2:v2"' +
' or "display=f:v1|v2".') %
options['display'])
else:
value = parts[0]
else:
field = parts[0]
value = parts[1]
if field in displayFilter:
displayFilter[field].append(value)
else:
displayFilter[field] = [ value ]
# If present and 1, true, otherwise false.
if options.get('omitMilestones') \
and int(options['omitMilestones']) == 1:
omitMilestones = True
else:
omitMilestones = False
# Filter the tickets
filteredTickets = []
for ticket in tickets:
# Default to showing every ticket
fieldDisplay = True
if omitMilestones and \
self.pm.isTracMilestone(ticket):
fieldDisplay = False
else:
# Process each element and disable display if all
# filters fail to match. ((or) disjunction)
for f in displayFilter:
display = True
for v in displayFilter[f]:
if ticket[f] == v:
display = True
break
display = False
fieldDisplay = fieldDisplay & display
if fieldDisplay:
filteredTickets.append(ticket)
return filteredTickets
# Sort tickets by options['order']. For example,
# order=milestone|wbs sorts by wbs within milestone.
#
# http://wiki.python.org/moin/HowTo/Sorting (at
# #Sort_Stability_and_Complex_Sorts) notes that Python list
# sorting is stable so you can sort by increasing priority of keys
# (tertiary, then secondary, then primary) to get a multi-key
# sort.
#
# FIXME - this sorts enums by text, not value.
def _sortTickets(self, tickets, options):
# Force milestones to the end
def msSorter(t1, t2):
# If t1 is a not milestone and t2 is, t1 comes first
if not self.pm.isMilestone(t1) and self.pm.isMilestone(t2):
result = -1
elif self.pm.isMilestone(t1) and not self.pm.isMilestone(t2):
result = 1
else:
result = 0
return result
# Get all the sort fields
sortFields = options['order'].split('|')
# If sorting by milestone, force milestone type tickets to the
# end before any other sort. The stability of the other sorts
# will keep them at the end of the milestone group (unless
# overridden by other fields listed in `order`).
if 'milestone' in sortFields:
tickets.sort(msSorter)
# Reverse sort fields so lowest priority is first
sortFields.reverse()
# Do the sort by each field
for field in sortFields:
tickets.sort(key=itemgetter(field))
return tickets
def _add_tasks(self, options):
if options.get('sample') and int(options['sample']) != 0:
tasks = self._add_sample_tasks()
else:
tasks = ''
self.tickets = self._query_tickets(options)
# Faster lookups for WBS and scheduling.
self.ticketsByID = {}
for t in self.tickets:
self.ticketsByID[t['id']] = t
# Schedule the tasks
self.pm.computeSchedule(options, self.tickets)
# Sort tickets by date for computing WBS
self.tickets.sort(self._compare_tickets)
# Compute the WBS
self._compute_wbs()
# Set the link for clicking through the Gantt chart
for t in self.tickets:
if t['id'] > 0:
t['link'] = self.req.href.ticket(t['id'])
else:
t['link'] = self.req.href.milestone(t['summary'])
# Filter tickets based on options (omitMilestones, display, etc.)
displayTickets = self._filter_tickets(options, self.tickets)
# Sort the remaining tickets for display (based on order option).
displayTickets = self._sortTickets(displayTickets, options)
for ticket in displayTickets:
tasks += self._format_ticket(ticket, options)
return tasks
def _parse_options(self, content):
_, options = parse_args(content, strict=False)
for opt in self.options.keys():
if opt in options:
# FIXME - test for success, log on failure
if isinstance(self.options[opt], (int, long)):
options[opt] = int(options[opt])
else:
options[opt] = self.options[opt]
# FIXME - test for success, log on failure
options['hoursPerDay'] = float(options['hoursPerDay'])
# Make sure we get all the tickets. (For complex Gantts,
# there can be a lot of tickets, easily more than the default
# max.)
if 'max' not in options:
options['max'] = 999
return options
def expand_macro(self, formatter, name, content):
self.req = formatter.req
# Each invocation needs to build its own map.
self.classMap = None
options = self._parse_options(content)
# Surely we can't create two charts in one microsecond.
self.GanttID = 'g_'+str(to_utimestamp(datetime.now(localtz)))
chart = ''
tasks = self._add_tasks(options)
if len(tasks) == 0:
chart += 'No tasks selected.'
else:
chart += self._begin_gantt(options)
chart += self._gantt_options(options)
chart += tasks
chart += self._end_gantt(options)
return chart
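# --- Illustrative sketch, added for clarity; not part of the original plugin.
# It mirrors the multi-key sort pattern used in _sortTickets: because
# Python's sort is stable, sorting by the lowest-priority key first and the
# highest-priority key last gives e.g. "wbs within milestone". The ticket
# dicts below are hypothetical.
def _example_multikey_sort():
    tickets = [{'milestone': 'ms2', 'wbs': [1]},
               {'milestone': 'ms1', 'wbs': [2]},
               {'milestone': 'ms1', 'wbs': [1]}]
    for field in reversed(['milestone', 'wbs']):   # lowest priority first
        tickets.sort(key=itemgetter(field))
    return tickets   # -> ms1/[1], ms1/[2], ms2/[1]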
| 45.093458
| 513
| 0.564352
|
a0d7dee0382f1011c6ac2dcd68a2950c47317be8
| 1,036
|
py
|
Python
|
src/dataset_reader/sequence_tagging.py
|
bupt-nlp/few-shot-learning-for-slu
|
7ec3c9d245e8ce60d227c5414b7e1010174f5a62
|
[
"Apache-2.0"
] | null | null | null |
src/dataset_reader/sequence_tagging.py
|
bupt-nlp/few-shot-learning-for-slu
|
7ec3c9d245e8ce60d227c5414b7e1010174f5a62
|
[
"Apache-2.0"
] | 1
|
2022-02-18T16:45:39.000Z
|
2022-02-18T16:45:39.000Z
|
src/dataset_reader/sequence_tagging.py
|
bupt-nlp/few-shot-learning-for-slu
|
7ec3c9d245e8ce60d227c5414b7e1010174f5a62
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from typing import List
from collections import defaultdict
import pandas as pd
from src.config import (
Config
)
from src.schema import SequenceTaggingInputExample
from src.dataset_reader.base import DataSetReader
class SnipsDataSetReader(DataSetReader):
def __init__(self, config: Config):
super().__init__(config)
self.label_domain = defaultdict(str)
def _get_domain(self, label: str) -> str:
return self.label_domain[label]
def read(self, file: str) -> List[SequenceTaggingInputExample]:
"""snips dataset read example from file"""
table = pd.read_csv(file)
examples: List[SequenceTaggingInputExample] = []
for row_index, row in table.iterrows():
examples.append(SequenceTaggingInputExample(
example_id=row_index,
raw_text=row['text'],
labels=row['slot'].split()
))
return examples
class ATISDataSetReader(SnipsDataSetReader):
pass
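# --- Illustrative usage sketch, added for clarity; not part of the original
# module. The CSV path, its 'text'/'slot' columns and the bare Config()
# construction are hypothetical placeholders for the project's real setup.
def _example_read_snips():
    reader = SnipsDataSetReader(Config())
    examples = reader.read('data/snips/train.csv')
    # Each example pairs the raw utterance with its per-token slot labels.
    return [(example.raw_text, example.labels) for example in examples]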
| 26.564103
| 67
| 0.676641
|
3789b7b4fd0342c64a3028db0d9c5a55baaf8636
| 10,106
|
py
|
Python
|
tests/ci/rally_verify.py
|
ewhseo/rally
|
37e5475b7785e987173e118e89dbab357cd64b66
|
[
"Apache-2.0"
] | null | null | null |
tests/ci/rally_verify.py
|
ewhseo/rally
|
37e5475b7785e987173e118e89dbab357cd64b66
|
[
"Apache-2.0"
] | null | null | null |
tests/ci/rally_verify.py
|
ewhseo/rally
|
37e5475b7785e987173e118e89dbab357cd64b66
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import gzip
import json
import logging
import os
import subprocess
import sys
import uuid
import yaml
from rally.cli import envutils
from rally.common import objects
from rally import osclients
from rally.ui import utils
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
MODES_PARAMETERS = {
"full": "--set full",
"light": "--set smoke"
}
BASE_DIR = "rally-verify"
EXPECTED_FAILURES_FILE = "expected_failures.yaml"
EXPECTED_FAILURES = {
"tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON."
"test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]":
"This test fails because 'novnc' console type is unavailable."
}
# NOTE(andreykurilin): this variable is used to generate output file names
# with prefix ${CALL_COUNT}_ .
_call_count = 0
# NOTE(andreykurilin): if some command fails, script should end with
# error status
_return_status = 0
def call_rally(cmd, print_output=False, output_type=None):
global _return_status
global _call_count
_call_count += 1
data = {"cmd": "rally --rally-debug %s" % cmd}
stdout_file = "{base}/{prefix}_{cmd}.txt.gz"
if "--xfails-file" in cmd:
cmd_items = cmd.split()
for num, item in enumerate(cmd_items):
if EXPECTED_FAILURES_FILE in item:
cmd_items[num] = os.path.basename(item)
break
cmd = " ".join(cmd_items)
data.update({"stdout_file": stdout_file.format(base=BASE_DIR,
prefix=_call_count,
cmd=cmd.replace(" ", "_"))})
if output_type:
data["output_file"] = data["stdout_file"].replace(
".txt.", ".%s." % output_type)
data["cmd"] += " --%(type)s --output-file %(file)s" % {
"type": output_type, "file": data["output_file"]}
try:
LOG.info("Try to launch `%s`." % data["cmd"])
stdout = subprocess.check_output(data["cmd"], shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.error("Command `%s` is failed." % data["cmd"])
stdout = e.output
data["status"] = "fail"
_return_status = 1
else:
data["status"] = "pass"
if output_type:
# lets gzip results
with open(data["output_file"]) as f:
output = f.read()
with gzip.open(data["output_file"], "wb") as f:
f.write(output)
stdout = "$ %s\n%s" % (data["cmd"], stdout)
with gzip.open(data["stdout_file"], "wb") as f:
f.write(stdout)
if print_output:
print(stdout)
return data
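# For illustration, a successful call such as call_rally("verify list") returns
# a dict shaped roughly like (the numeric prefix depends on the call counter):
#   {"cmd": "rally --rally-debug verify list",
#    "stdout_file": "rally-verify/7_verify_list.txt.gz",
#    "status": "pass"}
# plus an additional "output_file" key when output_type is given.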
def create_file_with_xfails():
"""Create a YAML file with a list of tests that are expected to fail."""
with open(os.path.join(BASE_DIR, EXPECTED_FAILURES_FILE), "wb") as f:
yaml.dump(EXPECTED_FAILURES, f, default_flow_style=False)
return os.path.join(os.getcwd(), BASE_DIR, EXPECTED_FAILURES_FILE)
def launch_verification_once(launch_parameters):
"""Launch verification and show results in different formats."""
results = call_rally("verify start %s" % launch_parameters)
results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
results["result_in_html"] = call_rally("verify results",
output_type="html")
results["result_in_json"] = call_rally("verify results",
output_type="json")
results["show"] = call_rally("verify show")
results["show_detailed"] = call_rally("verify show --detailed")
# NOTE(andreykurilin): we need to clean verification uuid from global
    # environment to be able to load it next time (for another verification).
envutils.clear_global(envutils.ENV_VERIFICATION)
return results
def do_compare(uuid_1, uuid_2):
"""Compare and save results in different formats."""
results = {}
for output_format in ("csv", "html", "json"):
cmd = "verify compare --uuid-1 %(uuid-1)s --uuid-2 %(uuid-2)s" % {
"uuid-1": uuid_1,
"uuid-2": uuid_2
}
results[output_format] = call_rally(cmd, output_type=output_format)
return results
def render_page(**render_vars):
template = utils.get_template("ci/index_verify.html")
with open(os.path.join(BASE_DIR, "extra/index.html"), "w") as f:
f.write(template.render(**render_vars))
def main():
parser = argparse.ArgumentParser(description="Launch rally-verify job.")
parser.add_argument(
"--mode",
type=str,
default="light",
help="Mode of job. The 'full' mode corresponds to the full set of "
"Tempest tests. The 'light' mode corresponds to the smoke set "
"of Tempest tests.",
choices=MODES_PARAMETERS.keys())
parser.add_argument(
"--compare",
action="store_true",
help="Launch 2 verifications and compare them.")
parser.add_argument(
"--ctx-create-resources",
action="store_true",
help="Make Tempest context create needed resources for the tests.")
args = parser.parse_args()
if not os.path.exists("%s/extra" % BASE_DIR):
os.makedirs("%s/extra" % BASE_DIR)
# Check deployment
call_rally("deployment use --deployment devstack", print_output=True)
call_rally("deployment check", print_output=True)
config = json.loads(
subprocess.check_output(["rally", "deployment", "config"]))
config.update(config.pop("admin"))
del config["type"]
clients = osclients.Clients(objects.Credential(**config))
if args.ctx_create_resources:
# If the 'ctx-create-resources' arg is provided, delete images and
# flavors, and also create a shared network to make Tempest context
# create needed resources.
LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
"images and flavors, and also creating a shared network "
"to make Tempest context create needed resources.")
LOG.info("Deleting images.")
for image in clients.glance().images.list():
clients.glance().images.delete(image.id)
LOG.info("Deleting flavors.")
for flavor in clients.nova().flavors.list():
clients.nova().flavors.delete(flavor.id)
LOG.info("Creating a shared network.")
tenant_name = clients.keystone().tenant_name
tenant_id = clients.keystone().get_project_id(tenant_name)
net_body = {
"network": {
"name": "shared-net-%s" % str(uuid.uuid4()),
"tenant_id": tenant_id,
"shared": True
}
}
clients.neutron().create_network(net_body)
else:
        # Otherwise, just create flavors with the following properties:
        # RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB, so that the Tempest
        # context can discover them.
LOG.info("The 'ctx-create-resources' arg is not provided. "
"Creating flavors to make Tempest context discover them.")
for flv_ram in [64, 128]:
params = {
"name": "flavor-%s" % str(uuid.uuid4()),
"ram": flv_ram,
"vcpus": 1,
"disk": 0
}
LOG.info(
"Creating flavor '%s' with the following properties: RAM "
"= %dMB, VCPUs = 1, disk = 0GB" % (params["name"], flv_ram))
clients.nova().flavors.create(**params)
render_vars = {"verifications": []}
# Install Tempest
render_vars["install"] = call_rally("verify install")
# Discover tests depending on Tempest suite
discover_cmd = "verify discover"
if args.mode == "light":
discover_cmd += " --pattern smoke"
render_vars["discover"] = call_rally(discover_cmd)
    # Get Rally deployment ID (strip the trailing newline so the ID can be
    # embedded safely in the shell command below)
    rally_deployment_id = subprocess.check_output(
        "rally deployment list | awk '/devstack/ {print $2}'",
        shell=True, stderr=subprocess.STDOUT).strip()
    # Get the penultimate Tempest commit ID
    tempest_commit_id = subprocess.check_output(
        "cd /home/jenkins/.rally/tempest/for-deployment-%s && "
        "git log --skip 1 -n 1 | awk '/commit/ {print $2}' | head -1"
        % rally_deployment_id, shell=True, stderr=subprocess.STDOUT).strip()
# Reinstall Tempest with providing the --version arg to the command
render_vars["reinstall"] = call_rally(
"verify reinstall --version %s" % tempest_commit_id)
render_vars["genconfig"] = call_rally("verify genconfig")
render_vars["showconfig"] = call_rally("verify showconfig")
# Create a file with a list of tests that are expected to fail
xfails_file_path = create_file_with_xfails()
# Launch verification
launch_params = "%s --xfails-file %s" % (
MODES_PARAMETERS[args.mode], xfails_file_path)
render_vars["verifications"].append(
launch_verification_once(launch_params))
if args.compare:
render_vars["verifications"].append(
launch_verification_once(launch_params))
render_vars["compare"] = do_compare(
render_vars["verifications"][-2]["uuid"],
render_vars["verifications"][-1]["uuid"])
render_vars["list"] = call_rally("verify list")
render_page(**render_vars)
return _return_status
if __name__ == "__main__":
sys.exit(main())
| 35.459649
| 79
| 0.628933
|
3007c947ae0802e153061d3cf043925faa1d813a
| 283
|
py
|
Python
|
6.py
|
ElandGroup/py-first
|
221d87cf1b868d57925913314081349a45fb3859
|
[
"Apache-2.0"
] | null | null | null |
6.py
|
ElandGroup/py-first
|
221d87cf1b868d57925913314081349a45fb3859
|
[
"Apache-2.0"
] | null | null | null |
6.py
|
ElandGroup/py-first
|
221d87cf1b868d57925913314081349a45fb3859
|
[
"Apache-2.0"
] | null | null | null |
fruits = ["apple","orange","pear"]
for fruit in fruits:
print(fruit)
print(fruits[0])
print(fruits[1])
print(fruits[2])
for v in range(10):
print(v)
for v in range(5,10):
print(v)
for v in range(len(fruits)):
print(v)
print(fruits[v])
print(len(fruits))
| 11.791667
| 34
| 0.614841
|
7481c9e298370f034602faff6c045cb1fecf504b
| 744
|
py
|
Python
|
venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/packaging/__about__.py
|
xiegudong45/typeidea
|
db6504a232d120d6ffa185730bd35b9b9ecffa6c
|
[
"Apache-2.0"
] | 9,953
|
2019-04-03T23:41:04.000Z
|
2022-03-31T11:54:44.000Z
|
venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/packaging/__about__.py
|
xiegudong45/typeidea
|
db6504a232d120d6ffa185730bd35b9b9ecffa6c
|
[
"Apache-2.0"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
virtual/lib/python3.6/site-packages/pip/_vendor/packaging/__about__.py
|
eyern/instagram_clone
|
c18da15b35d28d91c3f63904af9d5da4e8e3e8ae
|
[
"MIT"
] | 2,803
|
2019-04-06T13:15:33.000Z
|
2022-03-31T07:42:01.000Z
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "19.0"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2019 %s" % __author__
| 26.571429
| 79
| 0.724462
|
3b74b38bd2e449b2d7e28977991ad91426958327
| 2,336
|
py
|
Python
|
src/rubrix/server/tasks/commons/task_factory.py
|
davidkartchner/rubrix
|
33faa006d7498a806a9fd594036d4a42c7d70da2
|
[
"Apache-2.0"
] | 1
|
2022-01-06T09:05:06.000Z
|
2022-01-06T09:05:06.000Z
|
src/rubrix/server/tasks/commons/task_factory.py
|
davidkartchner/rubrix
|
33faa006d7498a806a9fd594036d4a42c7d70da2
|
[
"Apache-2.0"
] | null | null | null |
src/rubrix/server/tasks/commons/task_factory.py
|
davidkartchner/rubrix
|
33faa006d7498a806a9fd594036d4a42c7d70da2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel
from rubrix.server.commons.errors import WrongTaskError
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons import BaseRecord, TaskType
from rubrix.server.tasks.commons.metrics.model.base import BaseTaskMetrics
class TaskConfig(BaseModel):
task: TaskType
query: Any
dataset: Type[DatasetDB]
record: Type[BaseRecord]
metrics: Optional[Type[BaseTaskMetrics]]
es_mappings: Dict[str, Any]
class TaskFactory:
_REGISTERED_TASKS = dict()
@classmethod
def register_task(
cls,
task_type: TaskType,
dataset_class: Type[DatasetDB],
query_request: Type[Any],
es_mappings: Dict[str, Any],
record_class: Type[BaseRecord],
metrics: Optional[Type[BaseTaskMetrics]] = None,
):
cls._REGISTERED_TASKS[task_type] = TaskConfig(
task=task_type,
dataset=dataset_class,
es_mappings=es_mappings,
query=query_request,
record=record_class,
metrics=metrics,
)
@classmethod
def get_all_configs(cls) -> List[TaskConfig]:
return [cfg for cfg in cls._REGISTERED_TASKS.values()]
@classmethod
def get_task_by_task_type(cls, task_type: TaskType) -> Optional[TaskConfig]:
return cls._REGISTERED_TASKS.get(task_type)
@classmethod
def get_task_metrics(cls, task: TaskType) -> Optional[Type[BaseTaskMetrics]]:
config = cls.get_task_by_task_type(task)
if config:
return config.metrics
@classmethod
def get_task_dataset(cls, task: TaskType) -> Type[DatasetDB]:
config = cls.__get_task_config__(task)
return config.dataset
@classmethod
def get_task_record(cls, task: TaskType) -> Type[BaseRecord]:
config = cls.__get_task_config__(task)
return config.record
@classmethod
def get_task_mappings(cls, task: TaskType) -> Dict[str, Any]:
config = cls.__get_task_config__(task)
return config.es_mappings
@classmethod
def __get_task_config__(cls, task):
config = cls.get_task_by_task_type(task)
if not config:
raise WrongTaskError(f"No configuration found for task {task}")
return config
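# Illustrative registration sketch. The dataset/query/record classes and the
# TaskType member below are placeholders for whatever a concrete task defines:
#
#   TaskFactory.register_task(
#       task_type=TaskType.text_classification,
#       dataset_class=MyTaskDatasetDB,
#       query_request=MyTaskQuery,
#       es_mappings={"properties": {}},
#       record_class=MyTaskRecord,
#   )
#   config = TaskFactory.get_task_by_task_type(TaskType.text_classification)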
| 29.948718
| 81
| 0.677654
|
a60b5be87f97cef216ce91314a29ab1e0378b810
| 30
|
py
|
Python
|
tests/conftest.py
|
azmeuk/pytest-parallel
|
5610f2fd6f90fdb17b63df2f9a329fe9f2b1a39c
|
[
"MIT"
] | 219
|
2018-05-30T00:34:22.000Z
|
2022-03-28T07:42:44.000Z
|
tests/conftest.py
|
azmeuk/pytest-parallel
|
5610f2fd6f90fdb17b63df2f9a329fe9f2b1a39c
|
[
"MIT"
] | 153
|
2019-05-02T19:06:45.000Z
|
2020-12-15T10:39:03.000Z
|
tests/conftest.py
|
azmeuk/pytest-parallel
|
5610f2fd6f90fdb17b63df2f9a329fe9f2b1a39c
|
[
"MIT"
] | 59
|
2018-08-27T07:12:18.000Z
|
2022-03-18T03:21:39.000Z
|
pytest_plugins = ['pytester']
| 15
| 29
| 0.733333
|
8809b5dd3d564bc524209316bee5382f5565a9fd
| 842
|
py
|
Python
|
examples/quickstart.py
|
killionadmin/ILOscripts
|
951f53df9bf58bc7b186a501e2d123645f0e55a3
|
[
"Apache-2.0"
] | 27
|
2015-04-07T13:44:20.000Z
|
2016-03-26T01:23:58.000Z
|
examples/quickstart.py
|
killionadmin/ILOscripts
|
951f53df9bf58bc7b186a501e2d123645f0e55a3
|
[
"Apache-2.0"
] | 5
|
2017-05-11T23:36:34.000Z
|
2018-05-27T09:11:17.000Z
|
examples/quickstart.py
|
killionadmin/ILOscripts
|
951f53df9bf58bc7b186a501e2d123645f0e55a3
|
[
"Apache-2.0"
] | 13
|
2015-03-25T19:03:36.000Z
|
2016-03-11T13:21:18.000Z
|
import sys
import redfish
# When running on the server locally use the following commented values
# iLO_host = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO address, iLO account name,
# and password to send https requests
iLO_host = "https://10.0.0.100"
login_account = "admin"
login_password = "password"
# Create a REST object
REST_OBJ = redfish.rest_client(base_url=iLO_host, username=login_account,
                               password=login_password, default_prefix='/rest/v1')
# Login into the server and create a session
REST_OBJ.login(auth="session")
# Do a GET on a given path
response = REST_OBJ.get("/rest/v1/systems/1", None)
# Print out the response
sys.stdout.write("%s\n" % response)
# Logout of the current session
REST_OBJ.logout()
| 29.034483
| 75
| 0.706651
|
023b36221ba83d14318b8cb009d6d2dd5ddbc11f
| 4,134
|
py
|
Python
|
facebook-echobot/lib/python2.7/site-packages/txaio/_common.py
|
mauriciofh/FacebookBot
|
c629922e1d90cf970c52c9649530a1f77f8f83d2
|
[
"MIT"
] | null | null | null |
facebook-echobot/lib/python2.7/site-packages/txaio/_common.py
|
mauriciofh/FacebookBot
|
c629922e1d90cf970c52c9649530a1f77f8f83d2
|
[
"MIT"
] | null | null | null |
facebook-echobot/lib/python2.7/site-packages/txaio/_common.py
|
mauriciofh/FacebookBot
|
c629922e1d90cf970c52c9649530a1f77f8f83d2
|
[
"MIT"
] | 1
|
2018-11-07T12:52:07.000Z
|
2018-11-07T12:52:07.000Z
|
import math
from txaio.interfaces import IBatchedTimer
class _BatchedCall(object):
"""
Wraps IDelayedCall-implementing objects, implementing only the API
which txaio promised in the first place: .cancel
Do not create these yourself; use _BatchedTimer.call_later()
"""
def __init__(self, timer, index, the_call):
# XXX WeakRef?
self._timer = timer
self._index = index
self._call = the_call
def cancel(self):
self._timer._remove_call(self._index, self)
self._timer = None
def __call__(self):
return self._call()
class _BatchedTimer(IBatchedTimer):
"""
Internal helper.
Instances of this are returned from
:meth:`txaio.make_batched_timer` and that is the only way they
should be instantiated. You may depend on methods from the
interface class only (:class:`txaio.IBatchedTimer`)
**NOTE** that the times are in milliseconds in this class!
"""
def __init__(self, bucket_milliseconds, chunk_size,
seconds_provider, delayed_call_creator):
self._bucket_milliseconds = float(bucket_milliseconds)
self._chunk_size = chunk_size
self._get_seconds = seconds_provider
self._create_delayed_call = delayed_call_creator
        self._buckets = dict()  # bucket time in ms -> (IDelayedCall, list of calls)
def call_later(self, delay, func, *args, **kwargs):
"""
IBatchedTimer API
"""
# "quantize" the delay to the nearest bucket
real_time = int(self._get_seconds() + delay) * 1000
real_time -= int(real_time % self._bucket_milliseconds)
call = _BatchedCall(self, real_time, lambda: func(*args, **kwargs))
try:
self._buckets[real_time][1].append(call)
except KeyError:
# new bucket; need to add "actual" underlying IDelayedCall
delayed_call = self._create_delayed_call(
(real_time / 1000.0) - self._get_seconds(),
self._notify_bucket, real_time,
)
self._buckets[real_time] = (delayed_call, [call])
return call
def _notify_bucket(self, real_time):
"""
Internal helper. This 'does' the callbacks in a particular bucket.
:param real_time: the bucket to do callbacks on
"""
(delayed_call, calls) = self._buckets[real_time]
del self._buckets[real_time]
errors = []
def notify_one_chunk(calls, chunk_size, chunk_delay_ms):
for call in calls[:chunk_size]:
try:
call()
except Exception as e:
errors.append(e)
calls = calls[chunk_size:]
if calls:
self._create_delayed_call(
chunk_delay_ms / 1000.0,
notify_one_chunk, calls, chunk_size, chunk_delay_ms,
)
else:
# done all calls; make sure there were no errors
if len(errors):
msg = u"Error(s) processing call_later bucket:\n"
for e in errors:
msg += u"{}\n".format(e)
raise RuntimeError(msg)
# ceil()ing because we want the number of chunks, and a
# partial chunk is still a chunk
delay_ms = self._bucket_milliseconds / math.ceil(float(len(calls)) / self._chunk_size)
notify_one_chunk(calls, self._chunk_size, delay_ms)
def _remove_call(self, real_time, call):
"""
Internal helper. Removes a (possibly still pending) call from a
        bucket. It is *not* an error if the bucket is gone (e.g. the
call has already happened).
"""
try:
(delayed_call, calls) = self._buckets[real_time]
except KeyError:
# no such bucket ... error? swallow?
return
# remove call; if we're empty, cancel underlying
# bucket-timeout IDelayedCall
calls.remove(call)
if not calls:
del self._buckets[real_time]
delayed_call.cancel()
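# Worked example of the quantization in call_later() above (illustrative
# numbers): with bucket_milliseconds=300 and _get_seconds() == 100.0, a call to
# call_later(0.35, f) computes int(100.0 + 0.35) * 1000 == 100000, subtracts
# int(100000 % 300.0) == 100, and files the call under the 99900 ms bucket,
# sharing a single underlying IDelayedCall with any other call that quantizes
# to the same bucket.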
| 35.033898
| 94
| 0.597242
|
8666fdeef06c230df84c39fd441f5024eea1e8ed
| 28,391
|
py
|
Python
|
tests/unit/schema/test_schemas.py
|
niteoweb/openapi-core
|
ed39ee8fcf6a9e5c43411f891ec028a5f84bc0d8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/schema/test_schemas.py
|
niteoweb/openapi-core
|
ed39ee8fcf6a9e5c43411f891ec028a5f84bc0d8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/schema/test_schemas.py
|
niteoweb/openapi-core
|
ed39ee8fcf6a9e5c43411f891ec028a5f84bc0d8
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import uuid
import mock
import pytest
from openapi_core.extensions.models.models import Model
from openapi_core.schema.schemas.enums import SchemaFormat, SchemaType
from openapi_core.schema.schemas.exceptions import (
InvalidSchemaValue, MultipleOneOfSchema, NoOneOfSchema, OpenAPISchemaError,
UndefinedSchemaProperty
)
from openapi_core.schema.schemas.models import Schema
from six import b, u
class TestSchemaIteritems(object):
@pytest.fixture
def schema(self):
properties = {
'application/json': mock.sentinel.application_json,
'text/csv': mock.sentinel.text_csv,
}
return Schema('object', properties=properties)
    def test_valid(self, schema):
for name in schema.properties.keys():
assert schema[name] == schema.properties[name]
class TestSchemaUnmarshal(object):
def test_deprecated(self):
schema = Schema('string', deprecated=True)
value = 'test'
with pytest.warns(DeprecationWarning):
result = schema.unmarshal(value)
assert result == value
def test_string_valid(self):
schema = Schema('string')
value = 'test'
result = schema.unmarshal(value)
assert result == value
def test_string_format_uuid_valid(self):
schema = Schema(SchemaType.STRING, schema_format=SchemaFormat.UUID)
value = str(uuid.uuid4())
result = schema.unmarshal(value)
assert result == uuid.UUID(value)
def test_string_format_uuid_uuid_quirks_valid(self):
schema = Schema(SchemaType.STRING, schema_format=SchemaFormat.UUID)
value = uuid.uuid4()
result = schema.unmarshal(value, strict=False)
assert result == value
def test_string_float_invalid(self):
schema = Schema('string')
value = 1.23
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_string_none(self):
schema = Schema('string')
value = None
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_string_default(self):
default_value = 'default'
schema = Schema('string', default=default_value)
value = None
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_string_default_nullable(self):
default_value = 'default'
schema = Schema('string', default=default_value, nullable=True)
value = None
result = schema.unmarshal(value)
assert result == default_value
def test_string_format_date(self):
schema = Schema('string', schema_format='date')
value = '2018-01-02'
result = schema.unmarshal(value)
assert result == datetime.date(2018, 1, 2)
def test_string_format_datetime(self):
schema = Schema('string', schema_format='date-time')
value = '2018-01-02T00:00:00Z'
result = schema.unmarshal(value)
assert result == datetime.datetime(2018, 1, 2, 0, 0)
@pytest.mark.xfail(reason="No custom formats support atm")
def test_string_format_custom(self):
custom_format = 'custom'
schema = Schema('string', schema_format=custom_format)
value = 'x'
with mock.patch.dict(
Schema.STRING_FORMAT_CAST_CALLABLE_GETTER,
{custom_format: lambda x: x + '-custom'},
):
result = schema.unmarshal(value)
assert result == 'x-custom'
def test_string_format_unknown(self):
unknown_format = 'unknown'
schema = Schema('string', schema_format=unknown_format)
value = 'x'
with pytest.raises(OpenAPISchemaError):
schema.unmarshal(value)
@pytest.mark.xfail(reason="No custom formats support atm")
def test_string_format_invalid_value(self):
custom_format = 'custom'
schema = Schema('string', schema_format=custom_format)
value = 'x'
with mock.patch.dict(
Schema.STRING_FORMAT_CALLABLE_GETTER,
{custom_format: mock.Mock(side_effect=ValueError())},
), pytest.raises(
InvalidSchemaValue, message='Failed to format value'
):
schema.unmarshal(value)
def test_integer_valid(self):
schema = Schema('integer')
value = 123
result = schema.unmarshal(value)
assert result == int(value)
def test_integer_string_invalid(self):
schema = Schema('integer')
value = '123'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_integer_enum_invalid(self):
schema = Schema('integer', enum=[1, 2, 3])
value = '123'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_integer_enum(self):
schema = Schema('integer', enum=[1, 2, 3])
value = 2
result = schema.unmarshal(value)
assert result == int(value)
def test_integer_enum_string_invalid(self):
schema = Schema('integer', enum=[1, 2, 3])
value = '2'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_integer_default(self):
default_value = '123'
schema = Schema('integer', default=default_value)
value = None
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_integer_default_nullable(self):
default_value = '123'
schema = Schema('integer', default=default_value, nullable=True)
value = None
result = schema.unmarshal(value)
assert result == default_value
def test_integer_invalid(self):
schema = Schema('integer')
value = 'abc'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_array_valid(self):
schema = Schema('array', items=Schema('integer'))
value = [1, 2, 3]
result = schema.unmarshal(value)
assert result == value
def test_array_of_string_string_invalid(self):
schema = Schema('array', items=Schema('string'))
value = '123'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_array_of_integer_string_invalid(self):
schema = Schema('array', items=Schema('integer'))
value = '123'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_boolean_valid(self):
schema = Schema('boolean')
value = True
result = schema.unmarshal(value)
assert result == value
def test_boolean_string_invalid(self):
schema = Schema('boolean')
value = 'True'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_number_valid(self):
schema = Schema('number')
value = 1.23
result = schema.unmarshal(value)
assert result == value
def test_number_string_invalid(self):
schema = Schema('number')
value = '1.23'
with pytest.raises(InvalidSchemaValue):
schema.unmarshal(value)
def test_number_int(self):
schema = Schema('number')
value = 1
result = schema.unmarshal(value)
assert result == value
class TestSchemaValidate(object):
@pytest.mark.parametrize('schema_type', [
'boolean', 'array', 'integer', 'number', 'string',
])
def test_null(self, schema_type):
schema = Schema(schema_type)
value = None
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('schema_type', [
'boolean', 'array', 'integer', 'number', 'string',
])
def test_nullable(self, schema_type):
schema = Schema(schema_type, nullable=True)
value = None
result = schema.validate(value)
assert result is None
@pytest.mark.parametrize('value', [False, True])
def test_boolean(self, value):
schema = Schema('boolean')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [1, 3.14, u('true'), [True, False]])
def test_boolean_invalid(self, value):
schema = Schema('boolean')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [[1, 2], (3, 4)])
def test_array_no_schema(self, value):
schema = Schema('array')
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [[1, 2], (3, 4)])
def test_array(self, value):
schema = Schema('array', items=Schema('integer'))
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [False, 1, 3.14, u('true')])
def test_array_invalid(self, value):
schema = Schema('array')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [1, 3])
def test_integer(self, value):
schema = Schema('integer')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [False, 3.14, u('true'), [1, 2]])
def test_integer_invalid(self, value):
schema = Schema('integer')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [0, 1, 2])
def test_integer_minimum_invalid(self, value):
schema = Schema('integer', minimum=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [4, 5, 6])
def test_integer_minimum(self, value):
schema = Schema('integer', minimum=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [4, 5, 6])
def test_integer_maximum_invalid(self, value):
schema = Schema('integer', maximum=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [0, 1, 2])
def test_integer_maximum(self, value):
schema = Schema('integer', maximum=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [1, 2, 4])
def test_integer_multiple_of_invalid(self, value):
schema = Schema('integer', multiple_of=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [3, 6, 18])
def test_integer_multiple_of(self, value):
schema = Schema('integer', multiple_of=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [1, 3.14])
def test_number(self, value):
schema = Schema('number')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [False, 'true', [1, 3]])
def test_number_invalid(self, value):
schema = Schema('number')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [0, 1, 2])
def test_number_minimum_invalid(self, value):
schema = Schema('number', minimum=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [3, 4, 5])
def test_number_minimum(self, value):
schema = Schema('number', minimum=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [1, 2, 3])
def test_number_exclusive_minimum_invalid(self, value):
schema = Schema('number', minimum=3, exclusive_minimum=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [4, 5, 6])
def test_number_exclusive_minimum(self, value):
schema = Schema('number', minimum=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [4, 5, 6])
def test_number_maximum_invalid(self, value):
schema = Schema('number', maximum=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [1, 2, 3])
def test_number_maximum(self, value):
schema = Schema('number', maximum=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [3, 4, 5])
def test_number_exclusive_maximum_invalid(self, value):
schema = Schema('number', maximum=3, exclusive_maximum=True)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [0, 1, 2])
def test_number_exclusive_maximum(self, value):
schema = Schema('number', maximum=3, exclusive_maximum=True)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [1, 2, 4])
def test_number_multiple_of_invalid(self, value):
schema = Schema('number', multiple_of=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [3, 6, 18])
def test_number_multiple_of(self, value):
schema = Schema('number', multiple_of=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [u('true'), ])
def test_string(self, value):
schema = Schema('string')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [b('test'), False, 1, 3.14, [1, 3]])
def test_string_invalid(self, value):
schema = Schema('string')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
b('true'), u('test'), False, 1, 3.14, [1, 3],
datetime.datetime(1989, 1, 2),
])
def test_string_format_date_invalid(self, value):
schema = Schema('string', schema_format='date')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
datetime.date(1989, 1, 2), datetime.date(2018, 1, 2),
])
def test_string_format_date(self, value):
schema = Schema('string', schema_format='date')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [
uuid.UUID('{12345678-1234-5678-1234-567812345678}'),
])
def test_string_format_uuid(self, value):
schema = Schema('string', schema_format='uuid')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [
b('true'), u('true'), False, 1, 3.14, [1, 3],
datetime.date(2018, 1, 2), datetime.datetime(2018, 1, 2, 23, 59, 59),
])
def test_string_format_uuid_invalid(self, value):
schema = Schema('string', schema_format='uuid')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
b('true'), u('true'), False, 1, 3.14, [1, 3],
datetime.date(1989, 1, 2),
])
def test_string_format_datetime_invalid(self, value):
schema = Schema('string', schema_format='date-time')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
datetime.datetime(1989, 1, 2, 0, 0, 0),
datetime.datetime(2018, 1, 2, 23, 59, 59),
])
def test_string_format_datetime(self, value):
schema = Schema('string', schema_format='date-time')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [
u('true'), False, 1, 3.14, [1, 3], datetime.date(1989, 1, 2),
datetime.datetime(1989, 1, 2, 0, 0, 0),
])
def test_string_format_binary_invalid(self, value):
schema = Schema('string', schema_format='binary')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
b('stream'), b('text'),
])
def test_string_format_binary(self, value):
schema = Schema('string', schema_format='binary')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [
b('tsssst'), b('dGVzdA=='),
])
def test_string_format_byte_invalid(self, value):
schema = Schema('string', schema_format='byte')
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [
u('tsssst'), u('dGVzdA=='),
])
def test_string_format_byte(self, value):
schema = Schema('string', schema_format='byte')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [
u('test'), b('stream'), datetime.date(1989, 1, 2),
datetime.datetime(1989, 1, 2, 0, 0, 0),
])
def test_string_format_unknown(self, value):
unknown_format = 'unknown'
schema = Schema('string', schema_format=unknown_format)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [u(""), ])
def test_string_min_length_invalid_schema(self, value):
schema = Schema('string', min_length=-1)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [u(""), u("a"), u("ab")])
def test_string_min_length_invalid(self, value):
schema = Schema('string', min_length=3)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [u("abc"), u("abcd")])
def test_string_min_length(self, value):
schema = Schema('string', min_length=3)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [u(""), ])
def test_string_max_length_invalid_schema(self, value):
schema = Schema('string', max_length=-1)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [u("ab"), u("abc")])
def test_string_max_length_invalid(self, value):
schema = Schema('string', max_length=1)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [u(""), u("a")])
def test_string_max_length(self, value):
schema = Schema('string', max_length=1)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [u("foo"), u("bar")])
def test_string_pattern_invalid(self, value):
schema = Schema('string', pattern='baz')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [u("bar"), u("foobar")])
def test_string_pattern(self, value):
schema = Schema('string', pattern='bar')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', ['true', False, 1, 3.14, [1, 3]])
def test_object_not_an_object(self, value):
schema = Schema('object')
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [Model(), ])
def test_object_multiple_one_of(self, value):
one_of = [
Schema('object'), Schema('object'),
]
schema = Schema('object', one_of=one_of)
with pytest.raises(MultipleOneOfSchema):
schema.validate(value)
@pytest.mark.parametrize('value', [Model(), ])
    def test_object_different_type_one_of(self, value):
one_of = [
Schema('integer'), Schema('string'),
]
schema = Schema('object', one_of=one_of)
with pytest.raises(MultipleOneOfSchema):
schema.validate(value)
@pytest.mark.parametrize('value', [Model(), ])
def test_object_no_one_of(self, value):
one_of = [
Schema(
'object',
properties={'test1': Schema('string')},
required=['test1', ],
),
Schema(
'object',
properties={'test2': Schema('string')},
required=['test2', ],
),
]
schema = Schema('object', one_of=one_of)
with pytest.raises(NoOneOfSchema):
schema.validate(value)
@pytest.mark.parametrize('value', [
Model({
'foo': u("FOO"),
}),
Model({
'foo': u("FOO"),
'bar': u("BAR"),
}),
])
def test_unambiguous_one_of(self, value):
one_of = [
Schema(
'object',
properties={
'foo': Schema('string'),
},
additional_properties=False,
required=['foo'],
),
Schema(
'object',
properties={
'foo': Schema('string'),
'bar': Schema('string'),
},
additional_properties=False,
required=['foo', 'bar'],
),
]
schema = Schema('object', one_of=one_of)
schema.validate(value)
@pytest.mark.parametrize('value', [Model(), ])
def test_object_default_property(self, value):
schema = Schema('object', default='value1')
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [Model(), ])
def test_object_min_properties_invalid_schema(self, value):
schema = Schema('object', min_properties=-1)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [
Model({'a': 1}),
Model({'a': 1, 'b': 2}),
Model({'a': 1, 'b': 2, 'c': 3})])
def test_object_min_properties_invalid(self, value):
schema = Schema(
'object',
properties={k: Schema('number')
for k in ['a', 'b', 'c']},
min_properties=4,
)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
Model({'a': 1}),
Model({'a': 1, 'b': 2}),
Model({'a': 1, 'b': 2, 'c': 3})])
def test_object_min_properties(self, value):
schema = Schema(
'object',
properties={k: Schema('number')
for k in ['a', 'b', 'c']},
min_properties=1,
)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [Model(), ])
def test_object_max_properties_invalid_schema(self, value):
schema = Schema('object', max_properties=-1)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [
Model({'a': 1}),
Model({'a': 1, 'b': 2}),
Model({'a': 1, 'b': 2, 'c': 3})])
def test_object_max_properties_invalid(self, value):
schema = Schema(
'object',
properties={k: Schema('number')
for k in ['a', 'b', 'c']},
max_properties=0,
)
with pytest.raises(InvalidSchemaValue):
schema.validate(value)
@pytest.mark.parametrize('value', [
Model({'a': 1}),
Model({'a': 1, 'b': 2}),
Model({'a': 1, 'b': 2, 'c': 3})])
def test_object_max_properties(self, value):
schema = Schema(
'object',
properties={k: Schema('number')
for k in ['a', 'b', 'c']},
max_properties=3,
)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [Model({'additional': 1}), ])
    def test_object_additional_properties(self, value):
schema = Schema('object')
schema.validate(value)
@pytest.mark.parametrize('value', [Model({'additional': 1}), ])
    def test_object_additional_properties_false(self, value):
schema = Schema('object', additional_properties=False)
with pytest.raises(UndefinedSchemaProperty):
schema.validate(value)
@pytest.mark.parametrize('value', [Model({'additional': 1}), ])
    def test_object_additional_properties_object(self, value):
additional_properties = Schema('integer')
schema = Schema('object', additional_properties=additional_properties)
schema.validate(value)
@pytest.mark.parametrize('value', [[], ])
def test_list_min_items_invalid_schema(self, value):
schema = Schema(
'array',
items=Schema('number'),
min_items=-1,
)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [[], [1], [1, 2]])
def test_list_min_items_invalid(self, value):
schema = Schema(
'array',
items=Schema('number'),
min_items=3,
)
with pytest.raises(Exception):
schema.validate(value)
@pytest.mark.parametrize('value', [[], [1], [1, 2]])
def test_list_min_items(self, value):
schema = Schema(
'array',
items=Schema('number'),
min_items=0,
)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [[], ])
def test_list_max_items_invalid_schema(self, value):
schema = Schema(
'array',
items=Schema('number'),
max_items=-1,
)
with pytest.raises(OpenAPISchemaError):
schema.validate(value)
@pytest.mark.parametrize('value', [[1, 2], [2, 3, 4]])
def test_list_max_items_invalid(self, value):
schema = Schema(
'array',
items=Schema('number'),
max_items=1,
)
with pytest.raises(Exception):
schema.validate(value)
@pytest.mark.parametrize('value', [[1, 2, 1], [2, 2]])
def test_list_unique_items_invalid(self, value):
schema = Schema(
'array',
items=Schema('number'),
unique_items=True,
)
with pytest.raises(Exception):
schema.validate(value)
@pytest.mark.parametrize('value', [
Model({
'someint': 123,
}),
Model({
'somestr': u('content'),
}),
Model({
'somestr': u('content'),
'someint': 123,
}),
])
def test_object_with_properties(self, value):
schema = Schema(
'object',
properties={
'somestr': Schema('string'),
'someint': Schema('integer'),
},
)
result = schema.validate(value)
assert result == value
@pytest.mark.parametrize('value', [
Model({
'somestr': Model(),
'someint': 123,
}),
Model({
'somestr': {},
'someint': 123,
}),
Model({
'somestr': [
'content1', 'content2'
],
'someint': 123,
}),
Model({
'somestr': 123,
'someint': 123,
}),
Model({
'somestr': 'content',
'someint': 123,
'not_in_scheme_prop': 123,
}),
])
def test_object_with_invalid_properties(self, value):
schema = Schema(
'object',
properties={
'somestr': Schema('string'),
'someint': Schema('integer'),
},
additional_properties=False,
)
with pytest.raises(Exception):
schema.validate(value)
| 28.970408
| 79
| 0.586489
|
3fbc3e36fee79c7be10893b225fd26ff84bdc509
| 3,619
|
py
|
Python
|
examples/example.py
|
kyeongsoo/runlmc
|
abd369d2b72a9146cf4a21d5cc144e272c637e29
|
[
"BSD-3-Clause"
] | null | null | null |
examples/example.py
|
kyeongsoo/runlmc
|
abd369d2b72a9146cf4a21d5cc144e272c637e29
|
[
"BSD-3-Clause"
] | null | null | null |
examples/example.py
|
kyeongsoo/runlmc
|
abd369d2b72a9146cf4a21d5cc144e272c637e29
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# get_ipython().run_line_magic('env', 'OMP_NUM_THREADS=1')
import os, sys
if os.path.basename(os.getcwd()) != 'runlmc':
os.chdir('..')
sys.path.append('benchmarks/benchlib')
from runlmc.models.interpolated_llgp import InterpolatedLLGP
from runlmc.lmc.functional_kernel import FunctionalKernel
from runlmc.kern.rbf import RBF
from runlmc.models.optimization import AdaDelta
import numpy as np
# get_ipython().run_line_magic('matplotlib', 'inline')
# import matplotlib.pyplot as pltv
import matplotlib.pyplot as plt
np.random.seed(1234)
# In[2]:
n_per_output = [65, 100]
xss = list(map(np.random.rand, n_per_output))
nout = len(n_per_output)
yss = [np.sin(2 * np.pi * xs + i * 2 * np.pi / nout)
+ np.random.randn(len(xs)) * (i + 1) * 0.1 / nout
for i, xs in enumerate(xss)]
ks = [RBF(name='rbf{}'.format(i)) for i in range(nout)]
ranks = [1, 1]
fk = FunctionalKernel(D=len(xss), lmc_kernels=ks, lmc_ranks=ranks)
# In[3]:
def print_diagram(lmc):
plot_xs = np.arange(0, 1, 0.01)
ys, var = lmc.predict([plot_xs for _ in range(nout)])
for i, (y, v, xs, ys) in enumerate(zip(ys, var, xss, yss)):
sd = np.sqrt(v)
order = xs.argsort()
plt.scatter(xs[order], ys[order])
plt.title('output {} (95%)'.format(i))
plt.plot(plot_xs, y)
plt.plot(plot_xs, y + 2 * sd, ls='--', c='g')
plt.plot(plot_xs, y - 2 * sd, ls='--', c='g')
plt.show()
# In[4]:
# Unoptimized
lmc = InterpolatedLLGP(xss, yss, functional_kernel=fk)
print_diagram(lmc)
# In[5]:
lmc.optimize(optimizer=AdaDelta(verbosity=10))
#optimized
print(lmc)
print(lmc.kern.noise)
print_diagram(lmc)
# In[6]:
import GPy
rbfs = [GPy.kern.RBF(1) for _ in range(nout)]
# not exactly the same since mine is rank-1 only for now
# This is why we need as many kernels as outputs; otherwise we'd be rank-deficient
k = GPy.util.multioutput.LCM(input_dim=1,num_outputs=nout,kernels_list=rbfs)
xss_reshaped = [xs.reshape(-1, 1) for xs in xss]
yss_reshaped = [ys.reshape(-1, 1) for ys in yss]
m = GPy.models.GPCoregionalizedRegression(
xss_reshaped, yss_reshaped, kernel=k)
m.optimize()
print(m)
# In[7]:
# Plotting code adapted from GPy coregionalization tutorial
# Also 95% confidence
data_rows = np.add.accumulate(n_per_output)
data_rows = np.insert(data_rows, 0, 0)
for i in range(nout):
m.plot(
plot_limits=(0, 1),
fixed_inputs=[(1,i)],
which_data_rows=slice(data_rows[i],data_rows[i + 1]))
# In[8]:
# Adding a prior
from runlmc.parameterization.priors import InverseGamma, Gaussian, HalfLaplace
ig = InverseGamma(0.5, 0.5)
lmc.kern.rbf0.inv_lengthscale.set_prior(ig)
lmc.kern.rbf1.inv_lengthscale.set_prior(ig)
n = Gaussian(0, 1)
lmc.kern.a0.set_prior(n)
lmc.kern.a1.set_prior(n)
h = HalfLaplace(1)
lmc.kern.kappa0.set_prior(h)
lmc.kern.kappa1.set_prior(h)
lmc.optimize()
print(lmc)
print(lmc.kern.kappa0)
# In[9]:
# Multilevel prior
from runlmc.parameterization.param import Param
# A param is anything that is modifiable during the optimization
# We add a param shape such that
# shape ~ IG(0.5, 0.5)
# rbf*.inv_lengthscale ~ IG(0.5, shape)
ig = InverseGamma(0.5, 0.5)
initial_value = 1
shape = Param('shape', initial_value)
lmc.link_parameter(shape) # wire the parameter into the model (otherwise it won't get updated)
shape.set_prior(ig)
ig2 = InverseGamma(0.5, shape)
for il in [lmc.kern.rbf0.inv_lengthscale, lmc.kern.rbf1.inv_lengthscale]:
il.set_prior(ig2)
lmc.optimize(optimizer=AdaDelta())
print(lmc)
print(lmc.kern.kappa0)
# In[ ]:
| 21.801205
| 94
| 0.68693
|
08167550b2f6308d4bbc9386c64109468abc53a6
| 26,791
|
py
|
Python
|
electra_pyt/configuration_utils.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
electra_pyt/configuration_utils.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
electra_pyt/configuration_utils.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import logging
import os
from typing import Dict, Tuple
from file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
r""" Base class for all configuration classes.
Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes):
- ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.
num_labels (:obj:`int`, `optional`, defaults to `2`):
Number of classes to use when the model is a classification model (sequences/tokens)
        output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model should return attention weights.
        output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model should return all hidden states.
        torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model is used with TorchScript (for PyTorch models).
"""
model_type: str = ""
def __init__(self, **kwargs):
# Attributes with defaults
self.output_attentions = kwargs.pop("output_attentions", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.use_cache = kwargs.pop("use_cache", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
        # is_decoder is used in encoder-decoder models to differentiate the encoder from the decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
self.xla_device = kwargs.pop("xla_device", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def num_labels(self):
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels):
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
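    # For illustration, setting num_labels = 3 via the setter above yields
    #   id2label == {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
    #   label2id == {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}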
def save_pretrained(self, save_directory):
"""
Save a configuration object to the directory `save_directory`, so that it
can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`str`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
"""
if os.path.isfile(save_directory):
raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory))
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.
Args:
pretrained_model_name_or_path (:obj:`string`):
either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache or
download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to
our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.:
``./my_model_directory/configuration.json``.
cache_dir (:obj:`string`, `optional`):
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
kwargs (:obj:`Dict[str, any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is
controlled by the `return_unused_kwargs` keyword parameter.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies (:obj:`Dict`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.:
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`
The proxies are used on each request.
return_unused_kwargs: (`optional`) bool:
If False, then this function returns just the final configuration object.
                If True, then this function returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part
of kwargs which has not been used to update `config` and is otherwise ignored.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
assert config.output_attention == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
foo=False, return_unused_kwargs=True)
assert config.output_attention == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs) -> Tuple[Dict, Dict]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used
for instantiating a Config using `from_dict`.
Parameters:
pretrained_model_name_or_path (:obj:`string`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError:
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
raise EnvironmentError(msg)
except json.JSONDecodeError:
msg = (
"Couldn't reach server at '{}' to download configuration file or "
"configuration file is not a valid JSON file. "
"Please check network or file content here: {}.".format(config_file, resolved_config_file)
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict, **kwargs) -> "PretrainedConfig":
"""
Constructs a `Config` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved
from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`
method.
kwargs (:obj:`Dict[str, any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", str(config))
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: str) -> "PretrainedConfig":
"""
Constructs a `Config` from the path to a json file of parameters.
Args:
json_file (:obj:`string`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: str):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_diff_dict(self):
"""
Removes all attributes from config which correspond to the default
config attributes for better readability and serializes to a Python
dictionary.
Returns:
            :obj:`Dict[str, any]`: Dictionary of the attributes that differ from the defaults, i.e. the minimal set needed to reproduce this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if key not in default_config_dict or value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
def to_dict(self):
"""
Serializes this instance to a Python dictionary.
Returns:
            :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
return output
def to_json_string(self, use_diff=True):
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`):
If set to True, only the difference between the config instance and the default PretrainedConfig() is serialized to JSON string.
Returns:
:obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path, use_diff=True):
"""
Save this instance to a json file.
Args:
json_file_path (:obj:`string`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`):
If set to True, only the difference between the config instance and the default PretrainedConfig() is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict):
"""
Updates attributes of this class
with attributes from `config_dict`.
Args:
:obj:`Dict[str, any]`: Dictionary of attributes that shall be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
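# A minimal sketch of combining the helpers above: the attribute name and value
# are arbitrary examples, and the round trip assumes (as `from_dict` itself
# does) that the constructor accepts the keys produced by `to_dict`.
def _pretrained_config_helpers_sketch():
    config = PretrainedConfig()
    config.update({"output_attentions": True})
    # Only values that differ from a default PretrainedConfig survive the diff.
    assert config.to_diff_dict().get("output_attentions") is True
    # Rebuild an equivalent configuration from the plain dictionary.
    return PretrainedConfig.from_dict(config.to_dict())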
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
"bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
"bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
"bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
"cl-tohoku/bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking/config.json",
"cl-tohoku/bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char/config.json",
"cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking/config.json",
"TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
"TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
"wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
    It is used to instantiate a BERT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 30522):
Vocabulary size of the BERT model. Defines the different tokens that
            can be represented by the `input_ids` passed to the forward method of :class:`~transformers.BertModel`.
hidden_size (:obj:`int`, optional, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, optional, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, optional, defaults to 2):
The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
Example::
from transformers import BertModel, BertConfig
# Initializing a BERT bert-base-uncased style configuration
configuration = BertConfig()
# Initializing a model from the bert-base-uncased style configuration
model = BertModel(configuration)
# Accessing the model configuration
configuration = model.config
"""
model_type = "bert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
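# A minimal sketch of the diff-style serialization inherited from
# PretrainedConfig; the reduced layer count is an arbitrary example value.
def _bert_config_diff_sketch():
    small = BertConfig(num_hidden_layers=4)
    # With use_diff=True only values differing from a default PretrainedConfig
    # (plus the model type) end up in the JSON string.
    as_json = small.to_json_string(use_diff=True)
    assert '"num_hidden_layers": 4' in as_json
    return as_json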
| 51.030476
| 193
| 0.661491
|
59cc51e70042824b02ceb3fe6e9169287cea1a5f
| 2,619
|
py
|
Python
|
site-packages/cinderclient/contrib/noauth.py
|
hariza17/freezer_libraries
|
e0bd890eba5e7438976fb3b4d66c41c128bab790
|
[
"PSF-2.0"
] | 74
|
2015-01-29T20:10:47.000Z
|
2022-03-03T05:09:00.000Z
|
site-packages/cinderclient/contrib/noauth.py
|
hariza17/freezer_libraries
|
e0bd890eba5e7438976fb3b4d66c41c128bab790
|
[
"PSF-2.0"
] | 6
|
2015-08-10T10:23:42.000Z
|
2022-02-16T02:28:22.000Z
|
site-packages/cinderclient/contrib/noauth.py
|
hariza17/freezer_libraries
|
e0bd890eba5e7438976fb3b4d66c41c128bab790
|
[
"PSF-2.0"
] | 125
|
2015-02-24T11:04:51.000Z
|
2021-12-23T01:28:05.000Z
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from keystoneauth1 import loading
from keystoneauth1 import plugin
class CinderNoAuthPlugin(plugin.BaseAuthPlugin):
def __init__(self, user_id, project_id=None, roles=None, endpoint=None):
self._user_id = user_id
self._project_id = project_id if project_id else user_id
self._endpoint = endpoint
self._roles = roles
self.auth_token = '%s:%s' % (self._user_id,
self._project_id)
def get_headers(self, session, **kwargs):
return {'x-user-id': self._user_id,
'x-project-id': self._project_id,
'X-Auth-Token': self.auth_token}
def get_user_id(self, session, **kwargs):
return self._user_id
def get_project_id(self, session, **kwargs):
return self._project_id
def get_endpoint(self, session, **kwargs):
return '%s/%s' % (self._endpoint, self._project_id)
def invalidate(self):
pass
class CinderOpt(loading.Opt):
@property
def argparse_args(self):
return ['--%s' % o.name for o in self._all_opts]
@property
def argparse_default(self):
        # select the first matching environment variable that is not false-y, or fall back to the default
for o in self._all_opts:
v = os.environ.get('Cinder_%s' % o.name.replace('-', '_').upper())
if v:
return v
return self.default
class CinderNoAuthLoader(loading.BaseLoader):
plugin_class = CinderNoAuthPlugin
def get_options(self):
options = super(CinderNoAuthLoader, self).get_options()
options.extend([
CinderOpt('user-id', help='User ID', required=True,
metavar="<cinder user id>"),
CinderOpt('project-id', help='Project ID',
metavar="<cinder project id>"),
CinderOpt('endpoint', help='Cinder endpoint',
dest="endpoint", required=True,
metavar="<cinder endpoint>"),
])
return options
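# A minimal sketch of the plugin above used directly, outside the keystoneauth
# loading machinery; the user, project and endpoint values are placeholders.
def _noauth_plugin_sketch():
    plugin = CinderNoAuthPlugin('demo-user', project_id='demo-project',
                                endpoint='http://127.0.0.1:8776/v3')
    headers = plugin.get_headers(None)
    # The token is simply "<user>:<project>", as built in __init__ above.
    assert headers['X-Auth-Token'] == 'demo-user:demo-project'
    # The endpoint is suffixed with the project id.
    return plugin.get_endpoint(None)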
| 33.576923
| 78
| 0.624284
|
c3536440e1add8767187c5961271975ec787e28b
| 959
|
py
|
Python
|
third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/selenium_test_suite_headless.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/selenium_test_suite_headless.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/selenium_test_suite_headless.py
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
"""
Copyright 2011 Software Freedom Conservancy.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import test_ajax_jsf
import test_default_server
import test_google
import test_i18n
import sys
def suite():
return unittest.TestSuite((\
unittest.makeSuite(test_i18n.TestI18n),
))
if __name__ == "__main__":
result = unittest.TextTestRunner(verbosity=2).run(suite())
sys.exit(not result.wasSuccessful())
| 29.96875
| 73
| 0.74244
|
86aadf93e7fc24127c6c5e4a25aab57d6f03fea1
| 17,433
|
py
|
Python
|
tf_quant_finance/models/ito_process.py
|
nethask/tf-quant-finance
|
59aea04121405302a1b7804fb72e57e65cae0b47
|
[
"Apache-2.0"
] | 1
|
2021-07-05T14:32:11.000Z
|
2021-07-05T14:32:11.000Z
|
tf_quant_finance/models/ito_process.py
|
FranklinMa810/tf-quant-finance
|
59aea04121405302a1b7804fb72e57e65cae0b47
|
[
"Apache-2.0"
] | null | null | null |
tf_quant_finance/models/ito_process.py
|
FranklinMa810/tf-quant-finance
|
59aea04121405302a1b7804fb72e57e65cae0b47
|
[
"Apache-2.0"
] | 1
|
2020-04-24T22:20:18.000Z
|
2020-04-24T22:20:18.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Defines Ito processes.
Ito processes underlie most quantitative finance models. This module defines
a framework for describing Ito processes. An Ito process is usually defined
via an Ito SDE:
```
dX = a(t, X_t) dt + b(t, X_t) dW_t
```
where `a(t, x)` is a function taking values in `R^n`, `b(t, X_t)` is a function
taking values in `n x n` matrices. For a complete mathematical definition,
including the regularity conditions that must be imposed on the coefficients
`a(t, X)` and `b(t, X)`, see Ref [1].
### References:
[1]: Bernt Øksendal. Stochastic Differential Equations: An Introduction with
Applications. Springer. 2010.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow as tf
from tf_quant_finance.math import random
@six.add_metaclass(abc.ABCMeta)
class ItoProcess(object):
"""Base class for Ito processes.
Represents a general Ito process:
```None
dX_i = a_i(t, X) dt + Sum(S_{ij}(t, X) dW_j for 1 <= j <= n), 1 <= i <= n
```
  `a_i(t, X)` is the instantaneous drift rate and `S_{ij}(t, X)` is the
volatility matrix. While this is how the process is usually presented
mathematically, the initializer requires a slightly different
specification. Instead of the instantaneous drift and volatility, it is
computationally more efficient to use the total drift and covariance. These
are defined as:
```None
total_drift_{i}(t1, t2, X) = Integrate(a_{i}(t, X), t1 <= t <= t2)
total_covariance_{ij}(t1, t2, X) = Integrate(inst_covariance_{ij}(t, X),
t1 <= t <= t2)
inst_covariance_{ij}(t, X) = (S.S^T)_{ij}
```
### Example. 2-dimensional Ito process of the form
```none
dX_1 = mu_1 * sqrt(t) dt + s11 * dW_1 + s12 * dW_2
dX_2 = mu_2 * sqrt(t) dt + s21 * dW_1 + s22 * dW_2
```
class SimpleItoProcess(ito_process.ItoProcess):
def __init__(self, dim, drift_fn, vol_fn, dtype=tf.float64):
self._dim = dim
self._drift_fn = drift_fn
self._vol_fn = vol_fn
self._dtype = dtype
def dim(self):
return self._dim
def drift_fn(self):
return self._drift_fn
def volatility_fn(self):
return self._vol_fn
def dtype(self):
return self._dtype
def name(self):
return 'ito_process'
mu = np.array([0.2, 0.7])
s = np.array([[0.3, 0.1], [0.1, 0.3]])
num_samples = 10000
dim = 2
dtype=tf.float64
# Define drift and volatility functions
def drift_fn(t, x):
return mu * tf.sqrt(t) * tf.ones([num_samples, dim], dtype=dtype)
def vol_fn(t, x):
return s * tf.ones([num_samples, dim, dim], dtype=dtype)
# Initialize `SimpleItoProcess`
  process = SimpleItoProcess(dim=2, drift_fn=drift_fn, vol_fn=vol_fn, dtype=dtype)
# Set starting location
  x0 = np.array([0.1, -1.1])
  # Times at which to evaluate the sample paths (an illustrative grid)
  times = np.linspace(0.01, 1.0, 100)
# Sample `num_samples` paths at specified `times` locations using built-in
# Euler scheme.
paths = process.sample_paths(
times,
num_samples=num_samples,
initial_state=x0,
grid_step=0.01,
seed=42)
"""
@abc.abstractmethod
def dim(self):
"""The dimension of the process."""
return None
@abc.abstractmethod
def dtype(self):
"""The data type of process realizations."""
return None
@abc.abstractmethod
def name(self):
"""The name to give to the ops created by this class."""
return None
def drift_fn(self):
"""Python callable calculating instantaneous drift.
The callable should accept two real `Tensor` arguments of the same dtype.
The first argument is the scalar time t, the second argument is the value of
Ito process X - tensor of shape `batch_shape + [dim]`. The result is value
of drift a(t, X). The return value of the callable is a real `Tensor` of the
same dtype as the input arguments and of shape `batch_shape + [dim]`.
"""
return None
def volatility_fn(self):
"""Python callable calculating the instantaneous volatility.
The callable should accept two real `Tensor` arguments of the same dtype and
shape `times_shape`. The first argument is the scalar time t, the second
argument is the value of Ito process X - tensor of shape `batch_shape +
    [dim]`. The result is the value of the volatility S(t, X). The return value of the
callable is a real `Tensor` of the same dtype as the input arguments and of
shape `batch_shape + [dim, dim]`.
"""
return None
def total_drift_fn(self):
"""Computes integrated drift of the process between two times.
Returns:
A Python callable returning the integrated drift rate
between two times. The callable accepts three real `Tensor`
arguments. The first argument is the left end point
and the second is the right end point of the time interval for which
the total drift is needed. The third argument contains the values of
the state at which the drift is to be computed. In most cases where
the instantaneous drift depends on the state the time step for which
the total drift is needed is very small. However, the interface allows
asking for a finite time step at a fixed value of the state (i.e. the
state at the start of the time interval). Implementations
are free to either raise an error for larger time steps or give the best
approximation they can for the given parameters. The two main advantages
of working with total drifts/covariances vs instantaneous quantities are
(a) If needed, the instantaneous values can be derived using either
automatic differentiation or finite difference.
(b) For cases where there is no non-trivial state dependence
(e.g. Brownian motion with time dependent drifts and covariance) or
the state dependence can be easily removed by a transformation of the
process (e.g. geometric brownian motion), it
is possible to directly generate samples from the joint distributions
of the process and these depend only on the total drift and
covariances.
The precise definition of total drift function is as follows.
Consider a general `dim` dimensional Ito process:
```None
dX_i = mu_i(t, X) dt + Sum(S_{ij}(t, X) dW_j for 1 <= j <= dim)
```
      `mu_i(t, X)` is the instantaneous drift rate and `S_{ij}(t, X)` is the
volatility matrix. The total drift function `M_{ij}(t1, t2, X)`
is then defined by
```None
M_{i}(t1, t2, X) = Integral[ mu_{i}(t, X), t1 <= t <= t2]
```
The shapes of the inputs and outputs of the callable are as follows:
Input `Tensor`s.
1. `start time` and `end time` are of shape `time_batch_shape`.
Here `time_batch_shape` is the shape of the times `Tensor` at which
to evaluate the integrated drift.
2. The state is of shape `time_batch_shape + [dim]` where `dim` is the
dimension of the process.
The output of this callable is a `Tensor` of shape
`time_batch_shape + [dim]` containing the integrated drift between the
start times and end times.
"""
return None
def total_covariance_fn(self):
"""The total covariance of the process between two times.
A Python callable returning the integrated covariances between two times.
The callable accepts three real `Tensor` arguments. The first argument is
the left end point and the second is the right end point of the time
interval for which the total covariance is needed. The third argument
contains the values of the state at which the covariance is to be
computed. In most cases where the instantaneous covariance depends on
the state the time step for which the total covariance is needed is
small (e.g. while doing finite differences or generating
Monte Carlo paths). However, the interface allows asking for a finite
time step at a fixed value of the state (i.e. the state at the start
of the time interval). Implementations are free to either raise an error
for larger time steps or give the best approximation they can for the
given parameters. The two main advantages of working with total
covariances/drifts vs instantaneous quantities are
(a) If needed, the instantaneous values can be derived using either
automatic differentiation or finite difference.
(b) For cases where there is no non-trivial state dependence
(e.g. Brownian motion with time dependent drifts and covariance) or
the state dependence can be easily removed by a transformation of the
process (e.g. geometric brownian motion), it
is possible to directly generate samples from the joint distributions
of the process and these depend only on the total drift and
covariances.
The precise definition of total covariance function is as follows.
Consider a general `dim` dimensional Ito process:
```None
dX_i = mu_i(t, X) dt + Sum(S_{ij}(t, X) dW_j for 1 <= j <= dim)
```
      `mu_i(t, X)` is the instantaneous drift rate and `S_{ij}(t, X)` is the
volatility matrix. The total covariance function `V_{ij}(t1, t2, X)`
is then defined by
```None
V_{ij}(t1, t2, X) = Integral[ (S.S^T)_{ij}(t), t1 <= t <= t2]
```
The shapes of the inputs and outputs of the callable are as follows:
Input `Tensor`s.
1. `start time` and `end time` are of shape `time_batch_shape`.
Here `time_batch_shape` is the shape of the times `Tensor` at which
to evaluate the integrated covariances.
2. The state is of shape `time_batch_shape + [dim]` where `dim` is the
dimension of the process.
The output of this callable is a `Tensor` of shape
`time_batch_shape + [dim, dim]` containing the integrated covariances
between the start times and end times.
"""
return None
def sample_paths(self,
times,
num_samples=1,
initial_state=None,
random_type=None,
seed=None,
swap_memory=True,
name=None,
**kwargs):
"""Returns a sample of paths from the process.
The default implementation uses Euler schema. However, for particular types
of Ito processes more efficient schemes can be used.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
num_samples: Positive scalar `int`. The number of paths to draw.
initial_state: `Tensor` of shape `[dim]`. The initial state of the
process.
Default value: None which maps to a zero initial state.
random_type: Enum value of `RandomType`. The type of (quasi)-random
number generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Python `int`. The random seed to use. If not supplied, no seed is
set.
swap_memory: Whether GPU-CPU memory swap is enabled for this op. See
equivalent flag in `tf.while_loop` documentation for more details.
Useful when computing a gradient of the op since `tf.while_loop` is
used to propagate stochastic process in time.
name: str. The name to give this op. If not supplied, default name of
`sample_paths` is used.
**kwargs: parameters, specific to Euler schema:
`grid_step` is rank 0 real `Tensor` - maximal distance between points
in grid in Euler schema.
Returns:
A real `Tensor` of shape [num_samples, k, n] where `k` is the size of the
`times`, `n` is the dimension of the process.
"""
if self.drift_fn() is None or self.volatility_fn() is None:
raise NotImplementedError(
'In order to use Euler scheme, both drift_fn and volatility_fn '
'should be provided.')
default_name = self.name() + '_sample_paths'
with tf.compat.v1.name_scope(
name, default_name=default_name, values=[times, initial_state]):
if initial_state is None:
initial_state = tf.zeros(self._dim, dtype=self._dtype)
times = tf.convert_to_tensor(times, dtype=self._dtype)
initial_state = tf.convert_to_tensor(initial_state, dtype=self._dtype,
name='initial_state')
times_size = tf.shape(times)[-1]
grid_step = kwargs['grid_step']
times, keep_mask = self._prepare_grid(times, grid_step)
return self._sample_paths(
times, grid_step, keep_mask, times_size, num_samples, initial_state,
random_type, seed, swap_memory)
def fd_method(self, *args, **kwargs):
"""Returns a finite difference solver for solving Kolmogorov equations.
TODO: Complete the interface specification for this.
Args:
*args: Place value arguments.
**kwargs: Keyword args.
Returns:
An instance of `BackwardGridStepper` to solve the backward Kolmogorov
equation.
TODO: decide on the interface for solving the forward
Kolmogorov equation.
"""
raise NotImplementedError('Finite difference solvers are not yet '
'implemented for ItoProcess.')
def _sample_paths(self, times, grid_step, keep_mask, times_size, num_samples,
initial_state, random_type, seed, swap_memory):
"""Returns a sample of paths from the process."""
dt = times[1:] - times[:-1]
sqrt_dt = tf.sqrt(dt)
current_state = initial_state + tf.zeros([num_samples, self.dim()],
dtype=initial_state.dtype)
steps_num = tf.shape(dt)[-1]
wiener_mean = tf.zeros((self.dim(), 1), dtype=self._dtype)
cond_fn = lambda i, *args: i < steps_num
def step_fn(i, written_count, current_state, result):
"""Performs one step of Euler scheme."""
current_time = times[i + 1]
dw = random.mv_normal_sample((num_samples,),
mean=wiener_mean,
random_type=random_type,
seed=seed)
dw = dw * sqrt_dt[i]
dt_inc = dt[i] * self.drift_fn()(current_time, current_state) # pylint: disable=not-callable
dw_inc = tf.squeeze(
tf.matmul(self.volatility_fn()(current_time, current_state), dw), -1) # pylint: disable=not-callable
next_state = current_state + dt_inc + dw_inc
# Keep only states for times, requested by user.
result = tf.cond(keep_mask[i + 1],
(lambda: result.write(written_count, next_state)),
(lambda: result))
written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
return (i + 1, written_count, next_state, result)
    # The maximum number of iterations is passed to the while loop below. It
    # improves performance of the while loop on a GPU and is needed for
    # XLA-compilation compatibility.
maximum_iterations = (tf.cast(1. / grid_step, dtype=tf.int32)
+ tf.size(times))
result = tf.TensorArray(dtype=self._dtype, size=times_size)
_, _, _, result = tf.compat.v1.while_loop(
cond_fn, step_fn, (0, 0, current_state, result),
maximum_iterations=maximum_iterations,
swap_memory=swap_memory)
return tf.transpose(result.stack(), (1, 0, 2))
def _prepare_grid(self, times, grid_step):
"""Prepares grid of times for path generation.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
grid_step: Rank 0 real `Tensor`. Maximal distance between points in
resulting grid.
Returns:
Tuple `(all_times, mask)`.
      `all_times` is a 1-D real `Tensor` containing all points from `times` and
      whose intervals are at most `grid_step`.
      `mask` is a boolean 1-D `Tensor` of the same shape as `all_times`, showing
      which elements of `all_times` correspond to values from `times`.
      Guarantees that `all_times[0] = 0` and `mask[0] = False`.
      `all_times` is sorted ascending and may contain duplicates.
"""
grid = tf.range(0.0, times[-1], grid_step, dtype=self._dtype)
all_times = tf.concat([grid, times], axis=0)
mask = tf.concat([
tf.zeros_like(grid, dtype=tf.bool),
tf.ones_like(times, dtype=tf.bool)
],
axis=0)
perm = tf.argsort(all_times, stable=True)
all_times = tf.gather(all_times, perm)
mask = tf.gather(mask, perm)
return all_times, mask
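# A minimal NumPy sketch of the Euler update that `_sample_paths` above applies
# at each step, written for a single one-dimensional path; the drift and
# volatility coefficients and the step size are arbitrary placeholders.
def _euler_step_sketch():
  import numpy as np
  x, t, dt = 0.1, 0.0, 0.01
  drift_fn = lambda t, x: 0.2 * x  # a(t, X)
  vol_fn = lambda t, x: 0.3  # S(t, X), a scalar in one dimension
  dw = np.sqrt(dt) * np.random.standard_normal()
  # X_{t + dt} = X_t + a(t, X_t) * dt + S(t, X_t) * dW_t
  return x + drift_fn(t, x) * dt + vol_fn(t, x) * dw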
| 40.26097
| 111
| 0.660242
|
b6719bf3f4c15a1446a5673db54090dc9ea2971a
| 5,452
|
py
|
Python
|
functions/source/CreateEnvironment/index.py
|
DuckDao/quickstart-codepipeline-bluegreen-deployment
|
cd8b9b2de7a42c108be5bf24255bbb3bd5305bd5
|
[
"Apache-2.0"
] | 52
|
2018-07-26T19:01:53.000Z
|
2021-10-30T16:46:56.000Z
|
functions/source/CreateEnvironment/index.py
|
nicolaskenner/quickstart-codepipeline-bluegreen-deployment
|
5586d64a6e482a916f81a2732aaaebc2b3677fc3
|
[
"Apache-2.0"
] | 4
|
2018-05-24T18:53:35.000Z
|
2021-07-06T05:16:54.000Z
|
functions/source/CreateEnvironment/index.py
|
nicolaskenner/quickstart-codepipeline-bluegreen-deployment
|
5586d64a6e482a916f81a2732aaaebc2b3677fc3
|
[
"Apache-2.0"
] | 43
|
2018-05-10T00:41:46.000Z
|
2022-02-18T22:06:23.000Z
|
import boto3
import json
import traceback
import sys
import logging
import threading
import time
beanstalkclient = boto3.client('elasticbeanstalk')
codepipelineclient = boto3.client('codepipeline')
def handler(event, context):
timer = threading.Timer((context.get_remaining_time_in_millis() / 1000.00) - 0.5, timeout, args=[event, context])
timer.start()
try:
# Extract the Job ID
job_id = event['CodePipeline.job']['id']
# Extract the Job Data
job_data = event['CodePipeline.job']['data']
user_parameters = job_data['actionConfiguration']['configuration']['UserParameters']
print(job_data)
print(event)
BlueEnvInfo=GetBlueEnvInfo(EnvName=(json.loads(user_parameters)['BlueEnvName']))
BlueEnvId=(BlueEnvInfo['Environments'][0]['EnvironmentId'])
BlueVersionLabel=(BlueEnvInfo['Environments'][0]['VersionLabel'])
#Calling CreateConfigTemplate API
ConfigTemplate=CreateConfigTemplateBlue(AppName=(json.loads(user_parameters)['BeanstalkAppName']),BlueEnvId=BlueEnvId,TempName=json.loads(user_parameters)['CreateConfigTempName'])
ReturnedTempName=ConfigTemplate
print (ReturnedTempName)
if not ReturnedTempName:
            #Raise an exception if the configuration template could not be created or found
raise Exception("There were some issue while creating a Configuration Template from the Blue Environment")
else:
GreenEnvId=CreateGreenEnvironment(EnvName=(json.loads(user_parameters)['GreenEnvName']),ConfigTemplate=ReturnedTempName,AppVersion=BlueVersionLabel,AppName=(json.loads(user_parameters)['BeanstalkAppName']))
print (GreenEnvId)
#print (GreenEnvIddetails)
if GreenEnvId:
Status="Success"
Message="Successfully created the Green Environment/Environment with the provided name already exists"
#Create a CNAME Config file
BlueEnvCname=(BlueEnvInfo['Environments'][0]['CNAME'])
s3 = boto3.resource('s3')
bucket = s3.Bucket(json.loads(user_parameters)['BlueCNAMEConfigBucket'])
key = json.loads(user_parameters)['BlueCNAMEConfigFile']
objs = list(bucket.objects.filter(Prefix=key))
if len(objs) > 0 and objs[0].key == key:
print("BlueCNAMEConfigFile Already Exists!")
else:
obj = s3.Object(json.loads(user_parameters)['BlueCNAMEConfigBucket'], json.loads(user_parameters)['BlueCNAMEConfigFile'])
BlueEnvCnameFile = {'BlueEnvUrl': BlueEnvCname}
obj.put(Body=json.dumps(BlueEnvCnameFile))
print ("Created a new CNAME file")
else:
Status="Failure"
Message="Something went wrong on GreenEnv Creation"
except Exception as e:
print('Function failed due to exception.')
e = sys.exc_info()[0]
print(e)
traceback.print_exc()
Status="Failure"
Message=("Error occured while executing this. The error is %s" %e)
finally:
timer.cancel()
if (Status =="Success"):
put_job_success(job_id, Message)
else:
put_job_failure(job_id, Message)
def CreateConfigTemplateBlue(AppName,BlueEnvId,TempName):
ListTemplates = beanstalkclient.describe_applications(ApplicationNames=[AppName])['Applications'][0]['ConfigurationTemplates']
count = 0
while count < len(ListTemplates):
print (ListTemplates[count])
if ListTemplates[count] == TempName:
print ("ConfigTempAlreadyExists")
return TempName
count += 1
response = beanstalkclient.create_configuration_template(
ApplicationName=AppName,
TemplateName=TempName,
EnvironmentId=BlueEnvId)
return response['TemplateName']
def GetBlueEnvInfo(EnvName):
response = beanstalkclient.describe_environments(
EnvironmentNames=[
EnvName
])
print("Described the environment")
return response
def CreateGreenEnvironment(EnvName,ConfigTemplate,AppVersion,AppName):
GetEnvData = (beanstalkclient.describe_environments(EnvironmentNames=[EnvName]))
print(GetEnvData)
#print (B['Environments'][0]['Status'])
InvalidStatus = ["Terminating","Terminated"]
if not(GetEnvData['Environments']==[]):
print("Environment Exists")
if not(GetEnvData['Environments'][0]['Status']) in InvalidStatus:
print("Existing Environment with the name %s not in Invalid Status" % EnvName)
return (GetEnvData['Environments'][0]['EnvironmentId'])
print ("Creating a new Environment")
response = beanstalkclient.create_environment(
ApplicationName=AppName,
EnvironmentName=EnvName,
TemplateName=ConfigTemplate,
VersionLabel=AppVersion)
return response['EnvironmentId']
def timeout(event, context):
logging.error('Execution is about to time out, sending failure response to CodePipeline')
put_job_failure(event['CodePipeline.job']['id'], "FunctionTimeOut")
def put_job_success(job, message):
print('Putting job success')
print(message)
codepipelineclient.put_job_success_result(jobId=job)
def put_job_failure(job, message):
print('Putting job failure')
print(message)
codepipelineclient.put_job_failure_result(jobId=job, failureDetails={'message': message, 'type': 'JobFailed'})
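# A minimal sketch of the CodePipeline event shape that handler() above reads;
# every identifier, bucket and environment name below is a placeholder.
def _example_codepipeline_event():
    return {
        'CodePipeline.job': {
            'id': 'example-job-id',
            'data': {
                'actionConfiguration': {
                    'configuration': {
                        'UserParameters': json.dumps({
                            'BlueEnvName': 'my-app-blue',
                            'GreenEnvName': 'my-app-green',
                            'BeanstalkAppName': 'my-app',
                            'CreateConfigTempName': 'blue-config-template',
                            'BlueCNAMEConfigBucket': 'example-config-bucket',
                            'BlueCNAMEConfigFile': 'blue-cname.json'
                        })
                    }
                }
            }
        }
    }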
| 43.269841
| 216
| 0.680851
|
469a7b8450904f46053b374129fae9720186b5e1
| 752
|
py
|
Python
|
leetcode/30DayChallenge/week2/4_diameter_bst.py
|
Gaurav-Pande/DataStructures
|
0e4af391274e33a9bb9f999a9032b74d06fc878e
|
[
"MIT"
] | 5
|
2018-09-14T13:14:34.000Z
|
2021-12-29T11:07:35.000Z
|
leetcode/30DayChallenge/week2/4_diameter_bst.py
|
Gaurav-Pande/DataStructures
|
0e4af391274e33a9bb9f999a9032b74d06fc878e
|
[
"MIT"
] | null | null | null |
leetcode/30DayChallenge/week2/4_diameter_bst.py
|
Gaurav-Pande/DataStructures
|
0e4af391274e33a9bb9f999a9032b74d06fc878e
|
[
"MIT"
] | null | null | null |
# link: https://leetcode.com/explore/challenge/card/30-day-leetcoding-challenge/529/week-2/3293/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
        #####
        ## The idea: for every node, compute the heights of the left and right
        ## subtrees; the path through that node has (left height + right height + 2)
        ## edges, and the diameter is the maximum of this over all nodes.
        #####
def dfs(root):
if not root:
return -1
else:
lh = dfs(root.left)
rh = dfs(root.right)
result.append(lh + rh + 2)
return 1 + max(lh, rh)
if not root:
return 0
result = []
dfs(root)
return max(result)
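# A minimal usage sketch: the classic tree [1,2,3,4,5], whose longest path
# 4 -> 2 -> 1 -> 3 has three edges. TreeNode simply mirrors the commented-out
# definition above.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
def _example():
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    assert Solution().diameterOfBinaryTree(root) == 3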
| 19.789474
| 96
| 0.606383
|
4f94b70caca4d2b6cd6848a0359e23754f9cacd2
| 14,589
|
py
|
Python
|
tests/unit/modules/test_win_groupadd.py
|
hvbarker/salt
|
0b1e299b8983854bd55163439e4ac20d81a9dab7
|
[
"Apache-2.0"
] | 1
|
2020-05-17T18:00:38.000Z
|
2020-05-17T18:00:38.000Z
|
tests/unit/modules/test_win_groupadd.py
|
hvbarker/salt
|
0b1e299b8983854bd55163439e4ac20d81a9dab7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_win_groupadd.py
|
hvbarker/salt
|
0b1e299b8983854bd55163439e4ac20d81a9dab7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.win_groupadd as win_groupadd
import salt.utils.win_functions
# Import Salt Testing Libs
from tests.support.helpers import TstSuiteLoggingHandler
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, Mock, patch
from tests.support.unit import TestCase, skipIf
# Import Other Libs
# pylint: disable=unused-import
try:
import win32com
import pythoncom
import pywintypes
PYWINTYPES_ERROR = pywintypes.com_error(
-1234, "Exception occurred.", (0, None, "C", None, 0, -2147352567), None
)
HAS_WIN_LIBS = True
except ImportError:
HAS_WIN_LIBS = False
# pylint: enable=unused-import
class MockMember(object):
def __init__(self, name):
self.ADSPath = name
class MockGroupObj(object):
def __init__(self, ads_name, ads_users):
self._members = [MockMember(x) for x in ads_users]
self.Name = ads_name
def members(self):
return self._members
def Add(self, name):
"""
This should be a no-op unless we want to test raising an error, in
which case this should be overridden in a subclass.
"""
def Remove(self, name):
"""
This should be a no-op unless we want to test raising an error, in
which case this should be overridden in a subclass.
"""
sam_mock = MagicMock(side_effect=lambda x: "HOST\\" + x)
@skipIf(
not HAS_WIN_LIBS,
"win_groupadd unit tests can only be run if win32com, pythoncom, and pywintypes are installed",
)
class WinGroupTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.win_groupadd
"""
def setup_loader_modules(self):
return {win_groupadd: {"__opts__": {"test": False}}}
def test_add(self):
"""
Test adding a new group
"""
info = MagicMock(return_value=False)
with patch.object(win_groupadd, "info", info), patch.object(
win_groupadd, "_get_computer_object", Mock()
):
self.assertTrue(win_groupadd.add("foo"))
def test_add_group_exists(self):
"""
Test adding a new group if the group already exists
"""
info = MagicMock(
return_value={
"name": "foo",
"passwd": None,
"gid": None,
"members": ["HOST\\spongebob"],
}
)
with patch.object(win_groupadd, "info", info), patch.object(
win_groupadd, "_get_computer_object", Mock()
):
self.assertFalse(win_groupadd.add("foo"))
def test_add_error(self):
"""
Test adding a group and encountering an error
"""
class CompObj(object):
def Create(self, type, name):
raise PYWINTYPES_ERROR
obj_comp_mock = MagicMock(return_value=CompObj())
info = MagicMock(return_value=False)
with patch.object(win_groupadd, "info", info), patch.object(
win_groupadd, "_get_computer_object", obj_comp_mock
):
self.assertFalse(win_groupadd.add("foo"))
def test_delete(self):
"""
Test removing a group
"""
info = MagicMock(
return_value={
"name": "foo",
"passwd": None,
"gid": None,
"members": ["HOST\\spongebob"],
}
)
with patch.object(win_groupadd, "info", info), patch.object(
win_groupadd, "_get_computer_object", Mock()
):
self.assertTrue(win_groupadd.delete("foo"))
def test_delete_no_group(self):
"""
Test removing a group that doesn't exists
"""
info = MagicMock(return_value=False)
with patch.object(win_groupadd, "info", info), patch.object(
win_groupadd, "_get_computer_object", Mock()
):
self.assertFalse(win_groupadd.delete("foo"))
def test_delete_error(self):
"""
Test removing a group and encountering an error
"""
class CompObj(object):
def Delete(self, type, name):
raise PYWINTYPES_ERROR
obj_comp_mock = MagicMock(return_value=CompObj())
info = MagicMock(
return_value={
"name": "foo",
"passwd": None,
"gid": None,
"members": ["HOST\\spongebob"],
}
)
with patch.object(win_groupadd, "info", info), patch.object(
win_groupadd, "_get_computer_object", obj_comp_mock
):
self.assertFalse(win_groupadd.delete("foo"))
def test_info(self):
"""
Test if it return information about a group.
"""
obj_group_mock = MagicMock(
return_value=MockGroupObj("salt", ["WinNT://HOST/steve"])
)
with patch.object(win_groupadd, "_get_group_object", obj_group_mock):
self.assertDictEqual(
win_groupadd.info("salt"),
{
"gid": None,
"members": ["HOST\\steve"],
"passwd": None,
"name": "salt",
},
)
def test_getent(self):
obj_group_mock = MagicMock(
return_value=[
MockGroupObj("salt", ["WinNT://HOST/steve"]),
MockGroupObj("salty", ["WinNT://HOST/spongebob"]),
]
)
mock_g_to_g = MagicMock(side_effect=[1, 2])
with patch.object(win_groupadd, "_get_all_groups", obj_group_mock), patch.dict(
win_groupadd.__salt__, {"file.group_to_gid": mock_g_to_g}
):
self.assertListEqual(
win_groupadd.getent(),
[
{
"gid": 1,
"members": ["HOST\\steve"],
"name": "salt",
"passwd": "x",
},
{
"gid": 2,
"members": ["HOST\\spongebob"],
"name": "salty",
"passwd": "x",
},
],
)
def test_getent_context(self):
"""
Test group.getent is using the values in __context__
"""
with patch.dict(win_groupadd.__context__, {"group.getent": True}):
self.assertTrue(win_groupadd.getent())
def test_adduser(self):
"""
Test adding a user to a group
"""
obj_group_mock = MagicMock(
return_value=MockGroupObj("foo", ["WinNT://HOST/steve"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertTrue(win_groupadd.adduser("foo", "spongebob"))
def test_adduser_already_exists(self):
"""
Test adding a user that already exists
"""
obj_group_mock = MagicMock(
return_value=MockGroupObj("foo", ["WinNT://HOST/steve"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertFalse(win_groupadd.adduser("foo", "steve"))
def test_adduser_error(self):
"""
Test adding a user and encountering an error
"""
msg = "An unknown directory object was requested"
error = pywintypes.com_error(
-1234, "Exception occurred.", (0, None, msg, None, 0, -2147352567), None
)
# Create mock group object with mocked Add function which raises the
# exception we need in order to test the error case.
class GroupObj(MockGroupObj):
def Add(self, name):
raise error
obj_group_mock = MagicMock(return_value=GroupObj("foo", ["WinNT://HOST/steve"]))
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
with TstSuiteLoggingHandler() as handler:
self.assertFalse(win_groupadd.adduser("foo", "username"))
expected = "ERROR:Failed to add HOST\\username to group foo. An unknown directory object was requested"
self.assertIn(expected, handler.messages)
def test_adduser_group_does_not_exist(self):
obj_group_mock = MagicMock(side_effect=PYWINTYPES_ERROR)
with patch.object(win_groupadd, "_get_group_object", obj_group_mock):
self.assertFalse(win_groupadd.adduser("foo", "spongebob"))
def test_deluser(self):
"""
Test removing a user from a group
"""
# Test removing a user
obj_group_mock = MagicMock(
return_value=MockGroupObj("foo", ["WinNT://HOST/spongebob"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertTrue(win_groupadd.deluser("foo", "spongebob"))
def test_deluser_no_user(self):
"""
Test removing a user from a group and that user is not a member of the
group
"""
obj_group_mock = MagicMock(
return_value=MockGroupObj("foo", ["WinNT://HOST/steve"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertFalse(win_groupadd.deluser("foo", "spongebob"))
def test_deluser_error(self):
"""
Test removing a user and encountering an error
"""
class GroupObj(MockGroupObj):
def Remove(self, name):
raise PYWINTYPES_ERROR
obj_group_mock = MagicMock(
return_value=GroupObj("foo", ["WinNT://HOST/spongebob"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertFalse(win_groupadd.deluser("foo", "spongebob"))
def test_deluser_group_does_not_exist(self):
obj_group_mock = MagicMock(side_effect=PYWINTYPES_ERROR)
with patch.object(win_groupadd, "_get_group_object", obj_group_mock):
self.assertFalse(win_groupadd.deluser("foo", "spongebob"))
def test_members(self):
"""
Test adding a list of members to a group, all existing users removed
"""
obj_group_mock = MagicMock(
return_value=MockGroupObj("foo", ["WinNT://HOST/steve"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertTrue(win_groupadd.members("foo", "spongebob,patrick,squidward"))
obj_group_mock.assert_called_once_with("foo")
def test_members_correct_membership(self):
"""
Test adding a list of users where the list of users already exists
"""
members_list = [
"WinNT://HOST/spongebob",
"WinNT://HOST/squidward",
"WinNT://HOST/patrick",
]
obj_group_mock = MagicMock(return_value=MockGroupObj("foo", members_list))
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertTrue(win_groupadd.members("foo", "spongebob,patrick,squidward"))
obj_group_mock.assert_called_once_with("foo")
def test_members_group_does_not_exist(self):
"""
Test adding a list of users where the group does not exist
"""
obj_group_mock = MagicMock(side_effect=PYWINTYPES_ERROR)
with patch.object(
salt.utils.win_functions, "get_sam_name", sam_mock
), patch.object(win_groupadd, "_get_group_object", obj_group_mock):
self.assertFalse(win_groupadd.members("foo", "spongebob"))
def test_members_fail_to_remove(self):
"""
Test adding a list of members and fail to remove members not in the list
"""
class GroupObj(MockGroupObj):
def Remove(self, name):
raise PYWINTYPES_ERROR
obj_group_mock = MagicMock(
return_value=GroupObj("foo", ["WinNT://HOST/spongebob"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertFalse(win_groupadd.members("foo", "patrick"))
obj_group_mock.assert_called_once_with("foo")
def test_members_fail_to_add(self):
"""
Test adding a list of members and failing to add
"""
class GroupObj(MockGroupObj):
def Add(self, name):
raise PYWINTYPES_ERROR
obj_group_mock = MagicMock(
return_value=GroupObj("foo", ["WinNT://HOST/spongebob"])
)
with patch.object(
win_groupadd, "_get_group_object", obj_group_mock
), patch.object(salt.utils.win_functions, "get_sam_name", sam_mock):
self.assertFalse(win_groupadd.members("foo", "patrick"))
obj_group_mock.assert_called_once_with("foo")
def test_list_groups(self):
"""
Test that list groups returns a list of groups by name
"""
obj_group_mock = MagicMock(
return_value=[
MockGroupObj("salt", ["WinNT://HOST/steve"]),
MockGroupObj("salty", ["WinNT://HOST/Administrator"]),
]
)
with patch.object(win_groupadd, "_get_all_groups", obj_group_mock):
self.assertListEqual(win_groupadd.list_groups(), ["salt", "salty"])
def test_list_groups_context(self):
"""
Test group.list_groups is using the values in __context__
"""
with patch.dict(win_groupadd.__context__, {"group.list_groups": True}):
self.assertTrue(win_groupadd.list_groups())
| 34.901914
| 119
| 0.587703
|
1abfbe9c65a87f718ec052856a123254c1f8ac71
| 501
|
py
|
Python
|
number_parser/data/ak.py
|
hellc/number-parser
|
1e62fe5562f334f1fbac7eeb3b208e98b255db5f
|
[
"BSD-3-Clause"
] | null | null | null |
number_parser/data/ak.py
|
hellc/number-parser
|
1e62fe5562f334f1fbac7eeb3b208e98b255db5f
|
[
"BSD-3-Clause"
] | null | null | null |
number_parser/data/ak.py
|
hellc/number-parser
|
1e62fe5562f334f1fbac7eeb3b208e98b255db5f
|
[
"BSD-3-Clause"
] | null | null | null |
info = {
"UNIT_NUMBERS": {
"a-ɛ-tɔ-so-hwee": 0,
"hwee": 0,
"a-ɛ-di-kane": 1,
"biako": 1,
"koro": 1,
"abien": 2,
"abiasa": 3,
"anan": 4,
"anum": 5,
"asia": 6,
"asuon": 7,
"awɔtwe": 8,
"akron": 9
},
"DIRECT_NUMBERS": {},
"TENS": {
"aduonu": 20,
"aduasa": 30
},
"HUNDREDS": {},
"BIG_POWERS_OF_TEN": {},
"SKIP_TOKENS": [],
"USE_LONG_SCALE": False
}
| 18.555556
| 28
| 0.37525
|
f57b7aec7b4f6f5f2cbb54c1372ba4e16e1f4ae7
| 3,425
|
py
|
Python
|
nPYc/plotting/_plotDiscreteLoadings.py
|
ghaggart/nPYc-Toolbox
|
d0160b476581fbd695f3f5f0303048466ed95864
|
[
"MIT"
] | 14
|
2018-01-23T23:10:40.000Z
|
2022-02-03T15:15:52.000Z
|
nPYc/plotting/_plotDiscreteLoadings.py
|
ghaggart/nPYc-Toolbox
|
d0160b476581fbd695f3f5f0303048466ed95864
|
[
"MIT"
] | 76
|
2018-01-24T17:37:25.000Z
|
2022-03-23T14:12:54.000Z
|
nPYc/plotting/_plotDiscreteLoadings.py
|
ghaggart/nPYc-Toolbox
|
d0160b476581fbd695f3f5f0303048466ed95864
|
[
"MIT"
] | 11
|
2018-01-25T11:35:47.000Z
|
2022-03-07T15:04:02.000Z
|
import numpy
import seaborn as sns
import matplotlib.pyplot as plt
from pyChemometrics.ChemometricsPCA import ChemometricsPCA
from nPYc.objects import Dataset
def plotDiscreteLoadings(npycDataset, pcaModel, nbComponentPerRow=3, firstComponent=1, metadataColumn='Feature Name', sort=True, savePath=None, figureFormat='png', dpi=72, figureSize=(11, 7)):
"""
    plotDiscreteLoadings(npycDataset, pcaModel, nbComponentPerRow=3, firstComponent=1, metadataColumn='Feature Name', sort=True, **kwargs)
    Plot loadings for a linear model as a set of parallel vertical scatter plots.
    :param Dataset npycDataset: Dataset whose feature metadata supplies the y-axis labels
    :param ChemometricsPCA pcaModel: Model to plot
    :param int nbComponentPerRow: Number of side-by-side loading plots to place per row
    :param int firstComponent: Start plotting components from this component
    :param str metadataColumn: Column of featureMetadata used to label the features
    :param bool sort: Plot variables in order of their magnitude in component one
"""
if not isinstance(npycDataset, Dataset):
raise TypeError('npycDataset must be a Dataset object')
if not isinstance(pcaModel, ChemometricsPCA):
raise TypeError('pcaModel must be an instance of ChemometricsPCA.')
if (firstComponent >= pcaModel.ncomps) or (firstComponent <= 0):
raise ValueError(
            'firstComponent must be greater than zero and strictly less than the number of components in the model.')
if sort:
sortOrder = numpy.argsort(pcaModel.loadings[0, :])
else:
sortOrder = numpy.arange(0, pcaModel.loadings.shape[1])
# Define how many components to plot and how many rows
firstComponent = firstComponent - 1
lastComponent = pcaModel.ncomps - 1
numberComponent = lastComponent - firstComponent + 1
numberRows = int(numpy.ceil(numberComponent / nbComponentPerRow))
# It is not possible to plot more than 30 rows clearly, extend the plot height
extFactor = pcaModel.loadings.shape[1] / 30
newHeight = figureSize[1] * extFactor
# Extend by the number of rows
newHeight = newHeight * numberRows
figsize = (figureSize[0], newHeight)
fig, axes = plt.subplots(numberRows, nbComponentPerRow, sharey=True, figsize=figsize, dpi=dpi)
# Plot each component
for i in range(firstComponent, lastComponent + 1):
# grid position
rowPos = int(numpy.floor((i - firstComponent) / nbComponentPerRow))
colPos = (i - firstComponent) % nbComponentPerRow
# different indexing of axes if only 1 row or multiple rows
if nbComponentPerRow >= numberComponent:
currentAxes = axes[colPos]
else:
currentAxes = axes[rowPos, colPos]
currentAxes.scatter(pcaModel.loadings[i, sortOrder],
numpy.arange(0, pcaModel.loadings.shape[1]),
s=100,
c=numpy.absolute(pcaModel.loadings[i, sortOrder]),
linewidths=1,
edgecolor='none',
cmap=plt.get_cmap('plasma'),
marker='o',
zorder=10)
currentAxes.axvline(x=0, zorder=1)
currentAxes.set_title('PC %i' % (i + 1))
currentAxes.set_xlabel('%.2f%%' % (pcaModel.modelParameters['VarExpRatio'][i] * 100))
# Add y-label to first plot of row
if rowPos == 0:
currentAxes.axes.set_yticks(numpy.arange(0, pcaModel.loadings.shape[1]))
currentAxes.axes.set_yticklabels(npycDataset.featureMetadata[metadataColumn].values[sortOrder])
currentAxes.set_ylim((-0.5, pcaModel.loadings.shape[1] - 0.5))
# Random 'ValueError: bottom cannot be >= top' from mpl which they cannot reliably correct
try:
plt.tight_layout()
except ValueError:
pass
##
# Save or draw
##
if savePath:
fig.savefig(savePath, format=figureFormat, dpi=dpi)
plt.close()
else:
plt.show()
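# A minimal call sketch: `npyc_dataset` is assumed to be an nPYc Dataset and
# `pca_model` a fitted ChemometricsPCA; the save path is a placeholder.
def _example_plot(npyc_dataset, pca_model):
    plotDiscreteLoadings(npyc_dataset, pca_model,
                         nbComponentPerRow=3,
                         firstComponent=1,
                         metadataColumn='Feature Name',
                         sort=True,
                         savePath='discrete_loadings.png')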
| 36.052632
| 192
| 0.741606
|
0719590f4673ff11b6e57d81a9c722c86180f5b6
| 1,667
|
py
|
Python
|
src/action/PawnMove.py
|
jniestroy/quoridor_bot
|
afe5acba84a60b3ed03c447b2426794cf1000531
|
[
"MIT"
] | null | null | null |
src/action/PawnMove.py
|
jniestroy/quoridor_bot
|
afe5acba84a60b3ed03c447b2426794cf1000531
|
[
"MIT"
] | null | null | null |
src/action/PawnMove.py
|
jniestroy/quoridor_bot
|
afe5acba84a60b3ed03c447b2426794cf1000531
|
[
"MIT"
] | null | null | null |
#
# PawnMove.py
#
# @author Alain Rinder
# @date 2017.06.02
# @version 0.1
#
from src.action.IAction import *
class PawnMove(IAction):
def __init__(self, fromCoord, toCoord, throughCoord = None):
self.fromCoord = fromCoord
self.toCoord = toCoord
self.throughCoord = throughCoord
def isJump(self):
return (self.throughCoord is not None)
# https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
def __eq__(self, other):
"""Override the default Equals behavior"""
if isinstance(other, self.__class__):
#return self.__dict__ == other.__dict__
return self.fromCoord == other.fromCoord and self.toCoord == other.toCoord and self.throughCoord == other.throughCoord
return NotImplemented
# https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
def __ne__(self, other):
"""Define a non-equality test"""
if isinstance(other, self.__class__):
return not self.__eq__(other)
return NotImplemented
# https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
def __hash__(self):
"""Override the default hash behavior (that returns the id or the object)"""
#return hash(tuple(sorted(self.__dict__.items())))
return hash((self.fromCoord, self.toCoord, self.throughCoord))
def __str__(self):
return "from %s to %s%s" % (self.fromCoord, self.toCoord, " through %s" % self.throughCoord if self.throughCoord is not None else "")
| 36.23913
| 139
| 0.678464
|
3db4d546d1c05f36687b755f020edcdbb855382b
| 3,740
|
py
|
Python
|
src/figures/model_comp.py
|
rodluger/Z-dependent-DWDs
|
d61cf2b883ed5ef204cd65aae42c2cb690189251
|
[
"MIT"
] | null | null | null |
src/figures/model_comp.py
|
rodluger/Z-dependent-DWDs
|
d61cf2b883ed5ef204cd65aae42c2cb690189251
|
[
"MIT"
] | null | null | null |
src/figures/model_comp.py
|
rodluger/Z-dependent-DWDs
|
d61cf2b883ed5ef204cd65aae42c2cb690189251
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import astropy.units as u
models = ["log_uniform", "qcflag_4", "alpha_0.25", "alpha_5"]
model_names = ["fiducial", r"q3", r"$\alpha25$", r"$\alpha5$"]
colors = sns.color_palette("mako", n_colors=len(models))
Tobs = 4 * u.yr
def func(x, a, b, c, d, e):
return a + b * x + c * x ** 2 + d * x ** 3 + e * x ** 4
mosaic = """
AA
AA
BB
"""
fig = plt.figure(figsize=(6, 8))
ax_dict = fig.subplot_mosaic(mosaic)
lisa_ratio = []
n_lisa_F50_list = []
popt_F50_list = []
popt_FZ_list = []
for m in models:
if m == "log_uniform":
n_lisa_F50 = pd.read_hdf("../data/numLISA_30bins_F50.hdf", key="data")
n_lisa_FZ = pd.read_hdf("../data/numLISA_30bins_FZ.hdf", key="data")
popt_F50 = pd.read_hdf("../data/resolved_DWDs_F50.hdf", key="conf_fit")
popt_FZ = pd.read_hdf("../data/resolved_DWDs_FZ.hdf", key="conf_fit")
else:
n_lisa_F50 = pd.read_hdf(
"../data/numLISA_30bins_F50_{}.hdf".format(m), key="data"
)
n_lisa_FZ = pd.read_hdf(
"../data/numLISA_30bins_FZ_{}.hdf".format(m), key="data"
)
popt_F50 = pd.read_hdf(
"../data/resolved_DWDs_F50_{}.hdf".format(m), key="conf_fit"
)
popt_FZ = pd.read_hdf(
"../data/resolved_DWDs_FZ_{}.hdf".format(m), key="conf_fit"
)
n_lisa_F50 = np.sum(n_lisa_F50.values.flatten())
n_lisa_FZ = np.sum(n_lisa_FZ.values.flatten())
lisa_ratio.append(n_lisa_FZ / n_lisa_F50)
n_lisa_F50_list.append(n_lisa_F50)
popt_F50 = popt_F50.values.flatten()
popt_FZ = popt_FZ.values.flatten()
popt_F50_list.append(popt_F50)
popt_FZ_list.append(popt_FZ)
for popt_F50, popt_FZ, ii in zip(popt_F50_list, popt_FZ_list, range(len(popt_FZ_list))):
conf_fit_FZ = (
10
** func(
x=np.log10(np.linspace(1e-4, 1e-1, 100000)),
a=popt_FZ[0],
b=popt_FZ[1],
c=popt_FZ[2],
d=popt_FZ[3],
e=popt_FZ[4],
)
* Tobs.to(u.s).value
)
conf_fit_F50 = (
10
** func(
x=np.log10(np.linspace(1e-4, 1e-1, 100000)),
a=popt_F50[0],
b=popt_F50[1],
c=popt_F50[2],
d=popt_F50[3],
e=popt_F50[4],
)
* Tobs.to(u.s).value
)
ax_dict["A"].plot(
np.linspace(1e-4, 1e-1, 100000),
conf_fit_F50,
color=colors[ii],
ls="--",
lw=2.5,
zorder=10 - ii,
)
ax_dict["A"].plot(
np.linspace(1e-4, 1e-1, 100000),
conf_fit_FZ,
color=colors[ii],
ls="-",
lw=2.5,
label=model_names[ii],
)
ax_dict["A"].set_xscale("log")
ax_dict["A"].set_yscale("log")
ax_dict["A"].set_ylabel(r"confusion fit [Hz$^{-1}$]", size=16)
ax_dict["A"].set_xlabel(r"f$_{\rm{GW}}$ [Hz]", size=16)
ax_dict["A"].set_xlim(1e-4, 3.5e-3)
ax_dict["A"].set_ylim(1e-38, 7e-35)
for ii in range(len(lisa_ratio)):
ax_dict["B"].scatter(
n_lisa_F50_list[ii],
lisa_ratio[ii],
color=colors[ii],
marker="s",
s=45,
label=model_names[ii],
)
ax_dict["A"].legend(prop={"size": 12}, frameon=False, loc="upper right")
ax_dict["B"].set_xscale("log")
ax_dict["B"].axhline(0.5, ls="--", color="silver", lw=2, zorder=0)
ax_dict["B"].set_ylim(0.2, 0.8)
ax_dict["B"].set_xlim(3e5, 1e8)
ax_dict["B"].set_ylabel(r"N$_{\rm{LISA, FZ}}$/N$_{\rm{LISA, F50}}$", size=16)
ax_dict["B"].set_xlabel(r"N$_{\rm{LISA, F50}}$", size=16)
ax_dict["A"].tick_params(labelsize=12)
ax_dict["B"].tick_params(labelsize=12)
plt.tight_layout()
plt.savefig("model_comp.pdf", dpi=100)
| 26.153846
| 88
| 0.573529
|
80fda888c355ffe93c1fce36666c28d58d065d90
| 5,813
|
py
|
Python
|
client/verta/verta/registry/_docker_image.py
|
vishalbelsare/modeldb
|
0c135828dae668c769a4c4a5e933d38a934a9d10
|
[
"Apache-2.0"
] | 835
|
2017-02-08T20:14:24.000Z
|
2020-03-12T17:37:49.000Z
|
client/verta/verta/registry/_docker_image.py
|
vishalbelsare/modeldb
|
0c135828dae668c769a4c4a5e933d38a934a9d10
|
[
"Apache-2.0"
] | 651
|
2019-04-18T12:55:07.000Z
|
2022-03-31T23:45:09.000Z
|
client/verta/verta/registry/_docker_image.py
|
vishalbelsare/modeldb
|
0c135828dae668c769a4c4a5e933d38a934a9d10
|
[
"Apache-2.0"
] | 170
|
2017-02-13T14:49:22.000Z
|
2020-02-19T17:59:12.000Z
|
# -*- coding: utf-8 -*-
from verta.external import six
from verta._protos.public.registry import RegistryService_pb2
from verta import environment
from verta._internal_utils import arg_handler
class DockerImage(object):
"""Docker image information.
For use around :meth:`RegisteredModelVersion.log_docker() <verta.registry.entities.RegisteredModelVersion.log_docker>`.
.. versionadded:: 0.20.0
Parameters
----------
port : int
Container port for access.
request_path : str
URL path for routing predictions.
health_path : str
URL path for container health checks.
repository : str
Image repository.
tag : str, optional
Image tag. Either this or `sha` must be provided.
sha : str, optional
Image ID. Either this or `tag` must be provided.
env_vars : list of str, or dict of str to str, optional
Environment variables. If a list of names is provided, the values will
be captured from the current environment. If not provided, nothing
will be captured.
Attributes
----------
port : int
Container port for access.
request_path : str
URL path for routing predictions.
health_path : str
URL path for container health checks.
repository : str
Image repository.
tag : str or None
Image tag.
sha : str or None
Image ID.
env_vars : dict of str to str, or None
Environment variables.
Examples
--------
.. code-block:: python
from verta.registry import DockerImage
docker_image = DockerImage(
port=5000,
request_path="/predict_json",
health_path="/health",
repository="012345678901.dkr.ecr.apne2-az1.amazonaws.com/models/example",
tag="example",
env_vars={"CUDA_VISIBLE_DEVICES": "0,1"},
)
"""
def __init__(
self,
port,
request_path,
health_path,
repository,
tag=None,
sha=None,
env_vars=None,
):
self._port = int(port)
self._request_path = arg_handler.ensure_starts_with_slash(request_path)
self._health_path = arg_handler.ensure_starts_with_slash(health_path)
self._docker_env = environment.Docker(
repository=repository,
tag=tag,
sha=sha,
env_vars=env_vars,
)
def __repr__(self):
lines = ["Docker Image"]
lines.extend(
[
"port: {}".format(self.port),
"request path: {}".format(self.request_path),
"health path: {}".format(self.health_path),
"repository: {}".format(self.repository),
]
)
# TODO: consolidate the following lines with Docker.__repr__()
if self.tag:
lines.append("tag: {}".format(six.ensure_str(self.tag)))
if self.sha:
lines.append("sha: {}".format(six.ensure_str(self.sha)))
if self.env_vars:
lines.append("environment variables:")
lines.extend(
sorted(
" {}={}".format(name, value)
for name, value in self.env_vars.items()
)
)
return "\n ".join(lines)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._as_model_ver_proto() == other._as_model_ver_proto()
def __ne__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return not self.__eq__(other)
@property
def port(self):
return self._port
@property
def request_path(self):
return self._request_path
@property
def health_path(self):
return self._health_path
@property
def repository(self):
return self._docker_env.repository
@property
def tag(self):
return self._docker_env.tag
@property
def sha(self):
return self._docker_env.sha
@property
def env_vars(self):
return self._docker_env.env_vars
@classmethod
def _from_model_ver_proto(cls, model_ver_msg):
return cls(
port=model_ver_msg.docker_metadata.request_port,
request_path=model_ver_msg.docker_metadata.request_path,
health_path=model_ver_msg.docker_metadata.health_path,
repository=model_ver_msg.environment.docker.repository,
tag=model_ver_msg.environment.docker.tag,
sha=model_ver_msg.environment.docker.sha,
env_vars={
var.name: var.value
for var
in model_ver_msg.environment.environment_variables
},
)
def _as_model_ver_proto(self):
"""Return a protobuf representation of this Docker image information.
Returns
-------
RegistryService_pb2.ModelVersion
"""
return RegistryService_pb2.ModelVersion(
docker_metadata=RegistryService_pb2.DockerMetadata(
request_port=self._port,
request_path=self._request_path,
health_path=self._health_path,
),
environment=self._docker_env._as_env_proto(),
)
def _merge_into_model_ver_proto(self, model_ver_msg):
"""Set `model_ver_msg`'s ``docker_metadata`` and ``environment``.
Parameters
----------
model_ver_msg : RegistryService_pb2.ModelVersion
A model version's protobuf message.
"""
model_ver_msg.docker_metadata.Clear()
model_ver_msg.environment.Clear()
model_ver_msg.MergeFrom(self._as_model_ver_proto())
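# Illustrative check (comments added; not part of the library source): equality
# is defined on the protobuf representation, so two DockerImage objects built
# from the same placeholder arguments compare equal.
#
# >>> a = DockerImage(port=5000, request_path="/predict_json", health_path="/health",
# ...                 repository="example/repo", tag="v1")
# >>> b = DockerImage(port=5000, request_path="/predict_json", health_path="/health",
# ...                 repository="example/repo", tag="v1")
# >>> a == b
# True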
| 28.495098
| 123
| 0.599002
|
7184f30c003f21d72a9fb04be779995a0bab3f7d
| 330
|
py
|
Python
|
ao2j/lt1300/017/A.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
ao2j/lt1300/017/A.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
ao2j/lt1300/017/A.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
n = int(input())
lefts = 0
rights = 0
for _ in range(n):
l,r = [int(x) for x in input().split()]
lefts += l
rights += r
def counts(doors, n):
closed = n - doors
opened = doors
if opened > closed:
return closed
else:
return opened
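# Worked example (comment added for clarity): counts(doors, n) is effectively
# min(doors, n - doors); e.g. counts(3, 5) gives closed = 2, opened = 3 -> 2.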
ans = counts(lefts, n) + counts(rights, n)
print(ans)
| 17.368421
| 43
| 0.560606
|
cd39a19e299fcd33b6add9c9f737f3ee0d0e974a
| 3,754
|
py
|
Python
|
services/app/src/tests/test_profile.py
|
chimailo/livia
|
82447871a2ad0dc5e964b6298140409b27b12a7b
|
[
"MIT"
] | null | null | null |
services/app/src/tests/test_profile.py
|
chimailo/livia
|
82447871a2ad0dc5e964b6298140409b27b12a7b
|
[
"MIT"
] | null | null | null |
services/app/src/tests/test_profile.py
|
chimailo/livia
|
82447871a2ad0dc5e964b6298140409b27b12a7b
|
[
"MIT"
] | null | null | null |
import json
import unittest
from src.tests.base import BaseTestCase
from src.tests.utils import create_token, add_user
class TestProfileBlueprint(BaseTestCase):
def test_check_username_does_not_exist(self):
token = create_token()
response = self.client.post(
'/api/profile/check-username',
headers={'Authorization': f'Bearer {token}'},
data=json.dumps({'username': 'user'}),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertTrue(data.get('res'))
def test_check_username_do_exist(self):
user = add_user(firstname='test', lastname='user6',
username='testuser6', email='testuser6@test.com')
token = create_token()
response = self.client.post(
'/api/profile/check-username',
data=json.dumps({'username': 'testuser6'}),
content_type='application/json',
headers={'Authorization': f'Bearer {token}'},
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertFalse(data.get('res'))
def test_get_profile(self):
token = create_token()
response = self.client.get(
'/api/profile',
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertTrue(data.get('user') is not None)
self.assertTrue(data.get('profile') is not None)
def test_update_profile(self):
token = create_token()
response = self.client.put(
'/api/profile',
data=json.dumps({
'firstname': 'user',
'lastname': 'admin',
'username': 'useradmin',
'bio': 'I am the admin'
}),
content_type='application/json',
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn('updated your profile', data.get('message'))
def test_update_profile_username_exists(self):
add_user(firstname='test', lastname='user2',
username='testuser2', email='testuser2@test.com')
token = create_token()
response = self.client.put(
'/api/profile',
headers={'Authorization': f'Bearer {token}'},
data=json.dumps({
'firstname': 'user',
'lastname': 'test',
'username': 'testuser2',
'bio': 'I am the admin'
}),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn("already taken", data.get('message'))
def test_update_profile_invalid_data(self):
token = create_token()
response = self.client.put(
'/api/profile',
headers={'Authorization': f'Bearer {token}'},
data=json.dumps({
'firstname': 'u',
'bio': 'I am the admin'
}),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 422)
self.assertFalse(data.get('error') is None)
def test_delete_profile(self):
token = create_token()
response = self.client.delete(
'/api/profile',
headers={'Authorization': f'Bearer {token}'}
)
self.assertEqual(response.status_code, 200)
| 36.096154
| 66
| 0.574054
|
3fcd9c717539e67e128f2b028eb390582231b755
| 5,453
|
py
|
Python
|
mcculw-master/examples/ui/ULTI01.py
|
Tridentflayer/structure_tester_project
|
0c67e450f3c1cd29dd9385ce407cc1407d9b9251
|
[
"MIT"
] | null | null | null |
mcculw-master/examples/ui/ULTI01.py
|
Tridentflayer/structure_tester_project
|
0c67e450f3c1cd29dd9385ce407cc1407d9b9251
|
[
"MIT"
] | null | null | null |
mcculw-master/examples/ui/ULTI01.py
|
Tridentflayer/structure_tester_project
|
0c67e450f3c1cd29dd9385ce407cc1407d9b9251
|
[
"MIT"
] | null | null | null |
"""
File: ULTI01.py
Library Call Demonstrated: mcculw.ul.t_in()
Purpose: Reads a temperature input channel.
Demonstration: Displays the temperature input.
Special Requirements: Unless the board at BoardNum(=0) does not use
EXP boards for temperature measurements(the
CIO-DAS-TC or USB-2001-TC for example), it must
have an A/D converter with an attached EXP
board. Thermocouples must be wired to EXP
channels selected.
"""
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import tkinter as tk
from mcculw import ul
from mcculw.enums import TempScale
from mcculw.ul import ULError
from mcculw.device_info import DaqDeviceInfo
try:
from ui_examples_util import UIExample, show_ul_error
except ImportError:
from .ui_examples_util import UIExample, show_ul_error
class ULTI01(UIExample):
def __init__(self, master):
super(ULTI01, self).__init__(master)
# By default, the example detects all available devices and selects the
# first device listed.
# If use_device_detection is set to False, the board_num property needs
# to match the desired board number configured with Instacal.
use_device_detection = True
self.board_num = 0
self.running = False
try:
if use_device_detection:
self.configure_first_detected_device()
self.device_info = DaqDeviceInfo(self.board_num)
self.ai_info = self.device_info.get_ai_info()
if self.ai_info.temp_supported:
self.create_widgets()
else:
self.create_unsupported_widgets()
except ULError:
self.create_unsupported_widgets(True)
def update_value(self):
channel = self.get_channel_num()
try:
# Get a value from the device
value = ul.t_in(self.board_num, channel, TempScale.CELSIUS)
# Display the raw value
self.value_label["text"] = '{:.3f}'.format(value)
# Call this method again until the stop button is pressed (or an
# error occurs)
if self.running:
self.after(100, self.update_value)
except ULError as e:
self.stop()
show_ul_error(e)
def stop(self):
self.running = False
self.start_button["command"] = self.start
self.start_button["text"] = "Start"
def start(self):
self.running = True
self.start_button["command"] = self.stop
self.start_button["text"] = "Stop"
self.update_value()
def get_channel_num(self):
if self.ai_info.num_temp_chans == 1:
return 0
try:
return int(self.channel_entry.get())
except ValueError:
return 0
def validate_channel_entry(self, p):
if p == '':
return True
try:
value = int(p)
if value < 0 or value > self.ai_info.num_temp_chans - 1:
return False
except ValueError:
return False
return True
def create_widgets(self):
'''Create the tkinter UI'''
self.device_label = tk.Label(self)
self.device_label.pack(fill=tk.NONE, anchor=tk.NW)
self.device_label["text"] = ('Board Number ' + str(self.board_num)
+ ": " + self.device_info.product_name
+ " (" + self.device_info.unique_id + ")")
main_frame = tk.Frame(self)
main_frame.pack(fill=tk.X, anchor=tk.NW)
channel_vcmd = self.register(self.validate_channel_entry)
curr_row = 0
if self.ai_info.num_temp_chans > 1:
channel_entry_label = tk.Label(main_frame)
channel_entry_label["text"] = "Channel Number:"
channel_entry_label.grid(
row=curr_row, column=0, sticky=tk.W)
self.channel_entry = tk.Spinbox(
main_frame, from_=0,
to=max(self.ai_info.num_temp_chans - 1, 0),
validate='key', validatecommand=(channel_vcmd, '%P'))
self.channel_entry.grid(row=curr_row, column=1, sticky=tk.W)
curr_row += 1
value_left_label = tk.Label(main_frame)
value_left_label["text"] = (
"Value read from selected channel (\N{DEGREE SIGN}C):")
value_left_label.grid(row=curr_row, column=0, sticky=tk.W)
self.value_label = tk.Label(main_frame)
self.value_label.grid(row=curr_row, column=1, sticky=tk.W)
button_frame = tk.Frame(self)
button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)
self.start_button = tk.Button(button_frame)
self.start_button["text"] = "Start"
self.start_button["command"] = self.start
self.start_button.grid(row=0, column=0, padx=3, pady=3)
quit_button = tk.Button(button_frame)
quit_button["text"] = "Quit"
quit_button["command"] = self.master.destroy
quit_button.grid(row=0, column=1, padx=3, pady=3)
# Start the example if this module is being run
if __name__ == "__main__":
# Start the example
ULTI01(master=tk.Tk()).mainloop()
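# Minimal non-GUI sketch (added for illustration; not part of the original
# example). It assumes board 0 is already configured and channel 0 has a
# thermocouple attached:
#
#   from mcculw import ul
#   from mcculw.enums import TempScale
#   value = ul.t_in(0, 0, TempScale.CELSIUS)
#   print('{:.3f}'.format(value))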
| 33.869565
| 79
| 0.597286
|
56cf8b2ff9a6bf7dcda033c97ae2c3a444a91cf5
| 43,568
|
py
|
Python
|
nnunet/network_architecture/custom_modules/error.py
|
PingjiaZhang/nnUNet
|
f07bd13556f59ef3689fc53f5294de78764d3ae9
|
[
"Apache-2.0"
] | null | null | null |
nnunet/network_architecture/custom_modules/error.py
|
PingjiaZhang/nnUNet
|
f07bd13556f59ef3689fc53f5294de78764d3ae9
|
[
"Apache-2.0"
] | null | null | null |
nnunet/network_architecture/custom_modules/error.py
|
PingjiaZhang/nnUNet
|
f07bd13556f59ef3689fc53f5294de78764d3ae9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
from nnunet.utilities.random_stuff import no_op
from nnunet.utilities.to_torch import to_cuda, maybe_to_torch
from torch import nn
import torch
from scipy.ndimage.filters import gaussian_filter
from typing import Union, Tuple, List
from torch.cuda.amp import autocast
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def get_device(self):
if next(self.parameters()).device == "cpu":
return "cpu"
else:
return next(self.parameters()).device.index
def set_device(self, device):
if device == "cpu":
self.cpu()
else:
self.cuda(device)
def forward(self, x):
raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
super(NeuralNetwork, self).__init__()
# if we have 5 pooling then our patch size must be divisible by 2**5
self.input_shape_must_be_divisible_by = None # for example in a 2d network that does 5 pool in x and 6 pool
# in y this would be (32, 64)
        # we need to know this because we need to know if we are a 2d or a 3d network
self.conv_op = None # nn.Conv2d or nn.Conv3d
        # this tells us how many channels we have in the output. Important for preallocation in inference
self.num_classes = None # number of channels in the output
# depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions
        # during inference, we need to apply the nonlinearity, however. So it is important to let the network know what
# to apply in inference. For the most part this will be softmax
self.inference_apply_nonlin = lambda x: x # softmax_helper
# This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the
# center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians
# can be expensive, so it makes sense to save and reuse them.
self._gaussian_3d = self._patch_size_for_gaussian_3d = None
self._gaussian_2d = self._patch_size_for_gaussian_2d = None
def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),
use_sliding_window: bool = False,
step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,
use_gaussian: bool = False, pad_border_mode: str = "constant",
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will
detect that automatically and run the appropriate code.
        When running predictions, you need to specify whether you want to run fully convolutional or sliding window
based inference. We very strongly recommend you use sliding window with the default settings.
It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
the network is not in eval mode it will print a warning.
:param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).
:param do_mirroring: If True, use test time data augmentation in the form of mirroring
        :param mirror_axes: Determines which axes to use for mirroring. Per default, mirroring is done along all three
axes
:param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
:param step_size: When running sliding window prediction, the step size determines the distance between adjacent
predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
        as a fraction of the patch_size. 0.5 is the default and means that we advance by patch_size * 0.5 between
predictions. step_size cannot be larger than 1!
:param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
this will either crash or give potentially less accurate segmentations
:param regions_class_order: Fabian only
:param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
:param pad_border_mode: leave this alone
:param pad_kwargs: leave this alone
        :param all_in_gpu: experimental. You probably want to leave this as it is
:param verbose: Do you want a wall of text? If yes then set this to True
:param mixed_precision: if True, will run inference in mixed precision with autocast()
:return:
"""
torch.cuda.empty_cache()
assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \
'predictions'
if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
assert self.get_device() != "cpu", "CPU not implemented"
if pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
# A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old
# code that uses this convention
if len(mirror_axes):
if self.conv_op == nn.Conv2d:
if max(mirror_axes) > 1:
raise ValueError("mirror axes. duh")
if self.conv_op == nn.Conv3d:
if max(mirror_axes) > 2:
raise ValueError("mirror axes. duh")
if self.training:
print('WARNING! Network is in train mode during inference. This may be intended, or not...')
assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
if mixed_precision:
context = autocast
else:
context = no_op
with context():
with torch.no_grad():
if self.conv_op == nn.Conv3d:
if use_sliding_window:
res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
verbose=verbose)
else:
res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)
elif self.conv_op == nn.Conv2d:
if use_sliding_window:
res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs, all_in_gpu, False)
else:
res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs, all_in_gpu, False)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
return res
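    # Illustrative call (comment added for clarity; the values are placeholders
    # and assume a trained 3D network in eval mode on the GPU):
    #
    #   seg, softmax = net.predict_3D(data, do_mirroring=True, mirror_axes=(0, 1, 2),
    #                                 use_sliding_window=True, step_size=0.5,
    #                                 patch_size=(128, 128, 128), use_gaussian=True,
    #                                 mixed_precision=True)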
def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,
step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,
use_gaussian: bool = False, pad_border_mode: str = "constant",
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D
image with that (you dummy).
        When running predictions, you need to specify whether you want to run fully convolutional or sliding window
based inference. We very strongly recommend you use sliding window with the default settings.
It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
the network is not in eval mode it will print a warning.
:param x: Your input data. Must be a nd.ndarray of shape (c, x, y).
:param do_mirroring: If True, use test time data augmentation in the form of mirroring
        :param mirror_axes: Determines which axes to use for mirroring. Per default, mirroring is done along all three
axes
:param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
:param step_size: When running sliding window prediction, the step size determines the distance between adjacent
predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
        as a fraction of the patch_size. 0.5 is the default and means that we advance by patch_size * 0.5 between
predictions. step_size cannot be larger than 1!
:param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
this will either crash or give potentially less accurate segmentations
:param regions_class_order: Fabian only
:param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
:param pad_border_mode: leave this alone
:param pad_kwargs: leave this alone
        :param all_in_gpu: experimental. You probably want to leave this as it is
:param verbose: Do you want a wall of text? If yes then set this to True
:return:
"""
torch.cuda.empty_cache()
        assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \
'predictions'
if self.conv_op == nn.Conv3d:
raise RuntimeError("Cannot predict 2d if the network is 3d. Dummy.")
if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
assert self.get_device() != "cpu", "CPU not implemented"
if pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
# A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old
# code that uses this convention
if len(mirror_axes):
if max(mirror_axes) > 1:
raise ValueError("mirror axes. duh")
if self.training:
print('WARNING! Network is in train mode during inference. This may be intended, or not...')
assert len(x.shape) == 3, "data must have shape (c,x,y)"
if mixed_precision:
context = autocast
else:
context = no_op
with context():
with torch.no_grad():
if self.conv_op == nn.Conv2d:
if use_sliding_window:
res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs, all_in_gpu, verbose)
else:
res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs, verbose)
else:
raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
return res
@staticmethod
def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:
tmp = np.zeros(patch_size)
center_coords = [i // 2 for i in patch_size]
sigmas = [i * sigma_scale for i in patch_size]
tmp[tuple(center_coords)] = 1
gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)
gaussian_importance_map = gaussian_importance_map / np.max(gaussian_importance_map) * 1
gaussian_importance_map = gaussian_importance_map.astype(np.float32)
# gaussian_importance_map cannot be 0, otherwise we may end up with nans!
gaussian_importance_map[gaussian_importance_map == 0] = np.min(
gaussian_importance_map[gaussian_importance_map != 0])
return gaussian_importance_map
@staticmethod
def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:
assert [i >= j for i, j in zip(image_size, patch_size)], "image size must be as large or larger than patch_size"
assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'
        # our step width is patch_size*step_size at most, but can be narrower. For example if we have an image size of
        # 110, a patch size of 32 and a step_size of 0.5, then we take 6 steps starting at coordinates 0, 16, 31, 47, 62, 78
target_step_sizes_in_voxels = [i * step_size for i in patch_size]
num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, patch_size)]
steps = []
for dim in range(len(patch_size)):
# the highest step value for this dimension is
max_step_value = image_size[dim] - patch_size[dim]
if num_steps[dim] > 1:
actual_step_size = max_step_value / (num_steps[dim] - 1)
else:
actual_step_size = 99999999999 # does not matter because there is only one step at 0
steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]
steps.append(steps_here)
return steps
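    # Worked examples (comments added for clarity; they match the checks in the
    # __main__ block below): with patch_size (30, 224, 224) and image_size
    # (30, 224, 224) any step_size yields [[0], [0], [0]] (a single tile), and
    # with image_size (60, 448, 224) and step_size 1 the result is
    # [[0, 30], [0, 224], [0]].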
def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
# better safe than sorry
assert len(x.shape) == 4, "x must be (c, x, y, z)"
assert self.get_device() != "cpu"
if verbose: print("step_size:", step_size)
if verbose: print("do mirror:", do_mirroring)
assert patch_size is not None, "patch_size cannot be None for tiled prediction"
# for sliding window inference the image must at least be as large as the patch size. It does not matter
# whether the shape is divisible by 2**num_pool as long as the patch size is
data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None)
data_shape = data.shape # still c, x, y, z
# compute the steps for sliding window
steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size)
num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])
if verbose:
print("data shape:", data_shape)
print("patch size:", patch_size)
print("steps (x, y, and z):", steps)
print("number of tiles:", num_tiles)
# we only need to compute that once. It can take a while to compute this due to the large sigma in
# gaussian_filter
if use_gaussian and num_tiles > 1:
if self._gaussian_3d is None or not all(
[i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):
if verbose: print('computing Gaussian')
gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8)
self._gaussian_3d = gaussian_importance_map
self._patch_size_for_gaussian_3d = patch_size
else:
if verbose: print("using precomputed Gaussian")
gaussian_importance_map = self._gaussian_3d
gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
non_blocking=True)
else:
gaussian_importance_map = None
if all_in_gpu:
            # If we run the inference on GPU only (meaning all tensors are allocated on the GPU, which reduces
            # CPU-GPU communication but requires more GPU memory) we need to preallocate a few things on GPU
if use_gaussian and num_tiles > 1:
# half precision for the outputs should be good enough. If the outputs here are half, the
# gaussian_importance_map should be as well
gaussian_importance_map = gaussian_importance_map.half()
# make sure we did not round anything to 0
gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
gaussian_importance_map != 0].min()
add_for_nb_of_preds = gaussian_importance_map
else:
add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device())
if verbose: print("initializing result array (on GPU)")
aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
if verbose: print("moving data to GPU")
data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True)
if verbose: print("initializing result_numsamples (on GPU)")
aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
else:
if use_gaussian and num_tiles > 1:
add_for_nb_of_preds = self._gaussian_3d
else:
add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
for x in steps[0]:
lb_x = x
ub_x = x + patch_size[0]
for y in steps[1]:
lb_y = y
ub_y = y + patch_size[1]
for z in steps[2]:
lb_z = z
ub_z = z + patch_size[2]
predicted_patch = self._internal_maybe_mirror_and_pred_3D(
data[None, :, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z], mirror_axes, do_mirroring,
gaussian_importance_map)[0]
if all_in_gpu:
predicted_patch = predicted_patch.half()
else:
predicted_patch = predicted_patch.cpu().numpy()
aggregated_results[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += predicted_patch
aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds
        # we reverse the padding here (remember that we padded the input to be at least as large as the patch size)
slicer = tuple(
[slice(0, aggregated_results.shape[i]) for i in
range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
aggregated_results = aggregated_results[slicer]
aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
# computing the class_probabilities by dividing the aggregated result with result_numsamples
class_probabilities = aggregated_results / aggregated_nb_of_predictions
if regions_class_order is None:
predicted_segmentation = class_probabilities.argmax(0)
else:
if all_in_gpu:
class_probabilities_here = class_probabilities.detach().cpu().numpy()
else:
class_probabilities_here = class_probabilities
predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[class_probabilities_here[i] > 0.5] = c
if all_in_gpu:
if verbose: print("copying results to CPU")
if regions_class_order is None:
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
class_probabilities = class_probabilities.detach().cpu().numpy()
if verbose: print("prediction done")
return predicted_segmentation, class_probabilities
def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
This one does fully convolutional inference. No sliding window
"""
assert len(x.shape) == 3, "x must be (c, x, y)"
assert self.get_device() != "cpu"
assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
'run _internal_predict_2D_2Dconv'
if verbose: print("do mirror:", do_mirroring)
data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
self.input_shape_must_be_divisible_by)
predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,
None)[0]
slicer = tuple(
[slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
(len(slicer) - 1))] + slicer[1:])
predicted_probabilities = predicted_probabilities[slicer]
if regions_class_order is None:
predicted_segmentation = predicted_probabilities.argmax(0)
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
else:
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[predicted_probabilities[i] > 0.5] = c
return predicted_segmentation, predicted_probabilities
def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,
mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
This one does fully convolutional inference. No sliding window
"""
assert len(x.shape) == 4, "x must be (c, x, y, z)"
assert self.get_device() != "cpu"
assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
'run _internal_predict_3D_3Dconv'
if verbose: print("do mirror:", do_mirroring)
data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
self.input_shape_must_be_divisible_by)
predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,
None)[0]
slicer = tuple(
[slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
(len(slicer) - 1))] + slicer[1:])
predicted_probabilities = predicted_probabilities[slicer]
if regions_class_order is None:
predicted_segmentation = predicted_probabilities.argmax(0)
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
else:
predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[predicted_probabilities[i] > 0.5] = c
return predicted_segmentation, predicted_probabilities
def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,
do_mirroring: bool = True,
mult: np.ndarray or torch.tensor = None) -> torch.tensor:
assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'
# everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
# we now return a cuda tensor! Not numpy array!
x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),
dtype=torch.float).cuda(self.get_device(), non_blocking=True)
if mult is not None:
mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())
if do_mirroring:
mirror_idx = 8
num_results = 2 ** len(mirror_axes)
else:
mirror_idx = 1
num_results = 1
for m in range(mirror_idx):
if m == 0:
pred = self(x)
result_torch += 1 / num_results * pred
if m == 1 and (2 in mirror_axes):
pred = self(torch.flip(x, (4, )))
result_torch += 1 / num_results * torch.flip(pred, (4,))
if m == 2 and (1 in mirror_axes):
pred = self(torch.flip(x, (3, )))
result_torch += 1 / num_results * torch.flip(pred, (3,))
if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):
pred = self(torch.flip(x, (4, 3)))
result_torch += 1 / num_results * torch.flip(pred, (4, 3))
if m == 4 and (0 in mirror_axes):
pred = self(torch.flip(x, (2, )))
result_torch += 1 / num_results * torch.flip(pred, (2,))
if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):
pred = self(torch.flip(x, (4, 2)))
result_torch += 1 / num_results * torch.flip(pred, (4, 2))
if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):
                pred = self(torch.flip(x, (3, 2)))
result_torch += 1 / num_results * torch.flip(pred, (3, 2))
if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):
pred = self(torch.flip(x, (4, 3, 2)))
result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))
if mult is not None:
result_torch[:, :] *= mult
return result_torch
def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,
do_mirroring: bool = True,
mult: np.ndarray or torch.tensor = None) -> torch.tensor:
# everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
# we now return a cuda tensor! Not numpy array!
assert len(x.shape) == 4, 'x must be (b, c, x, y)'
x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),
dtype=torch.float).cuda(self.get_device(), non_blocking=True)
if mult is not None:
mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())
if do_mirroring:
mirror_idx = 4
num_results = 2 ** len(mirror_axes)
else:
mirror_idx = 1
num_results = 1
for m in range(mirror_idx):
if m == 0:
pred = self.inference_apply_nonlin(self(x))
result_torch += 1 / num_results * pred
if m == 1 and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
result_torch += 1 / num_results * torch.flip(pred, (3, ))
if m == 2 and (0 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
result_torch += 1 / num_results * torch.flip(pred, (2, ))
if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
result_torch += 1 / num_results * torch.flip(pred, (3, 2))
if mult is not None:
result_torch[:, :] *= mult
return result_torch
def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
# better safe than sorry
assert len(x.shape) == 3, "x must be (c, x, y)"
assert self.get_device() != "cpu"
if verbose: print("step_size:", step_size)
if verbose: print("do mirror:", do_mirroring)
assert patch_size is not None, "patch_size cannot be None for tiled prediction"
# for sliding window inference the image must at least be as large as the patch size. It does not matter
# whether the shape is divisible by 2**num_pool as long as the patch size is
data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None)
data_shape = data.shape # still c, x, y
# compute the steps for sliding window
steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size)
num_tiles = len(steps[0]) * len(steps[1])
if verbose:
print("data shape:", data_shape)
print("patch size:", patch_size)
print("steps (x, y, and z):", steps)
print("number of tiles:", num_tiles)
# we only need to compute that once. It can take a while to compute this due to the large sigma in
# gaussian_filter
if use_gaussian and num_tiles > 1:
if self._gaussian_2d is None or not all(
[i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):
if verbose: print('computing Gaussian')
gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8)
self._gaussian_2d = gaussian_importance_map
self._patch_size_for_gaussian_2d = patch_size
else:
if verbose: print("using precomputed Gaussian")
gaussian_importance_map = self._gaussian_2d
gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
non_blocking=True)
else:
gaussian_importance_map = None
if all_in_gpu:
            # If we run the inference on GPU only (meaning all tensors are allocated on the GPU, which reduces
            # CPU-GPU communication but requires more GPU memory) we need to preallocate a few things on GPU
if use_gaussian and num_tiles > 1:
# half precision for the outputs should be good enough. If the outputs here are half, the
# gaussian_importance_map should be as well
gaussian_importance_map = gaussian_importance_map.half()
# make sure we did not round anything to 0
gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
gaussian_importance_map != 0].min()
add_for_nb_of_preds = gaussian_importance_map
else:
add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device())
if verbose: print("initializing result array (on GPU)")
aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
if verbose: print("moving data to GPU")
data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True)
if verbose: print("initializing result_numsamples (on GPU)")
aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
else:
if use_gaussian and num_tiles > 1:
add_for_nb_of_preds = self._gaussian_2d
else:
add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
for x in steps[0]:
lb_x = x
ub_x = x + patch_size[0]
for y in steps[1]:
lb_y = y
ub_y = y + patch_size[1]
predicted_patch = self._internal_maybe_mirror_and_pred_2D(
data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,
gaussian_importance_map)[0]
if all_in_gpu:
predicted_patch = predicted_patch.half()
else:
predicted_patch = predicted_patch.cpu().numpy()
aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch
aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y] += add_for_nb_of_preds
        # we reverse the padding here (remember that we padded the input to be at least as large as the patch size)
slicer = tuple(
[slice(0, aggregated_results.shape[i]) for i in
range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
aggregated_results = aggregated_results[slicer]
aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
# computing the class_probabilities by dividing the aggregated result with result_numsamples
class_probabilities = aggregated_results / aggregated_nb_of_predictions
if regions_class_order is None:
predicted_segmentation = class_probabilities.argmax(0)
else:
if all_in_gpu:
class_probabilities_here = class_probabilities.detach().cpu().numpy()
else:
class_probabilities_here = class_probabilities
predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[class_probabilities_here[i] > 0.5] = c
if all_in_gpu:
if verbose: print("copying results to CPU")
if regions_class_order is None:
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
class_probabilities = class_probabilities.detach().cpu().numpy()
if verbose: print("prediction done")
return predicted_segmentation, class_probabilities
def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(
x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pseudo3D_slices: int = 5, all_in_gpu: bool = False,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd"
extra_slices = (pseudo3D_slices - 1) // 2
shp_for_pad = np.array(x.shape)
shp_for_pad[1] = extra_slices
pad = np.zeros(shp_for_pad, dtype=np.float32)
data = np.concatenate((pad, x, pad), 1)
predicted_segmentation = []
softmax_pred = []
for s in range(extra_slices, data.shape[1] - extra_slices):
d = data[:, (s - extra_slices):(s + extra_slices + 1)]
d = d.reshape((-1, d.shape[-2], d.shape[-1]))
pred_seg, softmax_pres = \
self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,
regions_class_order, pad_border_mode, pad_kwargs, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), step_size: float = 0.5,
regions_class_order: tuple = None, use_gaussian: bool = False,
pad_border_mode: str = "edge", pad_kwargs: dict =None,
all_in_gpu: bool = False,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(
x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs, all_in_gpu, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
if __name__ == '__main__':
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.5))
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 1))
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.1))
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 1))
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 0.5))
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 1))
print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 0.125))
print(SegmentationNetwork._compute_steps_for_sliding_window((123, 54, 123), (246, 162, 369), 0.25))
| 52.681983
| 137
| 0.606959
|
32f3a24a5216cc73af31b49dd39f24450f7e9bd4
| 978
|
py
|
Python
|
packages/kdpart/package.py
|
hirschsn/spack-hirschsn
|
fbcb4ca03a3e3bca4006de4dcb6462edfef7bce3
|
[
"MIT"
] | null | null | null |
packages/kdpart/package.py
|
hirschsn/spack-hirschsn
|
fbcb4ca03a3e3bca4006de4dcb6462edfef7bce3
|
[
"MIT"
] | null | null | null |
packages/kdpart/package.py
|
hirschsn/spack-hirschsn
|
fbcb4ca03a3e3bca4006de4dcb6462edfef7bce3
|
[
"MIT"
] | null | null | null |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Kdpart(MakefilePackage):
"""Simple struct-of-arrays implementation of a k-d tree over a discrete
domain for partitioning regular grids amongst a set of processes."""
homepage = "https://github.com/hirschsn/kdpart"
url = "https://github.com/hirschsn/kdpart/archive/v1.0.1.tar.gz"
version('1.0.1', sha256='01997e73d12af149285d3a8683d8d53eb6da053b52be1be2c7c1fec8e3c68fd0')
depends_on('mpi')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
makefile.filter('CXX = .*', 'CXX = ' + spec['mpi'].mpicxx)
@property
def build_targets(self):
return ["libkdpart.so"]
@property
def install_targets(self):
return ["install", "PREFIX={}".format(self.prefix)]
| 30.5625
| 95
| 0.687117
|
04b746fec8b36475dc42da2437346164e10c9421
| 420
|
py
|
Python
|
2014/12/fc_2014_12_25.py
|
mfwarren/FreeCoding
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
[
"MIT"
] | null | null | null |
2014/12/fc_2014_12_25.py
|
mfwarren/FreeCoding
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
[
"MIT"
] | 1
|
2015-04-27T01:43:45.000Z
|
2015-04-27T01:43:45.000Z
|
2014/12/fc_2014_12_25.py
|
mfwarren/FreeCoding
|
58ac87f35ad2004a3514782556762ee0ed72c39a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# imports go here
from flask import Flask, render_template
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
#
# Free Coding session for 2014-12-25
# Written by Matt Warren
#
app = Flask(__name__)
manager = Manager(app)
bootstrap = Bootstrap(app)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
manager.run()
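# Note (added for context): Flask-Script's Manager wraps the app in a small CLI,
# so this script would typically be started with something like
# `python fc_2014_12_25.py runserver` (runserver and shell are the default commands).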
| 17.5
| 41
| 0.728571
|
76cec6f05bc17b958cfbc73e7480e6cc1b87e9e1
| 14,306
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/keyvault/_validators.py
|
changlong-liu/azure-cli
|
c4e796bbe7db0cef4ac34df8413067c2c0c98324
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/keyvault/_validators.py
|
changlong-liu/azure-cli
|
c4e796bbe7db0cef4ac34df8413067c2c0c98324
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/keyvault/_validators.py
|
changlong-liu/azure-cli
|
c4e796bbe7db0cef4ac34df8413067c2c0c98324
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
import base64
import binascii
from datetime import datetime
import re
from knack.util import CLIError
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.validators import validate_tags
secret_text_encoding_values = ['utf-8', 'utf-16le', 'utf-16be', 'ascii']
secret_binary_encoding_values = ['base64', 'hex']
def _extract_version(item_id):
return item_id.split('/')[-1]
def _get_resource_group_from_vault_name(cli_ctx, vault_name):
"""
Fetch resource group from vault name
:param str vault_name: name of the key vault
:return: resource group name or None
:rtype: str
"""
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
for vault in client.list():
id_comps = parse_resource_id(vault.id)
if 'name' in id_comps and id_comps['name'].lower() == vault_name.lower():
return id_comps['resource_group']
return None
# COMMAND NAMESPACE VALIDATORS
def process_certificate_cancel_namespace(namespace):
namespace.cancellation_requested = True
def process_secret_set_namespace(cmd, namespace):
validate_tags(namespace)
content = namespace.value
file_path = namespace.file_path
encoding = namespace.encoding
tags = namespace.tags or {}
use_error = CLIError("incorrect usage: [Required] --value VALUE | --file PATH")
if (content and file_path) or (not content and not file_path):
raise use_error
from azure.cli.core.profiles import ResourceType
SecretAttributes = cmd.get_models('SecretAttributes', resource_type=ResourceType.DATA_KEYVAULT)
namespace.secret_attributes = SecretAttributes()
if namespace.expires:
namespace.secret_attributes.expires = namespace.expires
if namespace.disabled:
namespace.secret_attributes.enabled = not namespace.disabled
if namespace.not_before:
namespace.secret_attributes.not_before = namespace.not_before
encoding = encoding or 'utf-8'
if file_path:
if encoding in secret_text_encoding_values:
with open(file_path, 'r') as f:
try:
content = f.read()
except UnicodeDecodeError:
raise CLIError("Unable to decode file '{}' with '{}' encoding.".format(
file_path, encoding))
encoded_str = content
encoded = content.encode(encoding)
decoded = encoded.decode(encoding)
elif encoding == 'base64':
with open(file_path, 'rb') as f:
content = f.read()
try:
encoded = base64.encodebytes(content)
except AttributeError:
encoded = base64.encodestring(content) # pylint: disable=deprecated-method
encoded_str = encoded.decode('utf-8')
decoded = base64.b64decode(encoded_str)
elif encoding == 'hex':
with open(file_path, 'rb') as f:
content = f.read()
encoded = binascii.b2a_hex(content)
encoded_str = encoded.decode('utf-8')
decoded = binascii.unhexlify(encoded_str)
if content != decoded:
raise CLIError("invalid encoding '{}'".format(encoding))
content = encoded_str
tags.update({'file-encoding': encoding})
namespace.tags = tags
namespace.value = content
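# Illustrative invocation (comment added; flag names inferred from the namespace
# attributes handled above): storing a binary file base64-encodes its contents
# and records the choice in a 'file-encoding' tag, e.g.
#
#   az keyvault secret set --vault-name myvault --name mysecret \
#       --file ./cert.pfx --encoding base64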
# PARAMETER NAMESPACE VALIDATORS
def get_attribute_validator(name, attribute_class, create=False):
def validator(ns):
ns_dict = ns.__dict__
enabled = not ns_dict.pop('disabled') if create else ns_dict.pop('enabled')
attributes = attribute_class(
enabled=enabled,
not_before=ns_dict.pop('not_before', None),
expires=ns_dict.pop('expires', None))
setattr(ns, '{}_attributes'.format(name), attributes)
return validator
def validate_key_import_source(ns):
byok_file = ns.byok_file
byok_string = ns.byok_string
pem_file = ns.pem_file
pem_string = ns.pem_string
pem_password = ns.pem_password
if len([arg for arg in [byok_file, byok_string, pem_file, pem_string] if arg]) != 1:
raise ValueError('supply exactly one: --byok-file, --byok-string, --pem-file, --pem-string')
if (byok_file or byok_string) and pem_password:
raise ValueError('--byok-file or --byok-string cannot be used with --pem-password')
if pem_password and not pem_file and not pem_string:
raise ValueError('--pem-password must be used with --pem-file or --pem-string')
def validate_key_type(ns):
crv = getattr(ns, 'curve', None)
kty = getattr(ns, 'kty', None) or ('EC' if crv else 'RSA')
protection = getattr(ns, 'protection', None)
if protection == 'hsm':
kty = kty if kty.endswith('-HSM') else kty + '-HSM'
elif protection == 'software':
if getattr(ns, 'byok_file', None):
raise CLIError('BYOK keys are hardware protected. Omit --protection')
if kty.endswith('-HSM'):
            raise CLIError('The key type {} is invalid for software protected keys. Omit --protection'.format(kty))
setattr(ns, 'kty', kty)
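# Editor's note (illustrative, not part of the original module): the mapping applied
# above, shown on a few hypothetical namespaces:
#   kty=None,      curve=None,    protection=None       -> kty='RSA'
#   kty=None,      curve='P-256', protection=None       -> kty='EC'
#   kty='RSA',     curve=None,    protection='hsm'      -> kty='RSA-HSM'
#   kty='RSA-HSM', protection='software'                -> CLIError (HSM key type with software protection)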
def validate_policy_permissions(ns):
key_perms = ns.key_permissions
secret_perms = ns.secret_permissions
cert_perms = ns.certificate_permissions
storage_perms = ns.storage_permissions
if not any([key_perms, secret_perms, cert_perms, storage_perms]):
raise argparse.ArgumentError(
None,
'specify at least one: --key-permissions, --secret-permissions, '
'--certificate-permissions --storage-permissions')
def validate_private_endpoint_connection_id(cmd, ns):
if ns.connection_id:
from azure.cli.core.util import parse_proxy_resource_id
result = parse_proxy_resource_id(ns.connection_id)
ns.resource_group_name = result['resource_group']
ns.vault_name = result['name']
ns.private_endpoint_connection_name = result['child_name_1']
if ns.vault_name and not ns.resource_group_name:
ns.resource_group_name = _get_resource_group_from_vault_name(cmd.cli_ctx, ns.vault_name)
if not all([ns.vault_name, ns.resource_group_name, ns.private_endpoint_connection_name]):
raise CLIError('incorrect usage: [--id ID | --name NAME --vault-name NAME]')
del ns.connection_id
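# Editor's note (illustrative, not part of the original module): a typical --id value
# parsed above has the proxy-resource shape (all names below are hypothetical):
#   /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<vault>/privateEndpointConnections/<connection>
# which yields resource_group=<rg>, name=<vault>, child_name_1=<connection>.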
def validate_principal(ns):
num_set = sum(1 for p in [ns.object_id, ns.spn, ns.upn] if p)
if num_set != 1:
raise argparse.ArgumentError(
None, 'specify exactly one: --object-id, --spn, --upn')
def validate_resource_group_name(cmd, ns):
"""
Populate resource_group_name, if not provided
"""
if not ns.resource_group_name:
vault_name = ns.vault_name
group_name = _get_resource_group_from_vault_name(cmd.cli_ctx, vault_name)
if group_name:
ns.resource_group_name = group_name
else:
msg = "The Resource 'Microsoft.KeyVault/vaults/{}' not found within subscription."
raise CLIError(msg.format(vault_name))
def validate_deleted_vault_name(cmd, ns):
"""
Validate a deleted vault name; populate or validate location and resource_group_name
"""
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id
vault_name = ns.vault_name
vault = None
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
# if the location is specified, use get_deleted rather than list_deleted
if ns.location:
vault = client.get_deleted(vault_name, ns.location)
id_comps = parse_resource_id(vault.properties.vault_id)
# otherwise, iterate through deleted vaults to find one with a matching name
else:
for v in client.list_deleted():
id_comps = parse_resource_id(v.properties.vault_id)
if id_comps['name'].lower() == vault_name.lower():
vault = v
ns.location = vault.properties.location
break
# if the vault was not found, throw an error
if not vault:
raise CLIError('No deleted vault was found with name ' + ns.vault_name)
setattr(ns, 'resource_group_name', getattr(ns, 'resource_group_name', None) or id_comps['resource_group'])
# resource_group_name must match the resource group of the deleted vault
if id_comps['resource_group'] != ns.resource_group_name:
raise CLIError("The specified resource group does not match that of the deleted vault %s. The vault "
"must be recovered to the original resource group %s."
% (vault_name, id_comps['resource_group']))
def validate_x509_certificate_chain(ns):
def _load_certificate_as_bytes(file_name):
cert_list = []
regex = r'-----BEGIN CERTIFICATE-----([^-]+)-----END CERTIFICATE-----'
with open(file_name, 'r') as f:
cert_data = f.read()
for entry in re.findall(regex, cert_data):
cert_list.append(base64.b64decode(entry.replace('\n', '')))
return cert_list
ns.x509_certificates = _load_certificate_as_bytes(ns.x509_certificates)
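# Editor's note (illustrative, not part of the original module): the regex above expects
# a plain PEM bundle, i.e. one or more concatenated blocks of the form
#   -----BEGIN CERTIFICATE-----
#   MIIC...base64 payload...
#   -----END CERTIFICATE-----
# and returns each payload DER-decoded as bytes.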
# ARGUMENT TYPES
def certificate_type(string):
""" Loads file and outputs contents as base64 encoded string. """
import os
try:
with open(os.path.expanduser(string), 'rb') as f:
cert_data = f.read()
return cert_data
except (IOError, OSError) as e:
raise CLIError("Unable to load certificate file '{}': {}.".format(string, e.strerror))
def datetime_type(string):
""" Validates UTC datettime in accepted format. Examples: 2017-12-31T01:11:59Z,
2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
'%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
return datetime.strptime(string, form)
except ValueError: # checks next format
pass
raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
def get_vault_base_url_type(cli_ctx):
suffix = cli_ctx.cloud.suffixes.keyvault_dns
def vault_base_url_type(name):
return 'https://{}{}'.format(name, suffix)
return vault_base_url_type
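# Editor's note (illustrative, not part of the original module): with the public Azure
# cloud the Key Vault DNS suffix is '.vault.azure.net', so the returned callable maps
# 'myvault' -> 'https://myvault.vault.azure.net' (the vault name is hypothetical;
# sovereign clouds substitute their own suffix).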
def _construct_vnet(cmd, resource_group_name, vnet_name, subnet_name):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=subnet_name)
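# Editor's note (illustrative, not part of the original module): for hypothetical inputs
# resource_group_name='rg1', vnet_name='vnet1', subnet_name='subnet1' the helper above
# builds an ARM resource ID of the form
#   /subscriptions/<sub>/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1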
def validate_subnet(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
subnet = namespace.subnet
subnet_is_id = is_valid_resource_id(subnet)
vnet = namespace.vnet_name
if (subnet_is_id and not vnet) or (not subnet and not vnet):
return
if subnet and not subnet_is_id and vnet:
namespace.subnet = _construct_vnet(cmd, namespace.resource_group_name, vnet, subnet)
else:
raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
def validate_vault_id(entity_type):
def _validate(ns):
from azure.keyvault.key_vault_id import KeyVaultIdentifier
pure_entity_type = entity_type.replace('deleted', '')
name = getattr(ns, pure_entity_type + '_name', None)
vault = getattr(ns, 'vault_base_url', None)
identifier = getattr(ns, 'identifier', None)
if identifier:
ident = KeyVaultIdentifier(uri=identifier, collection=entity_type + 's')
setattr(ns, pure_entity_type + '_name', ident.name)
setattr(ns, 'vault_base_url', ident.vault)
if hasattr(ns, pure_entity_type + '_version'):
setattr(ns, pure_entity_type + '_version', ident.version)
elif not (name and vault):
raise CLIError('incorrect usage: --id ID | --vault-name VAULT --name NAME [--version VERSION]')
return _validate
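# Editor's note (illustrative, not part of the original module): validate_vault_id('secret')
# accepts a data-plane identifier such as (names below are hypothetical)
#   https://myvault.vault.azure.net/secrets/mysecret/0123456789abcdef
# and splits it into vault_base_url='https://myvault.vault.azure.net',
# secret_name='mysecret' and, when present, secret_version='0123456789abcdef'.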
def validate_sas_definition_id(ns):
from azure.keyvault import StorageSasDefinitionId
acct_name = getattr(ns, 'storage_account_name', None)
sas_name = getattr(ns, 'sas_definition_name', None)
vault = getattr(ns, 'vault_base_url', None)
identifier = getattr(ns, 'identifier', None)
if identifier:
ident = StorageSasDefinitionId(uri=identifier)
setattr(ns, 'sas_definition_name', getattr(ident, 'sas_definition'))
setattr(ns, 'storage_account_name', getattr(ident, 'account_name'))
setattr(ns, 'vault_base_url', ident.vault)
elif not (acct_name and sas_name and vault):
raise CLIError('incorrect usage: --id ID | --vault-name VAULT --account-name --name NAME')
def validate_storage_account_id(ns):
from azure.keyvault import StorageAccountId
acct_name = getattr(ns, 'storage_account_name', None)
vault = getattr(ns, 'vault_base_url', None)
identifier = getattr(ns, 'identifier', None)
if identifier:
ident = StorageAccountId(uri=identifier)
setattr(ns, 'storage_account_name', ident.name)
setattr(ns, 'vault_base_url', ident.vault)
elif not (acct_name and vault):
raise CLIError('incorrect usage: --id ID | --vault-name VAULT --name NAME')
def validate_storage_disabled_attribute(attr_arg_name, attr_type):
def _validate(ns):
disabled = getattr(ns, 'disabled', None)
attr_arg = attr_type(enabled=(not disabled))
setattr(ns, attr_arg_name, attr_arg)
return _validate
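# Editor's note (illustrative sketch, not part of the original module): the factory above
# simply inverts a --disabled flag into an attributes object. A minimal sketch with a
# stand-in attribute class (hypothetical, not the real SDK model):
#
#   class FakeAttrs:                      # stand-in for an azure.keyvault attributes model
#       def __init__(self, enabled):
#           self.enabled = enabled
#   ns = argparse.Namespace(disabled=True)
#   validate_storage_disabled_attribute('sas_definition_attributes', FakeAttrs)(ns)
#   assert ns.sas_definition_attributes.enabled is False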
| 37.158442
| 110
| 0.66217
|
804c764fd2a0e3b39fe2317106a18bfaff40abc2
| 6,663
|
py
|
Python
|
bot.py
|
le-sanglier/reddit_bots
|
6e45914b7156348383dc585915b3f0b9d6f029c9
|
[
"MIT"
] | null | null | null |
bot.py
|
le-sanglier/reddit_bots
|
6e45914b7156348383dc585915b3f0b9d6f029c9
|
[
"MIT"
] | null | null | null |
bot.py
|
le-sanglier/reddit_bots
|
6e45914b7156348383dc585915b3f0b9d6f029c9
|
[
"MIT"
] | null | null | null |
import praw
import re
import random
import os
import threading
#TODO find blacklisted sites, test, change 'crypto_cust_service' to submission.author and comment.author.name, test
def comment_loop():
for comment in subreddit.stream.comments():
for blink in blacklist:
normalized_comment = comment.body.lower()
normalized_link = blink.lower()
if normalized_link in normalized_comment:
if comment.author in warned_users:
print("comment ban")
print(normalized_comment)
print("----------------------------------------------------------")
#ban
#reddit.redditor('crypto_cust_service').message('Ban for linking a blacklisted website', 'You have posted a link to a blacklisted website, and this is not your first offence. You have been banned for X.')
else:
print("comment warning")
print(normalized_comment)
print("----------------------------------------------------------")
with open("warned_users.txt", "a") as f:
f.write(comment.author.name + "\n")
warned_users.append(comment.author.name)
#reddit.redditor('crypto_cust_service').message('Blacklisted link warning', 'This is your one and only warning for posting links to blacklisted websites on r/cryptocurrency. You can find a list of blacklisted websites here. Another violation of this rule will result in a ban.')
# if re.search("Blacklisted sites", comment.body, re.IGNORECASE):
# if comment.author.name in warned_users:
# #ban
# reddit.redditor('crypto_cust_service').message('Ban for linking a blacklisted website', 'You have posted a link to a blacklisted website, and this is not your first offence. You have been banned for X.')
# else:
# with open("warned_users.txt", "a") as f:
# f.write(comment.author.name + "\n")
# warned_users.append(comment.author.name)
# reddit.redditor('crypto_cust_service').message('Blacklisted link warning', 'This is your one and only warning for posting links to blacklisted websites on r/cryptocurrency. You can find a list of blacklisted websites here. Another violation of this rule will result in a ban.'
def submission_loop():
for submission in subreddit.stream.submissions():
for blink in blacklist:
normalized_submission_body = submission.selftext.lower()
normalized_submission_title = submission.title.lower()
normalized_link = blink.lower()
if normalized_link in normalized_submission_body:
if submission.author in warned_users:
print("submission ban")
print(normalized_submission_body)
print("----------------------------------------------------------")
#ban
#reddit.redditor('crypto_cust_service').message('Ban for linking a blacklisted website', 'You have posted a link to a blacklisted website, and this is not your first offence. You have been banned for X.')
else:
print("submission warning")
print(normalized_submission_body)
print("----------------------------------------------------------")
with open("warned_users.txt", "a") as f:
                        f.write(submission.author.name + "\n")
                    warned_users.append(submission.author.name)
#reddit.redditor('crypto_cust_service').message('Blacklisted link warning', 'This is your one and only warning for posting links to blacklisted websites on r/cryptocurrency. You can find a list of blacklisted websites here. Another violation of this rule will result in a ban.')
if normalized_link in normalized_submission_title:
if submission.author in warned_users:
print("submission ban")
print(normalized_submission_title)
print("----------------------------------------------------------")
#ban
#reddit.redditor('crypto_cust_service').message('Ban for linking a blacklisted website', 'You have posted a link to a blacklisted website, and this is not your first offence. You have been banned for X.')
else:
print("submission warning")
print(normalized_submission_title)
print("----------------------------------------------------------")
with open("warned_users.txt", "a") as f:
                        f.write(submission.author.name + "\n")
                    warned_users.append(submission.author.name)
#reddit.redditor('crypto_cust_service').message('Blacklisted link warning', 'This is your one and only warning for posting links to blacklisted websites on r/cryptocurrency. You can find a list of blacklisted websites here. Another violation of this rule will result in a ban.')
# if submission.author in warned_users:
# #ban
# reddit.redditor('crypto_cust_service').message('Ban for linking a blacklisted website', 'You have posted a link to a blacklisted website, and this is not your first offence. You have been banned for X.')
# else:
# with open("warned_users.txt", "a") as f:
# f.write(submission.author + "\n")
# warned_users.append(submission.author)
# reddit.redditor('crypto_cust_service').message('Blacklisted link warning', 'This is your one and only warning for posting links to blacklisted websites on r/cryptocurrency. You can find a list of blacklisted websites here. Another violation of this rule will result in a ban.'
reddit = praw.Reddit('TESTBOT')
subreddit = reddit.subreddit("cryptocurrency")
# load previously warned users and the site blacklist from disk
if not os.path.isfile("warned_users.txt"):
warned_users = []
else:
with open("warned_users.txt", "r") as f:
warned_users = f.read()
warned_users = warned_users.split("\n")
warned_users = list(filter(None, warned_users))
with open("blacklisted_sites.txt", "r") as f:
blacklist = f.read()
blacklist = blacklist.split("\n")
blacklist = list(filter(None, blacklist))
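# Editor's note (illustrative, not part of the original script): both files are assumed
# to be plain newline-separated lists, e.g.
#   warned_users.txt         blacklisted_sites.txt
#   some_user                scamsite.example
#   another_user             phishy.example
# (all usernames/domains above are hypothetical placeholders).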
p1 = threading.Thread(target=comment_loop)
p2 = threading.Thread(target=submission_loop)
p1.start()
p2.start()
| 61.12844
| 300
| 0.590575
|
629c19c16462684c16e91d4000973405610399c3
| 2,939
|
py
|
Python
|
torchvision/ops/_register_onnx_ops.py
|
yassineAlouini/vision-1
|
ee26e9c260a255e2afb5e691e713349529170c8b
|
[
"BSD-3-Clause"
] | 1
|
2022-02-14T09:16:02.000Z
|
2022-02-14T09:16:02.000Z
|
torchvision/ops/_register_onnx_ops.py
|
yassineAlouini/vision-1
|
ee26e9c260a255e2afb5e691e713349529170c8b
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/ops/_register_onnx_ops.py
|
yassineAlouini/vision-1
|
ee26e9c260a255e2afb5e691e713349529170c8b
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import warnings
import torch
_onnx_opset_version = 11
def _register_custom_op():
from torch.onnx.symbolic_helper import parse_args
from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze
from torch.onnx.symbolic_opset9 import _cast_Long
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
boxes = unsqueeze(g, boxes, 0)
scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
nms_out = g.op("NonMaxSuppression", boxes, scores, max_output_per_class, iou_threshold)
return squeeze(g, select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1)
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _cast_Long(
g, squeeze(g, select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1), False
)
rois = select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
# TODO: Remove this warning after ONNX opset 16 is supported.
if aligned:
warnings.warn(
"ROIAlign with aligned=True is not supported in ONNX, but will be supported in opset 16. "
"The workaround is that the user need apply the patch "
"https://github.com/microsoft/onnxruntime/pull/8564 "
"and build ONNXRuntime from source."
)
# ONNX doesn't support negative sampling_ratio
if sampling_ratio < 0:
warnings.warn(
"ONNX doesn't support negative sampling ratio, therefore is set to 0 in order to be exported."
)
sampling_ratio = 0
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i")
def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
roi_pool = g.op(
"MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
)
return roi_pool, None
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _onnx_opset_version)
register_custom_op_symbolic("torchvision::roi_align", roi_align, _onnx_opset_version)
register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _onnx_opset_version)
| 43.865672
| 118
| 0.650902
|