Dataset schema:

| Column | Type | Observed range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
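The derived columns avg_line_length, max_line_length, and alphanum_fraction look like plain text statistics over content. The sketch below is a hypothetical reconstruction of how such values could be computed; the exact formulas used to build this dataset are not documented here, so the line splitting, the alphanumeric test, and the averaging are all assumptions, and derived_stats is an illustrative helper rather than part of the dataset tooling.

```python
# Hedged sketch: one plausible way the derived columns could be computed
# from a file's text. Every definition here is an assumption.

def derived_stats(content: str) -> dict:
    # Split into lines and measure each one.
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines] or [0]
    # Count alphanumeric characters across the whole file.
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        # Fraction of characters that are alphanumeric (assumed definition).
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }

if __name__ == "__main__":
    sample = "from setuptools import setup\nsetup(\n    name='hx711',\n)\n"
    print(derived_stats(sample))
```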
hexsha: 400708af3eb122277da5836ea70cf914223ea9b3 | size: 1,387 | ext: py | lang: Python
max_stars: src/c3_price/main.py in KlimaDAO/discord-bots @ 96fefab99e27abccd0b2d5c9d3812f8ad60e94d6, licenses ["MIT"], count 2, events 2022-01-24T19:58:44.000Z to 2022-03-07T20:16:52.000Z
max_issues: src/c3_price/main.py in KlimaDAO/discord-bots @ 96fefab99e27abccd0b2d5c9d3812f8ad60e94d6, licenses ["MIT"], count 8, events 2021-11-29T19:38:01.000Z to 2022-03-29T19:19:39.000Z
max_forks: src/c3_price/main.py in KlimaDAO/discord-bots @ 96fefab99e27abccd0b2d5c9d3812f8ad60e94d6, licenses ["MIT"], count 12, events 2021-10-06T20:23:08.000Z to 2022-03-31T23:51:03.000Z
content:
import os
from discord.ext import tasks
from ..constants import C3_ADDRESS, FRAX_DECIMALS, \
C3_DECIMALS, FRAX_C3_POOL
from ..contract_info import token_supply, uni_v2_pool_price
from ..utils import get_discord_client, get_eth_web3, \
get_polygon_web3, load_abi, \
update_nickname, update_presence, \
prettify_number
BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"]
# Initialized Discord client
client = get_discord_client()
# Initialize web3
web3 = get_polygon_web3()
web3_eth = get_eth_web3()
# Load ABI
c3_abi = load_abi('erc20_token.json')
@client.event
@tasks.loop(seconds=300)
client.run(BOT_TOKEN)
avg_line_length: 26.169811 | max_line_length: 78 | alphanum_fraction: 0.674838
original_content:
import os
from discord.ext import tasks
from ..constants import C3_ADDRESS, FRAX_DECIMALS, \
C3_DECIMALS, FRAX_C3_POOL
from ..contract_info import token_supply, uni_v2_pool_price
from ..utils import get_discord_client, get_eth_web3, \
get_polygon_web3, load_abi, \
update_nickname, update_presence, \
prettify_number
BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"]
# Initialized Discord client
client = get_discord_client()
# Initialize web3
web3 = get_polygon_web3()
web3_eth = get_eth_web3()
# Load ABI
c3_abi = load_abi('erc20_token.json')
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
if not update_info.is_running():
update_info.start()
@tasks.loop(seconds=300)
async def update_info():
price = uni_v2_pool_price(web3, FRAX_C3_POOL, FRAX_DECIMALS - C3_DECIMALS)
supply = token_supply(web3, C3_ADDRESS, c3_abi, C3_DECIMALS)
if price is not None and supply is not None:
price_text = f'${price:,.3f} C3'
print(price_text)
success = await update_nickname(client, price_text)
if not success:
return
supply_text = f'Supply: {prettify_number(supply)}'
success = await update_presence(client, supply_text)
if not success:
return
client.run(BOT_TOKEN)
filtered:remove_function_no_docstring: 647 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 44

hexsha: 1d1b2e85d0c1358607cd2dd62f5bab1dffbce3a4 | size: 191 | ext: py | lang: Python
max_stars: setup.py in kempei/hx711py @ 1256669450f757e46c859c8b21b40e278f31fef3, licenses ["Apache-2.0"], count 1, events 2022-03-24T13:50:48.000Z to 2022-03-24T13:50:48.000Z
max_issues: setup.py in kempei/hx711py-jetsonnano @ 1256669450f757e46c859c8b21b40e278f31fef3, licenses ["Apache-2.0"], count null, events null
max_forks: setup.py in kempei/hx711py-jetsonnano @ 1256669450f757e46c859c8b21b40e278f31fef3, licenses ["Apache-2.0"], count null, events null
content:
from setuptools import setup
setup(
name='hx711',
version='0.1',
description='HX711 Python Library for Jetson Nano',
py_modules=['hx711'],
install_requires=['logzero']
)
avg_line_length: 19.1 | max_line_length: 55 | alphanum_fraction: 0.670157
original_content:
from setuptools import setup
setup(
name='hx711',
version='0.1',
description='HX711 Python Library for Jetson Nano',
py_modules=['hx711'],
install_requires=['logzero']
)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 2b8e9a78122675bae02f4d3f6416c2c2d0b63adb | size: 168 | ext: py | lang: Python
max_stars: snippets/delete_from_multi_tables.py in hit9/skylark @ 5b7a14e401196e025117b095a7c5e68e551e547a, licenses ["BSD-2-Clause", "MIT"], count 114, events 2015-01-06T06:12:30.000Z to 2021-08-25T06:17:05.000Z
max_issues: snippets/delete_from_multi_tables.py in keng-king/skylark @ 5b7a14e401196e025117b095a7c5e68e551e547a, licenses ["BSD-2-Clause", "MIT"], count 10, events 2015-03-23T17:05:13.000Z to 2017-03-24T11:50:18.000Z
max_forks: snippets/delete_from_multi_tables.py in keng-king/skylark @ 5b7a14e401196e025117b095a7c5e68e551e547a, licenses ["BSD-2-Clause", "MIT"], count 59, events 2015-01-21T14:56:23.000Z to 2021-09-05T01:24:37.000Z
content:
from models import User, Post
# delete user from post, user where post.user_id = user.id
query = (Post & User).delete(User) # mysql supports; sqlite3 dosenot support
avg_line_length: 33.6 | max_line_length: 77 | alphanum_fraction: 0.744048
original_content:
from models import User, Post
# delete user from post, user where post.user_id = user.id
query = (Post & User).delete(User) # mysql supports; sqlite3 dosenot support
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: b0e41ddb7c1863a92a19e81a28f9239f50f770ff | size: 868 | ext: py | lang: Python
max_stars: Semproject/Courseoutline/choices.py in sllash2000/CourseOutline @ d44a2a7d49695d988f8ffa6eb5407011029ece25, licenses ["MIT"], count null, events null
max_issues: Semproject/Courseoutline/choices.py in sllash2000/CourseOutline @ d44a2a7d49695d988f8ffa6eb5407011029ece25, licenses ["MIT"], count null, events null
max_forks: Semproject/Courseoutline/choices.py in sllash2000/CourseOutline @ d44a2a7d49695d988f8ffa6eb5407011029ece25, licenses ["MIT"], count null, events null
content:
FacultyNameChoices = [
('FAH','Arts And Humanities'),('FBA','Business Admin'),
('FEd','Education'),('FRS','Religous Studies'),('FOS','Science'),
('FIT','Information Technology'),('FON','Nursing')]
CourseCategory = [
('Core Course','Core Course'),('General Education Course','General Education Course'),
('Major Required Course','Major Required Course'),('Major Elective Course','Major Elective Course')
]
HoursChoices = [
(1,'1'),(2,'2'),(3,'3'),(4,'4')
]
Credits = [
('1','1'),('2','2'),('3','3'),('4','4')
]
ResourcesTypes = [
('Text Book','Text Book'),('Internet Resources','Internet Resources'),
('Research Paper','Reserach Paper')
]
EvaluationTypes = [
('Assignments','Assignments'),('Quizzes','Quizzes'),('Attendance','Attendance'),
('Midterm Exam','Midterm Exam'),('Final Exam','Final Exam'),('Projects','Projects')
]
avg_line_length: 39.454545 | max_line_length: 103 | alphanum_fraction: 0.615207
original_content:
FacultyNameChoices = [
('FAH','Arts And Humanities'),('FBA','Business Admin'),
('FEd','Education'),('FRS','Religous Studies'),('FOS','Science'),
('FIT','Information Technology'),('FON','Nursing')]
CourseCategory = [
('Core Course','Core Course'),('General Education Course','General Education Course'),
('Major Required Course','Major Required Course'),('Major Elective Course','Major Elective Course')
]
HoursChoices = [
(1,'1'),(2,'2'),(3,'3'),(4,'4')
]
Credits = [
('1','1'),('2','2'),('3','3'),('4','4')
]
ResourcesTypes = [
('Text Book','Text Book'),('Internet Resources','Internet Resources'),
('Research Paper','Reserach Paper')
]
EvaluationTypes = [
('Assignments','Assignments'),('Quizzes','Quizzes'),('Attendance','Attendance'),
('Midterm Exam','Midterm Exam'),('Final Exam','Final Exam'),('Projects','Projects')
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 960ae6c2cee95810d30359e79da59c8a97171c61 | size: 894 | ext: py | lang: Python
max_stars: diagnose.py in youngdon95/start_over @ 48352c42752f6dd24c82250f57c5ee1434352688, licenses ["MIT"], count null, events null
max_issues: diagnose.py in youngdon95/start_over @ 48352c42752f6dd24c82250f57c5ee1434352688, licenses ["MIT"], count null, events null
max_forks: diagnose.py in youngdon95/start_over @ 48352c42752f6dd24c82250f57c5ee1434352688, licenses ["MIT"], count null, events null
content:
import xlrd
from datetime import date
from datetime import datetime
import random
workbook = xlrd.open_workbook('diagnose.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
file = open("diagnoseInsert.txt","w")
empNum = 2
for x in range(0, 30884):
year = random.choice(range(1990, 2018))
month = random.choice(range(1, 13))
day = random.choice(range(1, 29))
disease=worksheet.cell(x, 0).value
pid=worksheet.cell(x, 1).value
pid = int(pid)
pidStr = str(pid)
disease=disease.encode('utf-8')
pidStr=pidStr.encode('utf-8')
date_diagnosed = date(year,month, day)
eid = "Doc" + str(empNum)
patient = "P-" + str(pidStr)
file.write("Insert into diagnose" +" " + "values ('"+(eid)+"', '"+(disease)+"','"+(patient)+"','"+str(date_diagnosed)+"');\n")
if empNum <30:
empNum += 2
else: empNum = 2
file.close()
avg_line_length: 26.294118 | max_line_length: 130 | alphanum_fraction: 0.619687
original_content:
import xlrd
from datetime import date
from datetime import datetime
import random
workbook = xlrd.open_workbook('diagnose.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
file = open("diagnoseInsert.txt","w")
empNum = 2
for x in range(0, 30884):
year = random.choice(range(1990, 2018))
month = random.choice(range(1, 13))
day = random.choice(range(1, 29))
disease=worksheet.cell(x, 0).value
pid=worksheet.cell(x, 1).value
pid = int(pid)
pidStr = str(pid)
disease=disease.encode('utf-8')
pidStr=pidStr.encode('utf-8')
date_diagnosed = date(year,month, day)
eid = "Doc" + str(empNum)
patient = "P-" + str(pidStr)
file.write("Insert into diagnose" +" " + "values ('"+(eid)+"', '"+(disease)+"','"+(patient)+"','"+str(date_diagnosed)+"');\n")
if empNum <30:
empNum += 2
else: empNum = 2
file.close()
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 60a92fc32f9436c820e1f78c7327d9e09aea64ef | size: 881 | ext: py | lang: Python
max_stars: day9/student_sqlcloud.py in dikshaa1702/ml @ c35f279b8fa7544517ca713c2c1e55f08270d4c3, licenses ["Apache-2.0"], count 1, events 2019-06-13T13:52:09.000Z to 2019-06-13T13:52:09.000Z
max_issues: day9/student_sqlcloud.py in dikshaa1702/ml @ c35f279b8fa7544517ca713c2c1e55f08270d4c3, licenses ["Apache-2.0"], count null, events null
max_forks: day9/student_sqlcloud.py in dikshaa1702/ml @ c35f279b8fa7544517ca713c2c1e55f08270d4c3, licenses ["Apache-2.0"], count null, events null
content:
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 11:50:16 2019
@author: DiPu
"""
import mysql.connector
con=mysql.connector.connect(user="diksha",password='diksha1702',
host='db4free.net',database='diksha')
c=con.cursor()
c.execute("""CREATE TABLE student(
name text,
age int,
roll_no int,
branch text
)""")
c.execute("INSERT INTO student VALUES ('nEHA',21, 18, 'cs')")
c.execute("INSERT INTO student VALUES ('PRATIK',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('pooja',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('smita',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('saurav',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('gaurav',21, 18, 'ee')")
c.execute("INSERT INTO student VALUES ('Ria',21, 18, 'ee')")
c.execute("SELECT * FROM student")
print ( c.fetchall() )
avg_line_length: 29.366667 | max_line_length: 65 | alphanum_fraction: 0.624291
original_content:
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 11:50:16 2019
@author: DiPu
"""
import mysql.connector
con=mysql.connector.connect(user="diksha",password='diksha1702',
host='db4free.net',database='diksha')
c=con.cursor()
c.execute("""CREATE TABLE student(
name text,
age int,
roll_no int,
branch text
)""")
c.execute("INSERT INTO student VALUES ('nEHA',21, 18, 'cs')")
c.execute("INSERT INTO student VALUES ('PRATIK',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('pooja',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('smita',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('saurav',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('gaurav',21, 18, 'ee')")
c.execute("INSERT INTO student VALUES ('Ria',21, 18, 'ee')")
c.execute("SELECT * FROM student")
print ( c.fetchall() )
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 2ceb31c77a59667b68c5c12e1c7cd4b434dda44e | size: 1,494 | ext: py | lang: Python
max_stars: 16. 3Sum Closest.py in XinchaoGou/MyLeetCode @ bba0ab077374f7da2cb1a990266bc59fa7ddf23c, licenses ["MIT"], count null, events null
max_issues: 16. 3Sum Closest.py in XinchaoGou/MyLeetCode @ bba0ab077374f7da2cb1a990266bc59fa7ddf23c, licenses ["MIT"], count null, events null
max_forks: 16. 3Sum Closest.py in XinchaoGou/MyLeetCode @ bba0ab077374f7da2cb1a990266bc59fa7ddf23c, licenses ["MIT"], count null, events null
content:
from typing import List
avg_line_length: 36.439024 | max_line_length: 98 | alphanum_fraction: 0.436412
original_content:
from typing import List
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
length = len(nums)
resSum3 = float("inf")
res = 0
for p1 in range(length):
p3 = length - 1
for p2 in range(p1+1, length):
while p2 < p3 and nums[p1] + nums[p2] + nums[p3] > target:
p3 -= 1
if p2 < p3 and abs(nums[p1] + nums[p2] + nums[p3] - target) < resSum3:
res = nums[p1] + nums[p2] + nums[p3]
resSum3 = abs(res - target)
if p3 + 1 < length and abs(nums[p1] + nums[p2] + nums[p3 + 1] - target) < resSum3:
res = nums[p1] + nums[p2] + nums[p3+1]
resSum3 = abs(res - target)
return res
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
length = len(nums)
resSum3 = float("inf")
res = 0
for p1 in range(length):
p3 = length - 1
p2 = p1 +1
while p2 <p3:
sum3 = nums[p1] + nums[p2] + nums[p3]
if sum3 == target:
return target
if abs(sum3-target) < resSum3:
resSum3 = abs(sum3-target)
res = sum3
if sum3 > target:
p3 -= 1
else:
p2 += 1
return res
filtered:remove_function_no_docstring: 1,386 | filtered:remove_class_no_docstring: -12 | filtered:remove_delete_markers: 97

hexsha: 7e88360483551114fdce570e13880bcbf47a3724 | size: 5,775 | ext: py | lang: Python
max_stars: Optic/WrapperBl2Seq.py in CGATOxford/Optic @ 2df92e953b5139ff4e5c383cb4383e6367cd47f1, licenses ["MIT"], count null, events null
max_issues: Optic/WrapperBl2Seq.py in CGATOxford/Optic @ 2df92e953b5139ff4e5c383cb4383e6367cd47f1, licenses ["MIT"], count null, events null
max_forks: Optic/WrapperBl2Seq.py in CGATOxford/Optic @ 2df92e953b5139ff4e5c383cb4383e6367cd47f1, licenses ["MIT"], count 1, events 2020-03-31T22:55:50.000Z to 2020-03-31T22:55:50.000Z
content:
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
WrapperBl2Seq.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import os
import sys
import string
import tempfile
import subprocess
from CGAT import Experiment as Experiment
from CGAT import FastaIterator as FastaIterator
if __name__ == "__main__":
parser = E.OptionParser(
version="%prog version: $Id: WrapperBl2Seq.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-f", "--input-file", dest="input_filename", type="string",
help="input filename. If '-', stdin is used [default=%default].",
metavar="FILE")
parser.add_option("-o", "--output-file", dest="output_filename", type="string",
help="output filename for codon usage. If '-', output is stdout [default=%default].",
metavar="FILE")
parser.add_option("-e", "--error-file", dest="error_filename", type="string",
help="output filename for error messages. If '-', output is stderr [default=%default].",
metavar="FILE")
parser.set_defaults(
input_filename="-",
output_filename="-",
error_filename="/dev/null",
)
(options, args) = Experiment.Start(parser)
wrapper = Bl2Seq()
if options.input_filename == "-":
file_stdin = sys.stdin
else:
file_stdin = open(options.input_filename, "r")
if options.output_filename:
if options.output_filename == "-":
file_stdout = sys.stdout
else:
file_stdout = open(options.output_filename, "w")
if options.error_filename:
if options.error_filename == "-":
file_stderr = sys.stderr
else:
file_stderr = open(options.error_filename, "w")
wrapper.RunOnFile(file_stdin, file_stdout, file_stderr)
if file_stdin and file_stdin != sys.stdin:
file_stdin.close()
if file_stdout and file_stdout != sys.stdout:
file_stdout.close()
if file_stderr and file_stderr != sys.stderr:
file_stderr.close()
Experiment.Stop()
avg_line_length: 29.020101 | max_line_length: 110 | alphanum_fraction: 0.571602
original_content:
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
WrapperBl2Seq.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import os
import sys
import string
import tempfile
import subprocess
from CGAT import Experiment as Experiment
from CGAT import FastaIterator as FastaIterator
class Bl2SeqError(Exception):
pass
class Bl2Seq:
mOptions = ""
mExecutable = "bl2seq"
mStderr = sys.stderr
def __init__(self, options=""):
self.mOptions = options
def CreateTemporaryFiles(self):
"""create temporary files."""
self.mTempDirectory = tempfile.mkdtemp()
self.mFilenameTempInput = self.mTempDirectory + "/input"
self.mFilenameTempOutput = self.mTempDirectory + "/output"
def DeleteTemporaryFiles(self):
"""clean up."""
os.remove(self.mFilenameTempInput)
os.remove(self.mFilenameTempOutput)
os.rmdir(self.mTempDirectory)
def SetStderr(self, file=None):
"""set file for dumping stderr."""
self.mStderr = file
def WriteOutput(self, lines, filename_output=None):
"""write output to file.
If file is not given, lines are written to stdout.
"""
if filename_output:
outfile = open(filename_output, "w")
else:
outfile = sys.stdout
outfile.write(string.join(lines, ""))
if filename_output:
outfile.close()
def ParseResult(self, trace_file=None, information_file=None):
result = AdaptiveCAIResult()
result.Read(trace_file, information_file)
return result
def RunOnFile(self, infile, outfile, errfile):
self.CreateTemporaryFiles()
statement = string.join((self.mExecutable,
self.mFilenameTempInput,
self.mFilenameTempOutput),
" ")
i = FastaIterator.FastaIterator(infile)
outfile.write("GENE\tBl2Seq\n")
while 1:
f = i.next()
if f is None:
break
file = open(self.mFilenameTempInput, "w")
file.write(">%s\n%s" % (f.title, f.sequence))
file.close()
s = subprocess.Popen(statement,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.mTempDirectory,
close_fds=True)
(out, err) = s.communicate()
if s.returncode != 0:
raise Bl2SeqError, "Error in calculating Bl2Seq\n%s" % err
d = open(self.mFilenameTempOutput).readlines()[2][:-1]
enc = d.split(" ")[2]
outfile.write((string.join((f.title, enc), "\t")) + "\n")
errfile.write(err)
self.DeleteTemporaryFiles()
if __name__ == "__main__":
parser = E.OptionParser(
version="%prog version: $Id: WrapperBl2Seq.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-f", "--input-file", dest="input_filename", type="string",
help="input filename. If '-', stdin is used [default=%default].",
metavar="FILE")
parser.add_option("-o", "--output-file", dest="output_filename", type="string",
help="output filename for codon usage. If '-', output is stdout [default=%default].",
metavar="FILE")
parser.add_option("-e", "--error-file", dest="error_filename", type="string",
help="output filename for error messages. If '-', output is stderr [default=%default].",
metavar="FILE")
parser.set_defaults(
input_filename="-",
output_filename="-",
error_filename="/dev/null",
)
(options, args) = Experiment.Start(parser)
wrapper = Bl2Seq()
if options.input_filename == "-":
file_stdin = sys.stdin
else:
file_stdin = open(options.input_filename, "r")
if options.output_filename:
if options.output_filename == "-":
file_stdout = sys.stdout
else:
file_stdout = open(options.output_filename, "w")
if options.error_filename:
if options.error_filename == "-":
file_stderr = sys.stderr
else:
file_stderr = open(options.error_filename, "w")
wrapper.RunOnFile(file_stdin, file_stdout, file_stderr)
if file_stdin and file_stdin != sys.stdin:
file_stdin.close()
if file_stdout and file_stdout != sys.stdout:
file_stdout.close()
if file_stderr and file_stderr != sys.stderr:
file_stderr.close()
Experiment.Stop()
filtered:remove_function_no_docstring: 1,489 | filtered:remove_class_no_docstring: 1,103 | filtered:remove_delete_markers: 46

hexsha: 13088a8ac517f64c48d773cb8a5a92fb091eb78b | size: 1,728 | ext: py | lang: Python
max_stars: src/prefect/cli/__init__.py in jamestwebber/prefect @ 410c4ac37d2595ab61007742883687f5e284821d, licenses ["Apache-2.0"], count null, events null
max_issues: src/prefect/cli/__init__.py in jamestwebber/prefect @ 410c4ac37d2595ab61007742883687f5e284821d, licenses ["Apache-2.0"], count null, events null
max_forks: src/prefect/cli/__init__.py in jamestwebber/prefect @ 410c4ac37d2595ab61007742883687f5e284821d, licenses ["Apache-2.0"], count null, events null
content:
#!/usr/bin/env python
import click
import json
import logging
import os
import requests
import sys
import prefect
@click.group()
def cli():
"""
The Prefect CLI
"""
pass
@cli.command()
def make_user_config():
"""
Generates a user configuration file
"""
user_config_path = prefect.config.get("user_config_path")
if not user_config_path:
raise ValueError("No user config path set!")
elif os.path.isfile(user_config_path):
raise ValueError("A file already exists at {}".format(user_config_path))
os.makedirs(os.path.dirname(user_config_path), exist_ok=True)
with open(user_config_path, "w") as user_config:
user_config.write(
"# This is a user configuration file.\n"
"# Settings placed here will overwrite Prefect's defaults."
)
click.secho("Config created at {}".format(user_config_path), fg="green")
@cli.command()
@click.argument("environment_file", type=click.Path(exists=True))
@click.option("--runner_kwargs", default={})
def run(environment_file, runner_kwargs):
"""
Run a flow from an environment file.
"""
schema = prefect.serialization.environment.EnvironmentSchema()
with open(environment_file, "r") as f:
environment = schema.load(json.load(f))
click.echo(environment.run(runner_kwargs=runner_kwargs))
@cli.command()
@click.argument("environment_metadata")
def create_environment(environment_metadata):
"""
Call the setup and execute functions for a given environment.
"""
schema = prefect.serialization.environment.EnvironmentSchema()
environment = schema.load(json.loads(environment_metadata))
environment.setup()
environment.execute()
avg_line_length: 25.791045 | max_line_length: 80 | alphanum_fraction: 0.696759
original_content:
#!/usr/bin/env python
import click
import json
import logging
import os
import requests
import sys
import prefect
@click.group()
def cli():
"""
The Prefect CLI
"""
pass
@cli.command()
def make_user_config():
"""
Generates a user configuration file
"""
user_config_path = prefect.config.get("user_config_path")
if not user_config_path:
raise ValueError("No user config path set!")
elif os.path.isfile(user_config_path):
raise ValueError("A file already exists at {}".format(user_config_path))
os.makedirs(os.path.dirname(user_config_path), exist_ok=True)
with open(user_config_path, "w") as user_config:
user_config.write(
"# This is a user configuration file.\n"
"# Settings placed here will overwrite Prefect's defaults."
)
click.secho("Config created at {}".format(user_config_path), fg="green")
@cli.command()
@click.argument("environment_file", type=click.Path(exists=True))
@click.option("--runner_kwargs", default={})
def run(environment_file, runner_kwargs):
"""
Run a flow from an environment file.
"""
schema = prefect.serialization.environment.EnvironmentSchema()
with open(environment_file, "r") as f:
environment = schema.load(json.load(f))
click.echo(environment.run(runner_kwargs=runner_kwargs))
@cli.command()
@click.argument("environment_metadata")
def create_environment(environment_metadata):
"""
Call the setup and execute functions for a given environment.
"""
schema = prefect.serialization.environment.EnvironmentSchema()
environment = schema.load(json.loads(environment_metadata))
environment.setup()
environment.execute()
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 159bef3318bfb6c684b7d77703c8e85be28688b9 | size: 411 | ext: py | lang: Python
max_stars: year2019/python/day1/day1.py in 3schwartz/AdventOfCode @ 32f259c4e20c3c4834718411f1053b6a11f71c86, licenses ["MIT"], count null, events null
max_issues: year2019/python/day1/day1.py in 3schwartz/AdventOfCode @ 32f259c4e20c3c4834718411f1053b6a11f71c86, licenses ["MIT"], count null, events null
max_forks: year2019/python/day1/day1.py in 3schwartz/AdventOfCode @ 32f259c4e20c3c4834718411f1053b6a11f71c86, licenses ["MIT"], count null, events null
content:
from day1_func import get_fuel
lines = open('../../data/day1_data.txt').read().strip().split('\n')
fuels = [get_fuel(int(line)) for line in lines]
print(f'Part 1: {sum(fuels)}')
total_sum = 0
for line in lines:
last_value = int(line)
while True:
last_value = get_fuel(last_value)
if last_value <= 0:
break
total_sum += last_value
print(f"Part 2: {total_sum}")
avg_line_length: 19.571429 | max_line_length: 67 | alphanum_fraction: 0.620438
original_content:
from day1_func import get_fuel
lines = open('../../data/day1_data.txt').read().strip().split('\n')
fuels = [get_fuel(int(line)) for line in lines]
print(f'Part 1: {sum(fuels)}')
total_sum = 0
for line in lines:
last_value = int(line)
while True:
last_value = get_fuel(last_value)
if last_value <= 0:
break
total_sum += last_value
print(f"Part 2: {total_sum}")
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: f1d40cad2f2fbe51cb27d3a15929184262878d47 | size: 518 | ext: py | lang: Python
max_stars: postgis_helpers/__init__.py in aaronfraint/postgis-helpers @ 99f4f9ae50c1197fe6d2d0fe42884c06d2c3589c, licenses ["MIT"], count 1, events 2021-02-25T21:52:24.000Z to 2021-02-25T21:52:24.000Z
max_issues: postgis_helpers/__init__.py in aaronfraint/postgis-helpers @ 99f4f9ae50c1197fe6d2d0fe42884c06d2c3589c, licenses ["MIT"], count null, events null
max_forks: postgis_helpers/__init__.py in aaronfraint/postgis-helpers @ 99f4f9ae50c1197fe6d2d0fe42884c06d2c3589c, licenses ["MIT"], count 1, events 2021-02-26T00:33:29.000Z to 2021-02-26T00:33:29.000Z
content:
from .PgSQL import PostgreSQL
from .config_helpers import make_config_file, read_config_file, configurations
from .geopandas_helpers import spatialize_point_dataframe
from .raw_data import DataSource
from .console import _console
__VERSION__ = "0.2.2"
# _console.print(":globe_showing_americas:", justify="left")
# _console.print(":globe_showing_europe-africa:", justify="center")
# _console.print(":globe_showing_asia-australia:", justify="right")
# _console.print(f"-> postGIS-helpers version {__VERSION__}\n\n")
avg_line_length: 37 | max_line_length: 78 | alphanum_fraction: 0.797297
original_content:
from .PgSQL import PostgreSQL
from .config_helpers import make_config_file, read_config_file, configurations
from .geopandas_helpers import spatialize_point_dataframe
from .raw_data import DataSource
from .console import _console
__VERSION__ = "0.2.2"
# _console.print(":globe_showing_americas:", justify="left")
# _console.print(":globe_showing_europe-africa:", justify="center")
# _console.print(":globe_showing_asia-australia:", justify="right")
# _console.print(f"-> postGIS-helpers version {__VERSION__}\n\n")
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 35abc541d0d7ce086f795d6703dfd297577c9cb7 | size: 424 | ext: py | lang: Python
max_stars: client/delivery/urls.py in daniel-waruo/e-commerse-api @ 6b080039398fb4099a34335317d649dd67783f63, licenses ["Apache-2.0"], count 6, events 2019-11-21T10:09:49.000Z to 2021-06-19T09:52:59.000Z
max_issues: client/delivery/urls.py in daniel-waruo/e-commerse-api @ 6b080039398fb4099a34335317d649dd67783f63, licenses ["Apache-2.0"], count null, events null
max_forks: client/delivery/urls.py in daniel-waruo/e-commerse-api @ 6b080039398fb4099a34335317d649dd67783f63, licenses ["Apache-2.0"], count null, events null
content:
from django.urls import path
from .views import CreateDeliveryInfo, DeliveryInfoApi, ListDeliveryInfo
app_name = 'delivery'
urlpatterns = [
path('delivery-information/add', CreateDeliveryInfo.as_view(), name='add_delivery_info'),
path('delivery-information/<int:pk>', DeliveryInfoApi.as_view(), name='delivery_info'),
path('delivery-information/list', ListDeliveryInfo.as_view(), name='list_delivery_info')
]
avg_line_length: 35.333333 | max_line_length: 93 | alphanum_fraction: 0.768868
original_content:
from django.urls import path
from .views import CreateDeliveryInfo, DeliveryInfoApi, ListDeliveryInfo
app_name = 'delivery'
urlpatterns = [
path('delivery-information/add', CreateDeliveryInfo.as_view(), name='add_delivery_info'),
path('delivery-information/<int:pk>', DeliveryInfoApi.as_view(), name='delivery_info'),
path('delivery-information/list', ListDeliveryInfo.as_view(), name='list_delivery_info')
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0

hexsha: 976b5d264ccc2ded21f52e33be4c2ab378f4c30d | size: 2,709 | ext: py | lang: Python
max_stars: python/day11/day11.py in secworks/advent_of_code_2020 @ b90e4e1d27c1e4b597a08cac8ff13e63686769f2, licenses ["BSD-2-Clause"], count null, events null
max_issues: python/day11/day11.py in secworks/advent_of_code_2020 @ b90e4e1d27c1e4b597a08cac8ff13e63686769f2, licenses ["BSD-2-Clause"], count null, events null
max_forks: python/day11/day11.py in secworks/advent_of_code_2020 @ b90e4e1d27c1e4b597a08cac8ff13e63686769f2, licenses ["BSD-2-Clause"], count null, events null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# day11.py
# -------
# Solutions to Advent of Code 2020, day 11.
# https://adventofcode.com/2020/day/11
#
#=======================================================================
import sys
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Implement the seating update logic.
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
if __name__=="__main__":
print("Advent of Code 2020, day 11")
print("==========================")
problem1("day11_example.txt")
problem2("day11_input.txt")
sys.exit(0)
#=======================================================================
#=======================================================================
avg_line_length: 27.927835 | max_line_length: 72 | alphanum_fraction: 0.298265
original_content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# day11.py
# -------
# Solutions to Advent of Code 2020, day 11.
# https://adventofcode.com/2020/day/11
#
#=======================================================================
import sys
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def get_input(filename):
l = []
with open(filename,'r') as f:
for line in f:
l.append(line.strip())
return l
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def compare_seatings(s1, s2):
assert len(s1) == len(s2), "Error: Size of seatings are not equal."
for i in range(len(s1)):
if s1[i] != s2[i]:
return False
return True
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def occupy_seat(seat, left, right):
if seat = ".":
return False
if seat = "L":
tmp = True
for s in left:
if s = "L" or s = ".2"
return False
#-------------------------------------------------------------------
# Implement the seating update logic.
#-------------------------------------------------------------------
def update_seating(seating):
tmp = seating[:]
w = len(seating[1])
print("Width of seating row: %d" % (w))
# Handle the middle 1..(w-2) seats
for i in range(1, w - 2):
if
return tmp
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def problem1(filename):
print("Problem 1")
print("---------")
seating = get_input(filename)
new_seating = update_seating(seating)
print(compare_seatings(seating, new_seating))
print("")
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def problem2(filename):
print("Problem 2")
print("---------")
print("")
#-------------------------------------------------------------------
#-------------------------------------------------------------------
if __name__=="__main__":
print("Advent of Code 2020, day 11")
print("==========================")
problem1("day11_example.txt")
problem2("day11_input.txt")
sys.exit(0)
#=======================================================================
#=======================================================================
filtered:remove_function_no_docstring: 908 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 132

hexsha: 7eb7e67de869f3bb153180f2431ce5d9de6c4ecb | size: 389 | ext: py | lang: Python
max_stars: tests/edgestats_test.py in devjack/edgestats @ d668cfdc4a6962c0f02a76916fc58d43605d46b2, licenses ["MIT"], count null, events null
max_issues: tests/edgestats_test.py in devjack/edgestats @ d668cfdc4a6962c0f02a76916fc58d43605d46b2, licenses ["MIT"], count null, events null
max_forks: tests/edgestats_test.py in devjack/edgestats @ d668cfdc4a6962c0f02a76916fc58d43605d46b2, licenses ["MIT"], count null, events null
content:
import unittest
import gzip
from edgestats import EdgeStats
if __name__ == '__main__':
unittest.main()
avg_line_length: 27.785714 | max_line_length: 69 | alphanum_fraction: 0.699229
original_content:
import unittest
import gzip
from edgestats import EdgeStats
class EdgestatsTest(unittest.TestCase):
def test_can_parse_log_file(self):
with gzip.open('tests/data/simple_unzip_test.gz', 'rb') as f:
file_content = f.read()
expected = b"Simple gzipped string\n"
self.assertEqual(expected, file_content)
if __name__ == '__main__':
unittest.main()
filtered:remove_function_no_docstring: 214 | filtered:remove_class_no_docstring: 18 | filtered:remove_delete_markers: 49

hexsha: 73dd5e51df3dee9bc1040211fdb75399402c8941 | size: 15,096 | ext: py | lang: Python
max_stars: sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/models/_service_fabric_managed_clusters_management_client_enums.py in dubiety/azure-sdk-for-python @ 62ffa839f5d753594cf0fe63668f454a9d87a346, licenses ["MIT"], count 1, events 2022-02-01T18:50:12.000Z to 2022-02-01T18:50:12.000Z
max_issues: sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/models/_service_fabric_managed_clusters_management_client_enums.py in ellhe-blaster/azure-sdk-for-python @ 82193ba5e81cc5e5e5a5239bba58abe62e86f469, licenses ["MIT"], count null, events null
max_forks: sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/models/_service_fabric_managed_clusters_management_client_enums.py in ellhe-blaster/azure-sdk-for-python @ 82193ba5e81cc5e5e5a5239bba58abe62e86f469, licenses ["MIT"], count null, events null
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class Access(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The network traffic is allowed or denied.
"""
ALLOW = "allow"
DENY = "deny"
class ClusterState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the cluster.
"""
#: Indicates that the cluster resource is created and the resource provider is waiting for Service
#: Fabric VM extension to boot up and report to it.
WAITING_FOR_NODES = "WaitingForNodes"
#: Indicates that the Service Fabric runtime is being installed on the VMs. Cluster resource will
#: be in this state until the cluster boots up and system services are up.
DEPLOYING = "Deploying"
#: Indicates that the cluster is upgrading to establishes the cluster version. This upgrade is
#: automatically initiated when the cluster boots up for the first time.
BASELINE_UPGRADE = "BaselineUpgrade"
#: Indicates that the cluster is being upgraded with the user provided configuration.
UPGRADING = "Upgrading"
#: Indicates that the last upgrade for the cluster has failed.
UPGRADE_FAILED = "UpgradeFailed"
#: Indicates that the cluster is in a stable state.
READY = "Ready"
class ClusterUpgradeCadence(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates when new cluster runtime version upgrades will be applied after they are released. By
default is Wave0.
"""
#: Cluster upgrade starts immediately after a new version is rolled out. Recommended for Test/Dev
#: clusters.
WAVE0 = "Wave0"
#: Cluster upgrade starts 7 days after a new version is rolled out. Recommended for Pre-prod
#: clusters.
WAVE1 = "Wave1"
#: Cluster upgrade starts 14 days after a new version is rolled out. Recommended for Production
#: clusters.
WAVE2 = "Wave2"
class ClusterUpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The upgrade mode of the cluster when new Service Fabric runtime version is available.
"""
#: The cluster will be automatically upgraded to the latest Service Fabric runtime version,
#: **clusterUpgradeCadence** will determine when the upgrade starts after the new version becomes
#: available.
AUTOMATIC = "Automatic"
#: The cluster will not be automatically upgraded to the latest Service Fabric runtime version.
#: The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
MANUAL = "Manual"
class Direction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network security rule direction.
"""
INBOUND = "inbound"
OUTBOUND = "outbound"
class DiskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Managed data disk type. IOPS and throughput are given by the disk size, to see more information
go to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types.
"""
#: Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent access.
STANDARD_LRS = "Standard_LRS"
#: Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
#: applications and dev/test.
STANDARD_SSD_LRS = "StandardSSD_LRS"
#: Premium SSD locally redundant storage. Best for production and performance sensitive workloads.
PREMIUM_LRS = "Premium_LRS"
class FailureAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The compensating action to perform when a Monitored upgrade encounters monitoring policy or
health policy violations. Invalid indicates the failure action is invalid. Rollback specifies
that the upgrade will start rolling back automatically. Manual indicates that the upgrade will
switch to UnmonitoredManual upgrade mode.
"""
#: Indicates that a rollback of the upgrade will be performed by Service Fabric if the upgrade
#: fails.
ROLLBACK = "Rollback"
#: Indicates that a manual repair will need to be performed by the administrator if the upgrade
#: fails. Service Fabric will not proceed to the next upgrade domain automatically.
MANUAL = "Manual"
class IPAddressType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The IP address type.
"""
#: IPv4 address type.
I_PV4 = "IPv4"
#: IPv6 address type.
I_PV6 = "IPv6"
class ManagedClusterAddOnFeature(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Available cluster add-on features
"""
#: Dns service.
DNS_SERVICE = "DnsService"
#: Backup and restore service.
BACKUP_RESTORE_SERVICE = "BackupRestoreService"
#: Resource monitor service.
RESOURCE_MONITOR_SERVICE = "ResourceMonitorService"
class ManagedIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of managed identity for the resource.
"""
#: Indicates that no identity is associated with the resource.
NONE = "None"
#: Indicates that system assigned identity is associated with the resource.
SYSTEM_ASSIGNED = "SystemAssigned"
#: Indicates that user assigned identity is associated with the resource.
USER_ASSIGNED = "UserAssigned"
#: Indicates that both system assigned and user assigned identity are associated with the
#: resource.
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
class ManagedResourceProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the managed resource.
"""
NONE = "None"
CREATING = "Creating"
CREATED = "Created"
UPDATING = "Updating"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
DELETING = "Deleting"
DELETED = "Deleted"
OTHER = "Other"
class MoveCost(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the move cost for the service.
"""
#: Zero move cost. This value is zero.
ZERO = "Zero"
#: Specifies the move cost of the service as Low. The value is 1.
LOW = "Low"
#: Specifies the move cost of the service as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the move cost of the service as High. The value is 3.
HIGH = "High"
class NodeTypeSkuScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Node type capacity scale type.
"""
#: Node count is not adjustable in any way (e.g. it is fixed).
NONE = "None"
#: The user must manually scale out/in.
MANUAL = "Manual"
#: Automatic scale is allowed.
AUTOMATIC = "Automatic"
class NsgProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network protocol this rule applies to.
"""
HTTP = "http"
HTTPS = "https"
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
AH = "ah"
ESP = "esp"
class OsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Cluster operating system, the default will be Windows
"""
#: Indicates os is Windows.
WINDOWS = "Windows"
class PartitionScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Indicates that the partition is based on string names, and is a SingletonPartitionScheme
#: object, The value is 0.
SINGLETON = "Singleton"
#: Indicates that the partition is based on Int64 key ranges, and is a
#: UniformInt64RangePartitionScheme object. The value is 1.
UNIFORM_INT64_RANGE = "UniformInt64Range"
#: Indicates that the partition is based on string names, and is a NamedPartitionScheme object.
#: The value is 2.
NAMED = "Named"
class PrivateEndpointNetworkPolicies(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or Disable apply network policies on private end point in the subnet.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class PrivateLinkServiceNetworkPolicies(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or Disable apply network policies on private link service in the subnet.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ProbeProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""the reference to the load balancer probe used by the load balancing rule.
"""
TCP = "tcp"
HTTP = "http"
HTTPS = "https"
class Protocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The reference to the transport protocol used by the load balancing rule.
"""
TCP = "tcp"
UDP = "udp"
class RollingUpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The mode used to monitor health during a rolling upgrade. The values are Monitored, and
UnmonitoredAuto.
"""
#: The upgrade will stop after completing each upgrade domain and automatically monitor health
#: before proceeding. The value is 0.
MONITORED = "Monitored"
#: The upgrade will proceed automatically without performing any health monitoring. The value is
#: 1.
UNMONITORED_AUTO = "UnmonitoredAuto"
class ServiceCorrelationScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The service correlation scheme.
"""
#: Aligned affinity ensures that the primaries of the partitions of the affinitized services are
#: collocated on the same nodes. This is the default and is the same as selecting the Affinity
#: scheme. The value is 0.
ALIGNED_AFFINITY = "AlignedAffinity"
#: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same
#: nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will
#: be collocated. The value is 1.
NON_ALIGNED_AFFINITY = "NonAlignedAffinity"
class ServiceKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of service (Stateless or Stateful).
"""
#: Does not use Service Fabric to make its state highly available or reliable. The value is 0.
STATELESS = "Stateless"
#: Uses Service Fabric to make its state or part of its state highly available and reliable. The
#: value is 1.
STATEFUL = "Stateful"
class ServiceLoadMetricWeight(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Determines the metric weight relative to the other metrics that are configured for this
service. During runtime, if two metrics end up in conflict, the Cluster Resource Manager
prefers the metric with the higher weight.
"""
#: Disables resource balancing for this metric. This value is zero.
ZERO = "Zero"
#: Specifies the metric weight of the service load as Low. The value is 1.
LOW = "Low"
#: Specifies the metric weight of the service load as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the metric weight of the service load as High. The value is 3.
HIGH = "High"
class ServicePackageActivationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The activation Mode of the service package
"""
#: Indicates the application package activation mode will use shared process.
SHARED_PROCESS = "SharedProcess"
#: Indicates the application package activation mode will use exclusive process.
EXCLUSIVE_PROCESS = "ExclusiveProcess"
class ServicePlacementPolicyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of placement policy for a service fabric service. Following are the possible values.
"""
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or
#: upgrade domain cannot be used for placement of this service. The value is 0.
INVALID_DOMAIN = "InvalidDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the
#: service must be placed in a specific domain. The value is 1.
REQUIRED_DOMAIN = "RequiredDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the
#: Primary replica for the partitions of the service should be located in a particular domain as
#: an optimization. The value is 2.
PREFERRED_PRIMARY_DOMAIN = "PreferredPrimaryDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will
#: disallow placement of any two replicas from the same partition in the same domain at any time.
#: The value is 3.
REQUIRED_DOMAIN_DISTRIBUTION = "RequiredDomainDistribution"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all
#: replicas of a particular partition of the service should be placed atomically. The value is 4.
NON_PARTIALLY_PLACE_SERVICE = "NonPartiallyPlaceService"
class ServiceScalingMechanismKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling mechanism for adding or removing instances of stateless service partition.
#: The value is 0.
SCALE_PARTITION_INSTANCE_COUNT = "ScalePartitionInstanceCount"
#: Represents a scaling mechanism for adding or removing named partitions of a stateless service.
#: The value is 1.
ADD_REMOVE_INCREMENTAL_NAMED_PARTITION = "AddRemoveIncrementalNamedPartition"
class ServiceScalingTriggerKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling trigger related to an average load of a metric/resource of a partition.
#: The value is 0.
AVERAGE_PARTITION_LOAD_TRIGGER = "AveragePartitionLoadTrigger"
#: Represents a scaling policy related to an average load of a metric/resource of a service. The
#: value is 1.
AVERAGE_SERVICE_LOAD_TRIGGER = "AverageServiceLoadTrigger"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Sku Name.
"""
#: Basic requires a minimum of 3 nodes and allows only 1 node type.
BASIC = "Basic"
#: Requires a minimum of 5 nodes and allows 1 or more node type.
STANDARD = "Standard"
avg_line_length: 42.285714 | max_line_length: 102 | alphanum_fraction: 0.72602
original_content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class Access(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The network traffic is allowed or denied.
"""
ALLOW = "allow"
DENY = "deny"
class ClusterState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the cluster.
"""
#: Indicates that the cluster resource is created and the resource provider is waiting for Service
#: Fabric VM extension to boot up and report to it.
WAITING_FOR_NODES = "WaitingForNodes"
#: Indicates that the Service Fabric runtime is being installed on the VMs. Cluster resource will
#: be in this state until the cluster boots up and system services are up.
DEPLOYING = "Deploying"
#: Indicates that the cluster is upgrading to establishes the cluster version. This upgrade is
#: automatically initiated when the cluster boots up for the first time.
BASELINE_UPGRADE = "BaselineUpgrade"
#: Indicates that the cluster is being upgraded with the user provided configuration.
UPGRADING = "Upgrading"
#: Indicates that the last upgrade for the cluster has failed.
UPGRADE_FAILED = "UpgradeFailed"
#: Indicates that the cluster is in a stable state.
READY = "Ready"
class ClusterUpgradeCadence(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates when new cluster runtime version upgrades will be applied after they are released. By
default is Wave0.
"""
#: Cluster upgrade starts immediately after a new version is rolled out. Recommended for Test/Dev
#: clusters.
WAVE0 = "Wave0"
#: Cluster upgrade starts 7 days after a new version is rolled out. Recommended for Pre-prod
#: clusters.
WAVE1 = "Wave1"
#: Cluster upgrade starts 14 days after a new version is rolled out. Recommended for Production
#: clusters.
WAVE2 = "Wave2"
class ClusterUpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The upgrade mode of the cluster when new Service Fabric runtime version is available.
"""
#: The cluster will be automatically upgraded to the latest Service Fabric runtime version,
#: **clusterUpgradeCadence** will determine when the upgrade starts after the new version becomes
#: available.
AUTOMATIC = "Automatic"
#: The cluster will not be automatically upgraded to the latest Service Fabric runtime version.
#: The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
MANUAL = "Manual"
class Direction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network security rule direction.
"""
INBOUND = "inbound"
OUTBOUND = "outbound"
class DiskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Managed data disk type. IOPS and throughput are given by the disk size, to see more information
go to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types.
"""
#: Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent access.
STANDARD_LRS = "Standard_LRS"
#: Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
#: applications and dev/test.
STANDARD_SSD_LRS = "StandardSSD_LRS"
#: Premium SSD locally redundant storage. Best for production and performance sensitive workloads.
PREMIUM_LRS = "Premium_LRS"
class FailureAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The compensating action to perform when a Monitored upgrade encounters monitoring policy or
health policy violations. Invalid indicates the failure action is invalid. Rollback specifies
that the upgrade will start rolling back automatically. Manual indicates that the upgrade will
switch to UnmonitoredManual upgrade mode.
"""
#: Indicates that a rollback of the upgrade will be performed by Service Fabric if the upgrade
#: fails.
ROLLBACK = "Rollback"
#: Indicates that a manual repair will need to be performed by the administrator if the upgrade
#: fails. Service Fabric will not proceed to the next upgrade domain automatically.
MANUAL = "Manual"
class IPAddressType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The IP address type.
"""
#: IPv4 address type.
I_PV4 = "IPv4"
#: IPv6 address type.
I_PV6 = "IPv6"
class ManagedClusterAddOnFeature(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Available cluster add-on features
"""
#: Dns service.
DNS_SERVICE = "DnsService"
#: Backup and restore service.
BACKUP_RESTORE_SERVICE = "BackupRestoreService"
#: Resource monitor service.
RESOURCE_MONITOR_SERVICE = "ResourceMonitorService"
class ManagedClusterVersionEnvironment(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
class ManagedIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of managed identity for the resource.
"""
#: Indicates that no identity is associated with the resource.
NONE = "None"
#: Indicates that system assigned identity is associated with the resource.
SYSTEM_ASSIGNED = "SystemAssigned"
#: Indicates that user assigned identity is associated with the resource.
USER_ASSIGNED = "UserAssigned"
#: Indicates that both system assigned and user assigned identity are associated with the
#: resource.
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
class ManagedResourceProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the managed resource.
"""
NONE = "None"
CREATING = "Creating"
CREATED = "Created"
UPDATING = "Updating"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
DELETING = "Deleting"
DELETED = "Deleted"
OTHER = "Other"
class MoveCost(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the move cost for the service.
"""
#: Zero move cost. This value is zero.
ZERO = "Zero"
#: Specifies the move cost of the service as Low. The value is 1.
LOW = "Low"
#: Specifies the move cost of the service as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the move cost of the service as High. The value is 3.
HIGH = "High"
class NodeTypeSkuScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Node type capacity scale type.
"""
#: Node count is not adjustable in any way (e.g. it is fixed).
NONE = "None"
#: The user must manually scale out/in.
MANUAL = "Manual"
#: Automatic scale is allowed.
AUTOMATIC = "Automatic"
class NsgProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network protocol this rule applies to.
"""
HTTP = "http"
HTTPS = "https"
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
AH = "ah"
ESP = "esp"
class OsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Cluster operating system, the default will be Windows
"""
#: Indicates os is Windows.
WINDOWS = "Windows"
class PartitionScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Indicates that the partition is based on string names, and is a SingletonPartitionScheme
    #: object. The value is 0.
SINGLETON = "Singleton"
#: Indicates that the partition is based on Int64 key ranges, and is a
#: UniformInt64RangePartitionScheme object. The value is 1.
UNIFORM_INT64_RANGE = "UniformInt64Range"
#: Indicates that the partition is based on string names, and is a NamedPartitionScheme object.
#: The value is 2.
NAMED = "Named"
class PrivateEndpointNetworkPolicies(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or Disable apply network policies on private end point in the subnet.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class PrivateLinkServiceNetworkPolicies(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or Disable apply network policies on private link service in the subnet.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ProbeProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""the reference to the load balancer probe used by the load balancing rule.
"""
TCP = "tcp"
HTTP = "http"
HTTPS = "https"
class Protocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The reference to the transport protocol used by the load balancing rule.
"""
TCP = "tcp"
UDP = "udp"
class RollingUpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The mode used to monitor health during a rolling upgrade. The values are Monitored, and
UnmonitoredAuto.
"""
#: The upgrade will stop after completing each upgrade domain and automatically monitor health
#: before proceeding. The value is 0.
MONITORED = "Monitored"
#: The upgrade will proceed automatically without performing any health monitoring. The value is
#: 1.
UNMONITORED_AUTO = "UnmonitoredAuto"
class ServiceCorrelationScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The service correlation scheme.
"""
#: Aligned affinity ensures that the primaries of the partitions of the affinitized services are
#: collocated on the same nodes. This is the default and is the same as selecting the Affinity
#: scheme. The value is 0.
ALIGNED_AFFINITY = "AlignedAffinity"
#: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same
#: nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will
#: be collocated. The value is 1.
NON_ALIGNED_AFFINITY = "NonAlignedAffinity"
class ServiceKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of service (Stateless or Stateful).
"""
#: Does not use Service Fabric to make its state highly available or reliable. The value is 0.
STATELESS = "Stateless"
#: Uses Service Fabric to make its state or part of its state highly available and reliable. The
#: value is 1.
STATEFUL = "Stateful"
class ServiceLoadMetricWeight(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Determines the metric weight relative to the other metrics that are configured for this
service. During runtime, if two metrics end up in conflict, the Cluster Resource Manager
prefers the metric with the higher weight.
"""
#: Disables resource balancing for this metric. This value is zero.
ZERO = "Zero"
#: Specifies the metric weight of the service load as Low. The value is 1.
LOW = "Low"
#: Specifies the metric weight of the service load as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the metric weight of the service load as High. The value is 3.
HIGH = "High"
class ServicePackageActivationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The activation Mode of the service package
"""
#: Indicates the application package activation mode will use shared process.
SHARED_PROCESS = "SharedProcess"
#: Indicates the application package activation mode will use exclusive process.
EXCLUSIVE_PROCESS = "ExclusiveProcess"
class ServicePlacementPolicyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of placement policy for a service fabric service. Following are the possible values.
"""
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or
#: upgrade domain cannot be used for placement of this service. The value is 0.
INVALID_DOMAIN = "InvalidDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the
#: service must be placed in a specific domain. The value is 1.
REQUIRED_DOMAIN = "RequiredDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the
#: Primary replica for the partitions of the service should be located in a particular domain as
#: an optimization. The value is 2.
PREFERRED_PRIMARY_DOMAIN = "PreferredPrimaryDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will
#: disallow placement of any two replicas from the same partition in the same domain at any time.
#: The value is 3.
REQUIRED_DOMAIN_DISTRIBUTION = "RequiredDomainDistribution"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all
#: replicas of a particular partition of the service should be placed atomically. The value is 4.
NON_PARTIALLY_PLACE_SERVICE = "NonPartiallyPlaceService"
class ServiceScalingMechanismKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling mechanism for adding or removing instances of stateless service partition.
#: The value is 0.
SCALE_PARTITION_INSTANCE_COUNT = "ScalePartitionInstanceCount"
#: Represents a scaling mechanism for adding or removing named partitions of a stateless service.
#: The value is 1.
ADD_REMOVE_INCREMENTAL_NAMED_PARTITION = "AddRemoveIncrementalNamedPartition"
class ServiceScalingTriggerKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling trigger related to an average load of a metric/resource of a partition.
#: The value is 0.
AVERAGE_PARTITION_LOAD_TRIGGER = "AveragePartitionLoadTrigger"
#: Represents a scaling policy related to an average load of a metric/resource of a service. The
#: value is 1.
AVERAGE_SERVICE_LOAD_TRIGGER = "AverageServiceLoadTrigger"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Sku Name.
"""
#: Basic requires a minimum of 3 nodes and allows only 1 node type.
BASIC = "Basic"
#: Requires a minimum of 5 nodes and allows 1 or more node type.
STANDARD = "Standard"
| 0
| 95
| 23
|
0f8896b2d56b1d6c742f1e75296d6e1c654c2549
| 10,751
|
py
|
Python
|
solarforecastarbiter/io/reference_observations/srml.py
|
wholmgren/solarforecastarbiter-core
|
e692c7e142f24c0253e4288a6ac760e10ba41dbd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
solarforecastarbiter/io/reference_observations/srml.py
|
wholmgren/solarforecastarbiter-core
|
e692c7e142f24c0253e4288a6ac760e10ba41dbd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
solarforecastarbiter/io/reference_observations/srml.py
|
wholmgren/solarforecastarbiter-core
|
e692c7e142f24c0253e4288a6ac760e10ba41dbd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import logging
import json
from urllib import error
from pkg_resources import resource_filename, Requirement
import pandas as pd
from pvlib import iotools
from requests.exceptions import HTTPError
from solarforecastarbiter.datamodel import Observation, SolarPowerPlant
from solarforecastarbiter.io.reference_observations import (
common, default_forecasts)
DEFAULT_SITEFILE = resource_filename(
Requirement.parse('solarforecastarbiter'),
'solarforecastarbiter/io/reference_observations/'
'srml_reference_sites.json')
# maps the desired variable names to those returned by pvlib.iotools
srml_variable_map = {
'ghi_': 'ghi',
'dni_': 'dni',
'dhi_': 'dhi',
'wind_speed_': 'wind_speed',
'temp_air_': 'air_temperature',
}
# maps SolarForecastArbiter interval_label to the SRML infix which
# designates the time resolution of each file. The list of file types
# is tried in order, so file types starting with 'P' designating
# processed data are listed first, such that if processed data exists
# we retrieve that first.
FILE_TYPE_MAP = {
1: ['PO', 'RO'],
5: ['PF', 'RF'],
15: ['PQ', 'RQ'],
60: ['PH', 'RH'],
}
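# Illustrative sketch (added here for clarity, not part of the original
# module): how the two maps above are consulted in practice. pvlib column
# names such as 'ghi_2' are matched by prefix against srml_variable_map, and
# the observation interval length in minutes selects which SRML file-type
# infixes request_data will try, processed ('P*') files first.
_example_column = 'ghi_2'
_example_variables = [sfa_var for prefix, sfa_var in srml_variable_map.items()
                      if _example_column.startswith(prefix)]
assert _example_variables == ['ghi']
assert FILE_TYPE_MAP[5] == ['PF', 'RF']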
logger = logging.getLogger('reference_data')
def adjust_site_parameters(site):
"""Inserts modeling parameters for sites with pv measurments
Parameters
----------
site: dict
Returns
-------
dict
Copy of inputs plus a new key 'modeling_parameters'.
"""
return common.apply_json_site_parameters(DEFAULT_SITEFILE, site)
def request_data(site, year, month):
"""Makes a request for each file type until successful or we
run out of filetypes.
Parameters
----------
site: :py:class:`solarforecastarbiter.datamodel.Site`
year: int
The year of the data to request.
month: int
The month of the data to request.
Returns
-------
DataFrame
A month of SRML data.
"""
extra_params = common.decode_extra_parameters(site)
station_code = extra_params['network_api_abbreviation']
interval_length = extra_params['observation_interval_length']
file_types = FILE_TYPE_MAP[interval_length]
for file_type in file_types:
        # The file types in file_types are ordered with processed data
        # first. On a successful retrieval we return the month of data,
        # otherwise we log a warning and continue until we've exhausted
        # the list.
try:
srml_month = iotools.read_srml_month_from_solardat(
station_code, year, month, file_type)
except error.URLError:
logger.warning(f'Could not retrieve {file_type} for SRML data '
f'for site {site.name} on {year}/{month} .')
logger.debug(f'Site abbreviation: {station_code}')
continue
except pd.errors.EmptyDataError:
logger.warning(f'SRML returned an empty file for station '
f'{site.name} on {year}/{month}.')
continue
else:
return srml_month
logger.warning(f'Could not retrieve data for site {site.name} on '
f'{year}/{month}.')
def fetch(api, site, start, end):
"""Retrieve observation data for a srml site between start and end.
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An APISession with a valid JWT for accessing the Reference Data
user.
site : :py:class:`solarforecastarbiter.datamodel.Site`
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for. Must include timezone.
end : datetime
The end of the period to request data for. Must include timezone.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
Raises
------
TypeError
If start and end have different timezones, or if they do not include a
timezone.
"""
month_dfs = []
start_year = start.year
start_month = start.month
# Retrieve each month file necessary
if start.tzinfo != end.tzinfo:
raise TypeError('start and end cannot have different timezones')
while start_year * 100 + start_month <= end.year * 100 + end.month:
logger.info(f'Requesting data for SRML site {site.name}'
f' for {start_year}-{start_month}')
srml_month = request_data(site, start_year, start_month)
if srml_month is not None:
month_dfs.append(srml_month)
start_month += 1
if start_month > 12:
start_month = 1
start_year += 1
try:
all_period_data = pd.concat(month_dfs)
except ValueError:
logger.warning(f'No data available for site {site.name} '
f'from {start} to {end}.')
return pd.DataFrame()
var_columns = [col for col in all_period_data.columns
if '_flag' not in col]
power_columns = [col for col in var_columns
if col.startswith('5')]
# adjust power from watts to megawatts
for column in power_columns:
all_period_data[column] = all_period_data[column] / 1000000
all_period_data = all_period_data.loc[start:end, var_columns]
# remove possible trailing NaNs, it is necessary to do this after slicing
# because SRML data has nighttime data prefilled with 0s through the end of
# the month. This may not be effective if a given site has more than a 24
# hour lag, which will cause last_valid_index to return the latest
# timestamp just before sunrise, but will suffice for the typical lag on
# the order of hours.
all_period_data = all_period_data[:all_period_data.last_valid_index()]
return all_period_data
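# Illustrative usage sketch (added here for clarity, not part of the original
# module). fetch expects timezone-aware pandas Timestamps, and the api
# argument is not referenced inside this particular function, so it only
# matters for interface parity with the other reference-observation modules.
# For example, with srml_site being a datamodel.Site carrying SRML extra
# parameters and api_session an APISession:
#
#     start = pd.Timestamp('2021-01-01T00:00Z')
#     end = pd.Timestamp('2021-01-31T23:59Z')
#     month_of_data = fetch(api_session, srml_site, start, end)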
def initialize_site_observations(api, site):
"""Creates an observation at the site for each variable in
an SRML site's file.
Parameters
----------
api: :py:class:`solarforecastarbiter.io.api.APISession`
    site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Observations.
Notes
-----
Since variables are labelled with an integer instrument
number, Observations are named with their variable and
instrument number found in the source files.
    e.g. An SRML file contains two columns labelled 1001 and
1002. These columns represent GHI at instrument 1 and
instrument 2 respectively. The `pvlib.iotools` package
converts these to 'ghi_1' and 'ghi_2' for us. We use these
labels to differentiate between measurements recorded by
different instruments.
"""
# Request ~month old data at initialization to ensure we get a response.
start = pd.Timestamp.utcnow() - pd.Timedelta('30 days')
end = start
try:
extra_params = common.decode_extra_parameters(site)
except ValueError:
        logger.warning('Cannot create reference observations at SRML site '
f'{site.name}, missing required parameters.')
return
# use site name without network here to build
# a name with the original column label rather than
# the SFA variable
site_name = common.site_name_no_network(site)
try:
site_df = fetch(api, site, start, end)
except error.HTTPError:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
else:
if site_df is None:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
for variable in srml_variable_map.keys():
matches = [col for col in site_df.columns
if col.startswith(variable)]
for match in matches:
observation_extra_parameters = extra_params.copy()
observation_extra_parameters.update({
'network_data_label': match})
try:
# Here, we pass a name with match instead of variable
# to differentiate between multiple observations of
# the same variable
common.create_observation(
api, site, srml_variable_map[variable],
name=f'{site_name} {match}',
interval_label='beginning',
extra_params=observation_extra_parameters)
except HTTPError as e:
logger.error(
f'Failed to create {variable} observation at Site '
f'{site.name}. Error: {e.response.text}')
with open(DEFAULT_SITEFILE) as fp:
obs_metadata = json.load(fp)['observations']
for obs in obs_metadata:
obs_site_extra_params = json.loads(obs['site']['extra_parameters'])
if obs_site_extra_params['network_api_id'] == extra_params[
'network_api_id']:
obs['site'] = site
observation = Observation.from_dict(obs)
common.check_and_post_observation(api, observation)
def initialize_site_forecasts(api, site):
"""
    Create forecasts for each variable measured at the site
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Forecasts.
"""
variables = list(srml_variable_map.values())
if isinstance(site, SolarPowerPlant):
variables += ['ac_power', 'dc_power']
common.create_forecasts(
api, site, variables,
default_forecasts.TEMPLATE_FORECASTS)
def update_observation_data(api, sites, observations, start, end):
"""Post new observation data to a list of SRML Observations
from start to end.
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
sites: list of :py:class:`solarforecastarbiter.datamodel.Site`
List of all reference sites as Objects
observations: list of :py:class:`solarforecastarbiter.datamodel.Observation`
List of all reference observations as Objects
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
""" # noqa
srml_sites = common.filter_by_networks(sites, 'UO SRML')
for site in srml_sites:
common.update_site_observations(api, fetch, site, observations,
start, end)
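# Illustrative note (added here for clarity, not part of the original module):
# the typical call order for this reference-data module is
# initialize_site_observations / initialize_site_forecasts once per site,
# followed by periodic calls to update_observation_data, e.g.:
#
#     for site in sites:
#         initialize_site_observations(api_session, site)
#         initialize_site_forecasts(api_session, site)
#     update_observation_data(api_session, sites, observations, start, end)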
| 36.692833
| 80
| 0.646172
|
| 0
| 0
| 0
|
23ff69637f82addae0325c842c00f5a222080a6a
| 2,369
|
py
|
Python
|
tests/test_estimators.py
|
astrojose9/fulmar
|
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
|
[
"MIT"
] | null | null | null |
tests/test_estimators.py
|
astrojose9/fulmar
|
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
|
[
"MIT"
] | null | null | null |
tests/test_estimators.py
|
astrojose9/fulmar
|
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
|
[
"MIT"
] | null | null | null |
import os
import sys
# sys.path.insert(0, os.path.abspath('/home/jrodrigues/Documents/PhD/fulmar'))
from fulmar.estimators import (
estimate_planet_mass,
estimate_semi_amplitude
)
from fulmar.utils import (
FulmarWarning
)
import astropy.units as u
import numpy as np
import numpy.testing as npt
from astropy.units import UnitConversionError
import pytest
def test_estimate_planet_mass():
"""test if estimate_planet_mass behaves as expected"""
npt.assert_equal(estimate_planet_mass(
1, 'Earth').value, 1) # * u.earthMass)
npt.assert_almost_equal(estimate_planet_mass(
1, 'Neptune').value, 0.29706202) # * u.earthMass)
npt.assert_almost_equal(estimate_planet_mass(
1, 5514).value, 1) # * u.earthMass)
with pytest.raises(TypeError, match='`astropy.units.Quantity` or float'):
estimate_planet_mass('string', 'Earth')
with pytest.raises(ValueError, match="Accepted str values for rho_p"):
estimate_planet_mass(1, 'Uranus')
with pytest.raises(UnitConversionError):
estimate_planet_mass(1 * u.s, 'neptune')
def test_estimate_semi_amplitude():
"""test if estime_semi_amplitude behaves as exected"""
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, 1).value, 0.08948015) # * u.m / u.s)
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, R_planet=1, rho_planet='earth').value, 0.08948015) # * u.m / u.s)
npt.assert_equal(estimate_semi_amplitude(
365, 1, 1, inc=0).value, 0)
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, 1, ecc=0.5).value, 0.12654404) # * u.m / u.s)
with pytest.raises(TypeError, match='`astropy.units.Quantity` or float'):
estimate_planet_mass('1 earthRad', 'earth')
estimate_semi_amplitude('1 year', 1, 1)
estimate_semi_amplitude(365, '1 solMass', 1)
estimate_semi_amplitude(365, 1, M_planet='1 earthMass')
estimate_semi_amplitude(365, 1, M_planet=1 * u.earthMass, inc='90 deg')
with pytest.raises(ValueError, match='required when M_planet is not'):
estimate_semi_amplitude(365, 1)
estimate_semi_amplitude(365, 1, R_planet=1)
estimate_semi_amplitude(365, 1, rho_planet=1)
with pytest.warns(FulmarWarning, match='overrides'):
estimate_semi_amplitude(365, 1, 1, R_planet=1, rho_planet='earth')
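# Illustrative note (added here for clarity, not part of the original test
# module): as the assertions above suggest, the estimators accept either plain
# floats (presumably interpreted in default units such as days and solar
# masses) or astropy Quantities, so a quick interactive check is simply:
#
#     estimate_semi_amplitude(365, 1, 1)   # ~0.0895 m/s, an Earth-like signal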
| 36.446154
| 82
| 0.699451
|
| 0
| 0
| 0
|
745adf17e78917104f0f56ad44e17b5a7a1b9a7e
| 25,642
|
py
|
Python
|
rwb/images/__init__.py
|
boakley/robotframework-workbench
|
92f15845d6fa4baedd4f3c4346fb8ff5cf9149a6
|
[
"Apache-2.0"
] | 11
|
2015-03-09T01:53:21.000Z
|
2021-03-29T08:33:05.000Z
|
rwb/images/__init__.py
|
boakley/robotframework-workbench
|
92f15845d6fa4baedd4f3c4346fb8ff5cf9149a6
|
[
"Apache-2.0"
] | 1
|
2016-08-24T06:20:11.000Z
|
2016-08-24T06:20:11.000Z
|
rwb/images/__init__.py
|
boakley/robotframework-workbench
|
92f15845d6fa4baedd4f3c4346fb8ff5cf9149a6
|
[
"Apache-2.0"
] | 5
|
2016-03-03T15:27:09.000Z
|
2019-03-26T13:05:32.000Z
|
'''
This data was automatically generated by script img2data.py.
These images are part of the FAMFAMFAM silk icons set which is
provided under a creative commons license. For more information see
http://www.famfamfam.com/lab/icons/silk/
'''
data = {
'cancel': '''
R0lGODlhEAAQAPcAAAAAAAAAMwAAZgAAmQAAzAAA/wArAAArMwArZgArmQArzAAr
/wBVAABVMwBVZgBVmQBVzABV/wCAAACAMwCAZgCAmQCAzACA/wCqAACqMwCqZgCq
mQCqzACq/wDVAADVMwDVZgDVmQDVzADV/wD/AAD/MwD/ZgD/mQD/zAD//zMAADMA
MzMAZjMAmTMAzDMA/zMrADMrMzMrZjMrmTMrzDMr/zNVADNVMzNVZjNVmTNVzDNV
/zOAADOAMzOAZjOAmTOAzDOA/zOqADOqMzOqZjOqmTOqzDOq/zPVADPVMzPVZjPV
mTPVzDPV/zP/ADP/MzP/ZjP/mTP/zDP//2YAAGYAM2YAZmYAmWYAzGYA/2YrAGYr
M2YrZmYrmWYrzGYr/2ZVAGZVM2ZVZmZVmWZVzGZV/2aAAGaAM2aAZmaAmWaAzGaA
/2aqAGaqM2aqZmaqmWaqzGaq/2bVAGbVM2bVZmbVmWbVzGbV/2b/AGb/M2b/Zmb/
mWb/zGb//5kAAJkAM5kAZpkAmZkAzJkA/5krAJkrM5krZpkrmZkrzJkr/5lVAJlV
M5lVZplVmZlVzJlV/5mAAJmAM5mAZpmAmZmAzJmA/5mqAJmqM5mqZpmqmZmqzJmq
/5nVAJnVM5nVZpnVmZnVzJnV/5n/AJn/M5n/Zpn/mZn/zJn//8wAAMwAM8wAZswA
mcwAzMwA/8wrAMwrM8wrZswrmcwrzMwr/8xVAMxVM8xVZsxVmcxVzMxV/8yAAMyA
M8yAZsyAmcyAzMyA/8yqAMyqM8yqZsyqmcyqzMyq/8zVAMzVM8zVZszVmczVzMzV
/8z/AMz/M8z/Zsz/mcz/zMz///8AAP8AM/8AZv8Amf8AzP8A//8rAP8rM/8rZv8r
mf8rzP8r//9VAP9VM/9VZv9Vmf9VzP9V//+AAP+AM/+AZv+Amf+AzP+A//+qAP+q
M/+qZv+qmf+qzP+q///VAP/VM//VZv/Vmf/VzP/V////AP//M///Zv//mf//zP//
/wAAAAAAAAAAAAAAACH5BAEAAPwALAAAAAAQABAAAAiWAPcJHEiwYEFpCBMiNLhP
WjZz4CB+A5dN2sGH2TJm+7ax4kCHEOlx3EgPHEeLDc1loydwokB6G1EJlEYRHMt6
+1hW/IaSpreN+/ThzIYq5kyKGffV07ePpzSeMzl+UypU6aunMhtSdCcwI0t606A2
3PjN3VVXK2NO+/iKIzZp0xB+Q4Xt4re7te4WZSgNVV+EfhkKLhgQADs=
''',
'cog': '''
R0lGODlhEAAQAOYAAElJSU5OTlFRUVJSUlNTU1hYWFtbW2FhYWJiYmRkZGtra21t
bW5ubm9vb3FxcXl5eYCAgIGBgYKCgoODg4WFhYeHh4mJiYyMjI+Pj5ycnJ6enqCg
oKGhoaOjo6Wlpaampqenp6ioqKqqqqurq6ysrLCwsLGxsbKysrW1tbe3t7m5ubq6
ury8vL29vb6+vr+/v8DAwMHBwcLCwsPDw8TExMXFxcbGxsfHx8jIyMnJycrKysvL
y8zMzM3Nzc7Ozs/Pz9DQ0NHR0dLS0tTU1NXV1dbW1tjY2NnZ2dvb29zc3N3d3d7e
3uLi4uTk5OXl5efn5+np6e3t7QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAAFIALAAAAAAQABAAAAfDgFKCgzs3NIOIiVI3Tk0k
ioJBPjpSMktKJ1IkIIhASVFIMi5FQyUkO08piDRJQ0dIpEdCOzgPDohDPDkrGRwy
NjEpFoI4NDBGPSwSghgzMj0XFRM0UEU5Ph6IJDQrNkoKL0xCNj0miCEyKTBCBx0Y
Gz82PBrMMSwqCQUEgiQ1MTU3RICI4QKFCEQjPhCpsSNIjhs8arTYwQARiyUfJlCg
IWMBgw9CIAxA1CCBlAmFEEgpEAAAJCkRWpww8DJRAQEjEwUCADs=
''',
'control_play': '''
R0lGODlhEAAQAPZDAHNzc3NzdHV1dXV1dnZ2d3h4eHh4eXl5eo6OjpCQkJKSkpOT
k5aWlpeXl5mZmZycnJ2dnaKioqenp6qqqqurq7Ozs7m5ucDAwMLCwsPDw8TExMbG
xsnJyc3Nzc7OztHR0dLS0tPT09TU1NXV1dbW1tfX19jY2N3d3eHh4eLi4uPj4+Tk
5OXl5ebm5ufn5+jo6Onp6erq6uvr6+zs7O3t7e7u7u/v7/Dw8PHx8fLy8vPz8/T0
9PX19ff39/j4+Pr6+vv7+/z8/P7+/v///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAEQALAAAAAAQABAAAAebgESCg4SFhiYliSEhhoMm
MjtAQDouHY0mPUM9OjqZPRyFJkBDNjYYNjIymhaDJaqlNgIYqS5DJxWCJZ2pMgIB
FjIuPTYUuUClqQIFBx0uLkATgiHHvMoCNiouOhHSnS7BvjYuKTopEIIdKUPOLgDi
KSlAGw6Dp+su2PBDKfSEFjZDKGkDwq/RhGacUlhY0EgQBAYMFiBA0LAioUAAOw==
''',
'cut': '''
R0lGODlhEAAQAOZuAOnt8VaOvKnE19zp9Vum2Pv8/aTB1qXC12Cq3KbC2KrF2KvG
2ZK20eTt84WryVyj0mCFroetyGe372ex5Zy804Oqx9Dg8OLm6aXN77PF0cTW57fH
0ujs79vm7qC+1k14r8vc567I3nWiyl+m1lF6r1qi0mGdxmWz6s7e7cDU5ubq7V+K
uIOow4apwU16svDy9P39/vf5++Hr+FOQwdzn7q7H2uTs8qa4w12QvGOVv12m2KjE
16fD2fr8/WKr3UN2sqPA1puxwFWEtNPi8Zu93Ozv8VF6sHeewWOy50F3tWyewNjk
7cfU3OLo7F6fy8HN1Fmax2aw57TN4myhxF2CtJm62Haavf3+/p6+1oSkut3p9aPA
2Hejwd/p8Ed4s/H2+UV6uGms2mCt5HWavGa27Ofs74CoxkB4t/j5+pS30ff5+ZO1
zrDJ2unw9f///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAG4ALAAAAAAQABAAAAeWgG6Cg24dET2EiYRsaTGD
V4qDNR5fgktmkYILIQ1uNFhrmW4CKV02PFttogYoOwkaIKJuQEMMAwqiMG4HWlIU
mWVWOUcOFhUFkQA4CA8nVTIsT5FjI0wbYkQYEjo3ijNcbi1RIhMQUz9qiQFhSmRC
XlBuQWcqiRlOBCscLiUXWUkvFAFoIogKEhM+jMhyg4YEmA9FCAUCADs=
''',
'disk': '''
R0lGODlhEAAQAPemADZqu3uj4nmi4Xqj4/f6/XSd3PH2/Ojw+vf6/nuk4mWNyXeg
3tvn9zpuvTZru3qj4jRntDdsu+Hs+TJhp3qj4Xih4Huj4dnl97vQ77rQ7r3Q7Nvm
9+7z+3We3r7R7NHf9vL2/Pb6/UBrrbjO74SjzmWMyER0v9vn+Njl9jZqtzlsvOrx
++Xt+jJjrF6Jyevx+36o5/f7/snc9Iqn0sfZ9G2Sy+nx+unw+nSe3TJhqqnC546r
1WqTz2iQzXCVzYCq6WmQynGZ2N3o+HyayKS+5NHg97HF4mWNyn6o6OLs+Zq13TJh
qVWCxpWw2r7S8GqSzfP4/czd9bzO58LV8jJiqjhsunKb2Xef3nybydDf9kJ0wDNj
rXaf3vj6/u3y+zVot/P3/TRmsjtuvUN1wHqk40N0vTZqujVotYWl1kJzvcXY8nqi
4G2W046r2GySyzFgqDxpq+/0/HOb2nii4Heg35+64dHg9nKc2zJiqzhru3mYxzVo
tnOb2TRms9/p+H2m5k99w3af3Xuk47PK7aa94e70+/b6/meOya3G642r2YGezHCZ
1nqi4jhsu+rw+vD1/DNkrzJhqOPt+Xqi4Tptu2aNyXSc2t/p+TNlsGGGvH2n5zNk
rq3F6u70/MPV77bM7jRlsfb5/WSMyMLcv4jAYvj7/v///wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKYALAAAAAAQABAAAAj/AAklcjGmQQQHAACoEEPJ
hAIpptDYKVIqRqhHjryEIvDCj4cnRph8+GEohCgOK0CUMpDkRAYNZbRkQWLAgCgb
hQiEOiBkAwVPKRpEgXHgQKUbULrEucTgwhwdeyLI0CRBgqROBMCwaIpiAREIDmj8
EaXgkJsaQEqIWmupTh8AagQNIJMggAUBdLjguFNACSgAUwYMCBAggYAKCzoUkMOn
CSQATh48oGC3wpVABawEWbRjixkMjEqJHk2azQw8eUYIQECqtevWoXiQyNHo0yQE
o3Lrzh2qh6JIVQatYf3adagjWCYAQsSJtPNSQ/SIaOMjzZczEMJg2tSCypI3E+Bk
AgoIADs=
''',
'disk_multiple': '''
R0lGODlhEAAQAPcBAAAAAP//////////////////////////////////////////
/////////////////////26X1Hue1HGY0IKk1miOzWmQzXWa0HOZ0WKLyCBarf//
/////////////////////2WNzLDN8////7PH4////////////////6G/6mCJyf//
/////////////////////1uGx57A732i2Xqd04Cj1Y+u2nea0neb0nec0nGX0GKL
yCBarf///////////////12IyKG/73WZ0bjS9P///7vN5v7///////L2++3x+KG/
6mCJyf///////////////2WNypm46l+JyZu97W6X1Hue1HGY0IKk1miOzWmQzXWa
0HOZ0WKLyCBarf///////2GLyZK15mGLy5687mWNzLDN8////7PH4///////////
/////6G/6mCJyf///////1SBxJe26nOYzqG+6luGx57A7////26TzP////////f7
//H4/4yv5GGKx////////1F/w5q272+WzJG21l2IyKG/7/r8/fv8/v39/vz9/vr7
/fv8/YWo3VN/wf///////1WDxrrO72aOx5y84GWNypm46n6l3YCm3Xyj23qg2Xmg
2Xif2Hie2F2Ev////////zNouliEw1OAxZay7mGLyZK15oGn4oGn4X2j3nuh3Hmf
23ee2XOa1Fd+u////////////////1WDxrrO71SBxJe26urz8+bx7ebx7+bw7+Xx
7e3183mc1URwsP///////////////zNouliEw1F/w5q27+jz6oS/UZjJb5nJcYS/
VOn05Huc3Tppqv///////////////////////1WDxrrO7///3cfuh9f0otf2osfu
jP//4IWi3T5qq////////////////////////zNouliEw2iL03CP4WyN3G2L2m6K
12yLzURtqy5fpv//////////////////////////////////////////////////
/////////////////yH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'folder': '''
R0lGODlhEAAQANUAANiGLNiILdiyLNmOM9myNNqUNdqbNdqjNtqsNdu2POHCXuLD
YOPHa+XKdObNeenBc+vKa+7OiO7PdO/Qeu/Sgu/UlPDLJvHNLvHOMPHPafHQbPHS
cvLQOvLTfPLWg/PTRfPVTPPYjvTXVfTYXPTbnvTck/Tdp/XaZPXbavXgn/bedPbg
fPbhrPbpyffig/fji/jkjvjlk/jqwvnonPnpovrsrPrts/vvufvyyvvz2vv26fzz
0/767v778/7+/gAAACH5BAkAAD8ALAAAAAAQABAAAAaLwJ9wSCwaj8gkkaBYOBWJ
JMFHpeoYWIajQRAierBSisXrmXk8H8LbK5U8EAiFMpFIdOsfQleJPFpmZ2l5B1SB
OTEuKigjOwdCBz04NzUzLyuMIB87BkIGPTY0iSonIiAcGDidPwU8ooqlHxwXFjgF
QgM5JiQlIR4dHRsaGTIDQgAByAHLzMnJStDRQQA7
''',
'page_add': '''
R0lGODlhEAAQAPetANPl/cLc+O3z+NHk/FWQyjZrvPr7/fH1+dPl/M3j/DdrGbLS
ldDk+426ZGS+/Mff+tbn/tfo/s3j+1GNyDZqvMfg+tzq9uvy+Ozy99jo/szh+z1w
IJGugVOMKNXn/Y+8Z0+MyM/k/DltIlW0+G+Ud1O0+FqSykuGx4e5YO/0+kd8fLHW
kme9/LHTkNPm/EiEyI+7akyHyEJ8w9fm8maOTzpvIsjg++zy+NLm/NTm/VWPye30
+Z7X/8Pd+bTY9oy8ZZu4prnO6Pj7/5jX/87j+46tht/s98Td+brW9UiAw7TUlbXU
84GrYVuUzOjx+EyGxvL2+t/p9Ex7Mcnl+nSi0lWRysvi+32y4qDY/4e6YFa092W+
++/0+dfn/tbo/ury+lWQy8jg+WmW3Gmdz8nh+9Pn/cjg+lKNyM3m/IS24lOy+EBx
wEiEx/D0+E2IyJ+8rdXm/dDmunOYYL3a9maNzGGKSmK9/NXl/lS190aAxjdrITVq
u5Cx3fP3+3yq12i//d/r9V2ISou6Yszj+32w4cbf+uzz+ZbW/7vW9d7r9lePLc/j
/O70+MHb+E18xdTm/ISo1fj7/UB3wlKz98Xf+W7C/ZvW/6rA4muY3FCMyLzZ9t7q
9rPS8UuGyOPt9+nv9e30+LjW9Mrh+jZqZMni++bw97bUnOvz+vD0+e30+rvY9tbn
/f///////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAK0ALAAAAAAQABAAAAj/AFsF8dOEgMEqYHSM4WOg
lcNWVAQwAiWg4g0MM65gaujQBJQKZEjZoNQolBAfhMRwPHMgAREJVsIEQBIJzZQ0
kBxOOLCIQYgEoo5oioKhz6ckOlG5eFRmgAQzARAt4dTmicNMXLqs8pADh4YHAeao
ShHDIYgdGTJEgABggIYKPQKk6uTQjSEvEVZtBcBg0INCX144PHEBgt5VcNq+kUOj
jgqHbJzcAQAAAYIBQJgoiQNDEYdWeUqN0IKnhJpJgVqsYPXjw4ZWMjxVwsLD0pBD
Ukyx2r1AQStJgP6w2OLAgZ0agrKwQuG6laMLRhJtsmDhVJEODRY06PD5Ep01BcJT
C6CwZ5QeBSJItAoIADs=
''',
'page_copy': '''
R0lGODlhEAAQAPemAJK75E57yuDt/9zq/8vi/+ry+9fn/9vq/9Ll/83i/NXn/tPm
/G2X0u71+2SM0PH3/9rq/rzZ9+zz/MLc+efx7+fw+vn6/W2X02yX0pO75FyGzV6H
z9fn/OPv+tbn/FaFy6O+4ejy/G+Y0m6Z08jf+muW0unz/dHl/dDk/HCV053D6laB
zmqV0dPn/PP5/vb4+8rh+vL4/8Ti+lKEyr7Z9vP4/pfE7cLc+tLl/NXn//D2/urz
/e31/evz++vy+7TT9czm++jz/sfg+9Hk+3ye1nKk022T0cXe+PT4//X6/pa03K3F
5vP4/+r0+7vX9kZ2yJe04vP4+8Te+fj6/FaDzNTm/ViHzFeDzJa+5f7//5G75FWF
zefx++jy793p9e70/Yi76s3j/HSb1OTv+qK94vH3/trp/W6T0+nx++30/P////L3
/9vq/cvj+4ip3erz8YSm2vD2/M/j++Hs9uvz+nad1u/2/ufy/VeGzHCa1FiDy6/R
8/f5/MTd+ZPB7O/2+5G65O71/Mrg+vL3/lSDymqV08zh+ujx+uz1/drq/057y87k
/HKh0tfo/dbn/VWBynal06HJ7vX5/Ja/5sXe/FmHzPD3/9Hk/FOCz+fy+nqk1O31
/m+Y02SM0W6Z0uv0/Njo/GmS1czi+1aCy9np/97s/////wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKYALAAAAAAQABAAAAjuAOHUueBphIg8SyyYWsjQ
lBg1iHh84tLEBhQ+DRdyspNgEQwaSYBEApHRFINBC1rIOTKngSRNJRnUAMVhgSEV
kwAAKMIIEpkppjAggQCh0RAsWUzsCNEhExg3L0rESMTGjIcMmygJmfDDhQw/Slgw
KSWg1AAAOhKEIRHBC50ohArFIWsWQBkcl0T1cbJnTKVQb+gO0LJGgYITgqREuGPF
SKAeBSSkAWQph6MqKNrcCIKHyJkNDjo5mPGA1AFSBhAQePCh5JYGplGr/kOlJCYK
sVMT6LKi5CgJhyqgKVDAxxc9JVNceRJAUYDnjzQEBAA7
''',
'page_find': '''
R0lGODlhEAAQAPcBAAAAAP///0R9wU2LyE+MyE+MyE+MyE+MyE+NyE+MyU+Lx0uJ
xTt2xDxos////////////////0mFxOzy9+70+O30+O3z+O3z+O3z+Ozy+Ozy99fm
8n2y4mWU2ztos////////////0uIxfL2+sfg+snh+8ni+8jg+8Xf+cHb+LjW9Pj7
/7TY9n2w4WSS2z5rtP///3uHkmJtd2VtdbXI3V5mb1libMvi+8jg+cLc+LrW9fj7
/c3m/Mnl+oS24kJ5v2NsdYCMmIOPnEdMUqe3y0dNVHmFkH+Mmsbd9cTd+bzZ9t/p
9Ozy9/P3++nv9UV+wn2JlVZeZm94hTtBSnqGlUBKVnSDkkBHT7vP5Mjg+sLc+LvW
9bXU87PS8fD0+EaCxDQ5Pk1UW5CgsFFdaWV0hk5YY4iYqkpRWm55hczh+8ff+sLc
+L3a9rvY9u/0+kaDxlZeZlhhaUhPVkZJTXeAjDc7QDxCRjM4PSUoLNHk/Mzh+8fg
+sPd+cLc+O30+kWCxmNsda+5xI2ap3N6gLTD1lxjbKSvu4uYpkZNU9Pl/dDk+8zj
+8ff+sbf+ury+kKAxl9ocau2wWZweVZcYpypul1mbqWxvWp0fisuM9Pl/dHk/M3i
/Mvh+8nh+/L3+z98xVRbY42ap15ncDo/RZGcrU5VXH+KllZeZhgZG9Hk/M7i/Mzi
/Mng+8jh+/f6/Tt2wTg9QjxCSDE+S09UWEKGtiAwPSQ1QRsrNxUsO1Kz91Kz91Gy
90+y90+y9/X5/DZvv////////zx4wePt927C/aDY/57X/5vW/5jX/5bW/5XW/5TV
/5PU/2W++/T3+zFnu////////zhxv9/r9Wi//We9/GW++2S+/GS+/GK9/GO9+2G8
+2C+/GK8/Pj7/S1kuf///////zNpvOrx+N/s997r9t7q9tzq9tzq9uvz+uvz+ury
+vP3/PT4/P3+/ipgt////////zRquzBlujFmuzBmujBmujBmujBlui9lui5luS5l
uS5kuS5kuSxity5ityH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_paste': '''
R0lGODlhEAAQAPepAE57yn+s19mpX8vi/9Ll/9fn/9vq/4Sv2dGXU/3utdOaVtel
XP7vttXn/ury+9GVU7zZ9/PbgPDUddafWX2p1vH3/9yya82MTenx++nNk+nNke70
/enLjp5sLe31/tHl/d3Mtujy/N+dTtDk/Mvj+8rg+tyWQleGzKR1N+DJlcTi+qV2
O+/2+/H3/t2YRIGt13Gb09bn/WmU04i76tuVQevy++fy/erz/YGt2NLl/FiHzPb4
++G5gFaBzvL3/9ywaFWByoGu18Te+enz/bTT9dCSUEZ2yNWgWdyvZt3p9d2zbO71
++vg0/7//92ye7vX9u/apVaFy7eSY26Y0t61b1SDyvLirk57y9agWs3j/NimXc3i
/MLc+tXn/9qqddObVtmpYObGiebEiOfw+nCgzGePs8zi+9Hk/N6bS/D2/s6OTtyX
ROvSmOjz/vD3/7GIVfHdqtTm/ejx+uHSvliDy/j6/NqpdNquZdOcV+K/hOfJi9mq
YKuAStyueOPv+mePyeTv+tilXc+SUKFuLufx78fg++jy71mHzNyWQ9itdMXe/HCV
0/P5/vLhsPP4+7yab3Gfza/R89+2cJ1qKu7l2tqqYH2p2leDzNyVQsjf+uK9ee3W
npPB7Ovz+s+RUOfy+laDzMLc+eC5dFyGzcTd+dSeU+zz/Nnp/5hjIP///wAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKkALAAAAAAQABAAAAj/AFOlooSqYMEOAhMKZCIF
FQoGESIwGITqEQiFK1JYEZFAgoQEaDhkmKQQVSNNLtYgMoGJRqkwqErCESVJiYUD
QQIEIAOpDIw6qVBBofIDjIAXTYbcCOHn0wwZO1BtsoCkkoADHhQVCkWEkQpOU1Cx
ubNHy4IDabZkyQQhSSdHVVBpEBAIywQcLXKcMUPqSSRAh1DpWXAEj4IAPho0+FBC
CAQbOlCJmfAFwYMAbrrEiDOCBJc2J1DlUYBAkCcKFU4ZOFWAwIAKUVDxeFBEzQUK
S1Szds0C1JtETvp4sWOJkO7WAwz1mMPHIKo/puSMweDAQY0NdBQKXHTJCIArAMID
AxkVEAA7
''',
'page_white': '''
R0lGODlhEAAQAMQAAJSUlJWVlZmZmebm5ufn5+np6erq6uvr6+zs7O3t7e/v7/Dw
8PHx8fLy8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAABgALAAAAAAQABAAAAVjICaOZDlO1qWuE2BaDyQz
SnERQXlJlCQ5C8Ml8hCQLpSkJNI4rC7H1MUygTAQhgF09JxWG4rEVtT1RhyNMaZ8
qVAiETXbkpzIV5Z8pTKxH/EWe4J3gHl5hGwqJBSJKhQmkCUhADs=
''',
'page_white_add': '''
R0lGODlhEAAQAPcNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAP///////////////wAAAPj4+Pv7+/z8/Pz8/Pz8/Pz8/Pz8/Pz8/Pj4+JSU
lAAAAAAAAP///////////wAAAPv7+/T09PX19fX19fX19fHx8e/v7+np6fz8/Ofn
55WVlQAAAAAAAP///wAAAAAAAPz8/Pf39/n5+ff39/f39/Pz8/Dw8Orq6vz8/Pb2
9vT09JmZmQAAAP///wAAAAAAAPz8/Pn5+fn5+fn5+ff39/b29vLy8uvr6/z8/Pz8
/Pz8/Pz8/AAAAAAAAAAAAAAAAPz8/Pv7+/z8/Pz8/Pv7+/j4+PX19fHx8ezs7Orq
6ubm5vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/j4+PX19fLy8u/v
7+3t7fz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pv7+/j4+Pb29vPz
8/Ly8vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pr6+vn5+fb2
9vb29vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pv7+7bIq3WZ
YGaOT2GKSjxiJgAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pv7+/v7+/v7+7HEpYGrYbTU
ldDmuo+7alePLTdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pv7+/v7+/r6+mKLSrHTkLHW
kv///4y8ZY+8ZzdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/v7+0x7MbbUnP//
/////////7LSlTdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/D9xIou6Yoe6
YP///4e5YI+8ZzdrGf///wAAAPn5+fz8/Pz8/Pz8/Pz8/Pz8/Pz8/JaxhlOMKI26
ZLLSlY26ZFOMKDdrGf///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC5aFTZq
GTdrGTZqGTJhF////yH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_white_copy': '''
R0lGODlhEAAQANUgAPn5+fLy8sjIyMTExMLCwurq6vPz8/f39+/v78zMzJ6enuvr
6/X19crKysfHx/T09MbGxrm5ucDAwOLi4sXFxeHh4e3t7dHR0f39/fb29vr6+t/f
3/j4+Pv7+8nJyfz8/P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAACAALAAAAAAQABAAAAaGwErDQyRuMKCkEtTQfJ4f
DuG4THo+BwOi8MkMNlXQlZMJLJ4dAXJ57XAYgUvEoRAYkdePOyNxQqVHeXpSWFpc
XhuCHwADUWVnT0RQHxoUg3AWXJJ6HRoQaGRaH5toDlAdABkPo4lFAgqTGgAHo1UY
ApOdTh5heR2/v7VVilAACWGtRUQJE0EAOw==
''',
'page_white_find': '''
R0lGODlhEAAQAPcBAAAAAP///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAP///////////////wAAAPj4+Pv7+/z8/Pz8/Pz8/Pz8/Pz8/Pz8/Pj4
+JSUlAAAAAAAAP///////////wAAAPv7+/T09PX19fX19fX19fHx8e/v7+np6fz8
/Ofn55WVlQAAAAAAAP///3WAi2NsdWZtddHV2V9nb2Jma/f39/Pz8/Dw8Orq6vz8
/Pb29vT09JmZmQAAAGNsdYCMmIOPnEhMUsTGyUdNVHuGkJOWmPHx8fLy8uvr6/z8
/Pz8/Pz8/Pz8/AAAAH2JlVZeZm94hTtBSn2IlUBKVnSDkkBHT+Hh4vX19fHx8ezs
7Orq6ubm5vz8/AAAADQ5Pk1UW5CgsFFdaWV0hk5YY4iYqkpRWoKDhPj4+PX19fLy
8u/v7+3t7fz8/AAAAFZeZlhhaUhPVkhKTYqKizk8QDxCRjM4PSorLPv7+/j4+Pb2
9vPz8/Ly8vz8/AAAAGNsda+5xI2ap3h8gNPU1F5kbKSvu4uYpk9RU/z8/Pr6+vn5
+fb29vb29vz8/AAAAF9ocau2wWZweVldYre4uF1mbqWxvWp0fi4wM/z8/Pv7+/n5
+fn5+fj4+Pz8/AAAAFRbY42ap15ncDtARaurrE5VXH+KllZeZhgZG/v7+/r6+vr6
+vj4+Pj4+Pz8/AAAADg9QjxCSCwxNVVXWbi5uTk7PT0/QjQ2Nzs8PPr6+vr6+vr6
+vr6+vr6+vz8/AAAAP///wAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/v7+/v7+/v7
+/v7+/v7+/z8/AAAAP///wAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8
/Pz8/Pz8/Pz8/AAAAP///////wAAAPn5+fz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8
/Pz8/Pz8/Pn5+QAAAP///////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_white_paste': '''
R0lGODlhEAAQAOZyAN/f3/b29vj4+NegU/n5+fr6+tmnXNefUv3utfPz8+rq6v7v
ts6LRszMzPPbgPf39/Ly8u/v7+C1cKqATal9QZdjINCPSPDUdXhOGf39/daeUsLC
wtCSStOXTeS9fKR1N86NSKFuLreSY+O7etSYTvT09NqbSt+dTt6yadeiYuS/fqh7
P6Z3PaNwOZ5sLeK5dtyraNejV9yuaaV0OuLi4t2uZevg09GUS9elYuC0btmmZPX1
9dyWQtyVQtmlWs2HQ6p+QaV2O96bS96xa9WdUdSbUNWdUMyJRe7l2tGTStyXRO3t
7cuFQuvr66uESd3MttyrYrGIVduqYdqoZdikWOHSvs+OSNOYTuG3c9acUNikYtWc
UMTExKyESdqUQdqoXvn49t+zbdyWQ9KTS51qKr+TUd2xbNuzcryab+S/f9KWTaFq
NKJtNquASvv7+8nJyZhjIPz8/P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAHIALAAAAAAQABAAAAfUgHJySHCFhS6CiYI2InAf
Cw4OCyFwaE+KQWcqJwgXFwhCORJkinBpKCZKYjw9XkRmFaUeNVBfBmVdThQTGG8A
GXJwI1I+AwdAYHHKcQIbv3AvBlQDGhRxDwkRCnEBXABwWDEDRkUrzAEQTctvcBIH
WSRqLHFuAjsQS9vsYVtXNxwzlNULkC0OuyEdxlgA0WKZGwIBShiEIyOJBQZH2CyL
U4DAg4kwrDD4wWTNRjcFChiMgmOKDi0pJgh0Q9ONwSptDFXAsHEZgQaKBAF4Q7To
mwY0AgEAOw==
''',
'page_white_stack': '''
R0lGODlhEAAQAMQAAAAAAGhoaJOTk5SUlJiYmLi4uL29vcLCwsPDw8bGxsfHx9bW
1urq6uvr6+3t7e/v7/Ly8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAA
AAAAAAAAAAAAAAAAACH5BAkAABsALAAAAAAQABAAAAVp4CaOZCliWppVgjlq1WIU
yhG4m2YhqFq5GgwCJqMdTJpMQsdLpSyDV0YRHFYiD4aGQhAlqUodBdJweqffpGUC
cWjPS7SagtWcwVTVhSKxjwBJS04YFxV+UnkqGCg4KhmPGYclTpQXOJchADs=
''',
'page_white_text': '''
R0lGODlhEAAQANUAAJSUlJWVlZmZmaWlpaqqqqysrK6urq+vr7GxsbS0tLi4uLq6
ury8vL29vb6+vsDAwMLCwsPDw8bGxsfHx8jIyMrKysvLy83Nzc7Ozs/Pz9PT09TU
1NXV1dbW1tfX19nZ2dvb293d3ebm5ufn5+np6erq6uzs7O3t7e/v7/Dw8PHx8fLy
8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAADYALAAAAAAQABAAAAaAQJtwSCwOY7SachkDGGkt
l1SFItVGgWINJoPBWKlS7dUSEGuyxyJxIAyWtXOyRou5VKaSKD5UTiAOCgkIBgUn
fEJwSnUvLCuINkoYFRIRDw0MCy+QiosyMjGcNR0aGRcWFBQSoWdLNDQzsbGiISAf
HhwbukmtnXBEMr5LMkbFRUEAOw==
''',
'table': '''
R0lGODlhEAAQANUAAEJ3u1OEw1yMyV2KxWOSzmmW0W6a03Ke1nSg13uk2ny+dn6o
3YGp3oLCfIWt4Iiv4onGgo2y5I7Jh5K25pPLi5a66ZjOkJu86p3QlJ/A7aPUmqXC
6qvG6K7I6bPL6brP6rvQ69nj8OTr9erw9+7y+PP2+v///wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAACcALAAAAAAQABAAAAaSwJNwSCwKN5vM5VKZTCIP
ByOROJau2OwVIbyUNBqMhSKBNBqKkqFbMrnf8FKBjazbNyWCcFLamDhvgG4jAkIR
JRyJHB0eHB6PIYUnhx0mHoFvIQFCDiWPn42OHZpCDCSXIG8fbSaRQgskILKzoCED
QgmxJqlugq23JwgkIyIiIcfIx8AHBgUEAgIB0tMBAEbXREEAOw==
''',
'table_gear': '''
R0lGODlhEAAQANUAAFaHx1h5o1tbW12Nyl6BrmOSzmVlZWmW0WxsbG6a03JycnKG
n3Ke1nSg13uk2ny+dn6o3YGp3oLCfIWFhYWt4Iiv4omJiYnGgo2y5I7Jh5K25pOT
k5PLi5a66ZjOkJqampu86p3QlJ6jq5/A7aKioqPUmqXC6qurq6vG6K7J6bHF37PM
6rW1tbbBzrfE1bu7u7vQ68LCwsrKytHX4NTU1NnZ2dzl8uHh4eTr9ejo6Orw9+7y
+PP2+v7+/gAAAAAAACH5BAkAAD4ALAAAAAAQABAAAAa1QJ9wSCz6TKYRCNTRaDAV
SsThEJp42KwW2xCCeKVSyMPJXCSSBy/h5fXe8Djv0Eba79eCUMMz9VBwgC8sOgNC
GDwoiigpLi8qMS8zIiQ+iCk9K28vNS80nTQWPhQ8K6amLTQnExYsMhs+ETuaMD2u
ORNvHzcsExA7MMEwJCw3H6YfNB8GDsA9tT0yMi8fHywxCD4NOzo4ODY2JDLiMSTY
PgwJBwUDAwALCAQkFgECAkZFCvdDQQA7
''',
'table_multiple': '''
R0lGODlhEAAQANUAAEJ3u1OExFyMyV2KxWOSzmmW0W6Z03Ke1nSg13uk23y+dn+o
3ICp3YLCfIWs4Iew2oiv4onGgouz3I2y45K435O25pPLi5a455u96pu/457B5Z/A
7aLD46PUmqTC6qTLxqTLyavG6K7I6a7K8LHK6Nnj8OXs9unv+Orw9+3y+PP2+vX4
+////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAAC0ALAAAAAAQABAAAAaXwJZwSCwKPZ4NpjKBOBiJ
RNGjqlqtiCJG9eFoMhTJY7KIDrejlXqtOqmywi0mBeIomU6otKLCrEJXgVkTfSlb
HR0WEQ0NCioGLYQXK1sslpcsKgUtDioVhipIokgqBC0IKXx8HiwhmCgCLQcGDiaE
IbghJLslsUKohCKtmCUBQ7Odu7kkIsVFqCgmJdPUA0WzBQQB29wAQQA7
''',
'zoom_in': '''
R0lGODlhEAAQAOZ8APTr4uny+vn7/T6DN+jx+dSwcPf6/fbv5L6HTeHJuFOeS1yo
Uu/1+zV5MPTs3Ony+YvGg+nXpdKuhPn7/t3Ckd7EjebRryprJuTOrNi5i72FTMqf
ZTJ0LNKubTBxK+jVo97Eo8OSW9KtbPHl2N/Fj/D2+2OyWfLn2ePMmb+LUOXPqde1
fffw5d3DkdCoatm7jMGOWHa3bd7Dpuzz+ovHhePNu/P4/ODHky5vKcyhZ2WnXmGw
V8+oY2usY9Grg8GPWs2mYsiaYMmbYc6nY/H3/J7RlZ/Sl9/Fo+bRrjN2LubRudGq
dsORVvH2++LLuYbFfbyEUffx7eTMrPHm2LmASMqgb/r29JdhRprPkl+tVoLCffPo
2rZ7Uffv5de2fezcv+71+/L3/ESLPefTuqxlP82naN/Ep9a1f8mbY82kcdq7gK5t
SKbVnZDKiM+pZdKtd+z0+k2WRV6rVOfToLd5Ute3fVqbU2e2XPjx7byDT+ry+uvz
+v///wAAAAAAAAAAACH5BAEAAHwALAAAAAAQABAAAAe6gHyCg4SFgw4tHW5DLi9b
hnxfBXUWLAcYbzljhQ4FKgYMentNAkdoU4QUXgZ7BA8BemACaRKEIkglrrB7e2Fm
IYQ8XXuwonc7CwAphEAHM3qie1lsCgAIhGVSRLwmcjFFPWIDhBlLAgxwC0ZYT20Q
DYQnGyATNgpxOjR2STg1hEpBqsgAAGCAFg4oKuTBQ2iEjx8INDTwcOFDBDVkokAS
5AQGiTk3hFzZKCgBlBVnmHAhWXINFTpW+AQCADs=
''',
'zoom_out': '''
R0lGODlhEAAQAOZ0APTr4u/1+/n7/eny+uzz+tSwcPbv5Ojx+fRFSO71++waI/Ts
3O4mLvdUVvpjYvxvbff6/fE1Or6HTeny+ez0+sGPWvjx7c2mYuPMmdKtd9Grg/D2
+/Hl2PHm2MORVunXpbyEUc6nY9/Fj9a1f8mbY/H3/OfTuuoRHL+LUObRrvPo2vfw
5d3Dkd7DptCoavn7/va2rvjy782kceDHk+LLueHJuNGqdvfv5eDHtvjIv/54dNu/
h9i5i8qfZdm7jPH2+7uGUtKubde2fd7EjfSrpN7Eo9KuhM+oY7FyRffx7ebRuejV
o8mbYeXPqbyDT8GNU9q7gN/Ep82naPL3/PfAt+zcv7uBTN3CkeTMrM+pZePNu8GO
WL2FTMOSW5dhRrNzS/Ln2bmASMqgb/P4/KxlP+bRr9y+pNa0eefToNe1faVcM8ia
YNe3fdKtbN/Fo8yhZ+TOrPOgm+ry+uvz+v///wAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAHQALAAAAAAQABAAAAe4gHSCg4SFgwssQVkhLj4q
hnRVBWxlKwZwGW8mhQsFTRABcnM/Am4kHYRXQhBzBxMDcgkCMkaEbSkbrrBzc1NR
XYRHN3OwonMECQAohBcGBHLGBBQBABKEUlglvDoPDg0IEQyEPDYCARQPOVQwRHEK
hGA9RS9j3uAMCidahEprYi0AAJh5cgbDECcWCHHQUEECFyBWdiz5AIVMEkiCaGwR
gWYGEy8YBdUAkWaEByQhBeFQE+ZLDDqBAAA7
''',
}
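# Illustrative sketch (added here for clarity, not part of the generated
# file): the values above are base64-encoded GIF images, which Tk can consume
# directly, for example:
#
#     import tkinter as tk
#     root = tk.Tk()
#     icon = tk.PhotoImage(data=data['folder'])
#
# How rwb itself wires these icons into its widgets is not shown here; this
# only illustrates one common way to materialize the encoded data.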
| 61.198091
| 73
| 0.77338
|
'''
This data was automatically generated by script img2data.py.
These images are part of the FAMFAMFAM silk icons set which is
provided under a creative commons license. For more information see
http://www.famfamfam.com/lab/icons/silk/
'''
data = {
'cancel': '''
R0lGODlhEAAQAPcAAAAAAAAAMwAAZgAAmQAAzAAA/wArAAArMwArZgArmQArzAAr
/wBVAABVMwBVZgBVmQBVzABV/wCAAACAMwCAZgCAmQCAzACA/wCqAACqMwCqZgCq
mQCqzACq/wDVAADVMwDVZgDVmQDVzADV/wD/AAD/MwD/ZgD/mQD/zAD//zMAADMA
MzMAZjMAmTMAzDMA/zMrADMrMzMrZjMrmTMrzDMr/zNVADNVMzNVZjNVmTNVzDNV
/zOAADOAMzOAZjOAmTOAzDOA/zOqADOqMzOqZjOqmTOqzDOq/zPVADPVMzPVZjPV
mTPVzDPV/zP/ADP/MzP/ZjP/mTP/zDP//2YAAGYAM2YAZmYAmWYAzGYA/2YrAGYr
M2YrZmYrmWYrzGYr/2ZVAGZVM2ZVZmZVmWZVzGZV/2aAAGaAM2aAZmaAmWaAzGaA
/2aqAGaqM2aqZmaqmWaqzGaq/2bVAGbVM2bVZmbVmWbVzGbV/2b/AGb/M2b/Zmb/
mWb/zGb//5kAAJkAM5kAZpkAmZkAzJkA/5krAJkrM5krZpkrmZkrzJkr/5lVAJlV
M5lVZplVmZlVzJlV/5mAAJmAM5mAZpmAmZmAzJmA/5mqAJmqM5mqZpmqmZmqzJmq
/5nVAJnVM5nVZpnVmZnVzJnV/5n/AJn/M5n/Zpn/mZn/zJn//8wAAMwAM8wAZswA
mcwAzMwA/8wrAMwrM8wrZswrmcwrzMwr/8xVAMxVM8xVZsxVmcxVzMxV/8yAAMyA
M8yAZsyAmcyAzMyA/8yqAMyqM8yqZsyqmcyqzMyq/8zVAMzVM8zVZszVmczVzMzV
/8z/AMz/M8z/Zsz/mcz/zMz///8AAP8AM/8AZv8Amf8AzP8A//8rAP8rM/8rZv8r
mf8rzP8r//9VAP9VM/9VZv9Vmf9VzP9V//+AAP+AM/+AZv+Amf+AzP+A//+qAP+q
M/+qZv+qmf+qzP+q///VAP/VM//VZv/Vmf/VzP/V////AP//M///Zv//mf//zP//
/wAAAAAAAAAAAAAAACH5BAEAAPwALAAAAAAQABAAAAiWAPcJHEiwYEFpCBMiNLhP
WjZz4CB+A5dN2sGH2TJm+7ax4kCHEOlx3EgPHEeLDc1loydwokB6G1EJlEYRHMt6
+1hW/IaSpreN+/ThzIYq5kyKGffV07ePpzSeMzl+UypU6aunMhtSdCcwI0t606A2
3PjN3VVXK2NO+/iKIzZp0xB+Q4Xt4re7te4WZSgNVV+EfhkKLhgQADs=
''',
'cog': '''
R0lGODlhEAAQAOYAAElJSU5OTlFRUVJSUlNTU1hYWFtbW2FhYWJiYmRkZGtra21t
bW5ubm9vb3FxcXl5eYCAgIGBgYKCgoODg4WFhYeHh4mJiYyMjI+Pj5ycnJ6enqCg
oKGhoaOjo6Wlpaampqenp6ioqKqqqqurq6ysrLCwsLGxsbKysrW1tbe3t7m5ubq6
ury8vL29vb6+vr+/v8DAwMHBwcLCwsPDw8TExMXFxcbGxsfHx8jIyMnJycrKysvL
y8zMzM3Nzc7Ozs/Pz9DQ0NHR0dLS0tTU1NXV1dbW1tjY2NnZ2dvb29zc3N3d3d7e
3uLi4uTk5OXl5efn5+np6e3t7QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAAFIALAAAAAAQABAAAAfDgFKCgzs3NIOIiVI3Tk0k
ioJBPjpSMktKJ1IkIIhASVFIMi5FQyUkO08piDRJQ0dIpEdCOzgPDohDPDkrGRwy
NjEpFoI4NDBGPSwSghgzMj0XFRM0UEU5Ph6IJDQrNkoKL0xCNj0miCEyKTBCBx0Y
Gz82PBrMMSwqCQUEgiQ1MTU3RICI4QKFCEQjPhCpsSNIjhs8arTYwQARiyUfJlCg
IWMBgw9CIAxA1CCBlAmFEEgpEAAAJCkRWpww8DJRAQEjEwUCADs=
''',
'control_play': '''
R0lGODlhEAAQAPZDAHNzc3NzdHV1dXV1dnZ2d3h4eHh4eXl5eo6OjpCQkJKSkpOT
k5aWlpeXl5mZmZycnJ2dnaKioqenp6qqqqurq7Ozs7m5ucDAwMLCwsPDw8TExMbG
xsnJyc3Nzc7OztHR0dLS0tPT09TU1NXV1dbW1tfX19jY2N3d3eHh4eLi4uPj4+Tk
5OXl5ebm5ufn5+jo6Onp6erq6uvr6+zs7O3t7e7u7u/v7/Dw8PHx8fLy8vPz8/T0
9PX19ff39/j4+Pr6+vv7+/z8/P7+/v///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAEQALAAAAAAQABAAAAebgESCg4SFhiYliSEhhoMm
MjtAQDouHY0mPUM9OjqZPRyFJkBDNjYYNjIymhaDJaqlNgIYqS5DJxWCJZ2pMgIB
FjIuPTYUuUClqQIFBx0uLkATgiHHvMoCNiouOhHSnS7BvjYuKTopEIIdKUPOLgDi
KSlAGw6Dp+su2PBDKfSEFjZDKGkDwq/RhGacUlhY0EgQBAYMFiBA0LAioUAAOw==
''',
'cut': '''
R0lGODlhEAAQAOZuAOnt8VaOvKnE19zp9Vum2Pv8/aTB1qXC12Cq3KbC2KrF2KvG
2ZK20eTt84WryVyj0mCFroetyGe372ex5Zy804Oqx9Dg8OLm6aXN77PF0cTW57fH
0ujs79vm7qC+1k14r8vc567I3nWiyl+m1lF6r1qi0mGdxmWz6s7e7cDU5ubq7V+K
uIOow4apwU16svDy9P39/vf5++Hr+FOQwdzn7q7H2uTs8qa4w12QvGOVv12m2KjE
16fD2fr8/WKr3UN2sqPA1puxwFWEtNPi8Zu93Ozv8VF6sHeewWOy50F3tWyewNjk
7cfU3OLo7F6fy8HN1Fmax2aw57TN4myhxF2CtJm62Haavf3+/p6+1oSkut3p9aPA
2Hejwd/p8Ed4s/H2+UV6uGms2mCt5HWavGa27Ofs74CoxkB4t/j5+pS30ff5+ZO1
zrDJ2unw9f///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAG4ALAAAAAAQABAAAAeWgG6Cg24dET2EiYRsaTGD
V4qDNR5fgktmkYILIQ1uNFhrmW4CKV02PFttogYoOwkaIKJuQEMMAwqiMG4HWlIU
mWVWOUcOFhUFkQA4CA8nVTIsT5FjI0wbYkQYEjo3ijNcbi1RIhMQUz9qiQFhSmRC
XlBuQWcqiRlOBCscLiUXWUkvFAFoIogKEhM+jMhyg4YEmA9FCAUCADs=
''',
'disk': '''
R0lGODlhEAAQAPemADZqu3uj4nmi4Xqj4/f6/XSd3PH2/Ojw+vf6/nuk4mWNyXeg
3tvn9zpuvTZru3qj4jRntDdsu+Hs+TJhp3qj4Xih4Huj4dnl97vQ77rQ7r3Q7Nvm
9+7z+3We3r7R7NHf9vL2/Pb6/UBrrbjO74SjzmWMyER0v9vn+Njl9jZqtzlsvOrx
++Xt+jJjrF6Jyevx+36o5/f7/snc9Iqn0sfZ9G2Sy+nx+unw+nSe3TJhqqnC546r
1WqTz2iQzXCVzYCq6WmQynGZ2N3o+HyayKS+5NHg97HF4mWNyn6o6OLs+Zq13TJh
qVWCxpWw2r7S8GqSzfP4/czd9bzO58LV8jJiqjhsunKb2Xef3nybydDf9kJ0wDNj
rXaf3vj6/u3y+zVot/P3/TRmsjtuvUN1wHqk40N0vTZqujVotYWl1kJzvcXY8nqi
4G2W046r2GySyzFgqDxpq+/0/HOb2nii4Heg35+64dHg9nKc2zJiqzhru3mYxzVo
tnOb2TRms9/p+H2m5k99w3af3Xuk47PK7aa94e70+/b6/meOya3G642r2YGezHCZ
1nqi4jhsu+rw+vD1/DNkrzJhqOPt+Xqi4Tptu2aNyXSc2t/p+TNlsGGGvH2n5zNk
rq3F6u70/MPV77bM7jRlsfb5/WSMyMLcv4jAYvj7/v///wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKYALAAAAAAQABAAAAj/AAklcjGmQQQHAACoEEPJ
hAIpptDYKVIqRqhHjryEIvDCj4cnRph8+GEohCgOK0CUMpDkRAYNZbRkQWLAgCgb
hQiEOiBkAwVPKRpEgXHgQKUbULrEucTgwhwdeyLI0CRBgqROBMCwaIpiAREIDmj8
EaXgkJsaQEqIWmupTh8AagQNIJMggAUBdLjguFNACSgAUwYMCBAggYAKCzoUkMOn
CSQATh48oGC3wpVABawEWbRjixkMjEqJHk2azQw8eUYIQECqtevWoXiQyNHo0yQE
o3Lrzh2qh6JIVQatYf3adagjWCYAQsSJtPNSQ/SIaOMjzZczEMJg2tSCypI3E+Bk
AgoIADs=
''',
'disk_multiple': '''
R0lGODlhEAAQAPcBAAAAAP//////////////////////////////////////////
/////////////////////26X1Hue1HGY0IKk1miOzWmQzXWa0HOZ0WKLyCBarf//
/////////////////////2WNzLDN8////7PH4////////////////6G/6mCJyf//
/////////////////////1uGx57A732i2Xqd04Cj1Y+u2nea0neb0nec0nGX0GKL
yCBarf///////////////12IyKG/73WZ0bjS9P///7vN5v7///////L2++3x+KG/
6mCJyf///////////////2WNypm46l+JyZu97W6X1Hue1HGY0IKk1miOzWmQzXWa
0HOZ0WKLyCBarf///////2GLyZK15mGLy5687mWNzLDN8////7PH4///////////
/////6G/6mCJyf///////1SBxJe26nOYzqG+6luGx57A7////26TzP////////f7
//H4/4yv5GGKx////////1F/w5q272+WzJG21l2IyKG/7/r8/fv8/v39/vz9/vr7
/fv8/YWo3VN/wf///////1WDxrrO72aOx5y84GWNypm46n6l3YCm3Xyj23qg2Xmg
2Xif2Hie2F2Ev////////zNouliEw1OAxZay7mGLyZK15oGn4oGn4X2j3nuh3Hmf
23ee2XOa1Fd+u////////////////1WDxrrO71SBxJe26urz8+bx7ebx7+bw7+Xx
7e3183mc1URwsP///////////////zNouliEw1F/w5q27+jz6oS/UZjJb5nJcYS/
VOn05Huc3Tppqv///////////////////////1WDxrrO7///3cfuh9f0otf2osfu
jP//4IWi3T5qq////////////////////////zNouliEw2iL03CP4WyN3G2L2m6K
12yLzURtqy5fpv//////////////////////////////////////////////////
/////////////////yH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'folder': '''
R0lGODlhEAAQANUAANiGLNiILdiyLNmOM9myNNqUNdqbNdqjNtqsNdu2POHCXuLD
YOPHa+XKdObNeenBc+vKa+7OiO7PdO/Qeu/Sgu/UlPDLJvHNLvHOMPHPafHQbPHS
cvLQOvLTfPLWg/PTRfPVTPPYjvTXVfTYXPTbnvTck/Tdp/XaZPXbavXgn/bedPbg
fPbhrPbpyffig/fji/jkjvjlk/jqwvnonPnpovrsrPrts/vvufvyyvvz2vv26fzz
0/767v778/7+/gAAACH5BAkAAD8ALAAAAAAQABAAAAaLwJ9wSCwaj8gkkaBYOBWJ
JMFHpeoYWIajQRAierBSisXrmXk8H8LbK5U8EAiFMpFIdOsfQleJPFpmZ2l5B1SB
OTEuKigjOwdCBz04NzUzLyuMIB87BkIGPTY0iSonIiAcGDidPwU8ooqlHxwXFjgF
QgM5JiQlIR4dHRsaGTIDQgAByAHLzMnJStDRQQA7
''',
'page_add': '''
R0lGODlhEAAQAPetANPl/cLc+O3z+NHk/FWQyjZrvPr7/fH1+dPl/M3j/DdrGbLS
ldDk+426ZGS+/Mff+tbn/tfo/s3j+1GNyDZqvMfg+tzq9uvy+Ozy99jo/szh+z1w
IJGugVOMKNXn/Y+8Z0+MyM/k/DltIlW0+G+Ud1O0+FqSykuGx4e5YO/0+kd8fLHW
kme9/LHTkNPm/EiEyI+7akyHyEJ8w9fm8maOTzpvIsjg++zy+NLm/NTm/VWPye30
+Z7X/8Pd+bTY9oy8ZZu4prnO6Pj7/5jX/87j+46tht/s98Td+brW9UiAw7TUlbXU
84GrYVuUzOjx+EyGxvL2+t/p9Ex7Mcnl+nSi0lWRysvi+32y4qDY/4e6YFa092W+
++/0+dfn/tbo/ury+lWQy8jg+WmW3Gmdz8nh+9Pn/cjg+lKNyM3m/IS24lOy+EBx
wEiEx/D0+E2IyJ+8rdXm/dDmunOYYL3a9maNzGGKSmK9/NXl/lS190aAxjdrITVq
u5Cx3fP3+3yq12i//d/r9V2ISou6Yszj+32w4cbf+uzz+ZbW/7vW9d7r9lePLc/j
/O70+MHb+E18xdTm/ISo1fj7/UB3wlKz98Xf+W7C/ZvW/6rA4muY3FCMyLzZ9t7q
9rPS8UuGyOPt9+nv9e30+LjW9Mrh+jZqZMni++bw97bUnOvz+vD0+e30+rvY9tbn
/f///////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAK0ALAAAAAAQABAAAAj/AFsF8dOEgMEqYHSM4WOg
lcNWVAQwAiWg4g0MM65gaujQBJQKZEjZoNQolBAfhMRwPHMgAREJVsIEQBIJzZQ0
kBxOOLCIQYgEoo5oioKhz6ckOlG5eFRmgAQzARAt4dTmicNMXLqs8pADh4YHAeao
ShHDIYgdGTJEgABggIYKPQKk6uTQjSEvEVZtBcBg0INCX144PHEBgt5VcNq+kUOj
jgqHbJzcAQAAAYIBQJgoiQNDEYdWeUqN0IKnhJpJgVqsYPXjw4ZWMjxVwsLD0pBD
Ukyx2r1AQStJgP6w2OLAgZ0agrKwQuG6laMLRhJtsmDhVJEODRY06PD5Ep01BcJT
C6CwZ5QeBSJItAoIADs=
''',
'page_copy': '''
R0lGODlhEAAQAPemAJK75E57yuDt/9zq/8vi/+ry+9fn/9vq/9Ll/83i/NXn/tPm
/G2X0u71+2SM0PH3/9rq/rzZ9+zz/MLc+efx7+fw+vn6/W2X02yX0pO75FyGzV6H
z9fn/OPv+tbn/FaFy6O+4ejy/G+Y0m6Z08jf+muW0unz/dHl/dDk/HCV053D6laB
zmqV0dPn/PP5/vb4+8rh+vL4/8Ti+lKEyr7Z9vP4/pfE7cLc+tLl/NXn//D2/urz
/e31/evz++vy+7TT9czm++jz/sfg+9Hk+3ye1nKk022T0cXe+PT4//X6/pa03K3F
5vP4/+r0+7vX9kZ2yJe04vP4+8Te+fj6/FaDzNTm/ViHzFeDzJa+5f7//5G75FWF
zefx++jy793p9e70/Yi76s3j/HSb1OTv+qK94vH3/trp/W6T0+nx++30/P////L3
/9vq/cvj+4ip3erz8YSm2vD2/M/j++Hs9uvz+nad1u/2/ufy/VeGzHCa1FiDy6/R
8/f5/MTd+ZPB7O/2+5G65O71/Mrg+vL3/lSDymqV08zh+ujx+uz1/drq/057y87k
/HKh0tfo/dbn/VWBynal06HJ7vX5/Ja/5sXe/FmHzPD3/9Hk/FOCz+fy+nqk1O31
/m+Y02SM0W6Z0uv0/Njo/GmS1czi+1aCy9np/97s/////wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKYALAAAAAAQABAAAAjuAOHUueBphIg8SyyYWsjQ
lBg1iHh84tLEBhQ+DRdyspNgEQwaSYBEApHRFINBC1rIOTKngSRNJRnUAMVhgSEV
kwAAKMIIEpkppjAggQCh0RAsWUzsCNEhExg3L0rESMTGjIcMmygJmfDDhQw/Slgw
KSWg1AAAOhKEIRHBC50ohArFIWsWQBkcl0T1cbJnTKVQb+gO0LJGgYITgqREuGPF
SKAeBSSkAWQph6MqKNrcCIKHyJkNDjo5mPGA1AFSBhAQePCh5JYGplGr/kOlJCYK
sVMT6LKi5CgJhyqgKVDAxxc9JVNceRJAUYDnjzQEBAA7
''',
'page_find': '''
R0lGODlhEAAQAPcBAAAAAP///0R9wU2LyE+MyE+MyE+MyE+MyE+NyE+MyU+Lx0uJ
xTt2xDxos////////////////0mFxOzy9+70+O30+O3z+O3z+O3z+Ozy+Ozy99fm
8n2y4mWU2ztos////////////0uIxfL2+sfg+snh+8ni+8jg+8Xf+cHb+LjW9Pj7
/7TY9n2w4WSS2z5rtP///3uHkmJtd2VtdbXI3V5mb1libMvi+8jg+cLc+LrW9fj7
/c3m/Mnl+oS24kJ5v2NsdYCMmIOPnEdMUqe3y0dNVHmFkH+Mmsbd9cTd+bzZ9t/p
9Ozy9/P3++nv9UV+wn2JlVZeZm94hTtBSnqGlUBKVnSDkkBHT7vP5Mjg+sLc+LvW
9bXU87PS8fD0+EaCxDQ5Pk1UW5CgsFFdaWV0hk5YY4iYqkpRWm55hczh+8ff+sLc
+L3a9rvY9u/0+kaDxlZeZlhhaUhPVkZJTXeAjDc7QDxCRjM4PSUoLNHk/Mzh+8fg
+sPd+cLc+O30+kWCxmNsda+5xI2ap3N6gLTD1lxjbKSvu4uYpkZNU9Pl/dDk+8zj
+8ff+sbf+ury+kKAxl9ocau2wWZweVZcYpypul1mbqWxvWp0fisuM9Pl/dHk/M3i
/Mvh+8nh+/L3+z98xVRbY42ap15ncDo/RZGcrU5VXH+KllZeZhgZG9Hk/M7i/Mzi
/Mng+8jh+/f6/Tt2wTg9QjxCSDE+S09UWEKGtiAwPSQ1QRsrNxUsO1Kz91Kz91Gy
90+y90+y9/X5/DZvv////////zx4wePt927C/aDY/57X/5vW/5jX/5bW/5XW/5TV
/5PU/2W++/T3+zFnu////////zhxv9/r9Wi//We9/GW++2S+/GS+/GK9/GO9+2G8
+2C+/GK8/Pj7/S1kuf///////zNpvOrx+N/s997r9t7q9tzq9tzq9uvz+uvz+ury
+vP3/PT4/P3+/ipgt////////zRquzBlujFmuzBmujBmujBmujBlui9lui5luS5l
uS5kuS5kuSxity5ityH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_paste': '''
R0lGODlhEAAQAPepAE57yn+s19mpX8vi/9Ll/9fn/9vq/4Sv2dGXU/3utdOaVtel
XP7vttXn/ury+9GVU7zZ9/PbgPDUddafWX2p1vH3/9yya82MTenx++nNk+nNke70
/enLjp5sLe31/tHl/d3Mtujy/N+dTtDk/Mvj+8rg+tyWQleGzKR1N+DJlcTi+qV2
O+/2+/H3/t2YRIGt13Gb09bn/WmU04i76tuVQevy++fy/erz/YGt2NLl/FiHzPb4
++G5gFaBzvL3/9ywaFWByoGu18Te+enz/bTT9dCSUEZ2yNWgWdyvZt3p9d2zbO71
++vg0/7//92ye7vX9u/apVaFy7eSY26Y0t61b1SDyvLirk57y9agWs3j/NimXc3i
/MLc+tXn/9qqddObVtmpYObGiebEiOfw+nCgzGePs8zi+9Hk/N6bS/D2/s6OTtyX
ROvSmOjz/vD3/7GIVfHdqtTm/ejx+uHSvliDy/j6/NqpdNquZdOcV+K/hOfJi9mq
YKuAStyueOPv+mePyeTv+tilXc+SUKFuLufx78fg++jy71mHzNyWQ9itdMXe/HCV
0/P5/vLhsPP4+7yab3Gfza/R89+2cJ1qKu7l2tqqYH2p2leDzNyVQsjf+uK9ee3W
npPB7Ovz+s+RUOfy+laDzMLc+eC5dFyGzcTd+dSeU+zz/Nnp/5hjIP///wAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKkALAAAAAAQABAAAAj/AFOlooSqYMEOAhMKZCIF
FQoGESIwGITqEQiFK1JYEZFAgoQEaDhkmKQQVSNNLtYgMoGJRqkwqErCESVJiYUD
QQIEIAOpDIw6qVBBofIDjIAXTYbcCOHn0wwZO1BtsoCkkoADHhQVCkWEkQpOU1Cx
ubNHy4IDabZkyQQhSSdHVVBpEBAIywQcLXKcMUPqSSRAh1DpWXAEj4IAPho0+FBC
CAQbOlCJmfAFwYMAbrrEiDOCBJc2J1DlUYBAkCcKFU4ZOFWAwIAKUVDxeFBEzQUK
S1Szds0C1JtETvp4sWOJkO7WAwz1mMPHIKo/puSMweDAQY0NdBQKXHTJCIArAMID
AxkVEAA7
''',
'page_white': '''
R0lGODlhEAAQAMQAAJSUlJWVlZmZmebm5ufn5+np6erq6uvr6+zs7O3t7e/v7/Dw
8PHx8fLy8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAABgALAAAAAAQABAAAAVjICaOZDlO1qWuE2BaDyQz
SnERQXlJlCQ5C8Ml8hCQLpSkJNI4rC7H1MUygTAQhgF09JxWG4rEVtT1RhyNMaZ8
qVAiETXbkpzIV5Z8pTKxH/EWe4J3gHl5hGwqJBSJKhQmkCUhADs=
''',
'page_white_add': '''
R0lGODlhEAAQAPcNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAP///////////////wAAAPj4+Pv7+/z8/Pz8/Pz8/Pz8/Pz8/Pz8/Pj4+JSU
lAAAAAAAAP///////////wAAAPv7+/T09PX19fX19fX19fHx8e/v7+np6fz8/Ofn
55WVlQAAAAAAAP///wAAAAAAAPz8/Pf39/n5+ff39/f39/Pz8/Dw8Orq6vz8/Pb2
9vT09JmZmQAAAP///wAAAAAAAPz8/Pn5+fn5+fn5+ff39/b29vLy8uvr6/z8/Pz8
/Pz8/Pz8/AAAAAAAAAAAAAAAAPz8/Pv7+/z8/Pz8/Pv7+/j4+PX19fHx8ezs7Orq
6ubm5vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/j4+PX19fLy8u/v
7+3t7fz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pv7+/j4+Pb29vPz
8/Ly8vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pr6+vn5+fb2
9vb29vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pv7+7bIq3WZ
YGaOT2GKSjxiJgAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pv7+/v7+/v7+7HEpYGrYbTU
ldDmuo+7alePLTdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pv7+/v7+/r6+mKLSrHTkLHW
kv///4y8ZY+8ZzdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/v7+0x7MbbUnP//
/////////7LSlTdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/D9xIou6Yoe6
YP///4e5YI+8ZzdrGf///wAAAPn5+fz8/Pz8/Pz8/Pz8/Pz8/Pz8/JaxhlOMKI26
ZLLSlY26ZFOMKDdrGf///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC5aFTZq
GTdrGTZqGTJhF////yH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_white_copy': '''
R0lGODlhEAAQANUgAPn5+fLy8sjIyMTExMLCwurq6vPz8/f39+/v78zMzJ6enuvr
6/X19crKysfHx/T09MbGxrm5ucDAwOLi4sXFxeHh4e3t7dHR0f39/fb29vr6+t/f
3/j4+Pv7+8nJyfz8/P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAACAALAAAAAAQABAAAAaGwErDQyRuMKCkEtTQfJ4f
DuG4THo+BwOi8MkMNlXQlZMJLJ4dAXJ57XAYgUvEoRAYkdePOyNxQqVHeXpSWFpc
XhuCHwADUWVnT0RQHxoUg3AWXJJ6HRoQaGRaH5toDlAdABkPo4lFAgqTGgAHo1UY
ApOdTh5heR2/v7VVilAACWGtRUQJE0EAOw==
''',
'page_white_find': '''
R0lGODlhEAAQAPcBAAAAAP///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAP///////////////wAAAPj4+Pv7+/z8/Pz8/Pz8/Pz8/Pz8/Pz8/Pj4
+JSUlAAAAAAAAP///////////wAAAPv7+/T09PX19fX19fX19fHx8e/v7+np6fz8
/Ofn55WVlQAAAAAAAP///3WAi2NsdWZtddHV2V9nb2Jma/f39/Pz8/Dw8Orq6vz8
/Pb29vT09JmZmQAAAGNsdYCMmIOPnEhMUsTGyUdNVHuGkJOWmPHx8fLy8uvr6/z8
/Pz8/Pz8/Pz8/AAAAH2JlVZeZm94hTtBSn2IlUBKVnSDkkBHT+Hh4vX19fHx8ezs
7Orq6ubm5vz8/AAAADQ5Pk1UW5CgsFFdaWV0hk5YY4iYqkpRWoKDhPj4+PX19fLy
8u/v7+3t7fz8/AAAAFZeZlhhaUhPVkhKTYqKizk8QDxCRjM4PSorLPv7+/j4+Pb2
9vPz8/Ly8vz8/AAAAGNsda+5xI2ap3h8gNPU1F5kbKSvu4uYpk9RU/z8/Pr6+vn5
+fb29vb29vz8/AAAAF9ocau2wWZweVldYre4uF1mbqWxvWp0fi4wM/z8/Pv7+/n5
+fn5+fj4+Pz8/AAAAFRbY42ap15ncDtARaurrE5VXH+KllZeZhgZG/v7+/r6+vr6
+vj4+Pj4+Pz8/AAAADg9QjxCSCwxNVVXWbi5uTk7PT0/QjQ2Nzs8PPr6+vr6+vr6
+vr6+vr6+vz8/AAAAP///wAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/v7+/v7+/v7
+/v7+/v7+/z8/AAAAP///wAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8
/Pz8/Pz8/Pz8/AAAAP///////wAAAPn5+fz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8
/Pz8/Pz8/Pn5+QAAAP///////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_white_paste': '''
R0lGODlhEAAQAOZyAN/f3/b29vj4+NegU/n5+fr6+tmnXNefUv3utfPz8+rq6v7v
ts6LRszMzPPbgPf39/Ly8u/v7+C1cKqATal9QZdjINCPSPDUdXhOGf39/daeUsLC
wtCSStOXTeS9fKR1N86NSKFuLreSY+O7etSYTvT09NqbSt+dTt6yadeiYuS/fqh7
P6Z3PaNwOZ5sLeK5dtyraNejV9yuaaV0OuLi4t2uZevg09GUS9elYuC0btmmZPX1
9dyWQtyVQtmlWs2HQ6p+QaV2O96bS96xa9WdUdSbUNWdUMyJRe7l2tGTStyXRO3t
7cuFQuvr66uESd3MttyrYrGIVduqYdqoZdikWOHSvs+OSNOYTuG3c9acUNikYtWc
UMTExKyESdqUQdqoXvn49t+zbdyWQ9KTS51qKr+TUd2xbNuzcryab+S/f9KWTaFq
NKJtNquASvv7+8nJyZhjIPz8/P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAHIALAAAAAAQABAAAAfUgHJySHCFhS6CiYI2InAf
Cw4OCyFwaE+KQWcqJwgXFwhCORJkinBpKCZKYjw9XkRmFaUeNVBfBmVdThQTGG8A
GXJwI1I+AwdAYHHKcQIbv3AvBlQDGhRxDwkRCnEBXABwWDEDRkUrzAEQTctvcBIH
WSRqLHFuAjsQS9vsYVtXNxwzlNULkC0OuyEdxlgA0WKZGwIBShiEIyOJBQZH2CyL
U4DAg4kwrDD4wWTNRjcFChiMgmOKDi0pJgh0Q9ONwSptDFXAsHEZgQaKBAF4Q7To
mwY0AgEAOw==
''',
'page_white_stack': '''
R0lGODlhEAAQAMQAAAAAAGhoaJOTk5SUlJiYmLi4uL29vcLCwsPDw8bGxsfHx9bW
1urq6uvr6+3t7e/v7/Ly8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAA
AAAAAAAAAAAAAAAAACH5BAkAABsALAAAAAAQABAAAAVp4CaOZCliWppVgjlq1WIU
yhG4m2YhqFq5GgwCJqMdTJpMQsdLpSyDV0YRHFYiD4aGQhAlqUodBdJweqffpGUC
cWjPS7SagtWcwVTVhSKxjwBJS04YFxV+UnkqGCg4KhmPGYclTpQXOJchADs=
''',
'page_white_text': '''
R0lGODlhEAAQANUAAJSUlJWVlZmZmaWlpaqqqqysrK6urq+vr7GxsbS0tLi4uLq6
ury8vL29vb6+vsDAwMLCwsPDw8bGxsfHx8jIyMrKysvLy83Nzc7Ozs/Pz9PT09TU
1NXV1dbW1tfX19nZ2dvb293d3ebm5ufn5+np6erq6uzs7O3t7e/v7/Dw8PHx8fLy
8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAADYALAAAAAAQABAAAAaAQJtwSCwOY7SachkDGGkt
l1SFItVGgWINJoPBWKlS7dUSEGuyxyJxIAyWtXOyRou5VKaSKD5UTiAOCgkIBgUn
fEJwSnUvLCuINkoYFRIRDw0MCy+QiosyMjGcNR0aGRcWFBQSoWdLNDQzsbGiISAf
HhwbukmtnXBEMr5LMkbFRUEAOw==
''',
'table': '''
R0lGODlhEAAQANUAAEJ3u1OEw1yMyV2KxWOSzmmW0W6a03Ke1nSg13uk2ny+dn6o
3YGp3oLCfIWt4Iiv4onGgo2y5I7Jh5K25pPLi5a66ZjOkJu86p3QlJ/A7aPUmqXC
6qvG6K7I6bPL6brP6rvQ69nj8OTr9erw9+7y+PP2+v///wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAACcALAAAAAAQABAAAAaSwJNwSCwKN5vM5VKZTCIP
ByOROJau2OwVIbyUNBqMhSKBNBqKkqFbMrnf8FKBjazbNyWCcFLamDhvgG4jAkIR
JRyJHB0eHB6PIYUnhx0mHoFvIQFCDiWPn42OHZpCDCSXIG8fbSaRQgskILKzoCED
QgmxJqlugq23JwgkIyIiIcfIx8AHBgUEAgIB0tMBAEbXREEAOw==
''',
'table_gear': '''
R0lGODlhEAAQANUAAFaHx1h5o1tbW12Nyl6BrmOSzmVlZWmW0WxsbG6a03JycnKG
n3Ke1nSg13uk2ny+dn6o3YGp3oLCfIWFhYWt4Iiv4omJiYnGgo2y5I7Jh5K25pOT
k5PLi5a66ZjOkJqampu86p3QlJ6jq5/A7aKioqPUmqXC6qurq6vG6K7J6bHF37PM
6rW1tbbBzrfE1bu7u7vQ68LCwsrKytHX4NTU1NnZ2dzl8uHh4eTr9ejo6Orw9+7y
+PP2+v7+/gAAAAAAACH5BAkAAD4ALAAAAAAQABAAAAa1QJ9wSCz6TKYRCNTRaDAV
SsThEJp42KwW2xCCeKVSyMPJXCSSBy/h5fXe8Djv0Eba79eCUMMz9VBwgC8sOgNC
GDwoiigpLi8qMS8zIiQ+iCk9K28vNS80nTQWPhQ8K6amLTQnExYsMhs+ETuaMD2u
ORNvHzcsExA7MMEwJCw3H6YfNB8GDsA9tT0yMi8fHywxCD4NOzo4ODY2JDLiMSTY
PgwJBwUDAwALCAQkFgECAkZFCvdDQQA7
''',
'table_multiple': '''
R0lGODlhEAAQANUAAEJ3u1OExFyMyV2KxWOSzmmW0W6Z03Ke1nSg13uk23y+dn+o
3ICp3YLCfIWs4Iew2oiv4onGgouz3I2y45K435O25pPLi5a455u96pu/457B5Z/A
7aLD46PUmqTC6qTLxqTLyavG6K7I6a7K8LHK6Nnj8OXs9unv+Orw9+3y+PP2+vX4
+////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAAC0ALAAAAAAQABAAAAaXwJZwSCwKPZ4NpjKBOBiJ
RNGjqlqtiCJG9eFoMhTJY7KIDrejlXqtOqmywi0mBeIomU6otKLCrEJXgVkTfSlb
HR0WEQ0NCioGLYQXK1sslpcsKgUtDioVhipIokgqBC0IKXx8HiwhmCgCLQcGDiaE
IbghJLslsUKohCKtmCUBQ7Odu7kkIsVFqCgmJdPUA0WzBQQB29wAQQA7
''',
'zoom_in': '''
R0lGODlhEAAQAOZ8APTr4uny+vn7/T6DN+jx+dSwcPf6/fbv5L6HTeHJuFOeS1yo
Uu/1+zV5MPTs3Ony+YvGg+nXpdKuhPn7/t3Ckd7EjebRryprJuTOrNi5i72FTMqf
ZTJ0LNKubTBxK+jVo97Eo8OSW9KtbPHl2N/Fj/D2+2OyWfLn2ePMmb+LUOXPqde1
fffw5d3DkdCoatm7jMGOWHa3bd7Dpuzz+ovHhePNu/P4/ODHky5vKcyhZ2WnXmGw
V8+oY2usY9Grg8GPWs2mYsiaYMmbYc6nY/H3/J7RlZ/Sl9/Fo+bRrjN2LubRudGq
dsORVvH2++LLuYbFfbyEUffx7eTMrPHm2LmASMqgb/r29JdhRprPkl+tVoLCffPo
2rZ7Uffv5de2fezcv+71+/L3/ESLPefTuqxlP82naN/Ep9a1f8mbY82kcdq7gK5t
SKbVnZDKiM+pZdKtd+z0+k2WRV6rVOfToLd5Ute3fVqbU2e2XPjx7byDT+ry+uvz
+v///wAAAAAAAAAAACH5BAEAAHwALAAAAAAQABAAAAe6gHyCg4SFgw4tHW5DLi9b
hnxfBXUWLAcYbzljhQ4FKgYMentNAkdoU4QUXgZ7BA8BemACaRKEIkglrrB7e2Fm
IYQ8XXuwonc7CwAphEAHM3qie1lsCgAIhGVSRLwmcjFFPWIDhBlLAgxwC0ZYT20Q
DYQnGyATNgpxOjR2STg1hEpBqsgAAGCAFg4oKuTBQ2iEjx8INDTwcOFDBDVkokAS
5AQGiTk3hFzZKCgBlBVnmHAhWXINFTpW+AQCADs=
''',
'zoom_out': '''
R0lGODlhEAAQAOZ0APTr4u/1+/n7/eny+uzz+tSwcPbv5Ojx+fRFSO71++waI/Ts
3O4mLvdUVvpjYvxvbff6/fE1Or6HTeny+ez0+sGPWvjx7c2mYuPMmdKtd9Grg/D2
+/Hl2PHm2MORVunXpbyEUc6nY9/Fj9a1f8mbY/H3/OfTuuoRHL+LUObRrvPo2vfw
5d3Dkd7DptCoavn7/va2rvjy782kceDHk+LLueHJuNGqdvfv5eDHtvjIv/54dNu/
h9i5i8qfZdm7jPH2+7uGUtKubde2fd7EjfSrpN7Eo9KuhM+oY7FyRffx7ebRuejV
o8mbYeXPqbyDT8GNU9q7gN/Ep82naPL3/PfAt+zcv7uBTN3CkeTMrM+pZePNu8GO
WL2FTMOSW5dhRrNzS/Ln2bmASMqgb/P4/KxlP+bRr9y+pNa0eefToNe1faVcM8ia
YNe3fdKtbN/Fo8yhZ+TOrPOgm+ry+uvz+v///wAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAHQALAAAAAAQABAAAAe4gHSCg4SFgwssQVkhLj4q
hnRVBWxlKwZwGW8mhQsFTRABcnM/Am4kHYRXQhBzBxMDcgkCMkaEbSkbrrBzc1NR
XYRHN3OwonMECQAohBcGBHLGBBQBABKEUlglvDoPDg0IEQyEPDYCARQPOVQwRHEK
hGA9RS9j3uAMCidahEprYi0AAJh5cgbDECcWCHHQUEECFyBWdiz5AIVMEkiCaGwR
gWYGEy8YBdUAkWaEByQhBeFQE+ZLDDqBAAA7
''',
}
| 0
| 0
| 0
|
28a6cd67583aaea23b8d40e9061ec596cdb2ce3c
| 34,063
|
py
|
Python
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | 2
|
2021-05-29T12:56:05.000Z
|
2021-10-31T04:56:32.000Z
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T13:12:28.000Z
|
2021-01-30T16:14:04.000Z
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | null | null | null |
"""
.. module:: create
:synopsis: Import a Word `docx` document, define its metadata, cover and rights, and publish it as an EPUB3.
.. moduleauthor:: Gavin Chait <github.com/turukawa>
CreateWork
==========
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and define its
metadata, cover and publishing rights. Currently does not support `odt` since `Pandoc` seems to lose any embedded
graphics.
.. note:: This process will overwrite any existing EPUB3 file of the same name.
Workflow
--------
There are two main publication approaches: stateless and non-stateless. A stateless approach assumes you may be
starting each step discretely (perhaps via a set of one-time network calls), with interim state saved to the working
directory. A non-stateless approach maintains state in memory, so you can complete the process in one step.
The *stateless* publication process runs as follows:
* Set the working directory on creation,
* Define and validate the metadata required for the creative work,
* Copy the `docx` file to import into the working directory,
* Copy the cover image to import into the working directory,
* Define and add any contributors, such as cover artist,
* Update the creative work's publication rights,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective of this workflow is to support what may be a stateless process, i.e. the individual steps first bring all
the data required to produce the creative work into a project directory, and then produce it. State does not need
to be maintained between steps.
The *non-stateless* process runs as follows:
* Define and validate the metadata required for the creative work,
* Supply the `docx` file as a base64 string,
* Copy the cover image as a base64 string,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective in a non-stateless workflow is to minimise disruption, and store the minimum amount of information. Only
the epub itself will be saved, and then only because Pandoc does not support a memory-only epub build.
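To make the distinction concrete, here is a minimal sketch of the non-stateless flow. It assumes a metadata dictionary
such as the `METADATA` example shown under *Set metadata* below, and the file paths are purely illustrative:
.. code-block:: python
from pathlib import Path
from chapisha.create import CreateWork
work = CreateWork("/home/user/books", metadata=METADATA)  # stateless defaults to False
work.set_document(Path("/home/user/manuscript.docx"))
work.set_cover(Path("/home/user/cover.jpg"))
work.build()
work.validate()  # True if the EPUB is standards compliant
Each of these steps is described in detail in the sections that follow.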
Build your work
---------------
Import **Chapisha** and create a work:
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory)
Where `directory` is the complete path to where you would like the EPUB created. If you want a stateless workflow,
set the `stateless` boolean to `True`. If you already have the `metadata` (perhaps via a web form), you can skip
several steps and pick up again for setting the files and images.
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory, metadata=metadata, stateless=True)
Set metadata
^^^^^^^^^^^^
`Dublin Core <https://www.dublincore.org/specifications/dublin-core/dces/>`_ is a vocabulary of fifteen properties for
use in resource description. Four of them - `title`, `identifier`, `language` and `rights` - are required. The
`language` code is defined by the `ISO 639-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ standard
(e.g. `en` for English, or `fr` for French).
Metadata properties:
* `identifier`: UUID, DOI or ISBN of the creative work. A UUID will be generated if not included.
* `title`: Name given to the creative work.
* `language`: Specify the language of the creative work. Two letter code defined by ISO 639-1.
* `creator`: Name of a person, organisation, etc. responsible for the creation of the work. May be more than one.
* `work_uri`: The URI for your creative work.
* `contributor`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work. May be more than one.
* `date`: The publication date of the creative work. Provide in ISO format, YYYY-MM-DD.
* `subject`: The subject, or tag, of the creative work. May be more than one.
* `publisher`: Name of a person, organisation, etc. responsible for making the creative work available.
* `publisher_uri`: The URI for the publisher of your creative work.
* `rights`: A short, single-sentence statement of copyright and publication terms for the creative work, e.g. 'All rights reserved.' or 'Attribution-NonCommercial-ShareAlike 4.0 International.'
* `long_rights`: Lengthier description and information about copyright held in and over the creative work. Formatted as you wish it to appear.
* `description`: A short, single-sentence summary of the creative work.
* `long_description`: The pitch, or jacket-cover, description of the creative work.
Create a paired dictionary of these properties. As example:
.. code-block:: python
METADATA = {
"identifier": "isbn:9780993191459",
"title": "Usan Abasi's Lament",
"description": "Years after the events of \"Lament for the Fallen\", Isaiah tells of the myth of Usan Abasi, who was punished by the Sky God to spend eternity in the form of a brass bowl and imprisoned within a vast termite mountain. Now the ceremony which ensures that Usan Abasi remains dormant has failed, and his ancient evil awakes. A free, stand-alone short-story set in the city of Ewuru and linking \"Lament for the Fallen\" to a forthcoming novel.",
"language": "en",
"creator": ["Gavin Chait"],
"rights": "All rights reserved.",
"long_rights": ["The right of the creator to be identified as the author of the Work has been asserted by them in accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright gives creators space to explore and provides for their long-term ability to sustain themselves from their work. Thank you for buying this work and for complying with copyright laws by not reproducing, scanning, or distributing any part of it without permission. Your support will contribute to future works by the creator."],
"publisher": "Qwyre Publishing",
"publisher_uri": "https://qwyre.com",
"work-uri": "https://gavinchait.com",
"date": "2017-07-23",
"subject": ["science fiction", "african mythology"]
}
Set the metadata:
.. code-block:: python
work.set_metadata(METADATA)
Set document
^^^^^^^^^^^^
Most writers still use `Microsoft Word <https://www.microsoft.com/en-us/microsoft-365/word>`_ as their default work tool.
There are certainly other word processors, but this is the one most people will work with if they intend to be
professionally published, as publishers still expect Word `docx` files for editing and markup.
**Chapisha** will create your cover, rights and dedication pages, as well as the table of contents. Your `docx` file
must contain **only** the creative content you wish included in that table of contents. Your document must also be
correctly marked up to ensure proper chapter creation.
EPUB documents will be read on multiple and diverse electronic devices, so don't expect any page
number-dependent formatting to survive. Instead:
* Each chapter must have a title, formatted as `Heading 1`, with lower-level headings formatted for each heading type.
* There must be no title page, contents, or anything else. Chapter 1 starts at the top of the first line of the document.
* Page numbers and other page-specific information will be lost.
* Fonts or typographic formats and alignment will be lost, although `bold` and `italics` will be maintained.
* Images will be maintained.
Once the work is built you can enhance its styling. However, there are still limits in the EPUB3 standard in comparison
to a printed work.
.. code-block:: python
work.set_document(source)
Where `source` is any of the complete path to the source `docx` file, a `bytes` file import, or a `base64` string.
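If you are supplying a base64 string (as in the non-stateless workflow), you can produce one from the `docx` file with
the standard library. A short sketch, with an illustrative path:
.. code-block:: python
import base64
with open("/home/user/manuscript.docx", "rb") as f:
    source = base64.b64encode(f.read()).decode("utf-8")
work.set_document(source)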
Set cover
^^^^^^^^^
There is, unfortunately, no standardisation on the image size, dimensions or resolution required for an EPUB. However,
a recommendation is an image (`.jpeg`, `.jpg` or `.png`) of 1,600 by 2,400 pixels, and less than 5MB in size. You will
need to create your image (or have someone create it for you) exactly as you wish it to appear on the cover. Nothing
will be added, removed, or changed.
Please also ensure you have the appropriate rights to use the image on your cover. There are more than sufficient
services providing openly-licenced, or even public domain, work for you to use.
.. note:: You can optionally add the image contributor details here, or on the next step. Do not do it in both or the contributor information will be repeated.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.set_cover(source, contributor=CONTRIBUTOR)
Where `source` is the complete path to the image file, a `bytes` file import, or a `base64` string.
Add contributors
^^^^^^^^^^^^^^^^
You may have numerous contributors you wish to acknowledge. Fields are:
* `role`: Contributor identity, based on a specified list of `artist`, `editor` or `translator`.
* `name`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work.
* `terms`: Information about copyright held by the rights-holder in and over their contribution to the creative work. Formatted as you wish it to appear.
* `year`: The year of the contribution or publication of the contributor's work.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.add_contributor(CONTRIBUTOR)
`add_contributor` as many times as you have people or organisations to acknowledge.
Set rights
^^^^^^^^^^
This refers to the `long_rights` you can set, and which you may wish to adjust for presentation on the colophon page.
There are obviously a broad range of rights with which you can release your creative work. Here are two examples which
you can modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Example code:
.. code-block:: python
RIGHTS = [
"You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.",
"In return: You may not use the material for commercial purposes. You must give appropriate credit, provide a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits."
]
work.set_rights(RIGHTS)
Rights terms can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Set dedication
^^^^^^^^^^^^^^
Most creators have a dedication for their work in mind - usually to apologise for all the late nights and impoverishing
returns on their creative efforts.
This is optional, but you can include a dedication page. Each item in the list will be set on a different paragraph.
.. code-block:: python
dedication = [
"For those who leave.",
"For those who remain.",
"For the wings and tail.",
"But most, for her"
]
work.set_dedication(dedication)
The dedication can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Build
^^^^^
The build function is straightforward. Once everything is in place:
.. code-block:: python
work.build()
You will find your EPUB in the directory you specified.
Validate
^^^^^^^^
If you have any doubts as to whether your EPUB is standards compliant, run the validation. This tests the `epub` file
against the standards maintained by the `DAISY Consortium <http://validator.idpf.org/>`_. You can check the file online
at that link. It's the same test.
.. code-block:: python
work.validate()
Output will be `True` or `False`.
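Putting it all together, a complete stateless run - reusing the `METADATA`, `CONTRIBUTOR`, `RIGHTS` and `dedication`
values from the examples above, with illustrative paths - looks roughly like this:
.. code-block:: python
from pathlib import Path
from chapisha.create import CreateWork
work = CreateWork("/home/user/books", stateless=True)
work.set_metadata(METADATA)
work.set_document(Path("/home/user/manuscript.docx"))
work.set_cover(Path("/home/user/cover.jpg"), contributor=CONTRIBUTOR)
work.set_rights(RIGHTS)
work.set_dedication(dedication)
work.build()
work.validate()
Note that `set_metadata` must run before the other steps, since it establishes the working directory and the work's name.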
"""
import pypandoc
from bs4 import BeautifulSoup
from epubcheck import EpubCheck
from typing import Optional, Literal, List, Union
from urllib.parse import urlparse
from pathlib import Path
import os
import re
import base64
import filetype
from ..models.metadata import WorkMetadata, Contributor
from ..models.matter import Matter, MatterPartition
from ..helpers import pages, formats, coreio as _c
from ..helpers.updatezipfile import UpdateZipFile
class CreateWork:
"""
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and
define its metadata, cover and publishing rights.
If the EPUB file already exists, then publishing this work will overwrite it.
On instantiation, checks `directory` to see if `DEFAULT_METADATA_SETTINGS` is present, loading the required data,
or replacing with specified defaults.
"""
def __init__(self,
directory: Optional[str] = None,
metadata: Optional[WorkMetadata] = None,
stateless: bool = False):
"""
Initialise the CreateWork class.
Parameters
----------
directory: str
A directory path where you would like to save your work.
metadata: WorkMetadata
A model defined by a dictionary of terms.
stateless: bool
Whether your workflow is stateless (default False).
"""
self.stateless = stateless
self.directory = Path(directory)
if self.stateless:
_c.check_path(self.directory)
# Load metadata settings, if exists
try:
_c.check_source(self.directory / _c.DEFAULT_METADATA_SETTINGS)
self.metadata = WorkMetadata(_c.load_json(self.directory / _c.DEFAULT_METADATA_SETTINGS))
self.work_name = self.directory.name # Since will be `.../work-name/`
except FileNotFoundError:
self.metadata = None
self.work_name = None
# Construct the metadata, if it is provided
if metadata:
if isinstance(metadata, WorkMetadata):
metadata = metadata.dict()
self.set_metadata(metadata)
self.source_path = _c.get_helper_path() / "data"
# Set default cover and work bytes
self.work = None
self.cover = None
self.dedication = None
############################################################################
# GATHER WORKING DATA
############################################################################
def get_metadata_schema(self) -> dict:
"""
Return the standard Dublin Core schema permitted for the EPUB3 standard.
Returns
-------
dict
"""
return self.metadata.schema()
def set_metadata(self, metadata: WorkMetadata) -> bool:
"""
Validate metadata values for the permitted Dublin Core schema terms, along with additional metadata. The full
schema, with descriptions, and requirements, is listed by `get_metadata_schema`.
.. note:: The terms `identifier`, `title`, `creator`, `rights` and `language` are required. A random UUID will be assigned if none is provided.
Parameters
----------
metadata: WorkMetadata
A model defined by a dictionary of terms.
Returns
-------
bool
"""
# Dict snake_case fields need to be hyphenated for import
# This as a result of alias names in model
if isinstance(metadata, dict):
for k in [k for k in metadata.keys()]:
hyphenated = "-".join(k.split("_"))
metadata[hyphenated] = metadata.pop(k)
# Rename 'isodate' if it exists
if "isodate" in metadata:
metadata["date"] = metadata.pop("isodate")
# Fix "long-rights" if needed
if "long-rights" in metadata:
metadata["long-rights"] = formats.get_text_paragraphs(metadata["long-rights"])
# Create a temporary WorkMetadata model to hold updated metadata
updated_metadata = WorkMetadata(**metadata)
# And update the original data
# https://fastapi.tiangolo.com/tutorial/body-updates/#partial-updates-with-patch
if self.metadata:
self.metadata = self.metadata.copy(update=updated_metadata.dict(exclude_unset=True))
else:
self.metadata = updated_metadata
work_name = "-".join(["".join([e for e in w if e.isalnum()])
for w in self.metadata.title.lower().split(" ")])
# Set the working directory, if it isn't already, and save metadata there
if not self.work_name:
self.work_name = work_name
self.directory = self.directory / work_name
# If stateless, save the metadata to the working folder
if self.stateless:
_c.check_path(self.directory)
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
return True
def _get_validated_bytes(self,
source: Union[Path, bytes, str],
base_type: Optional[Literal["cover", "work"]] = None) -> bytes:
"""
Validate a source file, and return a bytes version.
Parameters
----------
source: Path, bytes or base64 string
Filename to open, base64 string, or bytes from an opened file
base_type: Optional, str
Must be one of "cover" or "work" for interpreting base64 mime type
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
Returns
-------
bytes
"""
if not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
if isinstance(source, Path):
try:
_c.check_source(source)
with open(source, "rb") as f:
source = f.read()
except FileNotFoundError:
e = F"`{source}` is not a valid file source."
raise FileNotFoundError(e)
if isinstance(source, str) and base_type:
# Base64 string, remove any provided mime type
source_type = re.search(_c.DEFAULT_BASE64_TYPES[base_type], source)
if source_type:
source = source.replace(source_type.group(0), "")
source = base64.b64decode(source)
if not isinstance(source, bytes):
e = F"File is not valid."
raise FileNotFoundError(e)
return source
def set_document(self, source: Union[Path, bytes, str]):
"""
Import source `docx` document and, if stateless, save to the working directory. If you're finding errors in
the build step, it could be you need to convert your base64 string to "utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path, bytes, or str
Filename to open, bytes from an opened file, or a base64 string
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
source = self._get_validated_bytes(source, base_type = "work")
if self.stateless:
with open(self.directory / F"{self.work_name}.docx", "wb") as w:
w.write(source)
else:
self.work = source
def set_cover(self,
source: Union[Path, bytes, str],
contributor: Optional[Contributor] = None):
"""
Import cover image and, if stateless, save to the working directory, along with any rights and contributor
information. If you're finding errors in the build step, it could be you need to convert your base64 string to
"utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path, bytes, or str
Filename to open, bytes from an opened file, or a base64 string
contributor: Contributor
Optional, string indicating contributor name for cover image.
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting cover."
raise PermissionError(e)
# Cover contributor
if contributor:
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
# Cover image
source = self._get_validated_bytes(source, base_type = "cover")
if self.stateless:
kind = filetype.guess(source).extension
with open(self.directory / F"cover.{kind}", "wb") as w:
w.write(source)
_c.save_json(self.metadata.dict(by_alias=True),
self.directory / _c.DEFAULT_METADATA_SETTINGS,
overwrite=True)
else:
self.cover = source
def add_contributor(self, contributor: Contributor):
"""
Add a contributor to the list of those supporting the creation of the work. `contributor` is defined as a dict:
.. code-block:: python
contributor = {
"role": "artist",
"name": "Great Artist",
"year": "2021",
"terms": "Public Domain."
}
Parameters
----------
contributor: Contributor
Include the types of contributor who supported the creation of the work. `role`: `artist`, `editor`, `translator`.
Raises
------
PermissionError: if metadata not yet validated.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before adding contributors, or add the contributors when you set the metadata."
raise PermissionError(e)
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
def set_dedication(self, dedication: Union[str, list[str]]):
"""
Set dedication page for creative work. Provide as a string, unless it is on multiple paragraphs.
Parameters
----------
dedication: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting dedication."
raise PermissionError(e)
self.dedication = pages.create_dedication_xhtml(dedication)
if self.stateless:
with open(self.directory / F"dedication.xhtml", "w") as w:
w.write(self.dedication)
def set_rights(self, rights: Union[str, list[str]]):
"""
Set publication `long_rights` for creative work. Provide as a string, or list of strings if it is on multiple
paragraphs.
There are multiple appropriate rights, and two examples are below. Modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Parameters
----------
rights: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting rights."
raise PermissionError(e)
if isinstance(rights, str):
rights = [rights]
self.metadata.long_rights = rights
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
############################################################################
# BUILD CREATIVE WORK
############################################################################
def build(self):
"""
Automatically build the creative work as a standards compliant EPUB3. Save to the root directory.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before building creative work."
raise PermissionError(e)
epub_path = self.directory.parent / F"{self.work_name}.epub"
# Generate the initial creative content using Pandoc
# pypandoc can't handle PosixPaths ...
if self.stateless:
pypandoc.convert_file(str(self.directory / F"{self.work_name}.docx"),
format="docx",
to="epub3",
outputfile=str(epub_path))
else:
# Maybe one day Pandoc can return an epub object and we won't save the interim file
pypandoc.convert_text(self.work,
format="docx",
to="epub3",
outputfile=str(epub_path))
# Generate the epub version
with UpdateZipFile(epub_path, "a") as w:
# REMOVES
REMOVES = ["EPUB/styles/stylesheet1.css", "EPUB/text/title_page.xhtml", "EPUB/nav.xhtml"]
# DEFAULT COMPONENTS
DEFAULT = [(self.source_path / "css" / "core.css", "EPUB/css/core.css"),
(self.source_path / "images" / "logo.svg", "EPUB/images/logo.svg"),
(self.source_path / "xhtml" / "onix.xml", "EPUB/onix.xml"),
(self.source_path / "xhtml" / "container.xml", "META-INF/container.xml")]
for default_file, write_file in DEFAULT:
w.write(default_file, write_file)
# DEFAULT FONTS
for f in os.listdir(self.source_path / "fonts"):
w.write(self.source_path / "fonts" / f, F"EPUB/fonts/{f}")
# ADD titlepage.xhtml
w.writestr("EPUB/text/titlepage.xhtml", pages.create_titlepage_xhtml(self.metadata))
# ADD colophon.xhtml
w.writestr("EPUB/text/colophon.xhtml", pages.create_colophon_xhtml(self.metadata))
# ADD cover.img
if self.stateless:
for image_path in [self.directory / F"cover.{t}" for t in ["jpg", "jpeg", "png", "gif", "svg"]]:
if image_path.exists():
w.write(image_path, F"EPUB/images/{image_path.name}")
elif self.cover:
t = filetype.guess(self.cover).extension
w.writestr(F"EPUB/images/cover.{t}", self.cover)
# GET DEDICATION and CHAPTERS
spine = []
# check if the path to dedication exists, if it does, add it to the work and spine
if (self.directory / "dedication.xhtml").exists() or self.dedication:
if self.dedication:
w.writestr("EPUB/text/dedication.xhtml", self.dedication)
else:
w.write(self.directory / "dedication.xhtml", "EPUB/text/dedication.xhtml")
spine = [Matter(partition="frontmatter", content="dedication", title="Dedication")]
CHAPTERS = [f for f in w.namelist() if f.startswith("EPUB/text/ch")]
CHAPTERS.sort()
self.metadata.word_count = 0
for chapter in CHAPTERS:
file_as = F"EPUB/text/chapter-{chapter.split('.')[0][-1]}.xhtml"
try:
chapter_xml = w.read(chapter)
except KeyError:
continue
if file_as != chapter:
# If delete and then re-add same file, causes ZipFile confusion
REMOVES.append(chapter)
# Restructure chapter xml into standard format
chapter_xml = pages.restructure_chapter(chapter_xml)
chapter_title = chapter_xml.title.string
# Count the words (XHTML and HTML treated differently by BeautifulSoup, so first extract `section`)
words = BeautifulSoup(str(chapter_xml.section), "lxml").get_text()
self.metadata.word_count += len(words.replace("\n", " ").replace(" ", " ").strip().split())
w.writestr(file_as, str(chapter_xml))
spine.append(Matter(partition=MatterPartition.body, title=chapter_title))
# PANDOC MAY STILL ADD IMAGES FOUND IN THE WORK WHICH WE NEED TO DISCOVER AND ADD TO THE MANIFEST
# NOTE, these are not only to be added to the manifest, but the folder renamed as well
image_manifest = [f.replace("EPUB/", "") for f in w.namelist() if f.startswith("EPUB/images/")]
for img in [f for f in w.namelist() if f.startswith("EPUB/media/")]:
REMOVES.append(img)
new_img = img.replace("/media/", "/images/")
try:
old_img = w.read(img)
w.writestr(new_img, old_img)
except KeyError:
continue
image_manifest.append(new_img.replace("EPUB/", ""))
# ADD content.opf
w.writestr("EPUB/content.opf", pages.create_content_opf(self.metadata, image_manifest, spine))
# ADD toc.ncx
w.writestr("EPUB/toc.ncx", pages.create_toc_ncx(self.metadata, spine))
# ADD toc.xhtml
w.writestr("EPUB/toc.xhtml", pages.create_toc_xhtml(self.metadata, spine))
# PERFORM REMOVES
for remove in REMOVES:
try:
w.remove_file(remove)
except KeyError:
continue
def validate(self) -> bool:
"""
Validate the creative work as a standards compliant EPUB3.
"""
epub_path = self.directory.parent / F"{self.work_name}.epub"
_c.check_source(epub_path)
result = EpubCheck(epub_path)
return result.valid
| 46.661644
| 551
| 0.648622
|
"""
.. module:: create
:synopsis: Import a Word `docx` document, define its metadata, cover and rights, and publish it as an EPUB3.
.. moduleauthor:: Gavin Chait <github.com/turukawa>
CreateWork
==========
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and define its
metadata, cover and publishing rights. Currently does not support `odt` since `Pandoc` seems to lose any embedded
graphics.
.. note:: This process will overwrite any existing EPUB3 file of the same name, if it already exists.
Workflow
--------
There are two main publication approaches, stateless and non-stateless. A non-stateless approach assumes you may be
starting each step discretely (perhaps via a set of one-time network calls). The second maintains state, so you can
complete the process in one step.
The *stateless* publication process runs as follows:
* Set the working directory on creation,
* Define and validate the metadata required for the creative work,
* Copy the `docx` file to import into the working directory,
* Copy the cover image to import into the working directory,
* Define and add any contributors, such as cover artist,
* Update the creative work's publication rights,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective of this workflow is to support what may be a stateless process i.e. the individual steps first bring all
the data required to produce the creative work into a project directory, and then produces it. State does not need
to be maintained between steps.
The *non-stateless* process runs as follows:
* Define and validate the metadata required for the creative work,
* Supply the `docx` file as a base64 string,
* Copy the cover image as a base64 string,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective in a non-stateless workflow is to minimise disruption, and store the minimum amount of information. Only
the epub itself will be saved, and then only because Pandoc does not support a memory-only epub build.
Build your work
---------------
Import **Chapisha** and create a work:
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory)
Where `directory` is the complete path to where you would like the EPUB created. If you want a stateless workflow,
set the `stateless` boolean to `True`. If you already have the `metadata` (perhaps via a web form), you can skip
several steps and pick up again for setting the files and images.
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory, metadata=metadata, stateless=True)
Set metadata
^^^^^^^^^^^^
`Dublin Core <https://www.dublincore.org/specifications/dublin-core/dces/>`_ is a vocabulary of fifteen properties for
use in resource description. Four of them - `title`, `identifier`, `language` and `rights` - are required. The
`language` code is defined by the `ISO 679-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ standard
(e.g. `en` for English, or `fr` for French).
Metadata properties:
* `identifier`: UUID, DOI or ISBN of the creative work. A UUID will be generated if not included.
* `title`: Name given to the creative work.
* `language`: Specify the language of the creative work. Two letter code defined by ISO 639-1.
* `creator`: Name of a person, organisation, etc. responsible for the creation of the work. May be more than one.
* `work_uri`: The URI for your creative work.
* `contributor`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work. May be more than one.
* `date`: The publication date of the creative work. Provide in ISO format, YYYY-MM-DD.
* `subject`: The subject, or tag, of the creative work. May be more than one.
* `publisher`: Name of a person, organisation, etc. responsible for making the creative work available.
* `publisher_uri`: The URI for the publisher of your creative work.
* `rights`: A short, single-sentence statement of copyright and publication terms for the creative work, e.g. 'All rights reserved.' or 'Attribution-NonCommercial-ShareAlike 4.0 International.'
* `long_rights`: Lengthier description and information about copyright held in and over the creative work. Formatted as you wish it to appear.
* `description`: A short, single-sentence summary of the creative work.
* `long_description`: The pitch, or jacket-cover, description of the creative work.
Create a paired dictionary of these properties. As example:
.. code-block:: python
METADATA = {
"identifier": "isbn:9780993191459",
"title": "Usan Abasi's Lament",
"description": "Years after the events of \"Lament for the Fallen\", Isaiah tells of the myth of Usan Abasi, who was punished by the Sky God to spend eternity in the form of a brass bowl and imprisoned within a vast termite mountain. Now the ceremony which ensures that Usan Abasi remains dormant has failed, and his ancient evil awakes. A free, stand-alone short-story set in the city of Ewuru and linking \"Lament for the Fallen\" to a forthcoming novel.",
"language": "en",
"creator": ["Gavin Chait"],
"rights": "All rights reserved.",
"long_rights": ["The right of the creator to be identified as the author of the Work has been asserted by them in accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright gives creators space to explore and provides for their long-term ability to sustain themselves from their work. Thank you for buying this work and for complying with copyright laws by not reproducing, scanning, or distributing any part of it without permission. Your support will contribute to future works by the creator."],
"publisher": "Qwyre Publishing",
"publisher_uri": "https://qwyre.com",
"work-uri": "https://gavinchait.com",
"date": "2017-07-23",
"subject": ["science fiction", "african mythology"]
}
Set the metadata:
.. code-block:: python
work.set_metadata(METADATA)
Set document
^^^^^^^^^^^^
Most writers still use `Microsoft Word <https://www.microsoft.com/en-us/microsoft-365/word>`_ as their default work tool.
There are certainly other word processors, but this is the one most people will work with if they intend to be
professionally published as publishers still expect Word `docx` files for editing and markup.
**Chapisha** will create your cover, rights and dedication pages, as well as the table of contents. Your `docx` file
must contain **only** the creative content you wish included in that table of contents. Your document must also be
correctly marked up to ensure proper chapter creation.
EPUB documents will be read on multiple and diverse electronic devices. Don't have any expectations for page
number-dependant formatting. Instead:
* Each chapter must have a title, formatted as `Heading 1`, with lower-level headings formatted for each heading type.
* There must be no title page, contents, or anything else. Chapter 1 starts at the top of the first line of the document.
* Page numbers and other page-specific information will be lost.
* Fonts or typographic formats and alignment will be lost, although `bold` and `italics` will be maintained.
* Images will be maintained.
Once the work is built you can enhance its styling. However, there are still limits in the EPUB3 standard in comparison
to a printed work.
.. code-block:: python
work.set_document(source)
Where `source` is any of the complete path to the source `docx` file, a `bytes` file import, or a `base64` string.
Set cover
^^^^^^^^^
There is, unfortunately, no standardisation on the image size, dimensions or resolution required for an EPUB. However,
a recommendation is an image (`.jpeg`, `.jpg` or `.png`) of 1,600 by 2,400 pixels, and less than 5Mb is size. You will
need to create your image (or have someone create it for you) exactly as you wish it to appear on the cover. Nothing
will be added, removed, or changed.
Please also ensure you have the appropriate rights to use the image on your cover. There are more than sufficient
services providing openly-licenced, or even public domain, work for you to use.
.. note:: You can optionally add the image contributor details here, or on the next step. Do not do it in both or the contributor information will be repeated.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.set_cover(source, contributor=CONTRIBUTOR)
Where `source` is the complete path to the image file, a `bytes` file import, or a `base64` string.
Add contributors
^^^^^^^^^^^^^^^^
You may have numerous contributors you wish to acknowledge. Fields are:
* `role`: Contributor identity, based on a specified list of `artist`, `editor` or `translator`.
* `name`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work.
* `terms`: Information about copyright held by the rights-holder in and over their contribution to the creative work. Formatted as you wish it to appear.
* `year`: The year of the contribution or publication of the contributor's work.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.add_contributor(CONTRIBUTOR)
Call `add_contributor` as many times as you have people or organisations to acknowledge.
Set rights
^^^^^^^^^^
This refers to the `long_rights` you can set, and which you may wish to adjust for presentation on the colophon page.
There are obviously a broad range of rights with which you can release your creative work. Here are two examples which
you can modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Example code:
.. code-block:: python
RIGHTS = [
"You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.",
"In return: You may not use the material for commercial purposes. You must give appropriate credit, provide a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits."
]
work.set_rights(RIGHTS)
Rights terms can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Set dedication
^^^^^^^^^^^^^^
Most creators have a dedication for their work in mind - usually to apologise for all the late nights and impoverishing
returns on their creative efforts.
This is optional, but you can include a dedication page. Each item in the list will be set on a different paragraph.
.. code-block:: python
dedication = [
"For those who leave.",
"For those who remain.",
"For the wings and tail.",
"But most, for her"
]
work.set_dedication(dedication)
The dedication can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Build
^^^^^
The build function is straightforward. Once everything is in place:
.. code-block:: python
work.build()
You will find your EPUB in the directory you specified.
Validate
^^^^^^^^
If you have any doubts as to whether your EPUB is standards compliant, run the validation. This tests the `epub` file
against the standards maintained by the `DAISY Consortium <http://validator.idpf.org/>`_. You can check the file online
at that link. It's the same test.
.. code-block:: python
work.validate()
Output will be `True` or `False`.
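Putting the steps together, once `work` has been created, a minimal end-to-end sketch (the file paths below are placeholders,
and `METADATA`, `CONTRIBUTOR`, `RIGHTS` and `dedication` are the examples defined above) looks like this:
.. code-block:: python
from pathlib import Path
work.set_metadata(METADATA)
work.set_document(Path("manuscript/my-work.docx"))
work.set_cover(Path("images/cover.jpg"), contributor=CONTRIBUTOR)
work.set_rights(RIGHTS)
work.set_dedication(dedication)
work.build()
assert work.validate()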
"""
import pypandoc
from bs4 import BeautifulSoup
from epubcheck import EpubCheck
from typing import Optional, Literal, List
from urllib.parse import urlparse
from pathlib import Path
import os
import re
import base64
import filetype
from ..models.metadata import WorkMetadata, Contributor
from ..models.matter import Matter, MatterPartition
from ..helpers import pages, formats, coreio as _c
from ..helpers.updatezipfile import UpdateZipFile
class CreateWork:
"""
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and
define its metadata, cover and publishing rights.
If the EPUB file already exists, then publishing this work will overwrite it.
On instantiation, checks `directory` to see if `DEFAULT_METADATA_SETTINGS` is present, loading the required data,
or replacing with specified defaults.
"""
def __init__(self,
directory: Optional[str] = None,
metadata: Optional[WorkMetadata] = None,
stateless: bool = False):
"""
Initialise the CreateWork class.
Parameters
----------
directory: str
A directory path where you would like to save your work.
metadata: WorkMetadata
A model defined by a dictionary of terms.
stateless: bool
Whether your workflow is stateless (default False).
"""
self.stateless = stateless
self.directory = Path(directory)
if self.stateless:
_c.check_path(self.directory)
# Load metadata settings, if exists
try:
_c.check_source(self.directory / _c.DEFAULT_METADATA_SETTINGS)
self.metadata = WorkMetadata(_c.load_json(self.directory / _c.DEFAULT_METADATA_SETTINGS))
self.work_name = self.directory.name  # the directory is `.../work-name/`, so its name is the work name
except FileNotFoundError:
self.metadata = None
self.work_name = None
# Construct the metadata, if it is provided
if metadata:
if isinstance(metadata, WorkMetadata):
metadata = metadata.dict()
self.set_metadata(metadata)
self.source_path = _c.get_helper_path() / "data"
# Set default cover and work bytes
self.work = None
self.cover = None
self.dedication = None
############################################################################
# GATHER WORKING DATA
############################################################################
def get_metadata_schema(self) -> dict:
"""
Return the standard Dublin Core schema permitted for the EPUB3 standard.
Returns
-------
dict
"""
return self.metadata.schema()
def set_metadata(self, metadata: WorkMetadata) -> bool:
"""
Validate metadata values for the permitted Dublin Core schema terms, along with additional metadata. The full
schema, with descriptions, and requirements, is listed by `get_metadata_schema`.
.. note:: The terms `identifier`, `title`, `creator`, `rights` and `language` are required. A random UUID will be assigned if none is provided.
Parameters
----------
metadata: WorkMetadata
A model defined by a dictionary of terms.
Returns
-------
bool
"""
# Dict snake_case fields need to be hyphenated for import
# This is a result of the alias names used in the model
if isinstance(metadata, dict):
for k in list(metadata.keys()):
hyphenated = "-".join(k.split("_"))
metadata[hyphenated] = metadata.pop(k)
# Rename 'isodate' if it exists
if "isodate" in metadata:
metadata["date"] = metadata.pop("isodate")
# Fix "long-rights" if needed
if "long-rights" in metadata:
metadata["long-rights"] = formats.get_text_paragraphs(metadata["long-rights"])
# Create a temporary WorkMetadata model to hold updated metadata
updated_metadata = WorkMetadata(**metadata)
# And update the original data
# https://fastapi.tiangolo.com/tutorial/body-updates/#partial-updates-with-patch
if self.metadata:
self.metadata = self.metadata.copy(update=updated_metadata.dict(exclude_unset=True))
else:
self.metadata = updated_metadata
work_name = "-".join(["".join([e for e in w if e.isalnum()])
for w in self.metadata.title.lower().split(" ")])
# Set the working directory, if it isn't already, and save metadata there
if not self.work_name:
self.work_name = work_name
self.directory = self.directory / work_name
# If stateless, save the metadata to the working folder
if self.stateless:
_c.check_path(self.directory)
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
return True
def _get_validated_bytes(self,
source: [Path, bytes, str],
base_type: Optional[Literal["cover", "work"]] = None) -> bytes:
"""
Validate a source file, and return a bytes version.
Parameters
----------
source: Path, bytes or base64 string
Filename to open, base64 string, or bytes from an opened file
base_type: Optional, str
Must be one of "cover" or "work" for interpreting base64 mime type
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
Returns
-------
bytes
"""
if not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
if isinstance(source, Path):
try:
_c.check_source(source)
with open(source, "rb") as f:
source = f.read()
except FileNotFoundError:
e = F"`{source}` is not a valid file source."
raise FileNotFoundError(e)
if isinstance(source, str) and base_type:
# Base64 string, remove any provided mime type
source_type = re.search(_c.DEFAULT_BASE64_TYPES[base_type], source)
if source_type:
source = source.replace(source_type.group(0), "")
source = base64.b64decode(source)
if not isinstance(source, bytes):
e = F"File is not valid."
raise FileNotFoundError(e)
return source
def set_document(self, source: [Path, bytes, str]):
"""
Import source `docx` document and, if stateless, save to the working directory. If you're finding errors in
the build step, it could be you need to convert your base64 string to "utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path, bytes, or str
Filename to open, bytes from an opened file, or a base64 string
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
source = self._get_validated_bytes(source, base_type = "work")
if self.stateless:
with open(self.directory / F"{self.work_name}.docx", "wb") as w:
w.write(source)
else:
self.work = source
def set_cover(self,
source: [Path, bytes],
contributor: Optional[Contributor] = None):
"""
Import cover image and, if stateless, save to the working directory, along with any rights and contributor
information. If you're finding errors in the build step, it could be you need to convert your base64 string to
"utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path or bytes
Filename to open, including path, or bytes for file
contributor: Contributor
Optional, string indicating contributor name for cover image.
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting cover."
raise PermissionError(e)
# Cover contributor
if contributor:
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
# Cover image
source = self._get_validated_bytes(source, base_type = "cover")
if self.stateless:
kind = filetype.guess(source).extension
with open(self.directory / F"cover.{kind}", "wb") as w:
w.write(source)
_c.save_json(self.metadata.dict(by_alias=True),
self.directory / _c.DEFAULT_METADATA_SETTINGS,
overwrite=True)
else:
self.cover = source
def add_contributor(self, contributor: Contributor):
"""
Add a contributor to the list of those supporting the creation of the work. `contributor` is defined as a dict:
.. code-block:: python
contributor = {
"role": "artist",
"name": "Great Artist",
"year": "2021",
"terms": "Public Domain."
}
Parameters
----------
contributor: Contributor
Include the types of contributor who supported the creation of the work. `role`: `artist`, `editor`, `translator`.
Raises
------
PermissionError: if metadata not yet validated.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before adding contributors, or add the contributors when you set the metadata."
raise PermissionError(e)
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
def set_dedication(self, dedication: [str, list[str]]):
"""
Set dedication page for creative work. Provide as a string, unless it is on multiple paragraphs.
Parameters
----------
dedication: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting dedication."
raise PermissionError(e)
self.dedication = pages.create_dedication_xhtml(dedication)
if self.stateless:
with open(self.directory / F"dedication.xhtml", "w") as w:
w.write(self.dedication)
def set_rights(self, rights: [str, list[str]]):
"""
Set publication `long_rights` for creative work. Provide as a string, or list of strings if it is on multiple
paragraphs.
There are multiple appropriate rights, and two examples are below. Modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Parameters
----------
rights: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting rights."
raise PermissionError(e)
if isinstance(rights, str):
rights = [rights]
self.metadata.long_rights = rights
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
############################################################################
# BUILD CREATIVE WORK
############################################################################
def build(self):
"""
Automatically build the creative work as a standards compliant EPUB3. Save to the root directory.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before building creative work."
raise PermissionError(e)
epub_path = self.directory.parent / F"{self.work_name}.epub"
# Generate the initial creative content using Pandoc
# pypandoc can't handle PosixPaths ...
if self.stateless:
pypandoc.convert_file(str(self.directory / F"{self.work_name}.docx"),
format="docx",
to="epub3",
outputfile=str(epub_path))
else:
# Maybe one day Pandoc can return an epub object and we won't save the interim file
pypandoc.convert_text(self.work,
format="docx",
to="epub3",
outputfile=str(epub_path))
# Generate the epub version
with UpdateZipFile(epub_path, "a") as w:
# REMOVES
REMOVES = ["EPUB/styles/stylesheet1.css", "EPUB/text/title_page.xhtml", "EPUB/nav.xhtml"]
# DEFAULT COMPONENTS
DEFAULT = [(self.source_path / "css" / "core.css", "EPUB/css/core.css"),
(self.source_path / "images" / "logo.svg", "EPUB/images/logo.svg"),
(self.source_path / "xhtml" / "onix.xml", "EPUB/onix.xml"),
(self.source_path / "xhtml" / "container.xml", "META-INF/container.xml")]
for default_file, write_file in DEFAULT:
w.write(default_file, write_file)
# DEFAULT FONTS
for f in os.listdir(self.source_path / "fonts"):
w.write(self.source_path / "fonts" / f, F"EPUB/fonts/{f}")
# ADD titlepage.xhtml
w.writestr("EPUB/text/titlepage.xhtml", pages.create_titlepage_xhtml(self.metadata))
# ADD colophon.xhtml
w.writestr("EPUB/text/colophon.xhtml", pages.create_colophon_xhtml(self.metadata))
# ADD cover.img
if self.stateless:
for image_path in [self.directory / F"cover.{t}" for t in ["jpg", "jpeg", "png", "gif", "svg"]]:
if image_path.exists():
w.write(image_path, F"EPUB/images/{image_path.name}")
elif self.cover:
t = filetype.guess(self.cover).extension
w.writestr(F"EPUB/images/cover.{t}", self.cover)
# GET DEDICATION and CHAPTERS
spine = []
# If a dedication exists (saved to disk or held in memory), add it to the work and spine
if (self.directory / "dedication.xhtml").exists() or self.dedication:
if self.dedication:
w.writestr("EPUB/text/dedication.xhtml", self.dedication)
else:
w.write(self.directory / "dedication.xhtml", "EPUB/text/dedication.xhtml")
spine = [Matter(partition="frontmatter", content="dedication", title="Dedication")]
CHAPTERS = [f for f in w.namelist() if f.startswith("EPUB/text/ch")]
CHAPTERS.sort()
self.metadata.word_count = 0
for chapter in CHAPTERS:
file_as = F"EPUB/text/chapter-{chapter.split('.')[0][-1]}.xhtml"
try:
chapter_xml = w.read(chapter)
except KeyError:
continue
if file_as != chapter:
# If delete and then re-add same file, causes ZipFile confusion
REMOVES.append(chapter)
# Restructure chapter xml into standard format
chapter_xml = pages.restructure_chapter(chapter_xml)
chapter_title = chapter_xml.title.string
# Count the words (XHTML and HTML treated differently by BeautifulSoup, so first extract `section`)
words = BeautifulSoup(str(chapter_xml.section), "lxml").get_text()
self.metadata.word_count += len(words.replace("\n", " ").replace(" ", " ").strip().split())
w.writestr(file_as, str(chapter_xml))
spine.append(Matter(partition=MatterPartition.body, title=chapter_title))
# PANDOC MAY STILL ADD IMAGES FOUND IN THE WORK WHICH WE NEED TO DISCOVER AND ADD TO THE MANIFEST
# NOTE, these are not only to be added to the manifest, but the folder renamed as well
image_manifest = [f.replace("EPUB/", "") for f in w.namelist() if f.startswith("EPUB/images/")]
for img in [f for f in w.namelist() if f.startswith("EPUB/media/")]:
REMOVES.append(img)
new_img = img.replace("/media/", "/images/")
try:
old_img = w.read(img)
w.writestr(new_img, old_img)
except KeyError:
continue
image_manifest.append(new_img.replace("EPUB/", ""))
# ADD content.opf
w.writestr("EPUB/content.opf", pages.create_content_opf(self.metadata, image_manifest, spine))
# ADD toc.ncx
w.writestr("EPUB/toc.ncx", pages.create_toc_ncx(self.metadata, spine))
# ADD toc.xhtml
w.writestr("EPUB/toc.xhtml", pages.create_toc_xhtml(self.metadata, spine))
# PERFORM REMOVES
for remove in REMOVES:
try:
w.remove_file(remove)
except KeyError:
continue
def validate(self) -> bool:
"""
Validate the creative work as a standards compliant EPUB3.
"""
epub_path = self.directory.parent / F"{self.work_name}.epub"
_c.check_source(epub_path)
result = EpubCheck(epub_path)
return result.valid
| 0
| 0
| 0
|
f014873bcb18c5403755ad32e29f145b9b136a1d
| 3,167
|
py
|
Python
|
test/test_tarrecords.py
|
NVlabs/dlinputs
|
fbce290b7b8c5f3b00e9197c55a13b0a5a0f7953
|
[
"BSD-3-Clause"
] | 38
|
2017-10-18T05:44:25.000Z
|
2021-06-20T02:14:13.000Z
|
test/test_tarrecords.py
|
NVlabs/dlinputs
|
fbce290b7b8c5f3b00e9197c55a13b0a5a0f7953
|
[
"BSD-3-Clause"
] | 1
|
2017-12-07T20:14:18.000Z
|
2018-05-07T01:00:34.000Z
|
test/test_tarrecords.py
|
NVlabs/dlinputs
|
fbce290b7b8c5f3b00e9197c55a13b0a5a0f7953
|
[
"BSD-3-Clause"
] | 10
|
2018-01-07T15:19:17.000Z
|
2020-12-01T20:42:37.000Z
|
from __future__ import unicode_literals
import glob
import pdb
from builtins import range
from imp import reload
from io import open
import numpy as np
from dlinputs import tarrecords
reload(tarrecords)
# get_ipython().system(u'tar -ztvf testdata/imagenet-000000.tgz | sed 7q')
# get_ipython().system(u'tar xvf testdata/imagenet-000000.tgz 10.png')
# get_ipython().system(u'file 10.png')
| 31.989899
| 79
| 0.641932
|
from __future__ import unicode_literals
import glob
import pdb
from builtins import range
from imp import reload
from io import open
import numpy as np
from dlinputs import tarrecords
reload(tarrecords)
def test_tardata():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tardata(stream)
samples = list(data)
assert samples[0] == ('10.cls', b'304'), samples[0]
assert {2} == set([len(x) for x in samples])
def test_group_by_keys():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tardata(stream)
data = tarrecords.group_by_keys()(data)
samples = list(data)
keys = list(samples[0].keys())
assert 'png' in keys
assert 'cls' in keys
# get_ipython().system(u'tar -ztvf testdata/imagenet-000000.tgz | sed 7q')
# get_ipython().system(u'tar xvf testdata/imagenet-000000.tgz 10.png')
# get_ipython().system(u'file 10.png')
def test_decoder():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tardata(stream)
data = tarrecords.group_by_keys()(data)
data = tarrecords.decoder()(data)
samples = list(data)
# print samples[0].keys()
keys = list(samples[0].keys())
assert 'png' in keys
assert 'cls' in keys
def test_tariterator1():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tariterator1(stream)
samples = list(data)
assert len(samples) == 47
assert samples[0]["__key__"] == "10", samples[0]["__key__"]
assert set(samples[3].keys()) == set(
"__key__ png cls xml wnid".split()), list(samples[3].keys())
assert samples[-1]["png"].shape == (400, 300, 3)
def test_tariterator():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tariterator(stream)
samples = list(data)
assert len(samples) == 47
for i in range(len(samples)):
assert samples[i]["png"].dtype == np.dtype(
'f'), samples[i]["png"].dtype
assert np.amin(samples[i]["png"]) >= 0, np.amin(samples[i]["png"])
assert np.amin(samples[i]["png"]) <= 1, np.amax(samples[i]["png"])
assert samples[0]["__key__"] == "10"
assert set(samples[3].keys()) == set(
"__key__ __source__ cls png xml wnid".split()), list(samples[3].keys())
assert samples[-1]["png"].shape == (400, 300, 3)
def test_TarWriter():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tariterator(stream, decode=False)
samples = list(data)
stream = open("/tmp/test.tgz", "wb")
sink = tarrecords.TarWriter(stream, encode=False)
for sample in samples:
sink.write(sample)
sink.close()
stream.close()
# Check if test.tgz was created
assert len(glob.glob("/tmp/test.tgz")) == 1
stream = open("/tmp/test.tgz", mode='rb')
data = tarrecords.tariterator(stream)
samples = list(data)
assert len(samples) == 47
# assert samples[0]["__key__"].decode() == "10"
assert set(samples[3].keys()) == set(
"__key__ __source__ cls png xml wnid".split()), list(samples[3].keys())
assert samples[-1]["png"].shape == (400, 300, 3)
| 2,631
| 0
| 138
|
6a64971723ab828855f7055bd04df9ded8f9d292
| 54,054
|
py
|
Python
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-hacluster-0/charm/hooks/utils.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 17
|
2016-07-07T23:39:17.000Z
|
2020-05-06T14:03:54.000Z
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-hacluster-0/charm/hooks/utils.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 111
|
2021-10-01T18:18:17.000Z
|
2022-03-29T12:23:20.000Z
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-hacluster-0/charm/hooks/utils.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 20
|
2016-11-03T04:04:09.000Z
|
2021-01-04T20:40:43.000Z
|
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import pcmk
import json
import os
import re
import subprocess
import socket
import fcntl
import struct
import time
import xml.etree.ElementTree as ET
import itertools
from base64 import b64decode
from charmhelpers.core.strutils import (
bool_from_string,
)
from charmhelpers.core.hookenv import (
local_unit,
log,
TRACE,
DEBUG,
ERROR,
INFO,
WARNING,
leader_get,
leader_set,
relation_get,
relation_set,
related_units,
relation_ids,
config,
unit_get,
status_set,
)
from charmhelpers.core import unitdata
from charmhelpers.contrib.openstack.utils import (
get_host_ip,
set_unit_paused,
clear_unit_paused,
is_unit_paused_set,
is_unit_upgrading_set,
)
from charmhelpers.contrib.openstack.ha.utils import (
assert_charm_supports_dns_ha
)
from charmhelpers.core.host import (
mkdir,
rsync,
service_start,
service_stop,
service_running,
write_file,
file_hash,
lsb_release,
init_is_systemd,
CompareHostReleases,
)
from charmhelpers.fetch import (
apt_install,
add_source,
apt_update,
)
from charmhelpers.contrib.hahelpers.cluster import (
peer_ips,
)
from charmhelpers.contrib.network import ip as utils
import netifaces
from netaddr import IPNetwork
import jinja2
TEMPLATES_DIR = 'templates'
COROSYNC_CONF = '/etc/corosync/corosync.conf'
COROSYNC_DEFAULT = '/etc/default/corosync'
COROSYNC_AUTHKEY = '/etc/corosync/authkey'
COROSYNC_HACLUSTER_ACL_DIR = '/etc/corosync/uidgid.d'
COROSYNC_HACLUSTER_ACL = COROSYNC_HACLUSTER_ACL_DIR + '/hacluster'
COROSYNC_CONF_FILES = [
COROSYNC_DEFAULT,
COROSYNC_AUTHKEY,
COROSYNC_CONF,
COROSYNC_HACLUSTER_ACL,
]
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']
PCMKR_CONFIG_DIR = '/etc/pacemaker'
PCMKR_AUTHKEY = PCMKR_CONFIG_DIR + '/authkey'
PCMKR_MAX_RETRIES = 3
PCMKR_SLEEP_SECS = 5
SYSTEMD_OVERRIDES_DIR = '/etc/systemd/system/{}.service.d'
SYSTEMD_OVERRIDES_FILE = '{}/overrides.conf'
MAAS_DNS_CONF_DIR = '/etc/maas_dns'
STONITH_CONFIGURED = 'stonith-configured'
def nulls(data):
"""Returns keys of values that are null (but not bool)"""
return [k for k in data.keys()
if not isinstance(data[k], bool) and not data[k]]
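# Example (illustrative data): nulls({'a': '', 'b': False, 'c': 'x'}) returns
# ['a'] - empty values are reported, booleans are ignored.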
def emit_systemd_overrides_file():
"""Generate the systemd overrides file
With Start and Stop timeout values
Note: (David Ames) workaround for Bug#1654403;
may be removed if the bug is resolved.
If a timeout value is set to -1, 'infinity' is passed instead
"""
if not init_is_systemd():
return
stop_timeout = int(config('service_stop_timeout'))
if stop_timeout < 0:
stop_timeout = 'infinity'
start_timeout = int(config('service_start_timeout'))
if start_timeout < 0:
start_timeout = 'infinity'
systemd_overrides_context = {'service_stop_timeout': stop_timeout,
'service_start_timeout': start_timeout,
}
for service in ['corosync', 'pacemaker']:
overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
if not os.path.isdir(overrides_dir):
os.mkdir(overrides_dir)
write_file(path=overrides_file,
content=render_template('systemd-overrides.conf',
systemd_overrides_context))
# Update systemd with the new information
subprocess.check_call(['systemctl', 'daemon-reload'])
def get_pcmkr_key():
"""Return the pacemaker auth key"""
return config('pacemaker_key') or config('corosync_key')
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) < "trusty":
msg = "IPv6 is not supported in the charms for Ubuntu " \
"versions less than Trusty 14.04"
status_set('blocked', msg)
raise Exception(msg)
def get_ipv6_addr():
"""Exclude any ip addresses configured or managed by corosync."""
excludes = []
for rid in relation_ids('ha'):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
for res in resources.values():
if 'ocf:heartbeat:IPv6addr' in res:
res_params = parse_data(rid, unit, 'resource_params')
res_p = res_params.get(res)
if res_p:
for k, v in res_p.items():
if utils.is_ipv6(v):
log("Excluding '%s' from address list" % v,
level=DEBUG)
excludes.append(v)
return utils.get_ipv6_addr(exc_list=excludes)[0]
def get_node_flags(flag):
"""Nodes which have advertised the given flag.
:param flag: Flag to check peers relation data for.
:type flag: str
:returns: List of IPs of nodes that have advertised the given flag
:rtype: List
"""
hosts = []
if config('prefer-ipv6'):
hosts.append(get_ipv6_addr())
else:
hosts.append(unit_get('private-address'))
for relid in relation_ids('hanode'):
for unit in related_units(relid):
if relation_get(flag, rid=relid, unit=unit):
hosts.append(relation_get('private-address',
rid=relid,
unit=unit))
hosts.sort()
return hosts
def get_cluster_nodes():
"""Nodes which have advertised that they are ready to join the cluster.
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
return get_node_flags('ready')
def get_member_ready_nodes():
"""List of nodes which have advertised that they have joined the cluster.
:returns: List of IPs of nodes that have joined the cluster.
:rtype: List
"""
return get_node_flags('member_ready')
def parse_data(relid, unit, key):
"""Helper to detect and parse json or ast based relation data"""
_key = 'json_{}'.format(key)
data = relation_get(_key, unit, relid) or relation_get(key, unit, relid)
if data:
try:
return json.loads(data)
except (TypeError, ValueError):
return ast.literal_eval(data)
return {}
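# Example (illustrative relation data): both encodings decode to the same
# structure, e.g. json.loads('{"res_x_vip": "ocf:heartbeat:IPaddr2"}') and
# ast.literal_eval("{'res_x_vip': 'ocf:heartbeat:IPaddr2'}") both return
# {'res_x_vip': 'ocf:heartbeat:IPaddr2'}.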
def configure_monitor_host():
"""Configure extra monitor host for better network failure detection"""
log('Checking monitor host configuration', level=DEBUG)
monitor_host = config('monitor_host')
if monitor_host:
if not pcmk.crm_opt_exists('ping'):
log('Implementing monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
monitor_interval = config('monitor_interval')
cmd = ('crm -w -F configure primitive ping '
'ocf:pacemaker:ping params host_list="%s" '
'multiplier="100" op monitor interval="%s" ' %
(monitor_host, monitor_interval))
pcmk.commit(cmd)
cmd = ('crm -w -F configure clone cl_ping ping '
'meta interleave="true"')
pcmk.commit(cmd)
else:
log('Reconfiguring monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
cmd = ('crm -w -F resource param ping set host_list="%s"' %
monitor_host)
else:
if pcmk.crm_opt_exists('ping'):
log('Disabling monitor host configuration', level=DEBUG)
pcmk.commit('crm -w -F resource stop ping')
pcmk.commit('crm -w -F configure delete ping')
def configure_cluster_global(failure_timeout, cluster_recheck_interval=60):
"""Configure global cluster options
:param failure_timeout: Duration in seconds (measured from the most recent
failure) to wait before resetting failcount to 0.
:type failure_timeout: int
:param cluster_recheck_interval: Duration in seconds for the polling
interval at which the cluster checks for
changes in the resource parameters,
constraints or other cluster options.
:type cluster_recheck_interval: int
"""
log('Applying global cluster configuration', level=DEBUG)
# NOTE(lathiat) quorum in a two-node scenario is handled by
# corosync two_node=1. In this case quorum is required for
# initial cluster startup but not if a node was previously in
# contact with the full cluster.
log('Configuring no-quorum-policy to stop', level=DEBUG)
cmd = "crm configure property no-quorum-policy=stop"
pcmk.commit(cmd)
cmd = ('crm configure rsc_defaults $id="rsc-options" '
'resource-stickiness="100" '
'failure-timeout={}'.format(failure_timeout))
pcmk.commit(cmd)
log('Configuring cluster-recheck-interval to {} seconds'.format(
cluster_recheck_interval), level=DEBUG)
cmd = "crm configure property cluster-recheck-interval={}".format(
cluster_recheck_interval)
pcmk.commit(cmd)
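# Example (illustrative values): configure_cluster_global(180) commits
# 'crm configure property no-quorum-policy=stop', then
# 'crm configure rsc_defaults $id="rsc-options" resource-stickiness="100" failure-timeout=180',
# and finally 'crm configure property cluster-recheck-interval=60'.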
def remove_legacy_maas_stonith_resources():
"""Remove maas stoniths resources using the old name."""
stonith_resources = pcmk.crm_maas_stonith_resource_list()
for resource_name in stonith_resources:
pcmk.commit(
'crm -w -F resource stop {}'.format(resource_name))
pcmk.commit(
'crm -w -F configure delete {}'.format(resource_name))
def configure_null_stonith_resource(stonith_hostnames):
"""Create null stonith resource for the given hostname.
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote nodes as.
:type stonith_hostnames: List
"""
ctxt = {
'stonith_plugin': 'stonith:null',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-null',
'resource_params': (
"params hostlist='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
# NOTE (gnuoy): Not enabling the global stonith-enabled setting as it
# does not make sense to have stonith-enabled when the only resources
# are null resources, so defer enabling stonith-enabled to the 'real'
# stonith resources.
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def configure_maas_stonith_resource(stonith_hostnames):
"""Create maas stonith resource for the given hostname.
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote nodes as.
:type stonith_hostnames: List
"""
ctxt = {
'stonith_plugin': 'stonith:external/maas',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-maas',
'url': config('maas_url'),
'apikey': config('maas_credentials'),
'resource_params': (
"params url='{url}' apikey='{apikey}' hostnames='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def enable_stonith():
"""Enable stonith via the global property stonith-enabled.
:raises: EnableStonithFailed
"""
log('Enabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=true",
failure_is_fatal=True)
except subprocess.CalledProcessError as e:
raise EnableStonithFailed(e)
def disable_stonith(failure_is_fatal=True):
"""Disable stonith via the global property stonith-enabled.
:param failure_is_fatal: Whether to raise exception if command fails.
:type failure_is_fatal: bool
:raises: DisableStonithFailed
"""
log('Disabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=false",
failure_is_fatal=failure_is_fatal)
except subprocess.CalledProcessError as e:
raise DisableStonithFailed(e)
def get_ip_addr_from_resource_params(params):
"""Returns the IP address in the resource params provided
:return: the IP address in the params or None if not found
"""
reg_ex = r'.* ip_address="([a-fA-F\d\:\.]+)".*'
res = re.search(reg_ex, params)
return res.group(1) if res else None
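# Example (illustrative params string):
#   get_ip_addr_from_resource_params('params ip_address="10.0.0.10" nic="eth0"')
#   returns '10.0.0.10'; if no ip_address is present, None is returned.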
def need_resources_on_remotes():
"""Whether to run resources on remote nodes.
Check the 'enable-resources' setting across the remote units. If it is
absent or inconsistent then raise a ValueError.
:returns: Whether to run resources on remote nodes
:rtype: bool
:raises: ValueError
"""
responses = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
data = parse_data(relid, unit, 'enable-resources')
# parse_data returns {} if key is absent.
if type(data) is bool:
responses.append(data)
if len(set(responses)) == 1:
run_resources_on_remotes = responses[0]
else:
msg = "Inconsistent or absent enable-resources setting {}".format(
responses)
log(msg, level=WARNING)
raise ValueError(msg)
return run_resources_on_remotes
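# Example (illustrative responses): if every pacemaker-remote unit reports
# enable-resources=True the function returns True; an empty or mixed set of
# responses (e.g. [True, False]) raises ValueError.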
def set_cluster_symmetry():
"""Set the cluster symmetry.
By default the cluster is an Opt-out cluster (equivalent to
symmetric-cluster=true) this means that any resource can run anywhere
unless a node explicitly Opts-out. When using pacemaker-remotes there may
be hundreds of nodes and if they are not prepared to run resources the
cluster should be switched to an Opt-in cluster.
"""
try:
symmetric = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate desired symmetric-cluster setting'
log(msg, level=WARNING)
return
log('Configuring symmetric-cluster: {}'.format(symmetric), level=DEBUG)
cmd = "crm configure property symmetric-cluster={}".format(
str(symmetric).lower())
pcmk.commit(cmd, failure_is_fatal=True)
def add_score_location_rule(res_name, node, location_score):
"""Add or update a location rule that uses a score.
:param res_name: Resource that this location rule controls.
:type res_name: str
:param node: Node that this location rule relates to.
:type node: str
:param location_score: The score to give this location.
:type location_score: int
"""
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
pcmk.crm_update_location(
loc_constraint_name,
res_name,
location_score,
node)
def add_location_rules_for_local_nodes(res_name):
"""Add location rules for running resource on local nodes.
Add location rules allowing the given resource to run on local nodes (e.g.
not remote nodes).
:param res_name: Resource name to create location rules for.
:type res_name: str
"""
for node in pcmk.list_nodes():
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
if not pcmk.crm_opt_exists(loc_constraint_name):
cmd = 'crm -w -F configure location {} {} 0: {}'.format(
loc_constraint_name,
res_name,
node)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
def add_location_rules_for_pacemaker_remotes(res_names):
"""Add location rules for pacemaker remote resources on local nodes.
Add location rules allowing the pacemaker remote resource to run on a local
node. Use location score rules to spread resources out.
:param res_names: Pacemaker remote resource names.
:type res_names: List[str]
"""
res_names = sorted(res_names)
nodes = sorted(pcmk.list_nodes())
prefered_nodes = list(zip(res_names, itertools.cycle(nodes)))
for res_name in res_names:
for node in nodes:
location_score = 0
if (res_name, node) in prefered_nodes:
location_score = 200
add_score_location_rule(
res_name,
node,
location_score)
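# Example (illustrative names): with res_names=['r1', 'r2', 'r3'] and two local
# nodes ['n1', 'n2'], the preferred pairs are [('r1', 'n1'), ('r2', 'n2'),
# ('r3', 'n1')], so each resource gets a score of 200 on its preferred node and
# 0 everywhere else.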
def configure_pacemaker_remote(remote_hostname, remote_ip):
"""Create a resource corresponding to the pacemaker remote node.
:param remote_hostname: Remote hostname used for registering remote node.
:type remote_hostname: str
:param remote_ip: Remote IP used for registering remote node.
:type remote_ip: str
:returns: Name of resource for pacemaker remote node.
:rtype: str
"""
resource_name = remote_hostname
if not pcmk.is_resource_present(resource_name):
cmd = (
"crm configure primitive {} ocf:pacemaker:remote "
"params server={} reconnect_interval=60 "
"op monitor interval=30s").format(resource_name,
remote_ip)
pcmk.commit(cmd, failure_is_fatal=True)
return resource_name
def cleanup_remote_nodes(remote_nodes):
"""Cleanup pacemaker remote resources
Remove all status records of the resource and
probe the node afterwards.
:param remote_nodes: List of resource names associated with remote nodes
:type remote_nodes: list
"""
for res_name in remote_nodes:
cmd = 'crm resource cleanup {}'.format(res_name)
# Resource cleanups seem to fail occasionally even on healthy nodes
# Bug #1822962. Given this cleanup task is just housekeeping log
# the message if a failure occurs and move on.
if pcmk.commit(cmd, failure_is_fatal=False) == 0:
log(
'Cleanup of resource {} succeeded'.format(res_name),
level=DEBUG)
else:
log(
'Cleanup of resource {} failed'.format(res_name),
level=WARNING)
def configure_pacemaker_remote_stonith_resource():
"""Create a maas stonith resource for the pacemaker-remotes.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = []
stonith_resource = {}
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
stonith_hostname = parse_data(relid, unit, 'stonith-hostname')
if stonith_hostname:
hostnames.append(stonith_hostname)
if hostnames:
stonith_resource = configure_maas_stonith_resource(hostnames)
return stonith_resource
def configure_peer_stonith_resource():
"""Create a null stonith resource for lxd containers.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = [get_hostname()]
stonith_resource = {}
for relid in relation_ids('hanode'):
for unit in related_units(relid):
stonith_hostname = relation_get('hostname', unit, relid)
if stonith_hostname:
hostnames.append(stonith_hostname)
stonith_resource = configure_null_stonith_resource(hostnames)
return stonith_resource
def configure_pacemaker_remote_resources():
"""Create resources corresponding to the pacemaker remote nodes.
Create resources, location constraints and stonith resources for pacemaker
remote node.
:returns: resource dict {res_name: res_type, ...}
:rtype: dict
"""
log('Checking for pacemaker-remote nodes', level=DEBUG)
resources = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
remote_hostname = parse_data(relid, unit, 'remote-hostname')
remote_ip = parse_data(relid, unit, 'remote-ip')
if remote_hostname:
resource_name = configure_pacemaker_remote(
remote_hostname,
remote_ip)
resources.append(resource_name)
cleanup_remote_nodes(resources)
return {name: 'ocf:pacemaker:remote' for name in resources}
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
"""Add location rules as needed for resources, clones and groups
If remote nodes should not run resources then add location rules to enable
them on local nodes.
:param resources: Resource definitions
:type resources: dict
:param clones: Clone definitions
:type clones: dict
:param groups: Group definitions
:type groups: dict
"""
clones = clones or {}
groups = groups or {}
try:
resources_on_remote = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate whether resources should run on remotes'
log(msg, level=WARNING)
return
if resources_on_remote:
msg = ('Resources are permitted to run on remotes, no need to create '
'location constraints')
log(msg, level=WARNING)
return
pacemaker_remotes = []
for res_name, res_type in resources.items():
if res_name not in list(clones.values()) + list(groups.values()):
if res_type == 'ocf:pacemaker:remote':
pacemaker_remotes.append(res_name)
else:
add_location_rules_for_local_nodes(res_name)
add_location_rules_for_pacemaker_remotes(pacemaker_remotes)
for cl_name in clones:
add_location_rules_for_local_nodes(cl_name)
# Limit clone resources to only running on X number of nodes where X
# is the number of local nodes. Otherwise they will show as offline
# on the remote nodes.
node_count = len(pcmk.list_nodes())
cmd = ('crm_resource --resource {} --set-parameter clone-max '
'--meta --parameter-value {}').format(cl_name, node_count)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
for grp_name in groups:
add_location_rules_for_local_nodes(grp_name)
def restart_corosync_on_change():
"""Simple decorator to restart corosync if any of its config changes"""
return wrap
def try_pcmk_wait():
"""Try pcmk.wait_for_pcmk()
Log results and set status message
"""
try:
pcmk.wait_for_pcmk()
log("Pacemaker is ready", level=TRACE)
except pcmk.ServicesNotUp as e:
status_msg = "Pacemaker is down. Please manually start it."
status_set('blocked', status_msg)
full_msg = "{} {}".format(status_msg, e)
log(full_msg, ERROR)
raise pcmk.ServicesNotUp(full_msg)
@restart_corosync_on_change()
def services_running():
"""Determine if both Corosync and Pacemaker are running
Both from the operating system perspective and with a functional test
@returns boolean
"""
pacemaker_status = service_running("pacemaker")
corosync_status = service_running("corosync")
log("Pacemaker status: {}, Corosync status: {}"
"".format(pacemaker_status, corosync_status),
level=DEBUG)
if not (pacemaker_status and corosync_status):
# OS perspective
return False
# Functional test of pacemaker. This will raise if pacemaker doesn't get
# fully ready in time:
pcmk.wait_for_pcmk()
return True
def validated_restart_corosync(retries=10):
"""Restart and validate Corosync and Pacemaker are in fact up and running.
@param retries: number of attempts to restart the services before giving up
@raises pcmk.ServicesNotUp if after retries services are still not up
"""
for restart in range(retries):
try:
if restart_corosync():
log("Corosync and Pacemaker are validated as up and running.",
INFO)
return
else:
log("Corosync or Pacemaker not validated as up yet, retrying",
WARNING)
except pcmk.ServicesNotUp:
log("Pacemaker failed to start, retrying", WARNING)
continue
msg = ("Corosync and/or Pacemaker failed to restart after {} retries"
"".format(retries))
log(msg, ERROR)
status_set('blocked', msg)
raise pcmk.ServicesNotUp(msg)
def validate_dns_ha():
"""Validate the DNS HA
Assert the charm will support DNS HA
Check MAAS related configuration options are properly set
:raises MAASConfigIncomplete: if maas_url and maas_credentials are not set
"""
# Will raise an exception if unable to continue
assert_charm_supports_dns_ha()
if config('maas_url') and config('maas_credentials'):
return True
else:
msg = ("DNS HA is requested but the maas_url or maas_credentials "
"settings are not set")
raise MAASConfigIncomplete(msg)
def setup_maas_api():
"""Install MAAS PPA and packages for accessing the MAAS API.
"""
add_source(config('maas_source'), config('maas_source_key'))
apt_update(fatal=True)
apt_install('python3-maas-client', fatal=True)
def setup_ocf_files():
"""Setup OCF resrouce agent files
"""
# TODO (thedac) Eventually we want to package the OCF files.
# Bundle with the charm until then.
mkdir('/usr/lib/ocf/resource.d/ceph')
mkdir('/usr/lib/ocf/resource.d/maas')
# Xenial corosync is not creating this directory
mkdir('/etc/corosync/uidgid.d')
rsync('files/ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd')
rsync('files/ocf/maas/dns', '/usr/lib/ocf/resource.d/maas/dns')
rsync('files/ocf/maas/maas_dns.py', '/usr/lib/heartbeat/maas_dns.py')
rsync('files/ocf/maas/maasclient/', '/usr/lib/heartbeat/maasclient/')
rsync(
'files/ocf/maas/maas_stonith_plugin.py',
'/usr/lib/stonith/plugins/external/maas')
def write_maas_dns_address(resource_name, resource_addr):
"""Writes the specified IP address to the resource file for MAAS dns.
:param resource_name: the name of the resource the address belongs to.
This is the name of the file that will be written in /etc/maas_dns.
:param resource_addr: the IP address for the resource. This will be
written to the resource_name file.
"""
mkdir(MAAS_DNS_CONF_DIR)
write_file(os.path.join(MAAS_DNS_CONF_DIR, resource_name),
content=resource_addr)
def needs_maas_dns_migration():
"""Determines if the MAAS DNS ocf resources need migration.
:return: True if migration is necessary, False otherwise.
"""
try:
subprocess.check_call(['grep', 'OCF_RESOURCE_INSTANCE',
'/usr/lib/ocf/resource.d/maas/dns'])
return True
except subprocess.CalledProcessError:
# check_call will raise an exception if grep doesn't find the string
return False
def is_in_standby_mode(node_name):
"""Check if node is in standby mode in pacemaker
@param node_name: The name of the node to check
@returns boolean - True if node_name is in standby mode
"""
out = (subprocess
.check_output(['crm', 'node', 'status', node_name])
.decode('utf-8'))
root = ET.fromstring(out)
standby_mode = False
for nvpair in root.iter('nvpair'):
if (nvpair.attrib.get('name') == 'standby' and
nvpair.attrib.get('value') == 'on'):
standby_mode = True
return standby_mode
def get_hostname():
"""Return the hostname of this unit
@returns hostname
"""
return socket.gethostname()
def enter_standby_mode(node_name, duration='forever'):
"""Put this node into standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'standby', node_name, duration])
def leave_standby_mode(node_name):
"""Take this node out of standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'online', node_name])
def node_has_resources(node_name):
"""Check if this node is running resources
@param node_name: The name of the node to check
@returns boolean - True if node_name has resources
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
has_resources = False
for resource in root.iter('resource'):
for child in resource:
if child.tag == 'node' and child.attrib.get('name') == node_name:
has_resources = True
return has_resources
def node_is_dc(node_name):
"""Check if this node is the designated controller.
@param node_name: The name of the node to check
@returns boolean - True if node_name is the DC
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
for current_dc in root.iter("current_dc"):
if current_dc.attrib.get('name') == node_name:
return True
return False
def set_unit_status():
"""Set the workload status for this unit
@returns None
"""
status_set(*assess_status_helper())
def resume_unit():
"""Resume services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
leave_standby_mode(node_name)
if is_in_standby_mode(node_name):
messages.append("Node still in standby mode")
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
else:
clear_unit_paused()
set_unit_status()
def pause_unit():
"""Pause services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
enter_standby_mode(node_name)
if not is_in_standby_mode(node_name):
messages.append("Node not in standby mode")
# some resources may take some time to be migrated off the node, so up to 3
# retries are made with a 5-second wait between each one.
i = 0
ready = False
has_resources = False
while i < PCMKR_MAX_RETRIES and not ready:
if node_has_resources(node_name):
has_resources = True
i += 1
time.sleep(PCMKR_SLEEP_SECS)
else:
ready = True
has_resources = False
if has_resources:
messages.append("Resources still running on unit")
status, message = assess_status_helper()
# New status message will indicate the resource is not running
if status != 'active' and 'not running' not in message:
messages.append(message)
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
else:
set_unit_paused()
status_set("maintenance",
"Paused. Use 'resume' action to resume normal service.")
def assess_status_helper():
"""Assess status of unit
@returns status, message - status is workload status and message is any
corresponding messages
"""
if config('stonith_enabled') in ['true', 'True', True]:
return(
'blocked',
'stonith_enabled config option is no longer supported')
if config('no_quorum_policy'):
if config('no_quorum_policy').lower() not in ['ignore', 'freeze',
'stop', 'suicide']:
return(
'blocked',
'Invalid no_quorum_policy specified')
if is_unit_upgrading_set():
return ("blocked",
"Ready for do-release-upgrade. Set complete when finished")
if is_waiting_unit_series_upgrade_set():
return ("blocked",
"HA services shutdown, peers are ready for series upgrade")
if is_unit_paused_set():
return ("maintenance",
"Paused. Use 'resume' action to resume normal service.")
node_count = int(config('cluster_count'))
status = 'active'
message = 'Unit is ready and clustered'
try:
try_pcmk_wait()
except pcmk.ServicesNotUp:
message = 'Pacemaker is down'
status = 'blocked'
for relid in relation_ids('hanode'):
if len(related_units(relid)) + 1 < node_count:
status = 'blocked'
message = ("Insufficient peer units for ha cluster "
"(require {})".format(node_count))
# if the status was not changed earlier, we verify the maintenance status
try:
if status == 'active':
prop = pcmk.get_property('maintenance-mode').strip()
except pcmk.PropertyNotFound:
# the property is not in the output of 'crm configure show xml', so we use
# the default value for this property. For crmsh>=2.2.0 the default
# value is automatically provided by show-property or get-property.
prop = 'false'
if (status == 'active' and prop == 'true'):
# maintenance mode enabled in pacemaker
status = 'maintenance'
message = 'Pacemaker in maintenance mode'
for resource in get_resources().keys():
if not pcmk.is_resource_present(resource):
return ("waiting",
"Resource: {} not yet configured".format(resource))
if not pcmk.crm_res_running_on_node(resource, get_hostname()):
return ("blocked",
"Resource: {} not running".format(resource))
return status, message
def ocf_file_exists(res_name, resources,
RES_ROOT='/usr/lib/ocf/resource.d'):
"""To determine whether the ocf file exists, allow multiple ocf
files with the same name in different directories
@param res_name: The name of the ocf resource to check
@param resources: ocf resources
@return: boolean - True if the ocf resource exists
"""
res_type = None
for key, val in resources.items():
if res_name == key:
if len(val.split(':')) > 2:
res_type = val.split(':')[1]
ocf_name = res_name.replace('res_', '').replace('_', '-')
ocf_file = os.path.join(RES_ROOT, res_type, ocf_name)
if os.path.isfile(ocf_file):
return True
return False
def kill_legacy_ocf_daemon_process(res_name):
"""Kill legacy ocf daemon process
@param res_name: The name of the ocf process to kill
"""
ocf_name = res_name.replace('res_', '').replace('_', '-')
reg_expr = r'([0-9]+)\s+[^0-9]+{}'.format(ocf_name)
cmd = ['ps', '-eo', 'pid,cmd']
ps = subprocess.check_output(cmd).decode('utf-8')
res = re.search(reg_expr, ps, re.MULTILINE)
if res:
pid = res.group(1)
subprocess.call(['sudo', 'kill', '-9', pid])
def maintenance_mode(enable):
"""Enable/disable pacemaker's maintenance mode"""
log('Setting maintenance-mode to %s' % enable, level=INFO)
try:
current_state = pcmk.get_property('maintenance-mode').strip().lower()
except pcmk.PropertyNotFound:
current_state = 'false'
current_state = True if current_state == 'true' else False
log('Is maintenance-mode currently enabled? %s' % current_state,
level=DEBUG)
if current_state != enable:
pcmk.set_property('maintenance-mode', str(enable).lower())
else:
log('Desired value for maintenance-mode is already set', level=DEBUG)
def get_resources():
"""Get resources from the HA relation
:returns: dict of resources
"""
resources = {}
for rid in relation_ids("ha"):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
return resources
def set_waiting_unit_series_upgrade():
"""Set the unit to a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=true in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', True)
def clear_waiting_unit_series_upgrade():
"""Clear the unit from a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=false in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', False)
def is_waiting_unit_series_upgrade_set():
"""Return the state of the kv().get('waiting-unit-series-upgrade').
To help with units that don't have HookData() (testing)
if it excepts, return False
"""
with unitdata.HookData()() as t:
kv = t[0]
if not kv.get('waiting-unit-series-upgrade'):
return False
return kv.get('waiting-unit-series-upgrade')
def get_series_upgrade_notifications(relid):
"""Check peers for notifications that they are upgrading their series.
Returns a dict of the form {unit_name: target_series, ...}
:param relid: Relation id to check for notifications.
:type relid: str
:returns: dict
"""
notifications = {}
for unit in related_units(relid):
relation_data = relation_get(rid=relid, unit=unit)
for key, value in relation_data.items():
if key.startswith('series_upgrade_of_'):
notifications[unit] = value
log("Found series upgrade notifications: {}".format(notifications), DEBUG)
return notifications
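# Illustrative usage sketch (relation id, unit name and series are
# hypothetical):
#
#     get_series_upgrade_notifications('hanode:1')
#     # -> {'hacluster/1': 'xenial'} if that peer has published a
#     #    'series_upgrade_of_*' key via notify_peers_of_series_upgrade()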
def disable_ha_services():
"""Shutdown and disable HA services."""
log("Disabling HA services", INFO)
for svc in ['corosync', 'pacemaker']:
disable_lsb_services(svc)
if service_running(svc):
service_stop(svc)
def enable_ha_services():
"""Startup and enable HA services."""
log("Enabling HA services", INFO)
for svc in ['pacemaker', 'corosync']:
enable_lsb_services(svc)
if not service_running(svc):
service_start(svc)
def notify_peers_of_series_upgrade():
"""Notify peers which release this unit is upgrading from."""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: ubuntu_rel}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def clear_series_upgrade_notification():
"""Remove from series upgrade notification from peers."""
log("Removing upgrade notification from peers")
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: None}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def set_stonith_configured(is_configured):
"""Set the STONITH_CONFIGURED state.
    :param is_configured: Whether STONITH has been configured.
    :type is_configured: bool
"""
leader_set({STONITH_CONFIGURED: is_configured})
def is_stonith_configured():
"""Get the STONITH_CONFIGURED state.
:returns: State of STONITH_CONFIGURED state.
:rtype: bool
"""
configured = leader_get(STONITH_CONFIGURED) or 'False'
return bool_from_string(configured)
def get_hanode_hostnames():
"""Hostnames of nodes in the hanode relation.
:returns: List of hostnames of nodes in the hanode relation.
:rtype: List
"""
hanode_hostnames = [get_hostname()]
for relid in relation_ids('hanode'):
for unit in related_units(relid):
hostname = relation_get('hostname', rid=relid, unit=unit)
if hostname:
hanode_hostnames.append(hostname)
hanode_hostnames.sort()
return hanode_hostnames
def update_node_list():
"""Determine and delete unexpected nodes from the corosync ring.
:returns: Set of pcmk nodes not part of Juju hanode relation
:rtype: Set[str]
:raises: RemoveCorosyncNodeFailed
"""
pcmk_nodes = set(pcmk.list_nodes())
juju_nodes = set(get_hanode_hostnames())
diff_nodes = pcmk_nodes.difference(juju_nodes)
log("pcmk_nodes[{}], juju_nodes[{}], diff[{}]"
"".format(pcmk_nodes, juju_nodes, diff_nodes),
DEBUG)
for old_node in diff_nodes:
try:
pcmk.set_node_status_to_maintenance(old_node)
pcmk.delete_node(old_node)
except subprocess.CalledProcessError as e:
raise RemoveCorosyncNodeFailed(old_node, e)
return diff_nodes
| 33.161963
| 79
| 0.643024
|
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import pcmk
import json
import os
import re
import subprocess
import socket
import fcntl
import struct
import time
import xml.etree.ElementTree as ET
import itertools
from base64 import b64decode
from charmhelpers.core.strutils import (
bool_from_string,
)
from charmhelpers.core.hookenv import (
local_unit,
log,
TRACE,
DEBUG,
ERROR,
INFO,
WARNING,
leader_get,
leader_set,
relation_get,
relation_set,
related_units,
relation_ids,
config,
unit_get,
status_set,
)
from charmhelpers.core import unitdata
from charmhelpers.contrib.openstack.utils import (
get_host_ip,
set_unit_paused,
clear_unit_paused,
is_unit_paused_set,
is_unit_upgrading_set,
)
from charmhelpers.contrib.openstack.ha.utils import (
assert_charm_supports_dns_ha
)
from charmhelpers.core.host import (
mkdir,
rsync,
service_start,
service_stop,
service_running,
write_file,
file_hash,
lsb_release,
init_is_systemd,
CompareHostReleases,
)
from charmhelpers.fetch import (
apt_install,
add_source,
apt_update,
)
from charmhelpers.contrib.hahelpers.cluster import (
peer_ips,
)
from charmhelpers.contrib.network import ip as utils
import netifaces
from netaddr import IPNetwork
import jinja2
TEMPLATES_DIR = 'templates'
COROSYNC_CONF = '/etc/corosync/corosync.conf'
COROSYNC_DEFAULT = '/etc/default/corosync'
COROSYNC_AUTHKEY = '/etc/corosync/authkey'
COROSYNC_HACLUSTER_ACL_DIR = '/etc/corosync/uidgid.d'
COROSYNC_HACLUSTER_ACL = COROSYNC_HACLUSTER_ACL_DIR + '/hacluster'
COROSYNC_CONF_FILES = [
COROSYNC_DEFAULT,
COROSYNC_AUTHKEY,
COROSYNC_CONF,
COROSYNC_HACLUSTER_ACL,
]
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']
PCMKR_CONFIG_DIR = '/etc/pacemaker'
PCMKR_AUTHKEY = PCMKR_CONFIG_DIR + '/authkey'
PCMKR_MAX_RETRIES = 3
PCMKR_SLEEP_SECS = 5
SYSTEMD_OVERRIDES_DIR = '/etc/systemd/system/{}.service.d'
SYSTEMD_OVERRIDES_FILE = '{}/overrides.conf'
MAAS_DNS_CONF_DIR = '/etc/maas_dns'
STONITH_CONFIGURED = 'stonith-configured'
class MAASConfigIncomplete(Exception):
pass
class RemoveCorosyncNodeFailed(Exception):
def __init__(self, node_name, called_process_error):
msg = 'Removing {} from the cluster failed. {} output={}'.format(
node_name, called_process_error, called_process_error.output)
super(RemoveCorosyncNodeFailed, self).__init__(msg)
class EnableStonithFailed(Exception):
def __init__(self, called_process_error):
msg = 'Enabling STONITH failed. {} output={}'.format(
called_process_error, called_process_error.output)
super(EnableStonithFailed, self).__init__(msg)
class DisableStonithFailed(Exception):
def __init__(self, called_process_error):
msg = 'Disabling STONITH failed. {} output={}'.format(
called_process_error, called_process_error.output)
super(DisableStonithFailed, self).__init__(msg)
def disable_upstart_services(*services):
for service in services:
with open("/etc/init/{}.override".format(service), "wt") as override:
override.write("manual")
def enable_upstart_services(*services):
for service in services:
path = '/etc/init/{}.override'.format(service)
if os.path.exists(path):
os.remove(path)
def disable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'remove'])
def enable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'defaults'])
def get_iface_ipaddr(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8919, # SIOCGIFADDR
        struct.pack('256s', iface[:15].encode('utf-8'))
)[20:24])
def get_iface_netmask(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x891b, # SIOCGIFNETMASK
        struct.pack('256s', iface[:15].encode('utf-8'))
)[20:24])
def get_netmask_cidr(netmask):
netmask = netmask.split('.')
binary_str = ''
for octet in netmask:
binary_str += bin(int(octet))[2:].zfill(8)
return str(len(binary_str.rstrip('0')))
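# Worked example (illustrative): 255.255.252.0 is twenty-two 1-bits followed
# by ten 0-bits, so:
#
#     get_netmask_cidr('255.255.252.0')   # -> '22'
#     get_netmask_cidr('255.255.255.0')   # -> '24'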
def get_network_address(iface):
if iface:
iface = str(iface)
network = "{}/{}".format(get_iface_ipaddr(iface),
get_netmask_cidr(get_iface_netmask(iface)))
ip = IPNetwork(network)
return str(ip.network)
else:
return None
def get_ipv6_network_address(iface):
# Behave in same way as ipv4 get_network_address() above if iface is None.
if not iface:
return None
try:
ipv6_addr = utils.get_ipv6_addr(iface=iface)[0]
all_addrs = netifaces.ifaddresses(iface)
for addr in all_addrs[netifaces.AF_INET6]:
if ipv6_addr == addr['addr']:
network = "{}/{}".format(addr['addr'], addr['netmask'])
return str(IPNetwork(network).network)
except ValueError:
msg = "Invalid interface '%s'" % iface
status_set('blocked', msg)
raise Exception(msg)
msg = "No valid network found for interface '%s'" % iface
status_set('blocked', msg)
raise Exception(msg)
def get_corosync_id(unit_name):
# Corosync nodeid 0 is reserved so increase all the nodeids to avoid it
off_set = 1000
return off_set + int(unit_name.split('/')[1])
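# Worked example (illustrative unit names): the unit number is simply offset
# by 1000 so that the reserved nodeid 0 is never produced:
#
#     get_corosync_id('hacluster/0')    # -> 1000
#     get_corosync_id('hacluster/12')   # -> 1012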
def nulls(data):
"""Returns keys of values that are null (but not bool)"""
return [k for k in data.keys()
if not isinstance(data[k], bool) and not data[k]]
def get_corosync_conf():
if config('prefer-ipv6'):
ip_version = 'ipv6'
bindnetaddr = get_ipv6_network_address
else:
ip_version = 'ipv4'
bindnetaddr = get_network_address
transport = get_transport()
# NOTE(jamespage) use local charm configuration over any provided by
    # principal charm
conf = {
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': transport,
}
# NOTE(jamespage): only populate multicast configuration if udp is
# configured
if transport == 'udp':
conf.update({
'corosync_bindnetaddr': bindnetaddr(config('corosync_bindiface')),
'corosync_mcastport': config('corosync_mcastport'),
'corosync_mcastaddr': config('corosync_mcastaddr')
})
if config('prefer-ipv6'):
conf['nodeid'] = get_corosync_id(local_unit())
if config('netmtu'):
conf['netmtu'] = config('netmtu')
if config('debug'):
conf['debug'] = config('debug')
if not nulls(conf):
log("Found sufficient values in local config to populate "
"corosync.conf", level=DEBUG)
return conf
conf = {}
for relid in relation_ids('ha'):
for unit in related_units(relid):
conf = {
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': transport,
}
            # NOTE(jamespage): only populate multicast configuration if udp is
# configured
if transport == 'udp':
bindiface = relation_get('corosync_bindiface',
unit, relid)
conf.update({
'corosync_bindnetaddr': bindnetaddr(bindiface),
'corosync_mcastport': relation_get('corosync_mcastport',
unit, relid),
'corosync_mcastaddr': config('corosync_mcastaddr'),
})
if config('prefer-ipv6'):
conf['nodeid'] = get_corosync_id(local_unit())
if config('netmtu'):
conf['netmtu'] = config('netmtu')
if config('debug'):
conf['debug'] = config('debug')
# Values up to this point must be non-null
if nulls(conf):
continue
return conf
missing = [k for k, v in conf.items() if v is None]
log('Missing required configuration: %s' % missing)
return None
def emit_systemd_overrides_file():
"""Generate the systemd overrides file
With Start and Stop timeout values
Note: (David Ames) Bug#1654403 Work around
May be removed if bug is resolved
If timeout value is set to -1 pass infinity
"""
if not init_is_systemd():
return
stop_timeout = int(config('service_stop_timeout'))
if stop_timeout < 0:
stop_timeout = 'infinity'
start_timeout = int(config('service_start_timeout'))
if start_timeout < 0:
start_timeout = 'infinity'
systemd_overrides_context = {'service_stop_timeout': stop_timeout,
'service_start_timeout': start_timeout,
}
for service in ['corosync', 'pacemaker']:
overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
if not os.path.isdir(overrides_dir):
os.mkdir(overrides_dir)
write_file(path=overrides_file,
content=render_template('systemd-overrides.conf',
systemd_overrides_context))
# Update systemd with the new information
subprocess.check_call(['systemctl', 'daemon-reload'])
def emit_corosync_conf():
corosync_conf_context = get_corosync_conf()
if corosync_conf_context:
write_file(path=COROSYNC_CONF,
content=render_template('corosync.conf',
corosync_conf_context))
return True
return False
def get_pcmkr_key():
"""Return the pacemaker auth key"""
return config('pacemaker_key') or config('corosync_key')
def emit_base_conf():
if not os.path.isdir(COROSYNC_HACLUSTER_ACL_DIR):
os.mkdir(COROSYNC_HACLUSTER_ACL_DIR)
if not os.path.isdir(PCMKR_CONFIG_DIR):
os.mkdir(PCMKR_CONFIG_DIR)
corosync_default_context = {'corosync_enabled': 'yes'}
write_file(path=COROSYNC_DEFAULT,
content=render_template('corosync',
corosync_default_context))
write_file(path=COROSYNC_HACLUSTER_ACL,
content=render_template('hacluster.acl', {}))
corosync_key = config('corosync_key')
if corosync_key:
write_file(path=COROSYNC_AUTHKEY,
content=b64decode(corosync_key),
perms=0o400)
pcmkr_key = get_pcmkr_key()
write_file(path=PCMKR_AUTHKEY,
owner='root',
group='haclient',
content=b64decode(pcmkr_key),
perms=0o440)
return True
return False
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
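# Illustrative usage sketch, mirroring emit_base_conf() above (context values
# are hypothetical):
#
#     content = render_template('corosync', {'corosync_enabled': 'yes'})
#     write_file(path=COROSYNC_DEFAULT, content=content)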
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) < "trusty":
msg = "IPv6 is not supported in the charms for Ubuntu " \
"versions less than Trusty 14.04"
status_set('blocked', msg)
raise Exception(msg)
def get_transport():
transport = config('corosync_transport')
_deprecated_transport_values = {"multicast": "udp", "unicast": "udpu"}
val = _deprecated_transport_values.get(transport, transport)
if val not in ['udp', 'udpu']:
msg = ("Unsupported corosync_transport type '%s' - supported "
"types are: %s" % (transport, ', '.join(SUPPORTED_TRANSPORTS)))
status_set('blocked', msg)
raise ValueError(msg)
return val
def get_ipv6_addr():
"""Exclude any ip addresses configured or managed by corosync."""
excludes = []
for rid in relation_ids('ha'):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
for res in resources.values():
if 'ocf:heartbeat:IPv6addr' in res:
res_params = parse_data(rid, unit, 'resource_params')
res_p = res_params.get(res)
if res_p:
                        for k, v in res_p.items():
if utils.is_ipv6(v):
log("Excluding '%s' from address list" % v,
level=DEBUG)
excludes.append(v)
return utils.get_ipv6_addr(exc_list=excludes)[0]
def get_ha_nodes():
ha_units = peer_ips(peer_relation='hanode')
ha_nodes = {}
for unit in ha_units:
corosync_id = get_corosync_id(unit)
addr = ha_units[unit]
if config('prefer-ipv6'):
if not utils.is_ipv6(addr):
# Not an error since cluster may still be forming/updating
log("Expected an ipv6 address but got %s" % (addr),
level=WARNING)
ha_nodes[corosync_id] = addr
else:
ha_nodes[corosync_id] = get_host_ip(addr)
corosync_id = get_corosync_id(local_unit())
if config('prefer-ipv6'):
addr = get_ipv6_addr()
else:
addr = get_host_ip(unit_get('private-address'))
ha_nodes[corosync_id] = addr
return ha_nodes
def get_node_flags(flag):
"""Nodes which have advertised the given flag.
:param flag: Flag to check peers relation data for.
:type flag: str
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
hosts = []
if config('prefer-ipv6'):
hosts.append(get_ipv6_addr())
else:
hosts.append(unit_get('private-address'))
for relid in relation_ids('hanode'):
for unit in related_units(relid):
if relation_get(flag, rid=relid, unit=unit):
hosts.append(relation_get('private-address',
rid=relid,
unit=unit))
hosts.sort()
return hosts
def get_cluster_nodes():
"""Nodes which have advertised that they are ready to join the cluster.
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
return get_node_flags('ready')
def get_member_ready_nodes():
"""List of nodes which have advertised that they have joined the cluster.
    :returns: List of IPs of nodes that have joined the cluster.
:rtype: List
"""
return get_node_flags('member_ready')
def parse_data(relid, unit, key):
"""Helper to detect and parse json or ast based relation data"""
_key = 'json_{}'.format(key)
data = relation_get(_key, unit, relid) or relation_get(key, unit, relid)
if data:
try:
return json.loads(data)
except (TypeError, ValueError):
return ast.literal_eval(data)
return {}
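# Illustrative sketch (relation values are hypothetical): principal charms may
# publish either JSON or python-literal encoded data, e.g.
#
#     json_resources = '{"res_ks_haproxy": "lsb:haproxy"}'
#     resources = "{'res_ks_haproxy': 'lsb:haproxy'}"
#
# parse_data() prefers the 'json_'-prefixed key, falls back to
# ast.literal_eval() for the legacy format, and returns {} when neither is
# set.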
def configure_stonith():
if configure_pacemaker_remote_stonith_resource():
configure_peer_stonith_resource()
enable_stonith()
set_stonith_configured(True)
else:
# NOTE(lourot): We enter here when no MAAS STONITH resource could be
# created. Disabling STONITH for now. We're not calling
# set_stonith_configured(), so that enabling STONITH will be retried
# later. (STONITH is now always enabled in this charm.)
# Without MAAS, we keep entering here, which isn't really an issue,
# except that this fails in rare cases, thus failure_is_fatal=False.
disable_stonith(failure_is_fatal=False)
def configure_monitor_host():
"""Configure extra monitor host for better network failure detection"""
log('Checking monitor host configuration', level=DEBUG)
monitor_host = config('monitor_host')
if monitor_host:
if not pcmk.crm_opt_exists('ping'):
log('Implementing monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
monitor_interval = config('monitor_interval')
cmd = ('crm -w -F configure primitive ping '
'ocf:pacemaker:ping params host_list="%s" '
'multiplier="100" op monitor interval="%s" ' %
(monitor_host, monitor_interval))
pcmk.commit(cmd)
cmd = ('crm -w -F configure clone cl_ping ping '
'meta interleave="true"')
pcmk.commit(cmd)
else:
log('Reconfiguring monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
            cmd = ('crm -w -F resource param ping set host_list="%s"' %
                   monitor_host)
            pcmk.commit(cmd)
else:
if pcmk.crm_opt_exists('ping'):
log('Disabling monitor host configuration', level=DEBUG)
pcmk.commit('crm -w -F resource stop ping')
pcmk.commit('crm -w -F configure delete ping')
def configure_cluster_global(failure_timeout, cluster_recheck_interval=60):
"""Configure global cluster options
:param failure_timeout: Duration in seconds (measured from the most recent
failure) to wait before resetting failcount to 0.
:type failure_timeout: int
:param cluster_recheck_interval: Duration in seconds for the polling
interval at which the cluster checks for
changes in the resource parameters,
constraints or other cluster options.
:type cluster_recheck_interval: int
"""
log('Applying global cluster configuration', level=DEBUG)
# NOTE(lathiat) quorum in a two-node scenario is handled by
# corosync two_node=1. In this case quorum is required for
# initial cluster startup but not if a node was previously in
# contact with the full cluster.
log('Configuring no-quorum-policy to stop', level=DEBUG)
cmd = "crm configure property no-quorum-policy=stop"
pcmk.commit(cmd)
cmd = ('crm configure rsc_defaults $id="rsc-options" '
'resource-stickiness="100" '
'failure-timeout={}'.format(failure_timeout))
pcmk.commit(cmd)
log('Configuring cluster-recheck-interval to {} seconds'.format(
cluster_recheck_interval), level=DEBUG)
cmd = "crm configure property cluster-recheck-interval={}".format(
cluster_recheck_interval)
pcmk.commit(cmd)
def remove_legacy_maas_stonith_resources():
"""Remove maas stoniths resources using the old name."""
stonith_resources = pcmk.crm_maas_stonith_resource_list()
for resource_name in stonith_resources:
pcmk.commit(
'crm -w -F resource stop {}'.format(resource_name))
pcmk.commit(
'crm -w -F configure delete {}'.format(resource_name))
def _configure_stonith_resource(ctxt):
hostnames = []
for host in ctxt['stonith_hostnames']:
hostnames.append(host)
if '.' in host:
hostnames.append(host.split('.')[0])
ctxt['hostnames'] = ' '.join(sorted(list(set(hostnames))))
if all(ctxt.values()):
ctxt['resource_params'] = ctxt['resource_params'].format(**ctxt)
if pcmk.is_resource_present(ctxt['stonith_resource_name']):
pcmk.crm_update_resource(
ctxt['stonith_resource_name'],
ctxt['stonith_plugin'],
ctxt['resource_params'])
else:
cmd = (
"crm configure primitive {stonith_resource_name} "
"{stonith_plugin} {resource_params}").format(**ctxt)
pcmk.commit(cmd, failure_is_fatal=True)
else:
raise ValueError("Missing configuration: {}".format(ctxt))
def configure_null_stonith_resource(stonith_hostnames):
"""Create null stonith resource for the given hostname.
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote node as.
:type stonith_hostname: List
"""
ctxt = {
'stonith_plugin': 'stonith:null',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-null',
'resource_params': (
"params hostlist='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
# NOTE (gnuoy): Not enabling the global stonith-enabled setting as it
# does not make sense to have stonith-enabled when the only resources
# are null resources, so defer enabling stonith-enabled to the 'real'
# stonith resources.
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def configure_maas_stonith_resource(stonith_hostnames):
"""Create maas stonith resource for the given hostname.
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote node as.
:type stonith_hostname: List
"""
ctxt = {
'stonith_plugin': 'stonith:external/maas',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-maas',
'url': config('maas_url'),
'apikey': config('maas_credentials'),
'resource_params': (
"params url='{url}' apikey='{apikey}' hostnames='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
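# Illustrative sketch (URL, API key and hostnames are hypothetical): for
# stonith_hostnames=['node1.maas', 'node2.maas'] the committed primitive is
# roughly:
#
#     crm configure primitive st-maas stonith:external/maas \
#         params url='http://maas:5240/MAAS' apikey='<apikey>' \
#         hostnames='node1 node1.maas node2 node2.maas' \
#         op monitor interval=25 start-delay=25 timeout=25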
def enable_stonith():
"""Enable stonith via the global property stonith-enabled.
:raises: EnableStonithFailed
"""
log('Enabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=true",
failure_is_fatal=True)
except subprocess.CalledProcessError as e:
raise EnableStonithFailed(e)
def disable_stonith(failure_is_fatal=True):
"""Disable stonith via the global property stonith-enabled.
:param failure_is_fatal: Whether to raise exception if command fails.
:type failure_is_fatal: bool
:raises: DisableStonithFailed
"""
log('Disabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=false",
failure_is_fatal=failure_is_fatal)
except subprocess.CalledProcessError as e:
raise DisableStonithFailed(e)
def get_ip_addr_from_resource_params(params):
"""Returns the IP address in the resource params provided
:return: the IP address in the params or None if not found
"""
reg_ex = r'.* ip_address="([a-fA-F\d\:\.]+)".*'
res = re.search(reg_ex, params)
return res.group(1) if res else None
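# Illustrative usage sketch (the params strings are hypothetical):
#
#     get_ip_addr_from_resource_params(
#         'params fqdn="foo.maas" ip_address="10.0.0.5"')    # -> '10.0.0.5'
#     get_ip_addr_from_resource_params('params foo="bar"')   # -> None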
def need_resources_on_remotes():
"""Whether to run resources on remote nodes.
Check the 'enable-resources' setting across the remote units. If it is
absent or inconsistent then raise a ValueError.
:returns: Whether to run resources on remote nodes
:rtype: bool
:raises: ValueError
"""
responses = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
data = parse_data(relid, unit, 'enable-resources')
# parse_data returns {} if key is absent.
if type(data) is bool:
responses.append(data)
if len(set(responses)) == 1:
run_resources_on_remotes = responses[0]
else:
msg = "Inconsistent or absent enable-resources setting {}".format(
responses)
log(msg, level=WARNING)
raise ValueError(msg)
return run_resources_on_remotes
def set_cluster_symmetry():
"""Set the cluster symmetry.
By default the cluster is an Opt-out cluster (equivalent to
symmetric-cluster=true) this means that any resource can run anywhere
unless a node explicitly Opts-out. When using pacemaker-remotes there may
be hundreds of nodes and if they are not prepared to run resources the
cluster should be switched to an Opt-in cluster.
"""
try:
symmetric = need_resources_on_remotes()
except ValueError:
        msg = 'Unable to calculate desired symmetric-cluster setting'
log(msg, level=WARNING)
return
log('Configuring symmetric-cluster: {}'.format(symmetric), level=DEBUG)
cmd = "crm configure property symmetric-cluster={}".format(
str(symmetric).lower())
pcmk.commit(cmd, failure_is_fatal=True)
def add_score_location_rule(res_name, node, location_score):
"""Add or update a location rule that uses a score.
:param res_name: Resource that this location rule controls.
:type res_name: str
:param node: Node that this location rule relates to.
:type node: str
:param location_score: The score to give this location.
:type location_score: int
"""
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
pcmk.crm_update_location(
loc_constraint_name,
res_name,
location_score,
node)
def add_location_rules_for_local_nodes(res_name):
"""Add location rules for running resource on local nodes.
Add location rules allowing the given resource to run on local nodes (eg
not remote nodes).
:param res_name: Resource name to create location rules for.
:type res_name: str
"""
for node in pcmk.list_nodes():
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
if not pcmk.crm_opt_exists(loc_constraint_name):
cmd = 'crm -w -F configure location {} {} 0: {}'.format(
loc_constraint_name,
res_name,
node)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
def add_location_rules_for_pacemaker_remotes(res_names):
"""Add location rules for pacemaker remote resources on local nodes.
Add location rules allowing the pacemaker remote resource to run on a local
node. Use location score rules to spread resources out.
:param res_names: Pacemaker remote resource names.
:type res_names: List[str]
"""
res_names = sorted(res_names)
nodes = sorted(pcmk.list_nodes())
prefered_nodes = list(zip(res_names, itertools.cycle(nodes)))
for res_name in res_names:
for node in nodes:
location_score = 0
if (res_name, node) in prefered_nodes:
location_score = 200
add_score_location_rule(
res_name,
node,
location_score)
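# Worked example of the pairing above (node and resource names are
# hypothetical):
#
#     >>> list(zip(sorted(['cmp-0', 'cmp-1', 'cmp-2']),
#     ...          itertools.cycle(sorted(['juju-1', 'juju-2']))))
#     [('cmp-0', 'juju-1'), ('cmp-1', 'juju-2'), ('cmp-2', 'juju-1')]
#
# so each remote resource scores 200 on its preferred node and 0 elsewhere.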
def configure_pacemaker_remote(remote_hostname, remote_ip):
"""Create a resource corresponding to the pacemaker remote node.
:param remote_hostname: Remote hostname used for registering remote node.
:type remote_hostname: str
:param remote_ip: Remote IP used for registering remote node.
:type remote_ip: str
:returns: Name of resource for pacemaker remote node.
:rtype: str
"""
resource_name = remote_hostname
if not pcmk.is_resource_present(resource_name):
cmd = (
"crm configure primitive {} ocf:pacemaker:remote "
"params server={} reconnect_interval=60 "
"op monitor interval=30s").format(resource_name,
remote_ip)
pcmk.commit(cmd, failure_is_fatal=True)
return resource_name
def cleanup_remote_nodes(remote_nodes):
"""Cleanup pacemaker remote resources
Remove all status records of the resource and
probe the node afterwards.
:param remote_nodes: List of resource names associated with remote nodes
:type remote_nodes: list
"""
for res_name in remote_nodes:
cmd = 'crm resource cleanup {}'.format(res_name)
# Resource cleanups seem to fail occasionally even on healthy nodes
# Bug #1822962. Given this cleanup task is just housekeeping log
# the message if a failure occurs and move on.
if pcmk.commit(cmd, failure_is_fatal=False) == 0:
log(
'Cleanup of resource {} succeeded'.format(res_name),
level=DEBUG)
else:
log(
'Cleanup of resource {} failed'.format(res_name),
level=WARNING)
def configure_pacemaker_remote_stonith_resource():
"""Create a maas stonith resource for the pacemaker-remotes.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = []
stonith_resource = {}
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
stonith_hostname = parse_data(relid, unit, 'stonith-hostname')
if stonith_hostname:
hostnames.append(stonith_hostname)
if hostnames:
stonith_resource = configure_maas_stonith_resource(hostnames)
return stonith_resource
def configure_peer_stonith_resource():
"""Create a null stonith resource for lxd containers.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = [get_hostname()]
stonith_resource = {}
for relid in relation_ids('hanode'):
for unit in related_units(relid):
stonith_hostname = relation_get('hostname', unit, relid)
if stonith_hostname:
hostnames.append(stonith_hostname)
stonith_resource = configure_null_stonith_resource(hostnames)
return stonith_resource
def configure_pacemaker_remote_resources():
"""Create resources corresponding to the pacemaker remote nodes.
Create resources, location constraints and stonith resources for pacemaker
remote node.
:returns: resource dict {res_name: res_type, ...}
:rtype: dict
"""
log('Checking for pacemaker-remote nodes', level=DEBUG)
resources = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
remote_hostname = parse_data(relid, unit, 'remote-hostname')
remote_ip = parse_data(relid, unit, 'remote-ip')
if remote_hostname:
resource_name = configure_pacemaker_remote(
remote_hostname,
remote_ip)
resources.append(resource_name)
cleanup_remote_nodes(resources)
return {name: 'ocf:pacemaker:remote' for name in resources}
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
"""Add location rules as needed for resources, clones and groups
    If remote nodes should not run resources then add location rules to
    restrict them to the local nodes.
:param resources: Resource definitions
:type resources: dict
:param clones: Clone definitions
:type clones: dict
:param groups: Group definitions
:type groups: dict
"""
clones = clones or {}
groups = groups or {}
try:
resources_on_remote = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate whether resources should run on remotes'
log(msg, level=WARNING)
return
if resources_on_remote:
msg = ('Resources are permitted to run on remotes, no need to create '
'location constraints')
log(msg, level=WARNING)
return
pacemaker_remotes = []
for res_name, res_type in resources.items():
if res_name not in list(clones.values()) + list(groups.values()):
if res_type == 'ocf:pacemaker:remote':
pacemaker_remotes.append(res_name)
else:
add_location_rules_for_local_nodes(res_name)
add_location_rules_for_pacemaker_remotes(pacemaker_remotes)
for cl_name in clones:
add_location_rules_for_local_nodes(cl_name)
# Limit clone resources to only running on X number of nodes where X
# is the number of local nodes. Otherwise they will show as offline
# on the remote nodes.
node_count = len(pcmk.list_nodes())
cmd = ('crm_resource --resource {} --set-parameter clone-max '
'--meta --parameter-value {}').format(cl_name, node_count)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
for grp_name in groups:
add_location_rules_for_local_nodes(grp_name)
def restart_corosync_on_change():
"""Simple decorator to restart corosync if any of its config changes"""
def wrap(f):
def wrapped_f(*args, **kwargs):
checksums = {}
if not is_unit_paused_set():
for path in COROSYNC_CONF_FILES:
checksums[path] = file_hash(path)
return_data = f(*args, **kwargs)
# NOTE: this assumes that this call is always done around
# configure_corosync, which returns true if configuration
            # files were actually generated
if return_data and not is_unit_paused_set():
for path in COROSYNC_CONF_FILES:
if checksums[path] != file_hash(path):
validated_restart_corosync()
break
return return_data
return wrapped_f
return wrap
def try_pcmk_wait():
"""Try pcmk.wait_for_pcmk()
Log results and set status message
"""
try:
pcmk.wait_for_pcmk()
log("Pacemaker is ready", level=TRACE)
except pcmk.ServicesNotUp as e:
status_msg = "Pacemaker is down. Please manually start it."
status_set('blocked', status_msg)
full_msg = "{} {}".format(status_msg, e)
log(full_msg, ERROR)
raise pcmk.ServicesNotUp(full_msg)
@restart_corosync_on_change()
def configure_corosync():
log('Configuring and (maybe) restarting corosync', level=DEBUG)
# David Ames Bug#1654403 Work around
# May be removed if bug is resolved
emit_systemd_overrides_file()
return emit_base_conf() and emit_corosync_conf()
def services_running():
"""Determine if both Corosync and Pacemaker are running
Both from the operating system perspective and with a functional test
@returns boolean
"""
pacemaker_status = service_running("pacemaker")
corosync_status = service_running("corosync")
log("Pacemaker status: {}, Corosync status: {}"
"".format(pacemaker_status, corosync_status),
level=DEBUG)
if not (pacemaker_status and corosync_status):
# OS perspective
return False
# Functional test of pacemaker. This will raise if pacemaker doesn't get
# fully ready in time:
pcmk.wait_for_pcmk()
return True
def validated_restart_corosync(retries=10):
"""Restart and validate Corosync and Pacemaker are in fact up and running.
@param retries: number of attempts to restart the services before giving up
@raises pcmk.ServicesNotUp if after retries services are still not up
"""
for restart in range(retries):
try:
if restart_corosync():
log("Corosync and Pacemaker are validated as up and running.",
INFO)
return
else:
log("Corosync or Pacemaker not validated as up yet, retrying",
WARNING)
except pcmk.ServicesNotUp:
log("Pacemaker failed to start, retrying", WARNING)
continue
msg = ("Corosync and/or Pacemaker failed to restart after {} retries"
"".format(retries))
log(msg, ERROR)
status_set('blocked', msg)
raise pcmk.ServicesNotUp(msg)
def restart_corosync():
if service_running("pacemaker"):
log("Stopping pacemaker", DEBUG)
service_stop("pacemaker")
if not is_unit_paused_set():
log("Stopping corosync", DEBUG)
service_stop("corosync")
log("Starting corosync", DEBUG)
service_start("corosync")
log("Starting pacemaker", DEBUG)
service_start("pacemaker")
return services_running()
def validate_dns_ha():
"""Validate the DNS HA
Assert the charm will support DNS HA
Check MAAS related configuration options are properly set
:raises MAASConfigIncomplete: if maas_url and maas_credentials are not set
"""
# Will raise an exception if unable to continue
assert_charm_supports_dns_ha()
if config('maas_url') and config('maas_credentials'):
return True
else:
msg = ("DNS HA is requested but the maas_url or maas_credentials "
"settings are not set")
raise MAASConfigIncomplete(msg)
def setup_maas_api():
"""Install MAAS PPA and packages for accessing the MAAS API.
"""
add_source(config('maas_source'), config('maas_source_key'))
apt_update(fatal=True)
apt_install('python3-maas-client', fatal=True)
def setup_ocf_files():
"""Setup OCF resrouce agent files
"""
# TODO (thedac) Eventually we want to package the OCF files.
# Bundle with the charm until then.
mkdir('/usr/lib/ocf/resource.d/ceph')
mkdir('/usr/lib/ocf/resource.d/maas')
# Xenial corosync is not creating this directory
mkdir('/etc/corosync/uidgid.d')
rsync('files/ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd')
rsync('files/ocf/maas/dns', '/usr/lib/ocf/resource.d/maas/dns')
rsync('files/ocf/maas/maas_dns.py', '/usr/lib/heartbeat/maas_dns.py')
rsync('files/ocf/maas/maasclient/', '/usr/lib/heartbeat/maasclient/')
rsync(
'files/ocf/maas/maas_stonith_plugin.py',
'/usr/lib/stonith/plugins/external/maas')
def write_maas_dns_address(resource_name, resource_addr):
"""Writes the specified IP address to the resource file for MAAS dns.
:param resource_name: the name of the resource the address belongs to.
This is the name of the file that will be written in /etc/maas_dns.
:param resource_addr: the IP address for the resource. This will be
written to the resource_name file.
"""
mkdir(MAAS_DNS_CONF_DIR)
write_file(os.path.join(MAAS_DNS_CONF_DIR, resource_name),
content=resource_addr)
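# Illustrative usage sketch (resource name and address are hypothetical):
#
#     write_maas_dns_address('res_keystone_public_hostname', '10.0.0.10')
#     # writes '10.0.0.10' to /etc/maas_dns/res_keystone_public_hostname for
#     # the MAAS DNS OCF resource to consume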
def needs_maas_dns_migration():
"""Determines if the MAAS DNS ocf resources need migration.
:return: True if migration is necessary, False otherwise.
"""
try:
subprocess.check_call(['grep', 'OCF_RESOURCE_INSTANCE',
'/usr/lib/ocf/resource.d/maas/dns'])
return True
except subprocess.CalledProcessError:
# check_call will raise an exception if grep doesn't find the string
return False
def is_in_standby_mode(node_name):
"""Check if node is in standby mode in pacemaker
@param node_name: The name of the node to check
@returns boolean - True if node_name is in standby mode
"""
out = (subprocess
.check_output(['crm', 'node', 'status', node_name])
.decode('utf-8'))
root = ET.fromstring(out)
standby_mode = False
for nvpair in root.iter('nvpair'):
if (nvpair.attrib.get('name') == 'standby' and
nvpair.attrib.get('value') == 'on'):
standby_mode = True
return standby_mode
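# Illustrative sketch of the XML this inspects (roughly what
# 'crm node status <name>' prints; ids and values are hypothetical):
#
#     <node id="1000" uname="juju-1">
#       <instance_attributes id="nodes-1000">
#         <nvpair id="nodes-1000-standby" name="standby" value="on"/>
#       </instance_attributes>
#     </node>
#
# A 'standby' nvpair with value "on" makes is_in_standby_mode() return True.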
def get_hostname():
"""Return the hostname of this unit
@returns hostname
"""
return socket.gethostname()
def enter_standby_mode(node_name, duration='forever'):
"""Put this node into standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'standby', node_name, duration])
def leave_standby_mode(node_name):
"""Take this node out of standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'online', node_name])
def node_has_resources(node_name):
"""Check if this node is running resources
@param node_name: The name of the node to check
@returns boolean - True if node_name has resources
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
has_resources = False
for resource in root.iter('resource'):
for child in resource:
if child.tag == 'node' and child.attrib.get('name') == node_name:
has_resources = True
return has_resources
def node_is_dc(node_name):
"""Check if this node is the designated controller.
@param node_name: The name of the node to check
@returns boolean - True if node_name is the DC
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
for current_dc in root.iter("current_dc"):
if current_dc.attrib.get('name') == node_name:
return True
return False
def set_unit_status():
"""Set the workload status for this unit
@returns None
"""
status_set(*assess_status_helper())
def resume_unit():
"""Resume services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
leave_standby_mode(node_name)
if is_in_standby_mode(node_name):
messages.append("Node still in standby mode")
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
else:
clear_unit_paused()
set_unit_status()
def pause_unit():
"""Pause services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
enter_standby_mode(node_name)
if not is_in_standby_mode(node_name):
messages.append("Node not in standby mode")
    # Some resources may take time to be migrated out of the node, so up to
    # PCMKR_MAX_RETRIES attempts are made, PCMKR_SLEEP_SECS seconds apart.
i = 0
ready = False
has_resources = False
while i < PCMKR_MAX_RETRIES and not ready:
if node_has_resources(node_name):
has_resources = True
i += 1
time.sleep(PCMKR_SLEEP_SECS)
else:
ready = True
has_resources = False
if has_resources:
messages.append("Resources still running on unit")
status, message = assess_status_helper()
# New status message will indicate the resource is not running
if status != 'active' and 'not running' not in message:
messages.append(message)
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
else:
set_unit_paused()
status_set("maintenance",
"Paused. Use 'resume' action to resume normal service.")
def assess_status_helper():
"""Assess status of unit
@returns status, message - status is workload status and message is any
corresponding messages
"""
if config('stonith_enabled') in ['true', 'True', True]:
        return (
'blocked',
'stonith_enabled config option is no longer supported')
if config('no_quorum_policy'):
if config('no_quorum_policy').lower() not in ['ignore', 'freeze',
'stop', 'suicide']:
            return (
'blocked',
'Invalid no_quorum_policy specified')
if is_unit_upgrading_set():
return ("blocked",
"Ready for do-release-upgrade. Set complete when finished")
if is_waiting_unit_series_upgrade_set():
return ("blocked",
"HA services shutdown, peers are ready for series upgrade")
if is_unit_paused_set():
return ("maintenance",
"Paused. Use 'resume' action to resume normal service.")
node_count = int(config('cluster_count'))
status = 'active'
message = 'Unit is ready and clustered'
try:
try_pcmk_wait()
except pcmk.ServicesNotUp:
message = 'Pacemaker is down'
status = 'blocked'
for relid in relation_ids('hanode'):
if len(related_units(relid)) + 1 < node_count:
status = 'blocked'
message = ("Insufficient peer units for ha cluster "
"(require {})".format(node_count))
# if the status was not changed earlier, we verify the maintenance status
try:
if status == 'active':
prop = pcmk.get_property('maintenance-mode').strip()
except pcmk.PropertyNotFound:
        # the property is not in the output of 'crm configure show xml', so
        # we use the default value for this property. For crmsh>=2.2.0 the
        # default value is automatically provided by show-property or
        # get-property.
prop = 'false'
if (status == 'active' and prop == 'true'):
# maintenance mode enabled in pacemaker
status = 'maintenance'
message = 'Pacemaker in maintenance mode'
for resource in get_resources().keys():
if not pcmk.is_resource_present(resource):
return ("waiting",
"Resource: {} not yet configured".format(resource))
if not pcmk.crm_res_running_on_node(resource, get_hostname()):
return ("blocked",
"Resource: {} not running".format(resource))
return status, message
def ocf_file_exists(res_name, resources,
RES_ROOT='/usr/lib/ocf/resource.d'):
"""To determine whether the ocf file exists, allow multiple ocf
files with the same name in different directories
@param res_name: The name of the ocf resource to check
@param resources: ocf resources
@return: boolean - True if the ocf resource exists
"""
res_type = None
for key, val in resources.items():
if res_name == key:
if len(val.split(':')) > 2:
res_type = val.split(':')[1]
ocf_name = res_name.replace('res_', '').replace('_', '-')
ocf_file = os.path.join(RES_ROOT, res_type, ocf_name)
if os.path.isfile(ocf_file):
return True
return False
def kill_legacy_ocf_daemon_process(res_name):
"""Kill legacy ocf daemon process
@param res_name: The name of the ocf process to kill
"""
ocf_name = res_name.replace('res_', '').replace('_', '-')
reg_expr = r'([0-9]+)\s+[^0-9]+{}'.format(ocf_name)
cmd = ['ps', '-eo', 'pid,cmd']
ps = subprocess.check_output(cmd).decode('utf-8')
res = re.search(reg_expr, ps, re.MULTILINE)
if res:
pid = res.group(1)
subprocess.call(['sudo', 'kill', '-9', pid])
def maintenance_mode(enable):
"""Enable/disable pacemaker's maintenance mode"""
log('Setting maintenance-mode to %s' % enable, level=INFO)
try:
current_state = pcmk.get_property('maintenance-mode').strip().lower()
except pcmk.PropertyNotFound:
current_state = 'false'
current_state = True if current_state == 'true' else False
log('Is maintenance-mode currently enabled? %s' % current_state,
level=DEBUG)
if current_state != enable:
pcmk.set_property('maintenance-mode', str(enable).lower())
else:
log('Desired value for maintenance-mode is already set', level=DEBUG)
def get_resources():
"""Get resources from the HA relation
:returns: dict of resources
"""
resources = {}
for rid in relation_ids("ha"):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
return resources
def set_waiting_unit_series_upgrade():
"""Set the unit to a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=true in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', True)
def clear_waiting_unit_series_upgrade():
"""Clear the unit from a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=false in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', False)
def is_waiting_unit_series_upgrade_set():
"""Return the state of the kv().get('waiting-unit-series-upgrade').
    Returns False when the flag is not set (e.g. on units or in tests
    where HookData() has never stored it).
"""
with unitdata.HookData()() as t:
kv = t[0]
if not kv.get('waiting-unit-series-upgrade'):
return False
return kv.get('waiting-unit-series-upgrade')
def get_series_upgrade_notifications(relid):
"""Check peers for notifications that they are upgrading their series.
Returns a dict of the form {unit_name: target_series, ...}
:param relid: Relation id to check for notifications.
:type relid: str
:returns: dict
"""
notifications = {}
for unit in related_units(relid):
relation_data = relation_get(rid=relid, unit=unit)
for key, value in relation_data.items():
if key.startswith('series_upgrade_of_'):
notifications[unit] = value
log("Found series upgrade notifications: {}".format(notifications), DEBUG)
return notifications
def disable_ha_services():
"""Shutdown and disable HA services."""
log("Disabling HA services", INFO)
for svc in ['corosync', 'pacemaker']:
disable_lsb_services(svc)
if service_running(svc):
service_stop(svc)
def enable_ha_services():
"""Startup and enable HA services."""
log("Enabling HA services", INFO)
for svc in ['pacemaker', 'corosync']:
enable_lsb_services(svc)
if not service_running(svc):
service_start(svc)
def get_series_upgrade_key():
series_upgrade_key = 'series_upgrade_of_{}'.format(
local_unit().replace('/', '_'))
return series_upgrade_key.replace('-', '_')
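# Worked example (illustrative unit name): for local unit 'hacluster/3' this
# yields 'series_upgrade_of_hacluster_3' ('/' and '-' are both mapped to '_'),
# which is the relation key used by notify_peers_of_series_upgrade() below.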
def notify_peers_of_series_upgrade():
"""Notify peers which release this unit is upgrading from."""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: ubuntu_rel}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def clear_series_upgrade_notification():
"""Remove from series upgrade notification from peers."""
log("Removing upgrade notification from peers")
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: None}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def set_stonith_configured(is_configured):
"""Set the STONITH_CONFIGURED state.
    :param is_configured: Whether STONITH has been configured.
    :type is_configured: bool
"""
leader_set({STONITH_CONFIGURED: is_configured})
def is_stonith_configured():
"""Get the STONITH_CONFIGURED state.
:returns: State of STONITH_CONFIGURED state.
:rtype: bool
"""
configured = leader_get(STONITH_CONFIGURED) or 'False'
return bool_from_string(configured)
def get_hanode_hostnames():
"""Hostnames of nodes in the hanode relation.
:returns: List of hostnames of nodes in the hanode relation.
:rtype: List
"""
hanode_hostnames = [get_hostname()]
for relid in relation_ids('hanode'):
for unit in related_units(relid):
hostname = relation_get('hostname', rid=relid, unit=unit)
if hostname:
hanode_hostnames.append(hostname)
hanode_hostnames.sort()
return hanode_hostnames
def update_node_list():
"""Determine and delete unexpected nodes from the corosync ring.
:returns: Set of pcmk nodes not part of Juju hanode relation
:rtype: Set[str]
:raises: RemoveCorosyncNodeFailed
"""
pcmk_nodes = set(pcmk.list_nodes())
juju_nodes = set(get_hanode_hostnames())
diff_nodes = pcmk_nodes.difference(juju_nodes)
log("pcmk_nodes[{}], juju_nodes[{}], diff[{}]"
"".format(pcmk_nodes, juju_nodes, diff_nodes),
DEBUG)
for old_node in diff_nodes:
try:
pcmk.set_node_status_to_maintenance(old_node)
pcmk.delete_node(old_node)
except subprocess.CalledProcessError as e:
raise RemoveCorosyncNodeFailed(old_node, e)
return diff_nodes
def is_update_ring_requested(corosync_update_uuid):
log("Setting corosync-update-uuid=<uuid> in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
stored_value = kv.get('corosync-update-uuid')
if not stored_value or stored_value != corosync_update_uuid:
kv.set('corosync-update-uuid', corosync_update_uuid)
return True
return False
def trigger_corosync_update_from_leader(unit, rid):
corosync_update_uuid = relation_get(
attribute='trigger-corosync-update',
unit=unit, rid=rid,
)
if (corosync_update_uuid and
is_update_ring_requested(corosync_update_uuid) and
emit_corosync_conf()):
cmd = 'corosync-cfgtool -R'
pcmk.commit(cmd)
return True
return False
| 12,041
| 80
| 724
|
dc846139a64da96893d81c6eddfca55ea20f7f1e
| 70,433
|
py
|
Python
|
autots/evaluator/auto_model.py
|
nsankar/AutoTS
|
b4167e1506e1ccb41a85dad1be481a646d808583
|
[
"MIT"
] | null | null | null |
autots/evaluator/auto_model.py
|
nsankar/AutoTS
|
b4167e1506e1ccb41a85dad1be481a646d808583
|
[
"MIT"
] | null | null | null |
autots/evaluator/auto_model.py
|
nsankar/AutoTS
|
b4167e1506e1ccb41a85dad1be481a646d808583
|
[
"MIT"
] | null | null | null |
"""Mid-level helper functions for AutoTS."""
import numpy as np
import pandas as pd
import datetime
import json
from hashlib import md5
from autots.evaluator.metrics import PredictionEval
from autots.tools.transform import RandomTransform
def seasonal_int(include_one: bool = False):
"""Generate a random integer of typical seasonalities."""
if include_one:
lag = np.random.choice(
a=[
'random_int',
1,
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.10,
0.05,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
else:
lag = np.random.choice(
a=[
'random_int',
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.15,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
if lag == 'random_int':
lag = np.random.randint(2, 100, size=1).item()
return int(lag)
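# Illustrative usage sketch: draws are weighted toward common seasonalities
# (7 for daily data with weekly cycles, 12 for monthly, 24 for hourly, ...):
#
#     seasonal_int()                   # e.g. 7, 28 or 364, never 1
#     seasonal_int(include_one=True)   # may additionally return 1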
def create_model_id(
model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {}
):
"""Create a hash ID which should be unique to the model parameters."""
str_repr = (
str(model_str) + json.dumps(parameter_dict) + json.dumps(transformation_dict)
)
str_repr = ''.join(str_repr.split())
hashed = md5(str_repr.encode('utf-8')).hexdigest()
return hashed
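# Illustrative usage sketch (parameter values are hypothetical): the ID is an
# md5 digest of the whitespace-stripped model name plus JSON-encoded parameter
# and transformation dicts, so identical configurations hash to the same ID:
#
#     create_model_id('ETS', {'trend': None}, {}) == \
#         create_model_id('ETS', {'trend': None}, {})    # -> True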
class ModelObject(object):
"""Generic class for holding forecasting models.
Models should all have methods:
.fit(df, future_regressor = []) (taking a DataFrame with DatetimeIndex and n columns of n timeseries)
.predict(forecast_length = int, future_regressor = [], just_point_forecast = False)
.get_new_params() - return a dictionary of weighted random selected parameters
Args:
name (str): Model Name
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
n_jobs (int): used by some models that parallelize to multiple cores
"""
def __repr__(self):
"""Print."""
return 'ModelObject of ' + self.name + ' uses standard .fit/.predict'
def basic_profile(self, df):
"""Capture basic training details."""
self.startTime = datetime.datetime.now()
self.train_shape = df.shape
self.column_names = df.columns
self.train_last_date = df.index[-1]
if self.frequency == 'infer':
self.frequency = pd.infer_freq(df.index, warn=False)
return df
def create_forecast_index(self, forecast_length: int):
"""Generate a pd.DatetimeIndex appropriate for a new forecast.
Warnings:
Requires ModelObject.basic_profile() being called as part of .fit()
"""
forecast_index = pd.date_range(
freq=self.frequency, start=self.train_last_date, periods=forecast_length + 1
)
forecast_index = forecast_index[1:]
self.forecast_index = forecast_index
return forecast_index
def get_params(self):
"""Return dict of current parameters."""
return {}
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
return {}
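# Illustrative subclass sketch (not part of AutoTS; a real subclass would also
# accept the constructor arguments described in the ModelObject docstring):
#
#     class MeanNaive(ModelObject):
#         """Toy model: forecast the per-series training mean."""
#         def __init__(self, frequency='infer', prediction_interval=0.9):
#             self.name = 'MeanNaive'
#             self.frequency = frequency
#             self.prediction_interval = prediction_interval
#         def fit(self, df, future_regressor=[]):
#             df = self.basic_profile(df)
#             self.means = df.mean()
#             return self
#         def predict(self, forecast_length, future_regressor=[],
#                     just_point_forecast=True):
#             index = self.create_forecast_index(forecast_length)
#             return pd.DataFrame([self.means] * forecast_length, index=index)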
class PredictionObject(object):
"""Generic class for holding forecast information."""
def __repr__(self):
"""Print."""
if isinstance(self.forecast, pd.DataFrame):
return "Prediction object: \nReturn .forecast, \n .upper_forecast, \n .lower_forecast \n .model_parameters \n .transformation_parameters"
else:
return "Empty prediction object."
def __bool__(self):
"""bool version of class."""
if isinstance(self.forecast, pd.DataFrame):
return True
else:
return False
def total_runtime(self):
"""Combine runtimes."""
return self.fit_runtime + self.predict_runtime + self.transformation_runtime
def ModelMonster(
model: str,
parameters: dict = {},
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
startTimeStamps=None,
forecast_length: int = 14,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Directs strings and parameters to appropriate model objects.
Args:
model (str): Name of Model Function
parameters (dict): Dictionary of parameters to pass through to model
"""
model = str(model)
if model == 'ZeroesNaive':
from autots.models.basics import ZeroesNaive
return ZeroesNaive(frequency=frequency, prediction_interval=prediction_interval)
if model == 'LastValueNaive':
from autots.models.basics import LastValueNaive
return LastValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
if model == 'AverageValueNaive':
from autots.models.basics import AverageValueNaive
if parameters == {}:
return AverageValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return AverageValueNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
)
if model == 'SeasonalNaive':
from autots.models.basics import SeasonalNaive
if parameters == {}:
return SeasonalNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return SeasonalNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
lag_1=parameters['lag_1'],
lag_2=parameters['lag_2'],
)
if model == 'GLS':
from autots.models.statsmodels import GLS
return GLS(frequency=frequency, prediction_interval=prediction_interval)
if model == 'GLM':
from autots.models.statsmodels import GLM
if parameters == {}:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
family=parameters['family'],
constant=parameters['constant'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ETS':
from autots.models.statsmodels import ETS
if parameters == {}:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
damped=parameters['damped'],
trend=parameters['trend'],
seasonal=parameters['seasonal'],
seasonal_periods=parameters['seasonal_periods'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'ARIMA':
from autots.models.statsmodels import ARIMA
if parameters == {}:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
p=parameters['p'],
d=parameters['d'],
q=parameters['q'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'FBProphet':
from autots.models.prophet import FBProphet
if parameters == {}:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'RollingRegression':
from autots.models.sklearn import RollingRegression
if parameters == {}:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
regression_model=parameters['regression_model'],
mean_rolling_periods=parameters['mean_rolling_periods'],
std_rolling_periods=parameters['std_rolling_periods'],
macd_periods=parameters['macd_periods'],
max_rolling_periods=parameters['max_rolling_periods'],
min_rolling_periods=parameters['min_rolling_periods'],
ewm_alpha=parameters['ewm_alpha'],
additional_lag_periods=parameters['additional_lag_periods'],
x_transform=parameters['x_transform'],
rolling_autocorr_periods=parameters['rolling_autocorr_periods'],
abs_energy=parameters['abs_energy'],
add_date_part=parameters['add_date_part'],
polynomial_degree=parameters['polynomial_degree'],
n_jobs=n_jobs,
)
return model
if model == 'UnobservedComponents':
from autots.models.statsmodels import UnobservedComponents
if parameters == {}:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
level=parameters['level'],
trend=parameters['trend'],
cycle=parameters['cycle'],
damped_cycle=parameters['damped_cycle'],
irregular=parameters['irregular'],
stochastic_trend=parameters['stochastic_trend'],
stochastic_level=parameters['stochastic_level'],
stochastic_cycle=parameters['stochastic_cycle'],
)
return model
if model == 'DynamicFactor':
from autots.models.statsmodels import DynamicFactor
if parameters == {}:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
k_factors=parameters['k_factors'],
factor_order=parameters['factor_order'],
)
return model
if model == 'VAR':
from autots.models.statsmodels import VAR
if parameters == {}:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
maxlags=parameters['maxlags'],
ic=parameters['ic'],
random_seed=random_seed,
verbose=verbose,
)
return model
if model == 'VECM':
from autots.models.statsmodels import VECM
if parameters == {}:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
deterministic=parameters['deterministic'],
k_ar_diff=parameters['k_ar_diff'],
)
return model
if model == 'VARMAX':
from autots.models.statsmodels import VARMAX
if parameters == {}:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
order=parameters['order'],
trend=parameters['trend'],
)
return model
if model == 'GluonTS':
from autots.models.gluonts import GluonTS
if parameters == {}:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
gluon_model=parameters['gluon_model'],
epochs=parameters['epochs'],
learning_rate=parameters['learning_rate'],
forecast_length=forecast_length,
)
return model
if model == 'TSFreshRegressor':
from autots.models.tsfresh import TSFreshRegressor
if parameters == {}:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
max_timeshift=parameters['max_timeshift'],
regression_model=parameters['regression_model'],
feature_selection=parameters['feature_selection'],
)
return model
if model == 'MotifSimulation':
from autots.models.basics import MotifSimulation
if parameters == {}:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
phrase_len=parameters['phrase_len'],
comparison=parameters['comparison'],
shared=parameters['shared'],
distance_metric=parameters['distance_metric'],
max_motifs=parameters['max_motifs'],
recency_weighting=parameters['recency_weighting'],
cutoff_threshold=parameters['cutoff_threshold'],
cutoff_minimum=parameters['cutoff_minimum'],
point_method=parameters['point_method'],
)
return model
if model == 'WindowRegression':
from autots.models.sklearn import WindowRegression
if parameters == {}:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
else:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
window_size=parameters['window_size'],
regression_model=parameters['regression_model'],
input_dim=parameters['input_dim'],
output_dim=parameters['output_dim'],
normalize_window=parameters['normalize_window'],
shuffle=parameters['shuffle'],
max_windows=parameters['max_windows'],
forecast_length=forecast_length,
n_jobs=n_jobs,
)
return model
if model == 'TensorflowSTS':
from autots.models.tfp import TensorflowSTS
if parameters == {}:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
seasonal_periods=parameters['seasonal_periods'],
ar_order=parameters['ar_order'],
trend=parameters['trend'],
fit_method=parameters['fit_method'],
num_steps=parameters['num_steps'],
)
return model
if model == 'TFPRegression':
from autots.models.tfp import TFPRegression
if parameters == {}:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
kernel_initializer=parameters['kernel_initializer'],
epochs=parameters['epochs'],
batch_size=parameters['batch_size'],
optimizer=parameters['optimizer'],
loss=parameters['loss'],
dist=parameters['dist'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ComponentAnalysis':
from autots.models.sklearn import ComponentAnalysis
if parameters == {}:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
model=parameters['model'],
model_parameters=parameters['model_parameters'],
decomposition=parameters['decomposition'],
n_components=parameters['n_components'],
forecast_length=forecast_length,
)
return model
else:
raise AttributeError(
("Model String '{}' not a recognized model type").format(model)
)
def ModelPrediction(
df_train,
forecast_length: int,
transformation_dict: dict,
model_str: str,
parameter_dict: dict,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Feed parameters into modeling pipeline
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
n_jobs (int): number of processes
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
transformationStartTime = datetime.datetime.now()
from autots.tools.transform import GeneralTransformer
try:
coerce_integer = transformation_dict['coerce_integer']
grouping = transformation_dict['grouping']
if grouping == 'user' and grouping_ids is None:
grouping = 'kmeans5'
transformation_dict['grouping'] = 'kmeans5'
reconciliation = transformation_dict['reconciliation']
except Exception:
coerce_integer = False
grouping = None
grouping_ids = None
reconciliation = None
transformer_object = GeneralTransformer(
outlier_method=transformation_dict['outlier_method'],
outlier_threshold=transformation_dict['outlier_threshold'],
outlier_position=transformation_dict['outlier_position'],
fillna=transformation_dict['fillna'],
transformation=transformation_dict['transformation'],
detrend=transformation_dict['detrend'],
second_transformation=transformation_dict['second_transformation'],
transformation_param=transformation_dict['transformation_param'],
third_transformation=transformation_dict['third_transformation'],
transformation_param2=transformation_dict['transformation_param2'],
fourth_transformation=transformation_dict['fourth_transformation'],
discretization=transformation_dict['discretization'],
n_bins=transformation_dict['n_bins'],
grouping=grouping,
grouping_ids=grouping_ids,
reconciliation=reconciliation,
coerce_integer=coerce_integer,
).fit(df_train)
df_train_transformed = transformer_object.transform(df_train)
# slice the context, ie shorten the amount of data available.
if transformation_dict['context_slicer'] not in [None, 'None']:
from autots.tools.transform import simple_context_slicer
df_train_transformed = simple_context_slicer(
df_train_transformed,
method=transformation_dict['context_slicer'],
forecast_length=forecast_length,
)
# make sure regressor has same length. This could be a problem if wrong size regressor is passed.
if len(future_regressor_train) > 0:
future_regressor_train = future_regressor_train.tail(
df_train_transformed.shape[0]
)
transformation_runtime = datetime.datetime.now() - transformationStartTime
# from autots.evaluator.auto_model import ModelMonster
model = ModelMonster(
model_str,
parameters=parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
df_forecast = model.predict(
forecast_length=forecast_length, future_regressor=future_regressor_forecast
)
if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
raise ValueError(
"Model {} returned NaN for one or more series".format(model_str)
)
transformationStartTime = datetime.datetime.now()
# Inverse the transformations
df_forecast.forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.lower_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.lower_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.upper_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.upper_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.transformation_parameters = transformation_dict
# Remove negatives if desired
# There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower = 0), not sure which faster
if no_negatives:
df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
df_forecast.forecast = df_forecast.forecast.clip(lower=0)
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
if constraint is not None:
if verbose > 2:
print("Using constraint.")
constraint = float(constraint)
train_std = df_train.std(axis=0)
train_min = df_train.min(axis=0) - (constraint * train_std)
train_max = df_train.max(axis=0) + (constraint * train_std)
df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
transformation_runtime = transformation_runtime + (
datetime.datetime.now() - transformationStartTime
)
df_forecast.transformation_runtime = transformation_runtime
return df_forecast
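# Illustrative usage sketch (not part of the original module): how ModelPrediction
# is typically driven from a single template row. `df_wide` is an assumed wide
# DataFrame with a DatetimeIndex; the two dicts come from json.loads of a
# template row's ModelParameters / TransformationParameters.
#
#   prediction = ModelPrediction(
#       df_wide,
#       forecast_length=14,
#       transformation_dict=json.loads(row['TransformationParameters']),
#       model_str=row['Model'],
#       parameter_dict=json.loads(row['ModelParameters']),
#       frequency='infer',
#       prediction_interval=0.9,
#   )
#   point_forecast = prediction.forecast            # pd.DataFrame of point forecasts
#   upper, lower = prediction.upper_forecast, prediction.lower_forecast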
class TemplateEvalObject(object):
"""Object to contain all your failures!."""
def __repr__(self):
"""Print."""
return 'Results objects, result table at self.model_results (pd.df)'
def concat(self, another_eval):
"""Merge another TemplateEvalObject onto this one."""
self.model_results = pd.concat(
[self.model_results, another_eval.model_results],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
self.per_series_mae = pd.concat(
[self.per_series_mae, another_eval.per_series_mae], axis=0, sort=False
)
self.per_series_spl = pd.concat(
[self.per_series_spl, another_eval.per_series_spl], axis=0, sort=False
)
self.per_series_rmse1 = pd.concat(
[self.per_series_rmse1, another_eval.per_series_rmse1], axis=0, sort=False
)
self.per_series_rmse2 = pd.concat(
[self.per_series_rmse2, another_eval.per_series_rmse2], axis=0, sort=False
)
self.per_timestamp_smape = pd.concat(
[self.per_timestamp_smape, another_eval.per_timestamp_smape],
axis=0,
sort=False,
)
self.model_count = self.model_count + another_eval.model_count
return self
def save(self, filename):
"""Save results to a file."""
if '.csv' in filename:
self.model_results.to_csv(filename, index=False)
elif '.pickle' in filename:
import pickle
with open(filename, "wb") as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
else:
raise ValueError("filename not .csv or .pickle")
def unpack_ensemble_models(
template,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
keep_ensemble: bool = True,
recursive: bool = False,
):
"""Take ensemble models from template and add as new rows."""
ensemble_template = pd.DataFrame()
template['Ensemble'] = np.where(
((template['Model'] == 'Ensemble') & (template['Ensemble'] < 1)),
1,
template['Ensemble'],
)
for index, value in template[template['Ensemble'] != 0][
'ModelParameters'
].iteritems():
model_dict = json.loads(value)['models']
model_df = pd.DataFrame.from_dict(model_dict, orient='index')
model_df = model_df.rename_axis('ID').reset_index(drop=False)
model_df['Ensemble'] = 0
# unpack nested ensembles, if recursive specified
if recursive and 'Ensemble' in model_df['Model'].tolist():
model_df = pd.concat(
[
unpack_ensemble_models(
model_df, recursive=True, template_cols=template_cols
),
model_df,
],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
ensemble_template = pd.concat(
[ensemble_template, model_df], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
if not keep_ensemble:
template = template[template['Ensemble'] == 0]
template = pd.concat(
[template, ensemble_template], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
template = template.drop_duplicates(subset=template_cols)
return template
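# Hedged sketch of what unpack_ensemble_models produces (assumes `template` is a
# DataFrame in the Model/ModelParameters/TransformationParameters/Ensemble layout
# used throughout this module):
#
#   flat = unpack_ensemble_models(template, keep_ensemble=False, recursive=True)
#   # every component listed inside an Ensemble row's ModelParameters['models']
#   # becomes its own row with Ensemble == 0, de-duplicated on template_cols.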
def PredictWitch(
template,
df_train,
forecast_length: int,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""Takes numeric data, returns numeric forecasts.
Only one model (albeit potentially an ensemble)!
Well, she turned me into a newt.
A newt?
I got better. -Python
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
template_cols (list): column names of columns used as model template
Returns:
        PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
template = template.head(1)
for index_upper, row_upper in template.iterrows():
# if an ensemble
if row_upper['Model'] == 'Ensemble':
from autots.models.ensemble import EnsembleForecast
forecasts_list = []
forecasts_runtime = []
forecasts = []
upper_forecasts = []
lower_forecasts = []
ens_model_str = row_upper['Model']
ens_params = json.loads(row_upper['ModelParameters'])
ens_template = unpack_ensemble_models(
template, template_cols, keep_ensemble=False
)
for index, row in ens_template.iterrows():
# recursive recursion!
if verbose > 2:
total_ens = ens_template.shape[0]
print(
"Ensemble component {} of {} ".format(
model_str, str(index), str(total_ens)
)
)
df_forecast = PredictWitch(
row,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
forecasts_list.extend([model_id])
forecasts_runtime.extend([total_runtime])
forecasts.extend([df_forecast.forecast])
upper_forecasts.extend([df_forecast.upper_forecast])
lower_forecasts.extend([df_forecast.lower_forecast])
ens_forecast = EnsembleForecast(
ens_model_str,
ens_params,
forecasts_list=forecasts_list,
forecasts=forecasts,
lower_forecasts=lower_forecasts,
upper_forecasts=upper_forecasts,
forecasts_runtime=forecasts_runtime,
prediction_interval=prediction_interval,
)
return ens_forecast
# if not an ensemble
else:
model_str = row_upper['Model']
parameter_dict = json.loads(row_upper['ModelParameters'])
transformation_dict = json.loads(row_upper['TransformationParameters'])
df_forecast = ModelPrediction(
df_train,
forecast_length,
transformation_dict,
model_str,
parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
grouping_ids=grouping_ids,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
startTimeStamps=startTimeStamps,
n_jobs=n_jobs,
)
return df_forecast
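# Illustrative only: re-running the single best row of a scored results table
# through PredictWitch to get a full PredictionObject. `model_results` and
# `df_wide` are assumed inputs (e.g. TemplateEvalObject.model_results with a
# 'Score' column already added, and the wide training DataFrame).
#
#   best = model_results.sort_values('Score').iloc[0]
#   prediction = PredictWitch(
#       best,
#       df_train=df_wide,
#       forecast_length=14,
#       frequency='infer',
#       prediction_interval=0.9,
#   )
#   prediction.forecast.head()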
def TemplateWizard(
template,
df_train,
df_test,
weights,
model_count: int = 0,
ensemble: str = True,
forecast_length: int = 14,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
validation_round: int = 0,
model_interrupt: bool = False,
grouping_ids=None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Take Template, returns Results.
There are some who call me... Tim. - Python
Args:
        template (pandas.DataFrame): containing model str, and json of transformations and hyperparameters
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
df_test (pandas.DataFrame): dataframe of actual values of (forecast length * n series)
weights (dict): key = column/series_id, value = weight
ensemble (str): desc of ensemble types to prepare metric collection
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
validation_round (int): int passed to record current validation.
model_interrupt (bool): if True, keyboard interrupts are caught and only break current model eval.
template_cols (list): column names of columns used as model template
Returns:
TemplateEvalObject
"""
ensemble = str(ensemble)
template_result = TemplateEvalObject()
template_result.model_count = model_count
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
# template = unpack_ensemble_models(template, template_cols, keep_ensemble = False)
for index, row in template.iterrows():
try:
model_str = row['Model']
parameter_dict = json.loads(row['ModelParameters'])
transformation_dict = json.loads(row['TransformationParameters'])
ensemble_input = row['Ensemble']
current_template = pd.DataFrame(row).transpose()
template_result.model_count += 1
if verbose > 0:
if verbose > 1:
print(
"Model Number: {} with model {} in Validation {} with params {} and transformations {}".format(
str(template_result.model_count),
model_str,
str(validation_round),
json.dumps(parameter_dict),
json.dumps(transformation_dict),
)
)
else:
print(
"Model Number: {} with model {} in Validation {} ".format(
str(template_result.model_count),
model_str,
str(validation_round),
)
)
df_forecast = PredictWitch(
current_template,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
per_ts = True if 'distance' in ensemble else False
if 'hdist' in ensemble:
dist_n = int(np.ceil(0.3 * forecast_length))
else:
dist_n = None
model_error = PredictionEval(
df_forecast,
df_test,
series_weights=weights,
df_train=df_train,
per_timestamp_errors=per_ts,
dist_n=dist_n,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
result = pd.DataFrame(
{
'ID': model_id,
'Model': df_forecast.model_name,
'ModelParameters': json.dumps(df_forecast.model_parameters),
'TransformationParameters': json.dumps(
df_forecast.transformation_parameters
),
'TransformationRuntime': df_forecast.transformation_runtime,
'FitRuntime': df_forecast.fit_runtime,
'PredictRuntime': df_forecast.predict_runtime,
'TotalRuntime': total_runtime,
'Ensemble': ensemble_input,
'Exceptions': np.nan,
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
a = pd.DataFrame(
model_error.avg_metrics_weighted.rename(lambda x: x + '_weighted')
).transpose()
result = pd.concat(
[result, pd.DataFrame(model_error.avg_metrics).transpose(), a], axis=1
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
if 'horizontal' in ensemble:
cur_mae = model_error.per_series_metrics.loc['mae']
cur_mae = pd.DataFrame(cur_mae).transpose()
cur_mae.index = [model_id]
template_result.per_series_mae = pd.concat(
[template_result.per_series_mae, cur_mae], axis=0
)
if 'probabilistic' in ensemble:
cur_spl = model_error.per_series_metrics.loc['spl']
cur_spl = pd.DataFrame(cur_spl).transpose()
cur_spl.index = [model_id]
template_result.per_series_spl = pd.concat(
[template_result.per_series_spl, cur_spl], axis=0
)
if 'distance' in ensemble:
cur_smape = model_error.per_timestamp.loc['weighted_smape']
cur_smape = pd.DataFrame(cur_smape).transpose()
cur_smape.index = [model_id]
template_result.per_timestamp_smape = pd.concat(
[template_result.per_timestamp_smape, cur_smape], axis=0
)
if 'hdist' in ensemble:
cur_rmse1 = model_error.per_series_metrics.loc['rmse1']
cur_rmse2 = model_error.per_series_metrics.loc['rmse2']
cur_rmse1 = pd.DataFrame(cur_rmse1).transpose()
cur_rmse2 = pd.DataFrame(cur_rmse2).transpose()
cur_rmse1.index = [model_id]
cur_rmse2.index = [model_id]
template_result.per_series_rmse1 = pd.concat(
[template_result.per_series_rmse1, cur_rmse1], axis=0
)
template_result.per_series_rmse2 = pd.concat(
[template_result.per_series_rmse2, cur_rmse2], axis=0
)
except KeyboardInterrupt:
if model_interrupt:
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': "KeyboardInterrupt by user",
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
else:
raise KeyboardInterrupt
except Exception as e:
if verbose >= 0:
print(
'Template Eval Error: {} in model {}: {}'.format(
(repr(e)), template_result.model_count, model_str
)
)
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': repr(e),
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
return template_result
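# A minimal sketch of one evaluation pass (all names here are assumptions, not
# part of the original module): the last `forecast_length` rows of `df_wide`
# act as the holdout, every series gets weight 1, and the candidate template
# comes from RandomTemplate defined just below.
#
#   forecast_length = 14
#   df_train, df_test = df_wide[:-forecast_length], df_wide[-forecast_length:]
#   weights = {col: 1 for col in df_wide.columns}
#   template = RandomTemplate(n=5, model_list=['ETS', 'LastValueNaive'])
#   eval_result = TemplateWizard(
#       template, df_train, df_test, weights,
#       forecast_length=forecast_length, ensemble='simple',
#   )
#   eval_result.model_results[['Model', 'smape_weighted', 'Exceptions']]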
def RandomTemplate(
n: int = 10,
model_list: list = [
'ZeroesNaive',
'LastValueNaive',
'AverageValueNaive',
'GLS',
'GLM',
'ETS',
'ARIMA',
'FBProphet',
'RollingRegression',
'GluonTS',
'UnobservedComponents',
'VARMAX',
'VECM',
'DynamicFactor',
],
):
"""
Returns a template dataframe of randomly generated transformations, models, and hyperparameters.
Args:
n (int): number of random models to return
"""
n = abs(int(n))
template = pd.DataFrame()
counter = 0
while len(template.index) < n:
model_str = np.random.choice(model_list)
param_dict = ModelMonster(model_str).get_new_params()
trans_dict = RandomTransform()
row = pd.DataFrame(
{
'Model': model_str,
'ModelParameters': json.dumps(param_dict),
'TransformationParameters': json.dumps(trans_dict),
'Ensemble': 0,
},
index=[0],
)
template = pd.concat([template, row], axis=0, ignore_index=True)
template.drop_duplicates(inplace=True)
counter += 1
if counter > (n * 3):
break
return template
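# Quick illustration: the generated template is a plain DataFrame carrying the
# same four template columns used everywhere else in this module.
#
#   tmpl = RandomTemplate(n=3, model_list=['ETS', 'GLS'])
#   tmpl.columns.tolist()
#   # ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble']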
def UniqueTemplates(
existing_templates,
new_possibilities,
selection_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
    Returns unique dataframe rows from new_possibilities not in existing_templates.
    Args:
        selection_cols (list): list of column names to use to judge uniqueness/match on
"""
keys = list(new_possibilities[selection_cols].columns.values)
idx1 = existing_templates.copy().set_index(keys).index
idx2 = new_possibilities.set_index(keys).index
new_template = new_possibilities[~idx2.isin(idx1)]
return new_template
def dict_recombination(a: dict, b: dict):
"""Recombine two dictionaries with identical keys. Return new dict."""
b_keys = [*b]
key_size = int(len(b_keys) / 2) if len(b_keys) > 1 else 1
bs_keys = np.random.choice(b_keys, size=key_size)
b_prime = {k: b[k] for k in bs_keys}
c = {**a, **b_prime} # overwrites with B
return c
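# Tiny worked example (values hypothetical): up to half of b's keys, chosen at
# random, overwrite their counterparts in a, so the child mixes both parents.
#
#   dict_recombination({'p': 1, 'q': 0, 'trend': 'n'},
#                      {'p': 2, 'q': 1, 'trend': 'c'})
#   # -> e.g. {'p': 1, 'q': 0, 'trend': 'c'}   (here only 'trend' came from b)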
def trans_dict_recomb(dict_array):
"""Recombine two transformation param dictionaries from array of dicts."""
r_sel = np.random.choice(dict_array, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
out_keys = ['outlier_method', 'outlier_threshold', 'outlier_position']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in out_keys}}
mid_trans_keys = ['second_transformation', 'transformation_param']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
mid_trans_keys = ['third_transformation', 'transformation_param2']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
disc_keys = ['discretization', 'n_bins']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in disc_keys}}
disc_keys = ['grouping', 'reconciliation']
current_dict = np.random.choice([a, b], size=1).item()
if all([x in current_dict.keys() for x in disc_keys]):
c = {**c, **{k: current_dict[k] for k in disc_keys}}
return c
def NewGeneticTemplate(
model_results,
submitted_parameters,
sort_column: str = "smape_weighted",
sort_ascending: bool = True,
max_results: int = 50,
max_per_model_class: int = 5,
top_n: int = 50,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Return new template given old template with model accuracies.
Args:
model_results (pandas.DataFrame): models that have actually been run
        submitted_parameters (pandas.DataFrame): models tried (may have returned different parameters to results)
"""
new_template = pd.DataFrame()
# filter existing templates
sorted_results = model_results[model_results['Ensemble'] == 0].copy()
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
)
sorted_results = sorted_results.drop_duplicates(subset=template_cols, keep='first')
if str(max_per_model_class).isdigit():
sorted_results = (
sorted_results.sort_values(sort_column, ascending=sort_ascending)
.groupby('Model')
.head(max_per_model_class)
.reset_index()
)
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
).head(top_n)
no_params = ['ZeroesNaive', 'LastValueNaive', 'GLS']
recombination_approved = [
'SeasonalNaive',
'MotifSimulation',
"ETS",
'DynamicFactor',
'VECM',
'VARMAX',
'GLM',
'ARIMA',
'FBProphet',
'GluonTS',
'RollingRegression',
'VAR',
'WindowRegression',
'TensorflowSTS',
'TFPRegression',
]
borrow = ['ComponentAnalysis']
best = json.loads(sorted_results.iloc[0, :]['TransformationParameters'])
for model_type in sorted_results['Model'].unique():
if model_type in no_params:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_param = current_ops.iloc[0, :]['ModelParameters']
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_param,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
elif model_type in recombination_approved:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 4
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
# select the best model of this type
fir = json.loads(current_ops.iloc[0, :]['ModelParameters'])
cur_len = current_ops.shape[0]
if cur_len > 1:
# select randomly from best of data, doesn't handle lengths < 2
                top_r = int(np.floor((cur_len / 5) + 2))  # int cast: np.random.randint needs integer bounds
                r_id = np.random.randint(1, top_r)
sec = json.loads(current_ops.iloc[r_id, :]['ModelParameters'])
else:
sec = ModelMonster(model_type).get_new_params()
# generate new random parameters ('mutations')
r = ModelMonster(model_type).get_new_params()
r2 = ModelMonster(model_type).get_new_params()
arr = [fir, sec, r2, r]
model_dicts = list()
# recombine best and random to create new generation
for _ in range(n):
r_sel = np.random.choice(arr, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
else:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_dicts = list()
for _ in range(n):
c = ModelMonster(model_type).get_new_params()
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
new_template = pd.concat(
[new_template, new_row], axis=0, ignore_index=True, sort=False
)
"""
# recombination of transforms across models by shifting transforms
recombination = sorted_results.tail(len(sorted_results.index) - 1).copy()
recombination['TransformationParameters'] = sorted_results['TransformationParameters'].shift(1).tail(len(sorted_results.index) - 1)
new_template = pd.concat([new_template,
recombination.head(top_n)[template_cols]],
axis=0, ignore_index=True, sort=False)
"""
# remove generated models which have already been tried
sorted_results = pd.concat(
[submitted_parameters, sorted_results], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
new_template = UniqueTemplates(
sorted_results, new_template, selection_cols=template_cols
).head(max_results)
return new_template
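# Hedged sketch of the genetic loop this function supports (object names are
# assumptions carried over from the TemplateWizard sketch above): score one
# generation, then breed the next from the results.
#
#   scored = eval_result.model_results
#   scored['Score'] = generate_score(scored, metric_weighting={'smape_weighting': 1})
#   next_gen = NewGeneticTemplate(
#       scored, submitted_parameters=template, sort_column='Score',
#   )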
def validation_aggregation(validation_results):
"""Aggregate a TemplateEvalObject."""
groupby_cols = [
'ID',
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
]
col_aggs = {
'Runs': 'sum',
'smape': 'mean',
'mae': 'mean',
'rmse': 'mean',
'containment': 'mean',
'spl': 'mean',
'contour': 'mean',
'smape_weighted': 'mean',
'mae_weighted': 'mean',
'rmse_weighted': 'mean',
'containment_weighted': 'mean',
'contour_weighted': 'mean',
'spl_weighted': 'mean',
'TotalRuntimeSeconds': 'mean',
'Score': 'mean',
}
validation_results.model_results['TotalRuntimeSeconds'] = (
validation_results.model_results['TotalRuntime'].dt.seconds + 1
)
validation_results.model_results = validation_results.model_results[
pd.isnull(validation_results.model_results['Exceptions'])
]
validation_results.model_results = validation_results.model_results.replace(
[np.inf, -np.inf], np.nan
)
validation_results.model_results = validation_results.model_results.groupby(
groupby_cols
).agg(col_aggs)
validation_results.model_results = validation_results.model_results.reset_index(
drop=False
)
return validation_results
def generate_score(
model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9
):
"""Generate score based on relative accuracies."""
try:
smape_weighting = metric_weighting['smape_weighting']
except KeyError:
smape_weighting = 1
try:
mae_weighting = metric_weighting['mae_weighting']
except KeyError:
mae_weighting = 0
try:
rmse_weighting = metric_weighting['rmse_weighting']
except KeyError:
rmse_weighting = 0
try:
containment_weighting = metric_weighting['containment_weighting']
except KeyError:
containment_weighting = 0
try:
runtime_weighting = metric_weighting['runtime_weighting'] * 0.1
except KeyError:
runtime_weighting = 0
try:
spl_weighting = metric_weighting['spl_weighting']
except KeyError:
spl_weighting = 0
try:
contour_weighting = metric_weighting['contour_weighting']
except KeyError:
contour_weighting = 0
# handle various runtime information records
if 'TotalRuntimeSeconds' in model_results.columns:
if 'TotalRuntime' in model_results.columns:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntime'].dt.seconds,
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntimeSeconds'].max(),
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = model_results['TotalRuntime'].dt.seconds
# generate minimizing scores, where smaller = better accuracy
try:
model_results = model_results.replace([np.inf, -np.inf], np.nan)
# model_results = model_results.fillna(value=model_results.max(axis=0))
smape_score = model_results['smape_weighted'] / (
model_results['smape_weighted'].min(skipna=True) + 1
) # smaller better
rmse_scaler = model_results['rmse_weighted'].median(skipna=True)
rmse_scaler = 1 if rmse_scaler == 0 else rmse_scaler
rmse_score = model_results['rmse_weighted'] / rmse_scaler
mae_scaler = model_results['mae_weighted'].median(skipna=True)
mae_scaler = 1 if mae_scaler == 0 else mae_scaler
mae_score = model_results['mae_weighted'] / mae_scaler
containment_score = (
abs(prediction_interval - model_results['containment'])
) + 1 # from 1 to 2, smaller better
runtime = model_results['TotalRuntimeSeconds'] + 120
runtime_score = runtime / (runtime.min(skipna=True)) # smaller better
spl_score = model_results['spl_weighted'] / (
model_results['spl_weighted'].min(skipna=True) + 1
) # smaller better
contour_score = (
(1 / (model_results['contour_weighted']))
.replace([np.inf, -np.inf, np.nan], 10)
.clip(upper=10)
)
except KeyError:
raise KeyError(
"Inconceivable! Evaluation Metrics are missing and all models have failed, by an error in TemplateWizard or metrics. A new template may help, or an adjusted model_list."
)
return (
(smape_score * smape_weighting)
+ (mae_score * mae_weighting)
+ (rmse_score * rmse_weighting)
+ (containment_score * containment_weighting)
+ (runtime_score * runtime_weighting)
+ (spl_score * spl_weighting)
+ (contour_score * contour_weighting)
)
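# Illustrative weighting (numbers hypothetical): emphasize point accuracy while
# lightly penalizing slow models; the returned minimizing score is usually
# written back onto the results table as a 'Score' column.
#
#   metric_weighting = {
#       'smape_weighting': 10,
#       'mae_weighting': 1,
#       'runtime_weighting': 0.5,
#   }
#   model_results['Score'] = generate_score(
#       model_results, metric_weighting=metric_weighting, prediction_interval=0.9
#   )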
| 37.907966
| 181
| 0.582397
|
"""Mid-level helper functions for AutoTS."""
import numpy as np
import pandas as pd
import datetime
import json
from hashlib import md5
from autots.evaluator.metrics import PredictionEval
from autots.tools.transform import RandomTransform
def seasonal_int(include_one: bool = False):
"""Generate a random integer of typical seasonalities."""
if include_one:
lag = np.random.choice(
a=[
'random_int',
1,
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.10,
0.05,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
else:
lag = np.random.choice(
a=[
'random_int',
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.15,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
if lag == 'random_int':
lag = np.random.randint(2, 100, size=1).item()
return int(lag)
def create_model_id(
model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {}
):
"""Create a hash ID which should be unique to the model parameters."""
str_repr = (
str(model_str) + json.dumps(parameter_dict) + json.dumps(transformation_dict)
)
str_repr = ''.join(str_repr.split())
hashed = md5(str_repr.encode('utf-8')).hexdigest()
return hashed
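# Example (parameters hypothetical): whitespace is stripped before hashing, so
# equivalent JSON formattings of the same parameters always map to the same
# 32-character md5 hex ID.
#
#   create_model_id('ETS', {'trend': None}, {'fillna': 'ffill'})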
class ModelObject(object):
"""Generic class for holding forecasting models.
Models should all have methods:
.fit(df, future_regressor = []) (taking a DataFrame with DatetimeIndex and n columns of n timeseries)
.predict(forecast_length = int, future_regressor = [], just_point_forecast = False)
.get_new_params() - return a dictionary of weighted random selected parameters
Args:
name (str): Model Name
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
n_jobs (int): used by some models that parallelize to multiple cores
"""
def __init__(
self,
name: str = "Uninitiated Model Name",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
fit_runtime=datetime.timedelta(0),
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = -1,
):
self.name = name
self.frequency = frequency
self.prediction_interval = prediction_interval
self.regression_type = regression_type
self.fit_runtime = fit_runtime
self.holiday_country = holiday_country
self.random_seed = random_seed
self.verbose = verbose
self.verbose_bool = True if self.verbose > 1 else False
self.n_jobs = n_jobs
def __repr__(self):
"""Print."""
return 'ModelObject of ' + self.name + ' uses standard .fit/.predict'
def basic_profile(self, df):
"""Capture basic training details."""
self.startTime = datetime.datetime.now()
self.train_shape = df.shape
self.column_names = df.columns
self.train_last_date = df.index[-1]
if self.frequency == 'infer':
self.frequency = pd.infer_freq(df.index, warn=False)
return df
def create_forecast_index(self, forecast_length: int):
"""Generate a pd.DatetimeIndex appropriate for a new forecast.
Warnings:
Requires ModelObject.basic_profile() being called as part of .fit()
"""
forecast_index = pd.date_range(
freq=self.frequency, start=self.train_last_date, periods=forecast_length + 1
)
forecast_index = forecast_index[1:]
self.forecast_index = forecast_index
return forecast_index
def get_params(self):
"""Return dict of current parameters."""
return {}
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
return {}
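# Minimal subclass sketch (illustrative, not a real AutoTS model): it shows the
# .fit / .predict contract described in the ModelObject docstring above, using
# PredictionObject defined just below.
#
#   class MeanForecaster(ModelObject):
#       def fit(self, df, future_regressor=[]):
#           df = self.basic_profile(df)
#           self.means = df.mean(axis=0)
#           return self
#
#       def predict(self, forecast_length, future_regressor=[], just_point_forecast=False):
#           index = self.create_forecast_index(forecast_length)
#           forecast = pd.DataFrame([self.means] * forecast_length, index=index)
#           if just_point_forecast:
#               return forecast
#           return PredictionObject(
#               model_name=self.name, forecast_length=forecast_length,
#               forecast_index=index, forecast_columns=forecast.columns,
#               forecast=forecast, lower_forecast=forecast, upper_forecast=forecast,
#           )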
class PredictionObject(object):
"""Generic class for holding forecast information."""
def __init__(
self,
model_name: str = 'Uninitiated',
forecast_length: int = 0,
forecast_index=np.nan,
forecast_columns=np.nan,
lower_forecast=np.nan,
forecast=np.nan,
upper_forecast=np.nan,
prediction_interval: float = 0.9,
predict_runtime=datetime.timedelta(0),
fit_runtime=datetime.timedelta(0),
model_parameters={},
transformation_parameters={},
transformation_runtime=datetime.timedelta(0),
):
self.model_name = model_name
self.model_parameters = model_parameters
self.transformation_parameters = transformation_parameters
self.forecast_length = forecast_length
self.forecast_index = forecast_index
self.forecast_columns = forecast_columns
self.lower_forecast = lower_forecast
self.forecast = forecast
self.upper_forecast = upper_forecast
self.prediction_interval = prediction_interval
self.predict_runtime = predict_runtime
self.fit_runtime = fit_runtime
self.transformation_runtime = transformation_runtime
def __repr__(self):
"""Print."""
if isinstance(self.forecast, pd.DataFrame):
return "Prediction object: \nReturn .forecast, \n .upper_forecast, \n .lower_forecast \n .model_parameters \n .transformation_parameters"
else:
return "Empty prediction object."
def __bool__(self):
"""bool version of class."""
if isinstance(self.forecast, pd.DataFrame):
return True
else:
return False
def total_runtime(self):
"""Combine runtimes."""
return self.fit_runtime + self.predict_runtime + self.transformation_runtime
def ModelMonster(
model: str,
parameters: dict = {},
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
startTimeStamps=None,
forecast_length: int = 14,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Directs strings and parameters to appropriate model objects.
Args:
model (str): Name of Model Function
parameters (dict): Dictionary of parameters to pass through to model
"""
model = str(model)
if model == 'ZeroesNaive':
from autots.models.basics import ZeroesNaive
return ZeroesNaive(frequency=frequency, prediction_interval=prediction_interval)
if model == 'LastValueNaive':
from autots.models.basics import LastValueNaive
return LastValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
if model == 'AverageValueNaive':
from autots.models.basics import AverageValueNaive
if parameters == {}:
return AverageValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return AverageValueNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
)
if model == 'SeasonalNaive':
from autots.models.basics import SeasonalNaive
if parameters == {}:
return SeasonalNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return SeasonalNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
lag_1=parameters['lag_1'],
lag_2=parameters['lag_2'],
)
if model == 'GLS':
from autots.models.statsmodels import GLS
return GLS(frequency=frequency, prediction_interval=prediction_interval)
if model == 'GLM':
from autots.models.statsmodels import GLM
if parameters == {}:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
family=parameters['family'],
constant=parameters['constant'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ETS':
from autots.models.statsmodels import ETS
if parameters == {}:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
damped=parameters['damped'],
trend=parameters['trend'],
seasonal=parameters['seasonal'],
seasonal_periods=parameters['seasonal_periods'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'ARIMA':
from autots.models.statsmodels import ARIMA
if parameters == {}:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
p=parameters['p'],
d=parameters['d'],
q=parameters['q'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'FBProphet':
from autots.models.prophet import FBProphet
if parameters == {}:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'RollingRegression':
from autots.models.sklearn import RollingRegression
if parameters == {}:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
regression_model=parameters['regression_model'],
mean_rolling_periods=parameters['mean_rolling_periods'],
std_rolling_periods=parameters['std_rolling_periods'],
macd_periods=parameters['macd_periods'],
max_rolling_periods=parameters['max_rolling_periods'],
min_rolling_periods=parameters['min_rolling_periods'],
ewm_alpha=parameters['ewm_alpha'],
additional_lag_periods=parameters['additional_lag_periods'],
x_transform=parameters['x_transform'],
rolling_autocorr_periods=parameters['rolling_autocorr_periods'],
abs_energy=parameters['abs_energy'],
add_date_part=parameters['add_date_part'],
polynomial_degree=parameters['polynomial_degree'],
n_jobs=n_jobs,
)
return model
if model == 'UnobservedComponents':
from autots.models.statsmodels import UnobservedComponents
if parameters == {}:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
level=parameters['level'],
trend=parameters['trend'],
cycle=parameters['cycle'],
damped_cycle=parameters['damped_cycle'],
irregular=parameters['irregular'],
stochastic_trend=parameters['stochastic_trend'],
stochastic_level=parameters['stochastic_level'],
stochastic_cycle=parameters['stochastic_cycle'],
)
return model
if model == 'DynamicFactor':
from autots.models.statsmodels import DynamicFactor
if parameters == {}:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
k_factors=parameters['k_factors'],
factor_order=parameters['factor_order'],
)
return model
if model == 'VAR':
from autots.models.statsmodels import VAR
if parameters == {}:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
maxlags=parameters['maxlags'],
ic=parameters['ic'],
random_seed=random_seed,
verbose=verbose,
)
return model
if model == 'VECM':
from autots.models.statsmodels import VECM
if parameters == {}:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
deterministic=parameters['deterministic'],
k_ar_diff=parameters['k_ar_diff'],
)
return model
if model == 'VARMAX':
from autots.models.statsmodels import VARMAX
if parameters == {}:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
order=parameters['order'],
trend=parameters['trend'],
)
return model
if model == 'GluonTS':
from autots.models.gluonts import GluonTS
if parameters == {}:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
gluon_model=parameters['gluon_model'],
epochs=parameters['epochs'],
learning_rate=parameters['learning_rate'],
forecast_length=forecast_length,
)
return model
if model == 'TSFreshRegressor':
from autots.models.tsfresh import TSFreshRegressor
if parameters == {}:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
max_timeshift=parameters['max_timeshift'],
regression_model=parameters['regression_model'],
feature_selection=parameters['feature_selection'],
)
return model
if model == 'MotifSimulation':
from autots.models.basics import MotifSimulation
if parameters == {}:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
phrase_len=parameters['phrase_len'],
comparison=parameters['comparison'],
shared=parameters['shared'],
distance_metric=parameters['distance_metric'],
max_motifs=parameters['max_motifs'],
recency_weighting=parameters['recency_weighting'],
cutoff_threshold=parameters['cutoff_threshold'],
cutoff_minimum=parameters['cutoff_minimum'],
point_method=parameters['point_method'],
)
return model
if model == 'WindowRegression':
from autots.models.sklearn import WindowRegression
if parameters == {}:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
else:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
window_size=parameters['window_size'],
regression_model=parameters['regression_model'],
input_dim=parameters['input_dim'],
output_dim=parameters['output_dim'],
normalize_window=parameters['normalize_window'],
shuffle=parameters['shuffle'],
max_windows=parameters['max_windows'],
forecast_length=forecast_length,
n_jobs=n_jobs,
)
return model
if model == 'TensorflowSTS':
from autots.models.tfp import TensorflowSTS
if parameters == {}:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
seasonal_periods=parameters['seasonal_periods'],
ar_order=parameters['ar_order'],
trend=parameters['trend'],
fit_method=parameters['fit_method'],
num_steps=parameters['num_steps'],
)
return model
if model == 'TFPRegression':
from autots.models.tfp import TFPRegression
if parameters == {}:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
kernel_initializer=parameters['kernel_initializer'],
epochs=parameters['epochs'],
batch_size=parameters['batch_size'],
optimizer=parameters['optimizer'],
loss=parameters['loss'],
dist=parameters['dist'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ComponentAnalysis':
from autots.models.sklearn import ComponentAnalysis
if parameters == {}:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
model=parameters['model'],
model_parameters=parameters['model_parameters'],
decomposition=parameters['decomposition'],
n_components=parameters['n_components'],
forecast_length=forecast_length,
)
return model
else:
raise AttributeError(
("Model String '{}' not a recognized model type").format(model)
)
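# --- Illustrative usage sketch (editor's addition, not part of the original AutoTS source) ---
# ModelMonster maps a model-name string to a model object; with no parameters it is also
# used elsewhere in this module purely to sample new random hyperparameters for that family.
# The model name 'ETS' and forecast_length=14 below are assumed example values only.
def _model_monster_usage_sketch():
    """Hedged example: sample random hyperparameters, then build a model with them."""
    params = ModelMonster('ETS').get_new_params()  # random hyperparameters for ETS
    model = ModelMonster('ETS', parameters=params, forecast_length=14)
    return params, model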
def ModelPrediction(
df_train,
forecast_length: int,
transformation_dict: dict,
model_str: str,
parameter_dict: dict,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Feed parameters into modeling pipeline
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
n_jobs (int): number of processes
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
transformationStartTime = datetime.datetime.now()
from autots.tools.transform import GeneralTransformer
try:
coerce_integer = transformation_dict['coerce_integer']
grouping = transformation_dict['grouping']
if grouping == 'user' and grouping_ids is None:
grouping = 'kmeans5'
transformation_dict['grouping'] = 'kmeans5'
reconciliation = transformation_dict['reconciliation']
except Exception:
coerce_integer = False
grouping = None
grouping_ids = None
reconciliation = None
transformer_object = GeneralTransformer(
outlier_method=transformation_dict['outlier_method'],
outlier_threshold=transformation_dict['outlier_threshold'],
outlier_position=transformation_dict['outlier_position'],
fillna=transformation_dict['fillna'],
transformation=transformation_dict['transformation'],
detrend=transformation_dict['detrend'],
second_transformation=transformation_dict['second_transformation'],
transformation_param=transformation_dict['transformation_param'],
third_transformation=transformation_dict['third_transformation'],
transformation_param2=transformation_dict['transformation_param2'],
fourth_transformation=transformation_dict['fourth_transformation'],
discretization=transformation_dict['discretization'],
n_bins=transformation_dict['n_bins'],
grouping=grouping,
grouping_ids=grouping_ids,
reconciliation=reconciliation,
coerce_integer=coerce_integer,
).fit(df_train)
df_train_transformed = transformer_object.transform(df_train)
# slice the context, ie shorten the amount of data available.
if transformation_dict['context_slicer'] not in [None, 'None']:
from autots.tools.transform import simple_context_slicer
df_train_transformed = simple_context_slicer(
df_train_transformed,
method=transformation_dict['context_slicer'],
forecast_length=forecast_length,
)
# make sure regressor has same length. This could be a problem if wrong size regressor is passed.
if len(future_regressor_train) > 0:
future_regressor_train = future_regressor_train.tail(
df_train_transformed.shape[0]
)
transformation_runtime = datetime.datetime.now() - transformationStartTime
# from autots.evaluator.auto_model import ModelMonster
model = ModelMonster(
model_str,
parameters=parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
df_forecast = model.predict(
forecast_length=forecast_length, future_regressor=future_regressor_forecast
)
if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
raise ValueError(
"Model {} returned NaN for one or more series".format(model_str)
)
transformationStartTime = datetime.datetime.now()
# Inverse the transformations
df_forecast.forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.lower_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.lower_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.upper_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.upper_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.transformation_parameters = transformation_dict
# Remove negatives if desired
# There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower = 0), not sure which faster
if no_negatives:
df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
df_forecast.forecast = df_forecast.forecast.clip(lower=0)
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
if constraint is not None:
if verbose > 2:
print("Using constraint.")
constraint = float(constraint)
train_std = df_train.std(axis=0)
train_min = df_train.min(axis=0) - (constraint * train_std)
train_max = df_train.max(axis=0) + (constraint * train_std)
df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
transformation_runtime = transformation_runtime + (
datetime.datetime.now() - transformationStartTime
)
df_forecast.transformation_runtime = transformation_runtime
return df_forecast
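# --- Illustrative usage sketch (editor's addition, not part of the original AutoTS source) ---
# One direct call to ModelPrediction. df_train is assumed to be a numeric DataFrame with a
# DatetimeIndex and one column per series; RandomTransform() (used elsewhere in this module)
# supplies a valid transformation dict, and an empty parameter dict falls back to defaults.
def _model_prediction_usage_sketch(df_train):
    """Hedged example: run a single 'LastValueNaive' model through the full pipeline."""
    prediction = ModelPrediction(
        df_train,
        forecast_length=14,
        transformation_dict=RandomTransform(),
        model_str='LastValueNaive',
        parameter_dict={},
        frequency='infer',
        prediction_interval=0.9,
    )
    return prediction.forecast  # point forecasts; .upper_forecast/.lower_forecast also set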
class TemplateEvalObject(object):
"""Object to contain all your failures!."""
def __init__(
self,
model_results=pd.DataFrame(),
per_timestamp_smape=pd.DataFrame(),
per_series_mae=pd.DataFrame(),
per_series_spl=pd.DataFrame(),
per_series_rmse1=pd.DataFrame(),
per_series_rmse2=pd.DataFrame(),
model_count: int = 0,
):
self.model_results = model_results
self.model_count = model_count
self.per_series_mae = per_series_mae
self.per_series_spl = per_series_spl
self.per_series_rmse1 = per_series_rmse1
self.per_series_rmse2 = per_series_rmse2
self.per_timestamp_smape = per_timestamp_smape
def __repr__(self):
"""Print."""
return 'Results objects, result table at self.model_results (pd.df)'
def concat(self, another_eval):
"""Merge another TemplateEvalObject onto this one."""
self.model_results = pd.concat(
[self.model_results, another_eval.model_results],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
self.per_series_mae = pd.concat(
[self.per_series_mae, another_eval.per_series_mae], axis=0, sort=False
)
self.per_series_spl = pd.concat(
[self.per_series_spl, another_eval.per_series_spl], axis=0, sort=False
)
self.per_series_rmse1 = pd.concat(
[self.per_series_rmse1, another_eval.per_series_rmse1], axis=0, sort=False
)
self.per_series_rmse2 = pd.concat(
[self.per_series_rmse2, another_eval.per_series_rmse2], axis=0, sort=False
)
self.per_timestamp_smape = pd.concat(
[self.per_timestamp_smape, another_eval.per_timestamp_smape],
axis=0,
sort=False,
)
self.model_count = self.model_count + another_eval.model_count
return self
def save(self, filename):
"""Save results to a file."""
if '.csv' in filename:
self.model_results.to_csv(filename, index=False)
elif '.pickle' in filename:
import pickle
with open(filename, "wb") as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
else:
raise ValueError("filename not .csv or .pickle")
def unpack_ensemble_models(
template,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
keep_ensemble: bool = True,
recursive: bool = False,
):
"""Take ensemble models from template and add as new rows."""
ensemble_template = pd.DataFrame()
template['Ensemble'] = np.where(
((template['Model'] == 'Ensemble') & (template['Ensemble'] < 1)),
1,
template['Ensemble'],
)
for index, value in template[template['Ensemble'] != 0][
'ModelParameters'
].iteritems():
model_dict = json.loads(value)['models']
model_df = pd.DataFrame.from_dict(model_dict, orient='index')
model_df = model_df.rename_axis('ID').reset_index(drop=False)
model_df['Ensemble'] = 0
# unpack nested ensembles, if recursive specified
if recursive and 'Ensemble' in model_df['Model'].tolist():
model_df = pd.concat(
[
unpack_ensemble_models(
model_df, recursive=True, template_cols=template_cols
),
model_df,
],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
ensemble_template = pd.concat(
[ensemble_template, model_df], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
if not keep_ensemble:
template = template[template['Ensemble'] == 0]
template = pd.concat(
[template, ensemble_template], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
template = template.drop_duplicates(subset=template_cols)
return template
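# --- Illustrative worked example (editor's addition, not part of the original AutoTS source) ---
# Shows the stored ensemble format that unpack_ensemble_models expands: only the 'models'
# mapping inside ModelParameters is read here; the surrounding keys are illustrative assumptions.
def _unpack_ensemble_usage_sketch():
    """Hedged example: expand one ensemble row into the ensemble row plus its component row."""
    inner = {
        'abc123': {
            'Model': 'LastValueNaive',
            'ModelParameters': '{}',
            'TransformationParameters': '{}',
        }
    }
    template = pd.DataFrame(
        {
            'Model': ['Ensemble'],
            'ModelParameters': [json.dumps({'model_name': 'simple', 'models': inner})],
            'TransformationParameters': ['{}'],
            'Ensemble': [1],
        }
    )
    return unpack_ensemble_models(template)  # two rows: the ensemble and its component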
def PredictWitch(
template,
df_train,
forecast_length: int,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""Takes numeric data, returns numeric forecasts.
Only one model (albeit potentially an ensemble)!
Well, she turned me into a newt.
A newt?
I got better. -Python
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
        template (pandas.DataFrame or pandas.Series): single template row with Model, ModelParameters, TransformationParameters, and Ensemble
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
template_cols (list): column names of columns used as model template
Returns:
        PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
template = template.head(1)
for index_upper, row_upper in template.iterrows():
# if an ensemble
if row_upper['Model'] == 'Ensemble':
from autots.models.ensemble import EnsembleForecast
forecasts_list = []
forecasts_runtime = []
forecasts = []
upper_forecasts = []
lower_forecasts = []
ens_model_str = row_upper['Model']
ens_params = json.loads(row_upper['ModelParameters'])
ens_template = unpack_ensemble_models(
template, template_cols, keep_ensemble=False
)
for index, row in ens_template.iterrows():
# recursive recursion!
if verbose > 2:
total_ens = ens_template.shape[0]
                    print(
                        "Ensemble component {} of {} ".format(
                            str(index), str(total_ens)
                        )
                    )
df_forecast = PredictWitch(
row,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
forecasts_list.extend([model_id])
forecasts_runtime.extend([total_runtime])
forecasts.extend([df_forecast.forecast])
upper_forecasts.extend([df_forecast.upper_forecast])
lower_forecasts.extend([df_forecast.lower_forecast])
ens_forecast = EnsembleForecast(
ens_model_str,
ens_params,
forecasts_list=forecasts_list,
forecasts=forecasts,
lower_forecasts=lower_forecasts,
upper_forecasts=upper_forecasts,
forecasts_runtime=forecasts_runtime,
prediction_interval=prediction_interval,
)
return ens_forecast
# if not an ensemble
else:
model_str = row_upper['Model']
parameter_dict = json.loads(row_upper['ModelParameters'])
transformation_dict = json.loads(row_upper['TransformationParameters'])
df_forecast = ModelPrediction(
df_train,
forecast_length,
transformation_dict,
model_str,
parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
grouping_ids=grouping_ids,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
startTimeStamps=startTimeStamps,
n_jobs=n_jobs,
)
return df_forecast
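# --- Illustrative usage sketch (editor's addition, not part of the original AutoTS source) ---
# PredictWitch is template-driven: a single row (or Series) with the four template columns
# is enough. The naive model and random transformations below are assumed example choices.
def _predict_witch_usage_sketch(df_train):
    """Hedged example: forecast from one non-ensemble template row."""
    row = pd.Series(
        {
            'Model': 'LastValueNaive',
            'ModelParameters': json.dumps({}),
            'TransformationParameters': json.dumps(RandomTransform()),
            'Ensemble': 0,
        }
    )
    df_forecast = PredictWitch(
        row, df_train=df_train, forecast_length=14, prediction_interval=0.9
    )
    return df_forecast.forecast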
def TemplateWizard(
template,
df_train,
df_test,
weights,
model_count: int = 0,
ensemble: str = True,
forecast_length: int = 14,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
validation_round: int = 0,
model_interrupt: bool = False,
grouping_ids=None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Take Template, returns Results.
There are some who call me... Tim. - Python
Args:
        template (pandas.DataFrame): containing model str, and json of transformations and hyperparameters
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
df_test (pandas.DataFrame): dataframe of actual values of (forecast length * n series)
weights (dict): key = column/series_id, value = weight
ensemble (str): desc of ensemble types to prepare metric collection
forecast_length (int): number of periods to forecast
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
validation_round (int): int passed to record current validation.
model_interrupt (bool): if True, keyboard interrupts are caught and only break current model eval.
template_cols (list): column names of columns used as model template
Returns:
TemplateEvalObject
"""
ensemble = str(ensemble)
template_result = TemplateEvalObject()
template_result.model_count = model_count
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
# template = unpack_ensemble_models(template, template_cols, keep_ensemble = False)
for index, row in template.iterrows():
try:
model_str = row['Model']
parameter_dict = json.loads(row['ModelParameters'])
transformation_dict = json.loads(row['TransformationParameters'])
ensemble_input = row['Ensemble']
current_template = pd.DataFrame(row).transpose()
template_result.model_count += 1
if verbose > 0:
if verbose > 1:
print(
"Model Number: {} with model {} in Validation {} with params {} and transformations {}".format(
str(template_result.model_count),
model_str,
str(validation_round),
json.dumps(parameter_dict),
json.dumps(transformation_dict),
)
)
else:
print(
"Model Number: {} with model {} in Validation {} ".format(
str(template_result.model_count),
model_str,
str(validation_round),
)
)
df_forecast = PredictWitch(
current_template,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
            per_ts = 'distance' in ensemble
if 'hdist' in ensemble:
dist_n = int(np.ceil(0.3 * forecast_length))
else:
dist_n = None
model_error = PredictionEval(
df_forecast,
df_test,
series_weights=weights,
df_train=df_train,
per_timestamp_errors=per_ts,
dist_n=dist_n,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
result = pd.DataFrame(
{
'ID': model_id,
'Model': df_forecast.model_name,
'ModelParameters': json.dumps(df_forecast.model_parameters),
'TransformationParameters': json.dumps(
df_forecast.transformation_parameters
),
'TransformationRuntime': df_forecast.transformation_runtime,
'FitRuntime': df_forecast.fit_runtime,
'PredictRuntime': df_forecast.predict_runtime,
'TotalRuntime': total_runtime,
'Ensemble': ensemble_input,
'Exceptions': np.nan,
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
a = pd.DataFrame(
model_error.avg_metrics_weighted.rename(lambda x: x + '_weighted')
).transpose()
result = pd.concat(
[result, pd.DataFrame(model_error.avg_metrics).transpose(), a], axis=1
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
if 'horizontal' in ensemble:
cur_mae = model_error.per_series_metrics.loc['mae']
cur_mae = pd.DataFrame(cur_mae).transpose()
cur_mae.index = [model_id]
template_result.per_series_mae = pd.concat(
[template_result.per_series_mae, cur_mae], axis=0
)
if 'probabilistic' in ensemble:
cur_spl = model_error.per_series_metrics.loc['spl']
cur_spl = pd.DataFrame(cur_spl).transpose()
cur_spl.index = [model_id]
template_result.per_series_spl = pd.concat(
[template_result.per_series_spl, cur_spl], axis=0
)
if 'distance' in ensemble:
cur_smape = model_error.per_timestamp.loc['weighted_smape']
cur_smape = pd.DataFrame(cur_smape).transpose()
cur_smape.index = [model_id]
template_result.per_timestamp_smape = pd.concat(
[template_result.per_timestamp_smape, cur_smape], axis=0
)
if 'hdist' in ensemble:
cur_rmse1 = model_error.per_series_metrics.loc['rmse1']
cur_rmse2 = model_error.per_series_metrics.loc['rmse2']
cur_rmse1 = pd.DataFrame(cur_rmse1).transpose()
cur_rmse2 = pd.DataFrame(cur_rmse2).transpose()
cur_rmse1.index = [model_id]
cur_rmse2.index = [model_id]
template_result.per_series_rmse1 = pd.concat(
[template_result.per_series_rmse1, cur_rmse1], axis=0
)
template_result.per_series_rmse2 = pd.concat(
[template_result.per_series_rmse2, cur_rmse2], axis=0
)
except KeyboardInterrupt:
if model_interrupt:
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': "KeyboardInterrupt by user",
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
else:
raise KeyboardInterrupt
except Exception as e:
if verbose >= 0:
print(
'Template Eval Error: {} in model {}: {}'.format(
(repr(e)), template_result.model_count, model_str
)
)
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': repr(e),
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
return template_result
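# --- Illustrative usage sketch (editor's addition, not part of the original AutoTS source) ---
# Evaluating a small random template against a holdout. df_train/df_test are assumed to be
# aligned numeric DataFrames; equal series weights and the 'simple' ensemble flag are
# assumed example settings.
def _template_wizard_usage_sketch(df_train, df_test):
    """Hedged example: score a few naive models on a test window."""
    weights = {col: 1 for col in df_train.columns}
    template = RandomTemplate(n=3, model_list=['LastValueNaive', 'AverageValueNaive'])
    results = TemplateWizard(
        template,
        df_train,
        df_test,
        weights,
        ensemble='simple',
        forecast_length=df_test.shape[0],
    )
    return results.model_results.sort_values('smape_weighted')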
def RandomTemplate(
n: int = 10,
model_list: list = [
'ZeroesNaive',
'LastValueNaive',
'AverageValueNaive',
'GLS',
'GLM',
'ETS',
'ARIMA',
'FBProphet',
'RollingRegression',
'GluonTS',
'UnobservedComponents',
'VARMAX',
'VECM',
'DynamicFactor',
],
):
"""
Returns a template dataframe of randomly generated transformations, models, and hyperparameters.
Args:
n (int): number of random models to return
"""
n = abs(int(n))
template = pd.DataFrame()
counter = 0
while len(template.index) < n:
model_str = np.random.choice(model_list)
param_dict = ModelMonster(model_str).get_new_params()
trans_dict = RandomTransform()
row = pd.DataFrame(
{
'Model': model_str,
'ModelParameters': json.dumps(param_dict),
'TransformationParameters': json.dumps(trans_dict),
'Ensemble': 0,
},
index=[0],
)
template = pd.concat([template, row], axis=0, ignore_index=True)
template.drop_duplicates(inplace=True)
counter += 1
if counter > (n * 3):
break
return template
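# --- Illustrative usage sketch (editor's addition, not part of the original AutoTS source) ---
# Restricting model_list to fast naive models keeps this example light; the returned frame
# carries the standard template columns used throughout this module.
def _random_template_usage_sketch():
    """Hedged example: draw 5 random model/transformation combinations."""
    template = RandomTemplate(
        n=5, model_list=['ZeroesNaive', 'LastValueNaive', 'AverageValueNaive']
    )
    return template[['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble']]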
def UniqueTemplates(
existing_templates,
new_possibilities,
selection_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
    Returns unique dataframe rows from new_possibilities not in existing_templates.
Args:
        selection_cols (list): list of column names to use to judge uniqueness/match on
"""
keys = list(new_possibilities[selection_cols].columns.values)
idx1 = existing_templates.copy().set_index(keys).index
idx2 = new_possibilities.set_index(keys).index
new_template = new_possibilities[~idx2.isin(idx1)]
return new_template
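# --- Illustrative worked example (editor's addition, not part of the original AutoTS source) ---
# With the default selection_cols a candidate row is dropped only when all four template
# columns match an existing row exactly; here only the 'GLS' row survives.
def _unique_templates_usage_sketch():
    """Hedged example with two tiny templates; returns the single genuinely new row."""
    cols = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble']
    existing = pd.DataFrame([['LastValueNaive', '{}', '{}', 0]], columns=cols)
    candidates = pd.DataFrame(
        [['LastValueNaive', '{}', '{}', 0], ['GLS', '{}', '{}', 0]], columns=cols
    )
    return UniqueTemplates(existing, candidates)  # only the 'GLS' row remains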
def dict_recombination(a: dict, b: dict):
"""Recombine two dictionaries with identical keys. Return new dict."""
b_keys = [*b]
key_size = int(len(b_keys) / 2) if len(b_keys) > 1 else 1
bs_keys = np.random.choice(b_keys, size=key_size)
b_prime = {k: b[k] for k in bs_keys}
c = {**a, **b_prime} # overwrites with B
return c
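# --- Illustrative worked example (editor's addition, not part of the original AutoTS source) ---
# Roughly half of b's keys (sampled at random, possibly with repeats) overwrite the matching
# values in a copy of a; every key of a is always present in the result. The parameter
# values below are made-up examples.
def _dict_recombination_usage_sketch():
    """Hedged example: recombine two hyperparameter dicts with identical keys."""
    a = {'trend': 'add', 'damped': False, 'seasonal_periods': 7}
    b = {'trend': 'mul', 'damped': True, 'seasonal_periods': 12}
    child = dict_recombination(a, b)
    # e.g. {'trend': 'add', 'damped': True, 'seasonal_periods': 7} -- the exact mix is random
    return child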
def trans_dict_recomb(dict_array):
"""Recombine two transformation param dictionaries from array of dicts."""
r_sel = np.random.choice(dict_array, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
out_keys = ['outlier_method', 'outlier_threshold', 'outlier_position']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in out_keys}}
mid_trans_keys = ['second_transformation', 'transformation_param']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
mid_trans_keys = ['third_transformation', 'transformation_param2']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
disc_keys = ['discretization', 'n_bins']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in disc_keys}}
disc_keys = ['grouping', 'reconciliation']
current_dict = np.random.choice([a, b], size=1).item()
if all([x in current_dict.keys() for x in disc_keys]):
c = {**c, **{k: current_dict[k] for k in disc_keys}}
return c
def _trans_dicts(current_ops, best=None, n: int = 5):
fir = json.loads(current_ops.iloc[0, :]['TransformationParameters'])
cur_len = current_ops.shape[0]
if cur_len > 1:
# select randomly from best of data, doesn't handle lengths < 2
top_r = np.floor((cur_len / 5) + 2)
r_id = np.random.randint(1, top_r)
sec = json.loads(current_ops.iloc[r_id, :]['TransformationParameters'])
else:
sec = RandomTransform()
r = RandomTransform()
if best is None:
best = RandomTransform()
arr = [fir, sec, best, r]
trans_dicts = [json.dumps(trans_dict_recomb(arr)) for _ in range(n)]
return trans_dicts
def NewGeneticTemplate(
model_results,
submitted_parameters,
sort_column: str = "smape_weighted",
sort_ascending: bool = True,
max_results: int = 50,
max_per_model_class: int = 5,
top_n: int = 50,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Return new template given old template with model accuracies.
Args:
model_results (pandas.DataFrame): models that have actually been run
        submitted_parameters (pandas.DataFrame): models tried (may have returned different parameters than those recorded in results)
"""
new_template = pd.DataFrame()
# filter existing templates
sorted_results = model_results[model_results['Ensemble'] == 0].copy()
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
)
sorted_results = sorted_results.drop_duplicates(subset=template_cols, keep='first')
if str(max_per_model_class).isdigit():
sorted_results = (
sorted_results.sort_values(sort_column, ascending=sort_ascending)
.groupby('Model')
.head(max_per_model_class)
.reset_index()
)
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
).head(top_n)
no_params = ['ZeroesNaive', 'LastValueNaive', 'GLS']
recombination_approved = [
'SeasonalNaive',
'MotifSimulation',
"ETS",
'DynamicFactor',
'VECM',
'VARMAX',
'GLM',
'ARIMA',
'FBProphet',
'GluonTS',
'RollingRegression',
'VAR',
'WindowRegression',
'TensorflowSTS',
'TFPRegression',
]
borrow = ['ComponentAnalysis']
best = json.loads(sorted_results.iloc[0, :]['TransformationParameters'])
for model_type in sorted_results['Model'].unique():
if model_type in no_params:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_param = current_ops.iloc[0, :]['ModelParameters']
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_param,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
elif model_type in recombination_approved:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 4
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
# select the best model of this type
fir = json.loads(current_ops.iloc[0, :]['ModelParameters'])
cur_len = current_ops.shape[0]
if cur_len > 1:
# select randomly from best of data, doesn't handle lengths < 2
top_r = np.floor((cur_len / 5) + 2)
r_id = np.random.randint(1, top_r)
sec = json.loads(current_ops.iloc[r_id, :]['ModelParameters'])
else:
sec = ModelMonster(model_type).get_new_params()
# generate new random parameters ('mutations')
r = ModelMonster(model_type).get_new_params()
r2 = ModelMonster(model_type).get_new_params()
arr = [fir, sec, r2, r]
model_dicts = list()
# recombine best and random to create new generation
for _ in range(n):
r_sel = np.random.choice(arr, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
else:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_dicts = list()
for _ in range(n):
c = ModelMonster(model_type).get_new_params()
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
new_template = pd.concat(
[new_template, new_row], axis=0, ignore_index=True, sort=False
)
"""
# recombination of transforms across models by shifting transforms
recombination = sorted_results.tail(len(sorted_results.index) - 1).copy()
recombination['TransformationParameters'] = sorted_results['TransformationParameters'].shift(1).tail(len(sorted_results.index) - 1)
new_template = pd.concat([new_template,
recombination.head(top_n)[template_cols]],
axis=0, ignore_index=True, sort=False)
"""
# remove generated models which have already been tried
sorted_results = pd.concat(
[submitted_parameters, sorted_results], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
new_template = UniqueTemplates(
sorted_results, new_template, selection_cols=template_cols
).head(max_results)
return new_template
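# --- Illustrative usage sketch (editor's addition, not part of the original AutoTS source) ---
# model_results is assumed to be the scored table accumulated from TemplateWizard runs;
# passing the same rows as submitted_parameters simply prevents re-proposing models that
# were already tried. max_results=20 is an assumed example value.
def _new_genetic_template_usage_sketch(model_results):
    """Hedged example: breed the next batch of candidate models from past results."""
    cols = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble']
    return NewGeneticTemplate(
        model_results,
        submitted_parameters=model_results[cols],
        sort_column='smape_weighted',
        max_results=20,
    )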
def validation_aggregation(validation_results):
"""Aggregate a TemplateEvalObject."""
groupby_cols = [
'ID',
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
]
col_aggs = {
'Runs': 'sum',
'smape': 'mean',
'mae': 'mean',
'rmse': 'mean',
'containment': 'mean',
'spl': 'mean',
'contour': 'mean',
'smape_weighted': 'mean',
'mae_weighted': 'mean',
'rmse_weighted': 'mean',
'containment_weighted': 'mean',
'contour_weighted': 'mean',
'spl_weighted': 'mean',
'TotalRuntimeSeconds': 'mean',
'Score': 'mean',
}
validation_results.model_results['TotalRuntimeSeconds'] = (
validation_results.model_results['TotalRuntime'].dt.seconds + 1
)
validation_results.model_results = validation_results.model_results[
pd.isnull(validation_results.model_results['Exceptions'])
]
validation_results.model_results = validation_results.model_results.replace(
[np.inf, -np.inf], np.nan
)
validation_results.model_results = validation_results.model_results.groupby(
groupby_cols
).agg(col_aggs)
validation_results.model_results = validation_results.model_results.reset_index(
drop=False
)
return validation_results
def generate_score(
model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9
):
"""Generate score based on relative accuracies."""
try:
smape_weighting = metric_weighting['smape_weighting']
except KeyError:
smape_weighting = 1
try:
mae_weighting = metric_weighting['mae_weighting']
except KeyError:
mae_weighting = 0
try:
rmse_weighting = metric_weighting['rmse_weighting']
except KeyError:
rmse_weighting = 0
try:
containment_weighting = metric_weighting['containment_weighting']
except KeyError:
containment_weighting = 0
try:
runtime_weighting = metric_weighting['runtime_weighting'] * 0.1
except KeyError:
runtime_weighting = 0
try:
spl_weighting = metric_weighting['spl_weighting']
except KeyError:
spl_weighting = 0
try:
contour_weighting = metric_weighting['contour_weighting']
except KeyError:
contour_weighting = 0
# handle various runtime information records
if 'TotalRuntimeSeconds' in model_results.columns:
if 'TotalRuntime' in model_results.columns:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntime'].dt.seconds,
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntimeSeconds'].max(),
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = model_results['TotalRuntime'].dt.seconds
# generate minimizing scores, where smaller = better accuracy
try:
model_results = model_results.replace([np.inf, -np.inf], np.nan)
# model_results = model_results.fillna(value=model_results.max(axis=0))
smape_score = model_results['smape_weighted'] / (
model_results['smape_weighted'].min(skipna=True) + 1
) # smaller better
rmse_scaler = model_results['rmse_weighted'].median(skipna=True)
rmse_scaler = 1 if rmse_scaler == 0 else rmse_scaler
rmse_score = model_results['rmse_weighted'] / rmse_scaler
mae_scaler = model_results['mae_weighted'].median(skipna=True)
mae_scaler = 1 if mae_scaler == 0 else mae_scaler
mae_score = model_results['mae_weighted'] / mae_scaler
containment_score = (
abs(prediction_interval - model_results['containment'])
) + 1 # from 1 to 2, smaller better
runtime = model_results['TotalRuntimeSeconds'] + 120
runtime_score = runtime / (runtime.min(skipna=True)) # smaller better
spl_score = model_results['spl_weighted'] / (
model_results['spl_weighted'].min(skipna=True) + 1
) # smaller better
contour_score = (
(1 / (model_results['contour_weighted']))
.replace([np.inf, -np.inf, np.nan], 10)
.clip(upper=10)
)
except KeyError:
raise KeyError(
"Inconceivable! Evaluation Metrics are missing and all models have failed, by an error in TemplateWizard or metrics. A new template may help, or an adjusted model_list."
)
return (
(smape_score * smape_weighting)
+ (mae_score * mae_weighting)
+ (rmse_score * rmse_weighting)
+ (containment_score * containment_weighting)
+ (runtime_score * runtime_weighting)
+ (spl_score * spl_weighting)
+ (contour_score * contour_weighting)
)
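# --- Illustrative worked example (editor's addition, not part of the original AutoTS source) ---
# Scores a tiny, made-up results table. Column names follow the metrics produced by
# TemplateWizard above; lower scores are better, and metric_weighting controls the blend.
def _generate_score_usage_sketch():
    """Hedged example: rank two fake model results by a weighted score."""
    fake_results = pd.DataFrame(
        {
            'smape_weighted': [12.0, 15.0],
            'mae_weighted': [3.0, 2.5],
            'rmse_weighted': [4.0, 3.8],
            'containment': [0.88, 0.95],
            'spl_weighted': [1.1, 1.3],
            'contour_weighted': [0.5, 0.6],
            'TotalRuntimeSeconds': [10, 45],
        }
    )
    weighting = {'smape_weighting': 2, 'mae_weighting': 1, 'runtime_weighting': 1}
    return generate_score(fake_results, metric_weighting=weighting)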
| 3,116
| 0
| 104
|
b6fd8e198c7dfa420a6d4c45a470a2d144bb9ee4
| 1,025
|
py
|
Python
|
aoc2020/day12/day12_part2.py
|
GetPastTheMonkey/advent-of-code
|
db80be6d87baba4d5315cc69276905c55762da86
|
[
"MIT"
] | 1
|
2019-09-15T16:37:24.000Z
|
2019-09-15T16:37:24.000Z
|
aoc2020/day12/day12_part2.py
|
GetPastTheMonkey/advent-of-code
|
db80be6d87baba4d5315cc69276905c55762da86
|
[
"MIT"
] | null | null | null |
aoc2020/day12/day12_part2.py
|
GetPastTheMonkey/advent-of-code
|
db80be6d87baba4d5315cc69276905c55762da86
|
[
"MIT"
] | null | null | null |
from utils import get_input_lines
pos_x = 0
pos_y = 0
waypoint_x = 10
waypoint_y = 1
for line in get_input_lines(__file__):
action = line[0]
n = int(line[1:])
# Handle actions
if action == "N":
waypoint_y += n
elif action == "S":
waypoint_y -= n
elif action == "E":
waypoint_x += n
elif action == "W":
waypoint_x -= n
elif action == "L":
# Rotate (n//90) times CCW: (new_x, new_y) = (-old_y, old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = -waypoint_y
waypoint_y = tmp_x
elif action == "R":
# Rotate (n//90) times CW: (new_x, new_y) = (old_y, -old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = waypoint_y
waypoint_y = -tmp_x
elif action == "F":
pos_x += n * waypoint_x
pos_y += n * waypoint_y
else:
raise NotImplementedError(f"Unknown action '{action}'")
print(abs(pos_x) + abs(pos_y))
| 25
| 68
| 0.537561
|
from utils import get_input_lines
pos_x = 0
pos_y = 0
waypoint_x = 10
waypoint_y = 1
for line in get_input_lines(__file__):
action = line[0]
n = int(line[1:])
# Handle actions
if action == "N":
waypoint_y += n
elif action == "S":
waypoint_y -= n
elif action == "E":
waypoint_x += n
elif action == "W":
waypoint_x -= n
elif action == "L":
# Rotate (n//90) times CCW: (new_x, new_y) = (-old_y, old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = -waypoint_y
waypoint_y = tmp_x
elif action == "R":
# Rotate (n//90) times CW: (new_x, new_y) = (old_y, -old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = waypoint_y
waypoint_y = -tmp_x
elif action == "F":
pos_x += n * waypoint_x
pos_y += n * waypoint_y
else:
raise NotImplementedError(f"Unknown action '{action}'")
print(abs(pos_x) + abs(pos_y))
| 0
| 0
| 0
|
2326b0fce7d21d579893e74d3e91c5354e98cf2f
| 282
|
py
|
Python
|
src/year2019/day13a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 10
|
2017-12-11T17:54:52.000Z
|
2021-12-09T20:16:30.000Z
|
src/year2019/day13a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 260
|
2015-12-09T11:03:03.000Z
|
2021-12-12T14:32:23.000Z
|
src/year2019/day13a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | null | null | null |
"""2019 - Day 13 Part 1: Care Package."""
from src.year2019.intcode import Computer
def solve(task: str) -> int:
"""Count the number of blocks."""
computer = Computer()
computer.load_program(task)
computer.execute()
return list(computer.stdout)[2::3].count(2)
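# --- Illustrative note (editor's addition, not part of the original solution) ---
# The Intcode program draws the screen as (x, y, tile_id) triples, so slicing every third
# output value starting at index 2 isolates the tile ids; tile id 2 marks a block.
def _count_blocks_slice_sketch():
    """Hedged example of the [2::3] slice on a fake output stream."""
    fake_stdout = [0, 0, 2, 1, 0, 2, 2, 0, 1]  # two block tiles and one wall tile
    return fake_stdout[2::3].count(2)  # -> 2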
| 25.636364
| 47
| 0.666667
|
"""2019 - Day 13 Part 1: Care Package."""
from src.year2019.intcode import Computer
def solve(task: str) -> int:
"""Count the number of blocks."""
computer = Computer()
computer.load_program(task)
computer.execute()
return list(computer.stdout)[2::3].count(2)
| 0
| 0
| 0
|
6af6580ccb03ce18151efe09dbeb559263c69624
| 1,816
|
py
|
Python
|
examples/fontforge-old/demoAddToMenu.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 41
|
2015-05-21T21:12:26.000Z
|
2022-02-17T17:23:14.000Z
|
examples/fontforge-old/demoAddToMenu.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 63
|
2015-05-15T10:25:55.000Z
|
2021-02-23T04:51:17.000Z
|
examples/fontforge-old/demoAddToMenu.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 12
|
2015-06-12T11:52:08.000Z
|
2020-09-23T10:40:59.000Z
|
#!/usr/bin/env python
'FontForge: Demo script to add menu items to FF tools menu'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2014 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
import sys, os, fontforge
sys.path.append(os.path.join(os.environ['HOME'], 'src/pysilfont/scripts'))
import samples.demoFunctions
from samples.demoFunctions import functionList, callFunctions
#from samples.demoCallFunctions import callFunctions
funcList=functionList()
for functionGroup in funcList :
menuType = funcList[functionGroup][0]
fontforge.registerMenuItem(toolMenuFunction,None,functionGroup,menuType,None,functionGroup);
print functionGroup, " registered"
''' This script needs to be called from one of the folders that FontForge looks in for scripts to
run when it is started. With current versions of FontForge, one is Home/.config/fontforge/python.
You may need to turn on showing hidden files (ctrl-H in Nautilus) before you can see the .config
folder. Within there create a one-line python script, say called sampledemo.py, containing a call
to this script, eg:
execfile("/home/david/src/pysilfont/scripts/samples/demoAddToMenu.py")
Due to the reload(samples.demoFunctions) line above, changes to functions defined in demoFunctions.py
are dynamic, ie FontForge does not have to be restarted (as would be the case if the functions were
called directly from the tools menu). Functions can even be added dynamically to the function groups.
If new function groups are defined, FontForge does have to be restarted to add them to the tools menu.
'''
| 46.564103
| 102
| 0.785793
|
#!/usr/bin/env python
'FontForge: Demo script to add menu items to FF tools menu'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2014 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
import sys, os, fontforge
sys.path.append(os.path.join(os.environ['HOME'], 'src/pysilfont/scripts'))
import samples.demoFunctions
from samples.demoFunctions import functionList, callFunctions
#from samples.demoCallFunctions import callFunctions
def toolMenuFunction(functionGroup,font) :
reload (samples.demoFunctions)
callFunctions(functionGroup,font)
funcList=functionList()
for functionGroup in funcList :
menuType = funcList[functionGroup][0]
fontforge.registerMenuItem(toolMenuFunction,None,functionGroup,menuType,None,functionGroup);
print functionGroup, " registered"
''' This script needs to be called from one of the folders that FontForge looks in for scripts to
run when it is started. With current versions of FontForge, one is Home/.config/fontforge/python.
You may need to turn on showing hidden files (ctrl-H in Nautilus) before you can see the .config
folder. Within there create a one-line python script, say called sampledemo.py, containing a call
to this script, eg:
execfile("/home/david/src/pysilfont/scripts/samples/demoAddToMenu.py")
Due to the reload(samples.demoFunctions) line above, changes to functions defined in demoFunctions.py
are dynamic, ie FontForge does not have to be restarted (as would be the case if the functions were
called directly from the tools menu). Functions can even be added dynamically to the function groups.
If new function groups are defined, FontForge does have to be restarted to add them to the tools menu.
'''
| 94
| 0
| 23
|
46615d419dda76960016bd1ad4896644e6b356d7
| 29,475
|
py
|
Python
|
tensor2struct/utils/tree_kernels.py
|
chenyangh/tensor2struct-public
|
d3257cba6d76d3c658a58a78f687d986bdc755cf
|
[
"MIT"
] | 69
|
2021-04-14T06:35:07.000Z
|
2022-03-31T18:35:05.000Z
|
tensor2struct/utils/tree_kernels.py
|
chenyangh/tensor2struct-public
|
d3257cba6d76d3c658a58a78f687d986bdc755cf
|
[
"MIT"
] | 11
|
2021-04-16T11:16:04.000Z
|
2022-03-22T21:21:29.000Z
|
tensor2struct/utils/tree_kernels.py
|
chenyangh/tensor2struct-public
|
d3257cba6d76d3c658a58a78f687d986bdc755cf
|
[
"MIT"
] | 18
|
2021-04-14T07:19:56.000Z
|
2022-03-23T19:26:18.000Z
|
import math
from copy import deepcopy
from tensor2struct.utils import tree
# Common routines for kernel functions
####
# An extremely simple cache
| 41.927454
| 145
| 0.397116
|
import math
from copy import deepcopy
from tensor2struct.utils import tree
class Kernel:
# Common routines for kernel functions
def kernel(self, a, b):
# compute the tree kernel on the trees a and b
if not isinstance(a, tree.Tree):
print("ERROR: first parameter has to be a Tree Object")
return ""
if not isinstance(b, tree.Tree):
print("ERROR: second parameter has to be a Tree Object")
return ""
self.preProcess(a)
self.preProcess(b)
return self.evaluate(a, b)
def preProcess(self, a):
# Create any data structure useful for computing the kernel
# To be instantiated in subclasses
print("ERROR: prepProcess() must be executed in subclasses")
pass
def evaluate(self, a, b):
# To be instantiated in subclasses
print("ERROR: evaluated() must be executed in subclasses")
pass
def printKernelMatrix(self, dataset):
if not isinstance(dataset, tree.Dataset):
print("ERROR: the first Parameter must be a Dataset object")
return
ne = len(dataset)
for i in range(ne):
for j in range(i, ne):
print(
"%d %d %.2f"
% (i, j, self.kernel(dataset.getExample(i), dataset.getExample(j)),)
)
class KernelST(Kernel):
def __init__(self, l, savememory=1, hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.savememory = savememory
def preProcess(self, a):
if hasattr(a, "kernelstrepr"): # already preprocessed
return
if not hasattr(a.root, "stsize"):
a.root.setSubtreeSize()
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelstrepr = tree.SubtreeIDSubtreeSizeList(a.root)
a.kernelstrepr.sort()
if self.savememory == 1:
a.deleteRootTreeNode()
def evaluate(self, a, b):
ha, hb = (a.kernelstrepr, b.kernelstrepr)
# Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize)
# a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i, j, k, toti, totj = (0, 0, 0, len(ha), len(hb))
while i < toti and j < totj:
if ha.getSubtreeID(i) == hb.getSubtreeID(j):
ci, cj = (i, j)
while i < toti and ha.getSubtreeID(i) == ha.getSubtreeID(ci):
i += 1
while j < totj and hb.getSubtreeID(j) == hb.getSubtreeID(cj):
j += 1
k += (i - ci) * (j - cj) * (self.l ** ha.getSubtreeSize(ci))
elif ha.getSubtreeID(i) < hb.getSubtreeID(j):
i += 1
else:
j += 1
return k
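# --- Illustrative standalone sketch (editor's addition, not part of the original module) ---
# KernelST.evaluate above walks two sorted lists of (subtree_id, subtree_size) pairs and,
# for every run of equal ids, adds (count in a) * (count in b) * l**size. The same
# two-pointer idea on plain Python lists:
def _subtree_match_count_sketch(pairs_a, pairs_b, l=0.5):
    """Hedged re-statement of the KernelST merge loop (inputs sorted by subtree id)."""
    i, j, k = 0, 0, 0.0
    while i < len(pairs_a) and j < len(pairs_b):
        if pairs_a[i][0] == pairs_b[j][0]:
            ci, cj = i, j
            while i < len(pairs_a) and pairs_a[i][0] == pairs_a[ci][0]:
                i += 1
            while j < len(pairs_b) and pairs_b[j][0] == pairs_b[cj][0]:
                j += 1
            k += (i - ci) * (j - cj) * (l ** pairs_a[ci][1])
        elif pairs_a[i][0] < pairs_b[j][0]:
            i += 1
        else:
            j += 1
    return k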
class KernelSST(Kernel):
def __init__(self, l, hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self, a):
if hasattr(a, "kernelsstrepr"): # already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelsstrepr = tree.ProdSubtreeList(a.root)
a.kernelsstrepr.sort()
def CSST(self, c, d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
else:
prod = self.l
nc = c.getOutdegree()
if nc == d.getOutdegree():
for ci in range(nc):
if c.getChild(ci).getProduction() == d.getChild(ci).getProduction():
prod *= 1 + self.CSST(c.getChild(ci), d.getChild(ci))
else:
cid, did = (
c.getChild(ci).getSubtreeID(),
d.getChild(ci).getSubtreeID(),
)
if cid < did:
self.cache.insert(str(cid) + str(did), 0)
else:
self.cache.insert(str(did) + str(cid), 0)
self.cache.insert(tmpkey, prod)
return float(prod)
def evaluate(self, a, b):
pa, pb = (a.kernelsstrepr, b.kernelsstrepr)
self.cache.removeAll()
i, j, k, toti, totj = (0, 0, 0, len(pa), len(pb))
while i < toti and j < totj:
if pa.getProduction(i) == pb.getProduction(j):
ci, cj = (i, j)
while i < toti and pa.getProduction(i) == pa.getProduction(ci):
j = cj
while j < totj and pb.getProduction(j) == pb.getProduction(cj):
k += self.CSST(pa.getTree(i), pb.getTree(j))
j += 1
i += 1
elif len(pa.getProduction(i)) < len(pb.getProduction(j)) or (
len(pa.getProduction(i)) == len(pb.getProduction(j))
and pa.getProduction(i) < pb.getProduction(j)
):
i += 1
else:
j += 1
return k
class KernelPT(Kernel):
def __init__(self, l, m, hashsep="#"):
self.l = float(l)
self.m = float(m)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self, a):
if hasattr(a, "kernelptrepr"): # already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelptrepr = tree.LabelSubtreeList(a.root)
a.kernelptrepr.sort()
def DeltaSk(self, a, b, nca, ncb):
DPS = [[0 for i in range(ncb + 1)] for j in range(nca + 1)]
DP = [[0 for i in range(ncb + 1)] for j in range(nca + 1)]
kmat = [0] * (nca + 1)
for i in range(1, nca + 1):
for j in range(1, ncb + 1):
if a.getChild(i - 1).getLabel() == b.getChild(j - 1).getLabel():
DPS[i][j] = self.CPT(a.getChild(i - 1), b.getChild(j - 1))
kmat[0] += DPS[i][j]
else:
DPS[i][j] = 0
for s in range(1, min(nca, ncb)):
for i in range(nca + 1):
DP[i][s - 1] = 0
for j in range(ncb + 1):
DP[s - 1][j] = 0
for i in range(s, nca + 1):
for j in range(s, ncb + 1):
DP[i][j] = (
DPS[i][j]
+ self.l * DP[i - 1][j]
+ self.l * DP[i][j - 1]
- self.l ** 2 * DP[i - 1][j - 1]
)
if a.getChild(i - 1).getLabel() == b.getChild(j - 1).getLabel():
DPS[i][j] = (
self.CPT(a.getChild(i - 1), b.getChild(j - 1))
* DP[i - 1][j - 1]
)
kmat[s] += DPS[i][j]
return sum(kmat)
def CPT(self, c, d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return self.cache.read(tmpkey)
else:
if c.getOutdegree() == 0 or d.getOutdegree() == 0:
prod = self.m * self.l ** 2
else:
prod = self.m * (
self.l ** 2 + self.DeltaSk(c, d, c.getOutdegree(), d.getOutdegree())
)
self.cache.insert(tmpkey, prod)
return prod
def evaluate(self, a, b):
self.cache.removeAll()
la, lb = (a.kernelptrepr, b.kernelptrepr)
i, j, k, toti, totj = (0, 0, 0, len(la), len(lb))
while i < toti and j < totj:
if la.getLabel(i) == lb.getLabel(j):
ci, cj = (i, j)
while i < toti and la.getLabel(i) == la.getLabel(ci):
j = cj
while j < totj and lb.getLabel(j) == lb.getLabel(cj):
k += self.CPT(la.getTree(i), lb.getTree(j))
j += 1
i += 1
elif la.getLabel(i) <= lb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdak(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
def preProcess(self, t):
if hasattr(t, "kernelpdakrepr"): # already preprocessed
return
if not hasattr(t.root, "stsize"):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.kernelpdakrepr = tree.SubtreePositionIDLabelSubtreeSizeList(t.root)
def mergetrees_with_depth(self, tree1, tree2):
merge = {}
for key in tree1:
if key in tree2:
merge[key] = (
{(tree1[key][0], tree1[key][2]): {tree1[key][1]: 1}},
{(tree2[key][0], tree2[key][2]): {tree2[key][1]: 1}},
)
del tree2[key]
else:
merge[key] = (
{(tree1[key][0], tree1[key][2]): {tree1[key][1]: 1}},
None,
)
for key in tree2:
merge[key] = (None, {(tree2[key][0], tree2[key][2]): {tree2[key][1]: 1}})
return merge
def visit_with_depth(self, jtree, node, depth, param, lambda_par, gamma_par):
kvalue = 0
if node is not None:
child = 0
key = str(hash(node + "#" + str(child)))
while key in jtree:
kvalue = kvalue + self.visit_with_depth(
jtree, key, depth + 1, param, lambda_par, gamma_par
)
if jtree[key][0] is not None:
if jtree[node][0] is None:
# jtree[node][0] = jtree[key][0]
jtree[node] = (jtree[key][0], jtree[node][1])
else:
for tmpkey in jtree[key][0]:
if tmpkey in jtree[node][0]:
for tmpkey2 in jtree[key][0][tmpkey]:
if tmpkey2 in jtree[node][0][tmpkey]:
jtree[node][0][tmpkey][tmpkey2] = (
jtree[node][0][tmpkey][tmpkey2]
+ jtree[key][0][tmpkey][tmpkey2]
)
else:
jtree[node][0][tmpkey][tmpkey2] = jtree[key][0][
tmpkey
][tmpkey2]
else:
jtree[node][0][tmpkey] = jtree[key][0][tmpkey]
if jtree[key][1] is not None:
if jtree[node][1] is None:
# jtree[node][1]=jtree[key][1]
jtree[node] = (jtree[node][0], jtree[key][1])
else:
for tmpkey in jtree[key][1]:
if tmpkey in jtree[node][1]:
for tmpkey2 in jtree[key][1][tmpkey]:
if tmpkey2 in jtree[node][1][tmpkey]:
jtree[node][1][tmpkey][tmpkey2] = (
jtree[node][1][tmpkey][tmpkey2]
+ jtree[key][1][tmpkey][tmpkey2]
)
else:
jtree[node][1][tmpkey][tmpkey2] = jtree[key][1][
tmpkey
][tmpkey2]
else:
jtree[node][1][tmpkey] = jtree[key][1][tmpkey]
child = child + 1
key = str(hash(node + "#" + str(child)))
# print jtree[node]
if (jtree[node][0] is not None) and (jtree[node][1] is not None):
for lkey in jtree[node][0]:
if lkey in jtree[node][1]:
tmpk = 0
for fkey1 in jtree[node][0][lkey]:
for fkey2 in jtree[node][1][lkey]:
tmpk = tmpk + lambda_par ** lkey[1] * jtree[node][0][
lkey
][fkey1] * jtree[node][1][lkey][fkey2] * math.exp(
-param * (fkey1 + fkey2)
)
kvalue = kvalue + (gamma_par ** depth) * tmpk * math.exp(
2 * param * depth
)
return kvalue
def evaluate(self, a, b):
tree1 = deepcopy(a.kernelpdakrepr.sids)
tree2 = deepcopy(b.kernelpdakrepr.sids)
m = self.mergetrees_with_depth(tree1, tree2)
kvalue = self.visit_with_depth(
m, str(hash("0")), 1, self.l, self.gamma, self.beta
)
del m, tree1, tree2
return kvalue
class KernelPdakMine(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
self.cache = Cache()
self.cachesize = 10000
def preProcess(self, t):
if hasattr(t, "kernelpdakrepr"): # already preprocessed
return
if not hasattr(t.root, "stsize"):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.computeRoutes()
t.kernelpdakrepr = tree.SubtreeIDSubtreeSizeRouteList(t.root)
t.kernelpdakrepr.sort()
# print t.kernelpdakrepr.sids
def ntk(self, ra, da, rb, db, hra, hrb):
if hra < hrb:
tmpkey = str(hra) + "#" + str(hrb)
else:
tmpkey = str(hrb) + "#" + str(hra)
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
lena, lenb = len(ra), len(rb)
c, p, minlen = 0, 0, min(lena, lenb)
while c < minlen and ra[c] == rb[c]:
if ra[c] == "#":
p += 1
c += 1
# print "p = ", p, "da, db", da, db, ra, rb
if self.gamma == 1:
r = (p + 1) * (math.e ** (-self.beta * (da + db - 2 * p)))
else:
r = (
(1 - self.gamma ** (p + 1))
/ (1 - self.gamma)
* (math.e ** (-self.beta * (da + db - 2 * p)))
)
if len(self.cache) > self.cachesize:
self.cache.removeAll()
self.cache.insert(tmpkey, r)
return r
# if self.gamma == 1:
# return (p+1)*(math.e**(-self.beta*(da + db - 2*p)))
# else:
# return (1-self.gamma**(p+1))/(1-self.gamma)*(math.e**(-self.beta*(da + db - 2*p)))
def evaluate(self, a, b):
ha, hb = (a.kernelpdakrepr, b.kernelpdakrepr)
# print ha, hb
# Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize, route)
# a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i, j, k, toti, totj = (0, 0, 0, len(ha), len(hb))
while i < toti and j < totj:
if ha.getLabel(i) == hb.getLabel(j):
ci, cj = (i, j)
while i < toti and ha.getLabel(i) == ha.getLabel(ci):
j = cj
while j < totj and hb.getLabel(j) == hb.getLabel(cj):
cst = self.l
if ha.getSubtreeID(i) == hb.getSubtreeID(j):
cst += self.l ** ha.getSubtreeSize(i)
# print ha.getLabel(i), hb.getLabel(j), cst, self.ntk(ha.getRoute(i), ha.getDepth(i), hb.getRoute(j), hb.getDepth(j))
k += cst * self.ntk(
ha.getRoute(i),
ha.getDepth(i),
hb.getRoute(j),
hb.getDepth(j),
ha.getRouteHash(i),
hb.getRouteHash(j),
)
j += 1
i += 1
elif ha.getLabel(i) < hb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdakFast(KernelPdak):
def preProcess(self, t):
if hasattr(t, "kernelpdakrepr"): # already preprocessed
return
if not hasattr(t.root, "stsize"):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
a = tree.SubtreePositionIDSubtreeIDSubtreeSizeListLabel(t.root)
t.kernelpdakrepr = (a.sids, a.pinv)
def mergetrees_with_depth_del_labels(self, tree_labels1, tree_labels2):
        tree1, labels1 = tree_labels1
        tree2, labels2 = tree_labels2
merge = {}
match = 0
for key in tree1:
if key in tree2:
if tree1[key][0] in labels2:
match = match + 1
if tree2[key][0] in labels1:
merge[key] = (
{(tree1[key][0], tree1[key][1]): 0},
{(tree2[key][0], tree2[key][1]): 0},
)
else:
merge[key] = ({(tree1[key][0], tree1[key][1]): 0}, {})
else:
if tree2[key][0] in labels1:
merge[key] = ({}, {(tree2[key][0], tree2[key][1]): 0})
match = match + 1
else:
merge[key] = ({}, {})
del tree2[key]
else:
if tree1[key][0] in labels2:
merge[key] = ({(tree1[key][0], tree1[key][1]): 0}, {})
match = match + 1
else:
merge[key] = ({}, {})
for key in tree2:
if tree2[key][0] in labels1:
merge[key] = ({}, {(tree2[key][0], tree2[key][1]): 0})
match = match + 1
else:
merge[key] = ({}, {})
return (merge, match)
def visit_with_depth(self, jtree, node, depth, param, lambda_par, gamma_par):
kvalue = 0
tmpk = 0
if node is not None:
child = 0
key = str(hash(node + "#" + str(child)))
startkey = key
max_size = [0, None]
while key in jtree:
kvalue = kvalue + self.visit_with_depth(
jtree, key, depth + 1, param, lambda_par, gamma_par
)
if (len(jtree[key][0]) + len(jtree[key][1])) > max_size[0]:
max_size[0] = len(jtree[key][0]) + len(jtree[key][1])
max_size[1] = key
child = child + 1
key = str(hash(node + "#" + str(child)))
# print 'max_size',max_size[0]
if max_size[0] > 0:
child = 0
while startkey in jtree:
if startkey != max_size[1]:
if jtree[startkey][0] is not {}:
for tmpkey in jtree[startkey][0]:
                                # kernel computation
if tmpkey in jtree[max_size[1]][1]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* jtree[startkey][0][tmpkey]
* jtree[max_size[1]][1][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* jtree[startkey][0][tmpkey]
* jtree[max_size[1]][1][tmpkey]
)
                                # end of kernel computation, start insertion
if jtree[startkey][1] is not {}:
for tmpkey in jtree[startkey][1]:
                                # kernel computation
if tmpkey in jtree[max_size[1]][0]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* jtree[startkey][1][tmpkey]
* jtree[max_size[1]][0][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* jtree[startkey][1][tmpkey]
* jtree[max_size[1]][0][tmpkey]
)
                                # end of kernel computation, start insertion
if tmpkey in jtree[max_size[1]][1]:
jtree[max_size[1]][1][tmpkey] = (
jtree[max_size[1]][1][tmpkey]
+ jtree[startkey][1][tmpkey]
)
else:
jtree[max_size[1]][1][tmpkey] = jtree[startkey][1][
tmpkey
]
                            # also merge the hash-0 entries
for tmpkey in jtree[startkey][0]:
if tmpkey in jtree[max_size[1]][0]:
jtree[max_size[1]][0][tmpkey] = (
jtree[max_size[1]][0][tmpkey]
+ jtree[startkey][0][tmpkey]
)
else:
jtree[max_size[1]][0][tmpkey] = jtree[startkey][0][
tmpkey
]
# next child
child = child + 1
startkey = str(hash(node + "#" + str(child)))
                # end of the loop over children
                if jtree[node][0]:
                    for tmpkey in jtree[node][0]:
                    # compute kernel contribution
if tmpkey in jtree[max_size[1]][1]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][1][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][1][tmpkey]
)
                    # end of kernel computation, start insertion
if tmpkey in jtree[max_size[1]][0]:
jtree[max_size[1]][0][tmpkey] = jtree[max_size[1]][0][
tmpkey
] + math.exp(-param * depth)
else:
jtree[max_size[1]][0][tmpkey] = math.exp(-param * depth)
                if jtree[node][1]:
                    for tmpkey in jtree[node][1]:
                    # compute kernel contribution
if tmpkey in jtree[max_size[1]][0]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][0][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][0][tmpkey]
)
                    # end of kernel computation, start insertion
if tmpkey in jtree[max_size[1]][1]:
jtree[max_size[1]][1][tmpkey] = jtree[max_size[1]][1][
tmpkey
] + math.exp(-param * depth)
else:
jtree[max_size[1]][1][tmpkey] = math.exp(-param * depth)
jtree[node] = (jtree[max_size[1]][0], jtree[max_size[1]][1])
else:
for tmpkey in jtree[node][0]:
jtree[node][0][tmpkey] = math.exp(-param * depth)
for tmpkey in jtree[node][1]:
jtree[node][1][tmpkey] = math.exp(-param * depth)
            if jtree[node][0] and jtree[node][1]:
for tmpkey in jtree[node][0]:
if tmpkey in jtree[node][1]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* jtree[node][0][tmpkey]
* jtree[node][1][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* jtree[node][0][tmpkey]
* jtree[node][1][tmpkey]
)
return kvalue + tmpk * math.exp(2 * param * depth)
def evaluate(self, a, b):
tree1 = deepcopy(a.kernelpdakrepr)
tree2 = deepcopy(b.kernelpdakrepr)
(m, match) = self.mergetrees_with_depth_del_labels(tree1, tree2)
kvalue = 0
if match > 0:
kvalue = self.visit_with_depth(
m, str(hash("0")), 1, self.l, self.gamma, self.beta
)
del m, tree1, tree2
return kvalue
####
class Cache:
# An extremely simple cache
def __init__(self):
self.cache = {}
self.size = 0
def exists(self, key):
return key in self.cache
def existsPair(self, keya, keyb):
if keya < keyb:
tmpkey = str(keya) + "#" + str(keyb)
else:
tmpkey = str(keyb) + "#" + str(keya)
return tmpkey in self.cache
def insert(self, key, value):
self.cache[key] = value
self.size += 1
def insertPairIfNew(self, keya, keyb):
if keya < keyb:
tmpkey = str(keya) + "#" + str(keyb)
else:
tmpkey = str(keyb) + "#" + str(keya)
        if tmpkey not in self.cache:
            self.insert(tmpkey, None)
def remove(self, key):
del self.cache[key]
self.size -= 1
def removeAll(self):
self.cache = {}
self.size = 0
def read(self, key):
return self.cache[key]
def __len__(self):
return self.size
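# Illustrative usage sketch (not part of the original module): memoizing a
# pairwise value under an order-independent pair key. The keys and the 0.75
# value below are made up for demonstration.
if __name__ == "__main__":
    cache = Cache()
    cache.insert("t1#t2", 0.75)
    print(cache.existsPair("t2", "t1"))  # True: the pair key ignores argument order
    print(cache.read("t1#t2"))           # 0.75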
| 28,093
| 14
| 1,202
|
332475d0ce0bb3d099c2199a8d805fa2ce7c35ea
| 3,005
|
py
|
Python
|
backend/src/board/views/api/v1/telegram.py
|
baealex/Medium-Clone-in-Django
|
1f388ca9d75c05ce3100dfb7ad58ef751c964d19
|
[
"MIT"
] | null | null | null |
backend/src/board/views/api/v1/telegram.py
|
baealex/Medium-Clone-in-Django
|
1f388ca9d75c05ce3100dfb7ad58ef751c964d19
|
[
"MIT"
] | null | null | null |
backend/src/board/views/api/v1/telegram.py
|
baealex/Medium-Clone-in-Django
|
1f388ca9d75c05ce3100dfb7ad58ef751c964d19
|
[
"MIT"
] | null | null | null |
import json
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from PIL import Image, ImageFilter
from board.models import TelegramSync
from board.modules.response import StatusDone, StatusError
from modules.subtask import sub_task_manager
from modules.telegram import TelegramBot
from modules.randomness import randstr
| 41.164384
| 125
| 0.560067
|
import json
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from PIL import Image, ImageFilter
from board.models import TelegramSync
from board.modules.response import StatusDone, StatusError
from modules.subtask import sub_task_manager
from modules.telegram import TelegramBot
from modules.randomness import randstr
def telegram(request, parameter):
if parameter == 'webHook':
if request.method == 'POST':
print(request.body.decode("utf-8"))
bot = TelegramBot(settings.TELEGRAM_BOT_TOKEN)
            req_userid = None
            try:
req = json.loads(request.body.decode("utf-8"))
req_userid = req['message']['from']['id']
req_token = req['message']['text']
telegram_sync = TelegramSync.objects.get(auth_token=req_token)
if telegram_sync:
if not telegram_sync.is_token_expire():
telegram_sync.tid = req_userid
telegram_sync.auth_token = ''
telegram_sync.save()
sub_task_manager.append(lambda: bot.send_message(req_userid, '정상적으로 연동되었습니다.'))
else:
telegram_sync.auth_token = ''
telegram_sync.save()
sub_task_manager.append(lambda: bot.send_message(req_userid, '기간이 만료된 토큰입니다. 홈페이지에서 연동을 다시 시도하십시오.'))
            except Exception:
                # Unknown token or unexpected payload: reply with a generic
                # message, but only if the sender id could be parsed.
                message = '블렉스 다양한 정보를 살펴보세요!\n\n' + settings.SITE_URL + '/notion'
                if req_userid is not None:
                    sub_task_manager.append(lambda: bot.send_message(req_userid, message))
return StatusDone()
if parameter == 'makeToken':
if request.method == 'POST':
token = randstr(6)
has_token = TelegramSync.objects.filter(auth_token=token)
while len(has_token) > 0:
token = randstr(6)
has_token = TelegramSync.objects.filter(auth_token=token)
if hasattr(request.user, 'telegramsync'):
telegramsync = request.user.telegramsync
telegramsync.auth_token = token
telegramsync.auth_token_exp = timezone.now()
telegramsync.save()
return StatusDone({
'token': token
})
else:
telegramsync = TelegramSync(user=request.user)
telegramsync.auth_token = token
telegramsync.save()
return StatusDone({
'token': token
})
if parameter == 'unsync':
if request.method == 'POST':
if hasattr(request.user, 'telegramsync'):
telegramsync = request.user.telegramsync
if not telegramsync.tid == '':
telegramsync.delete()
return StatusDone()
return StatusError('AE', '이미 연동이 해제되었습니다.')
raise Http404
| 2,745
| 0
| 23
|
162b83394afde2c91cf06578b6a90603be379765
| 469
|
py
|
Python
|
Question_nlp/answers/onehot.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 10
|
2021-12-17T06:07:25.000Z
|
2022-03-25T13:50:05.000Z
|
Question_nlp/answers/onehot.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | null | null | null |
Question_nlp/answers/onehot.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 2
|
2022-03-15T02:42:09.000Z
|
2022-03-30T23:19:55.000Z
|
_chars = "あいうおえかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんがぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽぁぃぅぇぉゃゅょっー1234567890!?、。"
chars = [c for c in _chars]
print(data_load())
| 23.45
| 106
| 0.556503
|
_chars = "あいうおえかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんがぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽぁぃぅぇぉゃゅょっー1234567890!?、。"
chars = [c for c in _chars]
def data_load():
fname = 'sandwitchman.txt'
xs = []
with open(fname, 'r') as f:
for l in f.readlines():
l = l.strip()
for c in l:
x = [0 for _ in range(len(chars))]
x[chars.index(c)] = 1
xs.append(x)
return xs
print(data_load())
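# Illustrative inverse mapping (not part of the original script): recover the
# text from the one-hot rows produced by data_load().
def onehot_decode(xs):
    return ''.join(chars[row.index(1)] for row in xs)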
| 291
| 0
| 23
|
12774b82ae587f55749e4d546d6743b03cdf3463
| 590
|
py
|
Python
|
problems/593.Valid_Square/li.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/593.Valid_Square/li.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/593.Valid_Square/li.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
# coding=utf-8
# Author: Jianghan LI
# Question: 593.Valid_Square
# Date: 2017-05-22
| 26.818182
| 137
| 0.477966
|
# coding=utf-8
# Author: Jianghan LI
# Question: 593.Valid_Square
# Date: 2017-05-22
class Solution(object):
def validSquare(self, p1, p2, p3, p4):
"""
:type p1: List[int]
:type p2: List[int]
:type p3: List[int]
:type p4: List[int]
:rtype: bool
"""
p1, p2, p3, p4 = sorted([p1, p2, p3, p4])
def isRight(a, b, c):
return (a[1] - b[1]) * (c[1] - b[1]) + (a[0] - b[0]) * (c[0] - b[0]) == 0 and abs(a[1] - b[1]) == abs(c[0] - b[0]) and a != b
return isRight(p2, p1, p3) and isRight(p2, p4, p3)
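# Illustrative quick check (not part of the original submission): a unit square
# should validate, a degenerate quadrilateral with a repeated point should not.
if __name__ == "__main__":
    s = Solution()
    print(s.validSquare([0, 0], [0, 1], [1, 1], [1, 0]))  # expected: True
    print(s.validSquare([0, 0], [0, 0], [1, 1], [1, 0]))  # expected: False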
| 138
| 343
| 23
|
f1f4c0e148288296136b5caf6748c31645cc02a9
| 769
|
py
|
Python
|
Tuples and Sets - Exercise/07. Battle of Names.py
|
B3WD/python_advanced
|
477b2eac41f1ec5a172d612afda1096a9d7fb2f5
|
[
"MIT"
] | 1
|
2020-10-28T07:52:17.000Z
|
2020-10-28T07:52:17.000Z
|
Tuples and Sets - Exercise/07. Battle of Names.py
|
B3WD/python_advanced_softuni
|
477b2eac41f1ec5a172d612afda1096a9d7fb2f5
|
[
"MIT"
] | null | null | null |
Tuples and Sets - Exercise/07. Battle of Names.py
|
B3WD/python_advanced_softuni
|
477b2eac41f1ec5a172d612afda1096a9d7fb2f5
|
[
"MIT"
] | null | null | null |
lines_count = int(input())
lines = [input() for _ in range(lines_count)]
solve(lines)
| 27.464286
| 62
| 0.587776
|
def solve(lines):
results_odd = set()
results_even = set()
for i, name in enumerate(lines):
ascii_sum = 0
for char in name:
ascii_sum += ord(char)
result = int(ascii_sum / (i + 1))
if result % 2 != 0:
results_odd.add(result)
else:
results_even.add(result)
if sum(results_odd) == sum(results_even):
print(", ".join(map(str, results_odd | results_even)))
elif sum(results_odd) > sum(results_even):
print(", ".join(map(str, results_odd - results_even)))
elif sum(results_odd) < sum(results_even):
print(", ".join(map(str, results_odd ^ results_even)))
lines_count = int(input())
lines = [input() for _ in range(lines_count)]
solve(lines)
| 658
| 0
| 22
|
bc177fd76c0ff5e5ec9943fd6750ff2dfe5cb200
| 3,707
|
py
|
Python
|
sdk/python/touca/_printer.py
|
trytouca/trytouca
|
eae38a96407d1ecac543c5a5fb05cbbe632ddfca
|
[
"Apache-2.0"
] | 6
|
2022-03-19T02:57:11.000Z
|
2022-03-31T16:34:34.000Z
|
sdk/python/touca/_printer.py
|
trytouca/trytouca
|
eae38a96407d1ecac543c5a5fb05cbbe632ddfca
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/touca/_printer.py
|
trytouca/trytouca
|
eae38a96407d1ecac543c5a5fb05cbbe632ddfca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Touca, Inc. Subject to Apache-2.0 License.
import math
from pathlib import Path
from colorama import Style, Fore, Back, init
init()
| 34.971698
| 88
| 0.534394
|
# Copyright 2021 Touca, Inc. Subject to Apache-2.0 License.
import math
from pathlib import Path
from colorama import Style, Fore, Back, init
init()
class Printer:
def print_warning(fmt: str, *args, **kwargs):
print(f"{Fore.YELLOW}{fmt.format(*args, **kwargs)}{Fore.RESET}")
def print_error(fmt: str, *args, **kwargs):
import sys
print(f"{Fore.RED}{fmt.format(*args, **kwargs)}{Fore.RESET}", file=sys.stderr)
def print_app_header():
print("\nTouca Test Framework")
def print_app_footer():
print("\n✨ Ran all test suites.\n")
def __init__(self, options):
self.options = options
self.testcase_width = max(len(k) for k in options.get("testcases"))
self.testcase_count = len(options.get("testcases"))
def print_line(self, fmt: str, *args, **kwargs):
msg = fmt.format(*args, **kwargs) if args or kwargs else fmt
if self.options.get("colored-output"):
print(msg)
return
import re
line = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])").sub("", msg)
print(line)
def print_header(self):
revision = "/".join([self.options.get(k) for k in ["suite", "version"]])
self.print_line("\nSuite: {:s}\n", revision)
def print_progress(self, timer, testcase, idx, status, errors=[]):
states = {
"pass": ("PASS", Back.GREEN),
"skip": ("SKIP", Back.YELLOW),
"fail": ("FAIL", Back.RED),
}
performance = (
""
if status == "skip"
else " {dim}({timer:d} ms){reset}".format(
dim=Style.DIM,
reset=Style.NORMAL,
timer=timer.count(testcase),
)
)
progress = " {number:>{width}d}{dim}.{reset}".format(
dim=Style.DIM,
reset=Style.NORMAL,
number=idx + 1,
count=self.testcase_count,
width=int(math.log10(self.testcase_count)) + 1,
)
badge = "{bg_color} {text} {bg_reset}".format(
bg_color=states.get(status)[1],
bg_reset=Back.RESET,
text=states.get(status)[0],
)
self.print_line(
"{progress} {badge} {testcase:<{testcase_width}s}{performance}",
badge=badge,
progress=progress,
testcase=testcase,
testcase_width=self.testcase_width + 3,
performance=performance,
)
if errors:
self.print_line("\n {}Exception Raised:{}", Style.DIM, Style.NORMAL)
self.print_line("\n".join(f" - {error}\n" for error in errors))
def print_footer(self, stats, timer, options):
states = [
("pass", "passed", Fore.GREEN),
("skip", "skipped", Fore.YELLOW),
("fail", "failed", Fore.RED),
]
messages = []
for state in states:
if not stats.count(state[0]):
continue
messages.append(f"{state[2]}{stats.count(state[0])} {state[1]}{Fore.RESET}")
messages.append(f"{self.testcase_count} total")
left_pad = int(math.log10(self.testcase_count)) + 11
self.print_line("\n{:s} {:s}", "Tests:".ljust(left_pad), ", ".join(messages))
self.print_line(
"{:s} {:.2f} s", "Time:".ljust(left_pad), timer.count("__workflow__") / 1000
)
if any(map(options.get, ["save-as-binary", "save-as-json"])):
results_dir = Path(
*map(options.get, ["output-directory", "suite", "version"])
)
self.print_line("{:s} {}", "Results:".ljust(left_pad), results_dir)
| 3,299
| -7
| 265
|
55dadc90c2f9fdbe258867ab069d2c73b2196055
| 394
|
py
|
Python
|
siphon/__init__.py
|
DanielWatkins/siphon
|
4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2
|
[
"BSD-3-Clause"
] | 1
|
2019-05-31T14:02:08.000Z
|
2019-05-31T14:02:08.000Z
|
siphon/__init__.py
|
DanielWatkins/siphon
|
4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2
|
[
"BSD-3-Clause"
] | 147
|
2021-03-06T01:01:13.000Z
|
2022-03-30T22:18:18.000Z
|
siphon/__init__.py
|
DanielWatkins/siphon
|
4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013-2015 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for accessing atmospheric and oceanic science data on remote servers."""
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 39.4
| 81
| 0.786802
|
# Copyright (c) 2013-2015 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for accessing atmospheric and oceanic science data on remote servers."""
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 0
| 0
| 0
|
d33d5c69746ae0b6fef212b68e6a477d56434fb5
| 88
|
py
|
Python
|
Python/B2025.py
|
Epoch1017/LintCode-Shared-Solutions
|
d1559ef96917c4255e1ce2cf25ef17edec596ac3
|
[
"CC0-1.0"
] | null | null | null |
Python/B2025.py
|
Epoch1017/LintCode-Shared-Solutions
|
d1559ef96917c4255e1ce2cf25ef17edec596ac3
|
[
"CC0-1.0"
] | null | null | null |
Python/B2025.py
|
Epoch1017/LintCode-Shared-Solutions
|
d1559ef96917c4255e1ce2cf25ef17edec596ac3
|
[
"CC0-1.0"
] | null | null | null |
# B2025-输出字符菱形
print(" *")
print(" ***")
print("*****")
print(" ***")
print(" *")
| 14.666667
| 15
| 0.409091
|
# B2025-输出字符菱形
print(" *")
print(" ***")
print("*****")
print(" ***")
print(" *")
| 0
| 0
| 0
|
5fe8ca8ff1e4826dd6cfd6ad23878adf5707eed5
| 3,733
|
py
|
Python
|
brilhack/basic_blocks.py
|
yati-sagade/brilhack
|
700e95dc5f5ba7f156ac405d0339dd03bd0de021
|
[
"MIT"
] | null | null | null |
brilhack/basic_blocks.py
|
yati-sagade/brilhack
|
700e95dc5f5ba7f156ac405d0339dd03bd0de021
|
[
"MIT"
] | null | null | null |
brilhack/basic_blocks.py
|
yati-sagade/brilhack
|
700e95dc5f5ba7f156ac405d0339dd03bd0de021
|
[
"MIT"
] | null | null | null |
import copy
from .util import is_terminator, is_label, mklabel
| 31.905983
| 86
| 0.554246
|
import copy
from .util import is_terminator, is_label, mklabel
def _make_blocks(instrs):
blocks = []
label_index = {}
curr_block = []
for instr in instrs:
if not is_label(instr):
curr_block.append(instr)
if is_label(instr) or is_terminator(instr):
if curr_block:
blocks.append(curr_block)
curr_block = []
if is_label(instr):
label_index[instr['label']] = len(blocks)
# Technically we don't need the label here, but keeping it
# makes generating bril again slightly easier.
curr_block.append(instr)
if curr_block:
blocks.append(curr_block)
return blocks, label_index
def _build_cfg(blocks, label_index):
cfg = []
for i, block in enumerate(blocks):
last = block[-1]
if is_terminator(last):
next_blocks = [label_index[label] for label in last['labels']]
else:
next_blocks = [i + 1]
cfg.append(next_blocks)
# If the last instr of the last block is not a terminator, we have a dummy
# edge to a non-existent block. If this is the case, make sure this dummy
# block has an empty adjlist.
if cfg and cfg[-1] == [len(blocks)]:
cfg.append([])
return cfg
class Function:
def __init__(self,
name,
args,
instrs=None,
blocks=None,
label_index=None,
block_exits=None):
self.name = name
assert (instrs is None) ^ (blocks is None and label_index is None),\
"Either instrs, or (blocks and label_index and block_exits) must be given"
if instrs is not None:
self.blocks, self.label_index = _make_blocks(instrs)
else:
self.blocks = blocks
self.label_index = label_index
self.args = args
# Control flow information.
# Map from block index to a list of block indices where control can
# reach from it.
if block_exits is None:
self.block_exits = _build_cfg(self.blocks, self.label_index)
else:
self.block_exits = block_exits
def to_bril(self):
return {
'name': self.name,
'args': self.args,
'instrs': [instr for block in self.blocks for instr in block]
}
@classmethod
def filter_copy(cls, other, exclude=None):
"""`exclude` is a set of (block_idx, instr_idx) pairs which are
excluded from the copy."""
f = cls(name=other.name,
args=other.args,
blocks=[],
label_index=copy.deepcopy(other.label_index),
block_exits=copy.deepcopy(other.block_exits))
for block_idx, block in enumerate(other.blocks):
b = []
for instr_idx, instr in enumerate(block):
if not exclude or (block_idx, instr_idx) not in exclude:
b.append(copy.deepcopy(instr))
f.blocks.append(b)
return f
def copy(self):
return self.__class__.filter_copy(self)
class BBProgram:
def __init__(self, prog=None):
# Map from function to list of basic blocks in it.
self.funcs = {}
if prog is not None:
for func in prog['functions']:
self.funcs[func['name']] = Function(name=func['name'],
args=func.get('args', []),
instrs=func['instrs'])
def bril_dict(self):
return {'functions': [func.to_bril() for func in self.funcs.values()]}
| 2,760
| 760
| 145
|
73ac7853a328732454c3edf704f71354323b832f
| 1,215
|
py
|
Python
|
tests/pages/rollups.py
|
uk-gov-mirror/alphagov.notifications-functional-tests
|
899032ff637161c1ab8d058555cb0326ec04b1be
|
[
"MIT"
] | null | null | null |
tests/pages/rollups.py
|
uk-gov-mirror/alphagov.notifications-functional-tests
|
899032ff637161c1ab8d058555cb0326ec04b1be
|
[
"MIT"
] | null | null | null |
tests/pages/rollups.py
|
uk-gov-mirror/alphagov.notifications-functional-tests
|
899032ff637161c1ab8d058555cb0326ec04b1be
|
[
"MIT"
] | null | null | null |
from config import config
from tests.pages import SignInPage
from tests.test_utils import do_verify, do_email_auth_verify
| 34.714286
| 102
| 0.723457
|
from config import config
from tests.pages import SignInPage
from tests.test_utils import do_verify, do_email_auth_verify
def sign_in(driver, seeded=False):
_sign_in(driver, 'seeded' if seeded else 'normal')
do_verify(driver)
def sign_in_email_auth(driver):
_sign_in(driver, 'email_auth')
assert driver.current_url == config['notify_admin_url'] + '/two-factor-email-sent'
do_email_auth_verify(driver)
def _sign_in(driver, account_type):
sign_in_page = SignInPage(driver)
sign_in_page.get()
assert sign_in_page.is_current()
email, password = get_email_and_password(account_type=account_type)
sign_in_page.login(email, password)
def get_email_and_password(account_type):
if account_type == 'normal':
return config['user']['email'], config['user']['password']
elif account_type == 'seeded':
return config['service']['seeded_user']['email'], config['service']['seeded_user']['password']
elif account_type == 'email_auth':
# has the same password as the seeded user
return config['service']['email_auth_account'], config['service']['seeded_user']['password']
raise Exception('unknown account_type {}'.format(account_type))
| 996
| 0
| 92
|
6adbb154ad2791f017881d82ab4f304608aa3b72
| 21,792
|
py
|
Python
|
rave/filesystem.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | 5
|
2015-03-18T01:19:56.000Z
|
2020-10-23T12:44:47.000Z
|
rave/filesystem.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | null | null | null |
rave/filesystem.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | null | null | null |
# rave's virtual file system.
import os
import io
import re
import threading
import collections
import rave.common
import rave.log
_log = rave.log.get(__name__)
# Canonical path separator.
PATH_SEPARATOR = '/'
# Root.
ROOT = '/'
# Unnormalized path pattern.
BAD_PATH_PATTERN = re.compile(r'(?:{0}{{2,}}|(?:{0}|^)\.+(?:{0}|$))'.format(PATH_SEPARATOR))
# Various standard mount points.
ENGINE_MOUNT = '/.rave'
MODULE_MOUNT = '/.modules'
GAME_MOUNT = '/'
COMMON_MOUNT = '/.common'
class File(io.IOBase):
"""
An open file in the virtual file system.
Subclasses are expected to at least override the following:
- opened()
- readable() (if readable, returns False by default)
- writable() (if writable, returns False by default)
- seekable() (if seekable, returns False by default)
- close()
- read(amount=None) (if readable, raises FileNotReadable by default)
- write(data) (if writable, raises FileNotWritable by default)
- seek(position, mode) (if seekable, raises FileNotSeekable by default)
- tell() (if seekable, raises FileNotSeekable by default)
"""
def close(self):
""" Close file. Any operation on the file after calling this method will fail with `FileClosed` raised. """
raise NotImplementedError
def opened(self):
""" Return whether this file is open. """
raise NotImplementedError
def readable(self):
""" Return whether this file is readable. """
return False
def writable(self):
""" Return whether this file is writable. """
return False
def seekable(self):
""" Return whether this file is seeekable. """
return False
def read(self, amount=None):
""" Read `amount` bytes from file. Will read full contents if `amount` is not given. """
raise FileNotReadable(self)
def write(self, data):
""" Write `data` to file. """
raise FileNotWritable(self)
def seek(self, position, mode=os.SEEK_CUR):
""" Seek in file. May raise `FileNotSeekable` if this file can't be seeked in. """
raise FileNotSeekable(self)
def tell(self):
""" Tell current file position. May raise `FileNotSeekable` if this file can't be seeked in. """
raise FileNotSeekable(self)
class FileSystemProvider:
""" A provider to mount a filesystem within another filesystem. """
## Stateful API.
| 36.686869
| 149
| 0.62321
|
# rave's virtual file system.
import os
import io
import re
import threading
import collections
import rave.common
import rave.log
_log = rave.log.get(__name__)
# Canonical path separator.
PATH_SEPARATOR = '/'
# Root.
ROOT = '/'
# Unnormalized path pattern.
BAD_PATH_PATTERN = re.compile(r'(?:{0}{{2,}}|(?:{0}|^)\.+(?:{0}|$))'.format(PATH_SEPARATOR))
# Various standard mount points.
ENGINE_MOUNT = '/.rave'
MODULE_MOUNT = '/.modules'
GAME_MOUNT = '/'
COMMON_MOUNT = '/.common'
class FileSystemError(rave.common.raveError, IOError):
def __init__(self, filename, message=None):
super().__init__(message or filename)
self.filename = filename
class NativeError(FileSystemError):
def __init__(self, filename, parent):
super().__init__(filename, message=repr(parent))
self.native_error = parent
class FileNotFound(FileSystemError, FileNotFoundError):
pass
class AccessDenied(FileSystemError, PermissionError):
pass
class FileNotReadable(FileSystemError, PermissionError, io.UnsupportedOperation):
pass
class FileNotWritable(FileSystemError, PermissionError, io.UnsupportedOperation):
pass
class FileNotSeekable(FileSystemError, PermissionError, io.UnsupportedOperation):
pass
class FileClosed(FileSystemError, BrokenPipeError):
pass
class NotAFile(FileSystemError, IsADirectoryError):
pass
class NotADirectory(FileSystemError, NotADirectoryError):
pass
class FileSystem:
def __init__(self):
# Lock when rebuilding cache or modifying the file system.
self._lock = threading.RLock()
# Clear the file system.
self.clear()
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def clear(self):
if hasattr(self, '_roots'):
_log.trace('Clearing file system...')
with self._lock:
# File system roots. A mapping of path -> [ list of providers ].
self._roots = {}
# Transforming providers. A mapping of extension -> [ list of providers ].
self._transformers = {}
# File/directory list cache. A mapping of filename -> [ list of providers ].
self._file_cache = None
# Directory content cache. A mapping of directory -> { set of direct contents }.
self._listing_cache = None
## Building file cache.
def _build_cache(self):
""" Rebuild internal file cache. This will make looking up files, errors notwithstanding, an O(1) lookup operation. """
_log.trace('Building cache...')
with self._lock:
self._file_cache = { ROOT: [] }
self._listing_cache = { ROOT: set() }
for root, providers in self._roots.items():
for provider in providers:
self._build_provider_cache(provider, root)
def _build_provider_cache(self, provider, root):
"""
Add provider to file cache. This will traverse the providers file and iteratively add them to the file cache.
This function will check if transformers exist for the file in the process, which might indirectly trigger recursion,
since a transformed file acts as a new provider.
"""
_log.trace('Caching mount point {root} <- {prov}...', prov=provider, root=root)
# Add root to cache.
self._cache_directory(provider, root, root)
# Traverse provider and add files and directories on the go.
for subpath in provider.list():
path = self.join(root, subpath)
if provider.isdir(subpath):
self._cache_directory(provider, root, path)
else:
self._cache_file(provider, root, path)
def _build_transformer_cache(self, transformer, pattern):
"""
Add `transformer` to file cache. This will search all existing files to look for files that match the `pattern`, and if so,
adds the transformer as a new provider for that file and optionally removes it if the transformer consumes the file.
"""
_log.trace('Caching {trans} for {pattern}...', trans=transformer, pattern=pattern.pattern)
# Traverse paths to find matching files.
for file in self._file_cache.copy():
if not pattern.search(file):
continue
# Gotcha.
try:
handle = self.open(file)
except:
_log.warn('Couldn\'t open {path} for transformer {transformer}. Moving on...'.format(path=file, transformer=transformer))
continue
self._cache_transformed_file(transformer, file, handle)
def _cache_directory(self, provider, root, path):
""" Add `path`, provided by `provider`, as a directory to the file cache. """
_log.trace('Caching directory: {path} <- {provider}...', path=path, provider=provider)
with self._lock:
self._listing_cache.setdefault(path, set())
self._cache_entry(provider, root, path)
def _cache_file(self, provider, root, path):
""" Add `path`, provided by `provider`, as a file to the file cache. """
_log.trace('Caching file: {path} <- {provider}...', path=path, provider=provider)
localpath = self._local_file(root, path)
for pattern, transformers in self._transformers.items():
if not pattern.search(path):
continue
consumed = False
for transformer in transformers:
try:
handle = provider.open(localpath)
except Exception as e:
_log.warn('Couldn\'t open {provider}:{path} for transformer {transformer}. Error: {err}',
provider=provider, path=localpath, transformer=transformer, err=e)
continue
consumed = self._cache_transformed_file(transformer, path, handle)
if consumed:
break
# Stop processing entirely if we have consumed the file.
if consumed:
_log.debug('Cached file {path} consumed by transformer.', path=path)
break
else:
# No transformers found for file, or file wasn't consumed. Add it to cache.
self._cache_entry(provider, root, path)
def _cache_entry(self, provider, root, path):
""" Add an entry at `path`, provided by `provider`, to the file cache. """
with self._lock:
self._file_cache.setdefault(path, [])
if provider and provider not in self._file_cache[path]:
self._file_cache[path].append((provider, root))
if path != ROOT:
parent = self.dirname(path)
if not self.exists(parent):
self._cache_directory(None, None, parent)
basename = self.basename(path)
self._listing_cache.setdefault(parent, set())
self._listing_cache[parent].add(basename)
def _cache_transformed_file(self, transformer, path, handle):
"""
Add a transformed file at `path`, transformed by `transformer`, to the file cache.
This will return whether or not the original file was consumed by `transformer`.
It might fail to add the transformed file to the file cache if the transformers raises an error.
If the transformer consumes the original file, this function will remove the original file from the file system,
if it exists on it.
"""
try:
instance = transformer(path, handle)
except Exception as e:
_log.warn('Error while transforming {path} with {transformer}: {err}', path=path, transformer=transformer, err=e)
return False
if not instance.valid():
return False
_log.trace('Caching transformed file: {path} <- {trans}...', path=path, trans=transformer)
# Determine root directory of files.
if instance.relative():
parentdir = self.dirname(path)
else:
parentdir = ROOT
# Mount as provider.
self._build_provider_cache(instance, parentdir)
if instance.consumes():
# Remove file cache for now-consumed file.
with self._lock:
if path in self._file_cache:
del self._file_cache[path]
return True
else:
return False
def _providers_for_file(self, path):
"""
Return a generator yielding (provider, mountpoint) tuples for all providers that provide given `path`.
Priority is done on a last-come last-serve basis: the last provider added that provides `path` is yielded first.
"""
if self._file_cache is None:
self._build_cache()
if path not in self._file_cache:
raise FileNotFound(path)
for provider, mountpoint in reversed(self._file_cache[path]):
yield provider, self._local_file(mountpoint, path)
def _local_file(self, root, path):
return path[len(root.rstrip(PATH_SEPARATOR)):]
## API.
def list(self, subdir=None):
""" List all files and directories in the root file system, or `subdir` if given, recursively. """
if self._file_cache is None:
self._build_cache()
if subdir is not None:
subdir = self.normalize(subdir)
if not self.isdir(subdir):
if not self.exists(subdir):
raise FileNotFound(subdir)
else:
raise NotADirectory(subdir)
files = { '/' }
to_process = collections.deque()
to_process.append(subdir)
while to_process:
target = to_process.popleft()
for entry in self._listing_cache[target]:
path = self.join(target, entry)
if self.isdir(path):
to_process.append(path)
files.add(path.replace(subdir, ''))
return files
else:
return set(self._file_cache)
def listdir(self, subdir=None):
""" List all files and directories in the root file system, or `subdir` is given. """
if self._file_cache is None:
self._build_cache()
if not subdir:
subdir = ROOT
else:
subdir = self.normalize(subdir)
if not self.isdir(subdir):
if not self.exists(subdir):
raise FileNotFound(subdir)
else:
raise NotADirectory(subdir)
return self._listing_cache[subdir]
def mount(self, path, provider):
"""
Mount `provider` at `path` in the virtual file system.
`provider` must be an object satisfying the following API:
- list(): return a list of all file names (including folders) this provider can provide.
- has(filename): return whether this provider can open given file.
- open(filename, **kwargs): open a file, has to raise one of the subclasses of `FileSystemError` on error, else return a subclass of `File`.
- isfile(filename): check if the given file is a file, should raise applicable `FileSystemError` subclass if applicable,
except for NotAFile/NotADirectory, or return a boolean.
- isdir(filename): check if the given file is a directory, should raise applicable `FileSystemError` subclass if applicable,
except for NotAFile/NotADirectory, or return a boolean.
A path or file can be provided by different providers. Their file lists will be merged.
Conflicting files will be handled as such:
- The last provider that has been mounted will serve the file first.
- If an error occurs while serving the file, the next provider according to these rules will serve it.
"""
path = self.normalize(path)
with self._lock:
self._roots.setdefault(path, [])
self._roots[path].append(provider)
_log.debug('Mounted {provider} on {path}.', provider=provider, path=path)
if self._file_cache is None:
self._build_cache()
else:
self._build_provider_cache(provider, path)
def unmount(self, path, provider):
""" Unmount `provider` from `path` in the virtual file system. Will trigger a full cache rebuild. """
path = self.normalize(path)
with self._lock:
self._roots[path].remove(provider)
_log.debug('Unmounted {provider} from {path}.', provider=provider, path=path)
self._build_cache()
def transform(self, pattern, transformer):
"""
TRANSFORMERS! TRANSFORMERS! MORE THAN MEETS THE EYE! TRANSFORMERS!
Add `transformer` as a transformer for files matching `pattern`.
`transformer` has to be a class(!) satisfying the provider API (see `mount`), plus the following API:
- __init__(filename, handle): initialize object, can raise any kind of error if the file is invalid.
`handle` is a `File` object pointing to the opened file.
- valid(): return whether the file is valid according to the format this transformer parses.
- consumes(): return whether the source file should be retained in the file system.
- relative(): return whether files exposed by this transformer should be relative to the path of the source file or absolute.
"""
pattern = re.compile(pattern, re.UNICODE)
with self._lock:
self._transformers.setdefault(pattern, [])
self._transformers[pattern].append(transformer)
_log.debug('Added transformer {transformer} for pattern {pattern}.', transformer=transformer, pattern=pattern.pattern)
if self._file_cache is None:
self._build_cache()
else:
self._build_transformer_cache(transformer, pattern)
def untransform(self, pattern, transformer):
""" Remove a transformer from the virtual file system. Will trigger a full cache rebuild. """
pattern = re.compile(pattern, re.UNICODE)
with self._lock:
self._transformers[pattern].remove(transformer)
_log.debug('Removed transformer {transformer} for pattern {pattern}.', transformer=transformer, pattern=pattern.pattern)
self._build_cache()
def open(self, filename, *args, **kwargs):
"""
Open `filename` and return a corresponding `File` object. Will raise `FileNotFound` if the file was not found.
Will only raise the error from the last attempted provider if multiple providers raise an error.
"""
error = None
filename = self.normalize(filename)
if self.isdir(filename):
raise NotAFile(filename)
for provider, localfile in self._providers_for_file(filename):
try:
_log.trace('Opening {filename} from {provider}...', filename=filename, provider=provider)
return provider.open(localfile, *args, **kwargs)
except FileNotFound:
continue
except FileSystemError as e:
error = e
if error:
raise error
else:
raise FileNotFound(filename)
def exists(self, filename):
""" Return whether or not `filename` exists. """
if self._file_cache is None:
self._build_cache()
filename = self.normalize(filename)
return filename in self._file_cache
def isdir(self, filename):
""" Return whether or not `filename` exists and is a directory. """
if self._file_cache is None:
self._build_cache()
filename = self.normalize(filename)
return filename in self._listing_cache
def isfile(self, filename):
""" Return whether or not `filename` exists and is a file. """
if self._file_cache is None:
self._build_cache()
filename = self.normalize(filename)
return filename in self._file_cache and filename not in self._listing_cache
def dirname(self, path):
""" Return the directory part of the given `path`. """
path = self.normalize(path)
return path.rsplit(PATH_SEPARATOR, 1)[0] or ROOT
def basename(self, path):
""" Return the filename part of the given `path`. """
if path == ROOT:
return ''
path = self.normalize(path)
return path.rsplit(PATH_SEPARATOR, 1)[1]
def join(self, *paths, normalized=True):
""" Join path components into a file system path. Optionally normalize the result. """
if normalized:
return self.normalize(PATH_SEPARATOR.join(paths))
return PATH_SEPARATOR.join(paths)
def split(self, path, *args, **kwargs):
""" Split path by path separator. """
return path.split(PATH_SEPARATOR, *args, **kwargs)
def normalize(self, path):
""" Normalize path to canonical path. """
# Quick check to see if we need to normalize at all.
if path.startswith(ROOT) and not BAD_PATH_PATTERN.search(path):
if path.endswith(PATH_SEPARATOR) and path != ROOT:
return path[:-len(PATH_SEPARATOR)]
return path
# Remove root.
if path.startswith(ROOT):
path = path[len(ROOT):]
# Split path into directory pieces and remove empty or redundant directories.
pieces = [ piece for piece in self.split(path) if piece and piece != '.' ]
# Remove parent directory entries.
while '..' in pieces:
i = pieces.index('..')
del pieces[i]
# The preceding directory too, of course.
if i > 0:
del pieces[i - 1]
return ROOT + self.join(*pieces, normalized=False)
class File(io.IOBase):
"""
An open file in the virtual file system.
Subclasses are expected to at least override the following:
- opened()
- readable() (if readable, returns False by default)
- writable() (if writable, returns False by default)
- seekable() (if seekable, returns False by default)
- close()
- read(amount=None) (if readable, raises FileNotReadable by default)
- write(data) (if writable, raises FileNotWritable by default)
- seek(position, mode) (if seekable, raises FileNotSeekable by default)
- tell() (if seekable, raises FileNotSeekable by default)
"""
def __del__(self):
try:
self.close()
except:
# Nothing we can do about it now, anyway.
pass
def close(self):
""" Close file. Any operation on the file after calling this method will fail with `FileClosed` raised. """
raise NotImplementedError
def opened(self):
""" Return whether this file is open. """
raise NotImplementedError
def readable(self):
""" Return whether this file is readable. """
return False
def writable(self):
""" Return whether this file is writable. """
return False
def seekable(self):
""" Return whether this file is seeekable. """
return False
def read(self, amount=None):
""" Read `amount` bytes from file. Will read full contents if `amount` is not given. """
raise FileNotReadable(self)
def write(self, data):
""" Write `data` to file. """
raise FileNotWritable(self)
def seek(self, position, mode=os.SEEK_CUR):
""" Seek in file. May raise `FileNotSeekable` if this file can't be seeked in. """
raise FileNotSeekable(self)
def tell(self):
""" Tell current file position. May raise `FileNotSeekable` if this file can't be seeked in. """
raise FileNotSeekable(self)
class FileSystemProvider:
""" A provider to mount a filesystem within another filesystem. """
def __init__(self, fs):
self.fs = fs
def __repr__(self):
return '<FileSystemProvider: {}>'.format(self.fs)
def list(self):
return self.fs.list()
def open(self, filename, *args, **kwargs):
return self.fs.open(filename, *args, **kwargs)
def has(self, filename):
return self.fs.isfile(filename)
def isfile(self, filename):
return self.fs.isfile(filename)
def isdir(self, filename):
return self.fs.isdir(filename)
## Stateful API.
def current():
import rave.game, rave.engine
game = rave.game.current()
if not game:
return rave.engine.engine.fs
return game.fs
def list(subdir=None):
return current().list(subdir)
def listdir(subdir=None):
return current().listdir(subdir)
def mount(path, provider):
return current().mount(path, provider)
def unmount(path, provider):
return current().unmount(path, provider)
def transform(pattern, transformer):
return current().transform(pattern, transformer)
def untransform(pattern, transformer):
return current().untransform(pattern, transformer)
def open(filename, *args, **kwargs):
return current().open(filename, *args, **kwargs)
def exists(filename):
return current().exists(filename)
def isfile(filename):
return current().isfile(filename)
def isdir(filename):
return current().isdir(filename)
def dirname(path):
return current().dirname(path)
def basename(path):
return current().basename(path)
def join(*paths, normalized=True):
return current().join(*paths, normalized=normalized)
def split(path, *args, **kwargs):
return current().split(path, *args, **kwargs)
def normalize(path):
return current().normalize(path)
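# Illustrative sketch (not part of rave itself): a minimal read-only provider
# built on plain dicts that satisfies the provider API documented in
# FileSystem.mount(). The names _BytesFile and DictProvider are hypothetical.
class _BytesFile(File):
    """ In-memory file handle backed by a bytes object. """
    def __init__(self, data):
        self._buffer = io.BytesIO(data)
        self._open = True
    def opened(self):
        return self._open
    def readable(self):
        return True
    def read(self, amount=None):
        if not self._open:
            raise FileClosed(self)
        return self._buffer.read(amount)
    def close(self):
        self._open = False

class DictProvider:
    """ Maps canonical paths (e.g. '/hello.txt') to byte strings. """
    def __init__(self, files):
        self._files = dict(files)
    def list(self):
        return list(self._files)
    def has(self, filename):
        return filename in self._files
    def open(self, filename, **kwargs):
        if filename not in self._files:
            raise FileNotFound(filename)
        return _BytesFile(self._files[filename])
    def isfile(self, filename):
        return filename in self._files
    def isdir(self, filename):
        return False

# Usage sketch:
#   fs = FileSystem()
#   fs.mount('/assets', DictProvider({'/hello.txt': b'hi'}))
#   fs.open('/assets/hello.txt').read()   # -> b'hi'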
| 2,375
| 16,110
| 880
|
dbf0337be9f2b428ff1ae5f70ee820bc2aaa584f
| 802
|
py
|
Python
|
geogebra_applet/views.py
|
Stasianna/geogebra-project
|
33ddf30ec8b001f86fb35d336b8d53bcdf69231b
|
[
"MIT"
] | null | null | null |
geogebra_applet/views.py
|
Stasianna/geogebra-project
|
33ddf30ec8b001f86fb35d336b8d53bcdf69231b
|
[
"MIT"
] | null | null | null |
geogebra_applet/views.py
|
Stasianna/geogebra-project
|
33ddf30ec8b001f86fb35d336b8d53bcdf69231b
|
[
"MIT"
] | null | null | null |
#from django.http import HttpResponse
#from django.http import response_redirect
from django.shortcuts import render_to_response
from django.views.generic import DetailView, CreateView, TemplateView
from geogebra_applet.models import GeogebraApplet
| 32.08
| 70
| 0.77182
|
#from django.http import HttpResponse
#from django.http import response_redirect
from django.shortcuts import render_to_response
from django.views.generic import DetailView, CreateView, TemplateView
from geogebra_applet.models import GeogebraApplet
class GeogebraAppletDetailView(DetailView):
model = GeogebraApplet
class MainPageView(TemplateView):
template_name = 'geogebra_applet/main_str.html'
def get_context_data(self, **kwargs):
context = super(MainPageView, self).get_context_data(**kwargs)
context['arhives'] = GeogebraApplet.objects.all()
return context
def ViewHtml(request, file):
copymodel = GeogebraApplet.objects.filter(id = file).first()
f = copymodel.index_file.open(mode="rb")
return render_to_response(copymodel.index_file.url)
| 341
| 140
| 69
|
637cd14234add4d98a4167907d1629a8dc3593e3
| 6,036
|
py
|
Python
|
test/test_diff_tar.py
|
gmertes/conda-mirror
|
34b206e19d8c858676ce2b707da15165578e6f79
|
[
"BSD-3-Clause"
] | 6
|
2020-10-09T15:55:57.000Z
|
2021-07-29T11:08:10.000Z
|
test/test_diff_tar.py
|
gmertes/conda-mirror
|
34b206e19d8c858676ce2b707da15165578e6f79
|
[
"BSD-3-Clause"
] | 34
|
2020-09-05T05:08:16.000Z
|
2022-03-09T15:13:55.000Z
|
test/test_diff_tar.py
|
gmertes/conda-mirror
|
34b206e19d8c858676ce2b707da15165578e6f79
|
[
"BSD-3-Clause"
] | 7
|
2020-09-07T09:45:59.000Z
|
2022-01-20T20:16:38.000Z
|
import os
import sys
import json
import shutil
import tempfile
from os.path import isfile, join
import pathlib
import pytest
import conda_mirror.diff_tar as dt
EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"
@pytest.fixture
| 29.588235
| 88
| 0.680583
|
import os
import sys
import json
import shutil
import tempfile
from os.path import isfile, join
import pathlib
import pytest
import conda_mirror.diff_tar as dt
EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"
@pytest.fixture
def tmpdir():
tmpdir = tempfile.mkdtemp()
dt.mirror_dir = join(tmpdir, "repo")
dt.DEFAULT_REFERENCE_PATH = join(tmpdir, "reference.json")
dt.DEFAULT_UPDATE_PATH = join(tmpdir, "updates.tar")
yield tmpdir
shutil.rmtree(tmpdir)
def test_md5_file(tmpdir):
tmpfile = join(tmpdir, "testfile")
with open(tmpfile, "wb") as fo:
fo.write(b"A\n")
assert dt.md5_file(tmpfile) == "bf072e9119077b4e76437a93986787ef"
def create_test_repo(subdirname="linux-64"):
subdir = join(dt.mirror_dir, subdirname)
os.makedirs(subdir)
with open(join(subdir, "repodata.json"), "w") as fo:
fo.write(json.dumps({"packages": {"a-1.0-0.tar.bz2": {"md5": EMPTY_MD5}}}))
for fn in "repodata.json.bz2", "a-1.0-0.tar.bz2":
with open(join(subdir, fn), "wb") as fo:
pass
def test_find_repos(tmpdir):
create_test_repo()
assert list(dt.find_repos(dt.mirror_dir)) == [join(dt.mirror_dir, "linux-64")]
def test_all_repodata_repos(tmpdir):
create_test_repo()
d = dt.all_repodata(dt.mirror_dir)
assert d[join(dt.mirror_dir, "linux-64")]["a-1.0-0.tar.bz2"]["md5"] == EMPTY_MD5
def test_verify_all_repos(tmpdir):
create_test_repo()
dt.verify_all_repos(dt.mirror_dir)
def test_read_no_reference(tmpdir):
# tmpdir is empty - join(tmpdir, 'reference.json') does not exist
with pytest.raises(dt.NoReferenceError):
dt.read_reference()
def test_write_and_read_reference(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"))
ref = dt.read_reference()
assert ref[join(dt.mirror_dir, "linux-64")]["a-1.0-0.tar.bz2"]["md5"] == EMPTY_MD5
def test_write_and_read_reference_with_target(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"), join(tmpdir, "reference_target.json"))
ref = dt.read_reference(join(tmpdir, "reference_target.json"))
assert ref[join(dt.mirror_dir, "linux-64")]["a-1.0-0.tar.bz2"]["md5"] == EMPTY_MD5
def test_get_updates(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"))
assert list(dt.get_updates(dt.mirror_dir)) == []
create_test_repo("win-32")
lst = sorted(pathlib.Path(f) for f in dt.get_updates(dt.mirror_dir))
assert lst == [
pathlib.Path("win-32/a-1.0-0.tar.bz2"),
pathlib.Path("win-32/repodata.json"),
pathlib.Path("win-32/repodata.json.bz2"),
]
def test_get_updates_with_target(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"), join(tmpdir, "reference_target.json"))
assert (
list(dt.get_updates(dt.mirror_dir, join(tmpdir, "reference_target.json"))) == []
)
create_test_repo("win-32")
lst = sorted(
pathlib.Path(f)
for f in dt.get_updates(dt.mirror_dir, join(tmpdir, "reference_target.json"))
)
assert lst == [
pathlib.Path("win-32/a-1.0-0.tar.bz2"),
pathlib.Path("win-32/repodata.json"),
pathlib.Path("win-32/repodata.json.bz2"),
]
def test_tar_repo(tmpdir):
create_test_repo()
dt.write_reference(dt.mirror_dir)
create_test_repo("win-32")
dt.tar_repo(dt.mirror_dir)
assert isfile(dt.DEFAULT_UPDATE_PATH)
def test_tar_repo_with_target(tmpdir):
create_test_repo()
tarball = join(tmpdir, "updates_target.tar")
reference = join(tmpdir, "reference_target.json")
dt.write_reference(dt.mirror_dir, reference)
create_test_repo("win-32")
dt.tar_repo(dt.mirror_dir, reference, tarball)
assert isfile(tarball)
def run_with_args(args):
old_args = list(sys.argv)
sys.argv = ["conda-diff-tar"] + args
dt.main()
sys.argv = old_args
def test_version():
run_with_args(["--version"])
def test_cli_reference(tmpdir):
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
assert isfile(dt.DEFAULT_REFERENCE_PATH)
def test_cli_reference_outfile(tmpdir):
target_path = join(tmpdir, "ref_target.json")
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
assert isfile(dt.DEFAULT_REFERENCE_PATH)
run_with_args(["--reference", "--outfile", target_path, dt.mirror_dir])
assert isfile(target_path)
with open(dt.DEFAULT_REFERENCE_PATH, "r") as ref1:
with open(target_path, "r") as ref2:
assert ref1.readlines() == ref2.readlines()
def test_cli_create_outfile(tmpdir):
target_path = join(tmpdir, "tar_target.tar")
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
run_with_args(["--create", "--outfile", target_path, dt.mirror_dir])
assert isfile(target_path)
def test_cli_create_infile(tmpdir):
target_ref_path = join(tmpdir, "ref_target.json")
create_test_repo()
run_with_args(["--reference", "--outfile", target_ref_path, dt.mirror_dir])
assert isfile(target_ref_path)
run_with_args(["--create", "--infile", target_ref_path, dt.mirror_dir])
assert isfile(dt.DEFAULT_UPDATE_PATH)
def test_cli_create_infile_outfile(tmpdir):
target_tar_path = join(tmpdir, "tar_target.tar")
target_ref_path = join(tmpdir, "ref_target.json")
create_test_repo()
run_with_args(["--reference", "--outfile", target_ref_path, dt.mirror_dir])
assert isfile(target_ref_path)
run_with_args(
[
"--create",
"--outfile",
target_tar_path,
"--infile",
target_ref_path,
dt.mirror_dir,
]
)
assert isfile(target_tar_path)
def test_misc(tmpdir):
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
create_test_repo("win-32")
run_with_args(["--show", dt.mirror_dir])
run_with_args(["--create", "--verbose", dt.mirror_dir])
run_with_args(["--verify", dt.mirror_dir])
run_with_args([dt.mirror_dir]) # do nothing
| 5,305
| 0
| 482
|
926b3d6413e556587b0edd606bb4824c907485dd
| 417
|
py
|
Python
|
app/core/migrations/0005_item_slug.py
|
Andika7/microservice-django
|
7c25635d7fe371a62f14d2e3b6685678354a0568
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_item_slug.py
|
Andika7/microservice-django
|
7c25635d7fe371a62f14d2e3b6685678354a0568
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_item_slug.py
|
Andika7/microservice-django
|
7c25635d7fe371a62f14d2e3b6685678354a0568
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-05 10:54
from django.db import migrations, models
| 20.85
| 54
| 0.58753
|
# Generated by Django 3.0.2 on 2020-02-05 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20200205_1029'),
]
operations = [
migrations.AddField(
model_name='item',
name='slug',
field=models.SlugField(default='product'),
preserve_default=False,
),
]
| 0
| 303
| 23
|
be8b6d00d6603fd9d2bfbc19d2ac5292cd519ac9
| 1,268
|
py
|
Python
|
src/views/CmdView/deactivate.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 1
|
2019-06-17T17:01:17.000Z
|
2019-06-17T17:01:17.000Z
|
src/views/CmdView/deactivate.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 7
|
2021-02-08T20:46:15.000Z
|
2021-09-08T02:12:59.000Z
|
src/views/CmdView/deactivate.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from views.CmdView.tokenify import Tokenify
from views.CmdView.command import Command
| 30.190476
| 77
| 0.576498
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from views.CmdView.tokenify import Tokenify
from views.CmdView.command import Command
class Deactivate(Command):
def __init__(self, object, obj, id=None):
super(Deactivate, self).__init__(obj)
self.id = id
self.object = object
def execute(self, token, username):
body = {'status': 'DEACTIVATED'}
cookies = dict(username=username, token=token)
if self.object not in ['worker', 'master']:
print("Object " + self.object + " not known")
return
elif self.object == 'worker':
self.check_id(self.id)
url = os.path.join(Tokenify.get_url(), 'workers', str(self.id))
elif self.object == 'master':
url = os.path.join(Tokenify.get_url(), 'master')
r, text = self.send_request('patch', url, cookies=cookies, json=body)
if r.status_code == 200 or r.status_code == 204:
print(self.object + ' ' + str(self.id) + ' deactivated!')
else:
print(str(r.status_code) + "---- HTTP REQUEST GET'S ERROR")
print(text)
def check_id(self,id):
if id is None:
print("Id for Storage or Worker needed")
exit(0)
| 1,020
| 5
| 103
|
277cfc2eaf7d209975206be666ec892e87746c43
| 4,457
|
py
|
Python
|
datasets/utils.py
|
xdr940/cc
|
a98fe9b6c33c332a4c399f968032a90989c55672
|
[
"MIT"
] | null | null | null |
datasets/utils.py
|
xdr940/cc
|
a98fe9b6c33c332a4c399f968032a90989c55672
|
[
"MIT"
] | 1
|
2019-08-16T07:09:22.000Z
|
2019-09-04T04:59:51.000Z
|
datasets/utils.py
|
xdr940/cc
|
a98fe9b6c33c332a4c399f968032a90989c55672
|
[
"MIT"
] | 1
|
2020-01-13T04:51:22.000Z
|
2020-01-13T04:51:22.000Z
|
import numpy as np
from scipy.misc import imread
import torch
import random
# frame skipping happens here
# adds a frame-skipping option
# frame skipping plus ground-truth loading
| 30.319728
| 118
| 0.572582
|
import numpy as np
from scipy.misc import imread
import torch
import random
def load_depth(path,format='png'):
if format=='npy':
tgt_depth = np.expand_dims(np.load(path), axis=0)
elif format=='png':
tgt_depth =np.expand_dims( imread(path), axis=0)
return torch.from_numpy(tgt_depth).float() / 255
def load_as_float(path):
return imread(path).astype(np.float32)
# frame skipping happens here
def crawl_folders(folders_list, sequence_length,shuffle = False):
sequence_set = []
demi_length = (sequence_length-1)//2
for folder in folders_list:
        intrinsics = np.genfromtxt(folder/'cam.txt', delimiter=',')  # field delimiter
intrinsics = intrinsics.astype(np.float32).reshape((3, 3))
imgs = sorted(folder.files('*.jpg'))
if len(imgs) < sequence_length:
continue
for i in range(demi_length, len(imgs)-demi_length):
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': []}
for j in range(-demi_length, demi_length + 1):
if j != 0:
sample['ref_imgs'].append(imgs[i+j])
sequence_set.append(sample)
if shuffle:
random.shuffle(sequence_set)
else:
pass
return sequence_set
# adds a frame-skipping option
def crawl_folders2(folders_list, sequence_length,interval_frame=0,sample_gap = 0, shuffle=False):
sequence_set = []
demi_length = (sequence_length - 1) // 2
for folder in folders_list:
        intrinsics = np.genfromtxt(folder / 'cam.txt', delimiter=',')  # field delimiter
        intrinsics = intrinsics.astype(np.float32).reshape((3, 3))
        imgs = sorted(folder.files('*.jpg'))
        if len(imgs) < sequence_length:  # too few frames, skip this folder
            continue
        # keep only every (interval_frame+1)-th frame
for i in range(len(imgs)):
if i % (interval_frame+1) != 0 :
imgs[i]=None
while None in imgs:
imgs.remove(None)
        for i in range(demi_length, len(imgs) - demi_length):  # within a single folder
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': []}
for j in range(-demi_length, demi_length + 1):
if j != 0:
sample['ref_imgs'].append(imgs[i + j])
sequence_set.append(sample)
if shuffle:
random.shuffle(sequence_set)
else:
pass
    # subsample the sequence set to reduce samples and speed up training
for i in range(len(sequence_set)):
if i % (sample_gap+1) != 0:
sequence_set[i] = None
while None in sequence_set:
sequence_set.remove(None)
return sequence_set
# frame skipping plus ground-truth loading
def crawl_folders_gt(folders_list, sequence_length,interval_frame=0,sample_gap = 0,depth_format='png', shuffle=False):
sequence_set = []
demi_length = (sequence_length - 1) // 2
for folder in folders_list:
        intrinsics = np.genfromtxt(folder / 'cam.txt', delimiter=',')  # field delimiter
intrinsics = intrinsics.astype(np.float32).reshape((3, 3))
depths_folder = folder / 'depths'
imgs_folder = folder/'imgs'
# all paths
imgs = sorted(imgs_folder.files('*.png'))
if depth_format=='npy':
depths = sorted(depths_folder.files('*.npy'))
elif depth_format=='png':
depths = sorted(depths_folder.files('*.png'))
        if len(imgs) < sequence_length:  # too few frames, skip this folder
            continue
        # keep only every (interval_frame+1)-th frame
for i in range(len(imgs)):
if i % (interval_frame+1) != 0 :
imgs[i]=None
depths[i]=None
#pose[i]=None
#flow[i]=None
while None in imgs:
imgs.remove(None)
depths.remove(None)
        for i in range(demi_length, len(imgs) - demi_length):  # within a single folder
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': [],'tgt_depth':depths[i]}
            # ref imgs processing
for j in range(-demi_length, demi_length + 1):
if j != 0:
sample['ref_imgs'].append(imgs[i + j])
            # flow processing
            # pose processing
sequence_set.append(sample)
if shuffle:
random.shuffle(sequence_set)
else:
pass
    # subsample the sequence set to reduce samples and speed up training
for i in range(len(sequence_set)):
if i % (sample_gap+1) != 0:
sequence_set[i] = None
while None in sequence_set:
sequence_set.remove(None)
return sequence_set
| 4,378
| 0
| 112
|
6377ca21c65dacd8a4f10dd4484517d8cf1d99aa
| 6,665
|
py
|
Python
|
modules/datastructures/TrainData_deepCSV.py
|
dntaylor/DeepJet
|
249610b3b80543c8c84f5ba795bbb07c097f8150
|
[
"Apache-2.0"
] | 1
|
2018-02-16T13:13:09.000Z
|
2018-02-16T13:13:09.000Z
|
modules/datastructures/TrainData_deepCSV.py
|
dntaylor/DeepJet
|
249610b3b80543c8c84f5ba795bbb07c097f8150
|
[
"Apache-2.0"
] | null | null | null |
modules/datastructures/TrainData_deepCSV.py
|
dntaylor/DeepJet
|
249610b3b80543c8c84f5ba795bbb07c097f8150
|
[
"Apache-2.0"
] | 5
|
2017-11-03T15:51:27.000Z
|
2019-05-29T14:45:23.000Z
|
'''
Created on 21 Feb 2017
@author: jkiesele
'''
from TrainDataDeepJet import TrainData_Flavour, TrainData_simpleTruth, TrainData_fullTruth, fileTimeOut
import numpy as np
class TrainData_deepCSV(TrainData_Flavour, TrainData_simpleTruth):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
TrainData_Flavour.__init__(self)
self.addBranches(['jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel'])
self.addBranches(['TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'],
6)
self.addBranches(['TagVarCSV_trackEtaRel'],4)
self.addBranches(['TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig'],
1)
class TrainData_deepCSV_RNN(TrainData_fullTruth):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
super(TrainData_deepCSV_RNN, self).__init__()
self.addBranches([
'jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel'])
self.addBranches([
'TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'
], 6)
self.addBranches(['TagVarCSV_trackEtaRel'],4)
self.addBranches([
'TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig'
], 1)
self.addBranches(['jet_corr_pt'])
self.registerBranches(['gen_pt_WithNu'])
self.regressiontargetclasses=['uncPt','Pt']
class TrainData_deepCSV_RNN_Deeper(TrainData_deepCSV_RNN):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
super(TrainData_deepCSV_RNN_Deeper, self).__init__()
self.branchcutoffs = [1, 20, 13, 4, 1]
| 35.452128
| 103
| 0.580045
|
'''
Created on 21 Feb 2017
@author: jkiesele
'''
from TrainDataDeepJet import TrainData_Flavour, TrainData_simpleTruth, TrainData_fullTruth, fileTimeOut
import numpy as np
class TrainData_deepCSV(TrainData_Flavour, TrainData_simpleTruth):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
TrainData_Flavour.__init__(self)
self.addBranches(['jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel'])
self.addBranches(['TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'],
6)
self.addBranches(['TagVarCSV_trackEtaRel'],4)
self.addBranches(['TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig'],
1)
def readFromRootFile(self,filename,TupleMeanStd, weighter):
super(TrainData_deepCSV, self).readFromRootFile(filename, TupleMeanStd, weighter)
ys = self.y[0]
flav_sum = ys.sum(axis=1)
if (flav_sum > 1).any():
raise ValueError('In file: %s I get a jet with multiple flavours assigned!' % filename)
mask = (flav_sum == 1) if self.remove else (np.ones(flav_sum.shape[0]) == 1)
self.x = [self.x[0][mask]]
self.y = [self.y[0][mask]]
self.w = [self.w[0][mask]]
class TrainData_deepCSV_RNN(TrainData_fullTruth):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
super(TrainData_deepCSV_RNN, self).__init__()
self.addBranches([
'jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel'])
self.addBranches([
'TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'
], 6)
self.addBranches(['TagVarCSV_trackEtaRel'],4)
self.addBranches([
'TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig'
], 1)
self.addBranches(['jet_corr_pt'])
self.registerBranches(['gen_pt_WithNu'])
self.regressiontargetclasses=['uncPt','Pt']
def readFromRootFile(self,filename,TupleMeanStd, weighter):
from DeepJetCore.preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
import numpy
from DeepJetCore.stopwatch import stopwatch
sw=stopwatch()
swall=stopwatch()
import ROOT
fileTimeOut(filename,120) #give eos a minute to recover
rfile = ROOT.TFile(filename)
tree = rfile.Get("deepntuplizer/tree")
self.nsamples=tree.GetEntries()
print('took ', sw.getAndReset(), ' seconds for getting tree entries')
# split for convolutional network
x_global = MeanNormZeroPad(
filename,None,
[self.branches[0]],
[self.branchcutoffs[0]],self.nsamples
)
x_cpf = MeanNormZeroPadParticles(
filename,None,
self.branches[1],
self.branchcutoffs[1],self.nsamples
)
x_etarel = MeanNormZeroPadParticles(
filename,None,
self.branches[2],
self.branchcutoffs[2],self.nsamples
)
x_sv = MeanNormZeroPadParticles(
filename,None,
self.branches[3],
self.branchcutoffs[3],self.nsamples
)
print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
npy_array = self.readTreeFromRootToTuple(filename)
reg_truth=npy_array['gen_pt_WithNu'].view(numpy.ndarray)
reco_pt=npy_array['jet_corr_pt'].view(numpy.ndarray)
correctionfactor=numpy.zeros(self.nsamples)
for i in range(self.nsamples):
correctionfactor[i]=reg_truth[i]/reco_pt[i]
truthtuple = npy_array[self.truthclasses]
alltruth=self.reduceTruth(truthtuple)
self.x=[x_global, x_cpf, x_etarel, x_sv, reco_pt]
self.y=[alltruth,correctionfactor]
self._normalize_input_(weighter, npy_array)
class TrainData_deepCSV_RNN_Deeper(TrainData_deepCSV_RNN):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
super(TrainData_deepCSV_RNN_Deeper, self).__init__()
self.branchcutoffs = [1, 20, 13, 4, 1]
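
# --- Editorial sketch (not part of the original file) ------------------------
# The branch groups registered above are later zero-padded to the fixed
# per-group lengths in `branchcutoffs` by DeepJetCore's MeanNormZeroPadParticles.
# The toy helper below only illustrates that padding idea with plain numpy; it
# is an assumption for exposition, not the DeepJetCore implementation.
def _pad_particles_toy(per_jet_particles, n_features, cutoff):
    import numpy as np
    out = np.zeros((len(per_jet_particles), cutoff, n_features), dtype='float32')
    for j, particles in enumerate(per_jet_particles):
        for p, feats in enumerate(particles[:cutoff]):  # truncate long lists
            out[j, p, :] = feats                        # shorter lists stay zero-padded
    return out

# Example: two jets with 1 and 2 tracks of 3 features each, padded to 6 tracks:
# _pad_particles_toy([[[1, 2, 3]], [[4, 5, 6], [7, 8, 9]]], 3, 6).shape == (2, 6, 3)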
| 2,537
| 0
| 54
|
589972506c18ceae6aaf9215a98021b547e17043
| 603
|
py
|
Python
|
src/clover/demo.py
|
gregjhansell97/leprechaun
|
d31e8d1a4b0a91aee2902602224c924b0b89fa06
|
[
"MIT"
] | null | null | null |
src/clover/demo.py
|
gregjhansell97/leprechaun
|
d31e8d1a4b0a91aee2902602224c924b0b89fa06
|
[
"MIT"
] | null | null | null |
src/clover/demo.py
|
gregjhansell97/leprechaun
|
d31e8d1a4b0a91aee2902602224c924b0b89fa06
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from clover.gui import CloverApp, Clock
# may want to consider bounding speed
app = CloverApp(title="leo-demo")
| 20.793103
| 55
| 0.633499
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from clover.gui import CloverApp, Clock
# may want to consider bounding speed
app = CloverApp(title="leo-demo")
def tick(time_interval):
if app.world.gold is None or app.world.leo is None:
return
try:
gold_x, gold_y = app.world.gold.loc
leo_x, leo_y = app.world.leo.loc
except AttributeError:
return
scale_factor = 0.01
dx = scale_factor * (gold_x - leo_x)
dy = scale_factor * (gold_y - leo_y)
app.world.leo.vel = (dx, dy)
def main():
Clock.schedule_interval(tick, 0.01)
app.run()
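
# --- Editorial note (not part of the original file) ---------------------------
# Nothing in this module actually invokes main(); a conventional entry-point
# guard such as the one below would be needed to start the demo. The guard is
# an assumption added for illustration.
if __name__ == '__main__':
    main()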
| 393
| 0
| 46
|
7c1efb98e5fd682a9bcfc00cec5517743f22f8a6
| 1,197
|
py
|
Python
|
flatiron-notebook/test/test_languages.py
|
IllumiDesk/flatiron-stacks
|
51ec24fefc35ccca0a1667ae20438db26a901d22
|
[
"MIT"
] | null | null | null |
flatiron-notebook/test/test_languages.py
|
IllumiDesk/flatiron-stacks
|
51ec24fefc35ccca0a1667ae20438db26a901d22
|
[
"MIT"
] | 2
|
2021-08-02T02:53:14.000Z
|
2021-11-05T18:08:18.000Z
|
flatiron-notebook/test/test_languages.py
|
IllumiDesk/flatiron-stacks
|
51ec24fefc35ccca0a1667ae20438db26a901d22
|
[
"MIT"
] | 1
|
2020-10-21T16:08:46.000Z
|
2020-10-21T16:08:46.000Z
|
import docker
from docker.errors import ContainerError
import logging
import pytest
LOGGER = logging.getLogger(__name__)
PYTHON_VERSION='3.9.5'
NOTEBOOK_IMAGE_TAG=f'python-{PYTHON_VERSION}'
@pytest.mark.parametrize(
'language,version_output',
[
('python', ['Python', '3.9.5\n']),
],
)
def test_languages(language, version_output):
"""Ensure that the language is available in the container's PATH and that
it has the correct version
"""
LOGGER.info(f'Test that language {language} {PYTHON_VERSION} is correctly installed ...')
client = docker.from_env()
output = client.containers.run(f'illumidesk/flatiron-notebook:{NOTEBOOK_IMAGE_TAG}', f'{language} --version')
output_decoded = output.decode('utf-8').split(' ')
assert output_decoded[0:3] == version_output
LOGGER.info(f'Output from command: {output_decoded[0:3]}')
def test_invalid_cmd():
"""Ensure that an invalid command returns a docker.errors.ContainerError
"""
with pytest.raises(ContainerError):
LOGGER.info('Test an invalid command ...')
client = docker.from_env()
client.containers.run('illumidesk/flatiron-notebook', 'foo --version')
| 29.925
| 113
| 0.70259
|
import docker
from docker.errors import ContainerError
import logging
import pytest
LOGGER = logging.getLogger(__name__)
PYTHON_VERSION='3.9.5'
NOTEBOOK_IMAGE_TAG=f'python-{PYTHON_VERSION}'
@pytest.mark.parametrize(
'language,version_output',
[
('python', ['Python', '3.9.5\n']),
],
)
def test_languages(language, version_output):
"""Ensure that the language is available in the container's PATH and that
it has the correct version
"""
LOGGER.info(f'Test that language {language} {PYTHON_VERSION} is correctly installed ...')
client = docker.from_env()
output = client.containers.run(f'illumidesk/flatiron-notebook:{NOTEBOOK_IMAGE_TAG}', f'{language} --version')
output_decoded = output.decode('utf-8').split(' ')
assert output_decoded[0:3] == version_output
LOGGER.info(f'Output from command: {output_decoded[0:3]}')
def test_invalid_cmd():
"""Ensure that an invalid command returns a docker.errors.ContainerError
"""
with pytest.raises(ContainerError):
LOGGER.info('Test an invalid command ...')
client = docker.from_env()
client.containers.run('illumidesk/flatiron-notebook', 'foo --version')
| 0
| 0
| 0
|
39b66c96fbaf8cecf65a0efb079529756c9fb5ba
| 5,846
|
py
|
Python
|
bot.py
|
rivermont/orka
|
4719c3b758d85f9b340698b9b637af196a50cec2
|
[
"MIT"
] | 1
|
2017-10-08T17:18:44.000Z
|
2017-10-08T17:18:44.000Z
|
bot.py
|
rivermont/orka
|
4719c3b758d85f9b340698b9b637af196a50cec2
|
[
"MIT"
] | null | null | null |
bot.py
|
rivermont/orka
|
4719c3b758d85f9b340698b9b637af196a50cec2
|
[
"MIT"
] | null | null | null |
"""
Orka Discord Bot
Copyright (c) 2017 William Bennett
"""
###########
# IMPORTS #
###########
import discord
import random
import markovify
from os import path, makedirs
from scripts import *
###################
# OTHER FUNCTIONS #
###################
def add_msg(channel, text, mode='a+'):
"""
Appends a message to the end of a file.
"""
with open('channels/{0}.txt'.format(channel), '{0}'.format(mode), encoding="utf_8") as file:
file.write('{0}\n'.format(text))
#######
# BOT #
#######
#######
# RUN #
#######
client = Orka()
read = []
if __name__ == '__main__':
if not path.exists('channels\\'):
makedirs('channels\\')
client.run()
| 34.591716
| 128
| 0.561923
|
"""
Orka Discord Bot
Copyright (c) 2017 William Bennett
"""
###########
# IMPORTS #
###########
import discord
import random
import markovify
from os import path, makedirs
from scripts import *
###################
# OTHER FUNCTIONS #
###################
def add_msg(channel, text, mode='a+'):
"""
Appends a message to the end of a file.
"""
with open('channels/{0}.txt'.format(channel), '{0}'.format(mode), encoding="utf_8") as file:
file.write('{0}\n'.format(text))
def make_markov_model(channel):
    # The global declaration must precede the assignment (declaring it after
    # assigning is a SyntaxError), and markovify.NewlineText expects the corpus
    # text itself, so read the file contents first.
    global model
    with open('channels/{0}.txt'.format(channel), 'r', encoding="utf_8") as file:
        model = markovify.NewlineText(file.read())
#######
# BOT #
#######
class Orka(discord.Client):
async def on_ready(self):
print('Logging in...')
print('Logged in as {0}; ID #{1}'.format(client.user.name, client.user.id))
print('Setting status...')
await client.change_presence(game=discord.Game(name='https://github.com/rivermont/orka'))
print('Gathering available text channels...')
for server in client.servers:
for channel in server.channels:
if channel.type == discord.ChannelType.text:
if channel.permissions_for(server.me).read_messages:
print('Read access in: ' + server.name + '/' + channel.name)
read.append(channel)
print('Downloading logs from readable text channels...')
for channel in read:
add_msg(channel, '', mode='w+')
async for message in client.logs_from(channel, limit=1000):
add_msg(channel, message.content, mode='a')
print('Ready.')
async def on_member_join(self, member):
general = self.get_server("256600580837998592").get_channel("256600580837998592")
await client.send_message(
general,
'Welcome, @{0}! Please familiarize yourself with our #rules, then go wild!'.format(member.name)
)
async def on_message(self, message):
print('Received message..')
content = message.content
channel = message.channel
add_msg(channel, content)
# General commands
if message.content.startswith('!flip'):
# Flips a coin on two choices. Defaults to Heads or Tails.
print('Flipping coin...')
if len(content.split()) == 1:
choice_ = random.choice(['Heads', 'Tails'])
await client.send_message(channel, choice_)
elif len(content.split()) == 2:
await client.send_message(channel, 'Only one option supplied. Must be two or none.')
elif len(content.split()) == 3:
options = content.split()[1:]
flip = random.choice(options)
await client.send_message(channel, flip)
elif len(content.split()) > 3:
await client.send_message(channel, 'Too many options supplied. Must be two or none.')
elif content.startswith('!roll'):
# Rolls a dice. Defaults to a d6.
print('Rolling die...')
if len(content.split()) == 1:
roll = random.randint(1, 6)
await client.send_message(channel, 'You rolled a {0}.'.format(roll))
if len(content.split()) == 2:
input_ = content.split()[1]
roll = random.randint(1, int(input_))
await client.send_message(channel, 'You rolled a {0}.'.format(roll))
elif content.startswith('!convert'):
# Converts Kelvin/Celsius/Fahrenheit
input_ = content.split()
try:
amount = int(input_[1][:-1])
unit_from = input_[1][-1]
unit_to = input_[2]
result = convert(amount, unit_from, unit_to)
if result == "Error":
raise IndexError
else:
await client.send_message(channel, 'Converted {0}{1} to {2}{3}.'.format(amount, unit_from, result, unit_to))
except IndexError:
print('Invalid input.')
await client.send_message(channel, 'Invalid input. Must be in format `!convert 23U U`.')
# Moderation commands
elif content.startswith('@stop'):
print('Stopping bot...')
await client.logout()
elif content.startswith('@logs'):
async for m in client.logs_from(channel):
add_msg(channel, m.content)
elif content.startswith('@generate'):
print('Generating markov model for channel {0}'.format(channel))
make_markov_model(channel)
await client.send_message(channel, 'Successfully generated markov model.')
elif content.startswith('!sentence'):
# Generates a single line from the current markov model
# Under moderation b/c that's where @generate is
sentence = ''
try:
sentence = model.make_sentence(tries=1000)
except NameError:
print('No available markov model.')
await client.send_message(channel, 'No available markov model.')
if not bool(sentence):
await client.send_message(channel, 'No sentence generated.')
else:
await client.send_message(channel, sentence)
elif content.startswith('@save'):
with open('model.json', 'w+') as f:
f.write(model.to_json())
elif content.startswith('@test'):
# Generic testing function
pass
#######
# RUN #
#######
client = Orka()
read = []
if __name__ == '__main__':
if not path.exists('channels\\'):
makedirs('channels\\')
client.run()
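
# --- Editorial sketch (not part of the original file) -------------------------
# `convert` comes from the project's `scripts` package, which is not shown
# here; the !convert handler above only assumes it converts a temperature
# between K/C/F and returns the string "Error" on failure. The helper below is
# a guess at such a function, added purely so the handler can be read in
# context - it is not the project's implementation.
def _convert_guess(amount, unit_from, unit_to):
    try:
        if unit_from.upper() == 'C':
            celsius = amount
        elif unit_from.upper() == 'F':
            celsius = (amount - 32) * 5 / 9
        elif unit_from.upper() == 'K':
            celsius = amount - 273.15
        else:
            return "Error"
        if unit_to.upper() == 'C':
            return round(celsius, 2)
        if unit_to.upper() == 'F':
            return round(celsius * 9 / 5 + 32, 2)
        if unit_to.upper() == 'K':
            return round(celsius + 273.15, 2)
        return "Error"
    except (TypeError, ValueError):
        return "Error"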
| 5,018
| 6
| 127
|
5dfc4ead3ddd6b27d4c94803fa33ddb4e209c9c4
| 20,054
|
py
|
Python
|
composer/callbacks/checkpoint_saver.py
|
hanlint/composer
|
83d96b7efde533cbc2fff7dd7e0769da2b177807
|
[
"Apache-2.0"
] | null | null | null |
composer/callbacks/checkpoint_saver.py
|
hanlint/composer
|
83d96b7efde533cbc2fff7dd7e0769da2b177807
|
[
"Apache-2.0"
] | null | null | null |
composer/callbacks/checkpoint_saver.py
|
hanlint/composer
|
83d96b7efde533cbc2fff7dd7e0769da2b177807
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
"""Callback to save checkpoints during training."""
from __future__ import annotations
import logging
import os
import pathlib
import textwrap
from typing import Callable, List, Optional, Tuple, Union
from composer.core import Event, State
from composer.core.callback import Callback
from composer.core.time import Time, Timestamp, TimeUnit
from composer.loggers import Logger
from composer.loggers.logger import LogLevel
from composer.utils import checkpoint, dist
from composer.utils.file_helpers import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE,
ensure_folder_is_empty, format_name_with_dist, format_name_with_dist_and_time,
is_tar)
log = logging.getLogger(__name__)
__all__ = ["CheckpointSaver", "checkpoint_periodically"]
def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, Time]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`~TimeUnit.EPOCH`\\s.
Otherwise, the unit must be either :attr:`TimeUnit.EPOCH` or :attr:`TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
and at the end of training.
Returns:
Callable[[State, Event], bool]: A function that can be passed as the ``save_interval``
argument into the :class:`CheckpointSaver`.
"""
if isinstance(interval, str):
interval = Time.from_timestring(interval)
if isinstance(interval, int):
interval = Time(interval, TimeUnit.EPOCH)
if interval.unit == TimeUnit.EPOCH:
save_event = Event.EPOCH_CHECKPOINT
elif interval.unit == TimeUnit.BATCH:
save_event = Event.BATCH_CHECKPOINT
else:
raise NotImplementedError(
f"Unknown checkpointing interval: {interval.unit}. Must be TimeUnit.EPOCH or TimeUnit.BATCH.")
last_checkpoint_batch = None
return save_interval
| 49.761787
| 154
| 0.621472
|
# Copyright 2021 MosaicML. All Rights Reserved.
"""Callback to save checkpoints during training."""
from __future__ import annotations
import logging
import os
import pathlib
import textwrap
from typing import Callable, List, Optional, Tuple, Union
from composer.core import Event, State
from composer.core.callback import Callback
from composer.core.time import Time, Timestamp, TimeUnit
from composer.loggers import Logger
from composer.loggers.logger import LogLevel
from composer.utils import checkpoint, dist
from composer.utils.file_helpers import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE,
ensure_folder_is_empty, format_name_with_dist, format_name_with_dist_and_time,
is_tar)
log = logging.getLogger(__name__)
__all__ = ["CheckpointSaver", "checkpoint_periodically"]
def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, Time]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`~TimeUnit.EPOCH`\\s.
Otherwise, the unit must be either :attr:`TimeUnit.EPOCH` or :attr:`TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
and at the end of training.
Returns:
Callable[[State, Event], bool]: A function that can be passed as the ``save_interval``
argument into the :class:`CheckpointSaver`.
"""
if isinstance(interval, str):
interval = Time.from_timestring(interval)
if isinstance(interval, int):
interval = Time(interval, TimeUnit.EPOCH)
if interval.unit == TimeUnit.EPOCH:
save_event = Event.EPOCH_CHECKPOINT
elif interval.unit == TimeUnit.BATCH:
save_event = Event.BATCH_CHECKPOINT
else:
raise NotImplementedError(
f"Unknown checkpointing interval: {interval.unit}. Must be TimeUnit.EPOCH or TimeUnit.BATCH.")
last_checkpoint_batch = None
def save_interval(state: State, event: Event):
nonlocal last_checkpoint_batch
if state.get_elapsed_duration() >= 1.0:
# if doing batch-wise checkpointing, and we saved a checkpoint at the batch_checkpoint event
# right before the epoch_checkpoint event, do not save another checkpoint at the epoch_checkpoint
# event if the batch count didn't increase.
if state.timer.batch != last_checkpoint_batch:
last_checkpoint_batch = state.timer.batch
return True
if save_event == Event.EPOCH_CHECKPOINT:
count = state.timer.epoch
elif save_event == Event.BATCH_CHECKPOINT:
count = state.timer.batch
else:
raise RuntimeError(f"Invalid save_event: {save_event}")
if event == save_event and int(count) % int(interval) == 0:
last_checkpoint_batch = state.timer.batch
return True
return False
return save_interval
class CheckpointSaver(Callback):
__doc__ = f"""Callback to save checkpoints.
.. note::
If the ``folder`` argument is specified constructing the :class:`~composer.trainer.trainer.Trainer`,
then the :class:`.CheckpointSaver` callback need not be constructed manually. However, for advanced
checkpointing use cases (such as saving a weights-only checkpoint at one interval and the full training state
at another interval), instance(s) of this :class:`.CheckpointSaver` callback can be specified in the
``callbacks`` argument of the :class:`~composer.trainer.trainer.Trainer`, as shown in the example below.
Example
.. testsetup::
from composer.callbacks.checkpoint_saver import CheckpointSaver
.. doctest::
>>> trainer = Trainer(..., callbacks=[
... CheckpointSaver(
... folder='{{run_name}}/checkpoints',
... filename="ep{{epoch}}-ba{{batch}}-rank{{rank}}",
... latest_filename="latest-rank{{rank}}",
... save_interval="1ep",
... weights_only=False,
... )
... ])
.. testcleanup::
trainer.engine.close()
Args:
folder (str, optional): Format string for the folder where checkpoints will be saved.
(default: ``'{{run_name}}/checkpoints'``)
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_TABLE, prefix=' ')}
.. note::
When training with multiple devices (i.e. GPUs), ensure that ``'{{rank}}'`` appears in the format.
Otherwise, multiple processes may attempt to write to the same file.
filename (str, optional): A format string describing how to name checkpoints.
(default: ``'ep{{epoch}}-ba{{batch}}-rank{{rank}}'``)
Checkpoints will be saved approximately to ``{{folder}}/{{filename.format(...)}}``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
.. note::
* By default, only the rank zero process will save a checkpoint file.
* When using DeepSpeed, each rank will save a checkpoint file in tarball format. DeepSpeed
requires tarball format, as it saves model and optimizer states in separate files.
Ensure that ``'{{rank}}'`` appears within the ``filename``. Otherwise, multiple ranks
may attempt to write to the same file(s), leading to corrupted checkpoints. If no tarball file
extension is specified, ``'.tar'`` will be used.
* To use compression (regardless of whether DeepSpeed is enabled), set the file extension
to ``'.tar.gz'``, ``'.tgz'``, ``'.tar.bzip'``, or ``'.tar.lzma'`` (depending on the desired
compression algorithm).
.. warning::
Using compression will block the training loop while checkpoints are being compressed. As such, we
recommend saving checkpoints without compression.
Consider the following scenario, where:
* The :attr:`~.Logger.run_name` is ``'awesome-training-run'``
* The default ``folder='{{run_name}}/checkpoints'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to ``"awesome-training-run/checkpoints/ep1-ba42-rank0"``.
When DeepSpeed is being used, each rank (process) will save checkpoints to::
awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
artifact_name (str, optional): Format string for the checkpoint's artifact name.
(default: ``'{{run_name}}/checkpoints/ep{{epoch}}-ba{{batch}}-rank{{rank}}"``)
After the checkpoint is saved, it will be periodically logged as a file artifact.
The artifact name will be determined by this format string.
.. seealso:: :meth:`~composer.loggers.logger.Logger.log_file_artifact` for file artifact logging.
The same format variables for ``filename`` are available.
Leading slashes (``'/'``) will be stripped.
To disable logging trace files as file artifacts, set this parameter to ``None``.
latest_filename (str, optional): A format string for a symlink which points to the last saved checkpoint.
(default: ``'latest-rank{{rank}}'``)
Symlinks will be created approximately at ``{{folder}}/{{latest_filename.format(...)}}``.
The same format variables as for ``name`` are available.
To disable symlinks, set this parameter to ``None``.
Consider the following scenario, where:
* The :attr:`~.Logger.run_name` is 'awesome-training-run'
* The default ``folder='{{run_name}}/checkpoints'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The default ``latest_filename='latest-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to
``'awesome-training-run/checkpoints/ep1-ba42-rank0'``,
and a symlink will be created at
``'awesome-training-run/checkpoints/latest-rank0' -> 'awesome-training-run/checkpoints/ep1-ba42-rank0'``
When DeepSpeed is being used, each rank (process) will save checkpoints to::
awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
Corresponding symlinks will be created at::
awesome-training-run/checkpoints/latest-rank0.tar -> awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/latest-rank1.tar -> awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/latest-rank2.tar -> awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
latest_artifact_name (str, optional): Format string for the checkpoint's latest symlink artifact name.
(default: ``'{{run_name}}/checkpoints/latest-rank{{rank}}"``)
Whenever a new checkpoint is saved, a symlink artifact is created or updated to point to the latest checkpoint's ``artifact_name``.
            The artifact name will be determined by this format string. This parameter has no effect if ``latest_filename`` or ``artifact_name`` is None.
.. seealso:: :meth:`~composer.loggers.logger.Logger.log_symlink_artifact` for symlink artifact logging.
The same format variables for ``filename`` are available.
Leading slashes (``'/'``) will be stripped.
To disable symlinks in logger, set this parameter to ``None``.
overwrite (bool, optional): Whether existing checkpoints should be overridden.
If ``False`` (the default), then the ``folder`` must not exist or be empty.
(default: ``False``)
save_interval (Time | str | int | (State, Event) -> bool): A :class:`Time`, time-string, integer (in epochs),
or a function that takes (state, event) and returns a boolean whether a checkpoint should be saved.
If an integer, checkpoints will be saved every n epochs.
If :class:`Time` or a time-string, checkpoints will be saved according to this interval.
.. seealso:: :func:`.checkpoint_periodically`
If a function, then this function should take two arguments (:class:`State`, :class:`Event`).
The first argument will be the current state of the trainer, and the second argument will be
be :attr:`.Event.BATCH_CHECKPOINT` or :attr:`.EPOCH_CHECKPOINT` (depending on the current training
progress). It should return ``True`` if a checkpoint should be saved given the current state and
event.
weights_only (bool): If ``True``, save only the model weights instead of the entire training state.
            This parameter must be ``False`` when using DeepSpeed. (default: ``False``)
num_checkpoints_to_keep (int, optional): The number of checkpoints to keep locally. The oldest checkpoints
are removed first. Set to ``-1`` to keep all checkpoints locally. (default: ``-1``)
Checkpoints will be removed after they have been logged as a file artifact. For example, when this callback
is used in conjunction with the :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`, set this
parameter to ``0`` to immediately delete checkpoints from the local disk after they have been uploaded to
the object store.
This parameter only controls how many checkpoints are kept locally; checkpoints are not deleted from
artifact stores.
Attributes:
saved_checkpoints (List[Tuple[Timestamp, List[pathlib.Path]]]): The checkpoint timestamps and filepaths.
This list contains tuples of the save timestamp and the checkpoint filepaths.
This list will have at most ``num_checkpoints_to_keep`` entries. The latest checkpoint
will be at the end.
.. note::
When using DeepSpeed, the index of a filepath in each list corresponds to the global rank of
the process that wrote that file. Each filepath is valid only on the process's (rank's) node.
Otherwise, when not using DeepSpeed, each sub-list will contain only one filepath since only rank zero
saves checkpoints.
"""
def __init__(
self,
folder: str = "{run_name}/checkpoints",
filename: str = "ep{epoch}-ba{batch}-rank{rank}",
artifact_name: Optional[str] = "{run_name}/checkpoints/ep{epoch}-ba{batch}-rank{rank}",
latest_filename: Optional[str] = "latest-rank{rank}",
latest_artifact_name: Optional[str] = "{run_name}/checkpoints/latest-rank{rank}",
save_interval: Union[Time, str, int, Callable[[State, Event], bool]] = "1ep",
*,
overwrite: bool = False,
num_checkpoints_to_keep: int = -1,
weights_only: bool = False,
):
if not callable(save_interval):
save_interval = checkpoint_periodically(save_interval)
self.folder = folder
self.filename = filename
self.artifact_name = artifact_name
self.latest_filename = latest_filename
self.latest_artifact_name = latest_artifact_name
self.overwrite = overwrite
self.save_interval = save_interval
self.saved_checkpoints: List[Tuple[Timestamp, List[pathlib.Path]]] = []
self.num_checkpoints_to_keep = num_checkpoints_to_keep
self.weights_only = weights_only
def init(self, state: State, logger: Logger) -> None:
del state # unused
folder = format_name_with_dist(self.folder, logger.run_name)
os.makedirs(folder, exist_ok=True)
if not self.overwrite:
ensure_folder_is_empty(folder)
# Ensure no rank proceeds (and potentially attempts to write to the folder), until all ranks have validated that the folder is empty.
dist.barrier()
def fit_start(self, state: State, logger: Logger) -> None:
if state.is_model_deepspeed:
if self.weights_only:
                raise NotImplementedError(
                    ("Saving checkpoints with `weights_only=True` is not currently supported when using DeepSpeed. "
                     "See https://github.com/mosaicml/composer/issues/685."))
def batch_checkpoint(self, state: State, logger: Logger):
if self.save_interval(state, Event.BATCH_CHECKPOINT):
# If training is finished, log at the FIT loglevel
log_level = LogLevel.BATCH if state.get_elapsed_duration() < 1.0 else LogLevel.FIT
self._save_checkpoint(state, logger, log_level)
def epoch_checkpoint(self, state: State, logger: Logger):
if self.save_interval(state, Event.EPOCH_CHECKPOINT):
log_level = LogLevel.EPOCH if state.get_elapsed_duration() < 1.0 else LogLevel.FIT
self._save_checkpoint(state, logger, log_level)
def _save_checkpoint(self, state: State, logger: Logger, log_level: LogLevel):
checkpoint_filepath = os.path.join(format_name_with_dist(self.folder, logger.run_name), self.filename)
checkpoint_filepaths = checkpoint.save_checkpoint(state,
logger,
checkpoint_filepath,
weights_only=self.weights_only)
if dist.get_global_rank() < len(checkpoint_filepaths):
# Log the checkpoint as an artifact
checkpoint_filepath = checkpoint_filepaths[dist.get_global_rank()]
if self.artifact_name is not None:
artifact_name = format_name_with_dist_and_time(self.artifact_name, logger.run_name,
state.timer.get_timestamp()).lstrip("/")
if state.is_model_deepspeed and not is_tar(artifact_name):
# Deepspeed requires tarballs; appending `.tar`
artifact_name += ".tar"
logger.file_artifact(log_level=log_level,
artifact_name=artifact_name,
file_path=checkpoint_filepath,
overwrite=self.overwrite)
if self.latest_filename is not None:
symlink_name = os.path.join(
format_name_with_dist(self.folder, logger.run_name),
format_name_with_dist_and_time(self.latest_filename, logger.run_name,
state.timer.get_timestamp()).lstrip("/"),
)
if state.is_model_deepspeed and not is_tar(symlink_name):
# Deepspeed requires tarballs; appending `.tar`
symlink_name += ".tar"
symlink_dirname = os.path.dirname(symlink_name)
if symlink_dirname:
os.makedirs(symlink_dirname, exist_ok=True)
try:
os.remove(symlink_name)
except FileNotFoundError:
pass
os.symlink(checkpoint_filepath, symlink_name)
if self.artifact_name is not None and self.latest_artifact_name is not None:
symlink_artifact_name = format_name_with_dist_and_time(self.latest_artifact_name, logger.run_name,
state.timer.get_timestamp()).lstrip("/")
artifact_name = format_name_with_dist_and_time(self.artifact_name, logger.run_name,
state.timer.get_timestamp()).lstrip("/")
# Always overwrite for symlinks since we use the same filename for latest
logger.symlink_artifact(log_level=log_level,
existing_artifact_name=artifact_name,
symlink_artifact_name=symlink_artifact_name,
overwrite=True)
timestamp = state.timer.get_timestamp()
self.saved_checkpoints.append((timestamp, checkpoint_filepaths))
if self.num_checkpoints_to_keep >= 0:
while len(self.saved_checkpoints) > self.num_checkpoints_to_keep:
timestamp, checkpoint_filepaths = self.saved_checkpoints[0]
if dist.get_global_rank() < len(checkpoint_filepaths):
# Remove this rank's checkpoint
os.remove(checkpoint_filepaths[dist.get_global_rank()])
del self.saved_checkpoints[0]
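
# --- Editorial sketch (not part of the original file) -------------------------
# A stripped-down, framework-free illustration of the decision implemented by
# checkpoint_periodically() above: checkpoint every `interval` epochs/batches,
# and always checkpoint once training has finished. Plain integers and a float
# stand in for composer's Time/State machinery; they are assumptions for
# exposition only (the real scheduler also deduplicates on the batch counter).
def _should_save(unit_count, interval, elapsed_fraction):
    if elapsed_fraction >= 1.0:          # end of training: always checkpoint
        return True
    return unit_count % interval == 0    # otherwise checkpoint on the interval

# _should_save(4, 2, 0.5) -> True
# _should_save(5, 2, 0.5) -> False
# _should_save(5, 2, 1.0) -> True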
| 7,195
| 10,577
| 50
|
9df712a61c104af137c3836ef28840763dfb0311
| 5,919
|
py
|
Python
|
wrapperNYU.py
|
Z7Gao/InverseRenderingOfIndoorScene
|
f245d20dcbe05b1de766c2e53af79fd489f58d74
|
[
"MIT"
] | 171
|
2020-06-28T04:03:23.000Z
|
2022-03-30T08:50:20.000Z
|
wrapperNYU.py
|
Z7Gao/InverseRenderingOfIndoorScene
|
f245d20dcbe05b1de766c2e53af79fd489f58d74
|
[
"MIT"
] | 9
|
2020-08-20T08:56:38.000Z
|
2022-01-19T19:53:51.000Z
|
wrapperNYU.py
|
Z7Gao/InverseRenderingOfIndoorScene
|
f245d20dcbe05b1de766c2e53af79fd489f58d74
|
[
"MIT"
] | 19
|
2020-06-23T11:49:03.000Z
|
2022-01-22T01:49:26.000Z
|
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import models
# Return triplet of predictions, ground-truth and error
| 48.516393
| 167
| 0.66937
|
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import models
# Return triplet of predictions, ground-truth and error
def wrapperNYU(dataBatch, opt,
encoder, albedoDecoder, normalDecoder, roughDecoder, depthDecoder ):
# Load data from cpu to gpu
normal_cpu = dataBatch['normal']
normalBatch = Variable(normal_cpu ).cuda()
depth_cpu = dataBatch['depth']
depthBatch = Variable(depth_cpu ).cuda()
seg_cpu = dataBatch['segNormal']
segNormalBatch = Variable( seg_cpu ).cuda()
seg_cpu = dataBatch['segDepth']
segDepthBatch = Variable(seg_cpu ).cuda()
# Load the image from cpu to gpu
im_cpu = (dataBatch['im'] )
imBatch = Variable(im_cpu ).cuda()
if opt.cascadeLevel > 0:
albedoPre_cpu = dataBatch['albedoPre']
albedoPreBatch = Variable(albedoPre_cpu ).cuda()
normalPre_cpu = dataBatch['normalPre']
normalPreBatch = Variable(normalPre_cpu ).cuda()
roughPre_cpu = dataBatch['roughPre']
roughPreBatch = Variable(roughPre_cpu ).cuda()
depthPre_cpu = dataBatch['depthPre']
depthPreBatch = Variable(depthPre_cpu ).cuda()
diffusePre_cpu = dataBatch['diffusePre']
diffusePreBatch = Variable(diffusePre_cpu ).cuda()
specularPre_cpu = dataBatch['specularPre']
specularPreBatch = Variable(specularPre_cpu ).cuda()
if albedoPreBatch.size(2) < opt.imHeight or albedoPreBatch.size(3) < opt.imWidth:
albedoPreBatch = F.interpolate(albedoPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if normalPreBatch.size(2) < opt.imHeight or normalPreBatch.size(3) < opt.imWidth :
normalPreBatch = F.interpolate(normalPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if roughPreBatch.size(2) < opt.imHeight or roughPreBatch.size(3) < opt.imWidth :
roughPreBatch = F.interpolate(roughPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if depthPreBatch.size(2) < opt.imHeight or depthPreBatch.size(3) < opt.imWidth :
depthPreBatch = F.interpolate(depthPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
# Regress the diffusePred and specular Pred
envRow, envCol = diffusePreBatch.size(2), diffusePreBatch.size(3)
imBatchSmall = F.adaptive_avg_pool2d(imBatch, (envRow, envCol) )
diffusePreBatch, specularPreBatch = models.LSregressDiffSpec(
diffusePreBatch.detach(),
specularPreBatch.detach(),
imBatchSmall,
diffusePreBatch, specularPreBatch )
if diffusePreBatch.size(2) < opt.imHeight or diffusePreBatch.size(3) < opt.imWidth:
diffusePreBatch = F.interpolate(diffusePreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if specularPreBatch.size(2) < opt.imHeight or specularPreBatch.size(3) < opt.imWidth:
specularPreBatch = F.interpolate(specularPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
# Normalize Albedo and depth
bn, ch, nrow, ncol = albedoPreBatch.size()
albedoPreBatch = albedoPreBatch.view(bn, -1)
albedoPreBatch = albedoPreBatch / torch.clamp(torch.mean(albedoPreBatch, dim=1), min=1e-10).unsqueeze(1) / 3.0
albedoPreBatch = albedoPreBatch.view(bn, ch, nrow, ncol)
bn, ch, nrow, ncol = depthPreBatch.size()
depthPreBatch = depthPreBatch.view(bn, -1)
depthPreBatch = depthPreBatch / torch.clamp(torch.mean(depthPreBatch, dim=1), min=1e-10).unsqueeze(1) / 3.0
depthPreBatch = depthPreBatch.view(bn, ch, nrow, ncol)
########################################################
# Build the cascade network architecture #
if opt.cascadeLevel == 0:
inputBatch = imBatch
elif opt.cascadeLevel > 0:
inputBatch = torch.cat([imBatch, albedoPreBatch,
normalPreBatch, roughPreBatch, depthPreBatch,
diffusePreBatch, specularPreBatch ], dim=1)
# Initial Prediction
x1, x2, x3, x4, x5, x6 = encoder(inputBatch )
albedoPred = 0.5 * (albedoDecoder(imBatch, x1, x2, x3, x4, x5, x6) + 1)
normalPred = normalDecoder(imBatch, x1, x2, x3, x4, x5, x6)
roughPred = roughDecoder(imBatch, x1, x2, x3, x4, x5, x6)
depthPred = 0.5 * (depthDecoder(imBatch, x1, x2, x3, x4, x5, x6) + 1)
normalPred = F.interpolate(normalPred, [normalBatch.size(2), normalBatch.size(3)], mode='bilinear')
depthPred = F.interpolate(depthPred, [depthBatch.size(2), depthBatch.size(3)], mode='bilinear')
depthPred = models.LSregress(depthPred.detach() * segDepthBatch.expand_as(depthPred),
depthBatch * segDepthBatch.expand_as(depthBatch), depthPred)
## Compute Errors
pixelAllNumNormal = (torch.sum(segNormalBatch ).cpu().data).item()
normalErr = torch.sum( (normalPred - normalBatch)
* (normalPred - normalBatch) * segNormalBatch.expand_as(normalBatch) ) / pixelAllNumNormal / 3.0
pixelAllNumDepth = (torch.sum(segDepthBatch ).cpu().data).item()
depthErr = torch.sum( (torch.log(depthPred + 0.1) - torch.log(depthBatch + 0.1 ) )
* ( torch.log(depthPred + 0.1) - torch.log(depthBatch + 0.1) ) * segDepthBatch.expand_as(depthBatch ) ) / pixelAllNumDepth
angleMean = torch.sum(torch.acos( torch.clamp(torch.sum(normalPred * normalBatch, dim=1).unsqueeze(1), -1, 1) ) / np.pi * 180 * segNormalBatch) / pixelAllNumNormal
normalPred_np = normalPred.data.cpu().numpy()
normalBatch_np = normalBatch.data.cpu().numpy()
segNormalBatch_np = segNormalBatch.cpu().numpy()
theta = np.arccos( np.clip(np.sum(normalPred_np * normalBatch_np, axis=1)[:, np.newaxis, :, :], -1, 1) ) / np.pi * 180
angleMean_np = (theta * segNormalBatch_np ) / pixelAllNumNormal
return [albedoPred, None], [normalPred, normalErr, angleMean], \
[roughPred, None ], [depthPred, depthErr], \
| 5,724
| 0
| 24
|
5108e36e1b24e0722a49cfe5996dd84987d18722
| 1,337
|
py
|
Python
|
ex105.py
|
ArthurCorrea/python-exercises
|
0c2ac46b8c40dd9868b132e847cfa42e025095e3
|
[
"MIT"
] | null | null | null |
ex105.py
|
ArthurCorrea/python-exercises
|
0c2ac46b8c40dd9868b132e847cfa42e025095e3
|
[
"MIT"
] | null | null | null |
ex105.py
|
ArthurCorrea/python-exercises
|
0c2ac46b8c40dd9868b132e847cfa42e025095e3
|
[
"MIT"
] | null | null | null |
# Write a program with a notas() function that can receive any number of
# student grades and returns a dictionary with the following information:
# - Number of grades;
# - The highest grade;
# - The lowest grade;
# - The class average;
# - The overall standing (optional);
# Also add the function's docstrings.
def notas(show=False):
"""
    :param show: if True, also report the class standing; if False, omit it
    :return: no return value
"""
somanotas = 0
d = dict()
lista = list()
qtdvalores = 0
while True:
n1 = float(input(f'Nota do aluno {qtdvalores}: '))
somanotas += n1
lista.append(n1)
qtdvalores += 1
d['Qtd notas'] = qtdvalores
resp = str(input('Quer continuar: [S/N] ')).upper().strip()[0]
while resp != 'S' and resp != 'N':
resp = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
if resp == 'N':
break
d['Maior nota'] = max(lista)
d['Menor nota'] = min(lista)
d['Média da turma'] = somanotas / qtdvalores
if show:
if d['Média da turma'] < 5:
d['Situação'] = 'Ruim'
elif 5 <= d['Média da turma'] < 7:
d['Situação'] = 'Razoável'
else:
d['Situação'] = 'Boa'
print(d)
else:
print(d)
notas()
notas(show=True)
| 27.854167
| 84
| 0.554226
|
# Write a program with a notas() function that can receive any number of
# student grades and returns a dictionary with the following information:
# - Number of grades;
# - The highest grade;
# - The lowest grade;
# - The class average;
# - The overall standing (optional);
# Also add the function's docstrings.
def notas(show=False):
"""
    :param show: if True, also report the class standing; if False, omit it
    :return: no return value
"""
somanotas = 0
d = dict()
lista = list()
qtdvalores = 0
while True:
n1 = float(input(f'Nota do aluno {qtdvalores}: '))
somanotas += n1
lista.append(n1)
qtdvalores += 1
d['Qtd notas'] = qtdvalores
resp = str(input('Quer continuar: [S/N] ')).upper().strip()[0]
while resp != 'S' and resp != 'N':
resp = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
if resp == 'N':
break
d['Maior nota'] = max(lista)
d['Menor nota'] = min(lista)
d['Média da turma'] = somanotas / qtdvalores
if show:
if d['Média da turma'] < 5:
d['Situação'] = 'Ruim'
elif 5 <= d['Média da turma'] < 7:
d['Situação'] = 'Razoável'
else:
d['Situação'] = 'Boa'
print(d)
else:
print(d)
notas()
notas(show=True)
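
# --- Editorial sketch (not part of the original file) -------------------------
# A non-interactive variant of the same statistics, handy for exercising the
# dictionary-building logic without input(). The keys mirror the ones used by
# notas() above; the sample grades are made up for the example.
def notas_from_list(grades, show=False):
    d = {
        'Qtd notas': len(grades),
        'Maior nota': max(grades),
        'Menor nota': min(grades),
        'Média da turma': sum(grades) / len(grades),
    }
    if show:
        media = d['Média da turma']
        d['Situação'] = 'Ruim' if media < 5 else 'Razoável' if media < 7 else 'Boa'
    return d

# notas_from_list([7.5, 5.0, 9.0], show=True)
# -> {'Qtd notas': 3, 'Maior nota': 9.0, 'Menor nota': 5.0,
#     'Média da turma': 7.166666666666667, 'Situação': 'Boa'}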
| 0
| 0
| 0
|
78baa060484aee5b5791c697ecb50180a17dcae1
| 15,613
|
py
|
Python
|
core/models/group.py
|
agolibroda/PyTorWiki
|
678a2ae13d0027c61af36e61b72e4e54493a29ac
|
[
"Apache-2.0"
] | null | null | null |
core/models/group.py
|
agolibroda/PyTorWiki
|
678a2ae13d0027c61af36e61b72e4e54493a29ac
|
[
"Apache-2.0"
] | null | null | null |
core/models/group.py
|
agolibroda/PyTorWiki
|
678a2ae13d0027c61af36e61b72e4e54493a29ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# Copyright 2016 Alec Goliboda
#
# group.py
from __future__ import print_function
import logging
import json
import zlib
# import markdown
from datetime import datetime
import tornado.options
# import pymysql
import hashlib
import bcrypt
import base64
# from _overlapped import NULL
##############
import config
from . import Model, CipherWrapper
from .. import WikiException
from core.models.author import Author
from ..constants.data_base import *
# from core.models.template import Template
from ..constants.data_base import *
class Group(Model):
"""
    Model for a Group.
    A group contains:
    - a list of members
    - a library
    Viewing:
    - the list of all groups
    - a single group with its description
    - the list of the group's members
    - the list of articles (the library)
    Actions:
    - create groups
    - "delete groups" - this still needs a "group deleted" flag
    - add (remove) group members
        - by invitation - an invitation list is needed, i.e. every author may have a list of "join this group" invitations
        - a list of join requests is needed - this is the group admin's "pending join requests" tool
    - add (remove) articles in the library
        - an article in the library carries a flag:
            "pbl" - readable by everyone
            "grp" - group-only; such articles are PRIVATE
    Group visibility (group_status)
    - public - 'pbl' - any visitor may read the group's public materials
    - closed - 'shut' - still undecided; closed groups may not be allowed to have "public articles"
    Creating a new group:
    When a new group is created, its Creator becomes its Administrator.
    The group-creation record is stored in the "dt_headers" and "groups" tables.
    The Administrator's membership record is added to the "members" table.
    Key handling:
    A unique RSA key pair is generated.
    The public key is stored in the group header,
    the personal copy is stored in the "members" list.
    The group's private key is encrypted with the Creator's public key
    and stored in the corresponding field of the "members" table.
    When a group member opens the group page (switches to the group's workspace),
    that member's copy of the group private key is added to the member's profile;
    after that the user can read and edit every article in the group library flagged "grp".
"""
def get(self, groupId):
"""
        Load a SINGLE record, by group ID.
"""
resList = self.select(
            'dt_headers.dt_header_id, group_title, group_annotation ' ,  # string - the columns to fetch from the select
            'dt_headers', #'authors',  # string - the list of tables
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id AND dt_headers.dt_header_id = " + str(groupId)
            }  # all remaining sections of the select
)
# for item in resList:
# logging.info('Author:: get:: resList = ' + str(item))
if len(resList) == 1:
# return resList[0]
objValuesNameList = list(resList[0].__dict__.keys())
for objValue in objValuesNameList:
if objValue.find('_') != 0:
self.__setattr__(objValue,resList[0].__getattribute__(objValue) )
return self
else:
raise WikiException(LOAD_ONE_VALUE_ERROR)
def list(self):
"""
        Load the list of all groups.
"""
resList = self.select(
            'dt_headers.dt_header_id, group_title, group_annotation, group_status ' ,  # string - the columns to fetch from the select
            'dt_headers', #'authors',  # string - the list of tables
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id "
            }  # all remaining sections of the select
)
# logging.info('Author:: get:: resList = ')
# logging.info(resList)
return resList
def grouplistForAutor(self, authorId):
"""
        Get the list of groups for one author: every group the AUTHOR created
        and every group in which the AUTHOR is a member.
        This will probably need revisiting, so that only PUBLIC groups are shown,
        and/or private groups in which both the viewer and the author take part.
"""
try:
resList = self.select(
' DISTINCT dt_headers.dt_header_id, groups.group_title, groups.group_annotation, groups.group_status, ' +
                ' members.member_role_type ' ,  # string - the columns to fetch from the select
                ' members, dt_headers ', #'authors',  # string - the list of tables
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id AND " +
" members.author_id = " + str(authorId) +
" AND members.group_id = groups.dt_header_id ",
'orderStr': ' groups.group_title '
                }  # all remaining sections of the select
)
# logging.info( 'grouplistForAutor:: resList = ' + str(resList))
return resList
        except Exception as e:
        # except WikiException as e:
            # WikiException( ARTICLE_NOT_FOUND )
            logging.info( 'grouplistForAutor::Have ERROR!!! ' + str(e))
            # The original handler referenced an undefined `article` variable and
            # tornado.web is not imported here; re-raise so the caller sees the
            # underlying error instead of a NameError.
            raise
def getGroupArticleList(self, groupId):
"""
        Get the list of all articles of one group.
"""
libControl = self.Library ()
return libControl.getGroupArticleList( groupId)
def getGroupMembersleList(self, groupId):
"""
        Get the list of all members of one group.
"""
memberControl = self.Member ()
return memberControl.getGroupMembersleList( groupId)
def save(self, authorId ):
"""
        Save the group.
        The user who created the group must be recorded not only as the group's
        author, but also in "members" - and as its ADMIN.
"""
bbsalt = config.options.salt.encode()
cip = CipherWrapper()
logging.info(' save:: before SAVE = ' + str(self))
if self.dt_header_id == 0:
# self.group_create_date = datetime.now()
operationFlag = 'I'
autotControl = Author()
creator = autotControl.get(authorId)
            cip.rsaInit()  # generate a key pair
self.public_key = cip.rsaPubSerialiation(cip.getPublicKey())
            pKey = cip.getPrivateKey()  # get the not-yet-encrypted private key
            # self.private_key_hash = bcrypt.hashpw(cip.rsaPrivateSerialiation(pKey), bbsalt).decode('utf-8')  # keep a hash of the private key for later verification during password recovery
# logging.info(' save:: before SAVE creator.publicKey() = ' + str(creator.publicKey()))
pkTmp = cip.rsaEncrypt(creator.publicKey(), cip.rsaPrivateSerialiation(pKey))
# logging.info(' save:: before SAVE pkTmp = ' + str(pkTmp))
self.private_key = pkTmp
else:
operationFlag = 'U'
self.begin()
revisions_sha_hash_sou = str(self.group_title) + str(self.group_annotation) + str(self.group_status)
# self.dt_header_id =
Model.save(self, authorId, operationFlag, revisions_sha_hash_sou )
# теперь сохранить автора группы как ее админа.
# logging.info(' SAVE:: GROUPPPPP authorId = ' + str(authorId))
# logging.info(' SAVE:: GROUPPPPP 2 = ' + str(self))
if operationFlag == 'I':
memberControl = self.Member()
memberControl.author_id = authorId
memberControl.group_id = self.dt_header_id
memberControl.member_role_type = 'A'
memberControl.private_key = self.private_key
# bbWrk = (bytePass+bbsalt)[0:32]
            # cipher_aes = AES.new(bbWrk, AES.MODE_EAX)  # encrypt the private key with the user's password.
# ciphertext = cipher_aes.encrypt(pKey)
# self.private_key = pickle.dumps({'cipherKey': ciphertext, 'nonce': cipher_aes.nonce})
memberControl.save(authorId)
self.commit()
return True
def librarySave(self, authorId = 0, groupId = 0, article_id=0, library_permission_type = 'W'):
"""
        Add an article to the group.
"""
libControl = self.Library(groupId, authorId, library_permission_type)
libControl.save(authorId)
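
# --- Editorial sketch (not part of the original file) -------------------------
# The class docstring above describes the key handling: a group RSA pair is
# generated and the group's private key is stored encrypted under the creator's
# public key. The function below illustrates that idea with the `cryptography`
# package instead of the project's CipherWrapper (an assumption for exposition).
# Because RSA-OAEP can only encrypt short payloads, the private key PEM is
# wrapped hybrid-style: a symmetric Fernet key encrypts the PEM and only that
# Fernet key is RSA-encrypted.
def _illustrate_group_key_wrapping():
    from cryptography.fernet import Fernet
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import padding as rsa_padding, rsa

    group_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    group_private_pem = group_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    creator_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

    wrap_key = Fernet.generate_key()                              # symmetric key
    encrypted_group_private = Fernet(wrap_key).encrypt(group_private_pem)
    wrapped_key = creator_key.public_key().encrypt(               # RSA-wrap the Fernet key
        wrap_key,
        rsa_padding.OAEP(mgf=rsa_padding.MGF1(algorithm=hashes.SHA256()),
                         algorithm=hashes.SHA256(), label=None),
    )
    # `encrypted_group_private` and `wrapped_key` are what would be stored in
    # the "members" row for the creator.
    return encrypted_group_private, wrapped_key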
| 40.343669
| 193
| 0.547172
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# Copyright 2016 Alec Goliboda
#
# group.py
from __future__ import print_function
import logging
import json
import zlib
# import markdown
from datetime import datetime
import tornado.options
# import pymysql
import hashlib
import bcrypt
import base64
# from _overlapped import NULL
##############
import config
from . import Model, CipherWrapper
from .. import WikiException
from core.models.author import Author
from ..constants.data_base import *
# from core.models.template import Template
from ..constants.data_base import *
class Group(Model):
"""
    Model for a Group.
    A group contains:
    - a list of members
    - a library
    Viewing:
    - the list of all groups
    - a single group with its description
    - the list of the group's members
    - the list of articles (the library)
    Actions:
    - create groups
    - "delete groups" - this still needs a "group deleted" flag
    - add (remove) group members
        - by invitation - an invitation list is needed, i.e. every author may have a list of "join this group" invitations
        - a list of join requests is needed - this is the group admin's "pending join requests" tool
    - add (remove) articles in the library
        - an article in the library carries a flag:
            "pbl" - readable by everyone
            "grp" - group-only; such articles are PRIVATE
    Group visibility (group_status)
    - public - 'pbl' - any visitor may read the group's public materials
    - closed - 'shut' - still undecided; closed groups may not be allowed to have "public articles"
    Creating a new group:
    When a new group is created, its Creator becomes its Administrator.
    The group-creation record is stored in the "dt_headers" and "groups" tables.
    The Administrator's membership record is added to the "members" table.
    Key handling:
    A unique RSA key pair is generated.
    The public key is stored in the group header,
    the personal copy is stored in the "members" list.
    The group's private key is encrypted with the Creator's public key
    and stored in the corresponding field of the "members" table.
    When a group member opens the group page (switches to the group's workspace),
    that member's copy of the group private key is added to the member's profile;
    after that the user can read and edit every article in the group library flagged "grp".
"""
def __init__(self, group_title = '', group_annotation = '', group_status = 'pbl'):
Model.__init__(self)
self.dt_header_id = 0
# self.author_id = 0
self.group_title = group_title
self.group_annotation = group_annotation
self.group_status = group_status
self.public_key = ''
self.private_key = ''
self.private_key_hash = ''
# self.group_create_date = datetime.now()
self.setDataStruct(Model.TableDef( tabName='groups',
idFieldName=None,
mainPrimaryList =['dt_header_id'],
listAttrNames=['dt_header_id', 'group_title', 'group_annotation', 'group_status']))
self.setHeadStruct(Model.TableDef( tabName='dt_headers',
idFieldName='dt_header_id',
mainPrimaryList =['dt_header_id'],
listAttrNames=['dt_header_type', 'public_key']))
class Member(Model):
def __init__(self):
Model.__init__(self)
self.group_id = 0
self.author_id = 0
self.member_role_type = 'M'
self.setDataStruct(Model.TableDef( tabName='members',
idFieldName=None,
mainPrimaryList =None,
listAttrNames=['group_id', 'author_id', 'member_role_type', 'private_key']))
def save(self, authorId ):
operationFlag = 'I'
revisions_sha_hash_sou = str(self.group_id) + str(self.author_id) + self.member_role_type
logging.info(' Member save:: self = ' + str(self))
Model.save(self, authorId, operationFlag, revisions_sha_hash_sou)
def getGroupMembersleList(self, groupId):
"""
            Get the list of all members of one group.
"""
getRez = self.select(
'dt_headers.dt_header_id, author_name, author_surname, author_role, author_phon, author_email, author_create, dt_headers.public_key ',
'authors, dt_headers',
{
'whereStr': " members.group_id = authors.dt_header_id AND dt_headers.dt_header_id = authors.dt_header_id AND " +\
" members.actual_flag = 'A' AND authors.actual_flag = 'A' AND "
" members.group_id = " + str(groupId) , # строка набор условий для выбора строк
'orderStr': ' author_name, author_surname ', # строка порядок строк
}
)
# 'whereStr': " groups.author_id = authors.author_id AND groups.group_id = " + str(group_id)
# logging.info( 'getGroupMembersleList:: getRez = ' + str(getRez))
if len(getRez) == 0:
# raise WikiException( ARTICLE_NOT_FOUND )
return []
authorList = []
author = Author()
for autorStruct in getRez:
authorList.append(author.parsingAuthor(self, autorStruct))
return authorList
class Library(Model):
def __init__(self, groupId = 0, articleId=0, libraryPermissionType = 'W' ):
Model.__init__(self)
self.group_id = groupId
self.article_id = articleId
self.library_permission_type = libraryPermissionType
self.setDataStruct(Model.TableDef( tabName='librarys',
idFieldName=None,
mainPrimaryList =['group_id','article_id' ],
listAttrNames=['group_id', 'article_id', 'library_permission_type']))
def save(self, autorId):
operationFlag = 'I'
revisionsShaHashSou = str(self.group_id) + str(self.article_id) + self.library_permission_type
# logging.info(' Library save:: self = ' + str(self))
Model.save(self, autorId, operationFlag, revisionsShaHashSou)
# self.dt_header_id = Model.save(self, self.dt_header_id, operationFlag, sha_hash_sou)
def getGroupArticleList(self, groupId):
"""
Get the list of all articles of one group
"""
getRez = self.select(
' articles.article_id, articles.article_title, articles.article_link, ' +
' articles.article_annotation, articles.article_category_id, ' +
' articles.article_template_id, ' +
' null AS group_title, null AS group_annotation, librarys.group_id AS group_id, librarys.library_permission_type ',
'articles',
{
'whereStr': " librarys.article_id = articles.article_id AND " +\
" articles.actual_flag = 'A' AND librarys.actual_flag = 'A' AND " +\
" librarys.group_id = " + str(groupId) , # строка набор условий для выбора строк
'orderStr': ' articles.article_id ', # строка порядок строк
}
)
# 'whereStr': " groups.dt_header_id = authors.dt_header_id AND groups.group_id = " + str(group_id)
# for item in getRez:
# logging.info( 'getGroupArticleList:: getRez = ' + str(item))
if len(getRez) == 0:
# raise WikiException( ARTICLE_NOT_FOUND )
return []
return getRez
def get(self, groupId):
"""
load ONE value - by group ID
"""
resList = self.select(
'dt_headers.dt_header_id, group_title, group_annotation ' , # string - what we want to get from the select
'dt_headers', #'authors', # string - list of tables
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id AND dt_headers.dt_header_id = " + str(groupId)
} # all remaining sections of the select
)
# for item in resList:
# logging.info('Author:: get:: resList = ' + str(item))
if len(resList) == 1:
# return resList[0]
objValuesNameList = list(resList[0].__dict__.keys())
for objValue in objValuesNameList:
if objValue.find('_') != 0:
self.__setattr__(objValue,resList[0].__getattribute__(objValue) )
return self
else:
raise WikiException(LOAD_ONE_VALUE_ERROR)
def list(self):
"""
load the list of all groups
"""
resList = self.select(
'dt_headers.dt_header_id, group_title, group_annotation, group_status ' , # string - what we want to get from the select
'dt_headers', #'authors', # string - list of tables
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id "
} # all remaining sections of the select
)
# logging.info('Author:: get:: resList = ')
# logging.info(resList)
return resList
def grouplistForAutor(self, authorId):
"""
Get the list of groups for one author - all groups that the AUTHOR created
and in which the AUTHOR is a member.
This part will probably need to be reworked later -
to show only PUBLIC groups, and/or private groups
in which both the viewer and the author take part
"""
try:
resList = self.select(
' DISTINCT dt_headers.dt_header_id, groups.group_title, groups.group_annotation, groups.group_status, ' +
' members.member_role_type ' , # string - what we want to get from the select
' members, dt_headers ', #'authors', # string - list of tables
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id AND " +
" members.author_id = " + str(authorId) +
" AND members.group_id = groups.dt_header_id ",
'orderStr': ' groups.group_title '
} # all remaining sections of the select
)
# logging.info( 'grouplistForAutor:: resList = ' + str(resList))
return resList
except Exception as e:
# except WikiException as e:
# WikiException( ARTICLE_NOT_FOUND )
logging.info( 'grouplistForAutor::Have ERROR!!! ' + str(e))
if not article: raise tornado.web.HTTPError(404)
else: return (article, [])
def getGroupArticleList(self, groupId):
"""
Get the list of all articles of one group
"""
libControl = self.Library ()
return libControl.getGroupArticleList( groupId)
def getGroupMembersleList(self, groupId):
"""
Get the list of all Members of one group
"""
memberControl = self.Member ()
return memberControl.getGroupMembersleList( groupId)
def save(self, authorId ):
"""
save the group;
the user who created the group must be added not only as the group's author,
but also to "members" - and as an ADMIN at that!!!
"""
bbsalt = config.options.salt.encode()
cip = CipherWrapper()
logging.info(' save:: before SAVE = ' + str(self))
if self.dt_header_id == 0:
# self.group_create_date = datetime.now()
operationFlag = 'I'
autotControl = Author()
creator = autotControl.get(authorId)
cip.rsaInit() # generate the key pair
self.public_key = cip.rsaPubSerialiation(cip.getPublicKey())
pKey = cip.getPrivateKey() # get the not-yet-encrypted private key
# self.private_key_hash = bcrypt.hashpw(cip.rsaPrivateSerialiation(pKey), bbsalt).decode('utf-8') # keep a hash of the private key - for later verification during password recovery
# logging.info(' save:: before SAVE creator.publicKey() = ' + str(creator.publicKey()))
pkTmp = cip.rsaEncrypt(creator.publicKey(), cip.rsaPrivateSerialiation(pKey))
# logging.info(' save:: before SAVE pkTmp = ' + str(pkTmp))
self.private_key = pkTmp
else:
operationFlag = 'U'
self.begin()
revisions_sha_hash_sou = str(self.group_title) + str(self.group_annotation) + str(self.group_status)
# self.dt_header_id =
Model.save(self, authorId, operationFlag, revisions_sha_hash_sou )
# now save the group's author as its admin.
# logging.info(' SAVE:: GROUPPPPP authorId = ' + str(authorId))
# logging.info(' SAVE:: GROUPPPPP 2 = ' + str(self))
if operationFlag == 'I':
memberControl = self.Member()
memberControl.author_id = authorId
memberControl.group_id = self.dt_header_id
memberControl.member_role_type = 'A'
memberControl.private_key = self.private_key
# bbWrk = (bytePass+bbsalt)[0:32]
# cipher_aes = AES.new(bbWrk, AES.MODE_EAX) # lock the private key with the user's password.
# ciphertext = cipher_aes.encrypt(pKey)
# self.private_key = pickle.dumps({'cipherKey': ciphertext, 'nonce': cipher_aes.nonce})
memberControl.save(authorId)
self.commit()
return True
def librarySave(self, authorId = 0, groupId = 0, article_id=0, library_permission_type = 'W'):
"""
Add an article to the group
"""
libControl = self.Library(groupId, article_id, library_permission_type)
libControl.save(authorId)
| 2,684
| 3,344
| 99
|
70b6c80341def36320aeb56eea498bea8fda840e
| 4,327
|
py
|
Python
|
spug_api/libs/parser.py
|
atompi/spug
|
88ebd46e47c88731b40cb82a6c7a360511b703fa
|
[
"MIT"
] | null | null | null |
spug_api/libs/parser.py
|
atompi/spug
|
88ebd46e47c88731b40cb82a6c7a360511b703fa
|
[
"MIT"
] | null | null | null |
spug_api/libs/parser.py
|
atompi/spug
|
88ebd46e47c88731b40cb82a6c7a360511b703fa
|
[
"MIT"
] | null | null | null |
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the AGPL-3.0 License.
import json
from .utils import AttrDict
# Custom parsing exception
# Argument object to be validated
class Argument(object):
"""
:param name: name of option
:param default: default value if the argument is absent
:param bool required: is required
"""
# Parser base class
# JSON parser
| 33.030534
| 105
| 0.554657
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the AGPL-3.0 License.
import json
from .utils import AttrDict
# Custom parsing exception
class ParseError(BaseException):
def __init__(self, message):
self.message = message
# Argument object to be validated
class Argument(object):
"""
:param name: name of option
:param default: default value if the argument is absent
:param bool required: is required
"""
def __init__(self, name, default=None, handler=None, required=True, type=str, filter=None, help=None,
nullable=False):
self.name = name
self.default = default
self.type = type
self.required = required
self.nullable = nullable
self.filter = filter
self.help = help
self.handler = handler
if not isinstance(self.name, str):
raise TypeError('Argument name must be string')
if filter and not callable(self.filter):
raise TypeError('Argument filter is not callable')
def parse(self, has_key, value):
if not has_key:
if self.required and self.default is None:
raise ParseError(
self.help or 'Required Error: %s is required' % self.name)
else:
return self.default
elif value in [u'', '', None]:
if self.default is not None:
return self.default
elif not self.nullable and self.required:
raise ParseError(
self.help or 'Value Error: %s must not be null' % self.name)
else:
return None
try:
if self.type:
if self.type in (list, dict) and isinstance(value, str):
value = json.loads(value)
assert isinstance(value, self.type)
elif self.type == bool and isinstance(value, str):
assert value.lower() in ['true', 'false']
value = value.lower() == 'true'
elif not isinstance(value, self.type):
value = self.type(value)
except (TypeError, ValueError, AssertionError):
raise ParseError(self.help or 'Type Error: %s type must be %s' % (
self.name, self.type))
if self.filter:
if not self.filter(value):
raise ParseError(
self.help or 'Value Error: %s filter check failed' % self.name)
if self.handler:
value = self.handler(value)
return value
# Parser base class
class BaseParser(object):
def __init__(self, *args):
self.args = []
for e in args:
if isinstance(e, str):
e = Argument(e)
elif not isinstance(e, Argument):
raise TypeError('%r is not instance of Argument' % e)
self.args.append(e)
def _get(self, key):
raise NotImplementedError
def _init(self, data):
raise NotImplementedError
def add_argument(self, **kwargs):
self.args.append(Argument(**kwargs))
def parse(self, data=None, clear=False):
rst = AttrDict()
try:
self._init(data)
for e in self.args:
has_key, value = self._get(e.name)
if clear and has_key is False and e.required is False:
continue
rst[e.name] = e.parse(has_key, value)
except ParseError as err:
return None, err.message
return rst, None
# JSON parser
class JsonParser(BaseParser):
def __init__(self, *args):
self.__data = None
super(JsonParser, self).__init__(*args)
def _get(self, key):
return key in self.__data, self.__data.get(key)
def _init(self, data):
try:
if isinstance(data, (str, bytes)):
data = data.decode('utf-8') if isinstance(data, bytes) else data  # only bytes need decoding; calling decode on str would raise AttributeError
self.__data = json.loads(data) if data else {}
else:
assert hasattr(data, '__contains__')
assert hasattr(data, 'get')
assert callable(data.get)
self.__data = data
except (ValueError, AssertionError):
raise ParseError('Invalid data type for parse')
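# A minimal usage sketch of the parser above (field names are made up for
# illustration); AttrDict behaves like a dict here:
#
#   parser = JsonParser(Argument('name'), Argument('age', type=int, required=False))
#   form, error = parser.parse('{"name": "alice", "age": "30"}')
#   # -> form['name'] == 'alice', form['age'] == 30, error is None
#   form, error = parser.parse('{}')
#   # -> form is None, error == 'Required Error: name is required'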
| 3,534
| 23
| 360
|
ca3c214980bb966e02bee0584e6a700a068fc2b7
| 3,954
|
py
|
Python
|
mBugTranslations/Chrome.py
|
SkyLined/mBugId
|
781bfe9a120e55630a91ce1e86b39ad0dee031ec
|
[
"CC-BY-4.0"
] | 22
|
2016-08-11T14:50:55.000Z
|
2021-06-06T09:39:26.000Z
|
mBugTranslations/Chrome.py
|
SkyLined/mBugId
|
781bfe9a120e55630a91ce1e86b39ad0dee031ec
|
[
"CC-BY-4.0"
] | 19
|
2016-09-07T05:54:40.000Z
|
2020-07-02T07:46:38.000Z
|
mBugTranslations/Chrome.py
|
SkyLined/mBugId
|
781bfe9a120e55630a91ce1e86b39ad0dee031ec
|
[
"CC-BY-4.0"
] | 11
|
2016-09-03T22:42:50.000Z
|
2018-10-01T18:28:59.000Z
|
import re;
from .cBugTranslation import cBugTranslation;
aoBugTranslations = [
# ASan build related -> Ignored
cBugTranslation(
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".*!`anonymous namespace'::Create", # Part of skia
rb".*!base::debug::BreakDebugger",
rb".*!base::debug::CollectGDIUsageAndDie",
rb".*!blink::ReportFatalErrorInMainThread",
rb".*!blink::V8ScriptRunner::CallExtraOrCrash(<.+>)?",
rb".*!crash_reporter::internal::CrashForExceptionInNonABICompliantCodeRange",
rb".*!CrashForException_ExportThunk",
rb".*!crashpad::`anonymous namespace'::UnhandledExceptionHandler",
rb".*!crashpad::CrashpadClient::DumpAndCrash",
rb".*!raise",
rb".*!sk_abort_no_print",
rb".*!SkMallocPixelRef::MakeUsing",
rb".*!v8::Utils::ApiCheck",
rb".*!WTF::Deque<.+>::ExpandCapacity(IfNeeded)",
rb".*!WTF::Deque<.+>::push_back",
],
),
# Breakpoint -> Ignored
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!__sanitizer_cov",
],
s0zTranslatedBugTypeId = None, # This is apparently triggered by ASAN builds to determine EIP/RIP.
s0zTranslatedBugDescription = None,
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!base::internal::SchedulerWorkerPoolImpl::Start", # CHECK() on thread start
rb".*!base::PartitionRecommitSystemPages",
rb".*!blink::MemoryRegion::Commit",
rb".*!content::`anonymous namespace'::CrashOnMapFailure",
rb".*!skia::CreateHBitmap",
rb".*!ui::ClientGpuMemoryBufferManager::ClientGpuMemoryBufferManager", # std::vector throws breakpoint
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application triggered a breakpoint to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!blink::reportFatalErrorInMainThread",
rb".*!v8::Utils::ReportApiFailure",
rb".*!logging::LogMessage::~LogMessage",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered an exception to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# AVW@NULL -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"AVW@NULL",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::win::`anonymous namespace'::ForceCrashOnSigAbort",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered a NULL pointer access violation to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# Various -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"0xE0000008|Assert|AVW@NULL", # 0xE0000008 (win::kOomExceptionCode) -> OOM
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!(?:base|WTF)::[Pp]artitions?(?:ExcessiveAllocationSize|OutOfMemory(Using\w+)?)",
rb".*!blink::(?:BlinkGCOutOfMemory|ReportOOMErrorInMainThread)",
rb".*!FX_OutOfMemoryTerminate",
rb".*!SkBitmap::allocPixels",
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application caused an access violation by writing to NULL to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# OOM -> hide irrelevant frames
cBugTranslation(
srzOriginalBugTypeId = r"OOM",
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".+!(.+::)?(Win)?CallNewHandler",
rb".+!(.+::)?\w+_malloc(_\w+)?",
rb".+!(.+::)?\w*(Alloc|alloc|OutOfMemory)\w*(<.+>)?",
],
),
];
| 40.762887
| 151
| 0.686899
|
import re;
from .cBugTranslation import cBugTranslation;
aoBugTranslations = [
# ASan build related -> Ignored
cBugTranslation(
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".*!`anonymous namespace'::Create", # Part of skia
rb".*!base::debug::BreakDebugger",
rb".*!base::debug::CollectGDIUsageAndDie",
rb".*!blink::ReportFatalErrorInMainThread",
rb".*!blink::V8ScriptRunner::CallExtraOrCrash(<.+>)?",
rb".*!crash_reporter::internal::CrashForExceptionInNonABICompliantCodeRange",
rb".*!CrashForException_ExportThunk",
rb".*!crashpad::`anonymous namespace'::UnhandledExceptionHandler",
rb".*!crashpad::CrashpadClient::DumpAndCrash",
rb".*!raise",
rb".*!sk_abort_no_print",
rb".*!SkMallocPixelRef::MakeUsing",
rb".*!v8::Utils::ApiCheck",
rb".*!WTF::Deque<.+>::ExpandCapacity(IfNeeded)",
rb".*!WTF::Deque<.+>::push_back",
],
),
# Breakpoint -> Ignored
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!__sanitizer_cov",
],
s0zTranslatedBugTypeId = None, # This is apparently triggered by ASAN builds to determine EIP/RIP.
s0zTranslatedBugDescription = None,
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!base::internal::SchedulerWorkerPoolImpl::Start", # CHECK() on thread start
rb".*!base::PartitionRecommitSystemPages",
rb".*!blink::MemoryRegion::Commit",
rb".*!content::`anonymous namespace'::CrashOnMapFailure",
rb".*!skia::CreateHBitmap",
rb".*!ui::ClientGpuMemoryBufferManager::ClientGpuMemoryBufferManager", # std::vector throws breakpoint
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application triggered a breakpoint to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!blink::reportFatalErrorInMainThread",
rb".*!v8::Utils::ReportApiFailure",
rb".*!logging::LogMessage::~LogMessage",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered an exception to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# AVW@NULL -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"AVW@NULL",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::win::`anonymous namespace'::ForceCrashOnSigAbort",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered a NULL pointer access violation to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# Various -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"0xE0000008|Assert|AVW@NULL", # 0xE0000008 (win::kOomExceptionCode) -> OOM
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!(?:base|WTF)::[Pp]artitions?(?:ExcessiveAllocationSize|OutOfMemory(Using\w+)?)",
rb".*!blink::(?:BlinkGCOutOfMemory|ReportOOMErrorInMainThread)",
rb".*!FX_OutOfMemoryTerminate",
rb".*!SkBitmap::allocPixels",
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application caused an access violation by writing to NULL to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# OOM -> hide irrelevant frames
cBugTranslation(
srzOriginalBugTypeId = r"OOM",
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".+!(.+::)?(Win)?CallNewHandler",
rb".+!(.+::)?\w+_malloc(_\w+)?",
rb".+!(.+::)?\w*(Alloc|alloc|OutOfMemory)\w*(<.+>)?",
],
),
];
| 0
| 0
| 0
|
f7b84e0119a5bb7d3e69b3fc77fd9952daf83b18
| 2,975
|
py
|
Python
|
ProjetoMercado/mercado/models.py
|
LucasRodriguesDaPaixao/ProjetoMercado
|
7a086ab0af800b15ef090520c9c81a0cd83dd650
|
[
"MIT"
] | null | null | null |
ProjetoMercado/mercado/models.py
|
LucasRodriguesDaPaixao/ProjetoMercado
|
7a086ab0af800b15ef090520c9c81a0cd83dd650
|
[
"MIT"
] | null | null | null |
ProjetoMercado/mercado/models.py
|
LucasRodriguesDaPaixao/ProjetoMercado
|
7a086ab0af800b15ef090520c9c81a0cd83dd650
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
| 33.806818
| 103
| 0.736807
|
from django.db import models
# Create your models here.
class Cliente(models.Model):
ID_cliente = models.AutoField(primary_key=True)
nome_cliente = models.CharField(max_length=100, verbose_name="Nome:")
cpf = models.CharField(max_length=14, verbose_name="CPF:")
def __str__(self):
return self.nome_cliente
class Fornecedor(models.Model):
ID_fornecedor = models.AutoField(primary_key=True)
nome_fornecedor = models.CharField(max_length=100, verbose_name="Nome:")
email_fornecedor = models.CharField(max_length=100, verbose_name="Email:")
cnpj= models.CharField(max_length=18, verbose_name="CNPJ:")
telefone = models.CharField(max_length=13, verbose_name="Telefone:")
def __str__(self):
return self.nome_fornecedor
class Meta:
verbose_name_plural="Fornecedores"
class Categoria(models.Model):
ID_categoria = models.AutoField(primary_key=True)
nome_categoria = models.CharField(max_length=45, verbose_name="Nome Categoria:")
def __str__(self):
return self.nome_categoria
class Produto(models.Model):
ID_produto = models.AutoField(primary_key=True)
nome_produto = models.CharField(max_length=100, verbose_name="Nome:")
data_validade = models.DateField(verbose_name="Data de validade:")
preco = models.DecimalField(max_digits=5, decimal_places=2, verbose_name="Preço:")
quantidade_produto = models.IntegerField(verbose_name="Quantidade de produtos:")
FK_categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, verbose_name="Categoria:")
FK_fornecedor = models.ForeignKey(Fornecedor, on_delete=models.CASCADE, verbose_name="Fornecedor:")
def __str__(self):
return self.nome_produto
class Setor(models.Model):
ID_setor = models.AutoField(primary_key=True)
nome_setor = models.CharField(max_length=45, verbose_name="Setor:")
FK_categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, verbose_name="Categoria:")
def __str__(self):
return self.nome_setor
class Meta:
verbose_name_plural="Setores"
class Funcionario(models.Model):
ID_funcionario = models.AutoField(primary_key=True)
nome_funcionario = models.CharField(max_length=45, verbose_name="Nome:")
rg = models.CharField(max_length=12, verbose_name="RG:")
cpf = models.CharField(max_length=14, verbose_name="CPF:")
FK_setor = models.ForeignKey(Setor, on_delete=models.CASCADE, verbose_name="Setor:")
def __str__(self):
return self.nome_funcionario
class Compra(models.Model):
ID_compra = models.AutoField(primary_key=True)
valor_total = models.DecimalField(max_digits=5, decimal_places=2, verbose_name="Valor total:")
FK_cliente = models.ForeignKey(Cliente, on_delete=models.CASCADE, verbose_name="Cliente:")
compra_produto = models.ManyToManyField(Produto)
def __str__(self):
return "Compra: {} <--> {}".format(self.ID_compra, self.FK_cliente)
| 260
| 2,486
| 160
|
e682d03323f99fc860ddd405e81e02079d38b903
| 2,979
|
py
|
Python
|
macrokit/_validator.py
|
hanjinliu/macro-kit
|
61ebc38ea1086337d5a7477c6e896af0220f8a71
|
[
"BSD-3-Clause"
] | 2
|
2021-11-02T09:53:49.000Z
|
2021-11-10T10:33:05.000Z
|
macrokit/_validator.py
|
hanjinliu/macro-kit
|
61ebc38ea1086337d5a7477c6e896af0220f8a71
|
[
"BSD-3-Clause"
] | null | null | null |
macrokit/_validator.py
|
hanjinliu/macro-kit
|
61ebc38ea1086337d5a7477c6e896af0220f8a71
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Callable, Hashable, TypeVar, Iterable, Union
from ._symbol import Symbol
from .head import Head
_T = TypeVar("_T", bound=Hashable)
_A = TypeVar("_A")
class Validator:
"""A validator class that will be used for Expr argument validation."""
def register(self, value: _T):
"""Register value for validation."""
return wrapper
def __call__(self, arg: _T, *args: _A) -> Union[_A, Iterable[_A]]:
"""Run validation."""
try:
func = self._map[arg]
except KeyError:
return args
try:
out = func(*args)
except ValidationError as e:
e.args = (f"{args} is incompatible with {arg}",)
raise e
return out
class ValidationError(ValueError):
"""Raised when validation failed."""
validator = Validator()
@validator.register(Head.empty)
@validator.register(Head.del_)
@validator.register(Head.raise_)
@validator.register(Head.comment)
@validator.register(Head.assert_)
@validator.register(Head.getitem)
@validator.register(Head.unop)
@validator.register(Head.getattr)
@validator.register(Head.assign)
@validator.register(Head.kw)
@validator.register(Head.annotate)
@validator.register(Head.binop)
@validator.register(Head.aug)
@validator.register(Head.function)
@validator.register(Head.for_)
@validator.register(Head.while_)
@validator.register(Head.if_)
@validator.register(Head.elif_)
| 22.568182
| 75
| 0.627727
|
from typing import Callable, Hashable, TypeVar, Iterable, Union
from ._symbol import Symbol
from .head import Head
_T = TypeVar("_T", bound=Hashable)
_A = TypeVar("_A")
class Validator:
"""A validator class that will be used for Expr argument validation."""
def __init__(self):
self._map: dict[_T, Callable[[_A], _A]] = {}
def register(self, value: _T):
"""Register value for validation."""
def wrapper(func):
self._map[value] = func
return func
return wrapper
def __call__(self, arg: _T, *args: _A) -> Union[_A, Iterable[_A]]:
"""Run validation."""
try:
func = self._map[arg]
except KeyError:
return args
try:
out = func(*args)
except ValidationError as e:
e.args = (f"{args} is incompatible with {arg}",)
raise e
return out
class ValidationError(ValueError):
"""Raised when validation failed."""
validator = Validator()
@validator.register(Head.empty)
def _no_arg(args):
if len(args) != 0:
raise ValidationError()
return args
@validator.register(Head.del_)
@validator.register(Head.raise_)
def _single_arg(args):
if len(args) != 1:
raise ValidationError()
return args
@validator.register(Head.comment)
def _single_str(args):
if len(args) != 1:
raise ValidationError()
k = args[0]
if isinstance(k, Symbol):
k.name = k.name.strip("'")
return args
@validator.register(Head.assert_)
@validator.register(Head.getitem)
@validator.register(Head.unop)
def _two_args(args):
if len(args) != 2:
raise ValidationError()
return args
@validator.register(Head.getattr)
def _getattr(args):
if len(args) != 2:
raise ValidationError()
k = args[1]
if isinstance(k, Symbol):
k.name = k.name.strip("'")
return args
@validator.register(Head.assign)
@validator.register(Head.kw)
@validator.register(Head.annotate)
def _symbol_and_any(args):
if len(args) != 2:
raise ValidationError()
k, v = args
if isinstance(k, str):
k = Symbol.var(k)
elif isinstance(k, Symbol) and k.constant:
k = Symbol.var(k.name)
return [k, v]
@validator.register(Head.binop)
@validator.register(Head.aug)
def _three_args(args):
if len(args) != 3:
raise ValidationError()
return args
@validator.register(Head.function)
@validator.register(Head.for_)
@validator.register(Head.while_)
def _an_arg_and_a_block(args):
if len(args) != 2:
raise ValidationError()
b = args[1]
if getattr(b, "head", None) != Head.block:
raise ValidationError()
return args
@validator.register(Head.if_)
@validator.register(Head.elif_)
def _two_args_and_a_block(args):
if len(args) != 3:
raise ValidationError()
b = args[2]
if getattr(b, "head", None) != Head.block:
raise ValidationError()
return args
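# A minimal usage sketch of the registry above (the expressions are made up for
# illustration):
#
#   validator(Head.getattr, [Symbol.var("obj"), Symbol.var("attr")])
#   # -> dispatches to _getattr, which checks the arity and strips quotes from
#   #    the attribute symbol before returning the argument list
#   validator(Head.empty, ["unexpected"])  # raises ValidationError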
| 1,270
| 0
| 256
|
2dcc5057b0af83ae887869fbadf0b60476028183
| 7,579
|
py
|
Python
|
cubi_tk/archive/readme.py
|
eudesbarbosa/cubi-tk
|
80c3ef9387f2399f796b2cc445b99781d541f222
|
[
"MIT"
] | null | null | null |
cubi_tk/archive/readme.py
|
eudesbarbosa/cubi-tk
|
80c3ef9387f2399f796b2cc445b99781d541f222
|
[
"MIT"
] | null | null | null |
cubi_tk/archive/readme.py
|
eudesbarbosa/cubi-tk
|
80c3ef9387f2399f796b2cc445b99781d541f222
|
[
"MIT"
] | null | null | null |
"""``cubi-tk archive prepare``: Prepare a project for archival"""
import errno
import os
import re
import shutil
import sys
import tempfile
from cookiecutter.main import cookiecutter
from logzero import logger
from ..common import execute_shell_commands
from ..isa_tpl import IsaTabTemplate
from ..isa_tpl import load_variables
_BASE_DIR = os.path.dirname(__file__)
TEMPLATE = IsaTabTemplate(
name="archive",
path=os.path.join(os.path.dirname(_BASE_DIR), "isa_tpl", "archive"),
description="Prepare project for archival",
configuration=load_variables("archive"),
)
DU = re.compile("^ *([0-9]+)[ \t]+[^ \t]+.*$")
DATE = re.compile("^(20[0-9][0-9]-[01][0-9]-[0-3][0-9])[_-].+$")
MAIL = (
"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
'|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]'
'|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")'
"@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
"|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:"
"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
"|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)"
"\\])"
)
PATTERNS = {
"project_name": re.compile("^ *- *Project name: *.+$"),
"date": re.compile("^ *- *Start date: *20[0-9]{2}-[01][0-9]-[0-3][0-9].*$"),
"status": re.compile("^ *- *Current status: *(Active|Inactive|Finished|Archived) *$"),
"PI": re.compile("^ *- P.I.: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"client": re.compile("^ *- *Client contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"archiver": re.compile("^ *- *CUBI contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"CUBI": re.compile("^ *- *CUBI project leader: ([A-z '-]+) *$"),
}
COMMANDS = {
"size": ["du", "--bytes", "--max-depth=0"],
"inodes": ["du", "--inodes", "--max-depth=0"],
"size_follow": ["du", "--dereference", "--bytes", "--max-depth=0"],
"inodes_follow": ["du", "--dereference", "--inodes", "--max-depth=0"],
}
MSG = "**Contents of original `README.md` file**"
| 34.766055
| 98
| 0.577517
|
"""``cubi-tk archive prepare``: Prepare a project for archival"""
import errno
import os
import re
import shutil
import sys
import tempfile
from cookiecutter.main import cookiecutter
from logzero import logger
from ..common import execute_shell_commands
from ..isa_tpl import IsaTabTemplate
from ..isa_tpl import load_variables
_BASE_DIR = os.path.dirname(__file__)
TEMPLATE = IsaTabTemplate(
name="archive",
path=os.path.join(os.path.dirname(_BASE_DIR), "isa_tpl", "archive"),
description="Prepare project for archival",
configuration=load_variables("archive"),
)
DU = re.compile("^ *([0-9]+)[ \t]+[^ \t]+.*$")
DATE = re.compile("^(20[0-9][0-9]-[01][0-9]-[0-3][0-9])[_-].+$")
MAIL = (
"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
'|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]'
'|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")'
"@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
"|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:"
"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
"|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)"
"\\])"
)
PATTERNS = {
"project_name": re.compile("^ *- *Project name: *.+$"),
"date": re.compile("^ *- *Start date: *20[0-9]{2}-[01][0-9]-[0-3][0-9].*$"),
"status": re.compile("^ *- *Current status: *(Active|Inactive|Finished|Archived) *$"),
"PI": re.compile("^ *- P.I.: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"client": re.compile("^ *- *Client contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"archiver": re.compile("^ *- *CUBI contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"CUBI": re.compile("^ *- *CUBI project leader: ([A-z '-]+) *$"),
}
COMMANDS = {
"size": ["du", "--bytes", "--max-depth=0"],
"inodes": ["du", "--inodes", "--max-depth=0"],
"size_follow": ["du", "--dereference", "--bytes", "--max-depth=0"],
"inodes_follow": ["du", "--dereference", "--inodes", "--max-depth=0"],
}
MSG = "**Contents of original `README.md` file**"
def _extra_context_from_config(config=None):
extra_context = {}
if config:
for name in TEMPLATE.configuration:
if getattr(config, "var_%s" % name, None) is not None:
extra_context[name] = getattr(config, "var_%s" % name)
return extra_context
def _get_snakemake_nb(project_dir):
cmds = [
[
"find",
project_dir,
"-type",
"d",
"-name",
".snakemake",
"-exec",
"du",
"--inodes",
"--max-depth=0",
"{}",
";",
],
["cut", "-f", "1"],
["paste", "-sd+"],
["bc"],
]
return execute_shell_commands(cmds, check=False, verbose=False)
def _get_archiver_name():
cmds = [
["pinky", "-l", os.getenv("USER")],
["grep", "In real life:"],
["sed", "-e", "s/.*In real life: *//"],
]
output = execute_shell_commands(cmds, check=False, verbose=False)
return output.rstrip()
def _create_extra_context(project_dir, config=None):
extra_context = _extra_context_from_config(config)
logger.info("Collecting size & inodes numbers")
for (context_name, cmd) in COMMANDS.items():
if context_name not in extra_context.keys():
cmd.append(project_dir)
extra_context[context_name] = DU.match(
execute_shell_commands([cmd], check=False, verbose=False)
).group(1)
if "snakemake_nb" not in extra_context.keys():
extra_context["snakemake_nb"] = _get_snakemake_nb(project_dir)
if "archiver_name" not in extra_context.keys():
extra_context["archiver_name"] = _get_archiver_name()
if "archiver_email" not in extra_context.keys():
extra_context["archiver_email"] = (
"{}@bih-charite.de".format(extra_context["archiver_name"]).lower().replace(" ", ".")
)
if "CUBI_name" not in extra_context.keys():
extra_context["CUBI_name"] = extra_context["archiver_name"]
if "PI_name" in extra_context.keys() and "PI_email" not in extra_context.keys():
extra_context["PI_email"] = (
"{}@charite.de".format(extra_context["PI_name"]).lower().replace(" ", ".")
)
if "client_name" in extra_context.keys() and "client_email" not in extra_context.keys():
extra_context["client_email"] = (
"{}@charite.de".format(extra_context["client_name"]).lower().replace(" ", ".")
)
if "SODAR_UUID" in extra_context.keys() and "SODAR_URL" not in extra_context.keys():
extra_context["SODAR_URL"] = "{}/projects/{}".format(
config.sodar_server_url, extra_context["SODAR_UUID"]
)
if "directory" not in extra_context.keys():
extra_context["directory"] = project_dir
if "project_name" not in extra_context.keys():
extra_context["project_name"] = os.path.basename(project_dir)
if "start_date" not in extra_context.keys() and DATE.match(extra_context["project_name"]):
extra_context["start_date"] = DATE.match(extra_context["project_name"]).group(1)
if "current_status" not in extra_context.keys():
extra_context["current_status"] = "Finished"
return extra_context
def _copy_readme(src, target):
os.makedirs(os.path.realpath(os.path.dirname(target)), mode=488, exist_ok=True)
with open(src, "rt") as f:
lines = [x.rstrip() for x in f.readlines()]
if os.path.exists(target):
lines.extend(["", "", "-" * 80, "", "", MSG, "", "", "-" * 80, "", ""])
with open(target, "rt") as f:
lines.extend([x.rstrip() for x in f.readlines()])
os.remove(target)
with open(os.path.realpath(target), "wt") as f:
f.write("\n".join(lines))
def is_readme_valid(filename=None):
if filename is None:
f = sys.stdin
else:
if not os.path.exists(filename):
return False
f = open(filename, "rt")
matching = set()
for line in f:
line = line.rstrip()
for (name, pattern) in PATTERNS.items():
if pattern.match(line):
matching.add(name)
f.close()
return set(PATTERNS.keys()).issubset(matching)
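# For illustration only (names and addresses are made up): a README containing
# lines such as the following satisfies every pattern in PATTERNS, so
# is_readme_valid() returns True; if any of the seven entries is missing it
# returns False.
#
#   - Project name: Example project
#   - Start date: 2021-01-01
#   - Current status: Finished
#   - P.I.: [Jane Doe](mailto:jane.doe@charite.de)
#   - Client contact: [John Roe](mailto:john.roe@charite.de)
#   - CUBI contact: [Max Muster](mailto:max.muster@bih-charite.de)
#   - CUBI project leader: Max Muster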
def create_readme(filename, project_dir, config=None, no_input=False):
# If a valid README.md file already exists in the project, do nothing
if os.path.exists(filename) and is_readme_valid(filename):
logger.info("Using existing file, variables ignored : '{}'".format(filename))
return
# Fill defaults (emails, size, inodes, ...)
extra_context = _create_extra_context(project_dir, config)
try:
tmp = tempfile.mkdtemp()
# Create the readme file in temp directory
cookiecutter(
template=TEMPLATE.path, extra_context=extra_context, output_dir=tmp, no_input=no_input
)
# Copy it back to destination, including contents of former incomplete README.md
_copy_readme(os.path.join(tmp, extra_context["project_name"], "README.md"), filename)
finally:
try:
shutil.rmtree(tmp)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def add_readme_parameters(parser):
for name in TEMPLATE.configuration:
key = name.replace("_", "-")
parser.add_argument(
"--var-%s" % key, help="template variables %s" % repr(name), default=None
)
| 5,266
| 0
| 184
|
769fc816a6040cc61dab6376c20fd5c6bf0ebaa0
| 989
|
py
|
Python
|
sigmod2021-exdra-p523/experiments/archive/submitted_results/code/other/pca.py
|
damslab/reproducibility
|
f7804b2513859f7e6f14fa7842d81003d0758bf8
|
[
"Apache-2.0"
] | 4
|
2021-12-10T17:20:26.000Z
|
2021-12-27T14:38:40.000Z
|
sigmod2021-exdra-p523/experiments/code/other/pca.py
|
damslab/reproducibility
|
f7804b2513859f7e6f14fa7842d81003d0758bf8
|
[
"Apache-2.0"
] | null | null | null |
sigmod2021-exdra-p523/experiments/code/other/pca.py
|
damslab/reproducibility
|
f7804b2513859f7e6f14fa7842d81003d0758bf8
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import argparse
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
# https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_pipeline.html#sklearn.pipeline.make_pipeline
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA
pca = make_pipeline(StandardScaler(), PCA(n_components=10,svd_solver="full")).fit(X)
np.savetxt(args.outputpath, pca.steps[1][1].components_, delimiter=",")
| 43
| 118
| 0.781598
|
import numpy as np
import argparse
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
# https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_pipeline.html#sklearn.pipeline.make_pipeline
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA
pca = make_pipeline(StandardScaler(), PCA(n_components=10,svd_solver="full")).fit(X)
np.savetxt(args.outputpath, pca.steps[1][1].components_, delimiter=",")
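# Example invocation (paths are placeholders):
#
#   python pca.py -x features.npy -y labels.npy -o pca_components.csv
#
# The script standardizes the columns of the input matrix, fits a 10-component
# PCA with the exact ("full") SVD solver, and writes the resulting
# 10 x n_features component matrix to the output CSV. Note that -y/--labels is
# parsed but not used by this script.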
| 0
| 0
| 0
|
ac1fa224c6f4611660c583d6666d2a339221dfa7
| 8,868
|
py
|
Python
|
prototypes/learn_weigths.py
|
pantelisantonoudiou/Logic_szrDetect
|
3267cabc78905c189a97e06ea2731b6f9e7b2def
|
[
"Apache-2.0"
] | 1
|
2020-11-19T19:26:34.000Z
|
2020-11-19T19:26:34.000Z
|
prototypes/learn_weigths.py
|
pantelisantonoudiou/Logic_szrDetect
|
3267cabc78905c189a97e06ea2731b6f9e7b2def
|
[
"Apache-2.0"
] | null | null | null |
prototypes/learn_weigths.py
|
pantelisantonoudiou/Logic_szrDetect
|
3267cabc78905c189a97e06ea2731b6f9e7b2def
|
[
"Apache-2.0"
] | 1
|
2021-04-07T11:41:39.000Z
|
2021-04-07T11:41:39.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:14:32 2020
@author: Pante
"""
import os, features, time
import numpy as np
from sklearn.preprocessing import StandardScaler
from array_helper import find_szr_idx, match_szrs, merge_close
from build_feature_data import get_data, get_features_allch
from sklearn.metrics import log_loss,recall_score
import matplotlib.pyplot as plt
####### consider isolation forest for outlier detection!!!!!!
def user_cost(y_true, y_pred):
"""
user_cost(y_true, y_pred)
Parameters
----------
y_true : 1ndarray bool, ground truth values
y_pred : 1ndarray bool, predicted values
Returns
-------
cost : float
"""
detected = 0 # number of detected seizures
# get bounds of sezures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # total predicted
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # total predicted
bounds_pred = merge_close(bounds_pred, merge_margin = 5) # merge seizures close together
if bounds_pred.shape[0]>0: # find matching seizures
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
# calculate cost
a = 1 - (detected/bounds_true.shape[0]) # get detected ratio
b = (bounds_pred.shape[0] - detected) # get false positives
cost = a + np.log10(b+1) # cost function
return cost
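# Worked example of the cost above: with 5 true seizures of which 4 are detected
# and 3 false positives, cost = (1 - 4/5) + log10(3 + 1) = 0.2 + 0.602, about 0.80.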
def create_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
a = 100
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = (1 - (detected/bounds_true.shape[0]))*20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
# L = 1 # learning rate
cost = a + np.log10(b+1)
return cost
def szr_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = 1 - (detected/bounds_true.shape[0])
if (a > 0 and a <= 1):
a = 20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
cost = a + np.log10(b+1)
return cost
def get_min_cost(feature, y_true):
"""
get_min_cost(feature, y_true)
Parameters
----------
feature : 1D ndarray, extracted feature
y_true : 1D ndarray, bool grund truth labels
Returns
-------
TYPE: Float, threshold value that gves minimum cost
"""
n_loop = 100 # loop number and separation
thresh_array = np.linspace(1, 20, n_loop) # thresholds to test
cost_array = np.zeros(n_loop)
for i in range(n_loop):
# thresh_array[i] = thresh
y_pred = feature> (np.mean(feature) + thresh_array[i]*np.std(feature))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = szr_cost(bounds_true, bounds_pred) # get cost
# pass to array
cost_array[i] = cost
return thresh_array[np.argmin(cost_array)]
# define parameter list
param_list = (features.autocorr, features.line_length, features.rms, features.mad, features.var, features.std, features.psd, features.energy,
features.get_envelope_max_diff,)
cross_ch_param_list = (features.cross_corr, features.signal_covar, features.signal_abs_covar,)
# get data and true labels
exp_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data\3642_3641_3560_3514'
# 071919_3514 071719_3560
data, y_true = get_data(exp_path, '072519_3642',ch_num = [0,1],
inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# # get file list
# main_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data'
# folder_path = '3514_3553_3639_3640'
# ver_path = os.path.join(main_path,folder_path, 'verified_predictions_pantelis')
# filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path))) # get only files with predictions
# filelist = [os.path.splitext(x)[0] for x in filelist] # remove csv ending
# # data, y_true = get_data(r'W:\Maguire Lab\Trina\2019\07-July\3514_3553_3639_3640, '071819_3553a',ch_num = [0,1],
# # inner_path={'data_path':'reorganized_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# for i in range(1):
# # 071919_3514 071719_3560
# data, y_true = get_data(os.path.join(main_path, folder_path), filelist[i],ch_num = [0,1],
# inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# if sum(y_true) == 0:
# continue
# get features
x_data, labels = get_features_allch(data,param_list,cross_ch_param_list)
# Normalize data
x_data = StandardScaler().fit_transform(x_data)
# get cost plot
cost_array,thresh_array = find_threshold(x_data, y_true)
| 27.974763
| 141
| 0.614005
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:14:32 2020
@author: Pante
"""
import os, features, time
import numpy as np
from sklearn.preprocessing import StandardScaler
from array_helper import find_szr_idx, match_szrs, merge_close
from build_feature_data import get_data, get_features_allch
from sklearn.metrics import log_loss,recall_score
import matplotlib.pyplot as plt
####### consider isolation forest for outlier detection!!!!!!
def user_cost(y_true, y_pred):
"""
user_cost(y_true, y_pred)
Parameters
----------
y_true : 1ndarray bool, ground truth values
y_pred : 1ndarray bool, predicted values
Returns
-------
cost : float
"""
detected = 0 # number of detected seizures
# get bounds of sezures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # total predicted
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # total predicted
bounds_pred = merge_close(bounds_pred, merge_margin = 5) # merge seizures close together
if bounds_pred.shape[0]>0: # find matching seizures
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
# calculate cost
a = 1 - (detected/bounds_true.shape[0]) # get detected ratio
b = (bounds_pred.shape[0] - detected) # get false positives
cost = a + np.log10(b+1) # cost function
return cost
def create_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
a = 100
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = (1 - (detected/bounds_true.shape[0]))*20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
# L = 1 # learning rate
cost = a + np.log10(b+1)
return cost
def szr_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = 1 - (detected/bounds_true.shape[0])
if (a > 0 and a <= 1):
a = 20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
cost = a + np.log10(b+1)
return cost
def get_min_cost(feature, y_true):
"""
get_min_cost(feature, y_true)
Parameters
----------
feature : 1D ndarray, extracted feature
y_true : 1D ndarray, bool grund truth labels
Returns
-------
TYPE: Float, threshold value that gves minimum cost
"""
n_loop = 100 # loop number and separation
thresh_array = np.linspace(1, 20, n_loop) # thresholds to test
cost_array = np.zeros(n_loop)
for i in range(n_loop):
# thresh_array[i] = thresh
y_pred = feature> (np.mean(feature) + thresh_array[i]*np.std(feature))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = szr_cost(bounds_true, bounds_pred) # get cost
# pass to array
cost_array[i] = cost
return thresh_array[np.argmin(cost_array)]
def find_threshold(x_data, y_true):
# thresh = 1;
ftr = 8
x = x_data[:,ftr]
# fig = plt.figure()
# ax = fig.add_subplot(111)
# t = np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x))
# line1 = ax.plot(x)
# line2 = ax.plot(t)
n_loop = 100
cost_array = np.zeros(n_loop)
thresh_array = np.zeros(n_loop)
thresh_array = np.linspace(1, 20, n_loop)
for i in range(n_loop):
# thresh_array[i] = thresh
y_pred = x> (np.mean(x) + thresh_array[i]*np.std(x))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,2])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = create_cost(bounds_true, bounds_pred) # get cost
# cost = log_loss(y_true, y_pred ,labels =[True,False])
cost_array[i] = cost
# if cost == 0:
# print('cost has reached zero, stopping')
# return cost_array,thresh_array
# thresh += cost # update cost
# ax.plot(np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x)))
# line2[0].set_ydata(np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x)))
# fig.canvas.draw()
plt.figure()
plt.plot(thresh_array, cost_array)
plt.ylabel('cost')
plt.xlabel('thresh')
print('seizures = ', bounds_true.shape[0])
return cost_array,thresh_array
def find_threshold_all(x_data, y_true):
thresh = 1;
ftr = 1
x = x_data[:,ftr]
fig = plt.figure()
ax = fig.add_subplot(111)
t = np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x))
line1 = ax.plot(x)
line2 = ax.plot(t)
n_loop = 100
cost_array = np.zeros(n_loop)
thresh_array = np.zeros(n_loop)
# thresh_array = np.linspace(10, 0, n_loop)
for i in range(n_loop):
thresh_array[i] = thresh
y_pred = x> (np.mean(x) + thresh_array[i]*np.std(x))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = create_cost(bounds_true, bounds_pred) # get cost
# cost = log_loss(y_true, y_pred ,labels =[True,False])
cost_array[i] = cost
if cost == 0:
print('cost has reached zero, stopping')
return cost_array,thresh_array
return cost_array,thresh_array
# define parameter list
param_list = (features.autocorr, features.line_length, features.rms, features.mad, features.var, features.std, features.psd, features.energy,
features.get_envelope_max_diff,)
cross_ch_param_list = (features.cross_corr, features.signal_covar, features.signal_abs_covar,)
# get data and true labels
exp_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data\3642_3641_3560_3514'
# 071919_3514 071719_3560
data, y_true = get_data(exp_path, '072519_3642',ch_num = [0,1],
inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# # get file list
# main_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data'
# folder_path = '3514_3553_3639_3640'
# ver_path = os.path.join(main_path,folder_path, 'verified_predictions_pantelis')
# filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path))) # get only files with predictions
# filelist = [os.path.splitext(x)[0] for x in filelist] # remove csv ending
# # data, y_true = get_data(r'W:\Maguire Lab\Trina\2019\07-July\3514_3553_3639_3640, '071819_3553a',ch_num = [0,1],
# # inner_path={'data_path':'reorganized_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# for i in range(1):
# # 071919_3514 071719_3560
# data, y_true = get_data(os.path.join(main_path, folder_path), filelist[i],ch_num = [0,1],
# inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# if sum(y_true) == 0:
# continue
# get features
x_data, labels = get_features_allch(data,param_list,cross_ch_param_list)
# Normalize data
x_data = StandardScaler().fit_transform(x_data)
# get cost plot
cost_array,thresh_array = find_threshold(x_data, y_true)
| 2,799
| 0
| 54
|
276b5d3d63f7139687164c5d10374d92ac764ed2
| 1,016
|
py
|
Python
|
qcloudsdkcmem/DescribeCmemRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkcmem/DescribeCmemRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkcmem/DescribeCmemRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
| 25.4
| 73
| 0.643701
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class DescribeCmemRequest(Request):
def __init__(self):
super(DescribeCmemRequest, self).__init__(
'cmem', 'qcloudcliV1', 'DescribeCmem', 'cmem.api.qcloud.com')
def get_limit(self):
return self.get_params().get('limit')
def set_limit(self, limit):
self.add_param('limit', limit)
def get_offset(self):
return self.get_params().get('offset')
def set_offset(self, offset):
self.add_param('offset', offset)
def get_sizeInfo(self):
return self.get_params().get('sizeInfo')
def set_sizeInfo(self, sizeInfo):
self.add_param('sizeInfo', sizeInfo)
def get_subnetId(self):
return self.get_params().get('subnetId')
def set_subnetId(self, subnetId):
self.add_param('subnetId', subnetId)
def get_vpcId(self):
return self.get_params().get('vpcId')
def set_vpcId(self, vpcId):
self.add_param('vpcId', vpcId)
| 615
| 14
| 320
|
14f7045572f6a48f7da91a5292fba1fe343f71d8
| 391
|
py
|
Python
|
tests/unit/cartography/intel/aws/test_ecr.py
|
srics/cartography
|
19a06766e304d657d956246179a2bb01a6d9aef6
|
[
"Apache-2.0"
] | 1
|
2022-03-31T03:24:37.000Z
|
2022-03-31T03:24:37.000Z
|
tests/unit/cartography/intel/aws/test_ecr.py
|
srics/cartography
|
19a06766e304d657d956246179a2bb01a6d9aef6
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/cartography/intel/aws/test_ecr.py
|
srics/cartography
|
19a06766e304d657d956246179a2bb01a6d9aef6
|
[
"Apache-2.0"
] | null | null | null |
from cartography.intel.aws import ecr
from tests.data.aws.ecr import GET_ECR_REPOSITORY_IMAGE_VULNS
| 43.444444
| 96
| 0.803069
|
from cartography.intel.aws import ecr
from tests.data.aws.ecr import GET_ECR_REPOSITORY_IMAGE_VULNS
def test_transform_repository_image_vulns():
transformed_data = ecr.transform_ecr_scan_finding_attributes(GET_ECR_REPOSITORY_IMAGE_VULNS)
assert transformed_data['findings'][0]['package_name'] == 'some_name'
assert transformed_data['findings'][0]['package_version'] == '1.2.3'
| 267
| 0
| 23
|
a06cceb6d9e57c9f8d1381b5bcfd1fa628bd0789
| 2,314
|
py
|
Python
|
rssnewsbot/spiders/rssspider.py
|
hijoe320/RSSBot
|
cbc0bc24d980ede3419111d51384abbc2c93f70c
|
[
"MIT"
] | null | null | null |
rssnewsbot/spiders/rssspider.py
|
hijoe320/RSSBot
|
cbc0bc24d980ede3419111d51384abbc2c93f70c
|
[
"MIT"
] | null | null | null |
rssnewsbot/spiders/rssspider.py
|
hijoe320/RSSBot
|
cbc0bc24d980ede3419111d51384abbc2c93f70c
|
[
"MIT"
] | null | null | null |
from time import sleep, gmtime, mktime
from datetime import datetime
import logging
import scrapy
import redis
import msgpack
import xxhash
import pymongo as pm
import feedparser as fp
from colorama import Back, Fore, Style
from ..settings import MONGODB_URI, REDIS_HOST, REDIS_PORT, REDIS_PWD, REDIS_PENDING_QUEUE
def hs(s):
"""
hash function to convert url to fixed length hash code
"""
return xxhash.xxh32(s).hexdigest()
def time2ts(time_struct):
"""
convert time_struct to epoch
"""
return mktime(time_struct)
| 32.138889
| 103
| 0.617978
|
from time import sleep, gmtime, mktime
from datetime import datetime
import logging
import scrapy
import redis
import msgpack
import xxhash
import pymongo as pm
import feedparser as fp
from colorama import Back, Fore, Style
from ..settings import MONGODB_URI, REDIS_HOST, REDIS_PORT, REDIS_PWD, REDIS_PENDING_QUEUE, REDIS_DUPFLT_DB  # REDIS_DUPFLT_DB (assumed to exist in settings) is needed by the duplicate filter below
def hs(s):
"""
hash function to convert url to fixed length hash code
"""
return xxhash.xxh32(s).hexdigest()
def time2ts(time_struct):
"""
convert time_struct to epoch
"""
return mktime(time_struct)
class RSSSpider(scrapy.Spider):
name = "rssspider"
def __init__(self, *args, **kwargs):
super(RSSSpider, self).__init__(*args, **kwargs)
self.rc = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD)
self.df = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD, db=REDIS_DUPFLT_DB)
self.mc = pm.MongoClient(host=MONGODB_URI, connect=False)
def start_requests(self):
with self.mc.rssnews.feed.find() as cursor:
logging.info("number of rss feeds = %d", cursor.count())
for item in cursor:
logging.debug("rss=%(url)s", item)
yield scrapy.Request(url=item["url"], callback=self.parse, meta=item)
def parse(self, res):
logging.debug("%sparsing %s%s", Fore.GREEN, res.url, Style.RESET_ALL)
rss = fp.parse(res.body)
symbol = res.meta["symbol"]
for e in rss.entries:
if self.check_exist(e.link):
continue
if '*' in e.link:
url = "http" + e.link.split("*http")[-1]
self.append_task(e, url)
elif e.link.startswith("http://finance.yahoo.com/r/"):
yield scrapy.Request(url=e.link, callback=self.extract_url, meta=e)
else:
self.append_task(e, e.link)
def extract_url(self, res):
if res.body.startswith("<script src="):
url = res.body.split("URL=\'")[-1].split("\'")[0]
self.append_task(res.meta, url)
else:
pass
def check_exist(self, url):
return self.df.get(url)
def append_task(self, entry, url):
self.df.set(url, True, ex=3600)
task = {"url": url, "title": entry.get("title"), "published": entry.get("published")}  # assumed task payload (hypothetical field choice)
self.rc.append(REDIS_PENDING_QUEUE, msgpack.packb(task))
| 1,545
| 195
| 23
|
0a29357a3fcb65eb38130117fd1af6fb06bc1c40
| 11,992
|
py
|
Python
|
data/transforms/data_preprocessing.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
data/transforms/data_preprocessing.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
data/transforms/data_preprocessing.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 15:36:15 2020
dataset transform
@author: minjie
"""
import albumentations as A
from albumentations.pytorch import ToTensor as ToTensor_albu
import cv2
import torch
from multiprocessing import Pool
from utils.parse_meta import parse_kpds
import numpy as np
# imgs = []
# pts = []
#
# hh,ww,_ = img.shape
# for _ in range(self.n_aug):
# #points = [ww/2.0,hh/2.0,1.0]
# points = [[0.0,0.0,1.0], [0.0,hh,1.0], [ww,0.0,1.0],[ww,hh,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
# imgs.append(augmented['image'])
# pts.append(augmented['keypoints'])
# # NOTE: use bbox will have prob that box is outside crop region.
# bboxes= [[0.45, 0.45, 0.55, 0.55]]
#
# augmented = self.T_aug(image = img,bboxes = bboxes,category_id = ['0'])
# hh,ww,_ = img.shape
# points = [[ww/2.0,hh/2.0,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
#return augmented['image']
| 38.935065
| 162
| 0.488075
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 15:36:15 2020
dataset transform
@author: minjie
"""
import albumentations as A
from albumentations.pytorch import ToTensor as ToTensor_albu
import cv2
import torch
from multiprocessing import Pool
from utils.parse_meta import parse_kpds
import numpy as np
def get_aug(aug, min_area=0., min_visibility=0.):
return A.Compose(aug, bbox_params={'format': 'pascal_voc', 'min_area': min_area, 'min_visibility': min_visibility, 'label_fields': ['category_id']})
class TrainAugmentation_albu:
def __init__(self, sz_hw = (384,384),mean=0, std=1.0, crp_scale=(0.08, 1.0),crp_ratio = (0.75, 1.3333), weak_aug = False,n_aug = 1,out_augpos = False):
"""
Args:
weak_aug, week aug for fixmatch
"""
if isinstance(sz_hw, int):
sz_hw = (sz_hw,sz_hw)
self.mean = mean
self.std = std
self.sz_hw = sz_hw
self.crp_scale = crp_scale
self.crp_ratio = crp_ratio
self.n_aug = n_aug # number of repeated augmentation
self.out_augpos = out_augpos
if self.sz_hw[0] == self.sz_hw[1]:
self.T_aug = A.Compose([A.Rotate(p=0.5),
A.RandomResizedCrop(height = self.sz_hw[0], width = self.sz_hw[1], scale=self.crp_scale, ratio=self.crp_ratio,
interpolation = cv2.INTER_CUBIC,p = 1.0),
A.Flip(p = 0.5),
A.RandomRotate90(p = 0.5)])
else:
self.T_aug = A.Compose([A.Rotate(p=0.5),
A.RandomResizedCrop(height = self.sz_hw[0], width = self.sz_hw[1], scale=self.crp_scale, ratio=self.crp_ratio,
interpolation = cv2.INTER_CUBIC,p = 1.0),
A.Flip(p = 0.5)])
self.I_aug = A.Compose([ A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
A.HueSaturationValue(hue_shift_limit=2, sat_shift_limit=15, val_shift_limit=20,p = 0.5),
A.OneOf([A.Blur(blur_limit=5, p=0.3),
A.GaussNoise(var_limit=(5.0, 10.0), p=0.3),
A.IAASharpen(alpha=(0.1, 0.3), lightness=(0.5, 1.0), p=0.4)],p=0.5)])
self.N_aug = A.Compose([A.Normalize(mean=mean, std=std, p=1.0),
ToTensor_albu()])
if weak_aug is False:
self.augment = A.Compose([ self.T_aug, self.I_aug,self.N_aug])
# self.augment = A.Compose([ self.T_aug, self.I_aug])
# self.augment = A.Compose(self.augment, bbox_params={'format': 'albumentations', 'min_area': 0, 'min_visibility': 0, 'label_fields': ['category_id']})
if self.out_augpos is True:
self.augment = A.Compose(self.augment,\
keypoint_params = A.KeypointParams(format= 'xys', \
remove_invisible=False, angle_in_degrees=True))#label_fields=['category_id'], \
else:
#weak augment
self.T_aug = A.RandomResizedCrop(height = self.sz_hw[0], width = self.sz_hw[1], scale=self.crp_scale, ratio=self.crp_ratio,
interpolation = cv2.INTER_CUBIC,p = 1.0)
self.augment = A.Compose([ self.T_aug, self.N_aug])
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
if self.n_aug==1:
#augmented = self.augment(image = img)
if self.out_augpos is False:
augmented = self.augment(image = img)
return augmented['image']
else:
hh,ww,_ = img.shape
points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]
hw_in = img.shape[:2]
augmented = self.augment(image = img,keypoints=points)
image_aug = augmented['image']
hw_out = image_aug.shape[1:]
feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'],hw_in,hw_out))
return (image_aug,feat_kpds)
else:
# test multi-aug
if self.out_augpos is False:
return torch.stack([self.augment(image = img)['image'] for _ in range(self.n_aug)])
else:
img_out = []
feat_out = []
trans_out = []
hh,ww,_ = img.shape
#points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]
points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0],[ww,0.0, 1.0]] # add one point for cv2.getAffineTransform
hw_in = img.shape[:2]
for _ in range(self.n_aug):
augmented = self.augment(image = img,keypoints=points)
image_aug = augmented['image']
hw_out = image_aug.shape[1:]
#feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'],hw_in,hw_out))
feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'][:2],hw_in,hw_out))
pts2 = augmented['keypoints']
pts1 = np.float32([pt[:2] for pt in points])
pts2 = np.float32([pt[:2] for pt in pts2])
trans = cv2.getAffineTransform(pts2,pts1)
trans_out.append(trans)
img_out.append(image_aug)
feat_out.append(feat_kpds)
return (torch.stack(img_out), {'feat_out':torch.stack(feat_out), 'trans_out': np.stack(trans_out)})
#return torch.stack([self.augment(image = img)['image'] for _ in range(self.n_aug)])
# imgs = []
# pts = []
#
# hh,ww,_ = img.shape
# for _ in range(self.n_aug):
# #points = [ww/2.0,hh/2.0,1.0]
# points = [[0.0,0.0,1.0], [0.0,hh,1.0], [ww,0.0,1.0],[ww,hh,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
# imgs.append(augmented['image'])
# pts.append(augmented['keypoints'])
# # NOTE: use bbox will have prob that box is outside crop region.
# bboxes= [[0.45, 0.45, 0.55, 0.55]]
#
# augmented = self.T_aug(image = img,bboxes = bboxes,category_id = ['0'])
# hh,ww,_ = img.shape
# points = [[ww/2.0,hh/2.0,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
#return augmented['image']
class TestAugmentation_albu:
def __init__(self, size, mean=0, std=1.0,out_augpos = False):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
if isinstance(size, int):
size = (size,size)
self.mean = mean
self.size = size
self.out_augpos = out_augpos
self.augment = A.Compose([A.Resize( size[0], size[1], interpolation=cv2.INTER_CUBIC, p=1),
A.Normalize(mean=mean, std=std, p=1.0),
ToTensor_albu()
])
if self.out_augpos is True:
self.augment = A.Compose(self.augment,\
keypoint_params = A.KeypointParams(format= 'xys', \
remove_invisible=False, angle_in_degrees=True))
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
if self.out_augpos is False:
augmented = self.augment(image = img)
return augmented['image']
else:
hh,ww,_ = img.shape
points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]
hw_in = img.shape[:2]
augmented = self.augment(image = img,keypoints=points)
image_aug = augmented['image']
hw_out = image_aug.shape[1:]
feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'],hw_in,hw_out))
return (image_aug,feat_kpds)
class TrainAugmentation_bone:
def __init__(self, sz_in_hw = (512,512), sz_out_hw = (448,448),mean=0, std=1.0, minmax_h = (0,128), w2h_ratio = 1.0):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
if isinstance(sz_in_hw, int):
sz_in_hw = (sz_in_hw,sz_in_hw)
if isinstance(sz_out_hw, int):
sz_out_hw = (sz_out_hw,sz_out_hw)
self.mean = mean
self.sz_in_hw = sz_in_hw
self.sz_out_hw = sz_out_hw
#self.crp_scale = crp_scale
#self.crp_ratio = crp_ratio
self.minmax_h = minmax_h
self.w2h_ratio = w2h_ratio
self.I_aug = A.Compose([A.Resize( sz_in_hw[0], sz_in_hw[1], interpolation=1, p=1),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
A.OneOf([A.Blur(blur_limit=5, p=0.3),
A.GaussNoise(var_limit=(5.0, 10.0), p=0.3),
A.IAASharpen(alpha=(0.1, 0.3), lightness=(0.5, 1.0), p=0.4)],p=0.5)])
self.T_aug = A.RandomSizedCrop(min_max_height = (self.minmax_h[0],self.minmax_h[1]),height = self.sz_out_hw[0], width = self.sz_out_hw[1],\
w2h_ratio = self.w2h_ratio,p = 1.0)
self.N_aug = A.Compose([A.Normalize(mean=mean, std=std, p=1.0),
ToTensor_albu()])
self.augment = A.Compose([self.I_aug, self.T_aug,self.N_aug])
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
augmented = self.augment(image = img)
return augmented['image']
class TestAugmentation_bone:
#def __init__(self, size, mean=0, std=1.0, ext_p =(-0.125,0.25)):
def __init__(self, sz_in_hw = (512,512), sz_out_hw = (448,448), mean=0, std=1.0):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
if isinstance(sz_in_hw, int):
sz_in_hw = (sz_in_hw,sz_in_hw)
if isinstance(sz_out_hw, int):
sz_out_hw = (sz_out_hw,sz_out_hw)
self.augment = A.Compose([A.Resize( sz_in_hw[0], sz_in_hw[1], interpolation=1, p=1),
A.CenterCrop(sz_out_hw[0], sz_out_hw[1], p=1.0),
A.Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
ToTensor_albu()
])
#
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
augmented = self.augment(image = img)
return augmented['image']
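A rough usage sketch for the augmentation classes above (not part of the original file): it pushes one synthetic RGB image through the train and test pipelines. The mean/std values and the random image are assumptions for illustration.

# Usage sketch, assuming albumentations and torch are installed as required above.
import numpy as np

if __name__ == "__main__":
    mean = (0.485, 0.456, 0.406)    # assumed ImageNet-style statistics
    std = (0.229, 0.224, 0.225)
    img = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)

    train_tf = TrainAugmentation_albu(sz_hw=(384, 384), mean=mean, std=std)
    test_tf = TestAugmentation_albu(size=384, mean=mean, std=std)

    x_train = train_tf(img)    # random crop/flip/color jitter -> tensor of shape (3, 384, 384)
    x_test = test_tf(img)      # deterministic resize + normalize
    print(x_train.shape, x_test.shape)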
| 181
| 10,462
| 116
|
22a8e0eda2fca9bf48bd5895ab01712afaaf9054
| 265
|
py
|
Python
|
Python/leetcode/HIndexIi.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | 2
|
2015-12-02T06:44:01.000Z
|
2016-05-04T21:40:54.000Z
|
Python/leetcode/HIndexIi.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
Python/leetcode/HIndexIi.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
'''
Created on 1.12.2016
@author: Darren
'''
'''
Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
Expected runtime complexity is in O(log n) and the input is sorted.
'''
| 18.928571
| 117
| 0.656604
|
'''
Created on 1.12.2016
@author: Darren
'''
'''
Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
Expected runtime complexity is in O(log n) and the input is sorted.
'''
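The record above only carries the problem statement, so here is a conventional O(log n) sketch (not from the original file): binary-search for the first index whose citation count covers all remaining papers.

# Sketch of one standard solution for H-Index II; not part of the original file.
def h_index(citations):
    """Return the h-index of an ascending-sorted citations list via binary search."""
    n = len(citations)
    lo, hi = 0, n - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if citations[mid] >= n - mid:   # papers from mid onward all have >= n - mid citations
            hi = mid - 1
        else:
            lo = mid + 1
    return n - lo


assert h_index([0, 1, 3, 5, 6]) == 3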
| 0
| 0
| 0
|
c6d3edd93fcde8345da8ce9f04c85393e6bb98d8
| 5,516
|
py
|
Python
|
HW1/hw1/code/visual_recog.py
|
jiansfoggy/16-720B
|
6395555449fa297f19efb42970e480f1b382e38a
|
[
"Unlicense"
] | 2
|
2020-03-31T15:54:49.000Z
|
2022-01-07T13:43:46.000Z
|
HW1/hw1/code/visual_recog.py
|
jiansfoggy/16-720B
|
6395555449fa297f19efb42970e480f1b382e38a
|
[
"Unlicense"
] | null | null | null |
HW1/hw1/code/visual_recog.py
|
jiansfoggy/16-720B
|
6395555449fa297f19efb42970e480f1b382e38a
|
[
"Unlicense"
] | 4
|
2019-09-10T00:48:11.000Z
|
2022-01-07T13:43:50.000Z
|
import numpy as np
import threading
import queue
import imageio
import os,time
import math
import visual_words
import multiprocessing as mp
def build_recognition_system(num_workers=2):
'''
Creates a trained recognition system by generating training features from all training images.
[input]
* num_workers: number of workers to process in parallel
[saved]
* features: numpy.ndarray of shape (N,M)
* labels: numpy.ndarray of shape (N)
* dictionary: numpy.ndarray of shape (K,3F)
* SPM_layer_num: number of spatial pyramid layers
'''
train_data = np.load("../data/train_data.npz")
dictionary = np.load("../outputs/dictionary.npy")
data = train_data['image_names']
SPM_layer_num = 2
K = 100
size_Feature = int(K*(4**(SPM_layer_num+1) -1)/3)
pool = mp.Pool(num_workers)
results = []
for i in range(0, len(data)):
print (i)
args = [data[i][0], dictionary, SPM_layer_num, K]
results.append(pool.apply_async(get_image_feature, args))
features = []
for result in results:
features.append(result.get())
final_features = np.reshape(features, (len(data), size_Feature))
labels = np.asarray(train_data['labels'])
np.savez('../outputs/trained_system.npz', features = final_features, labels = labels, SPM_layer_num = SPM_layer_num, dictionary = dictionary)
def evaluate_recognition_system(num_workers=2):
'''
Evaluates the recognition system for all test images and returns the confusion matrix.
[input]
* num_workers: number of workers to process in parallel
[output]
* conf: numpy.ndarray of shape (8,8)
* accuracy: accuracy of the evaluated system
'''
test_data = np.load("../data/test_data.npz")
trained_system = np.load("../outputs/trained_system.npz")
features = trained_system['features']
dictionary = trained_system['dictionary']
SPM_layer_num = trained_system['SPM_layer_num']
labels = trained_system['labels']
K = dictionary.shape[0]
data = test_data['image_names']
pool = mp.Pool(num_workers)
features_test = []
for i in range(0, len(data)):
args = [(data[i][0], dictionary, SPM_layer_num, K, features, labels)]
features_test.append(pool.apply_async(test_label, args))
test_labels = []
for feature in features_test:
test_labels.append(feature.get())
testActualLabels = test_data['labels']
size_confusion = len(np.unique(testActualLabels))
C = np.zeros((size_confusion, size_confusion))
for a,p in zip(testActualLabels, test_labels):
C[a][p] += 1
accuracy = np.diag(C).sum()/C.sum()
return C, accuracy
def get_image_feature(file_path,dictionary,layer_num,K):
'''
Extracts the spatial pyramid matching feature.
[input]
* file_path: path of image file to read
* dictionary: numpy.ndarray of shape (K,3F)
* layer_num: number of spatial pyramid layers
* K: number of clusters for the word maps
[output]
* feature: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
image = imageio.imread('../data/' + file_path)
wordmap = visual_words.get_visual_words(image, dictionary)
hist_all = get_feature_from_wordmap_SPM(wordmap, layer_num, K)
return hist_all
def distance_to_set(word_hist,histograms):
'''
Compute similarity between a histogram of visual words with all training image histograms.
[input]
* word_hist: numpy.ndarray of shape (K)
* histograms: numpy.ndarray of shape (N,K)
[output]
* sim: numpy.ndarray of shape (N)
'''
min_compare = np.minimum(histograms, word_hist)
return np.sum(min_compare, axis=1)
def get_feature_from_wordmap(wordmap,dict_size):
'''
Compute histogram of visual words.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* dict_size: dictionary size K
[output]
* hist: numpy.ndarray of shape (K)
'''
flatten_wordmap = wordmap.flatten()
hist = np.histogram(flatten_wordmap, bins = dict_size, range = (0,dict_size))
hist = hist[0]/np.linalg.norm(hist[0], ord = 1)
return np.asarray(hist)
def get_feature_from_wordmap_SPM(wordmap,layer_num,dict_size):
'''
Compute histogram of visual words using spatial pyramid matching.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* layer_num: number of spatial pyramid layers
* dict_size: dictionary size K
[output]
* hist_all: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
i_h, i_w = wordmap.shape
hist_all = []
for layer in range(0, layer_num+1):
D = 2**layer
if layer == 0 or layer == 1:
weight = 1/(2**(layer_num))
else:
weight = 1/(2**(layer_num+1-layer))
height_indices = np.round(np.arange(0, i_h+1, i_h/D)).astype('int')
width_indices = np.round(np.arange(0, i_w+1, i_w/D)).astype('int')
divisions = height_indices.shape[0]-1
for i in range(0, divisions):
for j in range (0, divisions):
s_h, s_w = height_indices[i], width_indices[j]
e_h, e_w = height_indices[i+1], width_indices[j+1]
imageSection = wordmap[s_h:e_h, s_w:e_w]
imageDictionary = get_feature_from_wordmap(imageSection, dict_size)
imageDictionary = imageDictionary*weight
hist_all.append(imageDictionary)
hist_all = np.asarray(hist_all)
hist_all = hist_all.flatten()
hist_all = hist_all/np.linalg.norm(hist_all, ord = 1)
return hist_all
| 28.729167
| 143
| 0.701051
|
import numpy as np
import threading
import queue
import imageio
import os,time
import math
import visual_words
import multiprocessing as mp
def build_recognition_system(num_workers=2):
'''
Creates a trained recognition system by generating training features from all training images.
[input]
* num_workers: number of workers to process in parallel
[saved]
* features: numpy.ndarray of shape (N,M)
* labels: numpy.ndarray of shape (N)
* dictionary: numpy.ndarray of shape (K,3F)
* SPM_layer_num: number of spatial pyramid layers
'''
train_data = np.load("../data/train_data.npz")
dictionary = np.load("../outputs/dictionary.npy")
data = train_data['image_names']
SPM_layer_num = 2
K = 100
size_Feature = int(K*(4**(SPM_layer_num+1) -1)/3)
pool = mp.Pool(num_workers)
results = []
for i in range(0, len(data)):
print (i)
args = [data[i][0], dictionary, SPM_layer_num, K]
results.append(pool.apply_async(get_image_feature, args))
features = []
for result in results:
features.append(result.get())
final_features = np.reshape(features, (len(data), size_Feature))
labels = np.asarray(train_data['labels'])
np.savez('../outputs/trained_system.npz', features = final_features, labels = labels, SPM_layer_num = SPM_layer_num, dictionary = dictionary)
def test_label(args):
file_path,dictionary,layer_num,K, features, labels = args
feature = get_image_feature(file_path, dictionary, layer_num, K)
distance = distance_to_set(feature, features)
i = np.argmax(distance)
label = labels[i]
return label
def evaluate_recognition_system(num_workers=2):
'''
Evaluates the recognition system for all test images and returns the confusion matrix.
[input]
* num_workers: number of workers to process in parallel
[output]
* conf: numpy.ndarray of shape (8,8)
* accuracy: accuracy of the evaluated system
'''
test_data = np.load("../data/test_data.npz")
trained_system = np.load("../outputs/trained_system.npz")
features = trained_system['features']
dictionary = trained_system['dictionary']
SPM_layer_num = trained_system['SPM_layer_num']
labels = trained_system['labels']
K = dictionary.shape[0]
data = test_data['image_names']
pool = mp.Pool(num_workers)
features_test = []
for i in range(0, len(data)):
args = [(data[i][0], dictionary, SPM_layer_num, K, features, labels)]
features_test.append(pool.apply_async(test_label, args))
test_labels = []
for feature in features_test:
test_labels.append(feature.get())
testActualLabels = test_data['labels']
size_confusion = len(np.unique(testActualLabels))
C = np.zeros((size_confusion, size_confusion))
for a,p in zip(testActualLabels, test_labels):
C[a][p] += 1
accuracy = np.diag(C).sum()/C.sum()
return C, accuracy
def get_image_feature(file_path,dictionary,layer_num,K):
'''
Extracts the spatial pyramid matching feature.
[input]
* file_path: path of image file to read
* dictionary: numpy.ndarray of shape (K,3F)
* layer_num: number of spatial pyramid layers
* K: number of clusters for the word maps
[output]
* feature: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
image = imageio.imread('../data/' + file_path)
wordmap = visual_words.get_visual_words(image, dictionary)
hist_all = get_feature_from_wordmap_SPM(wordmap, layer_num, K)
return hist_all
def distance_to_set(word_hist,histograms):
'''
Compute similarity between a histogram of visual words with all training image histograms.
[input]
* word_hist: numpy.ndarray of shape (K)
* histograms: numpy.ndarray of shape (N,K)
[output]
* sim: numpy.ndarray of shape (N)
'''
min_compare = np.minimum(histograms, word_hist)
return np.sum(min_compare, axis=1)
def get_feature_from_wordmap(wordmap,dict_size):
'''
Compute histogram of visual words.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* dict_size: dictionary size K
[output]
* hist: numpy.ndarray of shape (K)
'''
flatten_wordmap = wordmap.flatten()
hist = np.histogram(flatten_wordmap, bins = dict_size, range = (0,dict_size))
hist = hist[0]/np.linalg.norm(hist[0], ord = 1)
return np.asarray(hist)
def get_feature_from_wordmap_SPM(wordmap,layer_num,dict_size):
'''
Compute histogram of visual words using spatial pyramid matching.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* layer_num: number of spatial pyramid layers
* dict_size: dictionary size K
[output]
* hist_all: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
i_h, i_w = wordmap.shape
hist_all = []
for layer in range(0, layer_num+1):
D = 2**layer
if layer == 0 or layer == 1:
weight = 1/(2**(layer_num))
else:
weight = 1/(2**(layer_num+1-layer))
height_indices = np.round(np.arange(0, i_h+1, i_h/D)).astype('int')
width_indices = np.round(np.arange(0, i_w+1, i_w/D)).astype('int')
divisions = height_indices.shape[0]-1
for i in range(0, divisions):
for j in range (0, divisions):
s_h, s_w = height_indices[i], width_indices[j]
e_h, e_w = height_indices[i+1], width_indices[j+1]
imageSection = wordmap[s_h:e_h, s_w:e_w]
imageDictionary = get_feature_from_wordmap(imageSection, dict_size)
imageDictionary = imageDictionary*weight
hist_all.append(imageDictionary)
hist_all = np.asarray(hist_all)
hist_all = hist_all.flatten()
hist_all = hist_all/np.linalg.norm(hist_all, ord = 1)
return hist_all
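A quick sanity check on the feature dimensionality used above (an aside, not from the original file): each pyramid layer l contributes 4**l spatial cells of K bins, so the concatenated histogram has K*(4**(L+1) - 1)/3 entries; with K = 100 and SPM_layer_num = 2 that is 2100, matching size_Feature.

# Aside: closed-form SPM feature length, consistent with size_Feature above.
def spm_feature_len(K, L):
    """Total histogram length: K bins per cell, summed over 4**l cells for l = 0..L."""
    return K * (4 ** (L + 1) - 1) // 3


assert spm_feature_len(100, 2) == 2100
assert spm_feature_len(100, 2) == sum(100 * 4 ** l for l in range(3))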
| 236
| 0
| 25
|
076663290b2821e6423b989415d2957ab3b21b81
| 441
|
py
|
Python
|
app/__init__.py
|
SalesAppi/JPSSM_topics
|
6be32fca31e5e15f51753101a222a08fd2013f9b
|
[
"MIT"
] | 1
|
2022-03-01T08:15:28.000Z
|
2022-03-01T08:15:28.000Z
|
app/__init__.py
|
SalesAppi/JPSSM_topics
|
6be32fca31e5e15f51753101a222a08fd2013f9b
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
SalesAppi/JPSSM_topics
|
6be32fca31e5e15f51753101a222a08fd2013f9b
|
[
"MIT"
] | 1
|
2020-12-14T05:00:28.000Z
|
2020-12-14T05:00:28.000Z
|
from flask import Flask
import gensim
import re
from gensim.models import LdaModel
from gensim.test.utils import datapath
from gensim import corpora, models
from gensim.corpora import Dictionary
from re import sub
import os
import string
import codecs
import nltk
#from nltk.tokenize import RegexpTokenizer
#from nltk import stem
#from nltk.stem import WordNetLemmatizer
app = Flask(__name__)
from app import views
from app import model
| 19.173913
| 42
| 0.825397
|
from flask import Flask
import gensim
import re
from gensim.models import LdaModel
from gensim.test.utils import datapath
from gensim import corpora, models
from gensim.corpora import Dictionary
from re import sub
import os
import string
import codecs
import nltk
#from nltk.tokenize import RegexpTokenizer
#from nltk import stem
#from nltk.stem import WordNetLemmatizer
app = Flask(__name__)
from app import views
from app import model
| 0
| 0
| 0
|
0cd7b7f58642768e6475f872172900431c6d1e24
| 100
|
py
|
Python
|
tests/core/test_str_utils.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | null | null | null |
tests/core/test_str_utils.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | 11
|
2020-06-07T12:29:21.000Z
|
2020-06-24T19:44:36.000Z
|
tests/core/test_str_utils.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | null | null | null |
from app.utils.str_utils import str_to_int
| 16.666667
| 42
| 0.73
|
from app.utils.str_utils import str_to_int
def test_str_to_int():
assert str_to_int("5") == 5
| 33
| 0
| 23
|
f6c6564e121ddf8a7df728a398f2d1498dea1117
| 7,942
|
py
|
Python
|
tables.py
|
arunext/greffy
|
001a0b94428629b9cdfaa8966f3cf6cd6f349e8a
|
[
"Apache-2.0"
] | null | null | null |
tables.py
|
arunext/greffy
|
001a0b94428629b9cdfaa8966f3cf6cd6f349e8a
|
[
"Apache-2.0"
] | null | null | null |
tables.py
|
arunext/greffy
|
001a0b94428629b9cdfaa8966f3cf6cd6f349e8a
|
[
"Apache-2.0"
] | null | null | null |
import psycopg2
from config import config
import datetime
from textblob import TextBlob
import nltk
from nltk.corpus import stopwords
def create_tables():
""" create tables in the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS POSTS
(POST_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
COMMENTS INT,
COUNT INT);''')
cur.execute('''CREATE TABLE IF NOT EXISTS COMMENTS
(POST_ID INT NOT NULL,
COMMENT_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
UPVOTES INT,
DOWNVOTES INT);''')
conn.commit()
conn.close()
def create_post(postid, text):
""" insert a new post into the vendors table """
print("inside create post")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
comments = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO POSTS (POST_ID, DATA, CREATED, COMMENTS, COUNT) VALUES (%s, %s, %s, %s, %s)",(postid,text,time,comments,count));
conn.commit()
print("Records created successfully")
conn.close()
def create_comment(postid, commentid, text):
""" insert a new comment into the post table """
print("inside create comments")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO COMMENTS (POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES) VALUES (%s, %s, %s, %s, 0, 0)",(postid,commentid,text, time));
# Get Corresponding post
cur.execute("SELECT POST_ID, COMMENTS from POSTS where POST_ID = {0} ORDER BY COUNT DESC".format(postid));
rows = cur.fetchall()
for row in rows:
comments = row[1]
break
comments = comments+1
# Update Comments count of post
cur.execute("UPDATE POSTS set COMMENTS = {0} where POST_ID = {1}".format(comments,postid));
conn.commit()
print("Records created successfully")
conn.close()
def lookup_table(text):
""" insert a new post into the vendors table """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
#initialize id and count to null values
postid = 0
count = 0
#Select post
cur.execute("SELECT POST_ID, DATA, COUNT from POSTS where DATA = '{0}' ORDER BY COUNT DESC".format(text));
rows = cur.fetchall()
for row in rows:
postid = row[0]
count = row[2]
break
print "Lookup operation done successfully. Id = {0}".format(id);
conn.close()
return postid, count
def update_table_count(postid, count):
""" update post with count """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("UPDATE POSTS set COUNT = {0} where POST_ID = {1}".format(count,postid));
conn.commit()
print "Update operation done successfully for POST_ID {0} and count {1}".format(postid,count)
conn.close()
def comment_upvote(comment_id):
""" update post with count """
print("inside upvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, UPVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY UPVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
upvotes = row[1]
break
upvotes = upvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set UPVOTES = {0} where COMMENT_ID = {1}".format(upvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
def comment_downvote(comment_id):
""" update comment with dwnvote """
print("inside downvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, DOWNVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY DOWNVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
downvotes = row[1]
break
downvotes = downvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set DOWNVOTES = {0} where COMMENT_ID = {1}".format(downvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
| 27.013605
| 157
| 0.629816
|
import psycopg2
from config import config
import datetime
from textblob import TextBlob
import nltk
from nltk.corpus import stopwords
def create_tables():
""" create tables in the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS POSTS
(POST_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
COMMENTS INT,
COUNT INT);''')
cur.execute('''CREATE TABLE IF NOT EXISTS COMMENTS
(POST_ID INT NOT NULL,
COMMENT_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
UPVOTES INT,
DOWNVOTES INT);''')
conn.commit()
conn.close()
def show_table():
print("creating tables with")
create_tables() #creating table, later check if table exists.
print("Inside show tables")
""" show tables from the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
print ("Opened database successfully")
cur = conn.cursor()
cur.execute("SELECT POST_ID, DATA, COUNT, COMMENTS from POSTS ORDER BY COUNT DESC")
rows = cur.fetchall()
#table_text = ""
#for row in rows:
# table_text += "Post ID = " + str(row[0])
# table_text += "Text = " + row[1]
#table_text += "Count = " + str(row[2]) + "\n"
conn.close()
return rows
def show_post(postid):
print("Inside show post")
""" show tables from the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
print ("Opened database successfully")
cur = conn.cursor()
cur.execute("SELECT POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES from COMMENTS where POST_ID = {0} ORDER BY UPVOTES DESC".format(postid));
rows = cur.fetchall()
conn.close()
return rows
def create_post(postid, text):
""" insert a new post into the vendors table """
print("inside create post")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
comments = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO POSTS (POST_ID, DATA, CREATED, COMMENTS, COUNT) VALUES (%s, %s, %s, %s, %s)",(postid,text,time,comments,count));
conn.commit()
print("Records created successfully")
conn.close()
def create_comment(postid, commentid, text):
""" insert a new comment into the post table """
print("inside create comments")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO COMMENTS (POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES) VALUES (%s, %s, %s, %s, 0, 0)",(postid,commentid,text, time));
# Get Corresponding post
cur.execute("SELECT POST_ID, COMMENTS from POSTS where POST_ID = {0} ORDER BY COUNT DESC".format(postid));
rows = cur.fetchall()
for row in rows:
comments = row[1]
break
comments = comments+1
# Update Comments count of post
cur.execute("UPDATE POSTS set COMMENTS = {0} where POST_ID = {1}".format(comments,postid));
conn.commit()
print("Records created successfully")
conn.close()
def lookup_table(text):
""" insert a new post into the vendors table """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
#initialize id and count to null values
postid = 0
count = 0
#Select post
cur.execute("SELECT POST_ID, DATA, COUNT from POSTS where DATA = '{0}' ORDER BY COUNT DESC".format(text));
rows = cur.fetchall()
for row in rows:
postid = row[0]
count = row[2]
break
print "Lookup operation done successfully. Id = {0}".format(id);
conn.close()
return postid, count
def get_post_summary(postid):
#currently send the top comment, latet this is the key logic to send response
print("inside get post summary")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("SELECT POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES from COMMENTS where POST_ID = {0} ORDER BY UPVOTES DESC".format(postid));
rows = cur.fetchall()
count = 0
catcomments = ""
for row in rows:
count = count + 1
if count == 1:
topcomment = row[2]
catcomments = catcomments + row[2]
if count == 0:
#no comments, ask user to comment
topcomment = "Sorry, we don't have any comments, be the first one to comment: http://greffy.herokuapp.com/post/" + str(postid)
polarity = 0
subjectivity = 0
else:
blob = TextBlob(catcomments)
# TODO add overall positive, neutral negative instead of polarity
blob.sentences
        words = blob.words  # word list from the TextBlob (likely what 'b' was meant to be); not used below
polarity =round(blob.sentiment.polarity,2)
subjectivity = round(blob.sentiment.subjectivity,2)
print(topcomment,polarity)
return topcomment,polarity
def update_table_count(postid, count):
""" update post with count """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("UPDATE POSTS set COUNT = {0} where POST_ID = {1}".format(count,postid));
conn.commit()
print "Update operation done successfully for POST_ID {0} and count {1}".format(postid,count)
conn.close()
def comment_upvote(comment_id):
""" update post with count """
print("inside upvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, UPVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY UPVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
upvotes = row[1]
break
upvotes = upvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set UPVOTES = {0} where COMMENT_ID = {1}".format(upvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
def comment_downvote(comment_id):
""" update comment with dwnvote """
print("inside downvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, DOWNVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY DOWNVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
downvotes = row[1]
break
downvotes = downvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set DOWNVOTES = {0} where COMMENT_ID = {1}".format(downvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
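Several of the SELECT/UPDATE statements above interpolate values with str.format, which breaks on quotes and is open to SQL injection; create_post and create_comment already use psycopg2 placeholders. A hedged sketch of lookup_table written in that same placeholder style (field and table names as above, return convention unchanged):

# Sketch only: lookup_table using psycopg2 placeholders instead of str.format.
def lookup_table_parameterized(text):
    """Return (postid, count) for an exact-text post match, or (0, 0) if absent."""
    params = config()
    conn = psycopg2.connect(**params)
    cur = conn.cursor()
    cur.execute(
        "SELECT POST_ID, DATA, COUNT FROM POSTS WHERE DATA = %s ORDER BY COUNT DESC",
        (text,),
    )
    row = cur.fetchone()
    conn.close()
    if row is None:
        return 0, 0
    return row[0], row[2]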
| 2,434
| 0
| 69
|
2f1c4753ac08df358bf6226a60a7c9bda64e76e2
| 950
|
py
|
Python
|
weltgeist/units.py
|
samgeen/Weltgeist
|
c7d52e879bb3473cecbb06651b5e76dac3020da6
|
[
"MIT"
] | null | null | null |
weltgeist/units.py
|
samgeen/Weltgeist
|
c7d52e879bb3473cecbb06651b5e76dac3020da6
|
[
"MIT"
] | null | null | null |
weltgeist/units.py
|
samgeen/Weltgeist
|
c7d52e879bb3473cecbb06651b5e76dac3020da6
|
[
"MIT"
] | null | null | null |
"""
Defined code units and physical quantities
The Python parts of Weltgeist use cgs
VH1 uses units defined below
Sam Geen, February 2018
"""
import numpy as np
# Physical quantities (base units in cgs)
pc = 3.086e+18
mH = 1.66e-24
year = 3.154e+7
Myr = 1e6*year
kB = 1.3806485279e-16 # in cgs
G = 6.67428e-8
X = 0.74
mp = mH / X
c = 2.998e+10
eV = 1.60217662e-12 # in ergs
Msun = 1.9891e33 # g
# Code units
# Used by VH1 - the Python parts of Weltgeist use cgs
distance = pc # in cm
density = mH # 1 g/cm^3
time = 1.0 / np.sqrt(G*density) # sets G=1 in VH1 (not super important here, though)
# Derived units
velocity = distance / time
mass = density*distance**3.0
pressure = density * velocity**2.0
energy = mass*velocity**2.0
# Note: this is acceleration! In the code (e.g. forces.f90), grav = v*v/r
# e.g. 2*GM/r = v_esc^2, so g=GM/r^2=0.5*v_esc^2/r
gravity = G*mass/distance**2 # velocity*velocity/distance
| 26.388889
| 85
| 0.663158
|
"""
Defined code units and physical quantities
The Python parts of Weltgeist use cgs
VH1 uses units defined below
Sam Geen, February 2018
"""
import numpy as np
# Physical quantities (base units in cgs)
pc = 3.086e+18
mH = 1.66e-24
year = 3.154e+7
Myr = 1e6*year
kB = 1.3806485279e-16 # in cgs
G = 6.67428e-8
X = 0.74
mp = mH / X
c = 2.998e+10
eV = 1.60217662e-12 # in ergs
Msun = 1.9891e33 # g
# Code units
# Used by VH1 - the Python parts of Weltgeist use cgs
distance = pc # in cm
density = mH # 1 g/cm^3
time = 1.0 / np.sqrt(G*density) # sets G=1 in VH1 (not super important here, though)
# Derived units
velocity = distance / time
mass = density*distance**3.0
pressure = density * velocity**2.0
energy = mass*velocity**2.0
# Note: this is acceleration! In the code (e.g. forces.f90), grav = v*v/r
# e.g. 2*GM/r = v_esc^2, so g=GM/r^2=0.5*v_esc^2/r
gravity = G*mass/distance**2 # velocity*velocity/distance
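A short sanity-check sketch (not in the original file) converting a few physical values into the VH1 code units defined above; the chosen values are arbitrary.

# Quick conversions between cgs and the code units defined in this module.
if __name__ == "__main__":
    r_cgs = 10.0 * pc          # 10 pc in cm
    t_cgs = 1.0 * Myr          # 1 Myr in s
    m_cgs = 1.0e3 * Msun       # 1000 solar masses in g

    print("10 pc      =", r_cgs / distance, "code length units")
    print("1 Myr      =", t_cgs / time, "code time units")
    print("10^3 Msun  =", m_cgs / mass, "code mass units")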
| 0
| 0
| 0
|
6f13085bc1fdca1855e4e392298226e252788d0b
| 343
|
py
|
Python
|
api/interest/api/viewsets.py
|
jhonatantft/ckl
|
8a0d533922fa091ac5f2dbe50caee3920ec2b90d
|
[
"MIT"
] | null | null | null |
api/interest/api/viewsets.py
|
jhonatantft/ckl
|
8a0d533922fa091ac5f2dbe50caee3920ec2b90d
|
[
"MIT"
] | 2
|
2021-05-08T21:26:43.000Z
|
2022-02-19T00:26:17.000Z
|
api/interest/api/viewsets.py
|
jhonatantft/ckl
|
8a0d533922fa091ac5f2dbe50caee3920ec2b90d
|
[
"MIT"
] | null | null | null |
from rest_framework.response import Response
from rest_framework.filters import SearchFilter
from rest_framework.viewsets import ModelViewSet
from interest.models import Interest
from .serializers import InterestSerializer
| 31.181818
| 48
| 0.848397
|
from rest_framework.response import Response
from rest_framework.filters import SearchFilter
from rest_framework.viewsets import ModelViewSet
from interest.models import Interest
from .serializers import InterestSerializer
class InterestsViewSet(ModelViewSet):
serializer_class = InterestSerializer
queryset = Interest.objects.all()
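An illustrative sketch (not from the repository) of how a ModelViewSet like the one above is typically exposed, assuming it is importable as interest.api.viewsets:

# Hypothetical urls module wiring InterestsViewSet into a DRF router.
from rest_framework.routers import DefaultRouter

from interest.api.viewsets import InterestsViewSet

router = DefaultRouter()
router.register(r'interests', InterestsViewSet, basename='interest')

urlpatterns = router.urls   # include these in the project's urls.py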
| 0
| 97
| 23
|
ca8b1afb26f13038161c24aead09569f01b99768
| 9,456
|
py
|
Python
|
olwidget/widgets.py
|
jj0hns0n/mednet
|
efb6681292e7ac8f870ee5967a5a2b352853ae35
|
[
"BSD-3-Clause"
] | 2
|
2016-02-18T01:06:04.000Z
|
2016-02-18T03:53:37.000Z
|
olwidget/widgets.py
|
jj0hns0n/mednet
|
efb6681292e7ac8f870ee5967a5a2b352853ae35
|
[
"BSD-3-Clause"
] | null | null | null |
olwidget/widgets.py
|
jj0hns0n/mednet
|
efb6681292e7ac8f870ee5967a5a2b352853ae35
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from django.contrib.gis.gdal import OGRException, OGRGeometry
from django.contrib.gis.geos import GEOSGeometry
from django.forms.widgets import Textarea
from django.template.loader import render_to_string
from django.utils import simplejson
from django.conf import settings
from django import forms
# Default settings for paths and API URLs. These can all be overridden by
# specifying a value in settings.py
api_defaults = {
'GOOGLE_API_KEY': "",
'YAHOO_APP_ID': "",
'OLWIDGET_MEDIA_URL': url_join(settings.MEDIA_URL, "olwidget"),
'GOOGLE_API': "http://maps.google.com/maps?file=api&v=2",
'YAHOO_API': "http://api.maps.yahoo.com/ajaxymap?v=3.0",
'OSM_API': "http://openstreetmap.org/openlayers/OpenStreetMap.js",
'OL_API': "http://openlayers.org/api/2.8/OpenLayers.js",
'MS_VE_API' : "http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1",
}
for key, default in api_defaults.iteritems():
if not hasattr(settings, key):
setattr(settings, key, default)
OLWIDGET_JS = url_join(settings.OLWIDGET_MEDIA_URL, "js/olwidget.js")
OLWIDGET_CSS = url_join(settings.OLWIDGET_MEDIA_URL, "css/olwidget.css")
DEFAULT_PROJ = "4326"
class EditableMap(forms.Textarea, MapMixin):
"""
An OpenLayers mapping widget for geographic data.
Example::
from django import forms
from olwidget.widgets import OLWidget
class MyForm(forms.Form):
location = forms.CharField(widget=EditableMap(
options={'geometry': 'point'}))
"""
default_template = 'olwidget/editable_map.html'
class MapDisplay(EditableMap):
"""
Object for display of geometries on an OpenLayers map. Arguments (all are
optional):
* ``fields`` - a list of geometric fields or WKT strings to display on the
map. If none are given, the map will have no overlay.
* ``name`` - a name to use for display of the field data layer.
* ``options`` - a dict of options for map display. A complete list of
options is in the documentation for olwidget.js.
Example::
from olwidget.widgets import MapDisplay
map = MapDisplay(fields=[my_model.start_point, my_model.destination])
To use in a template, first display the media (URLs for javascript and CSS
needed for map display) and then print the MapDisplay object, as in the
following::
<html>
<head>
{{ map.media }}
</head>
<body>
{{ map }}
</body>
</html>
By default, maps rendered by MapDisplay objects are not editable, but this
can be overriden by setting "options['editable'] = True".
"""
class InfoMap(forms.Widget, MapMixin):
"""
Widget for displaying maps with pop-up info boxes over geometries.
Arguments:
* ``info``: an array of [geometry, HTML] pairs that specify geometries, and
the popup contents associated with them. Geometries can be expressed as
geometry fields, or as WKT strings. Example::
[
[geomodel1.geofield, "<p>Model One</p>"],
[geomodel2.geofield, "<p>Model Two</p>"],
...
]
* ``options``: an optional dict of options for map display.
In templates, InfoMap.media must be displayed in addition to InfoMap for
the map to function properly.
"""
default_template = 'olwidget/info_map.html'
ewkt_re = re.compile("^SRID=(?P<srid>\d+);(?P<wkt>.+)$", re.I)
def get_wkt(value, srid=DEFAULT_PROJ):
"""
`value` is either a WKT string or a geometry field. Returns WKT in the
projection for the given SRID.
"""
ogr = None
if value:
if isinstance(value, OGRGeometry):
ogr = value
elif isinstance(value, GEOSGeometry):
ogr = value.ogr
elif isinstance(value, basestring):
match = ewkt_re.match(value)
if match:
ogr = OGRGeometry(match.group('wkt'), match.group('srid'))
else:
ogr = OGRGeometry(value)
wkt = ''
if ogr:
# Workaround for Django bug #12312. GEOSGeometry types don't support 3D wkt;
# OGRGeometry types output 3D for linestrings even if they should do 2D, causing
# IntegrityError's.
if ogr.dimension == 2:
geos = ogr.geos
geos.transform(srid)
wkt = geos.wkt
else:
ogr.transform(srid)
wkt = ogr.wkt
return wkt
def collection_wkt(fields):
""" Returns WKT for the given list of geometry fields. """
if not fields:
return ""
if len(fields) == 1:
return get_wkt(fields[0])
return "GEOMETRYCOLLECTION(%s)" % \
",".join(get_wkt(field) for field in fields)
def add_srid(wkt, srid=DEFAULT_PROJ):
"""
Returns EWKT (WKT with a specified SRID) for the given wkt and SRID
(default 4326).
"""
if wkt:
return "SRID=%s;%s" % (srid, wkt)
return ""
| 32.273038
| 88
| 0.602792
|
import re
from django.contrib.gis.gdal import OGRException, OGRGeometry
from django.contrib.gis.geos import GEOSGeometry
from django.forms.widgets import Textarea
from django.template.loader import render_to_string
from django.utils import simplejson
from django.conf import settings
from django import forms
def reduce_url_parts(a, b):
if a[-1] == "/":
return a + b
return a + "/" + b
def url_join(*args):
return reduce(reduce_url_parts, args)
# Default settings for paths and API URLs. These can all be overridden by
# specifying a value in settings.py
api_defaults = {
'GOOGLE_API_KEY': "",
'YAHOO_APP_ID': "",
'OLWIDGET_MEDIA_URL': url_join(settings.MEDIA_URL, "olwidget"),
'GOOGLE_API': "http://maps.google.com/maps?file=api&v=2",
'YAHOO_API': "http://api.maps.yahoo.com/ajaxymap?v=3.0",
'OSM_API': "http://openstreetmap.org/openlayers/OpenStreetMap.js",
'OL_API': "http://openlayers.org/api/2.8/OpenLayers.js",
'MS_VE_API' : "http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1",
}
for key, default in api_defaults.iteritems():
if not hasattr(settings, key):
setattr(settings, key, default)
OLWIDGET_JS = url_join(settings.OLWIDGET_MEDIA_URL, "js/olwidget.js")
OLWIDGET_CSS = url_join(settings.OLWIDGET_MEDIA_URL, "css/olwidget.css")
DEFAULT_PROJ = "4326"
def separated_lowercase_to_lower_camelcase(input):
return re.sub('_\w', lambda match: match.group(0)[-1].upper(), input)
def translate_options(options):
translated = {}
for key, value in options.iteritems():
new_key = separated_lowercase_to_lower_camelcase(key)
# recurse
if isinstance(value, dict):
translated[new_key] = translate_options(value)
else:
translated[new_key] = value
return translated
class MapMixin(object):
def set_options(self, options, template):
self.options = options or {}
# Though this is the olwidget.js default, it must be explicitly set so
# form.media knows to include osm.
self.options['layers'] = self.options.get('layers',
['osm.mapnik'])
self.template = template or self.default_template
def _media(self):
js = set()
# collect scripts necessary for various layers
for layer in self.options['layers']:
if layer.startswith("osm."):
js.add(settings.OSM_API)
elif layer.startswith("google."):
js.add(settings.GOOGLE_API + "&key=%s" % settings.GOOGLE_API_KEY)
elif layer.startswith("yahoo."):
js.add(settings.YAHOO_API + "&appid=%s" % settings.YAHOO_APP_ID)
elif layer.startswith("ve."):
js.add(settings.MS_VE_API)
js = [settings.OL_API, OLWIDGET_JS] + list(js)
return forms.Media(css={'all': (OLWIDGET_CSS,)}, js=js)
media = property(_media)
class EditableMap(forms.Textarea, MapMixin):
"""
An OpenLayers mapping widget for geographic data.
Example::
from django import forms
from olwidget.widgets import OLWidget
class MyForm(forms.Form):
location = forms.CharField(widget=EditableMap(
options={'geometry': 'point'}))
"""
default_template = 'olwidget/editable_map.html'
def __init__(self, options=None, template=None):
self.set_options(options, template)
super(EditableMap, self).__init__()
def render(self, name, value, attrs=None):
if not attrs:
attrs = {}
# without an id, javascript fails
if attrs.has_key('id'):
element_id = attrs['id']
else:
element_id = "id_%s" % id(self)
# Allow passing of wkt for MapDisplay subclass
if attrs.has_key('wkt'):
wkt = attrs['wkt']
else:
# Use the default SRID's
wkt = add_srid(get_wkt(value))
if name and not self.options.has_key('name'):
self.options['name'] = name
context = {
'id': element_id,
'name': name,
'wkt': wkt,
'map_opts': simplejson.dumps(
translate_options(self.options)
),
}
return render_to_string(self.template, context)
class MapDisplay(EditableMap):
"""
Object for display of geometries on an OpenLayers map. Arguments (all are
optional):
* ``fields`` - a list of geometric fields or WKT strings to display on the
map. If none are given, the map will have no overlay.
* ``name`` - a name to use for display of the field data layer.
* ``options`` - a dict of options for map display. A complete list of
options is in the documentation for olwidget.js.
Example::
from olwidget.widgets import MapDisplay
map = MapDisplay(fields=[my_model.start_point, my_model.destination])
To use in a template, first display the media (URLs for javascript and CSS
needed for map display) and then print the MapDisplay object, as in the
following::
<html>
<head>
{{ map.media }}
</head>
<body>
{{ map }}
</body>
</html>
By default, maps rendered by MapDisplay objects are not editable, but this
can be overriden by setting "options['editable'] = True".
"""
def __init__(self, fields=None, options=None, template=None):
self.fields = fields
options = options or {}
if not options.has_key('editable'):
options['editable'] = False
if (self.fields and len(self.fields) > 1) or \
(fields[0].geom_type.upper() == 'GEOMETRYCOLLECTION'):
options['isCollection'] = True
super(MapDisplay, self).__init__(options, template)
def __unicode__(self):
wkt = add_srid(collection_wkt(self.fields))
name = self.options.get('name', 'data')
return self.render(name, None, attrs={'wkt': wkt})
class InfoMap(forms.Widget, MapMixin):
"""
Widget for displaying maps with pop-up info boxes over geometries.
Arguments:
* ``info``: an array of [geometry, HTML] pairs that specify geometries, and
the popup contents associated with them. Geometries can be expressed as
geometry fields, or as WKT strings. Example::
[
[geomodel1.geofield, "<p>Model One</p>"],
[geomodel2.geofield, "<p>Model Two</p>"],
...
]
* ``options``: an optional dict of options for map display.
In templates, InfoMap.media must be displayed in addition to InfoMap for
the map to function properly.
"""
default_template = 'olwidget/info_map.html'
def __init__(self, info=None, options=None, template=None):
self.info = info
self.set_options(options, template)
super(InfoMap, self).__init__()
def render(self, name, value, attrs=None):
if not self.info:
info_json = '[]'
else:
# convert fields to wkt and translate options if needed
wkt_array = []
for geom, attr in self.info:
wkt = add_srid(get_wkt(geom))
if isinstance(attr, dict):
wkt_array.append([wkt, translate_options(attr)])
else:
wkt_array.append([wkt, attr])
info_json = simplejson.dumps(wkt_array)
# arbitrary unique id
div_id = "id_%s" % id(self)
context = {
'id': div_id,
'info_array': info_json,
'map_opts': simplejson.dumps(
translate_options(self.options)
),
}
return render_to_string(self.template, context)
def __unicode__(self):
return self.render(None, None)
ewkt_re = re.compile("^SRID=(?P<srid>\d+);(?P<wkt>.+)$", re.I)
def get_wkt(value, srid=DEFAULT_PROJ):
"""
`value` is either a WKT string or a geometry field. Returns WKT in the
projection for the given SRID.
"""
ogr = None
if value:
if isinstance(value, OGRGeometry):
ogr = value
elif isinstance(value, GEOSGeometry):
ogr = value.ogr
elif isinstance(value, basestring):
match = ewkt_re.match(value)
if match:
ogr = OGRGeometry(match.group('wkt'), match.group('srid'))
else:
ogr = OGRGeometry(value)
wkt = ''
if ogr:
# Workaround for Django bug #12312. GEOSGeometry types don't support 3D wkt;
# OGRGeometry types output 3D for linestrings even if they should do 2D, causing
# IntegrityError's.
if ogr.dimension == 2:
geos = ogr.geos
geos.transform(srid)
wkt = geos.wkt
else:
ogr.transform(srid)
wkt = ogr.wkt
return wkt
def collection_wkt(fields):
""" Returns WKT for the given list of geometry fields. """
if not fields:
return ""
if len(fields) == 1:
return get_wkt(fields[0])
return "GEOMETRYCOLLECTION(%s)" % \
",".join(get_wkt(field) for field in fields)
def add_srid(wkt, srid=DEFAULT_PROJ):
"""
Returns EWKT (WKT with a specified SRID) for the given wkt and SRID
(default 4326).
"""
if wkt:
return "SRID=%s;%s" % (srid, wkt)
return ""
| 4,049
| 84
| 304
|
a777c1f7cbe7e6ff795a3c5c9391e45397c000e0
| 921
|
py
|
Python
|
advent/year2021/day1.py
|
davweb/advent-of-code
|
6d9ac52092f4aad26a84d7cfd2fcd8420f1ea612
|
[
"Unlicense"
] | null | null | null |
advent/year2021/day1.py
|
davweb/advent-of-code
|
6d9ac52092f4aad26a84d7cfd2fcd8420f1ea612
|
[
"Unlicense"
] | null | null | null |
advent/year2021/day1.py
|
davweb/advent-of-code
|
6d9ac52092f4aad26a84d7cfd2fcd8420f1ea612
|
[
"Unlicense"
] | null | null | null |
#!/usr/local/bin/python3
def part1(data):
"""
>>> part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
7
>>> part1(read_input())
1581
"""
previous = data[0]
count = 0
for value in data[1:]:
if value > previous:
count += 1
previous = value
return count
def part2(data):
"""
>>> part2([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
5
>>> part2(read_input())
1618
"""
count = 0
for i in range(1, len(data) - 2):
previous = sum(data[i - 1:i + 2])
value = sum(data[i:i + 3])
if value > previous:
count += 1
return count
if __name__ == "__main__":
main()
| 16.745455
| 65
| 0.512486
|
#!/usr/local/bin/python3
def read_input():
file = open('input/2021/day1-input.txt', 'r')
return [int(line) for line in file.readlines()]
def part1(data):
"""
>>> part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
7
>>> part1(read_input())
1581
"""
previous = data[0]
count = 0
for value in data[1:]:
if value > previous:
count += 1
previous = value
return count
def part2(data):
"""
>>> part2([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
5
>>> part2(read_input())
1618
"""
count = 0
for i in range(1, len(data) - 2):
previous = sum(data[i - 1:i + 2])
value = sum(data[i:i + 3])
if value > previous:
count += 1
return count
def main():
data = read_input()
print(part1(data))
print(part2(data))
if __name__ == "__main__":
main()
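An aside, not in the original file: adjacent 3-element windows share two terms, so comparing sum(data[i:i+3]) with sum(data[i+1:i+4]) reduces to comparing data[i] with data[i+3]. A windowless sketch of part2 built on that identity:

# Equivalent to part2 above: count positions where the element three steps ahead is larger.
def part2_windowless(data):
    return sum(1 for a, b in zip(data, data[3:]) if b > a)


assert part2_windowless([199, 200, 208, 210, 200, 207, 240, 269, 260, 263]) == 5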
| 158
| 0
| 46
|
ac3978d7b01ad6d6e0f32900633722a103fd5b2e
| 4,738
|
py
|
Python
|
Example/Gutenkunst2007/Lee_2003/LeeNet.py
|
bcdaniels/SloppyCell
|
17e68127a6aba19056a5067748a2d18241cc4d76
|
[
"BSD-3-Clause"
] | 2
|
2020-05-26T19:29:39.000Z
|
2020-08-26T20:54:52.000Z
|
Example/Gutenkunst2007/Lee_2003/LeeNet.py
|
bcdaniels/SloppyCell
|
17e68127a6aba19056a5067748a2d18241cc4d76
|
[
"BSD-3-Clause"
] | 1
|
2019-04-15T21:08:12.000Z
|
2019-04-15T21:08:12.000Z
|
Example/Gutenkunst2007/Lee_2003/LeeNet.py
|
jurquiza/SloppyCellUrquiza2019
|
a9f64d9d4172c82735813f09e48f36777a714e9c
|
[
"BSD-3-Clause"
] | 3
|
2017-09-12T03:12:01.000Z
|
2018-10-19T11:08:09.000Z
|
from SloppyCell.ReactionNetworks import *
net = Network('Lee2003')
net.add_compartment('extract')
net.add_parameter('Dsh0', 100, name = r'Dsh^0')
net.add_parameter('APC0', 100, name = r'APC^0')
net.add_parameter('TCF0', 15, name = r'TCF^0')
net.add_parameter('GSK0', 50, name = r'GSK^0')
net.add_species('X2', 'extract', 0)#, name=r'Dsh_a')
net.add_species('X3', 'extract', 0)#, name=r'APC^*/axin^*/GSK3')
net.add_species('X4', 'extract', 0)#, name=r'APC/axin/GSK3')
net.add_species('X9', 'extract', 0)#, name=r'\beta-catenin^*/APC^*/axin^*/GSK3')
net.add_species('X10', 'extract', 0)#, name=r'\beta-catenin^*')
net.add_species('X11', 'extract', 0)#, name=r'\beta-catenin')
net.add_species('X12', 'extract', 0)#, name=r'Axin')
#net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X1', 'extract')#, name=r'Dsh_i')
net.add_species('X6', 'extract')#, name=r'APC/axin')
net.add_species('X7', 'extract')#, name=r'APC')
net.add_species('X8', 'extract')#, name=r'\beta-catenin/APC^*/axin^*/GSK3')
net.add_species('X13', 'extract')#, name=r'TCF')
net.add_species('X14', 'extract')#, name=r'\beta-catenin/TCF')
net.add_species('X15', 'extract')#, name=r'\beta-catenin/APC')
net.add_parameter('K7', 50, name = r'K_7')
net.add_parameter('K8', 120, name = r'K_8')
net.add_parameter('K16', 30, name = r'K_16')
net.add_parameter('K17', 1200, name = r'K_17')
net.add_parameter('k1', 0.182, name = r'k_{1}')
net.add_parameter('k2', 1.82e-2, name = r'k_{2}')
net.add_parameter('k3', 5e-2, name = r'k_{3}')
net.add_parameter('k4', 0.267, name = r'k_{4}')
net.add_parameter('k5', 0.133, name = r'k_{5}')
net.add_parameter('k6', 9.09e-2, name = r'k_{6}')
net.add_parameter('km6', 0.909, name = 'k_{-6}')
net.add_parameter('k9', 206, name = r'k_{9}')
net.add_parameter('k10', 206, name = r'k_{10}')
net.add_parameter('k11', 0.417, name = r'k_{11}')
net.add_parameter('k13', 2.57e-4, name = r'k_{13}')
net.add_parameter('k15', 0.167, name = r'k_{15}')
net.add_parameter('v12', 0.423, name = r'v_{12}')
net.add_parameter('v14', 8.22e-5, name = r'v_{14}')
#net.add_parameter('k1', 0.18, name = r'k_{1}')
#net.add_parameter('k2', 1.8e-2, name = r'k_{2}')
#net.add_parameter('k3', 5e-2, name = r'k_{3}')
#net.add_parameter('k4', 0.27, name = r'k_{4}')
#net.add_parameter('k5', 0.13, name = r'k_{5}')
#net.add_parameter('k6', 9.1e-2, name = r'k_{6}')
#net.add_parameter('km6', 0.91, name = 'k_{-6}')
#net.add_parameter('k9', 210, name = r'k_{9}')
#net.add_parameter('k10', 210, name = r'k_{10}')
#net.add_parameter('k11', 0.42, name = r'k_{11}')
#net.add_parameter('k13', 2.6e-4, name = r'k_{13}')
#net.add_parameter('k15', 0.17, name = r'k_{15}')
#
#net.add_parameter('v12', 0.42, name = r'v_{12}')
#net.add_parameter('v14', 8.2e-5, name = r'v_{14}')
net.add_parameter('W', 0, is_optimizable=False)
net.add_rate_rule('X2', 'k1*W*(Dsh0-X2)-k2*X2')
net.add_rate_rule('X9', 'k9 * X8 - k10*X9')
net.add_rate_rule('X10', 'k10*X9-k11*X10')
net.add_rate_rule('X4', '-k3*X2*X4 - k4*X4 + k5*X3 + k6*X5*X6 - km6*X4')
net.add_parameter('a')
net.add_assignment_rule('a', '1+APC0*K17/(K7*(K17+X11))')
net.add_parameter('b')
net.add_assignment_rule('b', 'APC0*K17*X12/(K7*(K17+X11)**2)')
net.add_parameter('c')
net.add_assignment_rule('c', 'k3*X2*X4 - k6 * GSK0*APC0*K17*X12/(K7*(K17+X11)) + km6*X4 + v14 - k15*X12')
net.add_parameter('d')
net.add_assignment_rule('d', '1+X11/K8')
net.add_parameter('e')
net.add_assignment_rule('e', 'X3/K8')
net.add_parameter('f')
net.add_assignment_rule('f', 'k4*X4 - k5*X3 - k9*X3*X11/K8 + k10*X9')
net.add_parameter('g')
net.add_assignment_rule('g', '1+X3/K8+TCF0*K16/(K16+X11)**2 + APC0*K17/(K17+X11)**2')
net.add_parameter('h')
net.add_assignment_rule('h', 'X11/K8')
net.add_parameter('i')
net.add_assignment_rule('i', 'v12 - (k9*X3/K8 + k13)*X11')
net.add_parameter('rhsX11', name = 'rhs_{X11}')
net.add_assignment_rule('rhsX11', '(d*i - f*h)/(d*g - e*h)')
net.add_rate_rule('X11', 'rhsX11')
net.add_rate_rule('X12', '(c + rhsX11*b)/a')
net.add_rate_rule('X3', '(e*i - f*g)/(e*h - d*g)')
net.add_assignment_rule('X1', 'Dsh0 - X2')
net.add_assignment_rule('X7', 'K17*APC0/(K17+X11)')
net.add_assignment_rule('X15', 'X11*APC0/(K17+X11)')
net.add_assignment_rule('X13', 'K16*TCF0/(K16+X11)')
net.add_assignment_rule('X14', 'X11*TCF0/(K16+X11)')
net.add_assignment_rule('X8', 'X3*X11/K8')
net.add_assignment_rule('X6', 'K17*X12*APC0/(K7*(K17+X11))')
# These are just for my own monitoring purposes
net.add_parameter('BCatenin', name = r'\beta-catenin')
net.add_assignment_rule('BCatenin', 'X8+X9+X10+X11+X14+X15')
net.add_parameter('Axin', name = r'Axin')
net.add_assignment_rule('Axin', 'X3+X4+X6+X8+X9+X12')
| 41.561404
| 105
| 0.657239
|
from SloppyCell.ReactionNetworks import *
net = Network('Lee2003')
net.add_compartment('extract')
net.add_parameter('Dsh0', 100, name = r'Dsh^0')
net.add_parameter('APC0', 100, name = r'APC^0')
net.add_parameter('TCF0', 15, name = r'TCF^0')
net.add_parameter('GSK0', 50, name = r'GSK^0')
net.add_species('X2', 'extract', 0)#, name=r'Dsh_a')
net.add_species('X3', 'extract', 0)#, name=r'APC^*/axin^*/GSK3')
net.add_species('X4', 'extract', 0)#, name=r'APC/axin/GSK3')
net.add_species('X9', 'extract', 0)#, name=r'\beta-catenin^*/APC^*/axin^*/GSK3')
net.add_species('X10', 'extract', 0)#, name=r'\beta-catenin^*')
net.add_species('X11', 'extract', 0)#, name=r'\beta-catenin')
net.add_species('X12', 'extract', 0)#, name=r'Axin')
#net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X1', 'extract')#, name=r'Dsh_i')
net.add_species('X6', 'extract')#, name=r'APC/axin')
net.add_species('X7', 'extract')#, name=r'APC')
net.add_species('X8', 'extract')#, name=r'\beta-catenin/APC^*/axin^*/GSK3')
net.add_species('X13', 'extract')#, name=r'TCF')
net.add_species('X14', 'extract')#, name=r'\beta-catenin/TCF')
net.add_species('X15', 'extract')#, name=r'\beta-catenin/APC')
net.add_parameter('K7', 50, name = r'K_7')
net.add_parameter('K8', 120, name = r'K_8')
net.add_parameter('K16', 30, name = r'K_16')
net.add_parameter('K17', 1200, name = r'K_17')
net.add_parameter('k1', 0.182, name = r'k_{1}')
net.add_parameter('k2', 1.82e-2, name = r'k_{2}')
net.add_parameter('k3', 5e-2, name = r'k_{3}')
net.add_parameter('k4', 0.267, name = r'k_{4}')
net.add_parameter('k5', 0.133, name = r'k_{5}')
net.add_parameter('k6', 9.09e-2, name = r'k_{6}')
net.add_parameter('km6', 0.909, name = 'k_{-6}')
net.add_parameter('k9', 206, name = r'k_{9}')
net.add_parameter('k10', 206, name = r'k_{10}')
net.add_parameter('k11', 0.417, name = r'k_{11}')
net.add_parameter('k13', 2.57e-4, name = r'k_{13}')
net.add_parameter('k15', 0.167, name = r'k_{15}')
net.add_parameter('v12', 0.423, name = r'v_{12}')
net.add_parameter('v14', 8.22e-5, name = r'v_{14}')
#net.add_parameter('k1', 0.18, name = r'k_{1}')
#net.add_parameter('k2', 1.8e-2, name = r'k_{2}')
#net.add_parameter('k3', 5e-2, name = r'k_{3}')
#net.add_parameter('k4', 0.27, name = r'k_{4}')
#net.add_parameter('k5', 0.13, name = r'k_{5}')
#net.add_parameter('k6', 9.1e-2, name = r'k_{6}')
#net.add_parameter('km6', 0.91, name = 'k_{-6}')
#net.add_parameter('k9', 210, name = r'k_{9}')
#net.add_parameter('k10', 210, name = r'k_{10}')
#net.add_parameter('k11', 0.42, name = r'k_{11}')
#net.add_parameter('k13', 2.6e-4, name = r'k_{13}')
#net.add_parameter('k15', 0.17, name = r'k_{15}')
#
#net.add_parameter('v12', 0.42, name = r'v_{12}')
#net.add_parameter('v14', 8.2e-5, name = r'v_{14}')
net.add_parameter('W', 0, is_optimizable=False)
net.add_rate_rule('X2', 'k1*W*(Dsh0-X2)-k2*X2')
net.add_rate_rule('X9', 'k9 * X8 - k10*X9')
net.add_rate_rule('X10', 'k10*X9-k11*X10')
net.add_rate_rule('X4', '-k3*X2*X4 - k4*X4 + k5*X3 + k6*X5*X6 - km6*X4')
net.add_parameter('a')
net.add_assignment_rule('a', '1+APC0*K17/(K7*(K17+X11))')
net.add_parameter('b')
net.add_assignment_rule('b', 'APC0*K17*X12/(K7*(K17+X11)**2)')
net.add_parameter('c')
net.add_assignment_rule('c', 'k3*X2*X4 - k6 * GSK0*APC0*K17*X12/(K7*(K17+X11)) + km6*X4 + v14 - k15*X12')
net.add_parameter('d')
net.add_assignment_rule('d', '1+X11/K8')
net.add_parameter('e')
net.add_assignment_rule('e', 'X3/K8')
net.add_parameter('f')
net.add_assignment_rule('f', 'k4*X4 - k5*X3 - k9*X3*X11/K8 + k10*X9')
net.add_parameter('g')
net.add_assignment_rule('g', '1+X3/K8+TCF0*K16/(K16+X11)**2 + APC0*K17/(K17+X11)**2')
net.add_parameter('h')
net.add_assignment_rule('h', 'X11/K8')
net.add_parameter('i')
net.add_assignment_rule('i', 'v12 - (k9*X3/K8 + k13)*X11')
net.add_parameter('rhsX11', name = 'rhs_{X11}')
net.add_assignment_rule('rhsX11', '(d*i - f*h)/(d*g - e*h)')
net.add_rate_rule('X11', 'rhsX11')
net.add_rate_rule('X12', '(c + rhsX11*b)/a')
net.add_rate_rule('X3', '(e*i - f*g)/(e*h - d*g)')
net.add_assignment_rule('X1', 'Dsh0 - X2')
net.add_assignment_rule('X7', 'K17*APC0/(K17+X11)')
net.add_assignment_rule('X15', 'X11*APC0/(K17+X11)')
net.add_assignment_rule('X13', 'K16*TCF0/(K16+X11)')
net.add_assignment_rule('X14', 'X11*TCF0/(K16+X11)')
net.add_assignment_rule('X8', 'X3*X11/K8')
net.add_assignment_rule('X6', 'K17*X12*APC0/(K7*(K17+X11))')
# These are just for my own monitoring purposes
net.add_parameter('BCatenin', name = r'\beta-catenin')
net.add_assignment_rule('BCatenin', 'X8+X9+X10+X11+X14+X15')
net.add_parameter('Axin', name = r'Axin')
net.add_assignment_rule('Axin', 'X3+X4+X6+X8+X9+X12')
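A note on the coupled rate rules for X11, X3 and X12 above: they read like a Cramer's-rule solution of two linear relations between the derivatives, with the a–i assignment rules holding the coefficients (presumably the total-concentration bookkeeping of Lee et al. 2003). The following is a quick symbolic check of that reading, not taken from the original script; it assumes sympy is available.

# Editorial sanity check (assumes sympy): the rhsX11 and X3 rate rules above
# are the Cramer's-rule solution of
#     g*dX11 + h*dX3 = i
#     e*dX11 + d*dX3 = f
# and the X12 rule then rearranges a*dX12 - b*dX11 = c.
import sympy as sp

d, e, f, g, h, i = sp.symbols('d e f g h i')
dX11, dX3 = sp.symbols('dX11 dX3')
sol = sp.solve([sp.Eq(g*dX11 + h*dX3, i), sp.Eq(e*dX11 + d*dX3, f)], [dX11, dX3])
assert sp.simplify(sol[dX11] - (d*i - f*h)/(d*g - e*h)) == 0   # matches rhsX11
assert sp.simplify(sol[dX3] - (e*i - f*g)/(e*h - d*g)) == 0    # matches the X3 rate rule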
| 0
| 0
| 0
|
86a50822ca54c8a73daa3b439ad60fdf4ef3c6c6
| 4,288
|
py
|
Python
|
project_1_1/src/engine.py
|
tillaczel/Deep-Learning-in-Computer-Vision
|
792e90a3ad5bafdb30e0267226c2c75b8afd01e3
|
[
"MIT"
] | null | null | null |
project_1_1/src/engine.py
|
tillaczel/Deep-Learning-in-Computer-Vision
|
792e90a3ad5bafdb30e0267226c2c75b8afd01e3
|
[
"MIT"
] | null | null | null |
project_1_1/src/engine.py
|
tillaczel/Deep-Learning-in-Computer-Vision
|
792e90a3ad5bafdb30e0267226c2c75b8afd01e3
|
[
"MIT"
] | 1
|
2021-06-08T09:28:01.000Z
|
2021-06-08T09:28:01.000Z
|
from omegaconf import DictConfig
import pytorch_lightning as pl
import torch
from torch import nn
import torchmetrics
from .model import Model
| 34.861789
| 120
| 0.614972
|
from omegaconf import DictConfig
import pytorch_lightning as pl
import torch
from torch import nn
import torchmetrics
from .model import Model
class EngineModule(pl.LightningModule):
def __init__(self, config: DictConfig, main_metric: str="f1"):
super().__init__()
self.config = config
self.model = Model(pretrained=config.model.pretrained, in_dim=config.model.in_dim, out_dim=config.model.out_dim)
self.loss_func = nn.BCEWithLogitsLoss()
self.train_acc = torchmetrics.Accuracy()
self.val_acc = torchmetrics.Accuracy()
self.train_f1 = torchmetrics.F1()
self.val_f1 = torchmetrics.F1()
self.train_recall = torchmetrics.Recall()
self.val_recall = torchmetrics.Recall()
self.train_precision = torchmetrics.Precision()
self.val_precision = torchmetrics.Precision()
self.metrics = ["acc", "f1", "recall", "precision"]
self.main_metric = main_metric
# not released yet :(
# self.train_specificity = torchmetrics.Specificity()
# self.val_specificity = torchmetrics.Specificity()
@property
def lr(self):
return self.optimizers().param_groups[0]['lr']
def forward(self, x):
return self.model(x)
def update_and_log_metric(self, metric_name, probs, labels, mode='train'):
metric = getattr(self, f"{mode}_{metric_name}")
metric(probs, labels)
self.log(f"{mode}_{metric_name}", metric,
on_step=False,
prog_bar=(metric_name == self.main_metric),
on_epoch=True, logger=True)
def training_step(self, batch, batch_idx):
images, labels = batch
pred = self.model(images).squeeze() # [Bx1] -> [B]
loss = self.loss_func(pred, labels.type(torch.float32))
self.log('loss', loss, on_step=False, on_epoch=True,
prog_bar=False, logger=True)
self.log('lr', self.lr, on_step=False, on_epoch=True,
prog_bar=False, logger=True)
probs = torch.sigmoid(pred)  # torch.sigmoid; nn.functional.sigmoid is deprecated
for metric_name in self.metrics:
self.update_and_log_metric(metric_name, probs, labels, mode='train')
return {'loss': loss}
def training_epoch_end(self, outputs: list):
pass
def validation_step(self, batch, batch_idx):
images, labels = batch
pred = self.model(images).squeeze() # [Bx1] -> [B]
loss = self.loss_func(pred, labels.type(torch.float32))
probs = torch.sigmoid(pred)
self.log('val_loss', loss, on_step=False, on_epoch=True,
prog_bar=False, logger=True)
for metric_name in self.metrics:
self.update_and_log_metric(metric_name, probs, labels, mode='val')
return {'val_loss': loss}
def validation_epoch_end(self, outputs: list):
pass
def configure_optimizers(self):
optimizer = get_optimizer(self.config.training.optimizer, self.parameters())
scheduler_config = self.config.training.scheduler
if scheduler_config is not None:
scheduler = get_scheduler(scheduler_config, optimizer)
return [optimizer], [scheduler]
else:
return optimizer
def get_optimizer(optim_config: DictConfig, params):
name = optim_config.name
lr = optim_config.lr
if name == 'sgd':
return torch.optim.SGD(params, lr=lr)
elif name == 'adam':
return torch.optim.Adam(params, lr=lr)
else:
raise ValueError(f'{name} not in optimizers')
def get_scheduler(scheduler_config, optimizer):
name = scheduler_config.name
monitor = scheduler_config.monitor
if name == 'plateau':
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode=scheduler_config.mode,
patience=scheduler_config.patience,
factor=scheduler_config.factor,
min_lr=scheduler_config.min_lr)
return dict(scheduler=scheduler, monitor=monitor)
else:
raise ValueError(f'{name} not in schedulers')
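get_optimizer and get_scheduler above only reveal the configuration keys they actually touch; the project's real config files are not part of this listing. A minimal sketch of an omegaconf config that would satisfy both factories as written, with purely illustrative values:

# Illustrative config for the factories above; field names are inferred from
# the attribute accesses in get_optimizer/get_scheduler, values are arbitrary.
from omegaconf import OmegaConf
import torch

cfg = OmegaConf.create({
    "training": {
        "optimizer": {"name": "adam", "lr": 1e-3},
        "scheduler": {
            "name": "plateau", "monitor": "val_loss", "mode": "min",
            "patience": 3, "factor": 0.5, "min_lr": 1e-6,
        },
    }
})

params = [torch.nn.Parameter(torch.zeros(1))]   # stand-in for model.parameters()
optimizer = get_optimizer(cfg.training.optimizer, params)
scheduler = get_scheduler(cfg.training.scheduler, optimizer)   # dict(scheduler=..., monitor='val_loss')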
| 3,796
| 276
| 69
|
07f3349c9a417036cdc8776d53fcfa52d2e1af80
| 5,082
|
py
|
Python
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/Interpolator.py
|
maroozm/AliPhysics
|
22ec256928cfdf8f800e05bfc1a6e124d90b6eaf
|
[
"BSD-3-Clause"
] | 114
|
2017-03-03T09:12:23.000Z
|
2022-03-03T20:29:42.000Z
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/Interpolator.py
|
maroozm/AliPhysics
|
22ec256928cfdf8f800e05bfc1a6e124d90b6eaf
|
[
"BSD-3-Clause"
] | 19,637
|
2017-01-16T12:34:41.000Z
|
2022-03-31T22:02:40.000Z
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/Interpolator.py
|
maroozm/AliPhysics
|
22ec256928cfdf8f800e05bfc1a6e124d90b6eaf
|
[
"BSD-3-Clause"
] | 1,021
|
2016-07-14T22:41:16.000Z
|
2022-03-31T05:15:51.000Z
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Interpolation module
@author: Jacek Otwinowski
@organization: ALICE Collaboration
Translated into PYTHON by Markus Fasel <markus.fasel@cern.ch>, Lawrence Berkeley National Laboratory
"""
import math
| 37.925373
| 123
| 0.538764
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Interpolation module
@author: Jacek Otwinowski
@organization: ALICE Collaboration
Translated into PYTHON by Markus Fasel <markus.fasel@cern.ch>, Lawrence Berkeley National Laboratory
"""
import math
class Interpolator(object):
def __init__(self):
"""
Constructor
"""
pass
def Interpolate(self, x, x1, y1, x2, y2, integrate = False, r = 0, method="lin"):
"""
Interpolation handler:
forwards methods to the different interpolation functions
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
@param integrate: if true we evaluate the integral
@param r:
"""
if method == "lin":
return self.__InterpolateLinear(x, x1, y1, x2, y2, integrate, r)
elif method == "pow":
return self.__InterpolatePowerLaw(x, x1, y1, x2, y2, integrate, r)
elif method == "exp":
return self.__InterpolateExponential(x, x1, y1, x2, y2)
elif method == "hag":
return self.__InterpolateSimpleHagedorn(x, x1, y1, x2, y2)
def __InterpolateLinear(self, x, x1, y1, x2, y2, integrate = False, r = 0):
"""
Linear interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
@param integrate: if true we evaluate the integral
@param r:
"""
if x1-x2 == 0:
return 0
if integrate:
return 2*r*(y1+((x-x1)*(y1-y2))/(x1-x2))
else:
return (y1 + (((y2-y1)/(x2-x1))*(x-x1)))
def __InterpolatePowerLaw(self, x, x1, y1, x2, y2, integrate = False, r = 0):
"""
Power law interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
@param integrate: if true we evaluate the integral
@param r:
"""
#assume functional form y=a*x^n
if not self.__AssurePositive(x, x1, x2, y1, y2):
return 0.
n = (math.log(y1)-math.log(y2))/(math.log(x1)-math.log(x2));
a = y1*pow(x1,-n)
print "y: %f" %(a*pow(x,n))
print "n: %f" %(n)
print "a: %f" %(a)
if integrate:
return ((a/(n+1.))*(math.pow(x+r,n+1.)-math.pow(x-r,n+1.))/(2.*r))
else:
return (a*math.pow(x,n))
def __InterpolateExponential(self, x, x1, y1, x2, y2):
"""
Exponential interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
"""
if not self.__AssurePositive(x, x1, x2, y1, y2):
return 0.
return math.exp(self.__InterpolateLinear(x,x1,math.log(y1),x2,math.log(y2)))
def __InterpolateSimpleHagedorn(self, x, x1, y1, x2, y2):
"""
Hagedorn interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
"""
if not self.__AssurePositive(x, x1, x2, y1, y2):
return 0.
return math.exp(self.__InterpolateLinear(math.log(1.+x),math.log(1.+x1),math.log(y1),math.log(1.+x2),math.log(y2)))
def __AssurePositive(self, x, x1, x2, y1, y2):
"""
Check if all values are positive
"""
if x <= 0. or x1 <= 0. or x2 <= 0. or y1 <= 0. or y2 <= 0.:
return False
return True
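The class above is instantiable as-is; a short usage sketch with made-up values (kept Python 2 like the rest of the module, since the power-law branch still uses print statements):

# Illustrative only: interpolate between two measured points (x1, y1), (x2, y2).
interp = Interpolator()
y_lin = interp.Interpolate(1.5, 1.0, 10.0, 2.0, 2.5, method="lin")
y_exp = interp.Interpolate(1.5, 1.0, 10.0, 2.0, 2.5, method="exp")
# Bin-averaged power-law value over a bin of half-width r around x
y_pow = interp.Interpolate(1.5, 1.0, 10.0, 2.0, 2.5, integrate=True, r=0.25, method="pow")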
| 0
| 3,791
| 23
|
0b9bc42aab3a61dc776c20fde1b7be088ba0e2b2
| 2,276
|
py
|
Python
|
creator/schema.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 3
|
2019-05-04T02:07:28.000Z
|
2020-10-16T17:47:44.000Z
|
creator/schema.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 604
|
2019-02-21T18:14:51.000Z
|
2022-02-10T08:13:54.000Z
|
creator/schema.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | null | null | null |
"""
This is the root schema definition that combines individual applications'
schemas into one.
Each application that has queries or mutations exports them as either Query
or Mutation from the application's schema module.
No resolvers or type definitions should be included here.
"""
import graphene
from django.conf import settings
import creator.analyses.schema
import creator.buckets.schema
import creator.files.schema
import creator.studies.schema
import creator.projects.schema
import creator.users.schema
import creator.referral_tokens.schema
import creator.status.schema
import creator.jobs.schema
import creator.releases.schema
import creator.data_reviews.schema
import creator.ingest_runs.schema
import creator.organizations.schema
import creator.data_templates.schema
import creator.events.schema
class Query(
creator.analyses.schema.Query,
creator.files.schema.Query,
creator.studies.schema.Query,
creator.users.schema.Query,
creator.events.schema.Query,
creator.projects.schema.Query,
creator.buckets.schema.Query,
creator.referral_tokens.schema.Query,
creator.status.schema.Query,
creator.jobs.schema.Query,
creator.releases.schema.Query,
creator.data_reviews.schema.Query,
creator.ingest_runs.schema.Query,
creator.organizations.schema.Query,
creator.data_templates.schema.Query,
graphene.ObjectType,
):
""" Root query schema combining all apps' schemas """
node = graphene.relay.Node.Field()
if settings.DEBUG:
from graphene_django.debug import DjangoDebug
debug = graphene.Field(DjangoDebug, name="_debug")
class Mutation(
creator.analyses.schema.Mutation,
creator.buckets.schema.Mutation,
creator.projects.schema.Mutation,
creator.studies.schema.Mutation,
creator.files.schema.Mutation,
creator.users.schema.Mutation,
creator.referral_tokens.schema.Mutation,
creator.status.schema.Mutation,
creator.releases.schema.Mutation,
creator.data_reviews.schema.Mutation,
creator.ingest_runs.schema.Mutation,
creator.organizations.schema.Mutation,
creator.data_templates.schema.Mutation,
graphene.ObjectType,
):
""" Root mutation schema combining all apps' schemas """
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
| 29.947368
| 75
| 0.773726
|
"""
This is the root schema definition that combines individual applications'
schemas into one.
Each application that has queries or mutations exports them as either Query
or Mutation from the application's schema module.
No resolvers or type definitions should be included here.
"""
import graphene
from django.conf import settings
import creator.analyses.schema
import creator.buckets.schema
import creator.files.schema
import creator.studies.schema
import creator.projects.schema
import creator.users.schema
import creator.referral_tokens.schema
import creator.status.schema
import creator.jobs.schema
import creator.releases.schema
import creator.data_reviews.schema
import creator.ingest_runs.schema
import creator.organizations.schema
import creator.data_templates.schema
import creator.events.schema
class Query(
creator.analyses.schema.Query,
creator.files.schema.Query,
creator.studies.schema.Query,
creator.users.schema.Query,
creator.events.schema.Query,
creator.projects.schema.Query,
creator.buckets.schema.Query,
creator.referral_tokens.schema.Query,
creator.status.schema.Query,
creator.jobs.schema.Query,
creator.releases.schema.Query,
creator.data_reviews.schema.Query,
creator.ingest_runs.schema.Query,
creator.organizations.schema.Query,
creator.data_templates.schema.Query,
graphene.ObjectType,
):
""" Root query schema combining all apps' schemas """
node = graphene.relay.Node.Field()
if settings.DEBUG:
from graphene_django.debug import DjangoDebug
debug = graphene.Field(DjangoDebug, name="_debug")
class Mutation(
creator.analyses.schema.Mutation,
creator.buckets.schema.Mutation,
creator.projects.schema.Mutation,
creator.studies.schema.Mutation,
creator.files.schema.Mutation,
creator.users.schema.Mutation,
creator.referral_tokens.schema.Mutation,
creator.status.schema.Mutation,
creator.releases.schema.Mutation,
creator.data_reviews.schema.Mutation,
creator.ingest_runs.schema.Mutation,
creator.organizations.schema.Mutation,
creator.data_templates.schema.Mutation,
graphene.ObjectType,
):
""" Root mutation schema combining all apps' schemas """
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
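As context for the convention described in the module docstring (each application's schema module exports a Query and/or Mutation mixin with its own resolvers), a hypothetical app-level schema might look like the sketch below; the names are illustrative and do not correspond to any real creator.* module:

# Hypothetical creator/<some_app>/schema.py following the convention above.
import graphene


class Widget(graphene.ObjectType):
    name = graphene.String()


class Query(graphene.ObjectType):
    # Mixed into the root Query class alongside the other apps' Query classes.
    widget = graphene.Field(Widget, name=graphene.String(required=True))

    def resolve_widget(self, info, name):
        return Widget(name=name)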
| 0
| 0
| 0
|
eebaa5aaa5d495d9ab50fc4d5c37d590c86b3096
| 9,624
|
py
|
Python
|
simulation/simulation.py
|
bopopescu/sparrow-mod
|
56c601ee3dd852a9f053bffffc2a52ff3da8d2bd
|
[
"Apache-2.0"
] | 200
|
2015-01-05T07:37:20.000Z
|
2022-03-30T03:28:21.000Z
|
simulation/simulation.py
|
bopopescu/sparrow-mod
|
56c601ee3dd852a9f053bffffc2a52ff3da8d2bd
|
[
"Apache-2.0"
] | 1
|
2016-05-13T10:46:32.000Z
|
2016-05-13T10:46:32.000Z
|
simulation/simulation.py
|
bopopescu/sparrow-mod
|
56c601ee3dd852a9f053bffffc2a52ff3da8d2bd
|
[
"Apache-2.0"
] | 73
|
2015-01-06T02:00:17.000Z
|
2021-11-22T10:04:03.000Z
|
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import math
import numpy
import random
from util import Job, TaskDistributions
import Queue
MEDIAN_TASK_DURATION = 100
NETWORK_DELAY = 0
TASKS_PER_JOB = 500
SLOTS_PER_WORKER = 4
TOTAL_WORKERS = 10000
PROBE_RATIO = 2
class Event(object):
""" Abstract class representing events. """
def run(self, current_time):
""" Returns any events that should be added to the queue. """
raise NotImplementedError("The run() method must be implemented by "
"each class subclassing Event")
class JobArrival(Event):
""" Event to signify a job arriving at a scheduler. """
class ProbeEvent(Event):
""" Event to signify a probe arriving at a worker. """
class NoopGetTaskResponseEvent(Event):
""" Signifies when a getTask() RPC response arrives at a worker, with a noop response. """
if __name__ == "__main__":
main()
| 40.779661
| 115
| 0.641209
|
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import math
import numpy
import random
from util import Job, TaskDistributions
import Queue
MEDIAN_TASK_DURATION = 100
NETWORK_DELAY = 0
TASKS_PER_JOB = 500
SLOTS_PER_WORKER = 4
TOTAL_WORKERS = 10000
PROBE_RATIO = 2
def get_percentile(N, percent, key=lambda x:x):
if not N:
return 0
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0 + d1
def plot_cdf(values, filename):
values.sort()
f = open(filename, "w")
for percent in range(100):
fraction = percent / 100.
f.write("%s\t%s\n" % (fraction, get_percentile(values, fraction)))
f.close()
class Event(object):
""" Abstract class representing events. """
def __init__(self):
raise NotImplementedError("Event is an abstract class and cannot be "
"instantiated directly")
def run(self, current_time):
""" Returns any events that should be added to the queue. """
raise NotImplementedError("The run() method must be implemented by "
"each class subclassing Event")
class JobArrival(Event):
""" Event to signify a job arriving at a scheduler. """
def __init__(self, simulation, interarrival_delay, task_distribution):
self.simulation = simulation
self.interarrival_delay = interarrival_delay
self.task_distribution= task_distribution
def run(self, current_time):
job = Job(TASKS_PER_JOB, current_time, self.task_distribution, MEDIAN_TASK_DURATION)
logging.getLogger("sim").debug("Job %s arrived at %s" % (job.id, current_time))
# Schedule job.
new_events = self.simulation.send_probes(job, current_time)
# Add new Job Arrival event, for the next job to arrive after this one.
arrival_delay = random.expovariate(1.0 / self.interarrival_delay)
new_events.append((current_time + arrival_delay, self))
logging.getLogger("sim").debug("Retuning %s events" % len(new_events))
return new_events
class ProbeEvent(Event):
""" Event to signify a probe arriving at a worker. """
def __init__(self, worker, job_id):
self.worker = worker
self.job_id = job_id
def run(self, current_time):
logging.getLogger("sim").debug("Probe for job %s arrived at worker %s at %s" %
(self.job_id, self.worker.id, current_time))
return self.worker.add_probe(self.job_id, current_time)
class NoopGetTaskResponseEvent(Event):
""" Signifies when a getTask() RPC response arrives at a worker, with a noop response. """
def __init__(self, worker):
self.worker = worker
def run(self, current_time):
logging.getLogger("sim").debug("getTask() request for worker %s returned no task at %s" %
(self.worker.id, current_time))
return self.worker.free_slot(current_time)
class TaskEndEvent():
def __init__(self, worker):
self.worker = worker
def run(self, current_time):
return self.worker.free_slot(current_time)
class Worker(object):
def __init__(self, simulation, num_slots, id):
self.simulation = simulation
self.free_slots = num_slots
# Just a list of job ids!
self.queued_probes = Queue.Queue()
self.id = id
self.probes_replied_to_immediately = 0
def add_probe(self, job_id, current_time):
self.queued_probes.put(job_id)
new_events = self.maybe_get_task(current_time)
self.probes_replied_to_immediately += len(new_events)
logging.getLogger("sim").debug("Worker %s: %s" %
(self.id, self.probes_replied_to_immediately))
return new_events
def free_slot(self, current_time):
""" Frees a slot on the worker and attempts to launch another task in that slot. """
self.free_slots += 1
get_task_events = self.maybe_get_task(current_time)
return get_task_events
def maybe_get_task(self, current_time):
if not self.queued_probes.empty() and self.free_slots > 0:
# Account for "running" task
self.free_slots -= 1
job_id = self.queued_probes.get()
task_duration = self.simulation.get_task(job_id)
probe_response_time = current_time + 2*NETWORK_DELAY
if task_duration > 0:
task_end_time = probe_response_time + task_duration
logging.getLogger("sim").debug(("Task for job %s running on worker %s (get task at: %s, duration: "
"%s, end: %s)") %
(job_id, self.id, current_time, task_duration, task_end_time))
self.simulation.add_task_completion_time(job_id, task_end_time)
new_event = TaskEndEvent(self)
return [(task_end_time, new_event)]
else:
# There was no task left for the job, so send another probe
# after 1RTT.
logging.getLogger("sim").debug("Noop returning on worker %s at %s" %
(self.id, probe_response_time))
return [(probe_response_time, NoopGetTaskResponseEvent(self))]
return []
class Simulation(object):
def __init__(self, num_jobs, file_prefix, load, task_distribution):
avg_used_slots = load * SLOTS_PER_WORKER * TOTAL_WORKERS
self.interarrival_delay = (1.0 * MEDIAN_TASK_DURATION * TASKS_PER_JOB / avg_used_slots)
print ("Interarrival delay: %s (avg slots in use: %s)" %
(self.interarrival_delay, avg_used_slots))
self.jobs = {}
self.remaining_jobs = num_jobs
self.event_queue = Queue.PriorityQueue()
self.workers = []
self.file_prefix = file_prefix
while len(self.workers) < TOTAL_WORKERS:
self.workers.append(Worker(self, SLOTS_PER_WORKER, len(self.workers)))
self.worker_indices = range(TOTAL_WORKERS)
self.task_distribution = task_distribution
def send_probes(self, job, current_time):
""" Send probes to acquire load information, in order to schedule a job. """
self.jobs[job.id] = job
random.shuffle(self.worker_indices)
probe_events = []
num_probes = PROBE_RATIO * len(job.unscheduled_tasks)
for worker_index in self.worker_indices[:num_probes]:
probe_events.append((current_time + NETWORK_DELAY,
ProbeEvent(self.workers[worker_index], job.id)))
return probe_events
def get_task(self, job_id):
job = self.jobs[job_id]
if len(job.unscheduled_tasks) > 0:
task_duration = job.unscheduled_tasks[0]
job.unscheduled_tasks = job.unscheduled_tasks[1:]
return task_duration
return -1
def add_task_completion_time(self, job_id, completion_time):
job_complete = self.jobs[job_id].task_completed(completion_time)
if job_complete:
self.remaining_jobs -= 1
logging.getLogger("sim").debug("Job %s completed in %s" %
(job_id, self.jobs[job_id].end_time - self.jobs[job_id].start_time))
def run(self):
self.event_queue.put((0,
JobArrival(self, self.interarrival_delay, self.task_distribution)))
last_time = 0
while self.remaining_jobs > 0:
current_time, event = self.event_queue.get()
assert current_time >= last_time
last_time = current_time
new_events = event.run(current_time)
for new_event in new_events:
self.event_queue.put(new_event)
print ("Simulation ended after %s milliseconds (%s jobs started)" %
(last_time, len(self.jobs)))
complete_jobs = [j for j in self.jobs.values() if j.completed_tasks_count == j.num_tasks]
print "%s complete jobs" % len(complete_jobs)
response_times = [job.end_time - job.start_time for job in complete_jobs
if job.start_time > 500]
print "Included %s jobs" % len(response_times)
plot_cdf(response_times, "%s_response_times.data" % self.file_prefix)
print "Average response time: ", numpy.mean(response_times)
longest_tasks = [job.longest_task for job in complete_jobs]
plot_cdf(longest_tasks, "%s_ideal_response_time.data" % self.file_prefix)
tasks_replied_to_immediately = sum([w.probes_replied_to_immediately for w in self.workers])
print "Tasks replied to immeiately: ", tasks_replied_to_immediately
return response_times
def main():
random.seed(1)
logging.basicConfig(level=logging.INFO)
sim = Simulation(1000, "sparrow", 0.95, TaskDistributions.CONSTANT)
sim.run()
if __name__ == "__main__":
main()
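main() above runs a single operating point (1000 jobs at load 0.95 with constant task durations). A small driver along the following lines could sweep loads with the same machinery (kept Python 2 to match the module; the load values and file prefixes are arbitrary):

# Illustrative load sweep built on the Simulation class above (Python 2).
def sweep_loads():
    random.seed(1)
    for load in [0.5, 0.7, 0.9, 0.95]:
        sim = Simulation(1000, "sparrow_load_%s" % load, load, TaskDistributions.CONSTANT)
        response_times = sim.run()
        print "load %s: mean response time %s" % (load, numpy.mean(response_times))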
| 6,768
| 976
| 376
|
eb0cc8b93b8223d65f24aaccba78c888502d04df
| 892
|
py
|
Python
|
2015/MAC0327/Desafios 2/p11.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | 1
|
2018-08-02T14:09:26.000Z
|
2018-08-02T14:09:26.000Z
|
2015/MAC0327/Desafios 2/p11.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | null | null | null |
2015/MAC0327/Desafios 2/p11.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | 1
|
2020-07-13T04:27:02.000Z
|
2020-07-13T04:27:02.000Z
|
# coding=utf-8
__author__ = 'André Meneghelli'
"""
/*******************************************************************************
* Aluno: André Meneghelli Vale, Núm. USP: 4898948
* Curso: Bacharelado em Ciências da Computação
* Aula 13 - Stone Pile
* MAC0327 -- IME/USP, -- Prof. Cristina Gomes Fernandes
******************************************************************************/
"""
pedras = []
if __name__ == '__main__':
main()
| 22.3
| 80
| 0.48991
|
# coding=utf-8
__author__ = 'André Meneghelli'
"""
/*******************************************************************************
* Aluno: André Meneghelli Vale, Núm. USP: 4898948
* Curso: Bacharelado em Ciências da Computação
* Aula 13 - Stone Pile
* MAC0327 -- IME/USP, -- Prof. Cristina Gomes Fernandes
******************************************************************************/
"""
pedras = []
def procura(s1, s2, index):
global pedras
if index == -1:
return abs(s1-s2)
sa = procura(s1 + pedras[index], s2, index-1)
sb = procura(s1, s2 + pedras[index], index-1)
if sa < sb:
return sa
return sb
def main():
global pedras
s1 = 0
raw_input()
pedras = map(int, raw_input().split())
pedras.sort()
s2 = pedras[len(pedras)-1]
print procura(s1, s2, len(pedras)-2)
if __name__ == '__main__':
main()
| 389
| 0
| 46
|
cd67696b0ec1ee40fb689af2c3c02ad3ecc6be4e
| 5,014
|
py
|
Python
|
model.py
|
abhitrip/Behavioral-Cloning
|
9930dc7fc2e6623954f84859b7d011905cd48d30
|
[
"MIT"
] | null | null | null |
model.py
|
abhitrip/Behavioral-Cloning
|
9930dc7fc2e6623954f84859b7d011905cd48d30
|
[
"MIT"
] | null | null | null |
model.py
|
abhitrip/Behavioral-Cloning
|
9930dc7fc2e6623954f84859b7d011905cd48d30
|
[
"MIT"
] | null | null | null |
import csv
import matplotlib.image as mpimg
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers.core import Flatten,Lambda,Dense
from keras.layers.convolutional import Cropping2D,Conv2D
from keras import backend as K
from keras.layers.core import Activation
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
"""
To show the preprocessing for final model
"""
def read_data_gen(batch_size):
"""
Generator function to load driving logs and input images.
"""
while 1:
with open('data/driving_log.csv') as driving_log_file:
reader = csv.DictReader(driving_log_file)
count = 0
inputs, targets = [], []
try:
for row in reader:
center_img = mpimg.imread('data/'+ row['center'].strip())
flipped_center_img = np.fliplr(center_img)
center_steering = float(row['steering'])
if count < batch_size//2:
inputs += [center_img, flipped_center_img]
targets += [center_steering, -center_steering]
count += 1
else:
yield np.array(inputs, dtype=center_img.dtype), np.array(targets)
count = 0
inputs, targets= [], []
except StopIteration:
pass
batch_size = 128
# define model
"""
model = nvidia_model()
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
# train model
model.fit_generator(read_data_gen(batch_size), samples_per_epoch=8000*2, nb_epoch=5)
model.save('model.h5')
"""
if __name__=="__main__":
train_model()
| 25.451777
| 89
| 0.632429
|
import csv
import matplotlib.image as mpimg
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers.core import Flatten,Lambda,Dense
from keras.layers.convolutional import Cropping2D,Conv2D
from keras import backend as K
from keras.layers.core import Activation
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
def resize(image):
import tensorflow as tf
resized = tf.image.resize_images(image,(32,32))
return resized
def resize_nvidia(image):
import tensorflow as tf
resized = tf.image.resize_images(image,(66,200))
return resized
"""
To show the preprocessing for final model
"""
def process_image(file_name,nvidia_or_final):
if nvidia_or_final=='nvidia':
crop_top, crop_bot = 70, 25
new_shape = (66,200)
elif nvidia_or_final=='final':
crop_top, crop_bot = 80, 48
new_shape = (32,32)
img = mpimg.imread(file_name)
h = img.shape[0]
cropped_img = img[crop_top:h-crop_bot,:,:]
plt.imshow(cropped_img)
plt.savefig("cropped_img")
resized_image = cv2.resize(cropped_img,new_shape)
plt.imshow(resized_image)
plt.savefig("resized_img")
plt.imshow(np.fliplr(resized_image))
plt.savefig("flipped_img")
def read_data_gen(batch_size):
"""
Generator function to load driving logs and input images.
"""
while 1:
with open('data/driving_log.csv') as driving_log_file:
reader = csv.DictReader(driving_log_file)
count = 0
inputs, targets = [], []
try:
for row in reader:
center_img = mpimg.imread('data/'+ row['center'].strip())
flipped_center_img = np.fliplr(center_img)
center_steering = float(row['steering'])
if count < batch_size//2:
inputs += [center_img, flipped_center_img]
targets += [center_steering, -center_steering]
count += 1
else:
yield np.array(inputs, dtype=center_img.dtype), np.array(targets)
count = 0
inputs, targets= [], []
except StopIteration:
pass
batch_size = 128
# define model
def final_model():
# define model
model = Sequential()
# crop top and bottom parts of the image
model.add(Cropping2D(cropping=((80, 48), (0, 0)), input_shape=(160, 320, 3)))
# resize image to 32x32
model.add(Lambda(resize,output_shape=(32, 32, 3)))
# normalize layer values
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# Model colour information
model.add(Conv2D(3, 1, 1, border_mode='valid', subsample=(1, 1), activation='elu'))
# Conv filter 1
model.add(Conv2D(3, 3, 3, border_mode='valid', activation='elu'))
# Conv filter 2
model.add(Conv2D(6, 5, 5, border_mode='valid', subsample=(2, 2), activation='elu'))
# conv filter 3
model.add(Conv2D(16, 5, 5, border_mode='valid', subsample=(2, 2), activation='elu'))
# flatten
model.add(Flatten())
# Dense layer 1
model.add(Dense(100, activation='elu'))
# Dense layer 2
model.add(Dense(25, activation='elu'))
# Final Dense for prediction of steering
model.add(Dense(1))
return model
def nvidia_model():
model = Sequential()
# Preprocessing
model.add(Lambda(lambda x: x/127.5 -1.0,input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
#model.add(Lambda(resize_nvidia,output_shape=(32, 32, 3)))
# 1st Conv Layer
model.add(Conv2D(24,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 2nd Conv Layer
model.add(Conv2D(36,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 3rd Conv Layer
model.add(Conv2D(48,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 4th Conv Layer
model.add(Conv2D(64,3,3))
model.add(Activation('elu'))
# 5th Conv Layer
model.add(Conv2D(64,3,3))
model.add(Activation('elu'))
# Flatten
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('elu'))
model.add(Dense(50))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(1))
return model
"""
model = nvidia_model()
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
# train model
model.fit_generator(read_data_gen(batch_size), samples_per_epoch=8000*2, nb_epoch=5)
model.save('model.h5')
"""
def gen_preprocess_images():
image = 'data/IMG/center_2016_12_01_13_31_13_177.jpg'
process_image(image,'final')
def train_model():
model = final_model()
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
model.fit_generator(read_data_gen(batch_size), samples_per_epoch=8000*2, nb_epoch=5)
model.save('model.h5')
if __name__=="__main__":
train_model()
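A hedged sketch of single-frame inference with the model saved above: final_model crops and resizes inside the graph, so a raw 160x320x3 frame is fed directly; the resize Lambda references the resize() helper defined above, so it is passed through custom_objects when loading. The image path is a placeholder, not a file from the original data set.

# Illustrative only: predict steering for one raw simulator frame (160x320x3).
def predict_steering(model_path='model.h5', image_path='data/IMG/center_example.jpg'):
    from keras.models import load_model
    model = load_model(model_path, custom_objects={'resize': resize})
    frame = mpimg.imread(image_path)                  # shape (160, 320, 3)
    steering = model.predict(frame[None, ...])[0, 0]  # add batch dimension
    return float(steering)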
| 3,085
| 0
| 158
|
43ba3750ab55b89ed9e0505f5404d4b28171dd33
| 1,647
|
py
|
Python
|
src/downward/experiments/issue739/v5-translate.py
|
ScarfZapdos/conan-bge-questgen
|
4d184c5bf0ae4b768b8043cec586395df9ce1451
|
[
"MIT"
] | 1
|
2021-09-09T13:03:02.000Z
|
2021-09-09T13:03:02.000Z
|
src/downward/experiments/issue739/v5-translate.py
|
ScarfZapdos/conan-bge-questgen
|
4d184c5bf0ae4b768b8043cec586395df9ce1451
|
[
"MIT"
] | null | null | null |
src/downward/experiments/issue739/v5-translate.py
|
ScarfZapdos/conan-bge-questgen
|
4d184c5bf0ae4b768b8043cec586395df9ce1451
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v5"]
CONFIGS = [
IssueConfig('translate', [], driver_options=['--translate']),
IssueConfig('translate-with-options', ['--translate-options', '--keep-unreachable-facts', '--keep-unimportant-variables', '--full-encoding'], driver_options=['--translate']),
IssueConfig('translate-time-limit', [], driver_options=['--translate-time-limit', '5s', '--translate']),
IssueConfig('translate-memory-limit', [], driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ['gripper:prob10.pddl','mystery:prob07.pddl']
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
exp.add_parser(exp.LAB_DRIVER_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['translator_*', 'error'])
exp.run_steps()
| 35.042553
| 178
| 0.756527
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v5"]
CONFIGS = [
IssueConfig('translate', [], driver_options=['--translate']),
IssueConfig('translate-with-options', ['--translate-options', '--keep-unreachable-facts', '--keep-unimportant-variables', '--full-encoding'], driver_options=['--translate']),
IssueConfig('translate-time-limit', [], driver_options=['--translate-time-limit', '5s', '--translate']),
IssueConfig('translate-memory-limit', [], driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="silvan.sievers@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ['gripper:prob10.pddl','mystery:prob07.pddl']
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
exp.add_parser(exp.LAB_DRIVER_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['translator_*', 'error'])
exp.run_steps()
| 0
| 0
| 0
|
6ba0b5003d4c97df676dbfc10dff603b15cd48d9
| 506
|
py
|
Python
|
mplscience/__init__.py
|
adamgayoso/mpscience
|
0401ded920a4d09314e9a747cf4da07d17a60a05
|
[
"MIT"
] | 4
|
2021-07-15T16:55:24.000Z
|
2022-03-04T23:10:02.000Z
|
mplscience/__init__.py
|
adamgayoso/mpscience
|
0401ded920a4d09314e9a747cf4da07d17a60a05
|
[
"MIT"
] | null | null | null |
mplscience/__init__.py
|
adamgayoso/mpscience
|
0401ded920a4d09314e9a747cf4da07d17a60a05
|
[
"MIT"
] | null | null | null |
"""Matplotlib science style"""
from .core import available_styles, set_style, style_context
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
# https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
package_name = "mplscience"
__version__ = importlib_metadata.version(package_name)
__all__ = ["available_styles", "set_style", "style_context"]
| 31.625
| 75
| 0.804348
|
"""Matplotlib science style"""
from .core import available_styles, set_style, style_context
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
# https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
package_name = "mplscience"
__version__ = importlib_metadata.version(package_name)
__all__ = ["available_styles", "set_style", "style_context"]
| 0
| 0
| 0
|
3a06da5ff6c0053e4dc72e9a222d828921a7534c
| 4,153
|
py
|
Python
|
template/templates.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 13
|
2020-01-14T16:23:48.000Z
|
2022-02-16T18:02:08.000Z
|
template/templates.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 24
|
2021-04-21T05:30:42.000Z
|
2022-03-31T20:07:29.000Z
|
template/templates.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 1
|
2021-08-09T16:48:33.000Z
|
2021-08-09T16:48:33.000Z
|
from contextlib import suppress
from pathlib import Path
from typing import List
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QFileDialog, QListWidgetItem
with suppress(ImportError):
from appwindow import AppWindow
from tools.settings import FinalCifSettings
class ReportTemplates:
"""
Displays the list of report templates in the options menu.
"""
| 44.180851
| 120
| 0.666265
|
from contextlib import suppress
from pathlib import Path
from typing import List
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QFileDialog, QListWidgetItem
with suppress(ImportError):
from appwindow import AppWindow
from tools.settings import FinalCifSettings
class ReportTemplates:
"""
Displays the list of report templates in the options menu.
"""
def __init__(self, app: 'AppWindow', settings: FinalCifSettings):
self.app = app
self.settings = settings
self.lw = self.app.ui.TemplatesListWidget
self.load_templates_list()
self.app.ui.AddNewTemplPushButton.clicked.connect(self.add_new_template)
self.app.ui.RemoveTemplPushButton.clicked.connect(self.remove_current_template)
self.app.ui.TemplatesListWidget.currentItemChanged.connect(self.template_changed)
self.app.ui.TemplatesListWidget.itemChanged.connect(self.template_changed)
self.app.ui.TemplatesListWidget.setCurrentItem(
self.app.ui.TemplatesListWidget.item(self.app.options.current_template))
def add_new_template(self, templ_path: str = '') -> None:
if not templ_path:
templ_path, _ = QFileDialog.getOpenFileName(filter="DOCX file (*.docx)", initialFilter="DOCX file (*.docx)",
caption='Open a Report Template File')
itemslist = self.get_templates_list_from_widget()
self.app.status_bar.show_message('')
if templ_path in itemslist:
self.app.status_bar.show_message('This template is already in the list.', 10)
print('This template is already in the list.')
return
if not Path(templ_path).exists() or not Path(templ_path).is_file() \
or not Path(templ_path).name.endswith('.docx'):
self.app.status_bar.show_message('This template does not exist or is unreadable.', 10)
print('This template does not exist or is unreadable.', Path(templ_path).resolve())
return
item = QListWidgetItem(templ_path)
item.setCheckState(Qt.Unchecked)
self.app.ui.TemplatesListWidget.addItem(item)
self.settings.save_template_list('report_templates_list', self.get_templates_list_from_widget())
def load_templates_list(self):
templates = self.settings.load_template('report_templates_list')
if not templates:
return
for text in templates:
if text.startswith('Use'):
continue
with suppress(Exception):
if not Path(text).exists():
item = QListWidgetItem(text)
item.setForeground(QColor(220, 12, 34))
else:
item = QListWidgetItem(str(Path(text).resolve(strict=True)))
self.app.ui.TemplatesListWidget.addItem(item)
item.setCheckState(Qt.Unchecked)
def get_templates_list_from_widget(self) -> List:
itemslist = []
for num in range(self.lw.count()):
itemtext = self.lw.item(num).text()
if itemtext not in itemslist:
itemslist.append(itemtext)
return itemslist
def remove_current_template(self) -> None:
if self.lw.currentRow() == 0:
return
self.lw.takeItem(self.lw.row(self.lw.currentItem()))
self.settings.save_template_list('report_templates_list', self.get_templates_list_from_widget())
def template_changed(self, current_item: QListWidgetItem):
# Blocking signal in order to avoid infinitive recursion:
self.app.ui.TemplatesListWidget.blockSignals(True)
options = self.settings.load_options()
options.update({'current_report_template': self.lw.row(current_item)})
self.uncheck_all_templates()
current_item.setCheckState(Qt.Checked)
self.settings.save_options(options)
self.app.ui.TemplatesListWidget.blockSignals(False)
def uncheck_all_templates(self):
for num in range(self.lw.count()):
self.lw.item(num).setCheckState(Qt.Unchecked)
| 3,553
| 0
| 189
|
10d8540be113dbd61336aecade22ee5448341df6
| 2,648
|
py
|
Python
|
guillotina_cms/api/search.py
|
alteroo/guillotina_cms
|
a8ea0efd2ad4f4ab9fab484fe55f41abd37cdac8
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina_cms/api/search.py
|
alteroo/guillotina_cms
|
a8ea0efd2ad4f4ab9fab484fe55f41abd37cdac8
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina_cms/api/search.py
|
alteroo/guillotina_cms
|
a8ea0efd2ad4f4ab9fab484fe55f41abd37cdac8
|
[
"BSD-2-Clause"
] | null | null | null |
from guillotina import configure
from guillotina.catalog.utils import parse_query
from guillotina.interfaces import IResource
from guillotina.utils import find_container
from guillotina_cms.utils import get_search_utility
import itertools
from collections import Counter
@configure.service(
context=IResource, method='GET', permission='guillotina.AccessContent', name='@search',
summary='Make search request',
responses={
"200": {
"description": "Search results",
"type": "object",
"schema": {
"$ref": "#/definitions/SearchResults"
}
}
})
@configure.service(
context=IResource, method='GET', permission='guillotina.AccessContent', name='@suggestion',
summary='Make search request',
responses={
"200": {
"description": "Search results",
"type": "object",
"schema": {
"$ref": "#/definitions/SearchResults"
}
}
})
| 28.782609
| 95
| 0.574018
|
from guillotina import configure
from guillotina.catalog.utils import parse_query
from guillotina.interfaces import IResource
from guillotina.utils import find_container
from guillotina_cms.utils import get_search_utility
import itertools
from collections import Counter
@configure.service(
context=IResource, method='GET', permission='guillotina.AccessContent', name='@search',
summary='Make search request',
responses={
"200": {
"description": "Search results",
"type": "object",
"schema": {
"$ref": "#/definitions/SearchResults"
}
}
})
async def search_get(context, request):
query = request.query.copy()
search = get_search_utility(query)
if search is None:
return {
'@id': request.url,
'items': [],
'items_total': 0
}
parsed_query = parse_query(context, query, search)
container = find_container(context)
result = await search.search(container, parsed_query)
return {
'@id': request.url,
'items': result['member'],
'items_total': result['items_count'],
'batching': {
'from': parsed_query['_from'] or 0,
'size': parsed_query['size']
}
}
@configure.service(
context=IResource, method='GET', permission='guillotina.AccessContent', name='@suggestion',
summary='Make search request',
responses={
"200": {
"description": "Search results",
"type": "object",
"schema": {
"$ref": "#/definitions/SearchResults"
}
}
})
async def suggestion_get(context, request):
query = request.query.copy()
search = get_search_utility(query)
if search is None:
return {}
fields = request.query.get('_metadata', '').split(',')
result = await search.query_aggregation(context, query)
if 'member' in result:
aggregation = []
for field in fields:
aggregation.append([])
for items in result['member']:
for index, item in enumerate(items):
if isinstance(item, list):
aggregation[index].extend(item)
elif isinstance(item, str):
aggregation[index].append(item)
final_result = {}
for index, field in enumerate(fields):
elements = dict(Counter(aggregation[index]))
final_result[field] = {
"items": elements,
"total": len(elements)
}
return final_result
else:
return {}
| 1,595
| 0
| 44
|
a01a3506d1d2788ace0fce8ccd2fde5aaaddd30d
| 505
|
py
|
Python
|
modules/img2Vector.py
|
cycoe/class_robber
|
968e493481e1dfa06a806d6382413871a4856f31
|
[
"MIT"
] | 3
|
2018-09-19T14:55:43.000Z
|
2019-03-12T01:07:05.000Z
|
modules/img2Vector.py
|
cycoe/class_robber
|
968e493481e1dfa06a806d6382413871a4856f31
|
[
"MIT"
] | 1
|
2020-12-14T11:56:10.000Z
|
2020-12-14T12:31:37.000Z
|
modules/img2Vector.py
|
cycoe/class_robber
|
968e493481e1dfa06a806d6382413871a4856f31
|
[
"MIT"
] | null | null | null |
import os
from PIL import Image
| 24.047619
| 62
| 0.554455
|
import os
from PIL import Image
def img2Vector(imgPath):
fullVector = []
vectorList = []
img = Image.open(imgPath)
img = img.convert('RGBA')
pixdata = img.load()
for x in range(img.size[0]):
for y in range(img.size[1]):
if pixdata[x, y] == (0, 0, 153, 255):
fullVector.append(1)
else:
fullVector.append(0)
for i in range(4):
vectorList.append(fullVector[135+324*i:135+324*(i+1)])
return vectorList
| 449
| 0
| 23
|
6aa55d8d1800c23e9910fd1b02e8951e054760f8
| 4,126
|
py
|
Python
|
tests/test_random_walk.py
|
SwamyDev/gym-quickcheck
|
d271f509c11998c9f210c5d8131906f712553123
|
[
"MIT"
] | null | null | null |
tests/test_random_walk.py
|
SwamyDev/gym-quickcheck
|
d271f509c11998c9f210c5d8131906f712553123
|
[
"MIT"
] | null | null | null |
tests/test_random_walk.py
|
SwamyDev/gym-quickcheck
|
d271f509c11998c9f210c5d8131906f712553123
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from gym import utils
from more_itertools import last
from gym_quickcheck.envs import RandomWalkEnv
from tests.aux import assert_that, follows_contract, assert_obs_eq, unpack_reward, unpack_obs, unpack_done, until_done, \
run_example
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize('direction', [0, 1])
| 30.338235
| 121
| 0.741638
|
import numpy as np
import pytest
from gym import utils
from more_itertools import last
from gym_quickcheck.envs import RandomWalkEnv
from tests.aux import assert_that, follows_contract, assert_obs_eq, unpack_reward, unpack_obs, unpack_done, until_done, \
run_example
@pytest.fixture
def env():
return RandomWalkEnv()
@pytest.fixture
def obs_shape(env):
return env.observation_space.shape
@pytest.fixture
def make_observation(obs_shape, make_observation_of):
def obs_fac(agent_pos):
return make_observation_of(obs_shape, agent_pos)
return obs_fac
@pytest.fixture
def walk_len(obs_shape):
return obs_shape[0]
@pytest.fixture
def center(walk_len):
return walk_len // 2
@pytest.fixture
def steps_to_edge(walk_len, center):
return walk_len - center - 1
@pytest.fixture
def make_walk_string(walk_len):
def walk_fac(agent_pos, color='red'):
s = ['#'] * walk_len
s[agent_pos] = utils.colorize(s[agent_pos], color=color, highlight=True)
return "".join(s)
return walk_fac
def test_adherence_to_gym_contract(env, gym_interface, gym_properties):
assert_that(env, follows_contract(gym_interface, gym_properties))
def test_agent_starts_in_the_center(env, make_observation, center):
assert_obs_eq(env.reset(), make_observation(agent_pos=center))
def test_navigating_the_agent(env, make_observation, center):
actions = [1, 1, 0, 0, 0, 1]
offsets = [1, 2, 1, 0, -1, 0]
for a, o in zip(actions, offsets):
assert_obs_eq(unpack_obs(env.step(a)), make_observation(agent_pos=center + o))
def test_reset_moves_agent_back_to_center(env, make_observation, center):
env.step(0)
assert_obs_eq(env.reset(), make_observation(agent_pos=center))
def test_environment_does_not_finish_until_goal_or_max_length_is_reached(env):
assert all(not unpack_done(env.step(0)) for _ in range(env.max_len - 1))
@pytest.mark.parametrize('direction', [0, 1])
def test_reset_environment_is_not_done(env, direction):
all(until_done(env, direction))
env.reset()
assert not unpack_done(env.step(direction))
all(until_done(env, direction))
def test_environment_has_a_max_episode_len(env):
assert sum(1 for _ in until_done(env, 0)) == env.max_len
def test_finishes_when_reaching_right_most_edge(env, steps_to_edge):
assert last(unpack_done(env.step(1)) for _ in range(steps_to_edge))
def test_each_step_outside_of_goal_returns_a_penalty(env, walk_len, steps_to_edge):
assert all(unpack_reward(env.step(0)) == env.penalty for _ in range(steps_to_edge))
assert all(unpack_reward(env.step(1)) == env.penalty for _ in range(walk_len - 2))
def test_reaching_goal_on_the_right_returns_reward(env):
assert last(r for _, r, _, _ in until_done(env, 1)) == env.reward
def test_walking_right_achieves_maximum_reward(env):
assert sum(r for _, r, _, _ in until_done(env, 1)) == env.reward_range[1]
def test_walking_left_until_max_length_is_reached_achieves_minimum_reward(env):
assert sum(r for _, r, _, _ in until_done(env, 0)) == env.reward_range[0]
def test_on_average_random_agent_performs_poorly(env, sample_average_reward):
assert sample_average_reward(env, 1000) <= np.mean(env.reward_range)
def test_render_writes_current_state_to_stdout(env, make_walk_string, center, capstdout):
env.render()
assert capstdout.read() == "\n" + make_walk_string(agent_pos=center) + "\n"
env.step(0)
env.render()
assert capstdout.read() == "(Left)\n" + make_walk_string(agent_pos=center - 1) + "\n"
env.step(1)
env.render()
assert capstdout.read() == "(Right)\n" + make_walk_string(agent_pos=center) + "\n"
def test_render_agent_pos_in_green_when_reaching_goal(env, make_walk_string, walk_len, capstdout):
all(_ for _ in until_done(env, 1))
env.render()
assert capstdout.read() == "(Right)\n" + make_walk_string(agent_pos=walk_len - 1, color='green') + "\n"
def test_random_walk_example(request, capstdout):
example = request.session.fspath / "examples/random_walk.py"
lines = run_example(example)
assert "Observation: " in last(lines)
| 3,144
| 0
| 521
|
1815a1cfdf441bab8f5c07943254b362f00a655f
| 163
|
py
|
Python
|
celery/settings.py
|
alculquicondor/AmigoCloud-IGP-Sync
|
56de7e9137340054159289ef9c6534bb1b5872fc
|
[
"MIT"
] | null | null | null |
celery/settings.py
|
alculquicondor/AmigoCloud-IGP-Sync
|
56de7e9137340054159289ef9c6534bb1b5872fc
|
[
"MIT"
] | null | null | null |
celery/settings.py
|
alculquicondor/AmigoCloud-IGP-Sync
|
56de7e9137340054159289ef9c6534bb1b5872fc
|
[
"MIT"
] | null | null | null |
from os import environ
TOKEN = environ.get('AMIGOCLOUD_TOKEN')
BROKER_URL = environ.get('BROKER_URL')
PROJECT_URL = 'users/475/projects/13608'
DATASET_ID = 79746
| 23.285714
| 40
| 0.779141
|
from os import environ
TOKEN = environ.get('AMIGOCLOUD_TOKEN')
BROKER_URL = environ.get('BROKER_URL')
PROJECT_URL = 'users/475/projects/13608'
DATASET_ID = 79746
| 0
| 0
| 0
|
855c72651aff3902ac92bec1942941cff9cf4170
| 342
|
py
|
Python
|
scripts/twist_remapper.py
|
tamago117/kcctsim
|
0cd72c79ade6be48ad59fb9cfb202dcbe8de69cf
|
[
"Apache-2.0"
] | 1
|
2021-11-25T07:53:53.000Z
|
2021-11-25T07:53:53.000Z
|
scripts/twist_remapper.py
|
tamago117/kcctsim
|
0cd72c79ade6be48ad59fb9cfb202dcbe8de69cf
|
[
"Apache-2.0"
] | 1
|
2021-09-09T06:34:32.000Z
|
2021-11-02T11:49:00.000Z
|
scripts/twist_remapper.py
|
tamago117/kcctsim
|
0cd72c79ade6be48ad59fb9cfb202dcbe8de69cf
|
[
"Apache-2.0"
] | 2
|
2021-10-01T13:43:58.000Z
|
2021-11-25T07:53:54.000Z
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
pub = rospy.Publisher("/diff_drive_controller/cmd_vel", Twist, queue_size = 10)
def callback(data):
    pub.publish(data)
if __name__ == '__main__':
rospy.init_node('twist_remapper', anonymous=True)
rospy.Subscriber("/cmd_vel", Twist, callback)
rospy.spin()
| 28.5
| 79
| 0.733918
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
pub = rospy.Publisher("/diff_drive_controller/cmd_vel", Twist, queue_size = 10)
def callback(data):
pub.publish(data)
if __name__ == '__main__':
rospy.init_node('twist_remapper', anonymous=True)
rospy.Subscriber("/cmd_vel", Twist, callback)
rospy.spin()
| 20
| 0
| 22
|
860f3238dfabe5abdc4b560671b0f41979c23fa1
| 48,472
|
py
|
Python
|
qiskit/visualization/matplotlib.py
|
quantumjim/qiskit-terra
|
5292f487eaa980986a1e5affae8c4fc50c743e71
|
[
"Apache-2.0"
] | 1
|
2019-12-09T08:25:14.000Z
|
2019-12-09T08:25:14.000Z
|
qiskit/visualization/matplotlib.py
|
quantumjim/qiskit-terra
|
5292f487eaa980986a1e5affae8c4fc50c743e71
|
[
"Apache-2.0"
] | 1
|
2020-03-29T19:57:14.000Z
|
2020-03-29T21:49:25.000Z
|
qiskit/visualization/matplotlib.py
|
quantumjim/qiskit-terra
|
5292f487eaa980986a1e5affae8c4fc50c743e71
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,missing-docstring,inconsistent-return-statements
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import get_backend
from matplotlib import patches
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.circuit import ControlledGate
from qiskit.visualization import exceptions
from qiskit.visualization.qcstyle import DefaultStyle, BWStyle
from qiskit import user_config
from qiskit.circuit.tools.pi_check import pi_check
logger = logging.getLogger(__name__)
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
PORDER_GATE = 5
PORDER_LINE = 3
PORDER_REGLINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
| 41.894555
| 100
| 0.444875
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,missing-docstring,inconsistent-return-statements
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import get_backend
from matplotlib import patches
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.circuit import ControlledGate
from qiskit.visualization import exceptions
from qiskit.visualization.qcstyle import DefaultStyle, BWStyle
from qiskit import user_config
from qiskit.circuit.tools.pi_check import pi_check
logger = logging.getLogger(__name__)
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
PORDER_GATE = 5
PORDER_LINE = 3
PORDER_REGLINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
class Anchor:
def __init__(self, reg_num, yind, fold):
self.__yind = yind
self.__fold = fold
self.__reg_num = reg_num
self.__gate_placed = []
self.gate_anchor = 0
def plot_coord(self, index, gate_width, x_offset):
h_pos = index % self.__fold + 1
# check folding
if self.__fold > 0:
if h_pos + (gate_width - 1) > self.__fold:
index += self.__fold - (h_pos - 1)
x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
else:
x_pos = index + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind
# could have been updated, so need to store
self.gate_anchor = index
return x_pos + x_offset, y_pos
def is_locatable(self, index, gate_width):
hold = [index + i for i in range(gate_width)]
for p in hold:
if p in self.__gate_placed:
return False
return True
def set_index(self, index, gate_width):
h_pos = index % self.__fold + 1
if h_pos + (gate_width - 1) > self.__fold:
_index = index + self.__fold - (h_pos - 1)
else:
_index = index
for ii in range(gate_width):
if _index + ii not in self.__gate_placed:
self.__gate_placed.append(_index + ii)
self.__gate_placed.sort()
def get_index(self):
if self.__gate_placed:
return self.__gate_placed[-1] + 1
return 0
class MatplotlibDrawer:
def __init__(self, qregs, cregs, ops,
scale=1.0, style=None, plot_barriers=True,
reverse_bits=False, layout=None, fold=25, ax=None):
if not HAS_MATPLOTLIB:
raise ImportError('The class MatplotlibDrawer needs matplotlib. '
'To install, run "pip install matplotlib".')
self._ast = None
self._scale = DEFAULT_SCALE * scale
self._creg = []
self._qreg = []
self._registers(cregs, qregs)
self._ops = ops
self._qreg_dict = collections.OrderedDict()
self._creg_dict = collections.OrderedDict()
self._cond = {
'n_lines': 0,
'xmax': 0,
'ymax': 0,
}
config = user_config.get_config()
if config and (style is None):
config_style = config.get('circuit_mpl_style', 'default')
if config_style == 'default':
self._style = DefaultStyle()
elif config_style == 'bw':
self._style = BWStyle()
elif style is False:
self._style = BWStyle()
else:
self._style = DefaultStyle()
self.plot_barriers = plot_barriers
self.reverse_bits = reverse_bits
self.layout = layout
if style:
if isinstance(style, dict):
self._style.set_style(style)
elif isinstance(style, str):
with open(style, 'r') as infile:
dic = json.load(infile)
self._style.set_style(dic)
if ax is None:
self.return_fig = True
self.figure = plt.figure()
self.figure.patch.set_facecolor(color=self._style.bg)
self.ax = self.figure.add_subplot(111)
else:
self.return_fig = False
self.ax = ax
self.figure = ax.get_figure()
self.fold = fold
if self.fold < 2:
self.fold = -1
self.ax.axis('off')
self.ax.set_aspect('equal')
self.ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
self.x_offset = 0
def _registers(self, creg, qreg):
self._creg = []
for r in creg:
self._creg.append(r)
self._qreg = []
for r in qreg:
self._qreg.append(r)
@property
def ast(self):
return self._ast
def _custom_multiqubit_gate(self, xy, cxy=None, fc=None, wide=True, text=None,
subtext=None):
xpos = min([x[0] for x in xy])
ypos = min([y[1] for y in xy])
ypos_max = max([y[1] for y in xy])
if cxy:
ypos = min([y[1] for y in cxy])
if wide:
if subtext:
boxes_length = round(max([len(text), len(subtext)]) / 7) or 1
else:
boxes_length = math.ceil(len(text) / 7) or 1
wid = WID * 2.5 * boxes_length
else:
wid = WID
if fc:
_fc = fc
else:
if self._style.name != 'bw':
if self._style.gc != DefaultStyle().gc:
_fc = self._style.gc
else:
_fc = self._style.dispcol['multi']
_ec = self._style.dispcol['multi']
else:
_fc = self._style.gc
qubit_span = abs(ypos) - abs(ypos_max) + 1
height = HIG + (qubit_span - 1)
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - .5 * HIG),
width=wid, height=height,
fc=_fc,
ec=self._style.dispcol['multi'],
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
# Annotate inputs
for bit, y in enumerate([x[1] for x in xy]):
self.ax.text(xpos - 0.45 * wid, y, str(bit), ha='left', va='center',
fontsize=self._style.fs, color=self._style.gt,
clip_on=True, zorder=PORDER_TEXT)
if text:
disp_text = text
if subtext:
self.ax.text(xpos, ypos + 0.5 * height, disp_text, ha='center',
va='center', fontsize=self._style.fs,
color=self._style.gt, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos + 0.3 * height, subtext, ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.sc, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos + .5 * (qubit_span - 1), disp_text,
ha='center',
va='center',
fontsize=self._style.fs,
color=self._style.gt,
clip_on=True,
zorder=PORDER_TEXT,
wrap=True)
def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):
xpos, ypos = xy
if wide:
if subtext:
subtext_len = len(subtext)
if '$\\pi$' in subtext:
pi_count = subtext.count('pi')
subtext_len = subtext_len - (4 * pi_count)
boxes_wide = round(max(subtext_len, len(text)) / 10, 1) or 1
wid = WID * 1.5 * boxes_wide
else:
boxes_wide = round(len(text) / 10) or 1
wid = WID * 2.2 * boxes_wide
if wid < WID:
wid = WID
else:
wid = WID
if fc:
_fc = fc
elif self._style.gc != DefaultStyle().gc:
_fc = self._style.gc
elif text and text in self._style.dispcol:
_fc = self._style.dispcol[text]
else:
_fc = self._style.gc
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,
fc=_fc, ec=self._style.edge_color, linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
if text:
font_size = self._style.fs
sub_font_size = self._style.sfs
# check if gate is not unitary
if text in ['reset']:
disp_color = self._style.not_gate_lc
sub_color = self._style.not_gate_lc
font_size = self._style.math_fs
else:
disp_color = self._style.gt
sub_color = self._style.sc
if text in self._style.dispcol:
disp_text = "${}$".format(self._style.disptex[text])
else:
disp_text = text
if subtext:
self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',
va='center', fontsize=font_size,
color=disp_color, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
va='center', fontsize=sub_font_size,
color=sub_color, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos, disp_text, ha='center', va='center',
fontsize=font_size,
color=disp_color,
clip_on=True,
zorder=PORDER_TEXT)
def _subtext(self, xy, text):
xpos, ypos = xy
self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _sidetext(self, xy, text):
xpos, ypos = xy
# 0.15 = the initial gap, each char means it needs to move
# another 0.0375 over
xp = xpos + 0.15 + (0.0375 * len(text))
self.ax.text(xp, ypos + HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _line(self, xy0, xy1, lc=None, ls=None, zorder=PORDER_LINE):
x0, y0 = xy0
x1, y1 = xy1
if lc is None:
linecolor = self._style.lc
else:
linecolor = lc
if ls is None:
linestyle = 'solid'
else:
linestyle = ls
if linestyle == 'doublet':
theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
dx = 0.05 * WID * np.cos(theta)
dy = 0.05 * WID * np.sin(theta)
self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
color=linecolor,
linewidth=2,
linestyle='solid',
zorder=zorder)
self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
color=linecolor,
linewidth=2,
linestyle='solid',
zorder=zorder)
else:
self.ax.plot([x0, x1], [y0, y1],
color=linecolor,
linewidth=2,
linestyle=linestyle,
zorder=zorder)
def _measure(self, qxy, cxy, cid, basis='z'):
qx, qy = qxy
cx, cy = cxy
self._gate(qxy, fc=self._style.dispcol['meas'])
# add measure symbol
arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
height=HIG * 0.7, theta1=0, theta2=180, fill=False,
ec=self._style.not_gate_lc, linewidth=2,
zorder=PORDER_GATE)
self.ax.add_patch(arc)
self.ax.plot([qx, qx + 0.35 * WID],
[qy - 0.15 * HIG, qy + 0.20 * HIG],
color=self._style.not_gate_lc, linewidth=2, zorder=PORDER_GATE)
# arrow
self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,
ls=self._style.cline)
arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
(cx + 0.20 * WID, cy + 0.35 * WID),
(cx, cy)),
fc=self._style.cc,
ec=None)
self.ax.add_artist(arrowhead)
# target
if self._style.bundle:
self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
# measurement basis label
if basis != 'z':
self.ax.text(qx - 0.4 * WID, qy + 0.25 * HIG, basis.upper(),
color=self._style.not_gate_lc,
clip_on=True, zorder=PORDER_TEXT, fontsize=0.5 * self._style.fs,
fontweight='bold')
def _conds(self, xy, istrue=False):
xpos, ypos = xy
if istrue:
_fc = self._style.lc
else:
_fc = self._style.gc
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _ctrl_qubit(self, xy, fc=None, ec=None):
if self._style.gc != DefaultStyle().gc:
fc = self._style.gc
ec = self._style.gc
if fc is None:
fc = self._style.lc
if ec is None:
ec = self._style.lc
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=fc, ec=ec,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def set_multi_ctrl_bits(self, ctrl_state, num_ctrl_qubits, qbit, color_str):
# convert op.ctrl_state to bit string and reverse
cstate = "{0:b}".format(ctrl_state).rjust(num_ctrl_qubits, '0')[::-1]
for i in range(num_ctrl_qubits):
# Make facecolor of ctrl bit the box color if closed and bkgrnd if open
fc_open_close = (self._style.dispcol[color_str] if cstate[i] == '1'
else self._style.bg)
self._ctrl_qubit(qbit[i], fc=fc_open_close, ec=self._style.dispcol[color_str])
def _tgt_qubit(self, xy, fc=None, ec=None, ac=None,
add_width=None):
if self._style.gc != DefaultStyle().gc:
fc = self._style.gc
ec = self._style.gc
if fc is None:
fc = self._style.dispcol['target']
if ec is None:
ec = self._style.lc
if ac is None:
ac = self._style.lc
if add_width is None:
add_width = 0.35
linewidth = 2
if self._style.dispcol['target'] == '#ffffff':
add_width = self._style.colored_add_width
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=fc, ec=ec, linewidth=linewidth,
zorder=PORDER_GATE)
self.ax.add_patch(box)
# add '+' symbol
self.ax.plot([xpos, xpos], [ypos - add_width * HIG,
ypos + add_width * HIG],
color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1)
self.ax.plot([xpos - add_width * HIG, xpos + add_width * HIG],
[ypos, ypos], color=ac, linewidth=linewidth,
zorder=PORDER_GATE + 1)
def _swap(self, xy, color):
xpos, ypos = xy
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=color, linewidth=2, zorder=PORDER_LINE + 1)
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=color, linewidth=2, zorder=PORDER_LINE + 1)
def _barrier(self, config):
xys = config['coord']
group = config['group']
y_reg = []
for qreg in self._qreg_dict.values():
if qreg['group'] in group:
y_reg.append(qreg['y'])
for xy in xys:
xpos, ypos = xy
self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],
linewidth=1, linestyle="dashed",
color=self._style.lc,
zorder=PORDER_TEXT)
box = patches.Rectangle(xy=(xpos - (0.3 * WID), ypos - 0.5),
width=0.6 * WID, height=1,
fc=self._style.bc, ec=None, alpha=0.6,
linewidth=1.5, zorder=PORDER_GRAY)
self.ax.add_patch(box)
def _linefeed_mark(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - .1, xpos - .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
self.ax.plot([xpos + .1, xpos + .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
def draw(self, filename=None, verbose=False):
self._draw_regs()
self._draw_ops(verbose)
_xl = - self._style.margin[0]
_xr = self._cond['xmax'] + self._style.margin[1]
_yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5
_yt = self._style.margin[3] + 0.5
self.ax.set_xlim(_xl, _xr)
self.ax.set_ylim(_yb, _yt)
# update figure size
fig_w = _xr - _xl
fig_h = _yt - _yb
if self._style.figwidth < 0.0:
self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID
self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * fig_h / fig_w)
if filename:
self.figure.savefig(filename, dpi=self._style.dpi,
bbox_inches='tight')
if self.return_fig:
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(self.figure)
return self.figure
def _draw_regs(self):
def _fix_double_script(label):
words = label.split(' ')
words = [word.replace('_', r'\_') if word.count('_') > 1 else word
for word in words]
words = [word.replace('^', r'\^{\ }') if word.count('^') > 1 else word
for word in words]
return ' '.join(words)
len_longest_label = 0
# quantum register
for ii, reg in enumerate(self._qreg):
if len(self._qreg) > 1:
if self.layout is None:
label = '${{{name}}}_{{{index}}}$'.format(name=reg.register.name,
index=reg.index)
else:
label = '${{{name}}}_{{{index}}} \\mapsto {{{physical}}}$'.format(
name=self.layout[reg.index].register.name,
index=self.layout[reg.index].index,
physical=reg.index)
else:
label = '${name}$'.format(name=reg.register.name)
label = _fix_double_script(label)
if len(label) > len_longest_label:
len_longest_label = len(label)
pos = -ii
self._qreg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.register
}
self._cond['n_lines'] += 1
# classical register
if self._creg:
n_creg = self._creg.copy()
n_creg.pop(0)
idx = 0
y_off = -len(self._qreg)
for ii, (reg, nreg) in enumerate(itertools.zip_longest(
self._creg, n_creg)):
pos = y_off - idx
if self._style.bundle:
label = '${}$'.format(reg.register.name)
label = _fix_double_script(label)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.register
}
if not (not nreg or reg.register != nreg.register):
continue
else:
label = '${}_{{{}}}$'.format(reg.register.name, reg.index)
label = _fix_double_script(label)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.register
}
if len(label) > len_longest_label:
len_longest_label = len(label)
self._cond['n_lines'] += 1
idx += 1
# 7 is the length of the smallest possible label
self.x_offset = -.5 + 0.18 * (len_longest_label - 7)
def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):
# quantum register
for qreg in self._qreg_dict.values():
if n_fold == 0:
label = qreg['label']
else:
label = qreg['label']
y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)
self.ax.text(self.x_offset - 0.2, y, label, ha='right', va='center',
fontsize=1.25 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([self.x_offset + 0.2, y], [self._cond['xmax'], y],
zorder=PORDER_REGLINE)
# classical register
this_creg_dict = {}
for creg in self._creg_dict.values():
if n_fold == 0:
label = creg['label']
else:
label = creg['label']
y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)
if y not in this_creg_dict.keys():
this_creg_dict[y] = {'val': 1, 'label': label}
else:
this_creg_dict[y]['val'] += 1
for y, this_creg in this_creg_dict.items():
# bundle
if this_creg['val'] > 1:
self.ax.plot([self.x_offset + 1.1, self.x_offset + 1.2], [y - .1, y + .1],
color=self._style.cc,
zorder=PORDER_LINE)
self.ax.text(self.x_offset + 1.0, y + .1, str(this_creg['val']), ha='left',
va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(self.x_offset - 0.2, y, this_creg['label'], ha='right', va='center',
fontsize=1.5 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([self.x_offset + 0.2, y], [self._cond['xmax'], y], lc=self._style.cc,
ls=self._style.cline, zorder=PORDER_REGLINE)
# lf line
if feedline_r:
self._linefeed_mark((self.fold + self.x_offset + 1 - 0.1,
- n_fold * (self._cond['n_lines'] + 1)))
if feedline_l:
self._linefeed_mark((self.x_offset + 0.3,
- n_fold * (self._cond['n_lines'] + 1)))
def _draw_ops(self, verbose=False):
_wide_gate = ['u2', 'u3', 'cu3', 'unitary', 'r', 'cu1', 'rzz']
_barriers = {'coord': [], 'group': []}
#
# generate coordinate manager
#
q_anchors = {}
for key, qreg in self._qreg_dict.items():
q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=qreg['y'],
fold=self.fold)
c_anchors = {}
for key, creg in self._creg_dict.items():
c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=creg['y'],
fold=self.fold)
#
# draw gates
#
prev_anc = -1
for layer in self._ops:
layer_width = 1
for op in layer:
# If one of the standard wide gates
if op.name in _wide_gate:
if layer_width < 2:
layer_width = 2
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
if '$\\pi$' in param:
pi_count = param.count('pi')
len_param = len(param) - (4 * pi_count)
else:
len_param = len(param)
if len_param > len(op.name):
box_width = math.floor(len(param) / 10)
if op.name == 'unitary':
box_width = 2
# If more than 4 characters min width is 2
if box_width <= 1:
box_width = 2
if layer_width < box_width:
if box_width > 2:
layer_width = box_width
else:
layer_width = 2
continue
# If custom ControlledGate
elif isinstance(op.op, ControlledGate) and op.name not in [
'ccx', 'cx', 'c3x', 'c4x', 'cy', 'cz', 'ch', 'cu1',
'cu3', 'crz', 'cswap']:
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
if '$\\pi$' in param:
pi_count = param.count('pi')
len_param = len(param) - (4 * pi_count)
else:
len_param = len(param)
if len_param > len(op.name):
box_width = math.floor(len_param / 5.5)
layer_width = box_width
continue
# if custom gate with a longer than standard name determine
# width
elif op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise', 'cswap', 'swap', 'measure',
'measure_x', 'measure_y', 'measure_z'] and len(op.name) >= 4:
box_width = math.ceil(len(op.name) / 6)
# handle params/subtext longer than op names
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
if '$\\pi$' in param:
pi_count = param.count('pi')
len_param = len(param) - (4 * pi_count)
else:
len_param = len(param)
if len_param > len(op.name):
box_width = math.floor(len(param) / 8)
# If more than 4 characters min width is 2
if box_width <= 1:
box_width = 2
if layer_width < box_width:
if box_width > 2:
layer_width = box_width * 2
else:
layer_width = 2
continue
# If more than 4 characters min width is 2
layer_width = math.ceil(box_width * WID * 2.5)
this_anc = prev_anc + 1
for op in layer:
_iswide = op.name in _wide_gate
if op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise', 'cswap', 'swap', 'measure',
'measure_x', 'measure_y', 'measure_z',
'reset'] and len(op.name) >= 4:
_iswide = True
# get qreg index
q_idxs = []
for qarg in op.qargs:
for index, reg in self._qreg_dict.items():
if (reg['group'] == qarg.register and
reg['index'] == qarg.index):
q_idxs.append(index)
break
# get creg index
c_idxs = []
for carg in op.cargs:
for index, reg in self._creg_dict.items():
if (reg['group'] == carg.register and
reg['index'] == carg.index):
c_idxs.append(index)
break
# Only add the gate to the anchors if it is going to be plotted.
# This prevents additional blank wires at the end of the line if
# the last instruction is a barrier type
if self.plot_barriers or \
op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise']:
for ii in q_idxs:
q_anchors[ii].set_index(this_anc, layer_width)
# qreg coordinate
q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset)
for ii in q_idxs]
# creg coordinate
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset)
for ii in c_idxs]
# bottom and top point of qreg
qreg_b = min(q_xy, key=lambda xy: xy[1])
qreg_t = max(q_xy, key=lambda xy: xy[1])
# update index based on the value from plotting
this_anc = q_anchors[q_idxs[0]].gate_anchor
if verbose:
print(op)
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
else:
param = None
# conditional gate
if op.condition:
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset) for
ii in self._creg_dict]
mask = 0
for index, cbit in enumerate(self._creg):
if cbit.register == op.condition[0]:
mask |= (1 << index)
val = op.condition[1]
# cbit list to consider
fmt_c = '{{:0{}b}}'.format(len(c_xy))
cmask = list(fmt_c.format(mask))[::-1]
# value
fmt_v = '{{:0{}b}}'.format(cmask.count('1'))
vlist = list(fmt_v.format(val))[::-1]
# plot conditionals
v_ind = 0
xy_plot = []
for xy, m in zip(c_xy, cmask):
if m == '1':
if xy not in xy_plot:
if vlist[v_ind] == '1' or self._style.bundle:
self._conds(xy, istrue=True)
else:
self._conds(xy, istrue=False)
xy_plot.append(xy)
v_ind += 1
creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]
self._subtext(creg_b, hex(val))
self._line(qreg_t, creg_b, lc=self._style.cc,
ls=self._style.cline)
#
# draw special gates
#
if op.name[:7] == 'measure':
vv = self._creg_dict[c_idxs[0]]['index']
if len(op.name) == 9:
basis = op.name[-1]
else:
basis = 'z'
self._measure(q_xy[0], c_xy[0], vv, basis)
elif op.name in ['barrier', 'snapshot', 'load', 'save',
'noise']:
_barriers = {'coord': [], 'group': []}
for index, qbit in enumerate(q_idxs):
q_group = self._qreg_dict[qbit]['group']
if q_group not in _barriers['group']:
_barriers['group'].append(q_group)
_barriers['coord'].append(q_xy[index])
if self.plot_barriers:
self._barrier(_barriers)
elif op.name == 'initialize':
vec = '[%s]' % param
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.op.label or "|psi>",
subtext=vec)
elif op.name == 'unitary':
# TODO(mtreinish): Look into adding the unitary to the
# subtext
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.op.label or "Unitary")
elif isinstance(op.op, ControlledGate) and op.name not in [
'ccx', 'cx', 'c3x', 'c4x', 'cy', 'cz', 'ch', 'cu1', 'cu3', 'crz',
'cswap']:
disp = op.op.base_gate.name
num_ctrl_qubits = op.op.num_ctrl_qubits
num_qargs = len(q_xy) - num_ctrl_qubits
# set the ctrl qbits to open or closed
self.set_multi_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, 'multi')
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])
if num_qargs == 1:
if param:
self._gate(q_xy[num_ctrl_qubits], wide=_iswide,
text=disp,
fc=self._style.dispcol['multi'],
subtext='{}'.format(param))
else:
fcx = op.name if op.name in self._style.dispcol else 'multi'
self._gate(q_xy[num_ctrl_qubits], wide=_iswide, text=disp,
fc=self._style.dispcol[fcx])
else:
self._custom_multiqubit_gate(
q_xy[num_ctrl_qubits:], wide=_iswide, fc=self._style.dispcol['multi'],
text=disp)
#
# draw single qubit gates
#
elif len(q_xy) == 1:
disp = op.name
if param:
self._gate(q_xy[0], wide=_iswide, text=disp,
subtext=str(param))
else:
self._gate(q_xy[0], wide=_iswide, text=disp)
#
# draw multi-qubit gates (n=2)
#
elif len(q_xy) == 2:
# cx
if op.name == 'cx':
if self._style.dispcol['cx'] != '#ffffff':
add_width = self._style.colored_add_width
else:
add_width = None
num_ctrl_qubits = op.op.num_ctrl_qubits
# set the ctrl qbits to open or closed
self.set_multi_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, 'cx')
if self._style.name != 'bw':
self._tgt_qubit(q_xy[1], fc=self._style.dispcol['cx'],
ec=self._style.dispcol['cx'],
ac=self._style.dispcol['target'],
add_width=add_width)
else:
self._tgt_qubit(q_xy[1], fc=self._style.dispcol['target'],
ec=self._style.dispcol['cx'],
ac=self._style.dispcol['cx'],
add_width=add_width)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['cx'])
# cz for latexmode
elif op.name == 'cz':
disp = op.name.replace('c', '')
if self._style.name != 'bw':
color = self._style.dispcol['cz']
self._ctrl_qubit(q_xy[0],
fc=color,
ec=color)
self._ctrl_qubit(q_xy[1],
fc=color,
ec=color)
else:
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
# add qubit-qubit wiring
if self._style.name != 'bw':
self._line(qreg_b, qreg_t,
lc=color)
else:
self._line(qreg_b, qreg_t, zorder=PORDER_LINE + 1)
# control gate
elif op.name in ['cy', 'ch', 'cu3', 'crz']:
disp = op.name.replace('c', '')
color = None
if self._style.name != 'bw':
if op.name == 'cy':
color = self._style.dispcol['cy']
else:
color = self._style.dispcol['multi']
self._ctrl_qubit(q_xy[0], fc=color, ec=color)
if param:
self._gate(q_xy[1], wide=_iswide,
text=disp,
fc=color,
subtext='{}'.format(param))
else:
self._gate(q_xy[1], wide=_iswide, text=disp,
fc=color)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=color)
# rzz gate
elif op.name == 'rzz':
color = self._style.dispcol['multi']
self._ctrl_qubit(q_xy[0], fc=color, ec=color)
self._ctrl_qubit(q_xy[1], fc=color, ec=color)
self._sidetext(qreg_b, text='zz({})'.format(param))
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=color)
# cu1 gate
elif op.name == 'cu1':
color = self._style.dispcol['multi']
self._ctrl_qubit(q_xy[0], fc=color, ec=color)
self._ctrl_qubit(q_xy[1], fc=color, ec=color)
self._sidetext(qreg_b, text='U1 ({})'.format(param))
# add qubit-qubit wiring
fc = self._style
self._line(qreg_b, qreg_t, lc=color)
# swap gate
elif op.name == 'swap':
self._swap(q_xy[0], self._style.dispcol['swap'])
self._swap(q_xy[1], self._style.dispcol['swap'])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['swap'])
# dcx and iswap gate
elif op.name in ['dcx', 'iswap']:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
fc=self._style.dispcol[op.name],
text=op.op.label or op.name)
# Custom gate
else:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
text=op.op.label or op.name)
#
# draw multi-qubit gates (n=3)
#
elif len(q_xy) in range(3, 6):
# cswap gate
if op.name == 'cswap':
self._ctrl_qubit(q_xy[0],
fc=self._style.dispcol['multi'],
ec=self._style.dispcol['multi'])
self._swap(q_xy[1], self._style.dispcol['multi'])
self._swap(q_xy[2], self._style.dispcol['multi'])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])
# ccx gate
elif op.name == 'ccx' or op.name == 'c3x' or op.name == 'c4x':
num_ctrl_qubits = op.op.num_ctrl_qubits
# set the ctrl qbits to open or closed
self.set_multi_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, 'multi')
if self._style.name != 'bw':
self._tgt_qubit(q_xy[num_ctrl_qubits], fc=self._style.dispcol['multi'],
ec=self._style.dispcol['multi'],
ac=self._style.dispcol['target'])
else:
self._tgt_qubit(q_xy[num_ctrl_qubits], fc=self._style.dispcol['target'],
ec=self._style.dispcol['multi'],
ac=self._style.dispcol['multi'])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])
# custom gate
else:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
text=getattr(op.op, 'label', None) or op.name)
# draw custom multi-qubit gate
elif len(q_xy) > 5:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
text=op.op.label or op.name)
else:
logger.critical('Invalid gate %s', op)
raise exceptions.VisualizationError('invalid gate {}'.format(op))
# adjust the column if there have been barriers encountered, but not plotted
barrier_offset = 0
if not self.plot_barriers:
# only adjust if everything in the layer wasn't plotted
barrier_offset = -1 if all([op.name in
['barrier', 'snapshot', 'load', 'save', 'noise']
for op in layer]) else 0
prev_anc = this_anc + layer_width + barrier_offset - 1
#
# adjust window size and draw horizontal lines
#
anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]
if anchors:
max_anc = max(anchors)
else:
max_anc = 0
n_fold = max(0, max_anc - 1) // self.fold
# window size
if max_anc > self.fold > 0:
self._cond['xmax'] = self.fold + 1 + self.x_offset
self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1
else:
self._cond['xmax'] = max_anc + 1 + self.x_offset
self._cond['ymax'] = self._cond['n_lines']
# add horizontal lines
for ii in range(n_fold + 1):
feedline_r = (n_fold > 0 and n_fold > ii)
feedline_l = (ii > 0)
self._draw_regs_sub(ii, feedline_l, feedline_r)
# draw gate number
if self._style.index:
for ii in range(max_anc):
if self.fold > 0:
x_coord = ii % self.fold + 1
y_coord = - (ii // self.fold) * (self._cond['n_lines'] + 1) + 0.7
else:
x_coord = ii + 1
y_coord = 0.7
self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.tc, clip_on=True,
zorder=PORDER_TEXT)
@staticmethod
def param_parse(v):
# create an empty list to store the parameters in
param_parts = [None] * len(v)
for i, e in enumerate(v):
try:
param_parts[i] = pi_check(e, output='mpl', ndigits=3)
except TypeError:
param_parts[i] = str(e)
if param_parts[i].startswith('-'):
param_parts[i] = '$-$' + param_parts[i][1:]
param_parts = ', '.join(param_parts)
return param_parts
@staticmethod
def format_numeric(val, tol=1e-5):
if isinstance(val, complex):
return str(val)
elif complex(val).imag != 0:
val = complex(val)
abs_val = abs(val)
if math.isclose(abs_val, 0.0, abs_tol=1e-100):
return '0'
if math.isclose(math.fmod(abs_val, 1.0),
0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:
return str(int(val))
if 0.1 <= abs_val < 100.0:
return '{:.2f}'.format(val)
return '{:.1e}'.format(val)
@staticmethod
def fraction(val, base=np.pi, n=100, tol=1e-5):
abs_val = abs(val)
for i in range(1, n):
for j in range(1, n):
if math.isclose(abs_val, i / j * base, rel_tol=tol):
if val < 0:
i *= -1
return fractions.Fraction(i, j)
return None
| 46,257
| 682
| 180
|
a7cf222e3f96762239244a7b076603c3ca2e33f3
| 946
|
py
|
Python
|
sjpClass.py
|
alkamid/wiktionary
|
ce242da609a1001ae7462b07da2f6e83f1a7281b
|
[
"MIT"
] | 3
|
2015-01-06T22:00:22.000Z
|
2016-08-14T08:07:32.000Z
|
sjpClass.py
|
alkamid/wiktionary
|
ce242da609a1001ae7462b07da2f6e83f1a7281b
|
[
"MIT"
] | 56
|
2015-07-12T10:21:38.000Z
|
2020-02-23T18:51:01.000Z
|
sjpClass.py
|
alkamid/wiktionary
|
ce242da609a1001ae7462b07da2f6e83f1a7281b
|
[
"MIT"
] | 2
|
2015-01-06T21:25:06.000Z
|
2018-01-17T12:03:17.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
| 27.823529
| 136
| 0.609937
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
class kategoriaSlowa():
def __init__(self, name, counter, pages, tabelka, outputFile):
self.name = name
self.counter = counter
self.pages = 'Wikipedysta:AlkamidBot/sjp/' + pages
self.buffer = ''
self.tabelka = tabelka
self.outputFile = 'output/' + outputFile
self.limit = 0
def addLimit(self, limit):
self.limit = limit
def checkHistory(pagename):
#returns 1 if AlkamidBot, Olafbot or PBbot was the last author, 0 if someone is verifying the page (i.e. it was last edited by someone else)
bots = ('AlkamidBot', 'Olafbot', 'PBbot')
site = pywikibot.Site()
page = pywikibot.Page(site, pagename)
try: page.get()
except pywikibot.NoPage:
return 1
else:
history = page.getVersionHistory()
if history[0][2] in bots:
return 1
else:
return 0
| 786
| 2
| 98
|
8b6ebb32e27f26c072b135c85ff8fb1b572ad23d
| 2,446
|
py
|
Python
|
tests/test_filters.py
|
mobius-medical/flask-genshi
|
68cba6c9cb604272a25f5e4c74e5a127e3ac7854
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_filters.py
|
mobius-medical/flask-genshi
|
68cba6c9cb604272a25f5e4c74e5a127e3ac7854
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_filters.py
|
mobius-medical/flask-genshi
|
68cba6c9cb604272a25f5e4c74e5a127e3ac7854
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from inspect import cleandoc
from genshi.filters import Transformer
from flask_genshi import render_template
from flatland.out.genshi import setup as flatland_setup
from flatland import Form, String
class FlatlandForm(Form):
    username = String
def test_applies_method_filters(app):
"""Method filters are applied for generated and rendered templates"""
with app.test_request_context():
genshi = app.extensions["genshi"]
        @genshi.filter("html")
        def prepend_title(template):
            return template | Transformer("head/title").prepend("Flask-Genshi - ")
        rendered = render_template("filter.html")
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Flask-Genshi - Hi!</title></head></html>
"""
)
assert rendered == expected
def test_filters_per_render(app):
"""Filters can be applied per rendering"""
with app.test_request_context():
rendered = render_template("filter.html", filter=prepend_title)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Hi! - Flask-Genshi</title></head></html>
"""
)
assert rendered == expected
def test_works_with_flatland(app):
"""Filters can take the context and support flatland"""
with app.test_request_context():
genshi = app.extensions["genshi"]
        @genshi.template_parsed
        def callback(template):
            flatland_setup(template)
        context = dict(form=FlatlandForm({"username": "dag"}))
rendered = render_template("flatland.html", context)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<input type="text" name="username" value="dag">
"""
)
assert rendered == expected
| 31.766234
| 102
| 0.629599
|
from __future__ import unicode_literals
from inspect import cleandoc
from genshi.filters import Transformer
from flask_genshi import render_template
from flatland.out.genshi import setup as flatland_setup
from flatland import Form, String
class FlatlandForm(Form):
username = String
def test_applies_method_filters(app):
"""Method filters are applied for generated and rendered templates"""
with app.test_request_context():
genshi = app.extensions["genshi"]
@genshi.filter("html")
def prepend_title(template):
return template | Transformer("head/title").prepend("Flask-Genshi - ")
rendered = render_template("filter.html")
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Flask-Genshi - Hi!</title></head></html>
"""
)
assert rendered == expected
def test_filters_per_render(app):
"""Filters can be applied per rendering"""
with app.test_request_context():
def prepend_title(template):
return template | Transformer("head/title").append(" - Flask-Genshi")
rendered = render_template("filter.html", filter=prepend_title)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Hi! - Flask-Genshi</title></head></html>
"""
)
assert rendered == expected
def test_works_with_flatland(app):
"""Filters can take the context and support flatland"""
with app.test_request_context():
genshi = app.extensions["genshi"]
@genshi.template_parsed
def callback(template):
flatland_setup(template)
context = dict(form=FlatlandForm({"username": "dag"}))
rendered = render_template("flatland.html", context)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<input type="text" name="username" value="dag">
"""
)
assert rendered == expected
| 218
| 27
| 114
|
978ff0be8e3774dfa21908c9b4b49bc92d1eeb4e
| 3,159
|
py
|
Python
|
forms/models.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 3
|
2018-02-27T13:48:28.000Z
|
2018-03-03T21:57:50.000Z
|
forms/models.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 6
|
2020-02-12T00:07:46.000Z
|
2022-03-11T23:25:59.000Z
|
forms/models.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 1
|
2019-03-26T20:19:57.000Z
|
2019-03-26T20:19:57.000Z
|
from django.db import models
from django import forms
from django.contrib.auth.models import User
from .form_dynamic import NominationForm
import json
FIELD_TYPES = (
('Short_answer', forms.CharField),
('Paragraph', forms.CharField),
('Integer', forms.IntegerField),
('ChoiceField', forms.ChoiceField),
('MultipleChoiceField', forms.MultipleChoiceField),
# ('Date', forms.DateField),
)
QUES_TYPES = (
('Short_answer', 'One Line Answer'),
('Paragraph', 'Multiple Line Answer'),
('Integer', 'Integer Answer'),
('ChoiceField', 'Choice'),
('MultipleChoiceField', 'Multiple-choice'),
# ('Date', 'date'),
)
| 28.981651
| 120
| 0.660336
|
from django.db import models
from django import forms
from django.contrib.auth.models import User
from .form_dynamic import NominationForm
import json
class Questionnaire(models.Model):
name = models.CharField(max_length=100, null=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def get_form(self, *args, **kwargs):
fields = []
for question in self.question_set.all():
field = question._get_formfield_class()
label = question.question
if question.required:
label = question.question + " *"
field_args = question._get_field_args()
ques_id = question.id
fields.append((label, field, field_args, ques_id))
return NominationForm(*args, extra=fields, **kwargs)
def add_answer(self, applicant, answer_data):
json_data = json.dumps(answer_data)
answerform = FilledForm(questionnaire=self, applicant=applicant,data=json_data)
answerform.save()
return answerform
FIELD_TYPES = (
('Short_answer', forms.CharField),
('Paragraph', forms.CharField),
('Integer', forms.IntegerField),
('ChoiceField', forms.ChoiceField),
('MultipleChoiceField', forms.MultipleChoiceField),
# ('Date', forms.DateField),
)
QUES_TYPES = (
('Short_answer', 'One Line Answer'),
('Paragraph', 'Multiple Line Answer'),
('Integer', 'Integer Answer'),
('ChoiceField', 'Choice'),
('MultipleChoiceField', 'Multiple-choice'),
# ('Date', 'date'),
)
class Question(models.Model):
questionnaire = models.ForeignKey(Questionnaire,on_delete=models.CASCADE, null=True)
question_type = models.CharField(max_length=50, choices=QUES_TYPES, null=True)
question = models.CharField(max_length=1000, null=True)
question_choices = models.TextField(max_length=600, null=True, blank=True, help_text='make new line for new option')
required = models.BooleanField(default=True)
def __unicode__(self):
return self.question
def __str__(self):
return self.question
def _get_formfield_class(self):
for index, field_class in FIELD_TYPES:
if self.question_type == index:
return field_class
def _get_field_args(self):
args = {}
if self.question_type == 'ChoiceField' or self.question_type == 'MultipleChoiceField':
args['choices'] = enumerate(self.question_choices.split('\n'))
if self.question_type == 'MultipleChoiceField':
args['widget']=forms.CheckboxSelectMultiple
if self.question_type == 'Paragraph':
args['widget'] =forms.Textarea
if self.required:
args['label_suffix'] = " *"
args.update({'required': self.required})
return args
class FilledForm(models.Model):
questionnaire = models.ForeignKey(Questionnaire,on_delete=models.CASCADE, null=True)
applicant = models.ForeignKey(User, null=True)
data = models.CharField(max_length=30000, null=True, blank=True)
def __str__(self):
return self.questionnaire.name
| 1,490
| 940
| 69
|
c910c09445a0e65ba2545dbe1c4a46731ae345b6
| 4,099
|
py
|
Python
|
degmo/data/datasets.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | 2
|
2019-11-21T15:50:59.000Z
|
2019-12-17T02:44:19.000Z
|
degmo/data/datasets.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | null | null | null |
degmo/data/datasets.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | 1
|
2021-07-02T05:49:29.000Z
|
2021-07-02T05:49:29.000Z
|
import numpy as np
import torch, torchvision, math
from torch.functional import F
import os
import PIL.Image as Image
from functools import partial
DATADIR = 'dataset/'
load_celeba32 = partial(load_celeba, image_size=32)
load_celeba64 = partial(load_celeba, image_size=64)
load_celeba128 = partial(load_celeba, image_size=128)
| 37.263636
| 112
| 0.68041
|
import numpy as np
import torch, torchvision, math
from torch.functional import F
import os
import PIL.Image as Image
from functools import partial
DATADIR = 'dataset/'
class ImageDataset(torch.utils.data.Dataset):
def __init__(self, root, transform=None):
super().__init__()
self.root = root
self.image_list = [os.path.join(root, filename) for filename in os.listdir(root)]
self.transform = transform
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
img = Image.open(self.image_list[index])
if self.transform:
img = self.transform(img)
return (img, )
def load_mnist(normalize=False):
config = {
"c" : 1,
"h" : 28,
"w" : 28,
}
transform = [torchvision.transforms.ToTensor()]
if normalize:
transform.append(torchvision.transforms.Normalize([0.5], [0.5]))
transform = torchvision.transforms.Compose(transform)
train_dataset = torchvision.datasets.MNIST(DATADIR, train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(DATADIR, train=False, download=True, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, (55000, 5000))
return (train_dataset, val_dataset, test_dataset, config)
def load_bmnist(normalize=False):
config = {
"c" : 1,
"h" : 28,
"w" : 28,
}
assert not normalize, "bmnist do not support normalize operation"
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: (x > 0).float()),
])
train_dataset = torchvision.datasets.MNIST(DATADIR, train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(DATADIR, train=False, download=True, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, (55000, 5000))
return (train_dataset, val_dataset, test_dataset, config)
def load_svhn(normalize=False):
config = {
"c" : 3,
"h" : 32,
"w" : 32,
}
transform = [torchvision.transforms.ToTensor()]
if normalize:
transform.append(torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = torchvision.transforms.Compose(transform)
train_dataset = torchvision.datasets.SVHN(DATADIR, split='train', download=True, transform=transform)
test_dataset = torchvision.datasets.SVHN(DATADIR, split='test', download=True, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, (len(train_dataset) - 5000, 5000))
return (train_dataset, val_dataset, test_dataset, config)
def load_cifar(normalize=False):
config = {
"c" : 3,
"h" : 32,
"w" : 32,
}
transform = [torchvision.transforms.ToTensor()]
if normalize:
transform.append(torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = torchvision.transforms.Compose(transform)
dataset = torchvision.datasets.CIFAR10(DATADIR, download=True, transform=transform)
return torch.utils.data.random_split(dataset, (40000, 5000, 5000)) + [config]
def load_celeba(image_size=128, normalize=False):
config = {
"c" : 3,
"h" : image_size,
"w" : image_size,
}
transform = [
torchvision.transforms.Resize(image_size),
torchvision.transforms.CenterCrop(image_size),
torchvision.transforms.ToTensor(),
]
if normalize:
transform.append(torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = torchvision.transforms.Compose(transform)
dataset = torchvision.datasets.CelebA(DATADIR, download=True, transform=transform)
return torch.utils.data.random_split(dataset, (len(dataset) - 2000, 1000, 1000)) + [config]
load_celeba32 = partial(load_celeba, image_size=32)
load_celeba64 = partial(load_celeba, image_size=64)
load_celeba128 = partial(load_celeba, image_size=128)
| 3,525
| 24
| 222
|
1a3578a56a4bccb214d3e2c35a83b6e6b51851e2
| 57,483
|
py
|
Python
|
basistheory/api/tenants_api.py
|
Basis-Theory/basistheory-python
|
5fd0f3d20fd07e8de45d6d5919e092c696049df1
|
[
"Apache-2.0"
] | null | null | null |
basistheory/api/tenants_api.py
|
Basis-Theory/basistheory-python
|
5fd0f3d20fd07e8de45d6d5919e092c696049df1
|
[
"Apache-2.0"
] | null | null | null |
basistheory/api/tenants_api.py
|
Basis-Theory/basistheory-python
|
5fd0f3d20fd07e8de45d6d5919e092c696049df1
|
[
"Apache-2.0"
] | null | null | null |
"""
Basis Theory API
## Getting Started * Sign-in to [Basis Theory](https://basistheory.com) and go to [Applications](https://portal.basistheory.com/applications) * Create a Basis Theory Server to Server Application * All permissions should be selected * Paste the API Key into the `BT-API-KEY` variable # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from basistheory.api_client import ApiClient, Endpoint as _Endpoint
from basistheory.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
set_request_options
)
from basistheory.model.create_tenant_invitation_request import CreateTenantInvitationRequest
from basistheory.model.problem_details import ProblemDetails
from basistheory.model.tenant import Tenant
from basistheory.model.tenant_invitation_response import TenantInvitationResponse
from basistheory.model.tenant_invitation_response_paginated_list import TenantInvitationResponsePaginatedList
from basistheory.model.tenant_invitation_status import TenantInvitationStatus
from basistheory.model.tenant_member_response_paginated_list import TenantMemberResponsePaginatedList
from basistheory.model.tenant_usage_report import TenantUsageReport
from basistheory.model.update_tenant_request import UpdateTenantRequest
from basistheory.model.validation_problem_details import ValidationProblemDetails
class TenantsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def create_invitation(
self,
**kwargs
):
"""create_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_invitation(async_req=True)
>>> result = thread.get()
Keyword Args:
create_tenant_invitation_request (CreateTenantInvitationRequest): [optional]
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.create_invitation_endpoint.call_with_http_info(**kwargs)
def delete(
self,
**kwargs
):
"""delete # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.delete_endpoint.call_with_http_info(**kwargs)
def delete_invitation(
self,
invitation_id,
**kwargs
):
"""delete_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.delete_invitation_endpoint.call_with_http_info(**kwargs)
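    # Usage sketch (illustrative): delete_invitation() above issues
    # DELETE /tenants/self/invitations/{invitationId} and returns None. The id below is a
    # placeholder; `api_client` is assumed to be a configured basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   tenants_api.delete_invitation("invitation-id")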
def delete_member(
self,
member_id,
**kwargs
):
"""delete_member # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_member(member_id, async_req=True)
>>> result = thread.get()
Args:
member_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['member_id'] = \
member_id
return self.delete_member_endpoint.call_with_http_info(**kwargs)
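    # Usage sketch (illustrative): delete_member() above issues
    # DELETE /tenants/self/members/{memberId} and returns None. The id below is a
    # placeholder; `api_client` is assumed to be a configured basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   tenants_api.delete_member("member-id")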
def get(
self,
**kwargs
):
"""get # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_endpoint.call_with_http_info(**kwargs)
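    # Usage sketch (illustrative): get() above fetches the current tenant from
    # GET /tenants/self. `api_client` is assumed to be a configured basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   tenant = tenants_api.get()  # -> Tenant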
def get_invitations(
self,
**kwargs
):
"""get_invitations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_invitations(async_req=True)
>>> result = thread.get()
Keyword Args:
status (TenantInvitationStatus): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_invitations_endpoint.call_with_http_info(**kwargs)
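    # Usage sketch (illustrative): get_invitations() above lists invitations from
    # GET /tenants/self/invitations with optional paging; `status` additionally filters by a
    # TenantInvitationStatus value (allowed values are not shown in this file). `api_client`
    # is assumed to be a configured basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   invitations = tenants_api.get_invitations(page=1, size=20)
    #   # -> TenantInvitationResponsePaginatedList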
def get_members(
self,
**kwargs
):
"""get_members # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_members(async_req=True)
>>> result = thread.get()
Keyword Args:
user_id ([str]): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantMemberResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_members_endpoint.call_with_http_info(**kwargs)
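    # Usage sketch (illustrative): get_members() above lists members from
    # GET /tenants/self/members; `user_id` takes a list of ids and is sent as a repeated
    # ("multi") query parameter. `api_client` is assumed to be a configured ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   members = tenants_api.get_members(page=1, size=20)
    #   # -> TenantMemberResponsePaginatedList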
def get_tenant_operation_report(
self,
**kwargs
):
"""get_tenant_operation_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_operation_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_operation_report_endpoint.call_with_http_info(**kwargs)
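    # Usage sketch (illustrative): get_tenant_operation_report() above reads
    # GET /tenants/self/reports/operations. `api_client` is assumed to be a configured
    # basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   operations_report = tenants_api.get_tenant_operation_report()  # -> TenantUsageReport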
def get_tenant_usage_report(
self,
**kwargs
):
"""get_tenant_usage_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_usage_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_usage_report_endpoint.call_with_http_info(**kwargs)
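    # Usage sketch (illustrative): get_tenant_usage_report() above reads
    # GET /tenants/self/reports/usage. `api_client` is assumed to be a configured
    # basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   usage_report = tenants_api.get_tenant_usage_report()  # -> TenantUsageReport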
def resend_invitation(
self,
invitation_id,
**kwargs
):
"""resend_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resend_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.resend_invitation_endpoint.call_with_http_info(**kwargs)
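    # Usage sketch (illustrative): resend_invitation() above posts to
    # /tenants/self/invitations/{invitationId}/resend. The id below is a placeholder;
    # `api_client` is assumed to be a configured basistheory ApiClient.
    #
    #   tenants_api = TenantsApi(api_client)
    #   invitation = tenants_api.resend_invitation("invitation-id")  # -> TenantInvitationResponse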
def update(
self,
**kwargs
):
"""update # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(async_req=True)
>>> result = thread.get()
Keyword Args:
update_tenant_request (UpdateTenantRequest): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.update_endpoint.call_with_http_info(**kwargs)
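    # Usage sketch (illustrative): update() above issues PUT /tenants/self. The `name`
    # field on UpdateTenantRequest is an assumed field name used only for illustration;
    # `api_client` is assumed to be a configured basistheory ApiClient.
    #
    #   from basistheory.model.update_tenant_request import UpdateTenantRequest
    #   tenants_api = TenantsApi(api_client)
    #   updated = tenants_api.update(update_tenant_request=UpdateTenantRequest(name="My Tenant"))
    #   # -> Tenant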
| 37.768068
| 300
| 0.517736
|
"""
Basis Theory API
## Getting Started * Sign-in to [Basis Theory](https://basistheory.com) and go to [Applications](https://portal.basistheory.com/applications) * Create a Basis Theory Server to Server Application * All permissions should be selected * Paste the API Key into the `BT-API-KEY` variable # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from basistheory.api_client import ApiClient, Endpoint as _Endpoint
from basistheory.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
set_request_options
)
from basistheory.model.create_tenant_invitation_request import CreateTenantInvitationRequest
from basistheory.model.problem_details import ProblemDetails
from basistheory.model.tenant import Tenant
from basistheory.model.tenant_invitation_response import TenantInvitationResponse
from basistheory.model.tenant_invitation_response_paginated_list import TenantInvitationResponsePaginatedList
from basistheory.model.tenant_invitation_status import TenantInvitationStatus
from basistheory.model.tenant_member_response_paginated_list import TenantMemberResponsePaginatedList
from basistheory.model.tenant_usage_report import TenantUsageReport
from basistheory.model.update_tenant_request import UpdateTenantRequest
from basistheory.model.validation_problem_details import ValidationProblemDetails
class TenantsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_invitation_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponse,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations',
'operation_id': 'create_invitation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_tenant_invitation_request',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_tenant_invitation_request':
(CreateTenantInvitationRequest,),
},
'attribute_map': {
},
'location_map': {
'create_tenant_invitation_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_invitation_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations/{invitationId}',
'operation_id': 'delete_invitation',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'invitation_id',
'request_options'
],
'required': [
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'invitation_id':
(str,),
},
'attribute_map': {
'invitation_id': 'invitationId',
},
'location_map': {
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_member_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/members/{memberId}',
'operation_id': 'delete_member',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'member_id',
'request_options'
],
'required': [
'member_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'member_id':
(str,),
},
'attribute_map': {
'member_id': 'memberId',
},
'location_map': {
'member_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_endpoint = _Endpoint(
settings={
'response_type': (Tenant,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_invitations_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponsePaginatedList,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations',
'operation_id': 'get_invitations',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'status',
'page',
'size',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'status':
(TenantInvitationStatus,),
'page':
(int,),
'size':
(int,),
},
'attribute_map': {
'status': 'status',
'page': 'page',
'size': 'size',
},
'location_map': {
'status': 'query',
'page': 'query',
'size': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_members_endpoint = _Endpoint(
settings={
'response_type': (TenantMemberResponsePaginatedList,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/members',
'operation_id': 'get_members',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'page',
'size',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'user_id':
([str],),
'page':
(int,),
'size':
(int,),
},
'attribute_map': {
'user_id': 'user_id',
'page': 'page',
'size': 'size',
},
'location_map': {
'user_id': 'query',
'page': 'query',
'size': 'query',
},
'collection_format_map': {
'user_id': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_tenant_operation_report_endpoint = _Endpoint(
settings={
'response_type': (TenantUsageReport,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/reports/operations',
'operation_id': 'get_tenant_operation_report',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_tenant_usage_report_endpoint = _Endpoint(
settings={
'response_type': (TenantUsageReport,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/reports/usage',
'operation_id': 'get_tenant_usage_report',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.resend_invitation_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponse,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations/{invitationId}/resend',
'operation_id': 'resend_invitation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'invitation_id',
'request_options'
],
'required': [
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'invitation_id':
(str,),
},
'attribute_map': {
'invitation_id': 'invitationId',
},
'location_map': {
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_endpoint = _Endpoint(
settings={
'response_type': (Tenant,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'update',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'update_tenant_request',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'update_tenant_request':
(UpdateTenantRequest,),
},
'attribute_map': {
},
'location_map': {
'update_tenant_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_invitation(
self,
**kwargs
):
"""create_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_invitation(async_req=True)
>>> result = thread.get()
Keyword Args:
create_tenant_invitation_request (CreateTenantInvitationRequest): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.create_invitation_endpoint.call_with_http_info(**kwargs)
def delete(
self,
**kwargs
):
"""delete # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.delete_endpoint.call_with_http_info(**kwargs)
def delete_invitation(
self,
invitation_id,
**kwargs
):
"""delete_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.delete_invitation_endpoint.call_with_http_info(**kwargs)
def delete_member(
self,
member_id,
**kwargs
):
"""delete_member # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_member(member_id, async_req=True)
>>> result = thread.get()
Args:
member_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['member_id'] = \
member_id
return self.delete_member_endpoint.call_with_http_info(**kwargs)
def get(
self,
**kwargs
):
"""get # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_endpoint.call_with_http_info(**kwargs)
def get_invitations(
self,
**kwargs
):
"""get_invitations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_invitations(async_req=True)
>>> result = thread.get()
Keyword Args:
status (TenantInvitationStatus): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_invitations_endpoint.call_with_http_info(**kwargs)
def get_members(
self,
**kwargs
):
"""get_members # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_members(async_req=True)
>>> result = thread.get()
Keyword Args:
user_id ([str]): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantMemberResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_members_endpoint.call_with_http_info(**kwargs)
def get_tenant_operation_report(
self,
**kwargs
):
"""get_tenant_operation_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_operation_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_operation_report_endpoint.call_with_http_info(**kwargs)
def get_tenant_usage_report(
self,
**kwargs
):
"""get_tenant_usage_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_usage_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return only the response data, without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_usage_report_endpoint.call_with_http_info(**kwargs)
def resend_invitation(
self,
invitation_id,
**kwargs
):
"""resend_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resend_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data only, without
                the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.resend_invitation_endpoint.call_with_http_info(**kwargs)
def update(
self,
**kwargs
):
"""update # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(async_req=True)
>>> result = thread.get()
Keyword Args:
update_tenant_request (UpdateTenantRequest): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): return the response data only, without
                the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.update_endpoint.call_with_http_info(**kwargs)
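A brief usage sketch for the generated endpoint methods above. The package name, the Configuration/ApiClient classes, and the TenantApi class name are assumptions based on the usual openapi-generator layout and are not defined in this excerpt; only the method names and the keyword arguments (async_req, _request_timeout) come from the code itself.
# Hypothetical usage sketch; names marked as assumed are illustrative only.
import generated_client                                   # assumed package name
from generated_client.api import tenant_api               # assumed module path

configuration = generated_client.Configuration(host="https://api.example.com")  # placeholder host
with generated_client.ApiClient(configuration) as api_client:
    api = tenant_api.TenantApi(api_client)                # assumed class name
    # Synchronous call with an explicit timeout (keyword arguments documented above).
    usage = api.get_tenant_usage_report(_request_timeout=30)
    # Asynchronous call: returns a thread-like object; retrieve the result with .get().
    thread = api.update(async_req=True)
    tenant = thread.get()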
| 16,455
| 0
| 27
|
2b5c325a1726de056d5d1198acc3940aef23c363
| 3,454
|
py
|
Python
|
custom_components/nwsradar/config_flow.py
|
MatthewFlamm/ha_nws_radar
|
f039bf1abb94a48232599746f80d4c7e4af35de7
|
[
"MIT"
] | 21
|
2019-07-18T23:38:22.000Z
|
2021-01-08T01:14:44.000Z
|
custom_components/nwsradar/config_flow.py
|
MatthewFlamm/ha_nws_radar
|
f039bf1abb94a48232599746f80d4c7e4af35de7
|
[
"MIT"
] | 7
|
2019-09-06T13:14:49.000Z
|
2020-12-18T17:49:34.000Z
|
custom_components/nwsradar/config_flow.py
|
MatthewFlamm/ha_nws_radar
|
f039bf1abb94a48232599746f80d4c7e4af35de7
|
[
"MIT"
] | 2
|
2019-07-26T21:23:59.000Z
|
2020-01-14T23:03:12.000Z
|
"""Config flow for National Weather Service (NWS) integration."""
import logging
import voluptuous as vol
from nws_radar.nws_radar_mosaic import REGIONS
from homeassistant import config_entries
from . import unique_id
# pylint: disable=unused-import
from .const import (
CONF_LOOP,
CONF_STATION,
CONF_STYLE,
STYLES,
CONF_TYPE,
RADAR_TYPES,
DEFAULT_RADAR_TYPE,
CONF_NAME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for National Weather Service (NWS)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
self._config = user_input # pylint: disable=attribute-defined-outside-init
if user_input[CONF_STYLE] in {"Standard", "Enhanced"}:
return await self.async_step_standard_enhanced()
# Mosaic
return await self.async_step_mosaic()
data_schema = vol.Schema(
{
vol.Required(CONF_STYLE): vol.In(STYLES),
}
)
return self.async_show_form(
step_id="user", data_schema=data_schema, errors=errors
)
async def async_step_standard_enhanced(self, user_input=None):
"""Standard or enhanced step."""
errors = {}
if user_input is not None:
self._config.update(user_input)
self._config[CONF_STATION] = self._config[CONF_STATION].upper()
title = unique_id(self._config)
self._config[CONF_NAME] = None
await self.async_set_unique_id(unique_id(self._config))
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=self._config)
data_schema = vol.Schema(
{
vol.Required(CONF_STATION): str,
vol.Required(CONF_LOOP, default=True): bool,
vol.Required(CONF_TYPE, default=DEFAULT_RADAR_TYPE): vol.In(
RADAR_TYPES.keys()
),
}
)
return self.async_show_form(
step_id="standard_enhanced", data_schema=data_schema, errors=errors
)
async def async_step_mosaic(self, user_input=None):
"""Mosaic step."""
errors = {}
if user_input is not None:
self._config.update(user_input)
self._config[CONF_TYPE] = ""
self._config[CONF_NAME] = None
title = unique_id(self._config)
await self.async_set_unique_id(title)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=self._config)
data_schema = vol.Schema(
{
vol.Required(CONF_STATION): vol.In(REGIONS),
vol.Required(CONF_LOOP, default=True): bool,
}
)
return self.async_show_form(
step_id="mosaic", data_schema=data_schema, errors=errors
)
async def async_step_import(self, user_input=None):
"""Import an entry from yaml."""
title = unique_id(user_input)
await self.async_set_unique_id(title)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=user_input)
| 32.895238
| 87
| 0.621888
|
"""Config flow for National Weather Service (NWS) integration."""
import logging
import voluptuous as vol
from nws_radar.nws_radar_mosaic import REGIONS
from homeassistant import config_entries
from . import unique_id
# pylint: disable=unused-import
from .const import (
CONF_LOOP,
CONF_STATION,
CONF_STYLE,
STYLES,
CONF_TYPE,
RADAR_TYPES,
DEFAULT_RADAR_TYPE,
CONF_NAME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for National Weather Service (NWS)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
self._config = user_input # pylint: disable=attribute-defined-outside-init
if user_input[CONF_STYLE] in {"Standard", "Enhanced"}:
return await self.async_step_standard_enhanced()
# Mosaic
return await self.async_step_mosaic()
data_schema = vol.Schema(
{
vol.Required(CONF_STYLE): vol.In(STYLES),
}
)
return self.async_show_form(
step_id="user", data_schema=data_schema, errors=errors
)
async def async_step_standard_enhanced(self, user_input=None):
"""Standard or enhanced step."""
errors = {}
if user_input is not None:
self._config.update(user_input)
self._config[CONF_STATION] = self._config[CONF_STATION].upper()
title = unique_id(self._config)
self._config[CONF_NAME] = None
await self.async_set_unique_id(unique_id(self._config))
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=self._config)
data_schema = vol.Schema(
{
vol.Required(CONF_STATION): str,
vol.Required(CONF_LOOP, default=True): bool,
vol.Required(CONF_TYPE, default=DEFAULT_RADAR_TYPE): vol.In(
RADAR_TYPES.keys()
),
}
)
return self.async_show_form(
step_id="standard_enhanced", data_schema=data_schema, errors=errors
)
async def async_step_mosaic(self, user_input=None):
"""Mosaic step."""
errors = {}
if user_input is not None:
self._config.update(user_input)
self._config[CONF_TYPE] = ""
self._config[CONF_NAME] = None
title = unique_id(self._config)
await self.async_set_unique_id(title)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=self._config)
data_schema = vol.Schema(
{
vol.Required(CONF_STATION): vol.In(REGIONS),
vol.Required(CONF_LOOP, default=True): bool,
}
)
return self.async_show_form(
step_id="mosaic", data_schema=data_schema, errors=errors
)
async def async_step_import(self, user_input=None):
"""Import an entry from yaml."""
title = unique_id(user_input)
await self.async_set_unique_id(title)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=user_input)
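For orientation, a sketch of the user_input dictionaries Home Assistant would pass into the steps above. The keys are the constants imported at the top of the file; the station, radar-type, and region string values are placeholders and are not taken from the component's real option lists.
# Illustrative only; the string values below are placeholders.
from custom_components.nwsradar.const import CONF_LOOP, CONF_STATION, CONF_STYLE, CONF_TYPE

user_step_input = {CONF_STYLE: "Standard"}           # one of STYLES; "Standard"/"Enhanced" or a mosaic style
standard_step_input = {CONF_STATION: "ktlx",         # upper-cased by the flow
                       CONF_LOOP: True,
                       CONF_TYPE: "NCR"}             # must be a key of RADAR_TYPES
mosaic_step_input = {CONF_STATION: "CENTGRLAKES",    # must be one of REGIONS
                     CONF_LOOP: True}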
| 0
| 0
| 0
|
e288a19792ad0dab33c86bbab35d030926f6a073
| 2,944
|
py
|
Python
|
xinci/model.py
|
Lapis-Hong/xinci
|
9234ef6e426dfa282c334ff79f4f76b475eb10f3
|
[
"MIT"
] | 23
|
2018-06-18T15:35:47.000Z
|
2021-07-28T02:19:16.000Z
|
xinci/model.py
|
Lapis-Hong/xinci
|
9234ef6e426dfa282c334ff79f4f76b475eb10f3
|
[
"MIT"
] | null | null | null |
xinci/model.py
|
Lapis-Hong/xinci
|
9234ef6e426dfa282c334ff79f4f76b475eb10f3
|
[
"MIT"
] | 10
|
2018-06-20T07:01:17.000Z
|
2020-08-31T15:56:24.000Z
|
#!/usr/bin/env python
# coding: utf-8
# @Author: lapis-hong
# @Date : 2018/6/17
"""This module contains the main algorithm for chinese word extraction.
criterion 1:
solid rate
criterion 2:
character entropy
"""
from __future__ import unicode_literals
from __future__ import division
import math
from indexer import CnTextIndexer
from utils import WordCountDict
class EntropyJudger:
"""Use entropy and solid rate to judge whether a candidate is a chinese word or not."""
def __init__(self, document, least_cnt_threshold=5, solid_rate_threshold=0.018, entropy_threshold=1.92):
"""
Args:
            least_cnt_threshold: minimum number of occurrences; a candidate appearing fewer
                times than this cannot pass the judge.
            solid_rate_threshold: p(candidate)/p(candidate[0]) * p(candidate)/p(candidate[1]) * ...
            entropy_threshold: min(left_char_entropy, right_char_entropy). The smaller this value is,
                the more new words you will get, but with less accuracy.
"""
self._least_cnt_threshold = least_cnt_threshold
self._solid_rate_threshold = solid_rate_threshold
self._entropy_threshold = entropy_threshold
self._indexer = CnTextIndexer(document)
| 37.74359
| 108
| 0.66712
|
#!/usr/bin/env python
# coding: utf-8
# @Author: lapis-hong
# @Date : 2018/6/17
"""This module contains the main algorithm for chinese word extraction.
criterion 1:
solid rate
criterion 2:
character entropy
"""
from __future__ import unicode_literals
from __future__ import division
import math
from indexer import CnTextIndexer
from utils import WordCountDict
class EntropyJudger:
"""Use entropy and solid rate to judge whether a candidate is a chinese word or not."""
def __init__(self, document, least_cnt_threshold=5, solid_rate_threshold=0.018, entropy_threshold=1.92):
"""
Args:
            least_cnt_threshold: minimum number of occurrences; a candidate appearing fewer
                times than this cannot pass the judge.
            solid_rate_threshold: p(candidate)/p(candidate[0]) * p(candidate)/p(candidate[1]) * ...
            entropy_threshold: min(left_char_entropy, right_char_entropy). The smaller this value is,
                the more new words you will get, but with less accuracy.
"""
self._least_cnt_threshold = least_cnt_threshold
self._solid_rate_threshold = solid_rate_threshold
self._entropy_threshold = entropy_threshold
self._indexer = CnTextIndexer(document)
def judge(self, candidate):
solid_rate = self._get_solid_rate(candidate)
entropy = self._get_entropy(candidate)
if solid_rate < self._solid_rate_threshold or entropy < self._entropy_threshold:
return False
return True
def _get_solid_rate(self, candidate):
if len(candidate) < 2:
return 1.0
cnt = self._indexer.count(candidate) # candidate count in document
if cnt < self._least_cnt_threshold: # least count to be a word
return 0.0
rate = 1.0
for c in candidate:
rate *= cnt / self._indexer.char_cnt_map[c] # candidate character count in document
return math.pow(rate, 1/float(len(candidate))) * math.sqrt(len(candidate)) # interesting
def _get_entropy(self, candidate):
left_char_dic = WordCountDict()
right_char_dic = WordCountDict()
candidate_pos_generator = self._indexer.find(candidate)
for pos in candidate_pos_generator:
c = self._indexer[pos-1]
left_char_dic.add(c)
c = self._indexer[pos+len(candidate)]
right_char_dic.add(c)
previous_total_char_cnt = left_char_dic.count()
next_total_char_cnt = right_char_dic.count()
previous_entropy = 0.0
next_entropy = 0.0
for char, count in left_char_dic.items(): # efficient
prob = count / previous_total_char_cnt
previous_entropy -= prob * math.log(prob)
for char, count in right_char_dic.items():
prob = count / next_total_char_cnt
next_entropy -= prob * math.log(prob)
        return min(previous_entropy, next_entropy)  # return the smaller of the left and right entropies
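A minimal usage sketch for EntropyJudger, assuming CnTextIndexer accepts a plain unicode string as the document (the constructor above only forwards it). The corpus path and candidate words are placeholders; the thresholds are the defaults defined above.
# -*- coding: utf-8 -*-
# Hypothetical usage sketch; corpus path and candidate words are placeholders.
import io
document = io.open("corpus.txt", encoding="utf-8").read()   # some Chinese text corpus
judger = EntropyJudger(document,
                       least_cnt_threshold=5,
                       solid_rate_threshold=0.018,
                       entropy_threshold=1.92)
for candidate in [u"蝴蝶", u"蝴蝶效应"]:
    print("%s -> %s" % (candidate, judger.judge(candidate)))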
| 1,657
| 0
| 81
|
86c4428f80dd80644e84963f60d1a11c38e4a4c2
| 561
|
py
|
Python
|
Machine_Learning/ZCSNumpy.py
|
ZuoCaiSong/Python
|
137d1c4c79f9594b9bc2c7dc7728246e697f1329
|
[
"MIT"
] | null | null | null |
Machine_Learning/ZCSNumpy.py
|
ZuoCaiSong/Python
|
137d1c4c79f9594b9bc2c7dc7728246e697f1329
|
[
"MIT"
] | null | null | null |
Machine_Learning/ZCSNumpy.py
|
ZuoCaiSong/Python
|
137d1c4c79f9594b9bc2c7dc7728246e697f1329
|
[
"MIT"
] | null | null | null |
#! usr/bin/env python
# -*- coding:utf-8 -*-
"""
Basics
"""
from numpy import *
'''
NumPy's main object is the homogeneous multidimensional array:
a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers.
'''
'''
In NumPy, dimensions are called axes, and the number of axes is called the rank.
'''
#eg:
'''
The coordinates of a point in 3D space, [1,2,3],
form an array of rank 1: it has a single axis, and that axis has a length of 3.
'''
# Note: written literally like this it is a plain list, not an ndarray; it is only used here to illustrate rank.
arr2 = [[1,0,0],
[0,1,0]]
'''
arr2 has rank 2 (it has two dimensions).
'''
'''
NumPy's array class is called ndarray, and is usually just called "array".
Note that numpy.array is not the same as the standard Python library class array.array,
which only handles one-dimensional arrays and provides less functionality.
'''
# Create a NumPy object
a = arange(15).reshape(3, 5)
print a
# Number of axes (rank) of the array
print "the rank of a is", a.ndim
help(ndim)
| 13.357143
| 44
| 0.682709
|
#! usr/bin/env python
# -*- coding:utf-8 -*-
"""
Basics
"""
from numpy import *
'''
NumPy's main object is the homogeneous multidimensional array:
a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers.
'''
'''
In NumPy, dimensions are called axes, and the number of axes is called the rank.
'''
#eg:
'''
The coordinates of a point in 3D space, [1,2,3],
form an array of rank 1: it has a single axis, and that axis has a length of 3.
'''
# Note: written literally like this it is a plain list, not an ndarray; it is only used here to illustrate rank.
arr2 = [[1,0,0],
[0,1,0]]
'''
arr2 has rank 2 (it has two dimensions).
'''
'''
NumPy's array class is called ndarray, and is usually just called "array".
Note that numpy.array is not the same as the standard Python library class array.array,
which only handles one-dimensional arrays and provides less functionality.
'''
# Create a NumPy object
a = arange(15).reshape(3, 5)
print a
# Number of axes (rank) of the array
print "the rank of a is", a.ndim
help(ndim)
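The same example restated for Python 3 with an explicit namespace import, which is the more common idiom today; the array is identical to the one built above.
import numpy as np

a = np.arange(15).reshape(3, 5)
print(a)
print("number of axes (rank):", a.ndim)   # 2
print("shape:", a.shape)                  # (3, 5)
print("dtype:", a.dtype)                  # platform dependent, e.g. int64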
| 0
| 0
| 0
|
077630693a28af4ea5bf434f4de1bcb506757b3e
| 1,696
|
py
|
Python
|
tests/unit/test_fileclient.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:21.000Z
|
2020-01-02T09:03:21.000Z
|
tests/unit/test_fileclient.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_fileclient.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:24.000Z
|
2020-01-02T09:03:24.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
import errno
# Import Salt Testing libs
from tests.support.mock import patch, Mock
from tests.support.unit import TestCase
# Import Salt libs
from salt.ext.six.moves import range
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EROFS)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
| 32.615385
| 82
| 0.628538
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
import errno
# Import Salt Testing libs
from tests.support.mock import patch, Mock
from tests.support.unit import TestCase
# Import Salt libs
from salt.ext.six.moves import range
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EROFS)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
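The _fake_makedir helper above is an instance of a general unittest.mock pattern: a Mock whose side_effect raises a chosen OSError, patched in place of os.makedirs. A standalone sketch of that pattern, using only the standard library rather than Salt's test support wrappers:
import errno
import os
from unittest.mock import Mock, patch


def fake_makedirs(num=errno.EEXIST):
    def _side_effect(*args, **kwargs):
        raise OSError(num, 'Errno {0}'.format(num))
    return Mock(side_effect=_side_effect)


with patch('os.makedirs', fake_makedirs()):
    try:
        os.makedirs('/tmp/anything')
    except OSError as exc:
        print('raised as expected:', exc.errno == errno.EEXIST)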
| 166
| 0
| 27
|
9e5a009aa9aeb584ea41f3acb660d59e05af5898
| 11,072
|
py
|
Python
|
prepare_datasets.py
|
Jakob-Bach/Meta-Learning-Feature-Importance
|
089e5c7a5be91307f747e00b38b1567386fbee16
|
[
"MIT"
] | null | null | null |
prepare_datasets.py
|
Jakob-Bach/Meta-Learning-Feature-Importance
|
089e5c7a5be91307f747e00b38b1567386fbee16
|
[
"MIT"
] | null | null | null |
prepare_datasets.py
|
Jakob-Bach/Meta-Learning-Feature-Importance
|
089e5c7a5be91307f747e00b38b1567386fbee16
|
[
"MIT"
] | null | null | null |
"""Prepare datasets
Script that:
- downloads, pre-processes, and saves base datasets from OpenML
- computes meta-features
- computes meta-targets (combining feature-importance measures and base models)
- saves the meta-datasets
Usage: python -m prepare_datasets --help
"""
import argparse
import multiprocessing
import pathlib
from typing import Collection, Dict, Optional, Sequence, Union
import warnings
import numpy as np
import openml
import pandas as pd
import sklearn.preprocessing
import tqdm
import data_utility
import meta_features
import meta_targets
# Download one base dataset with the given "data_id" from OpenML and store it in X, y format in
# "base_data_dir", all columns made numeric. Note that the method might throw an exception if
# OpenML is not able to retrieve the dataset.
# Download OpenML datasets and store them in "base_data_dir". Either retrieve base datasets by
# "data_ids" or search according to fixed dataset characteristics. The latter was done for the
# paper, but the datasets matching the characteristics can change in future.
# Compute all meta-features for one base dataset with "base_dataset_name", located in
# "base_data_dir", and store the resulting meta-data in "meta_data_dir"
# For each base dataset from "base_data_dir", compute all meta-features. Save the resulting
# meta-data into "meta_data_dir".
# Compute one meta-target, i.e., apply one importance measure and one base model to one base
# dataset. Return the actual meta-target (numeric feature importances) and some information
# identifying it.
# For each base dataset from "base_data_dir", compute all meta-targets, i.e., all
# feature-importance measures for all base models. Save the resulting meta-data into
# "meta_data_dir".
# Parse command-line arguments and prepare base + meta datasets.
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Retrieves base datasets from OpenML, creates meta-datasets ' +
'and stores all these data.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', '--base_data_dir', type=pathlib.Path, default='data/base_datasets/',
help='Directory to store base datasets. Will be created if necessary.')
parser.add_argument('-i', '--data_ids', type=int, default=[], nargs='*',
help='Ids of OpenML datasets. If none provided, will search for datasets.')
parser.add_argument('-m', '--meta_data_dir', type=pathlib.Path, default='data/meta_datasets/',
help='Directory to store meta-datasets. Will be created if necessary.')
parser.add_argument('-p', '--n_processes', type=int, default=None,
help='Number of processes for multi-processing (default: all cores).')
args = parser.parse_args()
prepare_base_datasets(base_data_dir=args.base_data_dir, data_ids=args.data_ids)
prepare_meta_features(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
prepare_meta_targets(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
| 58.582011
| 110
| 0.694906
|
"""Prepare datasets
Script that:
- downloads, pre-processes, and saves base datasets from OpenML
- computes meta-features
- computes meta-targets (combining feature-importance measures and base models)
- saves the meta-datasets
Usage: python -m prepare_datasets --help
"""
import argparse
import multiprocessing
import pathlib
from typing import Collection, Dict, Optional, Sequence, Union
import warnings
import numpy as np
import openml
import pandas as pd
import sklearn.preprocessing
import tqdm
import data_utility
import meta_features
import meta_targets
# Download one base dataset with the given "data_id" from OpenML and store it in X, y format in
# "base_data_dir", all columns made numeric. Note that the method might throw an exception if
# OpenML is not able to retrieve the dataset.
def download_base_dataset(data_id: int, base_data_dir: pathlib.Path) -> None:
dataset = openml.datasets.get_dataset(dataset_id=data_id, download_data=True)
X, y, _, _ = dataset.get_data(target=dataset.default_target_attribute)
non_numeric_features = [x.name for x in dataset.features.values()
if (x.name in X.columns) and (x.data_type != 'numeric')]
X[non_numeric_features] = sklearn.preprocessing.OrdinalEncoder(dtype=int).fit_transform(
X=X[non_numeric_features])
assert all(np.issubdtype(X[feature].dtype, np.number) for feature in X.columns)
y = pd.Series(sklearn.preprocessing.LabelEncoder().fit_transform(y=y), name=y.name)
data_utility.save_dataset(X=X, y=y, dataset_name=dataset.name, directory=base_data_dir)
# Download OpenML datasets and store them in "base_data_dir". Either retrieve base datasets by
# "data_ids" or search according to fixed dataset characteristics. The latter was done for the
# paper, but the datasets matching the characteristics can change in future.
def prepare_base_datasets(base_data_dir: pathlib.Path, data_ids: Optional[Sequence[int]] = None) -> None:
print('Base dataset preparation started.')
if not base_data_dir.is_dir():
print('Base-dataset directory does not exist. We create it.')
base_data_dir.mkdir(parents=True)
if any(base_data_dir.iterdir()):
print('Base-dataset directory is not empty. Files might be overwritten, but not deleted.')
dataset_overview = openml.datasets.list_datasets(status='active', output_format='dataframe')
if (data_ids is None) or (len(data_ids) == 0):
dataset_overview = dataset_overview[
(dataset_overview['NumberOfClasses'] == 2) & # binary classification
(dataset_overview['NumberOfInstances'] >= 1000) &
(dataset_overview['NumberOfInstances'] <= 10000) &
(dataset_overview['NumberOfMissingValues'] == 0)
]
# Pick latest version of each dataset:
dataset_overview = dataset_overview.sort_values(by='version').groupby('name').last().reset_index()
# Pick the same amount of datasets from different categories regarding number of features:
feature_number_groups = [(6, 11), (12, 26), (27, 51)] # list of (lower, upper); count includes target
num_datasets_per_group = 20
data_ids = []
with tqdm.tqdm(total=(len(feature_number_groups) * num_datasets_per_group),
desc='Downloading datasets') as progress_bar:
for lower, upper in feature_number_groups:
current_datasets = dataset_overview[(dataset_overview['NumberOfFeatures'] >= lower) &
(dataset_overview['NumberOfFeatures'] <= upper)]
successful_downloads = 0
current_position = 0 # ... in the table of datasets
while successful_downloads < num_datasets_per_group:
data_id = int(current_datasets['did'].iloc[current_position])
try:
download_base_dataset(data_id=data_id, base_data_dir=base_data_dir)
data_ids.append(data_id)
successful_downloads += 1
progress_bar.update()
except Exception: # OpenML does not specify exception type for get_dataset()
pass
finally: # in any case, move on to next dataset
current_position += 1
else:
print('Using given dataset ids.')
for data_id in tqdm.tqdm(data_ids, desc='Downloading datasets'):
try:
download_base_dataset(data_id=data_id, base_data_dir=base_data_dir)
except Exception: # OpenML does not specify exception type for get_dataset()
warnings.warn(f'Download of dataset {data_id} failed.')
dataset_overview[dataset_overview['did'].isin(data_ids)].to_csv(
base_data_dir / '_dataset_overview.csv', index=False)
print('Base datasets prepared and saved.')
# Compute all meta-features for one base dataset with "base_dataset_name", located in
# "base_data_dir", and store the resulting meta-data in "meta_data_dir"
def compute_and_save_meta_features(base_data_dir: pathlib.Path, base_dataset_name: str,
meta_data_dir: pathlib.Path) -> None:
X, y = data_utility.load_dataset(dataset_name=base_dataset_name, directory=base_data_dir)
result = meta_features.compute_meta_features(X=X, y=y)
data_utility.save_dataset(dataset_name=base_dataset_name, directory=meta_data_dir, X=result)
# For each base dataset from "base_data_dir", compute all meta-features. Save the resulting
# meta-data into "meta_data_dir".
def prepare_meta_features(base_data_dir: pathlib.Path, meta_data_dir: pathlib.Path,
n_processes: Optional[int] = None) -> None:
print('Meta-feature preparation started.')
base_datasets = data_utility.list_datasets(directory=base_data_dir)
with tqdm.tqdm(total=(len(base_datasets)), desc='Computing meta-features') as progress_bar:
with multiprocessing.Pool(processes=n_processes) as process_pool:
results = [process_pool.apply_async(compute_and_save_meta_features, kwds={
'base_data_dir': base_data_dir, 'base_dataset_name': base_dataset_name,
'meta_data_dir': meta_data_dir}, callback=lambda x: progress_bar.update())
for base_dataset_name in base_datasets]
[x.wait() for x in results] # don't need to return value here, just wait till finished
print('Meta-features prepared and saved.')
# Compute one meta-target, i.e., apply one importance measure and one base model to one base
# dataset. Return the actual meta-target (numeric feature importances) and some information
# identifying it.
def compute_meta_target(base_data_dir: pathlib.Path, base_dataset_name: str, base_model_name: str,
importance_measure_name: str) -> Dict[str, Union[str, Collection[float]]]:
result = {'base_dataset': base_dataset_name, 'base_model': base_model_name,
'importance_measure': importance_measure_name}
X, y = data_utility.load_dataset(dataset_name=base_dataset_name, directory=base_data_dir)
importance_type = meta_targets.IMPORTANCE_MEASURES[importance_measure_name]
base_model_func = meta_targets.BASE_MODELS[base_model_name]['func']
base_model_args = meta_targets.BASE_MODELS[base_model_name]['args']
result['values'] = importance_type.compute_importance(X=X, y=y, model_func=base_model_func,
model_args=base_model_args)
return result
# For each base dataset from "base_data_dir", compute all meta-targets, i.e., all
# feature-importance measures for all base models. Save the resulting meta-data into
# "meta_data_dir".
def prepare_meta_targets(base_data_dir: pathlib.Path, meta_data_dir: pathlib.Path,
n_processes: Optional[int] = None) -> None:
print('Meta-target preparation started.')
base_datasets = data_utility.list_datasets(directory=base_data_dir)
with tqdm.tqdm(total=(len(base_datasets) * len(meta_targets.IMPORTANCE_MEASURES) *
len(meta_targets.BASE_MODELS)), desc='Computing meta-targets') as progress_bar:
with multiprocessing.Pool(processes=n_processes) as process_pool:
results = [process_pool.apply_async(compute_meta_target, kwds={
'base_data_dir': base_data_dir, 'base_dataset_name': base_dataset_name,
'base_model_name': base_model_name, 'importance_measure_name': importance_measure_name
}, callback=lambda x: progress_bar.update())
for base_dataset_name in base_datasets
for base_model_name in meta_targets.BASE_MODELS.keys()
for importance_measure_name in meta_targets.IMPORTANCE_MEASURES.keys()]
results = [x.get() for x in results]
# Combine individual meta-targets to one data frame per base dataset:
meta_target_data = {base_dataset_name: pd.DataFrame() for base_dataset_name in base_datasets}
for result in results:
column_name = data_utility.name_meta_target(
importance_measure_name=result['importance_measure'],
base_model_name=result['base_model'])
meta_target_data[result['base_dataset']][column_name] = result['values']
for base_dataset_name, data_frame in meta_target_data.items():
data_utility.save_dataset(dataset_name=base_dataset_name, directory=meta_data_dir,
y=data_frame)
print('Meta-targets prepared and saved.')
# Parse command-line arguments and prepare base + meta datasets.
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Retrieves base datasets from OpenML, creates meta-datasets ' +
'and stores all these data.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', '--base_data_dir', type=pathlib.Path, default='data/base_datasets/',
help='Directory to store base datasets. Will be created if necessary.')
parser.add_argument('-i', '--data_ids', type=int, default=[], nargs='*',
help='Ids of OpenML datasets. If none provided, will search for datasets.')
parser.add_argument('-m', '--meta_data_dir', type=pathlib.Path, default='data/meta_datasets/',
help='Directory to store meta-datasets. Will be created if necessary.')
parser.add_argument('-p', '--n_processes', type=int, default=None,
help='Number of processes for multi-processing (default: all cores).')
args = parser.parse_args()
prepare_base_datasets(base_data_dir=args.base_data_dir, data_ids=args.data_ids)
prepare_meta_features(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
prepare_meta_targets(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
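A programmatic equivalent of the command-line entry point above. The OpenML dataset ids are arbitrary examples (not the ones used in the paper), and the directories mirror the argparse defaults.
import pathlib
from prepare_datasets import (prepare_base_datasets, prepare_meta_features,
                              prepare_meta_targets)

base_dir = pathlib.Path('data/base_datasets/')
meta_dir = pathlib.Path('data/meta_datasets/')
prepare_base_datasets(base_data_dir=base_dir, data_ids=[31, 1464])   # example ids only
prepare_meta_features(base_data_dir=base_dir, meta_data_dir=meta_dir, n_processes=2)
prepare_meta_targets(base_data_dir=base_dir, meta_data_dir=meta_dir, n_processes=2)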
| 7,730
| 0
| 132
|
3f79f678ffc367e4156a50c0372bba0efcd118d0
| 4,749
|
py
|
Python
|
mywebsocket.py
|
malengelajosue/app_flask
|
ea656abb859d8941e9a4761736f2a6ce4b91f7aa
|
[
"MIT"
] | null | null | null |
mywebsocket.py
|
malengelajosue/app_flask
|
ea656abb859d8941e9a4761736f2a6ce4b91f7aa
|
[
"MIT"
] | null | null | null |
mywebsocket.py
|
malengelajosue/app_flask
|
ea656abb859d8941e9a4761736f2a6ce4b91f7aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import signal
import sys
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.wsgi as myapp_wsgi
from datetime import datetime
import time
import ast
import random
from datetime import date
from models.model import Sites
from models.model import Coordonnates
from models.db_connection import Session,engine,Base
from myclasses.gpsaccess import Gpsaccess as Gps
# Javascript Usage:
# var ws = new WebSocket('ws://localhost:8000/ws');
# ws.onopen = function(event){ console.log('socket open'); }
# ws.onclose = function(event){ console.log('socket closed'); }
# ws.onerror = function(error){ console.log('error:', err); }
# ws.onmessage = function(event){ console.log('message:', event.data); }
# # ... wait for connection to open
# ws.send('hello world')
# Simple Websocket echo handler. This could be extended to
# use Redis PubSub to broadcast updates to clients.
application = tornado.web.Application([
(r'/ws', MyAppWebSocket),
(r'/(.*)', tornado.web.FallbackHandler, dict(
fallback=tornado.wsgi.WSGIContainer(myapp_wsgi)
)),
], debug=True)
if __name__ == '__main__':
application.listen(8001)
instance=tornado.ioloop.IOLoop.instance()
instance.start()
signal.signal(signal.SIGINT, signal_handler)
signal.pause()
| 29.314815
| 169
| 0.620131
|
#!/usr/bin/env python
import signal
import sys
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.wsgi as myapp_wsgi
from datetime import datetime
import time
import ast
import random
from datetime import date
from models.model import Sites
from models.model import Coordonnates
from models.db_connection import Session,engine,Base
from myclasses.gpsaccess import Gpsaccess as Gps
# Javascript Usage:
# var ws = new WebSocket('ws://localhost:8000/ws');
# ws.onopen = function(event){ console.log('socket open'); }
# ws.onclose = function(event){ console.log('socket closed'); }
# ws.onerror = function(error){ console.log('error:', err); }
# ws.onmessage = function(event){ console.log('message:', event.data); }
# # ... wait for connection to open
# ws.send('hello world')
class MyAppWebSocket(tornado.websocket.WebSocketHandler):
# Simple Websocket echo handler. This could be extended to
# use Redis PubSub to broadcast updates to clients.
def getPosition(self):
self.connected = False
if self.connected == False:
self.gpsDevice = Gps()
self.myCoord = ''
self.connected = True
time.sleep(0.5)
coordonnates = self.gpsDevice.readCoordonates()
self.myCoord = coordonnates
if coordonnates != {}:
self.lat = float(coordonnates['latitude'])
self.long = float(coordonnates['longitude'])
self.alt = coordonnates['altitude']
self.speed = coordonnates['speed']
self.course = coordonnates['course']
self.satellite = coordonnates['satellite']
self.moment = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
coordonnates = {'Lat': self.lat, 'Long': self.long, 'Alt': self.alt, 'Moment': self.moment, 'Sat': self.satellite,'Course': self.course, 'Speed': self.speed}
self.write_message(coordonnates)
if self.persit==True:
self.saveCoordonates()
else:
self.write_message({'status':0})
return coordonnates
def open(self):
self.persit=False
self.mysite = ''
def on_message(self, message):
message=ast.literal_eval(message)
print(message)
coordonates={}
if message.get('action')=='get_position':
coordonates=self.getPosition()
elif message.get('action')=='start_persiste':
print("start persisting....")
self.site_name=str(message.get('site_name'))
self.capture_type=str(message.get('type'))
self.description=str(message.get('description'))
_name=self.site_name
_description=self.description
_type=self.capture_type
mySite=Sites(name=_name,description=_description,type_prelevement=_type)
self.mysite=mySite
self.persit = True
elif message.get('action')=='stop_persiste':
self.persit=False
session=Session()
session.add(self.mysite)
session.commit()
session.close()
elif message.get('action')=='gps_test':
self.getPosition()
print('gps test')
elif message.get('action') == 'multiwii_test':
self.getPosition()
print('Multiwii test')
elif message.get('action') == 'arm_test':
self.getPosition()
print('Arm test')
def run(self):
time.sleep(1)
return
def on_close(self):
try:
            print('connection closed')
        except tornado.websocket.WebSocketClosedError:
            print('connection closed unexpectedly!')
self.close()
def check_origin(self, origin):
return True
def saveCoordonates(self):
_lat=str(self.lat)
_long=str(self.long)
_alt=str(self.alt)
_moment=datetime.now()
_vitesse=str(self.speed)
_course=str(self.course)
_satellite=str(self.satellite)
coord=Coordonnates(lat=_lat,long=_long,alt=_alt,moment=_moment,speed=_vitesse,course=_course,satellite=_satellite)
self.mysite.coordonnates.append(coord)
application = tornado.web.Application([
(r'/ws', MyAppWebSocket),
(r'/(.*)', tornado.web.FallbackHandler, dict(
fallback=tornado.wsgi.WSGIContainer(myapp_wsgi)
)),
], debug=True)
if __name__ == '__main__':
application.listen(8001)
instance=tornado.ioloop.IOLoop.instance()
instance.start()
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
instance.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.pause()
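A small Python client matching the JavaScript usage notes at the top of the file. The third-party websocket-client package is an assumed dependency (any WebSocket client would do); the port and path come from the handler registration above, the site name, type, and description values are placeholders, and messages are sent as dict literals because the handler parses them with ast.literal_eval.
# pip install websocket-client   (assumed dependency, not part of this project)
from websocket import create_connection

ws = create_connection('ws://localhost:8001/ws')
ws.send("{'action': 'get_position'}")                  # parsed server-side with ast.literal_eval
print(ws.recv())                                       # coordinates dict, or {'status': 0} if no GPS fix
ws.send("{'action': 'start_persiste', 'site_name': 'site-a', "
        "'type': 'survey', 'description': 'test run'}")
ws.send("{'action': 'stop_persiste'}")
ws.close()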
| 3,139
| 36
| 238
|
ef071130d8e688b2bf7d1480cb9a43266fc55e27
| 4,576
|
py
|
Python
|
compile.py
|
KValexander/compile-java-project
|
62aab5ca9ec53705daa25a21875fc5c97e71db97
|
[
"MIT"
] | null | null | null |
compile.py
|
KValexander/compile-java-project
|
62aab5ca9ec53705daa25a21875fc5c97e71db97
|
[
"MIT"
] | null | null | null |
compile.py
|
KValexander/compile-java-project
|
62aab5ca9ec53705daa25a21875fc5c97e71db97
|
[
"MIT"
] | null | null | null |
import shutil
import os
import re
from tkinter import *
# Storage
concat = ""
assets = []
startclass = ""
# Static config
config = {
"javapath": "java",
"classpath": "class",
"sourcetxt": "source.txt",
"compilebat": "compile.bat",
"startclass": "Main.class",
"runbat": "run.bat",
"copyassets": "true"
}
# Getting configurations from a file
if os.path.exists("compile_config.txt"):
f = open("compile_config.txt", "r")
for line in f:
line = line.replace(" ", "").split("=");
config[line[0]] = line[1].rstrip();
f.close()
# Entries
entries = {
"javapath": "Java dir: ",
"classpath": "Class dir: ",
"sourcetxt": "Source txt: ",
"compilebat": "Compile bat: ",
"startclass": "Start class: ",
"runbat": "Run bat: ",
"copyassets": "Copy assets: "
}
# Setting configurations
# GUI
# Create field
# Concatenating paths to java files
# Getting the path to the starting class
# Copy assets
# File creation
# Start programm
# Call GUI
tkinter_interface()
| 26
| 138
| 0.674825
|
import shutil
import os
import re
from tkinter import *
# Storage
concat = ""
assets = []
startclass = ""
# Static config
config = {
"javapath": "java",
"classpath": "class",
"sourcetxt": "source.txt",
"compilebat": "compile.bat",
"startclass": "Main.class",
"runbat": "run.bat",
"copyassets": "true"
}
# Getting configurations from a file
if os.path.exists("compile_config.txt"):
f = open("compile_config.txt", "r")
for line in f:
line = line.replace(" ", "").split("=");
config[line[0]] = line[1].rstrip();
f.close()
# Entries
entries = {
"javapath": "Java dir: ",
"classpath": "Class dir: ",
"sourcetxt": "Source txt: ",
"compilebat": "Compile bat: ",
"startclass": "Start class: ",
"runbat": "Run bat: ",
"copyassets": "Copy assets: "
}
# Setting configurations
def setting_configurations():
for key, val in entries.items():
if(entries[key].get() != ""): config[key] = entries[key].get()
# Overwrite config file
f = open("compile_config.txt", "w+")
for key, val in config.items():
f.write(key + " = " + val + "\n")
f.close()
# Call start processing
start_processing()
# GUI
def tkinter_interface():
global entries
# Window
window = Tk()
window.title("Java compilation automator")
window.resizable(width=False, height=False)
window.geometry("400x300")
# Labels and Entries
i = 0
for key, val in entries.items():
entries[key] = create_field(window, val, 30, 0, i)
entries[key].insert(0, config[key])
i += 2
# Button
button = Button(window, text="Run", background="#888", foreground="#eee", padx="20", pady="0", font="20", command=setting_configurations)
button.grid(column=2,row=0, padx=20)
# Mainloop
window.mainloop()
# Create field
def create_field(win, text, width, c, r):
label = Label(win, text=text)
label.grid(column=c, row=r, pady=10, padx=10)
txt = Entry(win, width=width)
txt.grid(column=c+1, row=r)
return txt
# Concatenating paths to java files
def java_dir_processing(path):
global concat, assets
ld = os.listdir(path)
for file in ld:
if re.search(r"\.java", file):
concat += "./" + path + "/" + file + "\n"
elif os.path.isdir(path + "/" + file): java_dir_processing(path + "/" + file)
else: assets.append(path + "/" + file)
# Getting the path to the starting class
def class_dir_processing(path):
global startclass
if(not os.path.exists(path)): return False;
ld = os.listdir(path)
for file in ld:
if re.search(config["startclass"], file):
startclass = path + "/" + re.split(r"\.", file)[0]
startclass = re.sub(r"/", ".", startclass.replace(config["classpath"]+"/", ""))
return;
elif os.path.isdir(path + "/" + file): class_dir_processing(path + "/" + file)
# Copy assets
def assets_processing():
global assets
for asset in assets:
topath = re.sub(r"\/[\w\-]*\.\w*", "/", asset.replace(config["javapath"], config["classpath"], 1))
if not os.path.exists(topath):
shutil.copytree(topath.replace(config["classpath"], config["javapath"]),topath)
for filename in os.listdir(topath):
fullpath = topath + filename
if os.path.isfile(fullpath): os.unlink(fullpath)
elif os.path.isdir(fullpath): shutil.rmtree(fullpath)
shutil.copy(asset, topath)
# File creation
def create_file(name, content):
f = open(name, "w+")
f.write(content)
f.close()
# Start programm
def start_processing():
global concat, assets
# Call jdp
java_dir_processing(config["javapath"])
# Create file with paths
create_file(config["sourcetxt"], concat)
concat = ""
# Delete class folder if it exists
if os.path.exists(config["classpath"]): shutil.rmtree(config["classpath"])
# Create file with compilation command
create_file(config["compilebat"], "javac -d " + config["classpath"] + " @" + config["sourcetxt"] + "\n")
# Compilation activation
os.system(config["compilebat"])
# Removing intermediate files
os.remove(config["compilebat"])
os.remove(config["sourcetxt"])
# Checking for compilation success
# and getting the path to the starting class
if(class_dir_processing(config["classpath"]) == False):
return print("\nJCA message: Compilation error")
if(not startclass):
return print("\nJCA message: Startup error")
else:
print("JCA message: Compilation is successful")
# Call ap
if(config["copyassets"] == "true"): assets_processing()
assets.clear()
# Creating an interpretation file
create_file(config["runbat"], "java -classpath ./" + config["classpath"] + " " + startclass)
# Running the code
os.system(config["runbat"])
# Removing intermediate files
os.remove(config["runbat"])
# Call GUI
tkinter_interface()
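An example compile_config.txt in the key = value form the loader at the top of the script parses; every key matches an entry of the static config dict, and the values shown are simply the defaults.
javapath = java
classpath = class
sourcetxt = source.txt
compilebat = compile.bat
startclass = Main.class
runbat = run.bat
copyassets = true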
| 3,423
| 0
| 176
|
6d79c61a4cd03cad002390bea3fef1d83f0bef83
| 601
|
py
|
Python
|
multiprocessing_module/multiprocessing_test2.py
|
kenwaldek/python
|
e6aaf5616a456a4fb91889c0617bd6511f1a223e
|
[
"MIT"
] | 1
|
2019-02-24T09:57:16.000Z
|
2019-02-24T09:57:16.000Z
|
multiprocessing_module/multiprocessing_test2.py
|
kenwaldek/python
|
e6aaf5616a456a4fb91889c0617bd6511f1a223e
|
[
"MIT"
] | null | null | null |
multiprocessing_module/multiprocessing_test2.py
|
kenwaldek/python
|
e6aaf5616a456a4fb91889c0617bd6511f1a223e
|
[
"MIT"
] | 4
|
2017-05-21T15:34:53.000Z
|
2018-09-25T06:56:15.000Z
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: multiprocessing 2 Version: 1.0
# Date: 30-12-16 Language: python3
# Description: multiprocessing, i.e. using several cores together
#
###############################################################
from multiprocessing import Pool
if __name__ == '__main__':
p = Pool(processes=20)
data = p.map(job, range(20))
p.close()
print(data)
| 25.041667
| 63
| 0.44426
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: multiprocessing 2 Version: 1.0
# Date: 30-12-16 Language: python3
# Description: multiprocessing, i.e. using several cores together
#
###############################################################
from multiprocessing import Pool
def job(num):
return num * 2
if __name__ == '__main__':
p = Pool(processes=20)
data = p.map(job, range(20))
p.close()
print(data)
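The same example written with Pool as a context manager, which ensures the pool is cleaned up even if map raises. The worker stays at module top level so it can be pickled when the start method is spawn (for example on Windows).
from multiprocessing import Pool


def job(num):
    return num * 2


if __name__ == '__main__':
    with Pool(processes=4) as p:
        print(p.map(job, range(20)))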
| 11
| 0
| 23
|
d0f18eb34b3ac7f1fbda4ccdac297a8ef889417b
| 1,088
|
py
|
Python
|
leetcode-python/num002.py
|
shuaizi/leetcode
|
c943410575f380a00335bf5ac8d361af53a92d78
|
[
"Apache-2.0"
] | null | null | null |
leetcode-python/num002.py
|
shuaizi/leetcode
|
c943410575f380a00335bf5ac8d361af53a92d78
|
[
"Apache-2.0"
] | null | null | null |
leetcode-python/num002.py
|
shuaizi/leetcode
|
c943410575f380a00335bf5ac8d361af53a92d78
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'shuai'
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
sol = Solution()
sol.addTwoNumbers(l1, l2)
| 23.148936
| 45
| 0.465074
|
__author__ = 'shuai'
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
ret = ListNode(0)
tmp = 0
tmpNode = ret
while l1 or l2:
if not l1:
sum = l2.val
l2 = l2.next
elif not l2:
sum = l1.val
l1 = l1.next
else:
sum = l1.val + l2.val
l1 = l1.next
l2 = l2.next
tmpN = ListNode((sum + tmp) % 10)
            tmp = (sum + tmp) // 10  # floor division keeps the carry an integer (needed on Python 3)
tmpNode.next = tmpN
tmpNode = tmpNode.next
if tmp != 0:
tmpN = ListNode(tmp)
tmpNode.next = tmpN
return ret.next
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
sol = Solution()
sol.addTwoNumbers(l1, l2)
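The driver above discards the returned list; the small helper below (hypothetical, not part of the original file) makes the result visible. 342 + 465 = 807, and the digits are stored least-significant first.
def to_digits(node):
    # Walk the linked list and collect the digits, least significant first.
    digits = []
    while node:
        digits.append(node.val)
        node = node.next
    return digits

print(to_digits(sol.addTwoNumbers(l1, l2)))  # expected: [7, 0, 8]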
| 49
| 768
| 73
|
b5b7dbcb1ba016b3352ed24c0000b521b5f7e7a1
| 116
|
py
|
Python
|
some/path/to/app/Class3.py
|
CameronHudson8/python-implicit-namespace-packages
|
80dc55ab31ef43d4cbf57ce68ee3c89a14cd534d
|
[
"Apache-2.0"
] | null | null | null |
some/path/to/app/Class3.py
|
CameronHudson8/python-implicit-namespace-packages
|
80dc55ab31ef43d4cbf57ce68ee3c89a14cd534d
|
[
"Apache-2.0"
] | null | null | null |
some/path/to/app/Class3.py
|
CameronHudson8/python-implicit-namespace-packages
|
80dc55ab31ef43d4cbf57ce68ee3c89a14cd534d
|
[
"Apache-2.0"
] | null | null | null |
from module1.Class1 import Class1
| 14.5
| 40
| 0.672414
|
from module1.Class1 import Class1
class Class3:
def __init__(self):
self.helper = Class1('hola-bot')
| 39
| -8
| 50
|