Dataset schema (one row per source file; ⌀ marks a nullable column; ranges are the minimum and maximum observed values):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
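The last three columns (`avg_line_length`, `max_line_length`, `alphanum_fraction`) are per-file statistics derived from `content`. The exact formulas are not documented here, so the sketch below is only an assumed reconstruction of how such values are typically computed:

```python
def content_stats(content: str):
    # Assumed definitions; the dataset's exact formulas may differ slightly.
    lines = content.splitlines()
    avg_line_length = sum(len(line) for line in lines) / len(lines) if lines else 0.0
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
    return avg_line_length, max_line_length, alphanum_fraction

print(content_stats("import cv2\nprint(cv2.__version__)\n"))
```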

hexsha: bc3c1b4b67c6a161b94ae23b58760e641ad020db | size: 914 | ext: py | lang: Python
max_stars: path=colorspace/__init__.py | repo=atomicoo/EnhanceIMG @ 8c009fbb6c5461ff6d7f30bdacec72232639c7f2 | licenses=["MIT"] | count=35 | events: 2021-04-20T21:14:25.000Z to 2022-03-31T08:27:35.000Z
max_issues: path=colorspace/__init__.py | repo=Real798/EnhanceIMG @ 8c009fbb6c5461ff6d7f30bdacec72232639c7f2 | licenses=["MIT"] | count=2 | events: 2021-05-13T05:34:59.000Z to 2021-09-23T09:07:32.000Z
max_forks: path=colorspace/__init__.py | repo=Real798/EnhanceIMG @ 8c009fbb6c5461ff6d7f30bdacec72232639c7f2 | licenses=["MIT"] | count=7 | events: 2021-05-10T12:08:42.000Z to 2022-02-24T10:06:05.000Z
content:
import cv2
def bgr2rgb(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
def rgb2bgr(img_rgb):
return cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
def bgr2gray(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
def bgr2hsv(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
def hsv2bgr(img_hsv):
return cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
def bgr2hls(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS)
def hls2bgr(img_hls):
return cv2.cvtColor(img_hls, cv2.COLOR_HLS2BGR)
def bgr2lab(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
def lab2bgr(img_lab):
return cv2.cvtColor(img_lab, cv2.COLOR_LAB2BGR)
def bgr2luv(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LUV)
def luv2bgr(img_luv):
return cv2.cvtColor(img_luv, cv2.COLOR_LUV2BGR)
def bgr2yuv(img_bgr):
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YUV)
avg_line_length: 23.435897 | max_line_length: 52 | alphanum_fraction: 0.760394
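A minimal usage sketch for the conversion wrappers in the record above, assuming `opencv-python` is installed; the file name `example.jpg` is purely illustrative:

```python
import cv2

img_bgr = cv2.imread("example.jpg")  # OpenCV reads images in BGR channel order
if img_bgr is not None:
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)    # equivalent to bgr2rgb(img_bgr)
    img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)  # equivalent to bgr2gray(img_bgr)
    print(img_rgb.shape, img_gray.shape)
```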

hexsha: 20dcadf14d7e729f3bf49414a96c1bfd37338759 | size: 5,160 | ext: py | lang: Python
max_stars: path=mangasheet.py | repo=MizunagiKB/mangasheet @ acc747eb6e84d32d775caede3bd4d1e4dda7ef80 | licenses=["MIT"] | count=null | events: null
max_issues: path=mangasheet.py | repo=MizunagiKB/mangasheet @ acc747eb6e84d32d775caede3bd4d1e4dda7ef80 | licenses=["MIT"] | count=null | events: null
max_forks: path=mangasheet.py | repo=MizunagiKB/mangasheet @ acc747eb6e84d32d775caede3bd4d1e4dda7ef80 | licenses=["MIT"] | count=null | events: null
content:
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# ------------------------------------------------------------------ import(s)
import sys
import math
import argparse
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
# ------------------------------------------------------------------- param(s)
COLOR_WHITE = (255, 255, 255)
COLOR_LBLUE = ( 63, 63, 255)
DPI = 300
# A4 210 x 297
# B5 182 x 257
# B5 baseframe 150 x 220
# ------------------------------------------------------------------- class(s)
# ---------------------------------------------------------------- function(s)
def mm_to_px(w_mm, h_mm, DPI=DPI):
w_px = float(w_mm * DPI) / 25.4
w_px = int(math.floor(w_px + 1)) & ~1
h_px = float(h_mm * DPI) / 25.4
h_px = int(math.floor(h_px + 1)) & ~1
return w_px, h_px
def draw_center_rect(o_image, w_mm, h_mm, dpi=DPI, width=1, fill=COLOR_WHITE):
paper_w, paper_h = o_image.size
w, h = mm_to_px(w_mm, h_mm, dpi)
o_draw = PIL.ImageDraw.Draw(o_image)
if width == 1:
range_st = 0
range_sz = 1
else:
range_st = -1
range_sz = 2
for width_y in range(range_st, range_sz):
for width_x in range(range_st, range_sz):
x = ( paper_w - w) >> 1
x += width_x
y = ( paper_h - h) >> 1
y += width_y
o_draw.rectangle(
((x, y),
(x + w, y + h)),
outline=COLOR_LBLUE,
fill=fill
)
def gen_comicbase(dpi, margin, comment="", Jmarks=False):
A4_paper_w, A4_paper_h = mm_to_px(210.0, 297.0, dpi)
B5_paper_w, B5_paper_h = mm_to_px(182.0, 257.0, dpi)
B5margin_w, B5margin_h = mm_to_px((182.0 - 2.0), (257.0 + 2.0) + (margin << 1), dpi)
Jmarks_w, Jmarks_h = mm_to_px(182 + (margin << 1), 257 + (margin << 1), dpi)
szMargin, sz10 = mm_to_px(3.0, 10.0, dpi)
o_image = PIL.Image.new("RGB", (A4_paper_w, A4_paper_h), COLOR_WHITE)
if Jmarks is True:
draw_center_rect(o_image, 999, 257, dpi, 1)
draw_center_rect(o_image, 182, 999, dpi, 1)
o_draw = PIL.ImageDraw.Draw(o_image)
# Jmark LR
o_draw.line(
(
((A4_paper_w - Jmarks_w) >> 1) - szMargin, (A4_paper_h >> 1) - sz10,
((A4_paper_w - Jmarks_w) >> 1) - szMargin, (A4_paper_h >> 1) + sz10,
),
fill=COLOR_LBLUE, width=1
)
o_draw.line(
(
(A4_paper_w - ((A4_paper_w - Jmarks_w) >> 1)) + szMargin, (A4_paper_h >> 1) - sz10,
(A4_paper_w - ((A4_paper_w - Jmarks_w) >> 1)) + szMargin, (A4_paper_h >> 1) + sz10,
),
fill=COLOR_LBLUE, width=1
)
# Jmark TB
o_draw.line(
(
(A4_paper_w >> 1) - sz10, ((A4_paper_h - Jmarks_h) >> 1) - szMargin,
(A4_paper_w >> 1) + sz10, ((A4_paper_h - Jmarks_h) >> 1) - szMargin,
),
fill=COLOR_LBLUE, width=1
)
o_draw.line(
(
(A4_paper_w >> 1) - sz10, (A4_paper_h - ((A4_paper_h - Jmarks_h) >> 1)) + szMargin,
(A4_paper_w >> 1) + sz10, (A4_paper_h - ((A4_paper_h - Jmarks_h) >> 1)) + szMargin,
),
fill=COLOR_LBLUE, width=1
)
o_draw.line(
(
A4_paper_w >> 1, 0, A4_paper_w >> 1, A4_paper_h
),
fill=COLOR_LBLUE, width=1
)
o_draw.line(
(
0, A4_paper_h >> 1, A4_paper_w, A4_paper_h >> 1
),
fill=COLOR_LBLUE, width=1
)
draw_center_rect(
o_image,
182 + (margin << 1),
257 + (margin << 1),
dpi, 1
)
draw_center_rect(
o_image,
182 + (margin << 1),
257 + (margin << 1),
dpi, 3, None
)
draw_center_rect(o_image, 182, 257, dpi)
draw_center_rect(o_image, 150, 220, dpi)
if len(comment.strip()) > 0:
fontsize = int((dpi / 300.0) * 24)
render_text = comment.strip()[0:200]
fnt = PIL.ImageFont.truetype("res/NotoSansCJKjp-Regular.otf", fontsize)
txt_x = ((A4_paper_w - B5margin_w) >> 1)
txt_y = B5margin_h + ((A4_paper_h - B5margin_h) >> 1)
o_draw = PIL.ImageDraw.Draw(o_image)
o_draw.text(
(txt_x, txt_y),
render_text, font=fnt, fill=COLOR_LBLUE)
return o_image
def main():
parser = argparse.ArgumentParser(description="Comic base generator")
parser.add_argument(
"-o", "--out", type=str, required=True,
help="Output filename")
parser.add_argument(
"-m", "--margin", type=int, required=False, default=3,
help="Margin")
parser.add_argument(
"-d", "--dpi", type=int, required=False, default=DPI,
help="Image resolution")
o_argv = parser.parse_args()
o_image = gen_comicbase(o_argv.dpi, o_argv.margin)
o_image.save(o_argv.out)
if __name__ == "__main__":
main()
# [EOF]
avg_line_length: 28.196721 | max_line_length: 99 | alphanum_fraction: 0.486434
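As a quick check on the `mm_to_px` helper in the record above: at the default 300 DPI an A4 page (210 x 297 mm) maps to an even pixel size of 2480 x 3508. A standalone restatement of the same rounding logic (the `& ~1` forces an even pixel count, as in the original):

```python
import math

def mm_to_px(w_mm, h_mm, dpi=300):
    # 1 inch = 25.4 mm; round up, then clear the lowest bit to get an even size
    w_px = int(math.floor(w_mm * dpi / 25.4 + 1)) & ~1
    h_px = int(math.floor(h_mm * dpi / 25.4 + 1)) & ~1
    return w_px, h_px

print(mm_to_px(210.0, 297.0))  # (2480, 3508) for A4 at 300 DPI
print(mm_to_px(182.0, 257.0))  # B5 paper size in pixels
```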

hexsha: 3a7623d7e0055f0adc198c936fa5d8640bd35ec3 | size: 2,432 | ext: py | lang: Python
max_stars: path=manage.py | repo=kevinmcalear/lego @ b1f92159f8d0b8ec7ee54cf2f749066efc4d0615 | licenses=["BSD-3-Clause"] | count=null | events: null
max_issues: path=manage.py | repo=kevinmcalear/lego @ b1f92159f8d0b8ec7ee54cf2f749066efc4d0615 | licenses=["BSD-3-Clause"] | count=null | events: null
max_forks: path=manage.py | repo=kevinmcalear/lego @ b1f92159f8d0b8ec7ee54cf2f749066efc4d0615 | licenses=["BSD-3-Clause"] | count=null | events: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Management script."""
import os
from glob import glob
from subprocess import call
from flask_migrate import MigrateCommand
from flask_script import Command, Manager, Option, Server, Shell
from flask_script.commands import Clean, ShowUrls
from lego_me.app import create_app
from lego_me.database import db
from lego_me.settings import DevConfig, ProdConfig
from lego_me.user.models import User
CONFIG = ProdConfig if os.environ.get('LEGO_ME_ENV') == 'prod' else DevConfig
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
app = create_app(CONFIG)
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access app, db, and the User model by default."""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
class Lint(Command):
"""Lint and check code style with flake8 and isort."""
def get_options(self):
"""Command line options."""
return (
Option('-f', '--fix-imports', action='store_true', dest='fix_imports', default=False,
help='Fix imports using isort, before linting'),
)
def run(self, fix_imports):
"""Run command."""
skip = ['requirements']
root_files = glob('*.py')
root_directories = [name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
print('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
            if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
manager.add_command('urls', ShowUrls())
manager.add_command('clean', Clean())
manager.add_command('lint', Lint())
if __name__ == '__main__':
manager.run()
avg_line_length: 31.584416 | max_line_length: 107 | alphanum_fraction: 0.660362

hexsha: 8c57107f4d14ef5f170bfeb1e539420d882a5c56 | size: 202 | ext: py | lang: Python
max_stars: path=spitz/spiders/snap_fit.py | repo=zezaeoh/spitz @ 4420af2bc26a705ef92cc05e14f6db759e217a13 | licenses=["MIT"] | count=1 | events: 2019-12-16T16:31:45.000Z to 2019-12-16T16:31:45.000Z
max_issues: path=spitz/spiders/snap_fit.py | repo=zezaeoh/spitz @ 4420af2bc26a705ef92cc05e14f6db759e217a13 | licenses=["MIT"] | count=2 | events: 2021-03-31T19:29:57.000Z to 2021-12-13T20:27:03.000Z
max_forks: path=spitz/spiders/snap_fit.py | repo=zezaeoh/spitz @ 4420af2bc26a705ef92cc05e14f6db759e217a13 | licenses=["MIT"] | count=null | events: null
content:
import scrapy
class SnapFitSpider(scrapy.Spider):
name = 'snap_fit'
allowed_domains = ['66girls.co.kr']
start_urls = ['http://66girls.co.kr/']
def parse(self, response):
pass
avg_line_length: 18.363636 | max_line_length: 42 | alphanum_fraction: 0.638614

hexsha: f4dd51193d41ee00b9cdd00a1f106e6eb46346cf | size: 8,729 | ext: py | lang: Python
max_stars: path=blender/2.79/scripts/addons/add_mesh_extra_objects/geodesic_domes/forms_271.py | repo=uzairakbar/bpy2.79 @ 3a3e0004ac6783c4e4b89d939e4432de99026a85 | licenses=["MIT"] | count=3 | events: 2019-09-16T10:29:19.000Z to 2022-02-11T14:43:18.000Z
max_issues: path=engine/2.80/scripts/addons/add_mesh_geodesic_domes/forms_271.py | repo=byteinc/Phasor @ f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | licenses=["Unlicense"] | count=null | events: null
max_forks: path=engine/2.80/scripts/addons/add_mesh_geodesic_domes/forms_271.py | repo=byteinc/Phasor @ f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | licenses=["Unlicense"] | count=4 | events: 2020-02-19T20:02:26.000Z to 2022-02-11T18:47:56.000Z
content:
from math import sin, cos, sqrt
from .vefm_271 import *
class form(mesh):
def __init__(self, uresolution, vresolution, uscale, vscale, upart,
vpart, uphase, vphase, utwist, vtwist, xscale, yscale, sform):
mesh.__init__(self)
self.PKHG_parameters = [uresolution, vresolution, uscale, vscale, upart,
vpart, uphase, vphase, utwist, vtwist, xscale, yscale, sform]
self.ures = uresolution
self.vres = vresolution
self.uscale = uscale
self.vscale = vscale
self.upart = upart
self.vpart = vpart
self.uphase = uphase * self.a360
self.vphase = vphase * self.a360
self.utwist = utwist
self.vtwist = vtwist
self.xscale = xscale
self.yscale = yscale
self.sform = sform
if self.upart != 1.0: # there is a gap in the major radius
self.uflag = 1
else:
self.uflag = 0
if self.vpart != 1.0: # there is a gap in the minor radius
self.vflag = 1
else:
self.vflag = 0
if self.uflag:
self.ufinish = self.ures + 1
else:
self.ufinish = self.ures
if self.vflag:
self.vfinish = self.vres + 1
else:
self.vfinish = self.vres
self.ustep = (self.a360 / self.ures) * self.upart
self.vstep = (self.a360 / self.vres) * self.vpart
if self.xscale != 1.0:
self.xscaleflag = 1
else:
self.xscaleflag = 0
if self.yscale != 1.0:
self.yscaleflag = 1
else:
self.yscaleflag = 0
self.rowlist = []
def generatepoints(self):
for i in range(self.ufinish):
row = []
for j in range(self.vfinish):
u = self.ustep * i + self.uphase
v = self.vstep * j + self.vphase
if self.sform[12]:
r1 = self.superform(self.sform[0], self.sform[1], self.sform[2],
self.sform[3], self.sform[14] + u, self.sform[4],
self.sform[5], self.sform[16] * v)
else:
r1 = 1.0
if self.sform[13]:
r2 = self.superform(self.sform[6], self.sform[7], self.sform[8],
self.sform[9], self.sform[15] + v, self.sform[10],
self.sform[11], self.sform[17] * v)
else:
r2 = 1.0
x, y, z = self.formula(u, v, r1, r2)
point = vertex((x, y, z))
row.append(point)
self.verts.append(point)
self.rowlist.append(row)
if self.vflag:
pass
else:
for i in range(len(self.rowlist)):
self.rowlist[i].append(self.rowlist[i][0])
if self.uflag:
pass
else:
self.rowlist.append(self.rowlist[0])
def generatefaces(self):
ufin = len(self.rowlist) - 1
vfin = len(self.rowlist[0]) - 1
for i in range(ufin):
for j in range(vfin):
top = i
bottom = i + 1
left = j
right = j + 1
a = self.rowlist[top][left]
b = self.rowlist[top][right]
c = self.rowlist[bottom][right]
d = self.rowlist[bottom][left]
face1 = face([a, b, c, d])
self.faces.append(face1)
edge1 = edge(a, b)
edge2 = edge(a, d)
self.edges.append(edge1)
self.edges.append(edge2)
if i + 1 == ufin:
edge3 = edge(d, c)
self.edges.append(edge3)
if j + 1 == vfin:
edge4 = edge(b, c)
self.edges.append(edge4)
class grid(form):
def __init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform):
form.__init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform)
unit = 1.0 / self.a360
if self.ures == 1:
print("\n***ERRORin forms_271.grid L126***, ures is 1, changed into 2\n\n")
self.ures = 2
if self.vres == 1:
print("\n***ERROR in grid forms_271.grid L129***, vres is 1, changed into 2\n\n")
self.vres = 2
self.ustep = self.a360 / (self.ures - 1)
self.vstep = self.a360 / (self.vres - 1)
self.uflag = 1
self.vflag = 1
self.xscaleflag = 0
self.yscaleflag = 0
self.uexpand = unit * self.uscale
self.vexpand = unit * self.vscale
self.ushift = self.uscale * 0.5
self.vshift = self.vscale * 0.5
self.generatepoints()
self.generatefaces()
for i in range(len(self.verts)):
self.verts[i].index = i
self.connectivity()
def formula(self, u, v, r1, r2):
x = u * self.uexpand - self.ushift
y = v * self.vexpand - self.vshift
z = r1 * r2 - 1.0
return x, y, z
class cylinder(form):
def __init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform):
form.__init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform)
unit = 1.0 / self.a360
self.vshift = self.vscale * 0.5
self.vexpand = unit * self.vscale
self.vflag = 1
self.generatepoints()
self.generatefaces()
for i in range(len(self.verts)):
self.verts[i].index = i
self.connectivity()
def formula(self, u, v, r1, r2):
x = sin(u) * self.uscale * r1 * r2 * self.xscale
y = cos(u) * self.uscale * r1 * r2
z = v * self.vexpand - self.vshift
return x, y, z
class parabola(form):
def __init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform):
form.__init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform)
unit = 1.0 / self.a360
self.vshift = self.vscale * 0.5
self.vexpand = unit * self.vscale
self.vflag = 1
self.generatepoints()
self.generatefaces()
for i in range(len(self.verts)):
self.verts[i].index = i
self.connectivity()
def formula(self, u, v, r1, r2):
factor = sqrt(v) + 0.001
x = sin(u) * factor * self.uscale * r1 * r2 * self.xscale
y = cos(u) * factor * self.uscale * r1 * r2
z = - v * self.vexpand + self.vshift
return x, y, z
class torus(form):
def __init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform):
form.__init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform)
self.generatepoints()
self.generatefaces()
for i in range(len(self.verts)):
self.verts[i].index = i
self.connectivity()
def formula(self, u, v, r1, r2):
z = sin(v) * self.uscale * r2 * self.yscale
y = (self.vscale + self.uscale * cos(v)) * cos(u) * r1 * r2
x = (self.vscale + self.uscale * cos(v)) * sin(u) * r1 * r2 * self.xscale
return x, y, z
class sphere(form):
def __init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform):
form.__init__(self, uresolution, vresolution, uscale, vscale, upart, vpart,
uphase, vphase, utwist, vtwist, xscale, yscale, sform)
self.vstep = (self.a360 / (self.vres - 1)) * self.vpart
self.vflag = 1
self.generatepoints()
self.generatefaces()
for i in range(len(self.verts)):
self.verts[i].index = i
self.connectivity()
def formula(self, u, v, r1, r2):
v = (v * 0.5) - (self.a360 * 0.25)
x = r1 * cos(u) * r2 * cos(v) * self.uscale * self.xscale
y = r1 * sin(u) * r2 * cos(v) * self.uscale
z = r2 * sin(v) * self.uscale * self.yscale
return x, y, z
avg_line_length: 36.523013 | max_line_length: 93 | alphanum_fraction: 0.513805

hexsha: 627cb3b39d546badb4943297cdc34a8129f96823 | size: 35 | ext: py | lang: Python
max_stars: path=Calculator/squared.py | repo=zc256/PythonCalculator @ 3267a0809dbc28e437337c68f93a9205c0532563 | licenses=["MIT"] | count=null | events: null
max_issues: path=Calculator/squared.py | repo=zc256/PythonCalculator @ 3267a0809dbc28e437337c68f93a9205c0532563 | licenses=["MIT"] | count=null | events: null
max_forks: path=Calculator/squared.py | repo=zc256/PythonCalculator @ 3267a0809dbc28e437337c68f93a9205c0532563 | licenses=["MIT"] | count=null | events: null
content:
def squared(a):
return float(a)**2
avg_line_length: 17.5 | max_line_length: 19 | alphanum_fraction: 0.685714

hexsha: 93e928336e1ad6c687d3ad3a2bc5c378d547edf5 | size: 5,433 | ext: py | lang: Python
max_stars: path=streamlit/src/pages/user_statistics.py | repo=likweitan/final_year_project @ a86059cad92efe4edd85364d21b4ee6a56234b30 | licenses=["MIT"] | count=null | events: null
max_issues: path=streamlit/src/pages/user_statistics.py | repo=likweitan/final_year_project @ a86059cad92efe4edd85364d21b4ee6a56234b30 | licenses=["MIT"] | count=null | events: null
max_forks: path=streamlit/src/pages/user_statistics.py | repo=likweitan/final_year_project @ a86059cad92efe4edd85364d21b4ee6a56234b30 | licenses=["MIT"] | count=1 | events: 2021-07-10T15:54:54.000Z to 2021-07-10T15:54:54.000Z
content:
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import random
def load(data):
st.title('🎲 Student Statistics')
merge_df = merge_all(data[0], data[1], data[2])
users = find_user(merge_df)
st.sidebar.header('User')
user = st.sidebar.selectbox('Please select an user', ['Student A','Student B','Student C','Student D','Student E'])
switcher = {
'Student A': "Kpq2q+eKw/O+6/jLs3XJosgmI7weEJxJZdnkKTbbF8I=",
'Student B': "0+VU/Zb0Q96uoByuRhl7r9bJuJO6CKWpsmNMEuijSzc=",
'Student C': "g8DnYvIqpolw10XlwWeIWv6NbDPByUbmgH8EshJqBns=",
'Student D': "kSyUTFlepsYUD723IPL/jEZ520xaKbscrBmNtBUFR1o=",
'Student E': "XMFbFA7C49+LRhUddhelfPpA6F5dbOoxeyL3eYbuTlY="
}
user_id = switcher.get(user,"Invalid")
subjects = find_subject(user_id, merge_df)
test_subjects = []
for x in range(len(subjects)):
test_subjects.append('Course ' + str(x+1))
st.sidebar.header('Course')
code = st.sidebar.selectbox('Please select a course', test_subjects)
    id = int(code[-1])
    subject_id = subjects[id - 1]  # course labels are 1-based ('Course 1'); list indices are 0-based
topics = find_topic(user_id, subject_id, merge_df)
st.sidebar.header('Topics')
topic_id = st.sidebar.selectbox('Please select a topic', topics)
contents = find_content(user_id, subject_id, topic_id, merge_df)
test_contents = []
for x in range(len(contents)):
test_contents.append('Exercise ' + str(x+1))
st.sidebar.header('Exercise')
code = st.sidebar.selectbox('Please select a exercise', test_contents)
    id = int(code[-1])
    content_id = contents[id - 1]  # exercise labels are 1-based ('Exercise 1'); list indices are 0-based
# with st.spinner('Plotting...'):
# plot_difficulty(user, data[0], data[1], data[2])
plot_gender(user_id, content_id, data[0], data[1], data[2])
#st.write(average_score(user, merge_df))
@st.cache(show_spinner=False)
def merge_all(info_content_df: pd.DataFrame, info_userdata_df: pd.DataFrame, log_problem_df: pd.DataFrame):
merge_df = log_problem_df.merge(info_userdata_df, how='left', on='uuid')
merge_df = merge_df.merge(info_content_df, how='left', on='ucid')
return merge_df
@st.cache(show_spinner=False)
def find_user(merge_df: pd.DataFrame):
users = merge_df.uuid.head(5).values
return users
@st.cache(show_spinner=False)
def find_subject(user, merge_df: pd.DataFrame):
subjects = merge_df.level3_id[merge_df['uuid'] == user].drop_duplicates().values
return subjects
@st.cache(show_spinner=False)
def find_topic(user, subject, merge_df: pd.DataFrame):
topics = merge_df.level4_id[(merge_df['uuid'] == user) & (merge_df['level3_id'] == subject)].drop_duplicates().values
return topics
@st.cache(show_spinner=False)
def find_content(user, subject, topic, merge_df: pd.DataFrame):
contents = merge_df.ucid[(merge_df['uuid'] == user) & (merge_df['level3_id'] == subject) & (merge_df['level4_id'] == topic)].drop_duplicates().values
return contents
def highlight_correct(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == 1
return ['background-color: green' if v else '' for v in is_max]
def highlight_level(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == 4
return ['background-color: yellow' if v else '' for v in is_max]
def plot_gender(user, content, info_content_df: pd.DataFrame, info_userdata_df: pd.DataFrame, log_problem_df: pd.DataFrame):
st.header("**♟** Learning Path **♟**")
st.write('From the chart below, we could see that the number of users attempt the question is getting lower.')
# Lets randomly pick a user and an exercise and observe the learning process!
learning_path = log_problem_df[(
log_problem_df['uuid'] == user) & (
log_problem_df['ucid'] == content)]
learning_path = learning_path[[
'timestamp_TW', 'problem_number', 'total_sec_taken', 'total_attempt_cnt', 'used_hint_cnt', 'is_correct', 'level']].reset_index().copy()
scatter_plot = alt.Chart(learning_path).mark_circle(size=60).encode(
x='problem_number',
y='total_sec_taken',
color='is_correct',
tooltip=['problem_number', 'total_sec_taken',
'used_hint_cnt', 'is_correct']
)
st.altair_chart(scatter_plot, use_container_width=True)
#st.write(learning_path.sort_values(
# 'problem_number').style.apply(highlight_correct, subset=['is_correct']).apply(highlight_level, subset=['level']))
total_rows = learning_path.shape[0]
st.write('The user has tried ' + str(total_rows) +
' times for this course.')
def plot_difficulty(user, info_content_df: pd.DataFrame, info_userdata_df: pd.DataFrame, log_problem_df: pd.DataFrame):
merge_df = log_problem_df.merge(info_userdata_df, how='left', on='uuid')
merge_df = merge_df.merge(info_content_df, how='left', on='ucid')
group_difficulty = merge_df[merge_df['uuid'] ==
user].drop_duplicates(subset=['uuid', 'ucid'])
st.bar_chart(group_difficulty['difficulty'].value_counts())
st.write(group_difficulty)
def average_score(user, merge_df: pd.DataFrame):
x = pd.pivot_table(merge_df[merge_df['uuid'] == user], index=['ucid'],
columns=['is_correct'], aggfunc=np.mean)
return x
# def plot_total_sec_taken(user, info_content_df: pd.DataFrame, info_userdata_df: pd.DataFrame, log_problem_df: pd.DataFrame):
avg_line_length: 35.509804 | max_line_length: 153 | alphanum_fraction: 0.684336

hexsha: 528fae4391563c0a920f260d5099c37962493faf | size: 14,785 | ext: py | lang: Python
max_stars: path=batch/batch/driver/canceller.py | repo=mattsolo1/hail @ ccec08b3dfcf5da83fa09a4576dd1f5ee4e17b6f | licenses=["MIT"] | count=null | events: null
max_issues: path=batch/batch/driver/canceller.py | repo=mattsolo1/hail @ ccec08b3dfcf5da83fa09a4576dd1f5ee4e17b6f | licenses=["MIT"] | count=null | events: null
max_forks: path=batch/batch/driver/canceller.py | repo=mattsolo1/hail @ ccec08b3dfcf5da83fa09a4576dd1f5ee4e17b6f | licenses=["MIT"] | count=null | events: null
content:
import logging
import asyncio
from hailtop.utils import (
WaitableSharedPool,
retry_long_running,
run_if_changed,
AsyncWorkerPool,
time_msecs,
periodically_call,
)
from hailtop import aiotools
from gear import Database
from .job import unschedule_job, mark_job_complete
from .instance_collection import InstanceCollectionManager
from ..utils import Box
log = logging.getLogger('canceller')
class Canceller:
@staticmethod
async def create(app):
c = Canceller(app)
c.task_manager.ensure_future(
retry_long_running(
'cancel_cancelled_ready_jobs_loop',
run_if_changed,
c.cancel_ready_state_changed,
c.cancel_cancelled_ready_jobs_loop_body,
)
)
c.task_manager.ensure_future(
retry_long_running(
'cancel_cancelled_creating_jobs_loop',
run_if_changed,
c.cancel_creating_state_changed,
c.cancel_cancelled_creating_jobs_loop_body,
)
)
c.task_manager.ensure_future(
retry_long_running(
'cancel_cancelled_running_jobs_loop',
run_if_changed,
c.cancel_running_state_changed,
c.cancel_cancelled_running_jobs_loop_body,
)
)
c.task_manager.ensure_future(periodically_call(60, c.cancel_orphaned_attempts_loop_body))
return c
def __init__(self, app):
self.app = app
self.cancel_ready_state_changed: asyncio.Event = app['cancel_ready_state_changed']
self.cancel_creating_state_changed: asyncio.Event = app['cancel_creating_state_changed']
self.cancel_running_state_changed: asyncio.Event = app['cancel_running_state_changed']
self.db: Database = app['db']
self.async_worker_pool: AsyncWorkerPool = self.app['async_worker_pool']
self.inst_coll_manager: InstanceCollectionManager = app['driver'].inst_coll_manager
self.task_manager = aiotools.BackgroundTaskManager()
def shutdown(self):
try:
self.task_manager.shutdown()
finally:
self.async_worker_pool.shutdown()
async def cancel_cancelled_ready_jobs_loop_body(self):
records = self.db.select_and_fetchall(
'''
SELECT user, CAST(COALESCE(SUM(n_cancelled_ready_jobs), 0) AS SIGNED) AS n_cancelled_ready_jobs
FROM user_inst_coll_resources
GROUP BY user
HAVING n_cancelled_ready_jobs > 0;
''',
timer_description='in cancel_cancelled_ready_jobs: aggregate n_cancelled_ready_jobs',
)
user_n_cancelled_ready_jobs = {record['user']: record['n_cancelled_ready_jobs'] async for record in records}
total = sum(user_n_cancelled_ready_jobs.values())
if total == 0:
should_wait = True
return should_wait
user_share = {
user: max(int(300 * user_n_jobs / total + 0.5), 20)
for user, user_n_jobs in user_n_cancelled_ready_jobs.items()
}
async def user_cancelled_ready_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled
FROM batches
LEFT JOIN batches_cancelled
ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in cancel_cancelled_ready_jobs: get {user} running batches',
):
if batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_ready_jobs: get {user} batch {batch["id"]} ready cancelled jobs (1)',
):
record['batch_id'] = batch['id']
yield record
else:
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND cancelled = 1
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_ready_jobs: get {user} batch {batch["id"]} ready cancelled jobs (2)',
):
record['batch_id'] = batch['id']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, share in user_share.items():
remaining = Box(share)
async for record in user_cancelled_ready_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
log.info(f'cancelling job {id}')
async def cancel_with_error_handling(app, batch_id, job_id, id):
try:
resources = []
await mark_job_complete(
app, batch_id, job_id, None, None, 'Cancelled', None, None, None, 'cancelled', resources
)
except Exception:
log.info(f'error while cancelling job {id}', exc_info=True)
await waitable_pool.call(cancel_with_error_handling, self.app, batch_id, job_id, id)
remaining.value -= 1
if remaining.value <= 0:
should_wait = False
break
await waitable_pool.wait()
return should_wait
async def cancel_cancelled_creating_jobs_loop_body(self):
records = self.db.select_and_fetchall(
'''
SELECT user, CAST(COALESCE(SUM(n_cancelled_creating_jobs), 0) AS SIGNED) AS n_cancelled_creating_jobs
FROM user_inst_coll_resources
GROUP BY user
HAVING n_cancelled_creating_jobs > 0;
''',
timer_description='in cancel_cancelled_creating_jobs: aggregate n_cancelled_creating_jobs',
)
user_n_cancelled_creating_jobs = {
record['user']: record['n_cancelled_creating_jobs'] async for record in records
}
total = sum(user_n_cancelled_creating_jobs.values())
if total == 0:
should_wait = True
return should_wait
user_share = {
user: max(int(300 * user_n_jobs / total + 0.5), 20)
for user, user_n_jobs in user_n_cancelled_creating_jobs.items()
}
async def user_cancelled_creating_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT batches.id
FROM batches
INNER JOIN batches_cancelled
ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in cancel_cancelled_creating_jobs: get {user} cancelled batches',
):
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, attempts.attempt_id, attempts.instance_name
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
STRAIGHT_JOIN attempts
ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
WHERE jobs.batch_id = %s AND state = 'Creating' AND always_run = 0 AND cancelled = 0
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_creating_jobs: get {user} batch {batch["id"]} creating cancelled jobs',
):
record['batch_id'] = batch['id']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, share in user_share.items():
remaining = Box(share)
async for record in user_cancelled_creating_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
attempt_id = record['attempt_id']
instance_name = record['instance_name']
id = (batch_id, job_id)
async def cancel_with_error_handling(app, batch_id, job_id, attempt_id, instance_name, id):
try:
resources = []
end_time = time_msecs()
await mark_job_complete(
app,
batch_id,
job_id,
attempt_id,
instance_name,
'Cancelled',
None,
None,
end_time,
'cancelled',
resources,
)
instance = self.inst_coll_manager.get_instance(instance_name)
if instance is None:
log.warning(f'in cancel_cancelled_creating_jobs: unknown instance {instance_name}')
return
await instance.inst_coll.call_delete_instance(instance, 'cancelled')
except Exception:
log.info(f'cancelling creating job {id} on instance {instance_name}', exc_info=True)
await waitable_pool.call(
cancel_with_error_handling, self.app, batch_id, job_id, attempt_id, instance_name, id
)
remaining.value -= 1
if remaining.value <= 0:
should_wait = False
break
await waitable_pool.wait()
return should_wait
async def cancel_cancelled_running_jobs_loop_body(self):
records = self.db.select_and_fetchall(
'''
SELECT user, CAST(COALESCE(SUM(n_cancelled_running_jobs), 0) AS SIGNED) AS n_cancelled_running_jobs
FROM user_inst_coll_resources
GROUP BY user
HAVING n_cancelled_running_jobs > 0;
''',
timer_description='in cancel_cancelled_running_jobs: aggregate n_cancelled_running_jobs',
)
user_n_cancelled_running_jobs = {record['user']: record['n_cancelled_running_jobs'] async for record in records}
total = sum(user_n_cancelled_running_jobs.values())
if total == 0:
should_wait = True
return should_wait
user_share = {
user: max(int(300 * user_n_jobs / total + 0.5), 20)
for user, user_n_jobs in user_n_cancelled_running_jobs.items()
}
async def user_cancelled_running_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT batches.id
FROM batches
INNER JOIN batches_cancelled
ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in cancel_cancelled_running_jobs: get {user} cancelled batches',
):
async for record in self.db.select_and_fetchall(
'''
SELECT jobs.job_id, attempts.attempt_id, attempts.instance_name
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
STRAIGHT_JOIN attempts
ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
WHERE jobs.batch_id = %s AND state = 'Running' AND always_run = 0 AND cancelled = 0
LIMIT %s;
''',
(batch['id'], remaining.value),
timer_description=f'in cancel_cancelled_running_jobs: get {user} batch {batch["id"]} running cancelled jobs',
):
record['batch_id'] = batch['id']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, share in user_share.items():
remaining = Box(share)
async for record in user_cancelled_running_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
async def unschedule_with_error_handling(app, record, instance_name, id):
try:
await unschedule_job(app, record)
except Exception:
log.info(f'unscheduling job {id} on instance {instance_name}', exc_info=True)
await waitable_pool.call(unschedule_with_error_handling, self.app, record, record['instance_name'], id)
remaining.value -= 1
if remaining.value <= 0:
should_wait = False
break
await waitable_pool.wait()
return should_wait
async def cancel_orphaned_attempts_loop_body(self):
log.info('cancelling orphaned attempts')
waitable_pool = WaitableSharedPool(self.async_worker_pool)
n_unscheduled = 0
async for record in self.db.select_and_fetchall(
'''
SELECT attempts.*
FROM attempts
INNER JOIN jobs ON attempts.batch_id = jobs.batch_id AND attempts.job_id = jobs.job_id
LEFT JOIN instances ON attempts.instance_name = instances.name
WHERE attempts.start_time IS NOT NULL
AND attempts.end_time IS NULL
AND ((jobs.state != 'Running' AND jobs.state != 'Creating') OR jobs.attempt_id != attempts.attempt_id)
AND instances.`state` = 'active'
ORDER BY attempts.start_time ASC
LIMIT 300;
''',
timer_description='in cancel_orphaned_attempts',
):
batch_id = record['batch_id']
job_id = record['job_id']
attempt_id = record['attempt_id']
instance_name = record['instance_name']
id = (batch_id, job_id)
n_unscheduled += 1
async def unschedule_with_error_handling(app, record, instance_name, id, attempt_id):
try:
await unschedule_job(app, record)
except Exception:
log.info(
f'unscheduling job {id} with orphaned attempt {attempt_id} on instance {instance_name}',
exc_info=True,
)
await waitable_pool.call(unschedule_with_error_handling, self.app, record, instance_name, id, attempt_id)
await waitable_pool.wait()
log.info(f'cancelled {n_unscheduled} orphaned attempts')
avg_line_length: 38.204134 | max_line_length: 133 | alphanum_fraction: 0.596483

hexsha: 0cd22bf2a4222316017609c754ef754694cb9df1 | size: 525 | ext: py | lang: Python
max_stars: path=Study_Ahead/Templates_Basics/starter_temp.py | repo=AnikaZN/AnikaDS @ 02c69894b242abdc88b6e1ac86407a02a3d43f92 | licenses=["MIT"] | count=null | events: null
max_issues: path=Study_Ahead/Templates_Basics/starter_temp.py | repo=AnikaZN/AnikaDS @ 02c69894b242abdc88b6e1ac86407a02a3d43f92 | licenses=["MIT"] | count=null | events: null
max_forks: path=Study_Ahead/Templates_Basics/starter_temp.py | repo=AnikaZN/AnikaDS @ 02c69894b242abdc88b6e1ac86407a02a3d43f92 | licenses=["MIT"] | count=null | events: null
content:
import os
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template('home.html')
@app.route('/puppy/<name>')
def puppy(name):
return render_template('puppy_name.html', name=name)
@app.route('/another/<name>')
def another(name):
letters = list(name)
puppy_dict = {'puppy_name': name}
return render_template('puppy_name.html', name=name, mylist=letters,
my_dict=puppy_dict)
if __name__ == "__main__":
app.run()
avg_line_length: 21 | max_line_length: 72 | alphanum_fraction: 0.657143
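A lightweight way to exercise the routes in the record above without starting the development server is Flask's built-in test client. This is only a sketch: it assumes the `home.html` and `puppy_name.html` templates referenced by the app exist in a `templates/` directory, and `app` refers to the Flask instance defined above:

```python
# Sketch only: requires the templates referenced by the routes to be present.
with app.test_client() as client:
    print(client.get("/").status_code)           # renders home.html
    print(client.get("/puppy/Rex").status_code)  # renders puppy_name.html with name="Rex"
```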

hexsha: 2e93c7ab3c8ec8793db42c1a7a5aff91d2690618 | size: 2,563 | ext: py | lang: Python
max_stars: path=book_figures/chapter5/fig_outlier_distribution.py | repo=StKyr/astroML_figures @ 45e9748335e0cd854d09319dff0e43ecd70e7b61 | licenses=["BSD-2-Clause"] | count=6 | events: 2019-08-31T16:43:43.000Z to 2021-07-10T06:06:20.000Z
max_issues: path=book_figures/chapter5/fig_outlier_distribution.py | repo=StKyr/astroML_figures @ 45e9748335e0cd854d09319dff0e43ecd70e7b61 | licenses=["BSD-2-Clause"] | count=34 | events: 2018-09-10T22:35:07.000Z to 2022-02-08T21:17:39.000Z
max_forks: path=book_figures/chapter5/fig_outlier_distribution.py | repo=StKyr/astroML_figures @ 45e9748335e0cd854d09319dff0e43ecd70e7b61 | licenses=["BSD-2-Clause"] | count=10 | events: 2017-06-22T09:21:19.000Z to 2020-01-26T03:54:26.000Z
content:
"""
Gaussian Distribution with Outliers
-----------------------------------
This figure shows the distribution of points drawn from a narrow
Gaussian distribution, with 20% "outliers" drawn from a wider
Gaussian distribution. Over-plotted are the robust and non-robust
estimators of the mean and standard deviation.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm, anderson
from astroML.stats import mean_sigma, median_sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Create distribution
Npts = int(1E6)
f_out = 0.2
N_out = int(f_out * Npts)
sigma1 = 1
sigma2 = 3
np.random.seed(1)
x = np.hstack((np.random.normal(0, sigma1, Npts - N_out),
np.random.normal(0, sigma2, N_out)))
#------------------------------------------------------------
# Compute anderson-darling test
A2, sig, crit = anderson(x)
print("anderson-darling A^2 = {0:.1f}".format(A2))
#------------------------------------------------------------
# Compute non-robust and robust point statistics
mu_sample, sig_sample = mean_sigma(x)
med_sample, sigG_sample = median_sigmaG(x)
#------------------------------------------------------------
# Plot the results
fig, ax = plt.subplots(figsize=(5, 3.75))
# histogram of data
ax.hist(x, 100, histtype='stepfilled', alpha=0.2,
color='k', density=True)
# best-fit normal curves
x_sample = np.linspace(-15, 15, 1000)
ax.plot(x_sample, norm(mu_sample, sig_sample).pdf(x_sample), '-k',
        label=r'$\sigma$ fit')
ax.plot(x_sample, norm(med_sample, sigG_sample).pdf(x_sample), '--k',
        label=r'$\sigma_G$ fit')
ax.legend()
ax.set_xlim(-8, 8)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')
plt.show()
avg_line_length: 32.858974 | max_line_length: 79 | alphanum_fraction: 0.62817
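The point of the figure in the record above, that the sample standard deviation is inflated by the 20% outlier component while the quartile-based width estimate is not, can be reproduced with plain NumPy. The 0.7413 factor converts a Gaussian's interquartile range into its standard deviation, matching astroML's `median_sigmaG`; the sample sizes below are illustrative:

```python
import numpy as np

rng = np.random.default_rng(1)
x = np.hstack([rng.normal(0, 1, 800_000),   # narrow Gaussian core
               rng.normal(0, 3, 200_000)])  # 20% outliers from a wider Gaussian

mu, sigma = x.mean(), x.std()                   # non-robust estimates
q25, q50, q75 = np.percentile(x, [25, 50, 75])
sigma_G = 0.7413 * (q75 - q25)                  # robust width estimate

print(f"mean={mu:.3f}  std={sigma:.3f}")           # std is pulled up toward ~1.6
print(f"median={q50:.3f}  sigma_G={sigma_G:.3f}")  # sigma_G stays much closer to 1
```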

hexsha: f0d7669235d99d5b4974773e30f421bcce3f43ec | size: 85 | ext: py | lang: Python
max_stars: path=run.py | repo=ouriquegustavo/twitch_tower_defense @ 20e1b41e3f1c363856515eda1c2a9288ce42e442 | licenses=["MIT"] | count=null | events: null
max_issues: path=run.py | repo=ouriquegustavo/twitch_tower_defense @ 20e1b41e3f1c363856515eda1c2a9288ce42e442 | licenses=["MIT"] | count=null | events: null
max_forks: path=run.py | repo=ouriquegustavo/twitch_tower_defense @ 20e1b41e3f1c363856515eda1c2a9288ce42e442 | licenses=["MIT"] | count=null | events: null
content:
from main.game import Game
if __name__ == "__main__":
main_server = Game()
avg_line_length: 14.166667 | max_line_length: 26 | alphanum_fraction: 0.647059

hexsha: 688cf0d89ccced4ccb7d1e234b017329f99a8921 | size: 2,953 | ext: py | lang: Python
max_stars: path=test/problems/sampling/protein_folding/peptide/beads/test_bead_main.py | repo=jschuhmac/qiskit-nature @ b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | licenses=["Apache-2.0"] | count=132 | events: 2021-01-28T14:51:11.000Z to 2022-03-25T21:10:47.000Z
max_issues: path=test/problems/sampling/protein_folding/peptide/beads/test_bead_main.py | repo=jschuhmac/qiskit-nature @ b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | licenses=["Apache-2.0"] | count=449 | events: 2021-01-28T19:57:43.000Z to 2022-03-31T17:01:50.000Z
max_forks: path=test/problems/sampling/protein_folding/peptide/beads/test_bead_main.py | repo=jschuhmac/qiskit-nature @ b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | licenses=["Apache-2.0"] | count=109 | events: 2021-01-28T13:17:46.000Z to 2022-03-30T23:53:39.000Z
content:
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests Main Bead."""
from test import QiskitNatureTestCase
from test.problems.sampling.protein_folding.resources.file_parser import read_expected_file
from qiskit.opflow import I, Z
from qiskit_nature.problems.sampling.protein_folding.peptide.pauli_ops_builder import (
_build_full_identity,
)
from qiskit_nature.problems.sampling.protein_folding.peptide.beads.main_bead import MainBead
from qiskit_nature.problems.sampling.protein_folding.peptide.chains.side_chain import SideChain
PATH = "problems/sampling/protein_folding/resources/test_bead_main"
class TestMainBead(QiskitNatureTestCase):
"""Tests Main Bead."""
def test_main_bead_constructor(self):
"""Tests that a MainBead is created."""
main_chain_len = 4
num_turn_qubits = 2 * (main_chain_len - 1)
main_bead_id = 3
residue_type = "S"
turn_qubits = (
0.5 * _build_full_identity(num_turn_qubits) - 0.5 * (I ^ Z ^ I ^ I ^ I ^ I),
0.5 * _build_full_identity(num_turn_qubits) - 0.5 * (Z ^ I ^ I ^ I ^ I ^ I),
)
side_chain_residue_sequences = ["S"]
side_chain = SideChain(main_chain_len, main_bead_id, side_chain_residue_sequences)
main_bead = MainBead(main_bead_id, residue_type, turn_qubits, side_chain)
self.assertEqual(main_bead.side_chain, side_chain)
indic_0, indic_1, indic_2, indic_3 = main_bead.indicator_functions
expected_path_indic_0 = self.get_resource_path(
"test_main_bead_constructor_expected_indic_0",
PATH,
)
expected_indic_0 = read_expected_file(expected_path_indic_0)
expected_path_indic_1 = self.get_resource_path(
"test_main_bead_constructor_expected_indic_1",
PATH,
)
expected_indic_1 = read_expected_file(expected_path_indic_1)
expected_path_indic_2 = self.get_resource_path(
"test_main_bead_constructor_expected_indic_2",
PATH,
)
expected_indic_2 = read_expected_file(expected_path_indic_2)
expected_path_indic_3 = self.get_resource_path(
"test_main_bead_constructor_expected_indic_3",
PATH,
)
expected_indic_3 = read_expected_file(expected_path_indic_3)
self.assertEqual(indic_0, expected_indic_0)
self.assertEqual(indic_1, expected_indic_1)
self.assertEqual(indic_2, expected_indic_2)
self.assertEqual(indic_3, expected_indic_3)
avg_line_length: 40.452055 | max_line_length: 95 | alphanum_fraction: 0.716559

hexsha: c3062d30168475f93a4fbbdb94e37cadf5670df7 | size: 7,471 | ext: py | lang: Python
max_stars: path=lib/matplotlib/tri/triangulation.py | repo=cboos/matplotlib @ 2bb91ce669c92e118f7a87ea9437a3efaa4ead39 | licenses=["MIT", "PSF-2.0", "BSD-3-Clause"] | count=1 | events: 2015-02-13T19:39:31.000Z to 2015-02-13T19:39:31.000Z
max_issues: path=lib/matplotlib/tri/triangulation.py | repo=cboos/matplotlib @ 2bb91ce669c92e118f7a87ea9437a3efaa4ead39 | licenses=["MIT", "PSF-2.0", "BSD-3-Clause"] | count=null | events: null
max_forks: path=lib/matplotlib/tri/triangulation.py | repo=cboos/matplotlib @ 2bb91ce669c92e118f7a87ea9437a3efaa4ead39 | licenses=["MIT", "PSF-2.0", "BSD-3-Clause"] | count=null | events: null
content:
import matplotlib.delaunay as delaunay
import matplotlib._tri as _tri
import numpy as np
class Triangulation(object):
"""
An unstructured triangular grid consisting of npoints points and
ntri triangles. The triangles can either be specified by the user
or automatically generated using a Delaunay triangulation.
Read-only attributes:
*x*: array of shape (npoints).
x-coordinates of grid points.
*y*: array of shape (npoints).
y-coordinates of grid points.
*triangles*: integer array of shape (ntri,3).
For each triangle, the indices of the three points that make
up the triangle, ordered in an anticlockwise manner.
*mask*: optional boolean array of shape (ntri).
Which triangles are masked out.
*edges*: integer array of shape (?,2).
All edges of non-masked triangles. Each edge is the start
point index and end point index. Each edge (start,end and
end,start) appears only once.
*neighbors*: integer array of shape (ntri,3).
For each triangle, the indices of the three triangles that
share the same edges, or -1 if there is no such neighboring
triangle. neighbors[i,j] is the triangle that is the neighbor
to the edge from point index triangles[i,j] to point index
triangles[i,(j+1)%3].
"""
def __init__(self, x, y, triangles=None, mask=None):
"""
Create a Triangulation object.
The first two arguments must be:
*x*, *y*: arrays of shape (npoints).
Point coordinates.
Optional arguments (args or keyword args):
*triangles*: integer array of shape (ntri,3).
For each triangle, the indices of the three points that make
up the triangle. If the points are ordered in a clockwise
manner, they are converted to anticlockwise.
If not specified, matplotlib.delaunay is used to create a
Delaunay triangulation of the points.
*mask*: optional boolean array of shape (ntri).
Which triangles are masked out.
"""
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x and y must be equal-length 1-D arrays")
self.mask = None
self._edges = None
self._neighbors = None
if triangles is None:
# No triangulation specified, so use matplotlib.delaunay.
dt = delaunay.Triangulation(self.x, self.y)
self.triangles = np.asarray(dt.triangle_nodes, dtype=np.int32)
if mask is None:
self._edges = np.asarray(dt.edge_db, dtype=np.int32)
# Delaunay triangle_neighbors uses different edge indexing,
# so convert.
neighbors = np.asarray(dt.triangle_neighbors, dtype=np.int32)
self._neighbors = np.roll(neighbors, 1, axis=1)
else:
# Triangulation specified.
self.triangles = np.asarray(triangles, dtype=np.int32)
if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
raise ValueError('triangles must be a (?,3) array')
if self.triangles.max() >= len(self.x):
raise ValueError('triangles max element is out of bounds')
if self.triangles.min() < 0:
raise ValueError('triangles min element is out of bounds')
if mask is not None:
self.mask = np.asarray(mask, dtype=np.bool)
if len(self.mask.shape) != 1 or \
self.mask.shape[0] != self.triangles.shape[0]:
raise ValueError('mask array must have same length as '
'triangles array')
# Underlying C++ object is not created until first needed.
self._cpp_triangulation = None
@property
def edges(self):
if self._edges is None:
self._edges = self.get_cpp_triangulation().get_edges()
return self._edges
def get_cpp_triangulation(self):
"""
Return the underlying C++ Triangulation object, creating it
if necessary.
"""
if self._cpp_triangulation is None:
self._cpp_triangulation = _tri.Triangulation(
self.x, self.y, self.triangles, self.mask, self._edges,
self._neighbors)
return self._cpp_triangulation
def get_masked_triangles(self):
"""
Return an array of triangles that are not masked.
"""
if self.mask is not None:
return self.triangles.compress(1-self.mask, axis=0)
else:
return self.triangles
@staticmethod
def get_from_args_and_kwargs(*args, **kwargs):
"""
Return a Triangulation object from the args and kwargs, and
the remaining args and kwargs with the consumed values removed.
There are two alternatives: either the first argument is a
Triangulation object, in which case it is returned, or the args
and kwargs are sufficient to create a new Triangulation to
return. In the latter case, see Triangulation.__init__ for
the possible args and kwargs.
"""
if isinstance(args[0], Triangulation):
triangulation = args[0]
args = args[1:]
else:
x = args[0]
y = args[1]
args = args[2:] # Consumed first two args.
# Check triangles in kwargs then args.
triangles = kwargs.pop('triangles', None)
from_args = False
if triangles is None and len(args) > 0:
triangles = args[0]
from_args = True
if triangles is not None:
try:
triangles = np.asarray(triangles, dtype=np.int32)
except ValueError:
triangles = None
if triangles is not None and (triangles.ndim != 2 or
triangles.shape[1] != 3):
triangles = None
if triangles is not None and from_args:
args = args[1:] # Consumed first item in args.
# Check for mask in kwargs.
mask = kwargs.pop('mask', None)
triangulation = Triangulation(x, y, triangles, mask)
return triangulation, args, kwargs
@property
def neighbors(self):
if self._neighbors is None:
            self._neighbors = self.get_cpp_triangulation().get_neighbors()
return self._neighbors
def set_mask(self, mask):
"""
Set or clear the mask array. This is either None, or a boolean
array of shape (ntri).
"""
if mask is None:
self.mask = None
else:
self.mask = np.asarray(mask, dtype=np.bool)
if len(self.mask.shape) != 1 or \
self.mask.shape[0] != self.triangles.shape[0]:
raise ValueError('mask array must have same length as '
'triangles array')
# Set mask in C++ Triangulation.
if self._cpp_triangulation is not None:
self._cpp_triangulation.set_mask(self.mask)
# Clear derived fields so they are recalculated when needed.
self._edges = None
self._neighbors = None
avg_line_length: 37.732323 | max_line_length: 77 | alphanum_fraction: 0.590684
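The record above is an early in-tree version of what matplotlib now ships as `matplotlib.tri.Triangulation`, with the same constructor signature. A minimal usage sketch against the current public API:

```python
import numpy as np
import matplotlib.tri as mtri

x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
triangles = np.array([[0, 1, 2], [0, 2, 3]])  # two anticlockwise triangles forming a unit square

tri = mtri.Triangulation(x, y, triangles)
print(tri.edges)      # unique edges of the non-masked triangles
print(tri.neighbors)  # -1 where a triangle has no neighbor across an edge
```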

hexsha: 1ce1eac9137b3f22a6077830c830b6da4ce18911 | size: 8,492 | ext: py | lang: Python
max_stars: path=gmn/src/d1_gmn/app/views/slice.py | repo=DataONEorg/d1_python @ dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | licenses=["Apache-2.0"] | count=15 | events: 2016-10-28T13:56:52.000Z to 2022-01-31T19:07:49.000Z
max_issues: path=gmn/src/d1_gmn/app/views/slice.py | repo=DataONEorg/d1_python @ dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | licenses=["Apache-2.0"] | count=56 | events: 2017-03-16T03:52:32.000Z to 2022-03-12T01:05:28.000Z
max_forks: path=gmn/src/d1_gmn/app/views/slice.py | repo=DataONEorg/d1_python @ dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | licenses=["Apache-2.0"] | count=11 | events: 2016-05-31T16:22:02.000Z to 2020-10-05T14:37:10.000Z
content:
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle slicing / paging of multi-page result set."""
import copy
import hashlib
import logging
import d1_common.const
import d1_common.types
import d1_common.types.exceptions
import d1_common.url
import d1_common.util
import d1_common.utils.ulog
import django.conf
import django.core.cache
import django.db.models
# import logging
def add_slice_filter(request, query, total_int):
url_dict = d1_common.url.parseUrl(request.get_full_path())
start_int = _get_and_assert_slice_param(url_dict, "start", 0)
count_int = _get_and_assert_slice_param(
url_dict, "count", d1_common.const.DEFAULT_SLICE_SIZE
)
_assert_valid_start(start_int, count_int, total_int)
count_int = _adjust_count_if_required(start_int, count_int, total_int)
authn_subj_list = _get_authenticated_subj_list(request)
logging.debug(
"Adding slice filter. start={} count={} total={} subj={}".format(
start_int, count_int, total_int, ",".join(authn_subj_list)
)
)
last_ts_tup = _cache_get_last_in_slice(
url_dict, start_int, total_int, authn_subj_list
)
if last_ts_tup:
query = _add_fast_slice_filter(query, last_ts_tup, count_int)
else:
query = _add_fallback_slice_filter(query, start_int, count_int, total_int)
return query, start_int, count_int
def cache_add_last_in_slice(request, query, start_int, total_int, sort_field_list):
""""""
url_dict = d1_common.url.parseUrl(request.get_full_path())
authn_subj_list = _get_authenticated_subj_list(request)
key_str = _gen_cache_key_for_slice(
url_dict, start_int + query.count(), total_int, authn_subj_list
)
last_model = query[query.count() - 1] if query.count() else None
last_ts_tup = (
tuple([getattr(last_model, f) for f in sort_field_list]) if last_model else None
)
django.core.cache.cache.set(key_str, last_ts_tup)
logging.debug('Cache set. key="{}" last={}'.format(key_str, last_ts_tup))
# Private
def _get_and_assert_slice_param(url_dict, param_name, default_int):
"""Return ``param_str`` converted to an int.
If str cannot be converted to int or int is not zero or positive, raise
InvalidRequest.
"""
param_str = url_dict["query"].get(param_name, default_int)
try:
n = int(param_str)
except ValueError:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Slice parameter is not a valid integer. {}="{}"'.format(
param_name, param_str
),
)
if n < 0:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Slice parameter cannot be a negative number. {}="{}"'.format(
param_name, param_str
),
)
return n
def _assert_valid_start(start_int, count_int, total_int):
"""Assert that the number of objects visible to the session subject is higher than
the requested start position for the slice.
This ensures that it's possible to create a valid slice.
"""
if total_int and start_int >= total_int:
raise d1_common.types.exceptions.InvalidRequest(
0,
"Requested a non-existing slice. start={} count={} total={}".format(
start_int, count_int, total_int
),
)
def _adjust_count_if_required(start_int, count_int, total_int):
"""Adjust requested object count down if there are not enough objects visible to the
session subjects to cover the requested slice start and count.
Preconditions: start is verified to be lower than the number of visible objects,
making it possible to create a valid slice by adjusting count.
"""
if start_int + count_int > total_int:
count_int = total_int - start_int
count_int = min(count_int, django.conf.settings.MAX_SLICE_ITEMS)
return count_int
# def _get_slice_params(query_dict):
# return query_dict['start'], query_dict['count']
def _get_authenticated_subj_list(request):
return list(sorted(request.all_subjects_set))
def _add_fast_slice_filter(query, last_ts_tup, count_int):
logging.debug(
"Adding fast slice filter. last={} count={}".format(last_ts_tup, count_int)
)
last_timestamp, last_id = last_ts_tup
return query.filter(
django.db.models.Q(timestamp__gt=last_timestamp)
| django.db.models.Q(timestamp__exact=last_timestamp, id__gt=last_id)
)[:count_int]
def _add_fallback_slice_filter(query, start_int, count_int, total_int):
"""Create a slice of a query based on request start and count parameters.
This adds `OFFSET <start> LIMIT <count>` to the SQL query, which causes slicing to
run very slowly on large result sets.
"""
logging.debug(
"Adding fallback slice filter. start={} count={} total={} ".format(
start_int, count_int, total_int
)
)
if not count_int:
return query.none()
else:
return query[start_int : start_int + count_int]
def _cache_get_last_in_slice(url_dict, start_int, total_int, authn_subj_list):
"""Return None if cache entry does not exist."""
key_str = _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list)
# TODO: Django docs state that cache.get() should return None on unknown key.
try:
last_ts_tup = django.core.cache.cache.get(key_str)
except KeyError:
last_ts_tup = None
logging.debug('Cache get. key="{}" -> last_ts_tup={}'.format(key_str, last_ts_tup))
return last_ts_tup
def _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list):
"""Generate cache key for the REST URL the client is currently accessing or is
expected to access in order to get the slice starting at the given ``start_int`` of
a multi-slice result set.
When used for finding the key to check in the current call, ``start_int`` is
0, or the start that was passed in the current call.
When used for finding the key to set for the anticipated call, ``start_int`` is
current ``start_int`` + ``count_int``, the number of objects the current call will
return.
The URL for the slice is the same as for the current slice, except that the
`start` query parameter has been increased by the number of items returned in
the current slice.
Except for advancing the start value and potentially adjusting the desired
slice size, it doesn't make sense for the client to change the REST URL during
slicing, but such queries are supported. They will, however, trigger
potentially expensive database queries to find the current slice position.
To support adjustments in desired slice size during slicing, the count is not
used when generating the key.
The active subjects are used in the key in order to prevent potential security
issues if authenticated subjects change during slicing.
The url_dict is normalized by encoding it to a JSON string with sorted keys. A
hash of the JSON is used for better distribution in a hash map and to avoid
the 256 bytes limit on keys in some caches.
"""
# logging.debug('Gen key. result_record_count={}'.format(result_record_count))
key_url_dict = copy.deepcopy(url_dict)
key_url_dict["query"].pop("start", None)
key_url_dict["query"].pop("count", None)
key_json = d1_common.util.serialize_to_normalized_compact_json(
{
"url_dict": key_url_dict,
"start": start_int,
"total": total_int,
"subject": authn_subj_list,
}
)
logging.debug("key_json={}".format(key_json))
return hashlib.sha256(key_json.encode("utf-8")).hexdigest()
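if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch only (not part of the GMN module): the cache key is a
    # SHA-256 digest of a normalized, compact JSON rendering of the slice
    # context. The real code uses
    # d1_common.util.serialize_to_normalized_compact_json(); json.dumps() with
    # sorted keys and compact separators is assumed here as a close stand-in.
    import hashlib
    import json

    demo_key_json = json.dumps(
        {
            "url_dict": {"path": "/object", "query": {}},
            "start": 0,
            "total": 1000,
            "subject": ["public"],
        },
        sort_keys=True,
        separators=(",", ":"),
    )
    print(hashlib.sha256(demo_key_json.encode("utf-8")).hexdigest())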
| 36.603448
| 88
| 0.705488
|
57fd3a540d3abeab44712652f804e1ee0bc3dfde
| 74
|
py
|
Python
|
test.py
|
dmcblue/balanced_teams
|
d1bbec56fa7b64853632343461c1c272c147ae4d
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
dmcblue/balanced_teams
|
d1bbec56fa7b64853632343461c1c272c147ae4d
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
dmcblue/balanced_teams
|
d1bbec56fa7b64853632343461c1c272c147ae4d
|
[
"Apache-2.0"
] | 1
|
2021-03-28T23:11:47.000Z
|
2021-03-28T23:11:47.000Z
|
import os
os.system("python -m unittest discover -s test -p '*_test.py'")
| 24.666667
| 63
| 0.702703
|
1f9172926d4626341236eec0431dec7ef34ba0ad
| 5,593
|
py
|
Python
|
kubernetes/client/models/v1_resource_quota_spec.py
|
TomasTomecek/kubernetes-python
|
c37c074303a13c72662b9201ccc023fb0ca45755
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_resource_quota_spec.py
|
TomasTomecek/kubernetes-python
|
c37c074303a13c72662b9201ccc023fb0ca45755
|
[
"Apache-2.0"
] | 1
|
2021-04-30T20:41:19.000Z
|
2021-04-30T20:41:19.000Z
|
venv/lib/python2.7/site-packages/kubernetes/client/models/v1_resource_quota_spec.py
|
784134748/kubernetes-install
|
5df59632c2619632e422948b667fb68eab9ff5be
|
[
"MIT"
] | 1
|
2020-05-09T07:16:55.000Z
|
2020-05-09T07:16:55.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ResourceQuotaSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'hard': 'dict(str, str)',
'scope_selector': 'V1ScopeSelector',
'scopes': 'list[str]'
}
attribute_map = {
'hard': 'hard',
'scope_selector': 'scopeSelector',
'scopes': 'scopes'
}
def __init__(self, hard=None, scope_selector=None, scopes=None):
"""
V1ResourceQuotaSpec - a model defined in Swagger
"""
self._hard = None
self._scope_selector = None
self._scopes = None
self.discriminator = None
if hard is not None:
self.hard = hard
if scope_selector is not None:
self.scope_selector = scope_selector
if scopes is not None:
self.scopes = scopes
@property
def hard(self):
"""
Gets the hard of this V1ResourceQuotaSpec.
hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
:return: The hard of this V1ResourceQuotaSpec.
:rtype: dict(str, str)
"""
return self._hard
@hard.setter
def hard(self, hard):
"""
Sets the hard of this V1ResourceQuotaSpec.
hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
:param hard: The hard of this V1ResourceQuotaSpec.
:type: dict(str, str)
"""
self._hard = hard
@property
def scope_selector(self):
"""
Gets the scope_selector of this V1ResourceQuotaSpec.
scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
:return: The scope_selector of this V1ResourceQuotaSpec.
:rtype: V1ScopeSelector
"""
return self._scope_selector
@scope_selector.setter
def scope_selector(self, scope_selector):
"""
Sets the scope_selector of this V1ResourceQuotaSpec.
scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
:param scope_selector: The scope_selector of this V1ResourceQuotaSpec.
:type: V1ScopeSelector
"""
self._scope_selector = scope_selector
@property
def scopes(self):
"""
Gets the scopes of this V1ResourceQuotaSpec.
A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.
:return: The scopes of this V1ResourceQuotaSpec.
:rtype: list[str]
"""
return self._scopes
@scopes.setter
def scopes(self, scopes):
"""
Sets the scopes of this V1ResourceQuotaSpec.
A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.
:param scopes: The scopes of this V1ResourceQuotaSpec.
:type: list[str]
"""
self._scopes = scopes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ResourceQuotaSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
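if __name__ == "__main__":
    # Illustrative sketch (not part of the generated client): build a quota spec
    # and round-trip it through to_dict()/to_str(). The resource names and scope
    # value below are examples only.
    demo_spec = V1ResourceQuotaSpec(
        hard={"requests.cpu": "4", "requests.memory": "8Gi"},
        scopes=["NotTerminating"],
    )
    print(demo_spec.to_str())
    print(demo_spec == V1ResourceQuotaSpec(hard={"requests.cpu": "4"}))  # False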
| 30.562842
| 287
| 0.603254
|
ecb0184d295937350be675ffab084d3706eec6a5
| 75,978
|
py
|
Python
|
fugue/workflow/workflow.py
|
gityow/fugue
|
e975625b33766d8b9dc64c6954871569b59367ec
|
[
"Apache-2.0"
] | null | null | null |
fugue/workflow/workflow.py
|
gityow/fugue
|
e975625b33766d8b9dc64c6954871569b59367ec
|
[
"Apache-2.0"
] | null | null | null |
fugue/workflow/workflow.py
|
gityow/fugue
|
e975625b33766d8b9dc64c6954871569b59367ec
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from threading import RLock
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, TypeVar, Union
from uuid import uuid4
from adagio.specs import WorkflowSpec
from fugue.collections.partition import PartitionSpec
from fugue.collections.yielded import Yielded
from fugue.constants import (
FUGUE_CONF_WORKFLOW_AUTO_PERSIST,
FUGUE_CONF_WORKFLOW_AUTO_PERSIST_VALUE,
)
from fugue.dataframe import DataFrame, YieldedDataFrame
from fugue.dataframe.dataframes import DataFrames
from fugue.exceptions import FugueWorkflowCompileError, FugueWorkflowError
from fugue.execution.factory import make_execution_engine
from fugue.extensions._builtins import (
AlterColumns,
AssertEqual,
AssertNotEqual,
Distinct,
DropColumns,
Dropna,
Fillna,
Load,
LoadYielded,
Rename,
RunJoin,
RunOutputTransformer,
RunSetOperation,
RunSQLSelect,
RunTransformer,
Sample,
Save,
SaveAndUse,
SelectColumns,
Show,
Take,
Zip,
)
from fugue.extensions.transformer.convert import _to_output_transformer, _to_transformer
from fugue.rpc import to_rpc_handler
from fugue.rpc.base import EmptyRPCHandler
from fugue.workflow._checkpoint import FileCheckpoint, WeakCheckpoint
from fugue.workflow._tasks import Create, CreateData, FugueTask, Output, Process
from fugue.workflow._workflow_context import FugueWorkflowContext
from triad import ParamDict, Schema, assert_or_throw
_DEFAULT_IGNORE_ERRORS: List[Any] = []
TDF = TypeVar("TDF", bound="WorkflowDataFrame")
class WorkflowDataFrame(DataFrame):
"""It represents the edges in the graph constructed by :class:`~.FugueWorkflow`.
In Fugue, we use DAG to represent workflows, and the edges are strictly
dataframes. DAG construction and execution are different steps, this class is
used in the construction step. Although it inherits from
:class:`~fugue.dataframe.dataframe.DataFrame`, it's not concerete data. So a
lot of the operations are not allowed. If you want to obtain the concrete
Fugue :class:`~fugue.dataframe.dataframe.DataFrame`, use :meth:`~.compute()`
to execute the workflow.
Normally, you don't construct it yourself; you just use its methods.
:param workflow: the parent workflow it belongs to
:param task: the task that generates this dataframe
:param metadata: dict-like metadata, defaults to None
"""
def __init__(
self, workflow: "FugueWorkflow", task: FugueTask, metadata: Any = None
):
super().__init__("_0:int", metadata)
self._workflow = workflow
self._task = task
def spec_uuid(self) -> str:
"""UUID of its task spec"""
return self._task.__uuid__()
@property
def name(self) -> str:
"""Name of its task spec"""
return self._task.name
@property
def workflow(self) -> "FugueWorkflow":
"""The parent workflow"""
return self._workflow
@property
def result(self) -> DataFrame:
"""The concrete DataFrame obtained from :meth:`~.compute()`.
This property will not trigger compute again, but compute should
have been called earlier and the result is cached.
"""
return self.workflow.get_result(self)
@property
def partition_spec(self) -> PartitionSpec:
"""The partition spec set on the dataframe for next steps to use
:Examples:
.. code-block:: python
dag = FugueWorkflow()
df = dag.df([[0],[1]], "a:int")
assert df.partition_spec.empty
df2 = df.partition(by=["a"])
assert df.partition_spec.empty
assert df2.partition_spec == PartitionSpec(by=["a"])
"""
return self._metadata.get("pre_partition", PartitionSpec())
def compute(self, *args, **kwargs) -> DataFrame:
"""Trigger the parent workflow to
:meth:`~fugue.workflow.workflow.FugueWorkflow.run` and to generate and cache
the result dataframe this instance represent.
:Examples:
>>> df = FugueWorkflow().df([[0]],"a:int").transform(a_transformer)
>>> df.compute().as_pandas() # pandas dataframe
>>> df.compute(SparkExecutionEngine).native # spark dataframe
:Notice:
Consider using :meth:`fugue.workflow.workflow.FugueWorkflow.run` instead.
This method actually triggers the entire workflow to run, so it may
be confusing to use, because extra time may be taken to compute
unrelated dataframes.
.. code-block:: python
dag = FugueWorkflow()
df1 = dag.df([[0]],"a:int").transform(a_transformer)
df2 = dag.df([[0]],"b:int")
dag.run(SparkExecutionEngine)
df1.result.show()
df2.result.show()
"""
# TODO: it computes entire graph
self.workflow.run(*args, **kwargs)
return self.result
def process(
self: TDF,
using: Any,
schema: Any = None,
params: Any = None,
pre_partition: Any = None,
) -> TDF:
"""Run a processor on this dataframe. It's a simple wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.process`
Please read the :ref:`Processor Tutorial <tutorial:/tutorials/processor.ipynb>`
:param using: processor-like object, can't be a string expression
:param schema: |SchemaLikeObject|, defaults to None. The processor
will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.output_schema`
:param params: |ParamsLikeObject| to run the processor, defaults to None.
The processor will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None.
The processor will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.partition_spec`
:return: result dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
assert_or_throw(
not isinstance(using, str), f"processor {using} can't be string expression"
)
if pre_partition is None:
pre_partition = self.partition_spec
df = self.workflow.process(
self, using=using, schema=schema, params=params, pre_partition=pre_partition
)
return self._to_self_type(df)
def output(self, using: Any, params: Any = None, pre_partition: Any = None) -> None:
"""Run a outputter on this dataframe. It's a simple wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.output`
Please read the :ref:`Outputter Tutorial <tutorial:/tutorials/outputter.ipynb>`
:param using: outputter-like object, can't be a string expression
:param params: |ParamsLikeObject| to run the outputter, defaults to None.
The outputter will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None.
The outputter will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.partition_spec`
"""
assert_or_throw(
not isinstance(using, str), f"outputter {using} can't be string expression"
)
if pre_partition is None:
pre_partition = self.partition_spec
self.workflow.output(
self, using=using, params=params, pre_partition=pre_partition
)
def show(
self,
rows: int = 10,
show_count: bool = False,
title: Optional[str] = None,
best_width: int = 100,
) -> None:
"""Show the dataframe.
See :ref:`examples <tutorial:/tutorials/dag.ipynb#initialize-a-workflow>`.
:param rows: max number of rows, defaults to 10
:param show_count: whether to show total count, defaults to False
:param title: title to display on top of the dataframe, defaults to None
:param best_width: max width for the output table, defaults to 100
:Notice:
* When you call this method, it means you want the dataframe to be
printed when the workflow executes. So the dataframe won't show until
you run the workflow.
* When ``show_count`` is True, it can trigger expensive calculation for
a distributed dataframe. So if you call this function directly, you may
need to :meth:`~.persist` the dataframe. Or you can turn on
:ref:`tutorial:/tutorials/useful_config.ipynb#auto-persist`
"""
# TODO: best_width is not used
self.workflow.show(self, rows=rows, show_count=show_count, title=title)
def assert_eq(self, *dfs: Any, **params: Any) -> None:
"""Wrapper of :meth:`fugue.workflow.workflow.FugueWorkflow.assert_eq` to
compare this dataframe with other dataframes.
:param dfs: |DataFramesLikeObject|
:param digits: precision on float number comparison, defaults to 8
:param check_order: if to compare the row orders, defaults to False
:param check_schema: if compare schemas, defaults to True
:param check_content: if to compare the row values, defaults to True
:param check_metadata: if to compare the dataframe metadatas, defaults to True
:param no_pandas: if true, it will compare the string representations of the
dataframes, otherwise, it will convert both to pandas dataframe to compare,
defaults to False
:raises AssertionError: if not equal
"""
self.workflow.assert_eq(self, *dfs, **params)
def assert_not_eq(self, *dfs: Any, **params: Any) -> None:
"""Wrapper of :meth:`fugue.workflow.workflow.FugueWorkflow.assert_not_eq` to
compare this dataframe with other dataframes.
:param dfs: |DataFramesLikeObject|
:param digits: precision on float number comparison, defaults to 8
:param check_order: if to compare the row orders, defaults to False
:param check_schema: if compare schemas, defaults to True
:param check_content: if to compare the row values, defaults to True
:param check_metadata: if to compare the dataframe metadatas, defaults to True
:param no_pandas: if true, it will compare the string representations of the
dataframes, otherwise, it will convert both to pandas dataframe to compare,
defaults to False
:raises AssertionError: if any dataframe is equal to the first dataframe
"""
self.workflow.assert_not_eq(self, *dfs, **params)
def transform(
self: TDF,
using: Any,
schema: Any = None,
params: Any = None,
pre_partition: Any = None,
ignore_errors: List[Any] = _DEFAULT_IGNORE_ERRORS,
callback: Any = None,
) -> TDF:
"""Transform this dataframe using transformer. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.transform`
Please read the
:ref:`Transformer Tutorial <tutorial:/tutorials/transformer.ipynb>`
:param using: transformer-like object, can't be a string expression
:param schema: |SchemaLikeObject|, defaults to None. The transformer
will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.output_schema`
:param params: |ParamsLikeObject| to run the processor, defaults to None.
The transformer will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None. It's
recommended to use the equivalent way, which is to call
:meth:`~.partition` and then call :meth:`~.transform` without this parameter
:param ignore_errors: list of exception types the transformer can ignore,
defaults to empty list
:param callback: |RPCHandlerLikeObject|, defaults to None
:return: the transformed dataframe
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
:meth:`~.transform` can be lazy and will return the transformed dataframe,
:meth:`~.out_transform` is guaranteed to execute immediately (eager) and
return nothing
"""
assert_or_throw(
not isinstance(using, str),
f"transformer {using} can't be string expression",
)
if pre_partition is None:
pre_partition = self.partition_spec
df = self.workflow.transform(
self,
using=using,
schema=schema,
params=params,
pre_partition=pre_partition,
ignore_errors=ignore_errors,
callback=callback,
)
return self._to_self_type(df)
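# Illustrative sketch (an assumption, not from the original docs): a typical
# transform call with an explicit output schema, where `add_one` is a
# hypothetical user function and pandas is assumed to be imported as `pd`:
#
#     def add_one(df: pd.DataFrame) -> pd.DataFrame:
#         return df.assign(b=df["a"] + 1)
#
#     dag = FugueWorkflow()
#     dag.df([[0], [1]], "a:int").transform(add_one, schema="*,b:int").show()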
def out_transform(
self: TDF,
using: Any,
params: Any = None,
pre_partition: Any = None,
ignore_errors: List[Any] = _DEFAULT_IGNORE_ERRORS,
callback: Any = None,
) -> None:
"""Transform this dataframe using transformer. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.out_transform`
Please read the
:ref:`Transformer Tutorial <tutorial:/tutorials/transformer.ipynb>`
:param using: transformer-like object, can't be a string expression
:param params: |ParamsLikeObject| to run the processor, defaults to None.
The transformer will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None. It's
recommended to use the equivalent way, which is to call
:meth:`~.partition` and then call :meth:`~.transform` without this parameter
:param ignore_errors: list of exception types the transformer can ignore,
defaults to empty list
:param callback: |RPCHandlerLikeObject|, defaults to None
:Notice:
:meth:`~.transform` can be lazy and will return the transformed dataframe,
:meth:`~.out_transform` is guaranteed to execute immediately (eager) and
return nothing
"""
assert_or_throw(
not isinstance(using, str),
f"output transformer {using} can't be string expression",
)
if pre_partition is None:
pre_partition = self.partition_spec
self.workflow.out_transform(
self,
using=using,
params=params,
pre_partition=pre_partition,
ignore_errors=ignore_errors,
callback=callback,
)
def join(self: TDF, *dfs: Any, how: str, on: Optional[Iterable[str]] = None) -> TDF:
"""Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param how: can accept ``semi``, ``left_semi``, ``anti``, ``left_anti``,
``inner``, ``left_outer``, ``right_outer``, ``full_outer``, ``cross``
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
df = self.workflow.join(self, *dfs, how=how, on=on)
return self._to_self_type(df)
def inner_join(self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None) -> TDF:
"""INNER Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="inner", on=on)
def semi_join(self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None) -> TDF:
"""LEFT SEMI Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="semi", on=on)
def left_semi_join(self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None) -> TDF:
"""LEFT SEMI Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="left_semi", on=on)
def anti_join(self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None) -> TDF:
"""LEFT ANTI Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="anti", on=on)
def left_anti_join(self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None) -> TDF:
"""LEFT ANTI Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="left_anti", on=on)
def left_outer_join(
self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None
) -> TDF:
"""LEFT OUTER Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="left_outer", on=on)
def right_outer_join(
self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None
) -> TDF:
"""RIGHT OUTER Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="right_outer", on=on)
def full_outer_join(
self: TDF, *dfs: Any, on: Optional[Iterable[str]] = None
) -> TDF:
"""CROSS Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="full_outer", on=on)
def cross_join(self: TDF, *dfs: Any) -> TDF:
"""CROSS Join this dataframe with dataframes. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.join`. |ReadJoin|
:param dfs: |DataFramesLikeObject|
:return: joined dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
return self.join(*dfs, how="cross")
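# Illustrative sketch (not from the original docs): join keys are inferred from
# the common columns of the dataframes, for example
#
#     dag = FugueWorkflow()
#     df1 = dag.df([[0, "x"]], "a:int,b:str")
#     df2 = dag.df([[0, 1.5]], "a:int,c:double")
#     df1.inner_join(df2).show()          # joins on the shared column `a`
#     df1.left_outer_join(df2, on=["a"])  # explicit `on` is only validated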
def union(self: TDF, *dfs: Any, distinct: bool = True) -> TDF:
"""Union this dataframe with ``dfs``.
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after union,
default to True
:return: unioned dataframe
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
exception will be thrown.
"""
df = self.workflow.union(self, *dfs, distinct=distinct)
return self._to_self_type(df)
def subtract(self: TDF, *dfs: Any, distinct: bool = True) -> TDF:
"""Subtract ``dfs`` from this dataframe.
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after subtraction,
default to True
:return: subtracted dataframe
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
exception will be thrown.
"""
df = self.workflow.subtract(self, *dfs, distinct=distinct)
return self._to_self_type(df)
def intersect(self: TDF, *dfs: Any, distinct: bool = True) -> TDF:
"""Intersect this dataframe with ``dfs``.
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after intersection,
default to True
:return: intersected dataframe
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
exception will be thrown.
"""
df = self.workflow.intersect(self, *dfs, distinct=distinct)
return self._to_self_type(df)
def distinct(self: TDF) -> TDF:
"""Get distinct dataframe. Equivalent to ``SELECT DISTINCT * FROM df``
:return: dataframe with unique records
"""
df = self.workflow.process(self, using=Distinct)
return self._to_self_type(df)
def dropna(
self: TDF, how: str = "any", thresh: int = None, subset: List[str] = None
) -> TDF:
"""Drops records containing NA records
:param how: 'any' or 'all'. 'any' drops rows that contain any nulls.
'all' drops rows that contain all nulls.
:param thresh: int, drops rows that have less than thresh non-null values
:param subset: list of columns to operate on
:return: dataframe with incomplete records dropped
"""
params = dict(how=how, thresh=thresh, subset=subset)
params = {k: v for k, v in params.items() if v is not None}
df = self.workflow.process(self, using=Dropna, params=params)
return self._to_self_type(df)
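# Illustrative sketch (not from the original docs):
#
#     dag = FugueWorkflow()
#     df = dag.df([[0, None], [None, None]], "a:double,b:double")
#     df.dropna().show()             # drops rows containing any null
#     df.dropna(how="all").show()    # drops only the all-null row
#     df.dropna(subset=["a"]).show() # only considers column `a`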
def fillna(self: TDF, value: Any, subset: List[str] = None) -> TDF:
"""Fills NA values with replacement values
:param value: if scalar, fills all columns with the same value.
if dictionary, fills NA using the keys as column names and the
values as the replacement values.
:param subset: list of columns to operate on. ignored if value is
a dictionary
:return: dataframe with NA records filled
"""
params = dict(value=value, subset=subset)
params = {k: v for k, v in params.items() if v is not None}
df = self.workflow.process(self, using=Fillna, params=params)
return self._to_self_type(df)
def sample(
self: TDF,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
seed: Optional[int] = None,
) -> TDF:
"""
Sample dataframe by number of rows or by fraction
:param n: number of rows to sample, one and only one of ``n`` and ``frac``
must be set
:param frac: fraction [0,1] to sample, one and only one of ``n`` and ``frac``
must be set
:param replace: whether replacement is allowed. With replacement,
there may be duplicated rows in the result, defaults to False
:param seed: seed for randomness, defaults to None
:return: sampled dataframe
"""
params: Dict[str, Any] = dict(replace=replace, seed=seed)
if n is not None:
params["n"] = n
if frac is not None:
params["frac"] = frac
df = self.workflow.process(self, using=Sample, params=params)
return self._to_self_type(df)
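# Illustrative sketch (not from the original docs):
#
#     dag = FugueWorkflow()
#     df = dag.df([[x] for x in range(100)], "a:int")
#     df.sample(n=10, seed=0).show()                   # sample by row count
#     df.sample(frac=0.1, replace=True, seed=0).show() # sample by fraction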
def take(self: TDF, n: int, presort: str = None, na_position: str = "last") -> TDF:
"""
Get the first n rows of a DataFrame per partition. If a presort is defined,
use the presort before applying take. presort overrides partition_spec.presort
:param n: number of rows to return
:param presort: presort expression similar to partition presort
:param na_position: position of null values during the presort.
can accept ``first`` or ``last``
:return: n rows of DataFrame per partition
"""
params: Dict[str, Any] = dict()
params["n"] = n
# Note float is converted to int with triad _get_or
assert_or_throw(
isinstance(n, int),
ValueError("n needs to be an integer"),
)
assert_or_throw(
na_position in ("first", "last"),
ValueError("na_position must be either 'first' or 'last'"),
)
params["na_position"] = na_position
if presort is not None:
params["presort"] = presort
df = self.workflow.process(
self, using=Take, pre_partition=self.partition_spec, params=params
)
return self._to_self_type(df)
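# Illustrative sketch (not from the original docs; the presort string is assumed
# to follow the same "col [asc|desc]" convention as partition presort):
#
#     dag = FugueWorkflow()
#     df = dag.df([["a", 1], ["a", 2], ["b", 3]], "k:str,v:int")
#     df.partition_by("k").take(1, presort="v desc").show()  # top row per k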
def weak_checkpoint(self: TDF, lazy: bool = False, **kwargs: Any) -> TDF:
"""Cache the dataframe in memory
:param lazy: whether it is a lazy checkpoint, defaults to False (eager)
:param kwargs: parameters for the underlying execution engine function
:return: the cached dataframe
:Notice:
Weak checkpoint in most cases is the best choice for caching a dataframe to
avoid duplicated computation. However, it does not guarantee to break up
the compute dependency for this dataframe, so when you have a very complicated
compute, you may encounter issues such as stack overflow. Also, weak checkpoint
normally caches the dataframe in memory; if memory is a concern, you should
consider :meth:`~.strong_checkpoint`
"""
self._task.set_checkpoint(WeakCheckpoint(lazy=lazy, **kwargs))
return self
def strong_checkpoint(
self: TDF,
lazy: bool = False,
partition: Any = None,
single: bool = False,
**kwargs: Any,
) -> TDF:
"""Cache the dataframe as a temporary file
:param lazy: whether it is a lazy checkpoint, defaults to False (eager)
:param partition: |PartitionLikeObject|, defaults to None.
:param single: force the output as a single file, defaults to False
:param kwargs: parameters for the underlying execution engine function
:return: the cached dataframe
:Notice:
Strong checkpoint guarantees the output dataframe compute dependency is
from the temporary file. Use strong checkpoint only when
:meth:`~.weak_checkpoint` can't be used.
Strong checkpoint file will be removed after the execution of the workflow.
"""
self._task.set_checkpoint(
FileCheckpoint(
file_id=str(uuid4()),
deterministic=False,
permanent=False,
lazy=lazy,
partition=partition,
single=single,
**kwargs,
)
)
return self
def deterministic_checkpoint(
self: TDF,
lazy: bool = False,
partition: Any = None,
single: bool = False,
namespace: Any = None,
**kwargs: Any,
) -> TDF:
"""Cache the dataframe as a temporary file
:param lazy: whether it is a lazy checkpoint, defaults to False (eager)
:param partition: |PartitionLikeObject|, defaults to None.
:param single: force the output as a single file, defaults to False
:param kwargs: parameters for the underlying execution engine function
:param namespace: a value to control determinism, defaults to None.
:return: the cached dataframe
:Notice:
The difference vs :meth:`~.strong_checkpoint` is that this checkpoint is not
removed after execution, so it can take effect across executions if the dependent
compute logic is not changed.
"""
self._task.set_checkpoint(
FileCheckpoint(
file_id=self._task.__uuid__(),
deterministic=True,
permanent=True,
lazy=lazy,
partition=partition,
single=single,
namespace=namespace,
**kwargs,
)
)
return self
def yield_file_as(self: TDF, name: str) -> None:
"""Cache the dataframe in file
:param name: the name of the yielded dataframe
:Notice:
You can yield a file only in the following cases:
* you have not checkpointed (persisted) the dataframe, for example
``df.yield_file_as("a")``
* you have used :meth:`~.deterministic_checkpoint`, for example
``df.deterministic_checkpoint().yield_file_as("a")``
* yield is workflow-level, compile-time logic
In the first case, the yield also acts as a strong checkpoint, so
whenever you yield a dataframe as a file, the dataframe is saved as a file
and loaded back as a new dataframe.
"""
if not self._task.has_checkpoint:
# the following == a non-deterministic, but permanent checkpoint
self.deterministic_checkpoint(namespace=str(uuid4()))
self.workflow._yields[name] = self._task.yielded_file
def yield_dataframe_as(self: TDF, name: str) -> None:
"""Yield a dataframe that can be accessed without
the current execution engine
:param name: the name of the yielded dataframe
"""
yielded = YieldedDataFrame(self._task.__uuid__())
self.workflow._yields[name] = yielded
self._task.set_yield_dataframe_handler(lambda df: yielded.set_value(df))
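# Illustrative sketch (not from the original docs): yielded dataframes can be
# collected from the result of FugueWorkflow.run()
#
#     dag = FugueWorkflow()
#     dag.df([[0]], "a:int").yield_dataframe_as("x")
#     result = dag.run()
#     result["x"]  # the concrete DataFrame yielded as "x"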
def persist(self: TDF) -> TDF:
"""Persist the current dataframe
:return: the persisted dataframe
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
``persist`` can only guarantee the persisted dataframe will be computed
only once. However, this doesn't mean the backend really breaks up the
execution dependency at the persisting point. Commonly, it doesn't cause
any issue, but if your execution graph is long, it may cause unexpected
problems, for example stack overflow.
The ``persist`` method is considered a weak checkpoint. Sometimes, it may be
necessary to use a strong checkpoint, which is :meth:`~.checkpoint`
"""
return self.weak_checkpoint(lazy=False)
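# Illustrative sketch (not from the original docs; `a_transformer` is the same
# placeholder name used in this module's docstring examples): persist a
# dataframe that is used more than once so it is computed only once
#
#     dag = FugueWorkflow()
#     df = dag.df([[0]], "a:int").transform(a_transformer).persist()
#     df.show(title="first use")
#     df.show(title="second use, reuses the cached result")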
def checkpoint(self: TDF) -> TDF:
return self.strong_checkpoint(lazy=False)
def broadcast(self: TDF) -> TDF:
"""Broadcast the current dataframe
:return: the broadcasted dataframe
:rtype: :class:`~.WorkflowDataFrame`
"""
self._task.broadcast()
return self
def partition(self: TDF, *args: Any, **kwargs: Any) -> TDF:
"""Partition the current dataframe. Please read |PartitionTutorial|
:param args: |PartitionLikeObject|
:param kwargs: |PartitionLikeObject|
:return: dataframe with the partition hint
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
Normally this step is fast because it only adds a partition hint
for the next step.
"""
return self._to_self_type(
WorkflowDataFrame(
self.workflow,
self._task,
{"pre_partition": PartitionSpec(*args, **kwargs)},
)
)
def partition_by(self: TDF, *keys: str, **kwargs: Any) -> TDF:
"""Partition the current dataframe by keys. Please read |PartitionTutorial|.
This is a wrapper of :meth:`~.partition`
:param keys: partition keys
:param kwargs: |PartitionLikeObject| excluding ``by`` and ``partition_by``
:return: dataframe with the partition hint
:rtype: :class:`~.WorkflowDataFrame`
"""
assert_or_throw(len(keys) > 0, FugueWorkflowCompileError("keys can't be empty"))
assert_or_throw(
"by" not in kwargs and "partition_by" not in kwargs,
FugueWorkflowCompileError("by and partition_by can't be in kwargs"),
)
return self.partition(by=keys, **kwargs)
def per_partition_by(self: TDF, *keys: str) -> TDF:
"""Partition the current dataframe by keys so each physical partition contains
only one logical partition. Please read |PartitionTutorial|.
This is a wrapper of :meth:`~.partition`
:param keys: partition keys
:return: dataframe that is both logically and physically partitioned by ``keys``
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
This is a hint but not enforced, certain execution engines will not
respect this hint.
"""
return self.partition_by(*keys, algo="even")
def per_row(self: TDF) -> TDF:
"""Partition the current dataframe to one row per partition.
Please read |PartitionTutorial|. This is a wrapper of :meth:`~.partition`
:return: dataframe that is evenly partitioned by row count
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
This is a hint but not enforced, certain execution engines will not
respect this hint.
"""
return self.partition("per_row")
def _to_self_type(self: TDF, df: "WorkflowDataFrame") -> TDF:
return df # type: ignore
def drop( # type: ignore
self: TDF, columns: List[str], if_exists: bool = False
) -> TDF:
"""Drop columns from the dataframe.
:param columns: columns to drop
:param if_exists: if setting to True, it will ignore non-existent columns,
defaults to False
:return: the dataframe after dropping columns
:rtype: :class:`~.WorkflowDataFrame`
"""
df = self.workflow.process(
self, using=DropColumns, params=dict(columns=columns, if_exists=if_exists)
)
return self._to_self_type(df)
def rename(self: TDF, *args: Any, **kwargs: Any) -> TDF:
"""Rename the dataframe using a mapping dict
:param args: list of dicts containing rename maps
:param kwargs: rename map
:return: a new dataframe with the new names
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
This interface is more flexible than
:meth:`fugue.dataframe.dataframe.DataFrame.rename`
:Examples:
>>> df.rename({"a": "b"}, c="d", e="f")
"""
m: Dict[str, str] = {}
for a in args:
m.update(a)
m.update(kwargs)
df = self.workflow.process(self, using=Rename, params=dict(columns=m))
return self._to_self_type(df)
def alter_columns(self: TDF, columns: Any) -> TDF:
"""Change column types
:param columns: |SchemaLikeObject|
:return: a new dataframe with the new column types
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
The output dataframe will not change the order of original schema.
:Examples:
>>> df.alter_columns("a:int,b:str")
"""
df = self.workflow.process(
self, using=AlterColumns, params=dict(columns=columns)
)
return self._to_self_type(df)
def zip(
self: TDF,
*dfs: Any,
how: str = "inner",
partition: Any = None,
temp_path: Optional[str] = None,
to_file_threshold: Any = -1,
) -> TDF:
"""Zip this data frame with multiple dataframes together
with given partition specifications. It's a wrapper of
:meth:`fugue.workflow.workflow.FugueWorkflow.zip`.
:param dfs: |DataFramesLikeObject|
:param how: can accept ``inner``, ``left_outer``, ``right_outer``,
``full_outer``, ``cross``, defaults to ``inner``
:param partition: |PartitionLikeObject|, defaults to None.
:param temp_path: file path to store the data (used only if the serialized data
is larger than ``to_file_threshold``), defaults to None
:param to_file_threshold: file byte size threshold, defaults to -1
:return: a zipped dataframe
:rtype: :class:`~.WorkflowDataFrame`
:Notice:
* ``dfs`` must be list like, the zipped dataframe will be list like
* ``dfs`` is fine to be empty
* If you want dict-like zip, use
:meth:`fugue.workflow.workflow.FugueWorkflow.zip`
Read :ref:`CoTransformer <tutorial:/tutorials/dag.ipynb#cotransformer>`
and :ref:`Zip & Comap <tutorial:/tutorials/execution_engine.ipynb#zip-&-comap>`
for details
"""
if partition is None:
partition = self.partition_spec
df = self.workflow.zip(
self,
*dfs,
how=how,
partition=partition,
temp_path=temp_path,
to_file_threshold=to_file_threshold,
)
return self._to_self_type(df)
def __getitem__(self: TDF, columns: List[Any]) -> TDF:
df = self.workflow.process(
self, using=SelectColumns, params=dict(columns=columns)
)
return self._to_self_type(df)
def save(
self,
path: str,
fmt: str = "",
mode: str = "overwrite",
partition: Any = None,
single: bool = False,
**kwargs: Any,
) -> None:
"""Save this dataframe to a persistent storage
:param path: output path
:param fmt: format hint can accept ``parquet``, ``csv``, ``json``,
defaults to "", meaning to infer
:param mode: can accept ``overwrite``, ``append``, ``error``,
defaults to "overwrite"
:param partition: |PartitionLikeObject|, how to partition the
dataframe before saving, defaults to empty
:param single: force the output as a single file, defaults to False
:param kwargs: parameters to pass to the underlying framework
For more details and examples, read
:ref:`Save & Load <tutorial:/tutorials/dag.ipynb#save-&-load>`.
"""
if partition is None:
partition = self.partition_spec
self.workflow.output(
self,
using=Save,
pre_partition=partition,
params=dict(path=path, fmt=fmt, mode=mode, single=single, params=kwargs),
)
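# Illustrative sketch (not from the original docs; the output path is
# hypothetical):
#
#     dag = FugueWorkflow()
#     df = dag.df([[0, "x"], [1, "y"]], "a:int,b:str")
#     df.partition_by("a").save("/tmp/out", fmt="parquet", mode="overwrite")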
def save_and_use(
self: TDF,
path: str,
fmt: str = "",
mode: str = "overwrite",
partition: Any = None,
single: bool = False,
**kwargs: Any,
) -> TDF:
"""Save this dataframe to a persistent storage and load back to use
in the following steps
:param path: output path
:param fmt: format hint can accept ``parquet``, ``csv``, ``json``,
defaults to "", meaning to infer
:param mode: can accept ``overwrite``, ``append``, ``error``,
defaults to "overwrite"
:param partition: |PartitionLikeObject|, how to partition the
dataframe before saving, defaults to empty
:param single: force the output as a single file, defaults to False
:param kwargs: parameters to pass to the underlying framework
For more details and examples, read
:ref:`Save & Load <tutorial:/tutorials/dag.ipynb#save-&-load>`.
"""
if partition is None:
partition = self.partition_spec
df = self.workflow.process(
self,
using=SaveAndUse,
pre_partition=partition,
params=dict(path=path, fmt=fmt, mode=mode, single=single, params=kwargs),
)
return self._to_self_type(df)
@property
def schema(self) -> Schema: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
@property
def is_local(self) -> bool: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
def as_local(self) -> DataFrame: # type: ignore # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
@property
def is_bounded(self) -> bool: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
@property
def empty(self) -> bool: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
@property
def num_partitions(self) -> int: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
def peek_array(self) -> Any: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
def count(self) -> int: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
def as_array(
self, columns: Optional[List[str]] = None, type_safe: bool = False
) -> List[Any]: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
def as_array_iterable(
self, columns: Optional[List[str]] = None, type_safe: bool = False
) -> Iterable[Any]: # pragma: no cover
"""
:raises NotImplementedError: don't call this method
"""
raise NotImplementedError("WorkflowDataFrame does not support this method")
def _drop_cols(self: TDF, cols: List[str]) -> DataFrame: # pragma: no cover
raise NotImplementedError("WorkflowDataFrame does not support this method")
def _select_cols(self, keys: List[Any]) -> DataFrame: # pragma: no cover
raise NotImplementedError("WorkflowDataFrame does not support this method")
class WorkflowDataFrames(DataFrames):
"""Ordered dictionary of WorkflowDataFrames. There are two modes: with keys
and without keys. Without keys, ``_<n>`` will be used as the key
for each dataframe, and it will be treated as an array in the Fugue framework.
It's immutable; once initialized, you can't add or remove elements from it.
It's a subclass of
:class:`~fugue.dataframe.dataframes.DataFrames`, but different from
DataFrames, in the initialization you should always use
:class:`~fugue.workflow.workflow.WorkflowDataFrame`, and they should all
come from the same :class:`~fugue.workflow.workflow.FugueWorkflow`.
:Examples:
.. code-block:: python
dag = FugueWorkflow()
df1 = dag.df([[0]],"a:int").transform(a_transformer)
df2 = dag.df([[0]],"b:int")
dfs1 = WorkflowDataFrames(df1, df2) # as array
dfs2 = WorkflowDataFrames([df1, df2]) # as array
dfs3 = WorkflowDataFrames(a=df1, b=df2) # as dict
dfs4 = WorkflowDataFrames(dict(a=df1, b=df2)) # as dict
dfs5 = WorkflowDataFrames(dfs4, c=df2) # copy and update
dfs5["b"].show() # how you get element when it's a dict
dfs1[0].show() # how you get element when it's an array
"""
def __init__(self, *args: Any, **kwargs: Any):
self._parent: Optional["FugueWorkflow"] = None
super().__init__(*args, **kwargs)
def __setitem__( # type: ignore
self, key: str, value: WorkflowDataFrame, *args: Any, **kwds: Any
) -> None:
assert_or_throw(
isinstance(value, WorkflowDataFrame),
ValueError(f"{key}:{value} is not WorkflowDataFrame)"),
)
if self._parent is None:
self._parent = value.workflow
else:
assert_or_throw(
self._parent is value.workflow,
ValueError("different parent workflow detected in dataframes"),
)
super().__setitem__(key, value, *args, **kwds)
def __getitem__( # pylint: disable=W0235
self, key: Union[str, int] # type: ignore
) -> WorkflowDataFrame:
return super().__getitem__(key) # type: ignore
class FugueWorkflow(object):
"""Fugue Workflow, also known as the Fugue Programming Interface.
In Fugue, we use DAG to represent workflows, DAG construction and execution
are different steps, this class is mainly used in the construction step, so all
things you added to the workflow is **description** and they are not executed
until you call :meth:`~.run`
Read :ref:`The Tutorial <tutorial:/tutorials/dag.ipynb#initialize-a-workflow>`
to learn how to initialize it in different ways and pros and cons.
"""
def __init__(self, *args: Any, **kwargs: Any):
self._lock = RLock()
self._spec = WorkflowSpec()
self._workflow_ctx = self._to_ctx(*args, **kwargs)
self._computed = False
self._graph = _Graph()
self._yields: Dict[str, Yielded] = {}
@property
def conf(self) -> ParamDict:
"""All configs of this workflow and underlying
:class:`~fugue.execution.execution_engine.ExecutionEngine` (if given)
"""
return self._workflow_ctx.conf
def spec_uuid(self) -> str:
"""UUID of the workflow spec (`description`)"""
return self._spec.__uuid__()
def run(self, *args: Any, **kwargs: Any) -> DataFrames:
"""Execute the workflow and compute all dataframes.
If no arguments are given, it will use
:class:`~fugue.execution.native_execution_engine.NativeExecutionEngine`
to run the workflow.
:Examples:
.. code-block:: python
dag = FugueWorkflow()
df1 = dag.df([[0]],"a:int").transform(a_transformer)
df2 = dag.df([[0]],"b:int")
dag.run(SparkExecutionEngine)
df1.result.show()
df2.result.show()
dag = FugueWorkflow()
df1 = dag.df([[0]],"a:int").transform(a_transformer)
df1.yield_dataframe_as("x")
result = dag.run(SparkExecutionEngine)
result["x"] # SparkDataFrame
Read :ref:`The Tutorial <tutorial:/tutorials/dag.ipynb#initialize-a-workflow>`
to learn how to run in different ways and pros and cons.
"""
with self._lock:
self._computed = False
if len(args) > 0 or len(kwargs) > 0:
self._workflow_ctx = self._to_ctx(*args, **kwargs)
self._workflow_ctx.run(self._spec, {})
self._computed = True
return DataFrames(
{
k: v.result
for k, v in self.yields.items()
if isinstance(v, YieldedDataFrame)
}
)
@property
def yields(self) -> Dict[str, Yielded]:
return self._yields
def __enter__(self):
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.run()
def get_result(self, df: WorkflowDataFrame) -> DataFrame:
"""After :meth:`~.run`, get the result of a dataframe defined in the dag
:return: a calculated dataframe
:Examples:
.. code-block:: python
dag = FugueWorkflow()
df1 = dag.df([[0]],"a:int")
dag.run()
dag.get_result(df1).show()
"""
assert_or_throw(self._computed, FugueWorkflowError("not computed"))
return self._workflow_ctx.get_result(id(df._task))
def create(
self, using: Any, schema: Any = None, params: Any = None
) -> WorkflowDataFrame:
"""Run a creator to create a dataframe.
Please read the :ref:`Creator Tutorial <tutorial:/tutorials/creator.ipynb>`
:param using: creator-like object, can't be a string expression
:param schema: |SchemaLikeObject|, defaults to None. The creator
will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.output_schema`
:param params: |ParamsLikeObject| to run the creator,
defaults to None. The creator will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:return: result dataframe
"""
assert_or_throw(
not isinstance(using, str),
f"creator {using} can't be string expression",
)
task = Create(creator=using, schema=schema, params=params)
return self.add(task)
def process(
self,
*dfs: Any,
using: Any,
schema: Any = None,
params: Any = None,
pre_partition: Any = None,
) -> WorkflowDataFrame:
"""Run a processor on the dataframes.
Please read the :ref:`Processor Tutorial <tutorial:/tutorials/processor.ipynb>`
:param dfs: |DataFramesLikeObject|
:param using: processor-like object, can't be a string expression
:param schema: |SchemaLikeObject|, defaults to None. The processor
will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.output_schema`
:param params: |ParamsLikeObject| to run the processor, defaults to None.
The processor will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None.
The processor will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.partition_spec`
:return: result dataframe
"""
assert_or_throw(
not isinstance(using, str),
f"processor {using} can't be string expression",
)
_dfs = self._to_dfs(*dfs)
task = Process(
len(_dfs),
processor=using,
schema=schema,
params=params,
pre_partition=pre_partition,
input_names=None if not _dfs.has_key else list(_dfs.keys()),
)
if _dfs.has_key:
return self.add(task, **_dfs)
else:
return self.add(task, *_dfs.values())
def output(
self, *dfs: Any, using: Any, params: Any = None, pre_partition: Any = None
) -> None:
"""Run a outputter on dataframes.
Please read the :ref:`Outputter Tutorial <tutorial:/tutorials/outputter.ipynb>`
:param using: outputter-like object, can't be a string expression
:param params: |ParamsLikeObject| to run the outputter, defaults to None.
The outputter will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None.
The outputter will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.partition_spec`
"""
assert_or_throw(
not isinstance(using, str),
f"outputter {using} can't be string expression",
)
_dfs = self._to_dfs(*dfs)
task = Output(
len(_dfs),
outputter=using,
params=params,
pre_partition=pre_partition,
input_names=None if not _dfs.has_key else list(_dfs.keys()),
)
if _dfs.has_key:
self.add(task, **_dfs)
else:
self.add(task, *_dfs.values())
def create_data(
self,
data: Any,
schema: Any = None,
metadata: Any = None,
data_determiner: Optional[Callable[[Any], str]] = None,
) -> WorkflowDataFrame:
"""Create dataframe.
:param data: |DataFrameLikeObject| or :class:`~fugue.workflow.yielded.Yielded`
:param schema: |SchemaLikeObject|, defaults to None
:param metadata: |ParamsLikeObject|, defaults to None
:param data_determiner: a function to compute unique id from ``data``
:return: a dataframe of the current workflow
:Notice:
By default, the input ``data`` does not affect the determinism of the workflow
(but ``schema`` and ``metadata`` do), because the amount of compute can be
unpredictable. But if you want ``data`` to affect the
determinism of the workflow, you can provide the function to compute the unique
id of ``data`` using ``data_determiner``
"""
if isinstance(data, WorkflowDataFrame):
assert_or_throw(
data.workflow is self,
FugueWorkflowCompileError(f"{data} does not belong to this workflow"),
)
assert_or_throw(
schema is None and metadata is None,
FugueWorkflowCompileError(
"schema and metadata must be None when data is WorkflowDataFrame"
),
)
return data
if isinstance(data, Yielded):
assert_or_throw(
schema is None and metadata is None,
FugueWorkflowCompileError(
"schema and metadata must be None when data is Yielded"
),
)
return self.create(using=LoadYielded, params=dict(yielded=data))
task = CreateData(
data=data, schema=schema, metadata=metadata, data_determiner=data_determiner
)
return self.add(task)
def df(
self,
data: Any,
schema: Any = None,
metadata: Any = None,
data_determiner: Optional[Callable[[Any], str]] = None,
) -> WorkflowDataFrame:
"""Create dataframe. Alias of :meth:`~.create_data`
:param data: |DataFrameLikeObject| or :class:`~fugue.workflow.yielded.Yielded`
:param schema: |SchemaLikeObject|, defaults to None
:param metadata: |ParamsLikeObject|, defaults to None
:param data_determiner: a function to compute unique id from ``data``
:return: a dataframe of the current workflow
:Notice:
By default, the input ``data`` does not affect the determinism of the workflow
(but ``schema`` and ``metadata`` do), because the amount of compute can be
unpredictable. But if you want ``data`` to affect the
determinism of the workflow, you can provide the function to compute the unique
id of ``data`` using ``data_determiner``
"""
return self.create_data(
data=data, schema=schema, metadata=metadata, data_determiner=data_determiner
)
def load(
self, path: str, fmt: str = "", columns: Any = None, **kwargs: Any
) -> WorkflowDataFrame:
"""Load dataframe from persistent storage.
Read :ref:`this <tutorial:/tutorials/dag.ipynb#save-&-load>` for details
:param path: file path
:param fmt: format hint can accept ``parquet``, ``csv``, ``json``,
defaults to "", meaning to infer
:param columns: list of columns or a |SchemaLikeObject|, defaults to None
:return: dataframe from the file
:rtype: WorkflowDataFrame
"""
return self.create(
using=Load, params=dict(path=path, fmt=fmt, columns=columns, params=kwargs)
)
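# Illustrative sketch (not from the original docs; the path is hypothetical and
# the csv-specific `header` kwarg is an assumption passed through to the
# underlying engine):
#
#     dag = FugueWorkflow()
#     df = dag.load("/tmp/data.csv", fmt="csv", columns="a:int,b:str", header=True)
#     df.show()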
def show(
self,
*dfs: Any,
rows: int = 10,
show_count: bool = False,
title: Optional[str] = None,
) -> None:
"""Show the dataframes.
See :ref:`examples <tutorial:/tutorials/dag.ipynb#initialize-a-workflow>`.
:param dfs: |DataFramesLikeObject|
:param rows: max number of rows, defaults to 10
:param show_count: whether to show total count, defaults to False
:param title: title to display on top of the dataframe, defaults to None
:param best_width: max width for the output table, defaults to 100
:Notice:
* When you call this method, it means you want the dataframe to be
printed when the workflow executes. So the dataframe won't show until
you run the workflow.
* When ``show_count`` is True, it can trigger expensive calculation for
a distributed dataframe. So if you call this function directly, you may
need to :meth:`~.WorkflowDataFrame.persist` the dataframe. Or you can turn on
:ref:`tutorial:/tutorials/useful_config.ipynb#auto-persist`
"""
self.output(
*dfs, using=Show, params=dict(rows=rows, show_count=show_count, title=title)
)
def join(
self, *dfs: Any, how: str, on: Optional[Iterable[str]] = None
) -> WorkflowDataFrame:
"""Join dataframes.
|ReadJoin|
:param dfs: |DataFramesLikeObject|
:param how: can accept ``semi``, ``left_semi``, ``anti``, ``left_anti``,
``inner``, ``left_outer``, ``right_outer``, ``full_outer``, ``cross``
:param on: it can always be inferred, but if you provide, it will be
validated against the inferred keys. Default to None
:return: joined dataframe
"""
_on: List[str] = list(on) if on is not None else []
return self.process(*dfs, using=RunJoin, params=dict(how=how, on=_on))
def set_op(self, how: str, *dfs: Any, distinct: bool = True) -> WorkflowDataFrame:
"""Union, subtract or intersect dataframes.
:param how: can accept ``union``, ``subtract``, ``intersect``
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after the set operation,
default to True
:return: result dataframe of the set operation
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
exception will be thrown.
"""
return self.process(
*dfs, using=RunSetOperation, params=dict(how=how, distinct=distinct)
)
def union(self, *dfs: Any, distinct: bool = True) -> WorkflowDataFrame:
"""Union dataframes in ``dfs``.
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after union,
default to True
:return: unioned dataframe
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
exception will be thrown.
"""
return self.set_op("union", *dfs, distinct=distinct)
def subtract(self, *dfs: Any, distinct: bool = True) -> WorkflowDataFrame:
"""Subtract ``dfs[1:]`` from ``dfs[0]``.
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after subtraction,
default to True
:return: subtracted dataframe
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
        an exception will be thrown.
"""
return self.set_op("subtract", *dfs, distinct=distinct)
def intersect(self, *dfs: Any, distinct: bool = True) -> WorkflowDataFrame:
"""Intersect dataframes in ``dfs``.
:param dfs: |DataFramesLikeObject|
:param distinct: whether to perform `distinct` after intersection,
default to True
:return: intersected dataframe
:Notice:
Currently, all dataframes in ``dfs`` must have identical schema, otherwise
        an exception will be thrown.
"""
return self.set_op("intersect", *dfs, distinct=distinct)
def zip(
self,
*dfs: Any,
how: str = "inner",
partition: Any = None,
temp_path: Optional[str] = None,
to_file_threshold: Any = -1,
) -> WorkflowDataFrame:
"""Zip multiple dataframes together with given partition
specifications.
:param dfs: |DataFramesLikeObject|
:param how: can accept ``inner``, ``left_outer``, ``right_outer``,
``full_outer``, ``cross``, defaults to ``inner``
:param partition: |PartitionLikeObject|, defaults to None.
:param temp_path: file path to store the data (used only if the serialized data
is larger than ``to_file_threshold``), defaults to None
:param to_file_threshold: file byte size threshold, defaults to -1
:return: a zipped dataframe
:Notice:
        * If ``dfs`` is dict like, the zipped dataframe will be dict like;
          if ``dfs`` is list like, the zipped dataframe will be list like
        * It's fine for ``dfs`` to contain only one dataframe
Read :ref:`CoTransformer <tutorial:/tutorials/dag.ipynb#cotransformer>`
and :ref:`Zip & Comap <tutorial:/tutorials/execution_engine.ipynb#zip-&-comap>`
for details
"""
return self.process(
*dfs,
using=Zip,
params=dict(
how=how, temp_path=temp_path, to_file_threshold=to_file_threshold
),
pre_partition=partition,
)
def transform(
self,
*dfs: Any,
using: Any,
schema: Any = None,
params: Any = None,
pre_partition: Any = None,
ignore_errors: List[Any] = _DEFAULT_IGNORE_ERRORS,
callback: Any = None,
) -> WorkflowDataFrame:
"""Transform dataframes using transformer.
Please read the
:ref:`Transformer Tutorial <tutorial:/tutorials/transformer.ipynb>`
:param dfs: |DataFramesLikeObject|
:param using: transformer-like object, can't be a string expression
:param schema: |SchemaLikeObject|, defaults to None. The transformer
will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.output_schema`
:param params: |ParamsLikeObject| to run the processor, defaults to None.
The transformer will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None. It's
            recommended to use the equivalent way, which is to call
:meth:`~.partition` and then call :meth:`~.transform` without this parameter
:param ignore_errors: list of exception types the transformer can ignore,
defaults to empty list
:param callback: |RPCHandlerLikeObject|, defaults to None
:return: the transformed dataframe
:Notice:
:meth:`~.transform` can be lazy and will return the transformed dataframe,
:meth:`~.out_transform` is guaranteed to execute immediately (eager) and
return nothing
"""
assert_or_throw(
not isinstance(using, str),
f"transformer {using} can't be string expression",
)
assert_or_throw(
len(dfs) == 1,
NotImplementedError("transform supports only single dataframe"),
)
tf = _to_transformer(using, schema)
tf._partition_spec = PartitionSpec(pre_partition) # type: ignore
callback = to_rpc_handler(callback)
tf._has_rpc_client = not isinstance(callback, EmptyRPCHandler) # type: ignore
tf.validate_on_compile()
return self.process(
*dfs,
using=RunTransformer,
schema=None,
params=dict(
transformer=tf,
ignore_errors=ignore_errors,
params=params,
rpc_handler=callback,
),
pre_partition=pre_partition,
)
def out_transform(
self,
*dfs: Any,
using: Any,
params: Any = None,
pre_partition: Any = None,
ignore_errors: List[Any] = _DEFAULT_IGNORE_ERRORS,
callback: Any = None,
) -> None:
"""Transform dataframes using transformer, it materializes the execution
immediately and returns nothing
Please read the
:ref:`Transformer Tutorial <tutorial:/tutorials/transformer.ipynb>`
:param dfs: |DataFramesLikeObject|
:param using: transformer-like object, can't be a string expression
:param params: |ParamsLikeObject| to run the processor, defaults to None.
The transformer will be able to access this value from
:meth:`~fugue.extensions.context.ExtensionContext.params`
:param pre_partition: |PartitionLikeObject|, defaults to None. It's
            recommended to use the equivalent way, which is to call
:meth:`~.partition` and then call :meth:`~.out_transform` without this
parameter
:param ignore_errors: list of exception types the transformer can ignore,
defaults to empty list
:param callback: |RPCHandlerLikeObject|, defaults to None
:Notice:
:meth:`~.transform` can be lazy and will return the transformed dataframe,
:meth:`~.out_transform` is guaranteed to execute immediately (eager) and
return nothing
"""
assert_or_throw(
not isinstance(using, str),
f"output transformer {using} can't be string expression",
)
assert_or_throw(
len(dfs) == 1,
NotImplementedError("output transform supports only single dataframe"),
)
tf = _to_output_transformer(using)
tf._partition_spec = PartitionSpec(pre_partition) # type: ignore
callback = to_rpc_handler(callback)
tf._has_rpc_client = not isinstance(callback, EmptyRPCHandler) # type: ignore
tf.validate_on_compile()
self.output(
*dfs,
using=RunOutputTransformer,
params=dict(
transformer=tf,
ignore_errors=ignore_errors,
params=params,
rpc_handler=callback,
),
pre_partition=pre_partition,
)
def select(
self,
*statements: Any,
sql_engine: Any = None,
sql_engine_params: Any = None,
) -> WorkflowDataFrame:
"""Execute ``SELECT`` statement using
:class:`~fugue.execution.execution_engine.SQLEngine`
:param statements: a list of sub-statements in string
or :class:`~.WorkflowDataFrame`
:param sql_engine: it can be empty string or null (use the default SQL
engine), a string (use the registered SQL engine), an
:class:`~fugue.execution.execution_engine.SQLEngine` type, or
the :class:`~fugue.execution.execution_engine.SQLEngine` instance
(you can use ``None`` to use the default one), defaults to None
:return: result of the ``SELECT`` statement
:Example:
.. code-block:: python
with FugueWorkflow() as dag:
                a = dag.df([[0, "a"]], "a:int,b:str")
                b = dag.df([[0]], "a:int")
c = dag.select("SELECT a FROM",a,"UNION SELECT * FROM",b)
Please read :ref:`this <tutorial:/tutorials/dag.ipynb#select-query>`
for more examples
"""
s_str: List[str] = []
dfs: Dict[str, DataFrame] = {}
for s in statements:
if isinstance(s, str):
s_str.append(s)
if isinstance(s, DataFrame):
ws = self.df(s)
dfs[ws.name] = ws
s_str.append(ws.name)
sql = " ".join(s_str).strip()
if not sql[:10].upper().startswith("SELECT") and not sql[
:10
].upper().startswith("WITH"):
sql = "SELECT " + sql
return self.process(
dfs,
using=RunSQLSelect,
params=dict(
statement=sql,
sql_engine=sql_engine,
sql_engine_params=ParamDict(sql_engine_params),
),
)
def assert_eq(self, *dfs: Any, **params: Any) -> None:
"""Compare if these dataframes are equal. It's for internal, unit test
purpose only. It will convert both dataframes to
:class:`~fugue.dataframe.dataframe.LocalBoundedDataFrame`, so it assumes
all dataframes are small and fast enough to convert. DO NOT use it
on critical or expensive tasks.
:param dfs: |DataFramesLikeObject|
:param digits: precision on float number comparison, defaults to 8
        :param check_order: whether to compare the row order, defaults to False
        :param check_schema: whether to compare the schemas, defaults to True
        :param check_content: whether to compare the row values, defaults to True
        :param check_metadata: whether to compare the dataframe metadata, defaults to True
:param no_pandas: if true, it will compare the string representations of the
dataframes, otherwise, it will convert both to pandas dataframe to compare,
defaults to False
:raises AssertionError: if not equal
"""
self.output(*dfs, using=AssertEqual, params=params)
def assert_not_eq(self, *dfs: Any, **params: Any) -> None:
"""Assert if all dataframes are not equal to the first dataframe.
It's for internal, unit test purpose only. It will convert both dataframes to
:class:`~fugue.dataframe.dataframe.LocalBoundedDataFrame`, so it assumes
all dataframes are small and fast enough to convert. DO NOT use it
on critical or expensive tasks.
:param dfs: |DataFramesLikeObject|
:param digits: precision on float number comparison, defaults to 8
        :param check_order: whether to compare the row order, defaults to False
        :param check_schema: whether to compare the schemas, defaults to True
        :param check_content: whether to compare the row values, defaults to True
        :param check_metadata: whether to compare the dataframe metadata, defaults to True
:param no_pandas: if true, it will compare the string representations of the
dataframes, otherwise, it will convert both to pandas dataframe to compare,
defaults to False
:raises AssertionError: if any dataframe equals to the first dataframe
"""
self.output(*dfs, using=AssertNotEqual, params=params)
def add(self, task: FugueTask, *args: Any, **kwargs: Any) -> WorkflowDataFrame:
"""This method should not be called directly by users. Use
:meth:`~.create`, :meth:`~.process`, :meth:`~.output` instead
"""
assert_or_throw(task._node_spec is None, f"can't reuse {task}")
dep = _Dependencies(self, task, {}, *args, **kwargs)
name = "_" + str(len(self._spec.tasks))
wt = self._spec.add_task(name, task, dep.dependency)
        # TODO: this is auto persist, the implementation needs improvement
for v in dep.dependency.values():
v = v.split(".")[0]
self._graph.add(name, v)
if len(self._graph.down[v]) > 1 and self.conf.get_or_throw(
FUGUE_CONF_WORKFLOW_AUTO_PERSIST, bool
):
self._spec.tasks[v].set_checkpoint(
WeakCheckpoint(
lazy=False,
level=self.conf.get_or_none(
FUGUE_CONF_WORKFLOW_AUTO_PERSIST_VALUE, object
),
)
)
return WorkflowDataFrame(self, wt)
def _to_dfs(self, *args: Any, **kwargs: Any) -> DataFrames:
return DataFrames(*args, **kwargs).convert(self.create_data)
def _to_ctx(self, *args: Any, **kwargs) -> FugueWorkflowContext:
if len(args) == 1 and isinstance(args[0], FugueWorkflowContext):
return args[0]
return FugueWorkflowContext(make_execution_engine(*args, **kwargs))
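# --- Editorial addition (illustrative only, not part of the original module) ---
# A minimal sketch of how the methods documented above (``df``, ``show``,
# ``join`` and ``select``) are typically combined. The data values and schema
# strings are made-up placeholders; the workflow runs on the default engine.
def _example_basic_workflow():  # pragma: no cover
    dag = FugueWorkflow()
    a = dag.df([[0, "x"]], "a:int,b:str")
    b = dag.df([[0, 1]], "a:int,c:int")
    a.show()  # printed only when the workflow actually runs
    dag.join(a, b, how="inner").show()  # join key ``a`` is inferred
    dag.select("SELECT a FROM", a).show()
    dag.run()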
class _Dependencies(object):
def __init__(
self,
workflow: "FugueWorkflow",
task: FugueTask,
local_vars: Dict[str, Any],
*args: Any,
**kwargs: Any,
):
self.workflow = workflow
self._local_vars = local_vars
self.dependency: Dict[str, str] = {}
for i in range(len(args)):
key = task.inputs.get_key_by_index(i)
self.dependency[key] = self._parse_single_dependency(args[i])
for k, v in kwargs.items():
self.dependency[k] = self._parse_single_dependency(v)
def _parse_single_dependency(self, dep: Any) -> str:
# if isinstance(dep, tuple): # (cursor_like_obj, output_name)
# cursor = self._parse_cursor(dep[0])
# return cursor._task.name + "." + dep[1]
return self._parse_cursor(dep)._task.single_output_expression
def _parse_cursor(self, dep: Any) -> WorkflowDataFrame:
if isinstance(dep, WorkflowDataFrame):
return dep
# if isinstance(dep, DataFrame):
# return self.workflow.create_data(dep)
# if isinstance(dep, str):
# assert_or_throw(
# dep in self._local_vars, KeyError(f"{dep} is not a local variable")
# )
# if isinstance(self._local_vars[dep], WorkflowDataFrame):
# return self._local_vars[dep]
# # TODO: should also accept dataframe?
# raise TypeError(f"{self._local_vars[dep]} is not a valid dependency type")
raise TypeError(f"{dep} is not a valid dependency type") # pragma: no cover
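# --- Editorial addition (illustrative only, not part of the original module) ---
# A hedged sketch of the set operations documented on FugueWorkflow above
# (``union``, ``subtract``, ``intersect``); values and schema are placeholders.
def _example_set_operations():  # pragma: no cover
    dag = FugueWorkflow()
    a = dag.df([[0], [1]], "a:int")
    b = dag.df([[1], [2]], "a:int")
    dag.union(a, b).show()      # distinct union by default
    dag.subtract(a, b).show()   # rows of ``a`` not in ``b``
    dag.intersect(a, b).show()
    dag.run()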
# TODO: this should not exist, dependency libraries should do the job
class _Graph(object):
def __init__(self):
self.down: Dict[str, Set[str]] = defaultdict(set)
self.up: Dict[str, Set[str]] = defaultdict(set)
def add(self, name: str, depend_on: str) -> None:
depend_on = depend_on.split(".")[0]
self.down[depend_on].add(name)
self.up[name].add(depend_on)
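# --- Editorial addition (illustrative only, not part of the original module) ---
# A hedged sketch of ``FugueWorkflow.transform`` as documented above, using a
# plain pandas function as the transformer; the function name, column names and
# output schema below are placeholders.
def _example_transform():  # pragma: no cover
    import pandas as pd
    def with_double(df: pd.DataFrame) -> pd.DataFrame:
        # add one derived column; the output schema is declared in the call below
        return df.assign(doubled=df["a"] * 2)
    dag = FugueWorkflow()
    src = dag.df([[1], [2]], "a:int")
    dag.transform(src, using=with_double, schema="*,doubled:int").show()
    dag.run()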
| 39.305742
| 88
| 0.621654
|
e3b5c4969b2ba593cabcf703ab94180e9b8911f7
| 131
|
py
|
Python
|
src/window.py
|
ytyaru/Python.Pyxel.Minimum.Window.border_width.20200330194841
|
9ea98aa336b0ff7b5ed2e10d0c5bc8c8841ac8af
|
[
"CC0-1.0"
] | null | null | null |
src/window.py
|
ytyaru/Python.Pyxel.Minimum.Window.border_width.20200330194841
|
9ea98aa336b0ff7b5ed2e10d0c5bc8c8841ac8af
|
[
"CC0-1.0"
] | null | null | null |
src/window.py
|
ytyaru/Python.Pyxel.Minimum.Window.border_width.20200330194841
|
9ea98aa336b0ff7b5ed2e10d0c5bc8c8841ac8af
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf8
import pyxel
pyxel.init(256, 256, border_width=0)
pyxel.run(lambda: 0, lambda: pyxel.cls(8))
| 21.833333
| 42
| 0.725191
|
b5d684aff236b52be1ed0835a42881859da1a318
| 4,188
|
py
|
Python
|
fastdocx/ui/ui.py
|
SOVLOOKUP/FastDocx
|
9e7d9a2bed77fc7ebd8eb7f0a49ec9bbacaeb02d
|
[
"MIT"
] | 3
|
2021-02-05T08:46:26.000Z
|
2021-05-12T13:00:01.000Z
|
fastdocx/ui/ui.py
|
SOVLOOKUP/FastDocx
|
9e7d9a2bed77fc7ebd8eb7f0a49ec9bbacaeb02d
|
[
"MIT"
] | 4
|
2020-10-21T03:04:28.000Z
|
2021-05-26T04:06:01.000Z
|
fastdocx/ui/ui.py
|
SOVLOOKUP/FastDocx
|
9e7d9a2bed77fc7ebd8eb7f0a49ec9bbacaeb02d
|
[
"MIT"
] | 2
|
2021-05-12T09:46:01.000Z
|
2021-11-05T01:36:06.000Z
|
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices, QIcon, QIconEngine
from PyQt5.QtWidgets import QMessageBox, QMainWindow, QApplication, QListWidgetItem, QInputDialog, QFileDialog
from .form import Ui_MainWindow
import httpx, json, os
from fastdocx import WordCore
from .style import stype
# class CommonHelper:
# def __init__(self):
# pass
# @staticmethod
# def readQss(style):
# with open(style, 'r') as f:
# return f.read()
class item(QListWidgetItem):
def __init__(self, name :str, icon: str, id: str, author: str, version: str, config: str, description: str,tmpdir: str, parent = None):
super(item, self).__init__(parent)
self.tmpdir = tmpdir
self.setText(name)
iconame = icon.split("/")[-1]
with httpx.stream("GET", icon) as response:
with open(self.tmpdir+iconame,"wb+") as f:
for chunk in response.iter_bytes():
f.write(chunk)
self.setIcon(QIcon(self.tmpdir+iconame))
self.setToolTip(f"ID:{id}\n作者:{author}\n版本:{version}")
self.config = config
self.description = description
self.name = name
self.version = version
self.author = author
def __next__():
pass
def __iter__():
pass
class fastdocx(QMainWindow, Ui_MainWindow):
def __init__(self,tmpdir:str = "./tmp/",source_url:str = "https://v.gonorth.top:444/file/index.json", parent = None):
super(fastdocx, self).__init__(parent)
        # Retry up to 10 times on connection timeout
self._time = 10
self.source_url = source_url
self.tmpdir = tmpdir
self.setupUi(self)
try:
self.setWindowIcon(QIcon('icon.ico'))
except:
pass
self.setWindowTitle('FastDocx')
self.workdirButton.clicked.connect(self.workdirButtonClicked)
self.process.clicked.connect(self.startProcess)
self.listWidget.itemClicked.connect(self.setDetails)
self.source.triggered.connect(self.setSourse)
self.about.triggered.connect(self.aboutOpenWeb)
# self.myinit()
        # TODO: fix the open bug
def aboutOpenWeb(self):
QDesktopServices.openUrl(QUrl("https://github.com/sovlookup"))
def setDetails(self, item):
self.name.setText(item.name)
self.author.setText(item.author)
self.version.setText(item.version)
self.description.setText(item.description)
self.config = item.config
def setSourse(self):
text, ok = QInputDialog.getText(self,"自定义源地址","设置源地址:")
if ok and str(text).startswith("http"):
self.source_url = str(text)
def workdirButtonClicked(self):
dir = QFileDialog.getExistingDirectory(self, "输出文件夹", "./")
self.workdir.setText(dir)
self.word = WordCore(dir)
def startProcess(self):
self.process.setText("处理中...")
try:
status = self.word.load(self.config).process()
if status:
QMessageBox.information(self,"成功","运行成功请查看输出目录!")
except AttributeError:
QMessageBox.warning(self,"检查","请选择任务和输出文件夹!")
except httpx.ConnectTimeout:
try:
self.word.load(self.config).process()
except httpx.ConnectTimeout:
QMessageBox.warning(self,"超时","请检查网络连接")
finally:
self.process.setText("开始任务")
def myinit(self):
if os.path.exists(self.tmpdir) == False:
os.makedirs(self.tmpdir)
for item in self.download():
self.listWidget.addItem(item)
def download(self):
try:
source = json.loads(httpx.get(self.source_url).content)
for k,v in source.items():
yield item("\n"+v.get("taskname")+"\n",v.get("icon"),k,v.get("author"),v.get("version"),v.get("config"),v.get("description"),self.tmpdir)
        except httpx.ConnectTimeout:
            # retry on timeout, at most self._time more times
            if self._time > 0:
                self._time -= 1
                yield from self.download()
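# --- Editorial addition (illustrative only) ---
# A sketch of the JSON index structure that download() above appears to expect,
# inferred from the keys it reads; every value below is a placeholder, not a
# real source entry.
EXAMPLE_SOURCE_INDEX = {
    "task-0001": {
        "taskname": "Example task",
        "icon": "https://example.com/icons/task-0001.png",
        "author": "someone",
        "version": "0.1.0",
        "config": "https://example.com/configs/task-0001.json",
        "description": "Short description shown in the details panel",
    }
}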
def ui():
    """Launch the graphical user interface."""
app = QApplication([])
widget = fastdocx()
# qssStyle = CommonHelper.readQss('./qss/black.qss')
widget.setStyleSheet(stype)
widget.show()
widget.myinit()
app.exec_()
| 31.969466
| 148
| 0.621538
|
42d7884f5f24dfc4f4ba3076fc7266b945bd3d3b
| 15,027
|
py
|
Python
|
grr/server/grr_response_server/file_store_test.py
|
ahmednofal/grr
|
08a57f6873ee13f425d0106e4143663bc6dbdd60
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/file_store_test.py
|
ahmednofal/grr
|
08a57f6873ee13f425d0106e4143663bc6dbdd60
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/file_store_test.py
|
ahmednofal/grr
|
08a57f6873ee13f425d0106e4143663bc6dbdd60
|
[
"Apache-2.0"
] | 2
|
2020-08-24T00:22:03.000Z
|
2020-11-14T08:34:43.000Z
|
#!/usr/bin/env python
"""Tests for REL_DB-based file store."""
from __future__ import absolute_import
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server import db
from grr_response_server import file_store
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
class BlobStreamTest(test_lib.GRRBaseTest):
"""BlobStream tests."""
def setUp(self):
super(BlobStreamTest, self).setUp()
self.blob_size = 10
self.blob_data = [c * self.blob_size for c in b"abcde12345"]
self.blob_ids = [
rdf_objects.BlobID.FromBlobData(bd) for bd in self.blob_data
]
self.blob_refs = [
rdf_objects.BlobReference(
offset=i * self.blob_size, size=self.blob_size, blob_id=blob_id)
for i, blob_id in enumerate(self.blob_ids)
]
data_store.BLOBS.WriteBlobs(dict(zip(self.blob_ids, self.blob_data)))
self.blob_stream = file_store.BlobStream(self.blob_refs, None)
def testReadsFirstByte(self):
self.assertEqual(self.blob_stream.read(1), b"a")
def testReadsLastByte(self):
self.blob_stream.seek(-1, 2)
self.assertEqual(self.blob_stream.read(1), b"5")
def testReadsFirstChunkPlusOneByte(self):
self.assertEqual(
self.blob_stream.read(self.blob_size + 1), b"a" * self.blob_size + b"b")
def testReadsLastChunkPlusOneByte(self):
self.blob_stream.seek(-self.blob_size - 1, 2)
self.assertEqual(
self.blob_stream.read(self.blob_size + 1), b"4" + b"5" * self.blob_size)
def testReadsWholeFile(self):
self.assertEqual(self.blob_stream.read(), b"".join(self.blob_data))
def testRaisesWhenTryingToReadTooMuchDataAtOnce(self):
with test_lib.ConfigOverrider({
"Server.max_unbound_read_size": self.blob_size
}):
# Recreate to make sure the new config option value is applied.
self.blob_stream = file_store.BlobStream(self.blob_refs, None)
self.blob_stream.read(self.blob_size)
with self.assertRaises(file_store.OversizedRead):
self.blob_stream.read(self.blob_size + 1)
def testWhenReadingWholeFileAndWholeFileSizeIsTooBig(self):
self.blob_stream.read()
self.blob_stream.seek(0)
with test_lib.ConfigOverrider({
"Server.max_unbound_read_size": self.blob_size * 10 - 1
}):
# Recreate to make sure the new config option value is applied.
self.blob_stream = file_store.BlobStream(self.blob_refs, None)
with self.assertRaises(file_store.OversizedRead):
self.blob_stream.read()
class AddFileWithUnknownHashTest(test_lib.GRRBaseTest):
"""Tests for AddFileWithUnknownHash."""
def setUp(self):
super(AddFileWithUnknownHashTest, self).setUp()
self.blob_size = 10
self.blob_data = [c * self.blob_size for c in b"ab"]
self.blob_ids = [
rdf_objects.BlobID.FromBlobData(bd) for bd in self.blob_data
]
data_store.BLOBS.WriteBlobs(dict(zip(self.blob_ids, self.blob_data)))
def testRaisesIfSingleBlobIsNotFound(self):
blob_id = rdf_objects.BlobID.FromBlobData("")
with self.assertRaises(file_store.BlobNotFound):
file_store.AddFileWithUnknownHash([blob_id])
def testAddsFileWithSingleBlob(self):
hash_id = file_store.AddFileWithUnknownHash(self.blob_ids[:1])
self.assertEqual(hash_id.AsBytes(), self.blob_ids[0].AsBytes())
def testRaisesIfOneOfTwoBlobsIsNotFound(self):
blob_id = rdf_objects.BlobID.FromBlobData("")
with self.assertRaises(file_store.BlobNotFound):
file_store.AddFileWithUnknownHash([self.blob_ids[0], blob_id])
def testAddsFileWithTwoBlobs(self):
hash_id = file_store.AddFileWithUnknownHash(self.blob_ids)
self.assertEqual(
hash_id.AsBytes(),
rdf_objects.SHA256HashID.FromData(b"".join(self.blob_data)))
class OpenFileTest(test_lib.GRRBaseTest):
"""Tests for OpenFile."""
def setUp(self):
super(OpenFileTest, self).setUp()
self.client_id = self.SetupClient(0).Basename()
self.client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self.blob_size = 10
self.blob_data = [c * self.blob_size for c in b"abcdef"]
self.blob_ids = [
rdf_objects.BlobID.FromBlobData(bd) for bd in self.blob_data
]
data_store.BLOBS.WriteBlobs(dict(zip(self.blob_ids, self.blob_data)))
self.hash_id = file_store.AddFileWithUnknownHash(self.blob_ids[:3])
self.data = b"".join(self.blob_data[:3])
self.other_hash_id = file_store.AddFileWithUnknownHash(self.blob_ids[3:])
self.invalid_hash_id = rdf_objects.SHA256HashID.FromData(b"")
def _PathInfo(self, hash_id=None):
pi = rdf_objects.PathInfo.OS(components=self.client_path.components)
if hash_id:
pi.hash_entry.sha256 = hash_id.AsBytes()
return pi
def testOpensFileWithSinglePathInfoWithHash(self):
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testRaisesForFileWithSinglePathInfoWithoutHash(self):
data_store.REL_DB.WritePathInfos(self.client_id, [self._PathInfo()])
with self.assertRaises(file_store.FileHasNoContent):
file_store.OpenFile(self.client_path)
def testRaisesForFileWithSinglePathInfoWithUnknownHash(self):
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.invalid_hash_id)])
with self.assertRaises(file_store.FileHasNoContent):
file_store.OpenFile(self.client_path)
def testOpensFileWithTwoPathInfosWhereOldestHasHash(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id, [self._PathInfo()])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensFileWithTwoPathInfosWhereNewestHasHash(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id, [self._PathInfo()])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensFileWithTwoPathInfosWhereOldestHashIsUnknown(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.invalid_hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensFileWithTwoPathInfosWhereNewestHashIsUnknown(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.invalid_hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
def testOpensLatestVersionForPathWithTwoPathInfosWithHashes(self):
# Oldest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.other_hash_id)])
# Newest.
data_store.REL_DB.WritePathInfos(self.client_id,
[self._PathInfo(self.hash_id)])
fd = file_store.OpenFile(self.client_path)
self.assertEqual(fd.read(), self.data)
class StreamFilesChunksTest(test_lib.GRRBaseTest):
"""Tests for StreamFilesChunks."""
def _WriteFile(self, client_path, blobs_range=None):
path_info = rdf_objects.PathInfo.OS(components=client_path.components)
if blobs_range:
hash_id = file_store.AddFileWithUnknownHash(
self.blob_ids[blobs_range[0]:blobs_range[1]])
path_info.hash_entry.sha256 = hash_id.AsBytes()
data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])
def setUp(self):
super(StreamFilesChunksTest, self).setUp()
self.client_id = self.SetupClient(0).Basename()
self.client_id_other = self.SetupClient(1).Basename()
self.blob_size = 10
self.blob_data = [c * self.blob_size for c in b"abcdef"]
self.blob_ids = [
rdf_objects.BlobID.FromBlobData(bd) for bd in self.blob_data
]
data_store.BLOBS.WriteBlobs(dict(zip(self.blob_ids, self.blob_data)))
def testStreamsSingleFileWithSingleChunk(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path, (0, 1))
chunks = list(file_store.StreamFilesChunks([client_path]))
self.assertEqual(len(chunks), 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, self.blob_data[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 1)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size)
def testStreamsSingleFileWithTwoChunks(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path, (0, 2))
chunks = list(file_store.StreamFilesChunks([client_path]))
self.assertEqual(len(chunks), 2)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, self.blob_data[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 2)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size * 2)
self.assertEqual(chunks[1].client_path, client_path)
self.assertEqual(chunks[1].data, self.blob_data[1])
self.assertEqual(chunks[1].chunk_index, 1)
self.assertEqual(chunks[1].total_chunks, 2)
self.assertEqual(chunks[1].offset, self.blob_size)
self.assertEqual(chunks[1].total_size, self.blob_size * 2)
def testStreamsTwoFilesWithTwoChunksInEach(self):
client_path_1 = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path_1, (0, 2))
client_path_2 = db.ClientPath.OS(self.client_id_other, ("foo", "bar"))
self._WriteFile(client_path_2, (2, 4))
chunks = list(file_store.StreamFilesChunks([client_path_1, client_path_2]))
self.assertEqual(len(chunks), 4)
self.assertEqual(chunks[0].client_path, client_path_1)
self.assertEqual(chunks[0].data, self.blob_data[0])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 2)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size * 2)
self.assertEqual(chunks[1].client_path, client_path_1)
self.assertEqual(chunks[1].data, self.blob_data[1])
self.assertEqual(chunks[1].chunk_index, 1)
self.assertEqual(chunks[1].total_chunks, 2)
self.assertEqual(chunks[1].offset, self.blob_size)
self.assertEqual(chunks[1].total_size, self.blob_size * 2)
self.assertEqual(chunks[2].client_path, client_path_2)
self.assertEqual(chunks[2].data, self.blob_data[2])
self.assertEqual(chunks[2].chunk_index, 0)
self.assertEqual(chunks[2].total_chunks, 2)
self.assertEqual(chunks[2].offset, 0)
self.assertEqual(chunks[2].total_size, self.blob_size * 2)
self.assertEqual(chunks[3].client_path, client_path_2)
self.assertEqual(chunks[3].data, self.blob_data[3])
self.assertEqual(chunks[3].chunk_index, 1)
self.assertEqual(chunks[3].total_chunks, 2)
self.assertEqual(chunks[3].offset, self.blob_size)
self.assertEqual(chunks[3].total_size, self.blob_size * 2)
def testIgnoresFileWithoutChunks(self):
client_path_1 = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path_1, None)
client_path_2 = db.ClientPath.OS(self.client_id_other, ("foo", "bar"))
self._WriteFile(client_path_2, (2, 4))
chunks = list(file_store.StreamFilesChunks([client_path_1, client_path_2]))
self.assertEqual(len(chunks), 2)
self.assertEqual(chunks[0].client_path, client_path_2)
self.assertEqual(chunks[0].data, self.blob_data[2])
self.assertEqual(chunks[0].chunk_index, 0)
self.assertEqual(chunks[0].total_chunks, 2)
self.assertEqual(chunks[0].offset, 0)
self.assertEqual(chunks[0].total_size, self.blob_size * 2)
self.assertEqual(chunks[1].client_path, client_path_2)
self.assertEqual(chunks[1].data, self.blob_data[3])
self.assertEqual(chunks[1].chunk_index, 1)
self.assertEqual(chunks[1].total_chunks, 2)
self.assertEqual(chunks[1].offset, self.blob_size)
self.assertEqual(chunks[1].total_size, self.blob_size * 2)
def testRespectsClientPathsOrder(self):
client_path_1 = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path_1, (0, 1))
client_path_2 = db.ClientPath.OS(self.client_id_other, ("foo", "bar"))
self._WriteFile(client_path_2, (0, 1))
chunks = list(file_store.StreamFilesChunks([client_path_1, client_path_2]))
self.assertEqual(len(chunks), 2)
self.assertEqual(chunks[0].client_path, client_path_1)
self.assertEqual(chunks[1].client_path, client_path_2)
# Check that reversing the list of requested client paths reverses the
# result.
chunks = list(file_store.StreamFilesChunks([client_path_2, client_path_1]))
self.assertEqual(len(chunks), 2)
self.assertEqual(chunks[0].client_path, client_path_2)
self.assertEqual(chunks[1].client_path, client_path_1)
def testReadsLatestVersionWhenStreamingWithoutSpecifiedTimestamp(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path, (0, 1))
self._WriteFile(client_path, (1, 2))
chunks = list(file_store.StreamFilesChunks([client_path]))
self.assertEqual(len(chunks), 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, self.blob_data[1])
def testRespectsMaxTimestampWhenStreamingSingleFile(self):
client_path = db.ClientPath.OS(self.client_id, ("foo", "bar"))
self._WriteFile(client_path, (0, 1))
timestamp_1 = rdfvalue.RDFDatetime.Now()
self._WriteFile(client_path, (1, 2))
timestamp_2 = rdfvalue.RDFDatetime.Now()
chunks = list(
file_store.StreamFilesChunks([client_path], max_timestamp=timestamp_2))
self.assertEqual(len(chunks), 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, self.blob_data[1])
chunks = list(
file_store.StreamFilesChunks([client_path], max_timestamp=timestamp_1))
self.assertEqual(len(chunks), 1)
self.assertEqual(chunks[0].client_path, client_path)
self.assertEqual(chunks[0].data, self.blob_data[0])
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 39.031169
| 80
| 0.717176
|
108317dac2e68f6eaa6d3a5260825679b69baccd
| 146
|
py
|
Python
|
notebooks/paths.py
|
JoaoCarabetta/bovespa_fun
|
febd24d5af54ad32b383afb154a0154c12aa8a6f
|
[
"MIT"
] | 2
|
2019-11-22T05:31:24.000Z
|
2019-11-25T04:03:17.000Z
|
notebooks/paths.py
|
JoaoCarabetta/distancia_escolas_sp
|
22ac35e4577b40fff5d44feac53e56799e649f5f
|
[
"MIT"
] | 1
|
2020-03-21T19:56:34.000Z
|
2020-03-21T19:56:34.000Z
|
notebooks/paths.py
|
JoaoCarabetta/distancia_escolas_sp
|
22ac35e4577b40fff5d44feac53e56799e649f5f
|
[
"MIT"
] | 1
|
2019-11-22T05:31:36.000Z
|
2019-11-22T05:31:36.000Z
|
import os
import sys
sys.path.insert(0, '../')
sys.path.insert(0, '../scripts')
from config import RAW_PATH, TREAT_PATH, OUTPUT_PATH, FIGURES_PATH
| 29.2
| 66
| 0.746575
|
7fd9e8e192970115641be927828ccb673fc03d87
| 6,189
|
py
|
Python
|
research/recommend/autodis/infer/utils/preprocess_txt.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/recommend/autodis/infer/utils/preprocess_txt.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/recommend/autodis/infer/utils/preprocess_txt.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
sample script of preprocessing txt data for autodis infer
"""
import collections
import pickle
import os
import argparse
class StatsDict():
"""preprocessed data"""
def __init__(self, field_size, dense_dim, slot_dim, skip_id_convert):
self.field_size = field_size
self.dense_dim = dense_dim
self.slot_dim = slot_dim
self.skip_id_convert = bool(skip_id_convert)
self.val_cols = ["val_{}".format(i + 1) for i in range(self.dense_dim)]
self.cat_cols = ["cat_{}".format(i + 1) for i in range(self.slot_dim)]
self.val_min_dict = {col: 0 for col in self.val_cols}
self.val_max_dict = {col: 0 for col in self.val_cols}
self.cat_count_dict = {col: collections.defaultdict(int) for col in self.cat_cols}
self.oov_prefix = "OOV"
self.cat2id_dict = {}
self.cat2id_dict.update({col: i for i, col in enumerate(self.val_cols)})
self.cat2id_dict.update(
{self.oov_prefix + col: i + len(self.val_cols) for i, col in enumerate(self.cat_cols)})
def load_dict(self, dict_path, prefix=""):
with open(os.path.join(dict_path, "{}val_max_dict.pkl".format(prefix)), "rb") as file_wrt:
self.val_max_dict = pickle.load(file_wrt)
with open(os.path.join(dict_path, "{}val_min_dict.pkl".format(prefix)), "rb") as file_wrt:
self.val_min_dict = pickle.load(file_wrt)
with open(os.path.join(dict_path, "{}cat_count_dict.pkl".format(prefix)), "rb") as file_wrt:
self.cat_count_dict = pickle.load(file_wrt)
print("val_max_dict.items()[:50]:{}".format(list(self.val_max_dict.items())))
print("val_min_dict.items()[:50]:{}".format(list(self.val_min_dict.items())))
def get_cat2id(self, threshold=100):
for key, cat_count_d in self.cat_count_dict.items():
new_cat_count_d = dict(filter(lambda x: x[1] > threshold, cat_count_d.items()))
for cat_str, _ in new_cat_count_d.items():
self.cat2id_dict[key + "_" + cat_str] = len(self.cat2id_dict)
print("cat2id_dict.size:{}".format(len(self.cat2id_dict)))
print("cat2id.dict.items()[:50]:{}".format(list(self.cat2id_dict.items())[:50]))
def map_cat2id(self, values, cats):
"""Cat to id"""
def minmax_scale_value(i, val):
max_v = float(self.val_max_dict["val_{}".format(i + 1)])
return float(val) * 1.0 / max_v
id_list = []
weight_list = []
for i, val in enumerate(values):
if val == "":
id_list.append(i)
weight_list.append(0)
else:
key = "val_{}".format(i + 1)
id_list.append(self.cat2id_dict[key])
weight_list.append(minmax_scale_value(i, float(val)))
for i, cat_str in enumerate(cats):
key = "cat_{}".format(i + 1) + "_" + cat_str
if key in self.cat2id_dict:
if self.skip_id_convert is True:
                    # For the synthetic data, if the generated id is between [0, max_vocab] but the number of
                    # examples is less than vocab_size / slot_nums, the id will still be converted to
                    # [0, real_vocab], where real_vocab is the actual vocab size rather than max_vocab.
                    # A simple way to alleviate this problem is to skip the id conversion and regard the
                    # synthetic data id as the final id.
id_list.append(cat_str)
else:
id_list.append(self.cat2id_dict[key])
else:
id_list.append(self.cat2id_dict[self.oov_prefix + "cat_{}".format(i + 1)])
weight_list.append(1.0)
return id_list, weight_list
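# --- Editorial addition (illustrative only) ---
# A small helper showing the tab-separated line layout that run() below expects:
# label first, then dense_dim numeric values, then slot_dim categorical strings.
# It is not used by the original pipeline; the arguments are placeholders.
def build_example_line(label, dense_vals, cat_vals):
    fields = [str(label)] + [str(v) for v in dense_vals] + list(cat_vals)
    return "\t".join(fields)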
def parse_args():
"""set and check parameters."""
parser = argparse.ArgumentParser(description="autodis process")
parser.add_argument('--data_dir', type=str, default='../data/input/origin_data')
parser.add_argument('--dst_dir', type=str, default='../data/input')
parser.add_argument('--data_input', type=str, default="test.txt")
parser.add_argument('--dense_dim', type=int, default=13)
parser.add_argument('--slot_dim', type=int, default=26)
parser.add_argument("--skip_id_convert", type=int, default=0)
parser.add_argument("--threshold", type=int, default=100)
args, _ = parser.parse_known_args()
return args
def run():
"""
preprocessing txt data
"""
args = parse_args()
stats = StatsDict(field_size=args.dense_dim+args.slot_dim, dense_dim=args.dense_dim, \
slot_dim=args.slot_dim, skip_id_convert=args.skip_id_convert)
stats.load_dict(dict_path="./stats_dict", prefix="")
stats.get_cat2id(threshold=args.threshold)
fi = open(os.path.join(args.data_dir, args.data_input), "r")
fo1 = open(os.path.join(args.dst_dir, "label.txt"), "w")
fo2 = open(os.path.join(args.dst_dir, "ids.txt"), "w")
fo3 = open(os.path.join(args.dst_dir, "wts.txt"), "w")
for line in fi:
line = line.strip("\n")
items = line.split("\t")
label = float(items[0])
values = items[1:1 + args.dense_dim]
cats = items[1 + args.dense_dim:]
ids, wts = stats.map_cat2id(values, cats)
fo1.write(str(int(label))+"\n")
fo2.write("\t".join(str(id) for id in ids)+"\n")
fo3.write("\t".join(str(wt) for wt in wts)+"\n")
fo1.close()
fo2.close()
fo3.close()
if __name__ == '__main__':
run()
| 44.207143
| 120
| 0.621587
|
40d22245bd4d9be1a6e29e176df88be485584970
| 3,257
|
py
|
Python
|
CDSB_series/kfingerprinting/random-evaluate-script.py
|
WFDetector/WFDetection
|
b16d35b3a3a5de62de9e0bac83eccd21b6358b53
|
[
"Apache-2.0"
] | null | null | null |
CDSB_series/kfingerprinting/random-evaluate-script.py
|
WFDetector/WFDetection
|
b16d35b3a3a5de62de9e0bac83eccd21b6358b53
|
[
"Apache-2.0"
] | null | null | null |
CDSB_series/kfingerprinting/random-evaluate-script.py
|
WFDetector/WFDetection
|
b16d35b3a3a5de62de9e0bac83eccd21b6358b53
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
from os.path import join
import argparse
from os.path import abspath, dirname
# Directories
BASE_DIR = abspath(join(dirname(__file__)))
# prefix = "../split/results/"
# targets = [ #glue
# "ranpad2_0706_0829/",
# "ranpad2_0706_0830/",
# "ranpad2_0706_0831/",
# "ranpad2_0706_0832/",
# "ranpad2_0706_0833/",
# "ranpad2_0706_0834/",
# "ranpad2_0706_0835/",
# "ranpad2_0706_0836/",
# "ranpad2_0706_0837/",
# "ranpad2_0706_0838/",
# "ranpad2_0706_0839/",
# "ranpad2_0706_0840/",
# "ranpad2_0706_0841/",
# "ranpad2_0706_0842/",
# "ranpad2_0706_0843/",
# ]
# targets = [
# "ranpad2_0603_194535/",
# "ranpad2_0603_194602/",
# "ranpad2_0603_194639/",
# "ranpad2_0603_194724/",
# "ranpad2_0603_194824/",
# "ranpad2_0603_194930/",
# "ranpad2_0603_195046/",
# "ranpad2_0603_195215/",
# "ranpad2_0603_195355/",
# "ranpad2_0603_195536/",
# "ranpad2_0603_195733/",
# "ranpad2_0603_195940/",
# "ranpad2_0603_200155/",
# "ranpad2_0603_200422/",
# "ranpad2_0603_200652/",
# ]
# for target in targets:
# target = join(prefix, target)
# # cmd1 = "python3 random-evaluate.py -m clean.pkl -o clean.npy -mode head -p "+ target
# cmd1 = "python3 random-evaluate.py -m dirty.pkl -o dirty.npy -mode head -p "+ target
# cmd2 = "python3 random-evaluate.py -m clean.pkl -o clean.npy -mode other -p "+ target
# subprocess.call(cmd1, shell= True)
# subprocess.call(cmd2, shell= True)
# # print("\n\n\n\n\n\n\n")
def parse_arguments():
parser = argparse.ArgumentParser(description='DF ATTACK.')
parser.add_argument('-d',
metavar='<dirtymodel path>',
help='Path to the directory of the dirtymodel')
parser.add_argument('-c',
metavar='<cleanmodel path>',
help='Path to the directory of the cleanmodel')
parser.add_argument('-od',
metavar='<feature path>',
help='Path to the directory of the extracted features')
parser.add_argument('-oc',
metavar='<feature path>',
help='Path to the directory of the extracted features')
parser.add_argument('-t',
metavar='<target>',
help='Target to test')
# Parse arguments
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_arguments()
cmd1 = "python3 " + join(BASE_DIR,"random-evaluate.py") + " -m " + args.d + " -o " + args.od + " -mode head -p " + args.t
cmd2 = "python3 " + join(BASE_DIR,"random-evaluate.py") + " -m " + args.c + " -o " + args.oc + " -mode other -p " + args.t
subprocess.call(cmd1, shell= True)
subprocess.call(cmd2, shell= True)
# target = "../split/randomresults/mergepad_evaluation_16_200_10_random/"
# cmdtest1 = "python3 " + join(BASE_DIR,"random-evaluate.py") + " -m " + "./models/attacktrain_clean.pkl" + " -o " + "./models/attacktrain_clean.npy" + " -mode head -p " + target
# subprocess.call(cmdtest1, shell= True)
# cmdtest2 = "python3 " + join(BASE_DIR,"random-evaluate.py") + " -m " + "./models/attacktrain_clean.pkl" + " -o " + "./models/attacktrain_clean.npy" + " -mode other -p " + target
# subprocess.call(cmdtest2, shell= True)
| 36.595506
| 183
| 0.624501
|
bcec56ea40e0aa8cee040630613047eafa8c22ca
| 7,596
|
py
|
Python
|
git_profile_manager/utils.py
|
MeNsaaH/git-profile-manager
|
b084224b5947878a272258031780453c90a97525
|
[
"MIT"
] | 26
|
2020-06-02T11:17:23.000Z
|
2021-09-10T19:59:28.000Z
|
git_profile_manager/utils.py
|
MeNsaaH/git-profile-manager
|
b084224b5947878a272258031780453c90a97525
|
[
"MIT"
] | 4
|
2020-06-02T23:45:41.000Z
|
2020-06-06T12:00:06.000Z
|
git_profile_manager/utils.py
|
MeNsaaH/git-profile-manager
|
b084224b5947878a272258031780453c90a97525
|
[
"MIT"
] | 1
|
2020-08-17T23:34:34.000Z
|
2020-08-17T23:34:34.000Z
|
""" Utility functions """
import os
import re
import shutil
import sys
import subprocess
from git_profile_manager import configparser
HOME = os.path.expanduser("~")
# Directory to store all git-profile-manager config
GIT_PROFILE_DIR = os.path.join(HOME, ".gitprofiles")
GLOBAL_GITCONFIG = os.path.join(GIT_PROFILE_DIR, "global")
# Store config for current active user
PROFILE_RC=os.path.join(GIT_PROFILE_DIR, ".profilerc")
GIT_CONFIG = os.path.join(HOME, '.gitconfig')
def get_user_config_path(user):
""" Get the path to user config """
return os.path.join(GIT_PROFILE_DIR, user)
def get_user_from_config_path(path):
""" Get user from path """
return os.path.split(path)[-1]
def get_user_from_alias(alias):
""" Returns the user email using the alias """
config = configparser.ConfigParser()
config.read(PROFILE_RC)
return config.get("users", alias, fallback=None)
def get_alias_from_user(user, config=None):
""" returns alias for a user """
if not config:
config = configparser.ConfigParser()
config.read(PROFILE_RC)
if "users" in config._sections.keys():
for key, value in config._sections["users"].items():
if value == user:
return key
def user_exists(user, alias=False):
""" A user exists if the corresponding config file is present """
config = configparser.ConfigParser()
config.read(PROFILE_RC)
return config.get("users", user, fallback=None) or config_exists(user)
def config_exists(user):
""" Check if config file exists for user """
return os.path.exists(get_user_config_path(user))
def add_alias(alias, user):
""" Add new alias to PROFILE_RC """
config = configparser.ConfigParser()
config.read(PROFILE_RC)
if not "users" in config._sections.keys():
config["users"] = {}
config["users"][alias] = user
with open(PROFILE_RC, 'w') as configfile:
config.write(configfile)
def is_email(string):
email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+")
return email_regex.match(string)
def user_input(prompt, lower=True):
""" User input string for python independent version """
r = input(prompt)
return r.lower() if lower else r
def exec_command(command, **kwargs):
""" Executes a command and exit if it fails """
comp = subprocess.run(command, capture_output=True, **kwargs)
if comp.returncode != 0:
sys.exit(1)
return comp
def update_current_user(user):
""" update PROFILE_RC with to `user` """
config = configparser.ConfigParser()
config.read(PROFILE_RC)
try:
config["current"]["user"] = user
except KeyError:
config["current"] = {}
config["current"]["user"] = user
with open(PROFILE_RC, 'w') as configfile:
config.write(configfile)
def set_active_user(user):
""" set the current active user
    This updates GIT_CONFIG with user data and updates PROFILE_RC to reflect that the user is in session
"""
current_user = get_current_user()
update_current_user(user)
# load config and override global configuration
config = configparser.ConfigParser()
config.read(GLOBAL_GITCONFIG)
config.read(get_user_config_path(user))
with open(GIT_CONFIG, "w") as configfile:
config.write(configfile)
def get_current_user(append_name=False):
""" Get the current active user """
email = str(exec_command(["git", "config", "user.email"], universal_newlines=True).stdout).strip("\n")
if append_name:
name = str(exec_command(["git", "config", "user.name"], universal_newlines=True).stdout).strip("\n")
email = "%s (%s)" % (email, name)
return email
def get_all_users():
""" Get all users
All files within the GIT_PROFILE_DIR are user data except the .profilerc and global
"""
users = [f for f in os.listdir(GIT_PROFILE_DIR) if os.path.isfile(os.path.join(GIT_PROFILE_DIR, f)) and f not in [".profilerc", "global"]]
return users
def save_current_user_profile():
""" Save the config for the current user to personal config
If git config had been executed, the GIT_CONFIG file must have changed, update the personal user's config
"""
current_user = get_current_user()
# Remove entries that match in global config
global_config = configparser.ConfigParser()
global_config.read(GLOBAL_GITCONFIG)
current_config = configparser.ConfigParser()
current_config.read(GIT_CONFIG)
# Use a different config to make modifications. current_config cannot be modified during iteration
config = configparser.ConfigParser()
config.read(GIT_CONFIG)
# Delete every matching config that exists in global config
for section in current_config:
if section in global_config._sections.keys():
for key, value in current_config[section].items():
if key in global_config[section].keys():
if value == global_config[section][key]:
del config[section][key]
# Write current user config
with open(get_user_config_path(current_user), "w") as configfile:
config.write(configfile)
def remove_user(user):
config = configparser.ConfigParser()
config.read(PROFILE_RC)
alias = get_alias_from_user(user)
print(alias)
if alias:
del config["users"][alias]
with open(PROFILE_RC, "w") as configfile:
config.write(configfile)
try:
os.remove(get_user_config_path(user))
except FileNotFoundError:
print("Config for %s not found at %s" % (user, get_user_config_path(user)))
def setup():
"""
    Set up the user machine for git-profile-manager. This backs up the current config as the global config and creates the necessary files
"""
# create GIT_PROFILE_DIR and backup only when it doesn't exist. If it does, user may be upgrading
if not os.path.exists(GIT_PROFILE_DIR):
os.makedirs(GIT_PROFILE_DIR)
if os.path.isfile(GIT_CONFIG):
shutil.copyfile(GIT_CONFIG, GLOBAL_GITCONFIG)
else:
# create an empty global config file
with open(GLOBAL_GITCONFIG, 'a'):
os.utime(GLOBAL_GITCONFIG, None)
# Create `users` entry in profilerc
config = configparser.ConfigParser()
config.read(PROFILE_RC)
if "users" not in config._sections.keys():
config["users"] = {}
with open(PROFILE_RC, 'w') as configfile:
config.write(configfile)
def apply_profile(path, user):
""" Adds includeIf command to gitconfig for path """
path = os.path.abspath(path)
if path[-1] != os.path.sep:
path += os.path.sep
global_config = configparser.ConfigParser()
global_config.read(GLOBAL_GITCONFIG)
user_config_path = get_user_config_path(user)
includeIf_key = "includeIf \"gitdir:%s\"" % path
if not os.path.isdir(path):
print("path %s does not exist" % path)
return
if includeIf_key in global_config._sections.keys():
path_user = get_user_from_config_path(global_config[includeIf_key]["path"])
response = user_input("Path is already configured to use %s, do you want to override (y/N)? " % path_user)
if response != "y":
print("Path %s configuration skipped" % path)
return
global_config[includeIf_key] = {}
global_config[includeIf_key]["path"] = user_config_path
with open(GLOBAL_GITCONFIG, "w") as configfile:
global_config.write(configfile)
print("Path %s configured to use %s config" % (path, user))
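# --- Editorial addition (illustrative only) ---
# The shape of the section that apply_profile() above writes into the backed-up
# global config; the gitdir path and user email below are placeholders.
def _example_include_if_section():
    return {
        'includeIf "gitdir:/home/me/projects/work/"': {
            "path": get_user_config_path("me@work.example"),
        }
    }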
| 32.323404
| 142
| 0.671669
|
6417fe46c67b5042dce7ca478004a867bfe9607c
| 5,967
|
py
|
Python
|
insights/parsers/samba.py
|
haithcockce/insights-core
|
b2e197c6bfc25bcbe2926f07c35a80f2cf8232f5
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/samba.py
|
haithcockce/insights-core
|
b2e197c6bfc25bcbe2926f07c35a80f2cf8232f5
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/samba.py
|
haithcockce/insights-core
|
b2e197c6bfc25bcbe2926f07c35a80f2cf8232f5
|
[
"Apache-2.0"
] | null | null | null |
"""
SambaConfig - file ``/etc/samba/smb.conf``
==========================================
This parser reads the Samba configuration file ``/etc/samba/smb.conf``, which
is in standard .ini format, with a couple of notable features:
* Samba ignores spaces at the start of options, which the ConfigParser class
normally does not. This spacing is stripped by this parser.
* Samba likewise ignores spaces in section heading names.
* Samba allows the same section to be defined multiple times, with the
options therein being merged as if they were one section.
* Samba allows options to be declared before the first section marker.
This parser puts these options in a `global` section.
* Samba treats ';' as a comment prefix, similar to '#'.
Sample configuration file::
# This is the main Samba configuration file. You should read the
# smb.conf(5) manual page in order to understand the options listed
#...
#======================= Global Settings =====================================
[global]
workgroup = MYGROUP
server string = Samba Server Version %v
max log size = 50
[homes]
comment = Home Directories
browseable = no
writable = yes
; valid users = %S
; valid users = MYDOMAIN\%S
[printers]
comment = All Printers
path = /var/spool/samba
browseable = no
guest ok = no
writable = no
printable = yes
# A publicly accessible directory, but read only, except for people in
# the "staff" group
[public]
comment = Public Stuff
path = /home/samba
public = yes
writable = yes
printable = no
write list = +staff
Examples:
>>> type(conf)
<class 'insights.parsers.samba.SambaConfig'>
>>> sorted(conf.sections()) == [u'global', u'homes', u'printers', u'public']
True
>>> global_options = conf.items('global') # get a section as a dictionary
>>> type(global_options) == type({})
True
>>> conf.get('public', 'comment') == u'Public Stuff' # Accessor for section and option
True
>>> conf.getboolean('public', 'writable') # Type conversion, but no default
True
>>> conf.getint('global', 'max log size') # Same for integer conversion
50
"""
import re
from . import ParseException
from .. import add_filter, IniConfigFile, parser
from insights.specs import Specs
add_filter(Specs.samba, ["["])
add_filter(Specs.testparm_s, ["["])
add_filter(Specs.testparm_s, ["Server role:"])
add_filter(Specs.testparm_v_s, ["["])
add_filter(Specs.testparm_v_s, ["Server role:"])
@parser(Specs.samba)
class SambaConfig(IniConfigFile):
"""
This parser reads the Samba configuration file ``/etc/samba/smb.conf``.
Note: It is needed for better resolution descriptions when it is necessary to know what exactly
is in the configuration file. For generic tasks use ``SambaConfigs`` or ``SambaConfigsAll``
instead.
"""
def parse_content(self, content):
# smb.conf is special from other ini files in the property that
        # whatever is before the first section (i.e., before the first section marker)
# belongs to the [global] section. Therefore, the [global] section is
# appended right at the beginning so that everything that would be
# parsed as outside section belongs to [global].
# Python 2.7 RawConfigParser automatically merges multiple instances
# of the same section. (And if that ever changes, test_samba.py will
# catch it.)
lstripped = ["[global]"] + [line.lstrip() for line in content]
super(SambaConfig, self).parse_content(lstripped)
# Create a new instance of the same dict type used by the underlying
# RawConfigParser.
new_dict = self.data._dict()
# Transform the section names so that whitespace around is stripped
# and they are lowercase. smb.conf is special in the property that
# section names and option names are case-insensitive and treated
# like lower-case.
for old_key, old_section in self.data._sections.items():
new_key = old_key.strip().lower()
if new_key not in new_dict:
new_dict[new_key] = self.data._dict()
# Merge same-named sections just as samba's `testparm` does.
new_dict[new_key].update(old_section)
self.data._sections = new_dict
@parser(Specs.testparm_s)
class SambaConfigs(SambaConfig):
"""
This parser reads the Samba configuration from command `testparm -s` which is more reliable
than parsing the config file, as it includes configuration in internal registry. It also
includes server role.
Note: This is the most suitable parser when only user changes to the configuration are important
for the detection logic, i.e. misconfiguration.
Attributes:
server_role (string): Server role as reported by the command.
"""
def parse_content(self, content):
# Parse server role
for line in content:
r = re.search(r"Server role:\s+(\S+)", line)
if r:
self.server_role = r.group(1)
break
else:
raise ParseException("Server role not found.")
super(SambaConfigs, self).parse_content(content)
@parser(Specs.testparm_v_s)
class SambaConfigsAll(SambaConfigs):
"""
This parser reads the Samba configuration from command `testparm -v -s` which is more reliable
than parsing the config file, as it includes configuration in internal registry. It also
includes all default values and server role.
Note: This parser is needed for cases when active value of specific option is needed for the
detection logic, irrespective of its origin from user changes or defaults, i.e. security
vulnerabilities.
Attributes:
server_role (string): Server role as reported by the command.
"""
pass
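# --- Editorial addition (illustrative only) ---
# The server-role extraction used by SambaConfigs.parse_content above, applied
# to a typical ``testparm -s`` line (the role value is just an example).
def _example_server_role(line="Server role: ROLE_STANDALONE"):
    match = re.search(r"Server role:\s+(\S+)", line)
    return match.group(1) if match else None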
| 36.163636
| 100
| 0.657952
|
54155eff8e26b16ff5303d8d279e81b4bf8a90f4
| 5,475
|
py
|
Python
|
demo/model_zoo/embedding/paraconvert.py
|
lzhao4ever/Paddle-master
|
5c0eb23d1c021fed88416df9eae8511d36df4372
|
[
"Apache-2.0"
] | 1
|
2018-12-20T12:15:39.000Z
|
2018-12-20T12:15:39.000Z
|
demo/model_zoo/embedding/paraconvert.py
|
lzhao4ever/Paddle-master
|
5c0eb23d1c021fed88416df9eae8511d36df4372
|
[
"Apache-2.0"
] | null | null | null |
demo/model_zoo/embedding/paraconvert.py
|
lzhao4ever/Paddle-master
|
5c0eb23d1c021fed88416df9eae8511d36df4372
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/env python
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example:
python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM
python paraconvert.py --t2b -i INPUT -o OUTPUT
Options:
-h, --help show this help message and exit
--b2t convert parameter file of embedding model from binary to text
--t2b convert parameter file of embedding model from text to binary
-i INPUT input parameter file name
-o OUTPUT output parameter file name
-d DIM dimension of parameter
"""
from optparse import OptionParser
import struct
def binary2text(input, output, paraDim):
"""
Convert a binary parameter file of embedding model to be a text file.
input: the name of input binary parameter file, the format is:
1) the first 16 bytes is filehead:
version(4 bytes): version of paddle, default = 0
floatSize(4 bytes): sizeof(float) = 4
paraCount(8 bytes): total number of parameter
2) the next (paraCount * 4) bytes is parameters, each has 4 bytes
output: the name of output text parameter file, for example:
0,4,32156096
-0.7845433,1.1937413,-0.1704215,...
0.0000909,0.0009465,-0.0008813,...
...
the format is:
1) the first line is filehead:
version=0, floatSize=4, paraCount=32156096
        2) other lines print the parameters
           a) each line prints paraDim parameters separated by ','
           b) there are paraCount/paraDim lines (embedding words)
paraDim: dimension of parameters
"""
fi = open(input, "rb")
    fo = open(output, "w")
version, floatSize, paraCount = struct.unpack("iil", fi.read(16))
newHead = ','.join([str(version), str(floatSize), str(paraCount)])
print >> fo, newHead
bytes = 4 * int(paraDim)
format = "%df" % int(paraDim)
context = fi.read(bytes)
line = 0
while context:
numbers = struct.unpack(format, context)
lst = []
for i in numbers:
lst.append('%8.7f' % i)
print >> fo, ','.join(lst)
context = fi.read(bytes)
line += 1
fi.close()
fo.close()
print "binary2text finish, total", line, "lines"
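# --- Hedged illustration (not part of the original script) ---
# A small self-contained round-trip of the 16-byte file head described in the
# binary2text() docstring: version (4 bytes), floatSize (4 bytes) and
# paraCount (8 bytes on the 64-bit Linux builds this script assumes),
# followed by paraCount 4-byte floats. The parameter values below are made up.
# This helper is not called anywhere; it only documents the layout.
def _filehead_roundtrip_example():
    params = [0.25, -1.5, 3.0, 0.125]                # paraCount = 4
    head = struct.pack("iil", 0, 4, len(params))     # same format string as above
    body = struct.pack("%df" % len(params), *params)
    version, floatSize, paraCount = struct.unpack("iil", head)
    assert (version, floatSize, paraCount) == (0, 4, 4)
    assert struct.unpack("%df" % paraCount, body) == (0.25, -1.5, 3.0, 0.125)
    return head + body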
def get_para_count(input):
"""
Compute the total number of embedding parameters in input text file.
input: the name of input text file
"""
numRows = 1
paraDim = 0
with open(input) as f:
line = f.readline()
paraDim = len(line.split(","))
for line in f:
numRows += 1
return numRows * paraDim
def text2binary(input, output, paddle_head=True):
"""
Convert a text parameter file of embedding model to be a binary file.
input: the name of input text parameter file, for example:
-0.7845433,1.1937413,-0.1704215,...
0.0000909,0.0009465,-0.0008813,...
...
the format is:
1) it doesn't have filehead
2) each line stores the same dimension of parameters,
the separator is commas ','
output: the name of output binary parameter file, the format is:
1) the first 16 bytes is filehead:
version(4 bytes), floatSize(4 bytes), paraCount(8 bytes)
2) the next (paraCount * 4) bytes is parameters, each has 4 bytes
"""
fi = open(input, "r")
fo = open(output, "wb")
newHead = struct.pack("iil", 0, 4, get_para_count(input))
fo.write(newHead)
count = 0
for line in fi:
line = line.strip().split(",")
for i in range(0, len(line)):
binary_data = struct.pack("f", float(line[i]))
fo.write(binary_data)
count += 1
fi.close()
fo.close()
print "text2binary finish, total", count, "lines"
def main():
"""
Main entry for running paraconvert.py
"""
usage = "usage: \n" \
"python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \
"python %prog --t2b -i INPUT -o OUTPUT"
parser = OptionParser(usage)
parser.add_option(
"--b2t",
action="store_true",
help="convert parameter file of embedding model from binary to text")
parser.add_option(
"--t2b",
action="store_true",
help="convert parameter file of embedding model from text to binary")
parser.add_option(
"-i", action="store", dest="input", help="input parameter file name")
parser.add_option(
"-o", action="store", dest="output", help="output parameter file name")
parser.add_option(
"-d", action="store", dest="dim", help="dimension of parameter")
(options, args) = parser.parse_args()
if options.b2t:
binary2text(options.input, options.output, options.dim)
if options.t2b:
text2binary(options.input, options.output)
if __name__ == '__main__':
main()
| 34.21875
| 79
| 0.61242
|
c3561981de76a5cb01d50e5e5596c6750564031e
| 20,544
|
py
|
Python
|
flux_combined_high_binding/model_42.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_42.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_42.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 5000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
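# --- Hedged usage sketch (not part of the exported model) ---
# One plausible way to integrate the model above with PySB's SciPy-based ODE
# simulator; the simulator choice, time span and printed observable are
# assumptions, not something specified by the exported file.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 101)     # arbitrary time points
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    # Observables declared above are available by name, e.g. cleaved PARP:
    print(result.observables['ParpC_obs'][-1])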
| 95.111111
| 798
| 0.804079
|
b8b8010c109230e151b8e1e595edc1a3fa572be9
| 2,154
|
py
|
Python
|
tests/test_ner.py
|
vishalbelsare/ner-d
|
0231f8b14ab1e75fa4b7238ac8a237d9d9fa23f2
|
[
"MIT"
] | 16
|
2019-06-24T09:00:23.000Z
|
2021-09-02T14:13:18.000Z
|
tests/test_ner.py
|
vishalbelsare/ner-d
|
0231f8b14ab1e75fa4b7238ac8a237d9d9fa23f2
|
[
"MIT"
] | 1
|
2019-07-09T13:52:24.000Z
|
2020-01-13T14:11:31.000Z
|
tests/test_ner.py
|
vishalbelsare/ner-d
|
0231f8b14ab1e75fa4b7238ac8a237d9d9fa23f2
|
[
"MIT"
] | 3
|
2019-10-20T20:02:24.000Z
|
2020-09-06T13:08:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from nerd import ner
class NerTest(unittest.TestCase):
def test_load_model(self):
nlp = ner.load_model()
self.assertIsNotNone(nlp)
def test_name(self):
doc = ner.name(
"""GitHub launched April 10, 2008, a subsidiary of Microsoft, is an American web-based hosting service for version control using Git.
It is mostly used for computer code. It offers all of the distributed version control and source code management (SCM) functionality
of Git as well as adding its own features.""",
language="en_core_web_sm",
)
text_label = [(X.text, X.label_) for X in doc]
self.assertEqual(
text_label,
[
("GitHub", "ORG"),
("April 10, 2008", "DATE"),
("Microsoft", "ORG"),
("American", "NORP"),
("Git", "PERSON"),
("SCM", "ORG"),
("Git", "PERSON"),
],
)
doc = ner.name(
"""Michael Jeffrey Jordan born February 17, 1963 in Brooklyn, New York, United States of America. Known by his initials, MJ,[5] is an American former professional
basketball player who is the principal owner and chairman of the Charlotte Hornets of the National Basketball Association
""",
language="en_core_web_sm",
)
text_label = [(X.text, X.label_) for X in doc]
self.assertEqual(
text_label,
[
("Michael Jeffrey Jordan", "PERSON"),
("February 17, 1963", "DATE"),
("Brooklyn", "GPE"),
("New York", "GPE"),
("United States of America", "GPE"),
("American", "NORP"),
("the Charlotte Hornets of the National Basketball Association", "ORG"),
],
)
def main(self):
self.test_load_model()
self.test_name()
if __name__ == "__main__":
tests = NerTest()
tests.main()
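# --- Hedged usage sketch (not part of the original tests) ---
# Calling the library directly, mirroring the calls exercised by the tests;
# the sentence is made up and the spaCy model name follows the tests above.
if __name__ == "__main__":
    _doc = ner.name("GitHub is a subsidiary of Microsoft.",
                    language="en_core_web_sm")
    print([(ent.text, ent.label_) for ent in _doc])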
| 34.741935
| 174
| 0.522748
|
f962d14b86c6fcd702805c8b4ff8a1073dc44a24
| 8,630
|
py
|
Python
|
mcwpy/datapack.py
|
vianneyveremme/minecraft_with_python
|
25edded44dd56348068758bc78eb71a3ce546583
|
[
"MIT"
] | 1
|
2021-07-29T14:22:15.000Z
|
2021-07-29T14:22:15.000Z
|
mcwpy/datapack.py
|
vianneyveremme/minecraft_with_python
|
25edded44dd56348068758bc78eb71a3ce546583
|
[
"MIT"
] | 2
|
2021-11-04T18:44:12.000Z
|
2021-11-08T03:29:43.000Z
|
mcwpy/datapack.py
|
vianneyveremme/minecraft_with_python
|
25edded44dd56348068758bc78eb71a3ce546583
|
[
"MIT"
] | null | null | null |
# -*- coding: ascii -*-
from datetime import date
from PIL import Image
from time import time
from typing import Any, Dict, List, Union
from .pack_meta import Pack_Meta
from .workspace import Workspace
from .utility import Minecraft_Pack_Version, create_file, Font, make_directory, remove_directory
import os
import shutil
class Datapack:
"""
Datapacks can be placed in the .minecraft/saves/(world)/datapacks folder of a world. Each data pack is either a sub-folder or a .zip file \
within the datapacks folder. After it is in the folder, a data pack is enabled for that world when the world is reloaded or loaded.
Data packs load their data based on the load order. This order can be seen and altered by using the /datapack command and is stored in the \
level.dat file.
The player can also select data packs at the world creation screen by clicking the Data Packs button and dragging-and-dropping their data \
pack folders/zip-files there. This is similar to the Resource Pack selection screen, and allows the player to enable data packs before \
the world is generated, and easily customize the load order too.
"""
def __init__(self,
title: str=None,
path: str=None,
pack_mcmeta: Union[Pack_Meta, Dict[str, Any]]=None,
workspaces: Union[Workspace, List[Workspace]]=None,
auto_compile: bool=None,
compile_as_zip: bool=None,
replace_existing: bool=None,
) -> None:
"""
Initialize a new Datapack object which will then generate a Minecraft Datapack.
:param title: The title of the datapack.
:param path: The path to the datapack.
:param pack_mcmeta: The metadata of the datapack.
:param workspaces: The workspace(s) in the datapack.
:param auto_compile: Whether or not to automatically compile the datapack.
:param compile_as_zip: Whether or not to compile the datapack as a zip file.
:param replace_existing: Whether or not to replace an existing datapack with the same name.
:return: None; this is a constructor.
"""
self.title = title if title not in (None, '') else "My_Amazing_Datapack"
self.path = (path if path[-len(os.path.sep)] != os.path.sep else path[:-len(os.path.sep)]) if path is not None else os.getcwd()
self.workspaces = (workspaces if isinstance(workspaces, list) else [workspaces]) if workspaces is not None else []
self.auto_compile = auto_compile if auto_compile is not None else False
self.compile_as_zip = compile_as_zip if compile_as_zip is not None else False
self.replace_existing = replace_existing if replace_existing is not None else False
self.pack_mcmeta = pack_mcmeta if pack_mcmeta is not None else Pack_Meta(
author=f"{os.getlogin()} using MCWPy",
minecraft_version=Minecraft_Pack_Version.LATEST,
version=f'{str(date.today().isocalendar()[0])[-2:]}w{date.today().isocalendar()[1]}s{hex(int(time()))[2:]}'
)
# Verifies that the workspaces are valid.
if not all(isinstance(w, Workspace) for w in self.workspaces):
raise TypeError(f'{Font.ERROR}The "workspaces" parameter must be a list of Workspace objects.{Font.END}')
# Auto-compile?
if self.auto_compile:
self.compile()
def __call__(self) -> None:
self.compile()
def __format__(self, format_specifier: str=None) -> str:
"""
Formats the Datapack in a human-readable format depending on the format specifier.
:param format_specifier: The format specifier.
:return: The formatted string.
"""
return f"{self.__str__():{format_specifier}}"
def __getitem__(self, index: int) -> Workspace:
"""
Return the Workspace at the given index.
:param index: The index of the Workspace to return.
:return: The Workspace at the given index.
"""
return self.workspaces[index]
def __len__(self) -> int:
"""Return the number of Workspaces in the Datapack."""
return len(self.workspaces)
def __repr__(self) -> str:
"""Return a string representation of the Datapack."""
return f"{self.title}: {self.workspaces}"
def __str__(self) -> str:
"""Return a string representation of the Datapack."""
return self.__repr__()
def append(self, element: object) -> None:
"""
        Add a Workspace or a list of Workspaces to the Datapack.
:param element: The Workspace or the list of Workspaces to add to the Datapack.
"""
if isinstance(element, Workspace):
self.workspaces.append(element)
elif isinstance(element, list | Workspace):
for e in element:
self.append(e)
else:
raise TypeError(f'{Font.ERROR}The "element" parameter must be a Workspace or a list of Workspaces.{Font.END}')
def compile(self) -> None:
"""
Compiles the data entered by the user to create a Minecraft Datapack.
:return: None; this is a builder function (builds files).
"""
if os.path.exists(os.path.join(self.path, self.title)):
if self.replace_existing or input(f'{Font.WARN}{self.title} already exists, do you want to replace it? [yes/no]: {Font.END}')[0].lower() == 'y':
remove_directory(os.path.join(self.path, self.title))
else:
raise FileExistsError(f'{Font.ERROR}{self.title} already exists, and you have not chosen to replace it.{Font.END}')
# Create the Datapack directory and its data directory.
make_directory(self.title, self.path)
make_directory('data', os.path.join(self.path, self.title))
# Create the pack.mcmeta file.
create_file('pack.mcmeta', os.path.join(self.path, self.title), self.pack_mcmeta() if isinstance(self.pack_mcmeta, Pack_Meta) else self.pack_mcmeta)
# Create the pack.png image.
colors_list = [ord(c) % 255 for c in self.title]
cl_len = len(colors_list)
cl_div = sum([int(v) for v in f'{cl_len:b}'])
img = Image.new(mode='RGB', size=(64, 64), color=(0, 0, 0))
img.putdata([(colors_list[(i // cl_div) % cl_len], colors_list[((i // cl_div) + 1) % cl_len], colors_list[((i // cl_div) + 2) % cl_len]) for i in range (64 * 64)])
img.save(os.path.join(self.path, self.title, 'pack.png'))
# Add the minecraft Workspace to the Datapack.
Workspace(name='minecraft').compile(os.path.join(self.path, self.title, 'data'))
# Compile every workspace in the Datapack.
for w in self.workspaces:
w.compile(os.path.join(self.path, self.title, 'data'))
########################
# AT THE END
########################
# Zip the Datapack.
if self.compile_as_zip:
self.to_zip()
def pop(self, index: int=-1) -> Workspace:
"""
Remove and return the Workspace at the given index.
:param index: The index of the Workspace to remove.
:return: The Workspace removed.
"""
return self.workspaces.pop(index)
def to_zip(self) -> None:
"""This function compresses the Datapack into a zip file."""
if os.path.exists(os.path.join(self.path, self.title + '.zip')):
if self.replace_existing or input(f'{Font.WARN}{self.title}.zip already exists, do you want to replace it? [yes/no]: {Font.END}')[0].lower() == 'y':
os.remove(os.path.join(self.path, self.title + '.zip'))
else:
raise FileExistsError(f'{Font.ERROR}{self.title}.zip already exists, and you have not chosen to replace it.{Font.END}')
# Actually create the zip file.
shutil.make_archive(self.title, 'zip', os.path.join(self.path, self.title))
if os.path.exists(os.path.join(self.path, self.title + '.zip')):
            print(f'{Font.OK_GREEN}Successfully created the archive "{self.title}.zip".{Font.END}')
# Remove the original files
if os.path.exists(os.path.join(self.path, self.title)):
remove_directory(os.path.join(self.path, self.title))
else:
# Print an error message and say the original file was saved.
print(f'{Font.ERROR}Failed to create the file "{self.title}.zip".{Font.END}', f'{Font.FINAL_INFO}The file {self.title} was not deleted.{Font.END}')
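# --- Hedged usage sketch (not part of the original module) ---
# A minimal way the class above might be driven, using only names defined or
# imported in this file; the title and workspace name are made up.
if __name__ == '__main__':
    demo_pack = Datapack(
        title='Demo_Datapack',
        workspaces=[Workspace(name='demo')],
        compile_as_zip=False,
        replace_existing=True,
    )
    demo_pack.compile()  # same effect as auto_compile=True or calling demo_pack()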
| 47.15847
| 171
| 0.631866
|
bae6cfb199ec25253bfb67f75433cd0920fd7a70
| 435
|
py
|
Python
|
common/kafka/kakfa.py
|
reejit/Zorobot
|
d7adb58f47a4c6dc0a324f3a9ecfcfc54c473b73
|
[
"Apache-2.0"
] | null | null | null |
common/kafka/kakfa.py
|
reejit/Zorobot
|
d7adb58f47a4c6dc0a324f3a9ecfcfc54c473b73
|
[
"Apache-2.0"
] | null | null | null |
common/kafka/kakfa.py
|
reejit/Zorobot
|
d7adb58f47a4c6dc0a324f3a9ecfcfc54c473b73
|
[
"Apache-2.0"
] | 1
|
2021-03-25T16:56:51.000Z
|
2021-03-25T16:56:51.000Z
|
from logging import StreamHandler
from kafka_producer import KafkaSend
class KafkaHandler(StreamHandler):
def __init__(self, broker, topic):
StreamHandler.__init__(self)
self.broker = broker
self.topic = topic
# Kafka Broker Configuration
self.kafka_broker = KafkaSend(broker)
def emit(self, record):
msg = self.format(record)
self.kafka_broker.send(msg, self.topic)
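# --- Hedged usage sketch (not part of the original module) ---
# Attaching the handler to a standard `logging` logger; the broker address
# and topic name below are placeholders, not values used by the project.
if __name__ == '__main__':
    import logging

    logger = logging.getLogger('zorobot-demo')
    logger.setLevel(logging.INFO)
    logger.addHandler(KafkaHandler('localhost:9092', 'zorobot-logs'))
    logger.info('hello from KafkaHandler')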
| 25.588235
| 47
| 0.682759
|
c69df0fb087fb7fa908eb0c8bca3a760e34fae59
| 574
|
py
|
Python
|
ipython/start.py
|
matherm/rootrepo
|
f1b432018f685c3a3d8d28588c064002983c863a
|
[
"BSD-3-Clause"
] | 2
|
2020-10-23T18:47:48.000Z
|
2021-07-12T22:49:08.000Z
|
ipython/start.py
|
matherm/rootrepo
|
f1b432018f685c3a3d8d28588c064002983c863a
|
[
"BSD-3-Clause"
] | null | null | null |
ipython/start.py
|
matherm/rootrepo
|
f1b432018f685c3a3d8d28588c064002983c863a
|
[
"BSD-3-Clause"
] | 1
|
2021-07-12T22:49:11.000Z
|
2021-07-12T22:49:11.000Z
|
import os
import pathlib
import os.path as osp
import sys
import re
from collections import Counter, defaultdict, namedtuple
import itertools
import json
import numpy as np
import gzip
import pandas as pd
import pickle
import random
import matplotlib.pyplot as plt
import types
def imports():
for _, val in globals().items():
if isinstance(val, types.ModuleType):
yield val.__name__
print('successfully imported: [{:s}]'.format(
', '.join(sorted(set(
['"{:s}"'.format(e)
for e in imports()
if '__' not in e and 'types' not in e])))))
| 19.793103
| 56
| 0.695122
|
9237d2feb4dca1af72775e23e5f72bf9611d1236
| 3,327
|
py
|
Python
|
bin/ADFRsuite/CCSBpckgs/AutoSite/clusterNode.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/CCSBpckgs/AutoSite/clusterNode.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/CCSBpckgs/AutoSite/clusterNode.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
import numpy,os
class clusterNode(object):
def __init__(self, id, size, gen=0, rg=100.0, buriedness=0.0, score=0.0, totalE=0.0, children=None):
self.id=id
self.size = size
self.children = []
self.rg=rg
self.score=score
self.totalE=totalE
self.buriedness=buriedness
self.generation=gen
if children is not None:
for child in children:
self.add_child(child)
    def __repr__(self):
        # __repr__ must return a string; represent the node by its size.
        return str(self.size)
def add_child(self, node):
assert isinstance(node, clusterNode)
self.children.append(node)
# get leaf level node index
def getNodeIndex(self):
resultlist=""
if not self.children:
return " "+str(self.id)
for child in self.children:
resultlist=resultlist+child.getNodeIndex()
return resultlist
def getNodebySize(self,sizecutoff):
resultlist=[]
if self.size<sizecutoff or len(self.children)==0:
resultlist.append(self)
return resultlist
if self.size<1.2*sizecutoff:
for node in self.children:
if node.size<0.8*sizecutoff:
resultlist.append(self)
return resultlist
for node in self.children:
#import pdb; pdb.set_trace()
resultlist=resultlist+node.getNodebySize(sizecutoff)
return resultlist
def getAllNodes(self):
resultlist=[]
if len(self.children)==0:
resultlist.append(self)
#print 1
return resultlist
for node in self.children:
resultlist=resultlist+node.getAllNodes()
if self.size!=999999:
resultlist.append(self)
#import pdb;pdb.set_trace()
#print len(resultlist)
return resultlist
def updateAllNodes(self,clProp):
#resultlist=[]
if len(self.children)==0:
self.buriedness = clProp[self.id-1][4]
self.rg = clProp[self.id-1][3]
self.size = clProp[self.id-1][1]
self.score = clProp[self.id-1][5]
self.totalE=clProp[self.id-1][0]
#resultlist.append(self)
#print 1
return
for node in self.children:
node.updateAllNodes(clProp)
if self.size!=999999:
#resultlist.append(self)
self.buriedness = clProp[self.id-1][4]
self.rg = clProp[self.id-1][3]
self.size = clProp[self.id-1][1]
self.score = clProp[self.id-1][5]
self.totalE=clProp[self.id-1][0]
#import pdb;pdb.set_trace()
#print len(resultlist)
#return resultlist
def writeJSON(self):
d={'id':self.id,'size':self.size,'score':self.score,'totalE':round(self.totalE,3),'rg':self.rg,'buriedness':self.buriedness}
#d={'id':self.id,'size':self.size,'score':self.score}
d['children']=[child.writeJSON() for child in self.children]
return d
def buildJSON(obj):
node = clusterNode(id=obj['id'], size=obj['size'],score=obj['score'],totalE=obj['totalE'],rg=obj['rg'],buriedness=obj['buriedness'])
for child in obj.get('children',[]):
node.add_child(buildJSON(child))
return node
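# --- Hedged usage sketch (not part of the original module) ---
# Building a tiny two-level tree by hand and exercising the traversal and
# JSON round-trip helpers defined above; the ids and sizes are made up.
if __name__ == '__main__':
    root = clusterNode(id=1, size=120)
    root.add_child(clusterNode(id=2, size=70))
    root.add_child(clusterNode(id=3, size=50))
    print(root.getNodeIndex())              # leaf ids: " 2 3"
    print(len(root.getNodebySize(60)))      # 2: the node splits into its children
    print(buildJSON(root.writeJSON()).id)   # 1, after a dict round-trip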
| 32.617647
| 136
| 0.571386
|
d8c346c4368556a9517b27c44f5a63a3099a4219
| 6,698
|
py
|
Python
|
cvrminer/google.py
|
nemobis/cvrminer
|
0a8223d98a766d54b60e3f3ab1eb637678ca8730
|
[
"Apache-2.0"
] | null | null | null |
cvrminer/google.py
|
nemobis/cvrminer
|
0a8223d98a766d54b60e3f3ab1eb637678ca8730
|
[
"Apache-2.0"
] | null | null | null |
cvrminer/google.py
|
nemobis/cvrminer
|
0a8223d98a766d54b60e3f3ab1eb637678ca8730
|
[
"Apache-2.0"
] | null | null | null |
"""google.
Usage:
cvrminer.google get-archive-texts
cvrminer.google get-archive-websites
cvrminer.google interactive-query-and-save
References
----------
.. [1] https://developers.google.com/places/web-service/details
"""
from __future__ import print_function
from builtins import input
import configparser
from datetime import datetime
import json
from os.path import expanduser, isfile, join
import requests
# https://developers.google.com/places/web-service/details
GOOGLE_PLACE_DETAIL_URL = ("https://maps.googleapis.com/"
"maps/api/place/details/json")
# https://developers.google.com/places/web-service/search
GOOGLE_PLACE_SEARCH_URL = ("https://maps.googleapis.com/"
"maps/api/place/findplacefromtext/json")
GOOGLE_PLACE_ARCHIVE_FILENAME = join(expanduser('~'), 'cvrminer_data',
'google-place.ndjson')
CONFIG_FILENAMES = [
join(expanduser('~'), 'cvrminer.cfg'),
join(expanduser('~'), 'python.cfg')
]
class GoogleError(Exception):
"""Exception for Google API."""
pass
class GooglePlaceArchive(object):
"""Interface for archive of Google Place responses."""
def __init__(self):
"""Set up index and file."""
self.place_id_index = {}
self.last_line = -1
self.make_index()
self.file = open(GOOGLE_PLACE_ARCHIVE_FILENAME, 'a+')
def append(self, data):
"""Append downloaded data to file.
Parameters
----------
data : dict
Data to be written as a JSON line to a file.
"""
place_id = data['result']['place_id']
self.file.write(json.dumps(data) + '\n')
self.last_line += 1
self.place_id_index[place_id] = self.last_line
def has_place_id(self, place_id):
"""Test if place identifier is downloaded."""
return place_id in self.place_id_index
def make_index(self):
"""Make index of downloaded Place identifiers."""
if isfile(GOOGLE_PLACE_ARCHIVE_FILENAME):
for n, line in enumerate(open(GOOGLE_PLACE_ARCHIVE_FILENAME, 'r')):
data = json.loads(line)
place_id = data['result']['place_id']
self.place_id_index[place_id] = n
self.last_line = n
def texts(self):
"""Iterate over text in reviews.
Yields
------
text : str
Text from reviews.
"""
        # The archive file is opened in 'a+' mode, so rewind before reading.
        self.file.seek(0)
        for line in self.file:
            data = json.loads(line)
            for review in data['result'].get('reviews', []):
                yield review['text']
def websites(self):
"""Iterate over websites.
Yields
------
website : str
String with website address.
"""
        # Rewind the append-mode file handle before iterating.
        self.file.seek(0)
        for line in self.file:
data = json.loads(line)
if 'website' in data['result']:
yield data['result']['website']
class GooglePlaceApi(object):
"""Interface to Google Place API."""
def __init__(self):
"""Set up API key."""
self.key = self.get_key_from_config_file()
def get_key_from_config_file(self):
"""Read and return API key from config file.
Returns
-------
key : str
Google API key.
"""
config = configparser.ConfigParser()
for filename in CONFIG_FILENAMES:
try:
config.read(filename)
key = config['google']['key']
break
except (IOError, KeyError):
continue
else:
raise GoogleError("Could not find Google API key in: "
", ".join(CONFIG_FILENAMES))
return key
def search_places(self, query, language='da'):
"""Search for places with Google Place API.
Parameters
----------
query : str
Query to Google Place API.
Returns
-------
place_ids : list of str
List of strings with place-IDs.
"""
search_response = requests.get(
GOOGLE_PLACE_SEARCH_URL,
params=dict(key=self.key, inputtype='textquery',
language=language, input=query))
search_data = search_response.json()
print(search_data)
place_ids = [candidate['place_id']
for candidate in search_data['candidates']]
return place_ids
def get_place_details(self, place_id, language='da'):
"""Get details about place from Google Place API.
Parameters
----------
place_id : str
String with place_id.
Returns
-------
place_details : dict
Place details in a nested structure.
"""
detail_response = requests.get(
GOOGLE_PLACE_DETAIL_URL,
params=dict(key=self.key, language=language,
placeid=place_id))
place_details = detail_response.json()
place_details['datetime'] = datetime.now().isoformat()
return place_details
class GooglePlaceApiAndArchive(object):
"""API and Archive for Google Place search and details."""
def __init__(self):
"""Initialize API and archive."""
self.api = GooglePlaceApi()
self.archive = GooglePlaceArchive()
def search_and_save(self, query, update=False):
"""Search for place and store place details.
Parameters
----------
query : str
Query to Google Place search.
"""
place_ids = self.api.search_places(query)
print(place_ids)
if len(place_ids) > 0:
place_id = place_ids[0]
if not self.archive.has_place_id(place_id) or update:
                place_details = self.api.get_place_details(place_id)
self.archive.append(place_details)
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if arguments['interactive-query-and-save']:
api_and_archive = GooglePlaceApiAndArchive()
while True:
try:
query = input('query> ')
except KeyboardInterrupt:
break
api_and_archive.search_and_save(query)
elif arguments['get-archive-texts']:
archive = GooglePlaceArchive()
for text in archive.texts():
print(text.replace('\n', ' '))
elif arguments['get-archive-websites']:
archive = GooglePlaceArchive()
for text in archive.websites():
print(text)
if __name__ == '__main__':
main()
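# --- Hedged illustration (not part of the original module) ---
# The INI file that GooglePlaceApi.get_key_from_config_file() looks for in
# ~/cvrminer.cfg or ~/python.cfg needs a [google] section with a `key`
# entry. The helper below documents that shape with a placeholder key and a
# read_string() round-trip; it is not called anywhere in this module.
def _example_config_lookup():
    import configparser as _configparser

    example_cfg = u"[google]\nkey = YOUR-GOOGLE-API-KEY\n"
    config = _configparser.ConfigParser()
    config.read_string(example_cfg)
    return config['google']['key']   # same lookup as get_key_from_config_file()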
| 27.008065
| 79
| 0.574052
|
befe30d641e4b65ddc5628fb5cb79c0d3769907d
| 7,592
|
py
|
Python
|
sympy/physics/units/quantities.py
|
V1krant/sympy
|
f8e7019feded4e4d0852f49fe0ccfa777f0e8744
|
[
"BSD-3-Clause"
] | 1
|
2021-05-11T22:20:21.000Z
|
2021-05-11T22:20:21.000Z
|
sympy/physics/units/quantities.py
|
V1krant/sympy
|
f8e7019feded4e4d0852f49fe0ccfa777f0e8744
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/units/quantities.py
|
V1krant/sympy
|
f8e7019feded4e4d0852f49fe0ccfa777f0e8744
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Physical quantities.
"""
from __future__ import division
from sympy import AtomicExpr, Symbol, sympify
from sympy.core.compatibility import string_types
from sympy.physics.units.dimensions import _QuantityMapper
from sympy.physics.units.prefixes import Prefix
from sympy.utilities.exceptions import SymPyDeprecationWarning
class Quantity(AtomicExpr):
"""
Physical quantity: can be a unit of measure, a constant or a generic quantity.
"""
is_commutative = True
is_real = True
is_number = False
is_nonzero = True
_diff_wrt = True
def __new__(cls, name, abbrev=None, dimension=None, scale_factor=None,
latex_repr=None, pretty_unicode_repr=None,
pretty_ascii_repr=None, mathml_presentation_repr=None,
**assumptions):
if not isinstance(name, Symbol):
name = Symbol(name)
# For Quantity(name, dim, scale, abbrev) to work like in the
# old version of Sympy:
if not isinstance(abbrev, string_types) and not \
isinstance(abbrev, Symbol):
dimension, scale_factor, abbrev = abbrev, dimension, scale_factor
if dimension is not None:
SymPyDeprecationWarning(
deprecated_since_version="1.3",
issue=14319,
feature="Quantity arguments",
useinstead="unit_system.set_quantity_dimension_map",
).warn()
if scale_factor is not None:
SymPyDeprecationWarning(
deprecated_since_version="1.3",
issue=14319,
feature="Quantity arguments",
useinstead="SI_quantity_scale_factors",
).warn()
if abbrev is None:
abbrev = name
elif isinstance(abbrev, string_types):
abbrev = Symbol(abbrev)
obj = AtomicExpr.__new__(cls, name, abbrev)
obj._name = name
obj._abbrev = abbrev
obj._latex_repr = latex_repr
obj._unicode_repr = pretty_unicode_repr
obj._ascii_repr = pretty_ascii_repr
obj._mathml_repr = mathml_presentation_repr
if dimension is not None:
# TODO: remove after deprecation:
obj.set_dimension(dimension)
if scale_factor is not None:
# TODO: remove after deprecation:
obj.set_scale_factor(scale_factor)
return obj
def set_dimension(self, dimension, unit_system="SI"):
SymPyDeprecationWarning(
deprecated_since_version="1.5",
issue=17765,
feature="Moving method to UnitSystem class",
useinstead="unit_system.set_quantity_dimension or {}.set_global_relative_scale_factor".format(self),
).warn()
from sympy.physics.units import UnitSystem
unit_system = UnitSystem.get_unit_system(unit_system)
unit_system.set_quantity_dimension(self, dimension)
def set_scale_factor(self, scale_factor, unit_system="SI"):
SymPyDeprecationWarning(
deprecated_since_version="1.5",
issue=17765,
feature="Moving method to UnitSystem class",
useinstead="unit_system.set_quantity_scale_factor or {}.set_global_relative_scale_factor".format(self),
).warn()
from sympy.physics.units import UnitSystem
unit_system = UnitSystem.get_unit_system(unit_system)
unit_system.set_quantity_scale_factor(self, scale_factor)
def set_global_dimension(self, dimension):
_QuantityMapper._quantity_dimension_global[self] = dimension
def set_global_relative_scale_factor(self, scale_factor, reference_quantity):
"""
        Set a scale factor that is valid across all unit systems.
"""
from sympy.physics.units import UnitSystem
scale_factor = sympify(scale_factor)
# replace all prefixes by their ratio to canonical units:
scale_factor = scale_factor.replace(
lambda x: isinstance(x, Prefix),
lambda x: x.scale_factor
)
scale_factor = sympify(scale_factor)
UnitSystem._quantity_scale_factors_global[self] = (scale_factor, reference_quantity)
UnitSystem._quantity_dimensional_equivalence_map_global[self] = reference_quantity
@property
def name(self):
return self._name
@property
def dimension(self, unit_system=None):
from sympy.physics.units import UnitSystem
if unit_system is None:
unit_system = UnitSystem.get_default_unit_system()
return unit_system.get_quantity_dimension(self)
@property
def abbrev(self):
"""
Symbol representing the unit name.
        The abbreviation is prepended with the prefix symbol if one is defined.
"""
return self._abbrev
@property
def scale_factor(self, unit_system=None):
"""
Overall magnitude of the quantity as compared to the canonical units.
"""
from sympy.physics.units import UnitSystem
if unit_system is None:
unit_system = UnitSystem.get_default_unit_system()
return unit_system.get_quantity_scale_factor(self)
def _eval_is_positive(self):
return True
def _eval_is_constant(self):
return True
def _eval_Abs(self):
return self
def _eval_subs(self, old, new):
if isinstance(new, Quantity) and self != old:
return self
@staticmethod
def get_dimensional_expr(expr, unit_system="SI"):
SymPyDeprecationWarning(
deprecated_since_version="1.5",
issue=17765,
feature="get_dimensional_expr() is now associated with UnitSystem objects. " \
"The dimensional relations depend on the unit system used.",
useinstead="unit_system.get_dimensional_expr"
).warn()
from sympy.physics.units import UnitSystem
unit_system = UnitSystem.get_unit_system(unit_system)
return unit_system.get_dimensional_expr(expr)
@staticmethod
def _collect_factor_and_dimension(expr, unit_system="SI"):
"""Return tuple with scale factor expression and dimension expression."""
SymPyDeprecationWarning(
deprecated_since_version="1.5",
issue=17765,
feature="This method has been moved to the UnitSystem class.",
useinstead="unit_system._collect_factor_and_dimension",
).warn()
from sympy.physics.units import UnitSystem
unit_system = UnitSystem.get_unit_system(unit_system)
return unit_system._collect_factor_and_dimension(expr)
def _latex(self, printer):
if self._latex_repr:
return self._latex_repr
else:
return r'\text{{{}}}'.format(self.args[1] \
if len(self.args) >= 2 else self.args[0])
def convert_to(self, other, unit_system="SI"):
"""
Convert the quantity to another quantity of same dimensions.
Examples
========
>>> from sympy.physics.units import speed_of_light, meter, second
>>> speed_of_light
speed_of_light
>>> speed_of_light.convert_to(meter/second)
299792458*meter/second
>>> from sympy.physics.units import liter
>>> liter.convert_to(meter**3)
meter**3/1000
"""
from .util import convert_to
return convert_to(self, other, unit_system)
@property
def free_symbols(self):
"""Return free symbols from quantity."""
return set([])
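# --- Hedged usage sketch (not part of the original sympy module) ---
# Defining a new quantity relative to an existing unit via the
# set_global_relative_scale_factor() method above, then converting it back;
# the unit name and its scale factor are illustrative values only.
if __name__ == '__main__':
    from sympy.physics.units import meter

    furlong = Quantity("furlong")
    furlong.set_global_relative_scale_factor(201.168, meter)
    print(furlong.convert_to(meter))   # expected: 201.168*meter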
| 34.986175
| 115
| 0.647392
|
7fe5382b3449fd2093edb7f083119869fcec4c49
| 6,654
|
py
|
Python
|
tests/test_strategy.py
|
agoragames/limits
|
13be116196bd7d44d691793fdb1cfcf3970b3281
|
[
"MIT"
] | null | null | null |
tests/test_strategy.py
|
agoragames/limits
|
13be116196bd7d44d691793fdb1cfcf3970b3281
|
[
"MIT"
] | null | null | null |
tests/test_strategy.py
|
agoragames/limits
|
13be116196bd7d44d691793fdb1cfcf3970b3281
|
[
"MIT"
] | null | null | null |
"""
"""
import threading
import time
import unittest
import hiro
import redis
import pymemcache.client
from limits.limits import RateLimitItemPerSecond, RateLimitItemPerMinute
from limits.storage import (
MemoryStorage, RedisStorage,MemcachedStorage
)
from limits.strategies import (
MovingWindowRateLimiter,
FixedWindowElasticExpiryRateLimiter,
FixedWindowRateLimiter
)
from tests import skip_if_pypy
class WindowTests(unittest.TestCase):
def setUp(self):
redis.Redis().flushall()
pymemcache.client.Client(('localhost', 11211)).flush_all()
def test_fixed_window(self):
storage = MemoryStorage()
limiter = FixedWindowRateLimiter(storage)
with hiro.Timeline().freeze() as timeline:
start = int(time.time())
limit = RateLimitItemPerSecond(10, 2)
self.assertTrue(all([limiter.hit(limit) for _ in range(0,10)]))
timeline.forward(1)
self.assertFalse(limiter.hit(limit))
self.assertEqual(limiter.get_window_stats(limit)[1], 0)
self.assertEqual(limiter.get_window_stats(limit)[0], start + 2)
timeline.forward(1)
self.assertEqual(limiter.get_window_stats(limit)[1], 10)
self.assertTrue(limiter.hit(limit))
def test_fixed_window_with_elastic_expiry_in_memory(self):
storage = MemoryStorage()
limiter = FixedWindowElasticExpiryRateLimiter(storage)
with hiro.Timeline().freeze() as timeline:
start = int(time.time())
limit = RateLimitItemPerSecond(10, 2)
self.assertTrue(all([limiter.hit(limit) for _ in range(0,10)]))
timeline.forward(1)
self.assertFalse(limiter.hit(limit))
self.assertEqual(limiter.get_window_stats(limit)[1], 0)
# three extensions to the expiry
self.assertEqual(limiter.get_window_stats(limit)[0], start + 3)
timeline.forward(1)
self.assertFalse(limiter.hit(limit))
timeline.forward(3)
start = int(time.time())
self.assertTrue(limiter.hit(limit))
self.assertEqual(limiter.get_window_stats(limit)[1], 9)
self.assertEqual(limiter.get_window_stats(limit)[0], start + 2)
def test_fixed_window_with_elastic_expiry_memcache(self):
storage = MemcachedStorage('memcached://localhost:11211')
limiter = FixedWindowElasticExpiryRateLimiter(storage)
limit = RateLimitItemPerSecond(10, 2)
self.assertTrue(all([limiter.hit(limit) for _ in range(0,10)]))
time.sleep(1)
self.assertFalse(limiter.hit(limit))
time.sleep(1)
self.assertFalse(limiter.hit(limit))
def test_fixed_window_with_elastic_expiry_memcache_concurrency(self):
storage = MemcachedStorage('memcached://localhost:11211')
limiter = FixedWindowElasticExpiryRateLimiter(storage)
start = int(time.time())
limit = RateLimitItemPerSecond(100, 2)
def _c():
for i in range(0,50):
limiter.hit(limit)
t1, t2 = threading.Thread(target=_c), threading.Thread(target=_c)
t1.start(), t2.start()
[t1.join(), t2.join()]
self.assertEqual(limiter.get_window_stats(limit)[1], 0)
self.assertTrue(start + 2 <= limiter.get_window_stats(limit)[0] <= start + 3)
self.assertEqual(storage.get(limit.key_for()), 100)
def test_fixed_window_with_elastic_expiry_redis(self):
storage = RedisStorage('redis://localhost:6379')
limiter = FixedWindowElasticExpiryRateLimiter(storage)
limit = RateLimitItemPerSecond(10, 2)
self.assertTrue(all([limiter.hit(limit) for _ in range(0,10)]))
time.sleep(1)
self.assertFalse(limiter.hit(limit))
time.sleep(1)
self.assertFalse(limiter.hit(limit))
def test_moving_window_in_memory(self):
storage = MemoryStorage()
limiter = MovingWindowRateLimiter(storage)
with hiro.Timeline().freeze() as timeline:
limit = RateLimitItemPerMinute(10)
for i in range(0,5):
self.assertTrue(limiter.hit(limit))
self.assertTrue(limiter.hit(limit))
self.assertEqual(
limiter.get_window_stats(limit)[1],
10 - ((i + 1) * 2)
)
timeline.forward(10)
self.assertEqual(limiter.get_window_stats(limit)[1], 0)
self.assertFalse(limiter.hit(limit))
timeline.forward(20)
self.assertEqual(limiter.get_window_stats(limit)[1], 2)
self.assertEqual(limiter.get_window_stats(limit)[0], int(time.time() + 30))
timeline.forward(31)
self.assertEqual(limiter.get_window_stats(limit)[1], 10)
@skip_if_pypy
def test_moving_window_redis(self):
storage = RedisStorage("redis://localhost:6379")
limiter = MovingWindowRateLimiter(storage)
limit = RateLimitItemPerSecond(10, 2)
for i in range(0,10):
self.assertTrue(limiter.hit(limit))
self.assertEqual(limiter.get_window_stats(limit)[1], 10 - (i + 1))
time.sleep(2*0.095)
self.assertFalse(limiter.hit(limit))
time.sleep(0.4)
self.assertTrue(limiter.hit(limit))
self.assertTrue(limiter.hit(limit))
self.assertEqual(limiter.get_window_stats(limit)[1], 0)
def xest_moving_window_memcached(self):
storage = MemcachedStorage('memcacheD://localhost:11211')
self.assertRaises(NotImplementedError, MovingWindowRateLimiter, storage)
def test_test_fixed_window(self):
with hiro.Timeline().freeze() as timeline:
store = MemoryStorage()
limiter = FixedWindowRateLimiter(store)
limit = RateLimitItemPerSecond(2,1)
self.assertTrue(limiter.hit(limit), store)
self.assertTrue(limiter.test(limit), store)
self.assertTrue(limiter.hit(limit), store)
self.assertFalse(limiter.test(limit), store)
self.assertFalse(limiter.hit(limit), store)
def test_test_moving_window(self):
with hiro.Timeline().freeze() as timeline:
store = MemoryStorage()
limit = RateLimitItemPerSecond(2,1)
limiter = MovingWindowRateLimiter(store)
self.assertTrue(limiter.hit(limit), store)
self.assertTrue(limiter.test(limit), store)
self.assertTrue(limiter.hit(limit), store)
self.assertFalse(limiter.test(limit), store)
self.assertFalse(limiter.hit(limit), store)
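# --- Hedged usage sketch (not part of the original test module) ---
# The storage/strategy pairing exercised by the tests above, outside of
# unittest; the limit of 2 hits per second is an arbitrary choice.
if __name__ == '__main__':
    demo_storage = MemoryStorage()
    demo_limiter = FixedWindowRateLimiter(demo_storage)
    demo_limit = RateLimitItemPerSecond(2, 1)
    print(demo_limiter.hit(demo_limit))               # True
    print(demo_limiter.hit(demo_limit))               # True
    print(demo_limiter.hit(demo_limit))               # False - window is full
    print(demo_limiter.get_window_stats(demo_limit))  # (reset timestamp, remaining)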
| 41.074074
| 87
| 0.643222
|
f20a5cedd7d28b0af175ae9e6c6f3bee1e3ae92e
| 4,478
|
py
|
Python
|
python_module/stomp/test/s12_test.py
|
GeneralizedLearningUtilities/SuperGLU
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
[
"MIT"
] | 8
|
2015-07-13T23:07:20.000Z
|
2020-11-13T21:09:55.000Z
|
python_module/stomp/test/s12_test.py
|
GeneralizedLearningUtilities/SuperGLU
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
[
"MIT"
] | 7
|
2016-01-13T12:13:56.000Z
|
2021-12-14T21:12:28.000Z
|
python_module/stomp/test/s12_test.py
|
GeneralizedLearningUtilities/SuperGLU
|
1c373d1358431fb96dd70b324b26a14fc8ed1fcb
|
[
"MIT"
] | 6
|
2015-09-23T17:53:32.000Z
|
2020-04-30T07:27:01.000Z
|
import time
import unittest
from unittest.mock import Mock
import stomp
from stomp import exception
from stomp.listener import TestListener
from stomp.test.testutils import *
class Test12Connect(unittest.TestCase):
def setUp(self):
conn = stomp.Connection12(get_default_host())
listener = TestListener('123')
conn.set_listener('', listener)
conn.start()
conn.connect(get_default_user(), get_default_password(), wait=True)
self.conn = conn
self.listener = listener
self.timestamp = time.strftime('%Y%m%d%H%M%S')
def tearDown(self):
if self.conn:
self.conn.disconnect(receipt=None)
def test_send(self):
queuename = '/queue/testsend12-%s' % self.timestamp
self.conn.subscribe(destination=queuename, id=1, ack='auto')
self.conn.send(body='this is a test using protocol 1.2', destination=queuename, receipt='123')
self.listener.wait_on_receipt()
self.assertTrue(self.listener.connections == 1, 'should have received 1 connection acknowledgement')
self.assertTrue(self.listener.messages == 1, 'should have received 1 message')
self.assertTrue(self.listener.errors == 0, 'should not have received any errors')
def test_clientack(self):
queuename = '/queue/testclientack12-%s' % self.timestamp
self.conn.subscribe(destination=queuename, id=1, ack='client-individual')
self.conn.send(body='this is a test', destination=queuename, receipt='123')
self.listener.wait_for_message()
(headers, _) = self.listener.get_latest_message()
ack_id = headers['ack']
self.conn.ack(ack_id)
def test_clientnack(self):
queuename = '/queue/testclientnack12-%s' % self.timestamp
self.conn.subscribe(destination=queuename, id=1, ack='client-individual')
self.conn.send(body='this is a test', destination=queuename, receipt='123')
self.listener.wait_for_message()
(headers, _) = self.listener.get_latest_message()
ack_id = headers['ack']
self.conn.nack(ack_id)
def test_timeout(self):
server = TestStompServer('127.0.0.1', 60000)
try:
server.start()
server.add_frame('''ERROR
message: connection failed\x00''')
conn = stomp.Connection12([('127.0.0.1', 60000)])
listener = TestListener()
conn.set_listener('', listener)
conn.start()
try:
conn.connect(wait=True)
self.fail("shouldn't happen")
except exception.ConnectFailedException:
pass
finally:
server.stop()
def test_specialchars(self):
queuename = '/queue/testspecialchars12-%s' % self.timestamp
self.conn.subscribe(destination=queuename, id=1, ack='client')
hdrs = {
'special-1': 'test with colon : test',
'special-2': 'test with backslash \\ test',
'special-3': 'test with newlines \n \n',
'special-4': 'test with carriage return \r'
}
self.conn.send(body='this is a test', headers=hdrs, destination=queuename, receipt='123')
self.listener.wait_on_receipt()
(headers, _) = self.listener.get_latest_message()
_ = headers['message-id']
_ = headers['subscription']
self.assertTrue('special-1' in headers)
self.assertEqual('test with colon : test', headers['special-1'])
self.assertTrue('special-2' in headers)
self.assertEqual('test with backslash \\ test', headers['special-2'])
self.assertTrue('special-3' in headers)
self.assertEqual('test with newlines \n \n', headers['special-3'])
self.assertTrue('special-4' in headers)
self.assertEqual('test with carriage return \r', headers['special-4'])
def test_suppress_content_length(self):
queuename = '/queue/testspecialchars12-%s' % self.timestamp
self.conn = stomp.Connection12(get_default_host(), auto_content_length=False)
self.conn.transport = Mock()
self.conn.send(body='test', destination=queuename, receipt='123')
args, kwargs = self.conn.transport.transmit.call_args
frame = args[0]
self.assertTrue('content-length' not in frame.headers)
| 35.824
| 109
| 0.617686
|
fd2b9ab150efc287d3806ec58beb94671c8b0254
| 12,117
|
py
|
Python
|
infusionsoft/infusionsoft.py
|
LazyAfternoons/infusionsoft-im
|
80a5942fd0bd82e2d9e290b58cd049b8e0b07989
|
[
"MIT"
] | 1
|
2022-03-23T11:23:23.000Z
|
2022-03-23T11:23:23.000Z
|
infusionsoft/infusionsoft.py
|
LazyAfternoons/infusionsoft-im
|
80a5942fd0bd82e2d9e290b58cd049b8e0b07989
|
[
"MIT"
] | null | null | null |
infusionsoft/infusionsoft.py
|
LazyAfternoons/infusionsoft-im
|
80a5942fd0bd82e2d9e290b58cd049b8e0b07989
|
[
"MIT"
] | null | null | null |
import http
import json
import pickle
import time
from os.path import exists
import requests
from requests import RequestException
import base64
import logging
import http.client as http_client
import importlib
from infusionsoft.token import Token
class Infusionsoft:
"""Infusionsoft object for using their `REST API <https://developer.infusionsoft.com/docs/rest/#!>`.
"""
def __init__(self, client_id, client_secret):
"""Creates a new Infusionsoft object.
Args: client_id: The application client id which can be found `here
<https://keys.developer.keap.com/my-apps>`. client_secret: The application client secret which can be found
`here <https://keys.developer.keap.com/my-apps>`.
"""
self.client_id = client_id
self.client_secret = client_secret
self.token = None
self.api = {}
self.cached_objects = dict()
# Logging initializer
http_client.HTTPConnection.debuglevel = 0
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logging.disable(logging.DEBUG)
def set_debug(self, flag: bool):
"""Enable or disable debug for HTTP requests.
Args:
flag: True to enable the debug, false to disable it.
"""
if flag:
logging.disable(logging.NOTSET)
http_client.HTTPConnection.debuglevel = 1
else:
logging.disable(logging.DEBUG)
http_client.HTTPConnection.debuglevel = 0
def is_token_serialized(self):
"""Check whether a token has been serialized previously.
Returns:
True if the serialized token exists, false otherwise.
"""
return exists('token.dat')
def deserialize_token(self):
"""Deserialize a previously stored token.
"""
with open('token.dat', 'rb') as f:
token = pickle.load(f)
return token
def set_token(self, token) -> None:
"""Set the token for the Infusionsoft object.
"""
self.token = token
    def get_new_token(self, access_token: str, refresh_token: str, end_of_life: str):
        """Generates a new token with the given parameters.
        Args:
            access_token: The generated access token from the `'Your Accounts' <https://accounts.infusionsoft.com/app/central/home>` page.
            refresh_token: The generated refresh token from the `'Your Accounts' <https://accounts.infusionsoft.com/app/central/home>` page.
            end_of_life: The token expiration in unix time.
        Returns:
            The generated token.
        """
        token = Token(access_token, refresh_token, end_of_life)
        self.serialize_token(token)
        return token
    def serialize_token(self, token):
        """Serialize token.
        Args:
            token: the token to be serialized.
        """
        with open('token.dat', 'wb') as f:
            pickle.dump(token, f)
def refresh_token(self):
"""Refreshes an expired token.
Raises:
InfusionsoftException: If an error occurs while refreshing the token.
"""
url = 'https://api.infusionsoft.com/token'
string = f'{self.client_id}:{self.client_secret}'
bytes_string = string.encode('ascii')
        base64_bytes = base64.b64encode(bytes_string)
        base64_string = base64_bytes.decode('ascii')
headers = {'Authorization': f'Basic {base64_string}', 'Content-type': 'application/x-www-form-urlencoded'}
data = {'grant_type': 'refresh_token', 'refresh_token': self.token.refresh_token}
r = requests.post(url, data=data, headers=headers)
json_res = r.json()
if r.status_code == 200:
self.token.access_token = json_res.get('access_token')
self.token.refresh_token = json_res.get('refresh_token')
self.token.end_of_life = str(int(time.time()) + int(json_res.get('expires_in')))
self.serialize_token(self.token)
else:
raise InfusionsoftException(f'An error occurred while refreshing the token: {json_res}')
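    # A minimal sketch of the token lifecycle using the methods above (illustrative only;
    # `client`, `access_token`, `refresh_token` and `end_of_life` are assumed to exist):
    #   client = Infusionsoft(client_id, client_secret)
    #   if client.is_token_serialized():
    #       client.set_token(client.deserialize_token())
    #   else:
    #       client.set_token(client.get_new_token(access_token, refresh_token, end_of_life))
    #   client.refresh_token()  # call when the access token has expired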
def request(self, method, url, params=None, data=None, json_res=None, headers=None):
"""Performs a request to the REST endpoint.
Args:
method: The HTTP method.
url: URL of the REST endpoint.
params: Parameters of the request. Defaults to None.
data: Data of the request. Defaults to None.
json_res: JSON of the request. Defaults to None.
headers: Headers of the request. Defaults to None.
Returns:
The JSON of the answer or an empty JSON in case of error.
Raises:
RequestException
"""
payload = {'access_token': self.token.access_token}
if params:
payload.update(params)
method_to_call = getattr(requests, method)
r = method_to_call(url, params=payload, data=data, headers=headers, json=json_res)
status_code = r.status_code
text = r.text
try:
json_response = r.json()
if status_code != 200 and status_code != 201:
raise ApiException(status_code, text, json_response)
else:
return json_response
except RequestException: # Will change to JSONDecodeError in future versions of request
raise ApiException(status_code, text, None)
def request_raw(self, method, url, body=None, headers=None):
connection = http.client.HTTPSConnection(url)
if body is not None:
json_dict = json.dumps(body)
else:
json_dict = None
connection.request(method, '/markdown', json_dict, headers)
response = connection.getresponse()
return response.read().decode()
# missing return type
def get_api(self, service):
"""Getter for an object representing the chosen API interface.
Uses a cached array so no object is instantiated more than once during a request.
Args:
service: the name of the requested service.
"""
if service in self.cached_objects:
obj = self.cached_objects.get(service)
else:
try:
                module = importlib.import_module(f"infusionsoft.api.{service}")  # package name assumed from the imports above; a hyphenated name is not importable
class_ = getattr(module, service.capitalize())
obj = class_(self)
self.cached_objects[service] = obj
except (ModuleNotFoundError, AttributeError):
raise InfusionsoftException("Unable to find the request API service object.")
return obj
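    # Illustrative lookup (module/class names are inferred from get_api above, not verified here):
    #   client.get_api('contact') imports infusionsoft.api.contact, instantiates its Contact
    #   class with this client, and caches the instance for later calls.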
def contact(self):
"""Getter for the Contact endpoint object.
Returns:
The object representing the Contact endpoint.
"""
key = 'contact'
return self.get_api(key)
def company(self):
"""Getter for the Company endpoint object.
Returns:
            The object representing the Company endpoint.
"""
key = 'company'
return self.get_api(key)
def account(self):
"""Getter for the Account endpoint object.
Returns:
The object representing the account endpoint.
"""
key = 'account'
return self.get_api(key)
def affiliate(self):
"""Getter for the Affiliate endpoint object.
Returns:
The object representing the Affiliate endpoint.
"""
key = 'affiliate'
return self.get_api(key)
def appointment(self):
"""Getter for the Appointment endpoint object.
Returns:
The object representing the Appointment endpoint.
"""
key = 'appointment'
return self.get_api(key)
def campaign(self):
"""Getter for the Campaign endpoint object.
Returns:
The object representing the Campaign endpoint.
"""
key = 'campaign'
return self.get_api(key)
def ecommerce(self):
"""Getter for the Ecommerce endpoint object.
Returns:
The object representing the Ecommerce endpoint.
"""
key = 'ecommerce'
return self.get_api(key)
def email(self):
"""Getter for the Email endpoint object.
Returns:
The object representing the Email endpoint.
"""
key = 'email'
return self.get_api(key)
def email_address(self):
"""Getter for the Email endpoint object.
Returns:
The object representing the Email endpoint.
"""
key = 'emailaddress'
return self.get_api(key)
def file(self):
"""Getter for the File endpoint object.
Returns:
            The object representing the File endpoint.
"""
key = 'file'
return self.get_api(key)
def locale(self):
"""Getter for the Locale endpoint object.
Returns:
            The object representing the Locale endpoint.
"""
key = 'locale'
return self.get_api(key)
def merchant(self):
"""Getter for the Merchant endpoint object.
Returns:
            The object representing the Merchant endpoint.
"""
key = 'merchant'
return self.get_api(key)
def note(self):
"""Getter for the Note endpoint object.
Returns:
The object representing the Note endpoint.
"""
key = 'note'
return self.get_api(key)
def opportunity(self):
"""Getter for the Opportunity endpoint object.
Returns:
The object representing the Opportunity endpoint.
"""
key = 'opportunity'
return self.get_api(key)
def product(self):
"""Getter for the Product endpoint object.
Returns:
The object representing the Product endpoint.
"""
key = 'product'
return self.get_api(key)
def resthook(self):
"""Getter for the REST Hooks endpoint object.
Returns:
The object representing the REST Hooks endpoint.
"""
key = 'resthook'
return self.get_api(key)
def setting(self):
"""Getter for the Setting endpoint object.
Returns:
The object representing the Setting endpoint.
"""
key = 'setting'
return self.get_api(key)
def tags(self):
"""Getter for the Tags endpoint object.
Returns:
The object representing the Tags endpoint.
"""
key = 'tags'
return self.get_api(key)
def tasks(self):
"""Getter for the Tasks endpoint object.
Returns:
The object representing the Tasks endpoint.
"""
key = 'tasks'
return self.get_api(key)
def users(self):
"""Getter for the UserInfo endpoint object.
Returns:
            The object representing the UserInfo endpoint.
"""
key = 'userinfo'
return self.get_api(key)
class InfusionsoftException(Exception):
"""Exception thrown when an error related to Infusionsoft occurs.
"""
def __init__(self, message):
"""Creates a new InfusionsoftException.
Args:
message:
Message of the error.
"""
super().__init__(message)
class ApiException(Exception):
"""Exception thrown when an error occurs when performing an API request.
"""
def __init__(self, status_code, message, json_res):
"""Creates a new ApiException.
Args:
status_code:
Status code of the error.
message:
Message of the error.
json_res:
JSON response, if present.
"""
self.status_code = status_code
self.message = message
self.json = json_res
super().__init__(self.message)
| 30.521411
| 115
| 0.598498
|
da1ec9f421590151c9468fd9cd24c4c7008813d2
| 801
|
py
|
Python
|
scripts/followers.py
|
rbowen/centos-community-tools
|
46aa66be854d1de5c334c8918991879fb08865c2
|
[
"Apache-2.0"
] | 16
|
2020-03-23T15:06:11.000Z
|
2021-07-18T14:33:23.000Z
|
followers.py
|
rbowen/community-tools
|
f93d49e80ea96ccb4541c21eb97c042685bddee9
|
[
"Apache-2.0"
] | null | null | null |
followers.py
|
rbowen/community-tools
|
f93d49e80ea96ccb4541c21eb97c042685bddee9
|
[
"Apache-2.0"
] | 4
|
2020-03-25T07:00:26.000Z
|
2021-09-06T18:21:31.000Z
|
#!/usr/bin/python
# How many followers do you have?
import urllib.request
import re
print ("This doesn't work any more because Twitter is actively preventing it. Sorry.")
quit()
feeds = [
'rbowen','centosproject','centos'
];
for feed in feeds:
req = urllib.request.Request( 'https://twitter.com/' + feed + '/',
data = None,
headers={
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
} )
f = urllib.request.urlopen(req)
html = f.read().decode('utf-8')
# Looks like ...
# <div class="statnum">2,615</div>
# <div class="statlabel"> Followers </div>
    print ( feed + ': ' + re.search(r'.*?followers">.+?statnum">([\d,MK]+)</div>.*?<.*?statlabel"> Followers.*', html, re.DOTALL).group(1) )
| 27.62069
| 139
| 0.563046
|
7006eb68c46d5ab2e3a6d58aa04dd14069f96a47
| 13,764
|
py
|
Python
|
kws_streaming/train/model_train_eval.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-01-19T23:35:59.000Z
|
2022-01-19T23:35:59.000Z
|
kws_streaming/train/model_train_eval.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
kws_streaming/train/model_train_eval.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple speech recognition to spot a limited number of keywords.
It is based on tensorflow/examples/speech_commands
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours even only using a CPU.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. This network uses a
keyword detection style to spot discrete words from a small vocabulary,
consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run model_train_eval.py
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, it will produce
Keras, SavedModel, TFLite and graphdef representations.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
data >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train --
--data_dir /data --wanted_words up,down
Above script will automatically split data into training/validation and testing.
If you prefer to split the data on your own, then you should set flag
"--split_data 0" and prepare folders with structure:
data >
training >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
validation >
up >
audio_6.wav
audio_7.wav
down >
audio_8.wav
audio_9.wav
testing >
up >
audio_12.wav
audio_13.wav
down >
audio_14.wav
audio_15.wav
_background_noise_ >
audio_18.wav
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train --
--data_dir /data --wanted_words up,down --split_data 0
"""
import json
import os
import sys
from absl import logging
import tensorflow.compat.v1 as tf
from kws_streaming.layers import modes
from kws_streaming.models import model_flags
from kws_streaming.models import model_utils
import kws_streaming.models.att_mh_rnn as att_mh_rnn
import kws_streaming.models.att_rnn as att_rnn
import kws_streaming.models.bc_resnet as bc_resnet
import kws_streaming.models.cnn as cnn
import kws_streaming.models.crnn as crnn
import kws_streaming.models.dnn as dnn
import kws_streaming.models.dnn_raw as dnn_raw
import kws_streaming.models.ds_cnn as ds_cnn
import kws_streaming.models.ds_tc_resnet as ds_tc_resnet
import kws_streaming.models.gru as gru
import kws_streaming.models.inception as inception
import kws_streaming.models.inception_resnet as inception_resnet
import kws_streaming.models.lstm as lstm
import kws_streaming.models.mobilenet as mobilenet
import kws_streaming.models.mobilenet_v2 as mobilenet_v2
import kws_streaming.models.svdf as svdf
import kws_streaming.models.svdf_resnet as svdf_resnet
import kws_streaming.models.tc_resnet as tc_resnet
import kws_streaming.models.xception as xception
from kws_streaming.train import base_parser
from kws_streaming.train import train
import kws_streaming.train.test as test
FLAGS = None
def main(_):
# Update flags
flags = model_flags.update_flags(FLAGS)
if flags.train:
# Create model folders where logs and model will be stored
try:
os.makedirs(flags.train_dir)
os.makedirs(os.path.join(flags.train_dir, 'restore'))
os.mkdir(flags.summaries_dir)
except OSError as e:
if flags.restore_checkpoint:
pass
else:
raise ValueError('model already exists in folder %s' %
flags.train_dir) from None
# Model training
train.train(flags)
else:
if not os.path.isdir(flags.train_dir):
      raise ValueError('model is not trained; set "--train 1" and retrain it')
# write all flags settings into json
with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
json.dump(flags.__dict__, f)
# convert to SavedModel
test.convert_model_saved(flags, 'non_stream',
modes.Modes.NON_STREAM_INFERENCE)
try:
test.convert_model_saved(flags, 'stream_state_internal',
modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TF streaming: %s', e)
logging.info('run TF non streaming model accuracy evaluation')
# with TF
folder_name = 'tf'
test.tf_non_stream_model_accuracy(flags, folder_name)
# with TF.
# We can apply non stream model on stream data, by running inference
# every 200ms (for example), so that total latency will be similar with
# streaming model which is executed every 20ms.
# To measure the impact of sampling on model accuracy,
# we introduce time_shift_ms during accuracy evaluation.
# Convert milliseconds to samples:
time_shift_samples = int(
(flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
test.tf_non_stream_model_accuracy(
flags,
folder_name,
time_shift_samples,
accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')
name2opt = {
'': None,
'quantize_opt_for_size_': [tf.lite.Optimize.DEFAULT],
}
for opt_name, optimizations in name2opt.items():
if (opt_name and flags.feature_type == 'mfcc_tf' and
flags.preprocess == 'raw'):
logging.info('feature type mfcc_tf needs quantization aware training '
'for quantization - it is not implemented')
continue
folder_name = opt_name + 'tflite_non_stream'
file_name = 'non_stream.tflite'
mode = modes.Modes.NON_STREAM_INFERENCE
test.convert_model_tflite(flags, folder_name, mode, file_name,
optimizations=optimizations)
test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)
# these models are using bi-rnn, so they are non streamable by default
# also models using striding or pooling are not supported for streaming now
non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}
model_is_streamable = True
if flags.model_name in non_streamable_models:
model_is_streamable = False
# below models can use striding in time dimension,
# but this is currently unsupported
elif flags.model_name == 'cnn':
for strides in model_utils.parse(flags.cnn_strides):
if strides[0] > 1:
model_is_streamable = False
break
elif flags.model_name == 'ds_cnn':
if model_utils.parse(flags.cnn1_strides)[0] > 1:
model_is_streamable = False
for strides in model_utils.parse(flags.dw2_strides):
if strides[0] > 1:
model_is_streamable = False
break
# set input data shape for testing inference in streaming mode
flags.data_shape = modes.get_input_data_shape(
flags, modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE)
# if model can be streamed, then run conversion/evaluation in streaming mode
if model_is_streamable:
# ---------------- TF streaming model accuracy evaluation ----------------
# Streaming model with external state evaluation using TF with state reset
if not opt_name:
        logging.info('run TF evaluation only without optimization/quantization')
try:
folder_name = 'tf'
test.tf_stream_state_external_model_accuracy(
flags,
folder_name,
accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',
reset_state=True) # with state reset between test sequences
# Streaming (with external state) evaluation using TF no state reset
test.tf_stream_state_external_model_accuracy(
flags,
folder_name,
accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',
reset_state=False) # without state reset
# Streaming (with internal state) evaluation using TF no state reset
test.tf_stream_state_internal_model_accuracy(flags, folder_name)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TF streaming: %s', e)
logging.info('run TFlite streaming model accuracy evaluation')
try:
# convert model to TFlite
folder_name = opt_name + 'tflite_stream_state_external'
file_name = 'stream_state_external.tflite'
mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
test.convert_model_tflite(flags, folder_name, mode, file_name,
optimizations=optimizations)
# Streaming model accuracy evaluation with TFLite with state reset
test.tflite_stream_state_external_model_accuracy(
flags,
folder_name,
file_name,
accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',
reset_state=True)
# Streaming model accuracy evaluation with TFLite without state reset
test.tflite_stream_state_external_model_accuracy(
flags,
folder_name,
file_name,
accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',
reset_state=False)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TFLite streaming: %s', e)
if __name__ == '__main__':
  # parser for training/testing data and speech feature flags
parser = base_parser.base_parser()
# sub parser for model settings
subparsers = parser.add_subparsers(dest='model_name', help='NN model name')
# DNN model settings
parser_dnn = subparsers.add_parser('dnn')
dnn.model_parameters(parser_dnn)
# DNN raw model settings
parser_dnn_raw = subparsers.add_parser('dnn_raw')
dnn_raw.model_parameters(parser_dnn_raw)
# LSTM model settings
parser_lstm = subparsers.add_parser('lstm')
lstm.model_parameters(parser_lstm)
# GRU model settings
parser_gru = subparsers.add_parser('gru')
gru.model_parameters(parser_gru)
# SVDF model settings
parser_svdf = subparsers.add_parser('svdf')
svdf.model_parameters(parser_svdf)
# CNN model settings
parser_cnn = subparsers.add_parser('cnn')
cnn.model_parameters(parser_cnn)
# CRNN model settings
parser_crnn = subparsers.add_parser('crnn')
crnn.model_parameters(parser_crnn)
# ATT MH RNN model settings
parser_att_mh_rnn = subparsers.add_parser('att_mh_rnn')
att_mh_rnn.model_parameters(parser_att_mh_rnn)
# ATT RNN model settings
parser_att_rnn = subparsers.add_parser('att_rnn')
att_rnn.model_parameters(parser_att_rnn)
# DS_CNN model settings
parser_ds_cnn = subparsers.add_parser('ds_cnn')
ds_cnn.model_parameters(parser_ds_cnn)
# TC Resnet model settings
parser_tc_resnet = subparsers.add_parser('tc_resnet')
tc_resnet.model_parameters(parser_tc_resnet)
# Mobilenet model settings
parser_mobilenet = subparsers.add_parser('mobilenet')
mobilenet.model_parameters(parser_mobilenet)
# Mobilenet V2 model settings
parser_mobilenet_v2 = subparsers.add_parser('mobilenet_v2')
mobilenet_v2.model_parameters(parser_mobilenet_v2)
# xception model settings
parser_xception = subparsers.add_parser('xception')
xception.model_parameters(parser_xception)
# inception model settings
parser_inception = subparsers.add_parser('inception')
inception.model_parameters(parser_inception)
# inception resnet model settings
parser_inception_resnet = subparsers.add_parser('inception_resnet')
inception_resnet.model_parameters(parser_inception_resnet)
# svdf resnet model settings
parser_svdf_resnet = subparsers.add_parser('svdf_resnet')
svdf_resnet.model_parameters(parser_svdf_resnet)
# ds_tc_resnet model settings
parser_ds_tc_resnet = subparsers.add_parser('ds_tc_resnet')
ds_tc_resnet.model_parameters(parser_ds_tc_resnet)
# bc_resnet model settings
parser_bc_resnet = subparsers.add_parser('bc_resnet')
bc_resnet.model_parameters(parser_bc_resnet)
FLAGS, unparsed = parser.parse_known_args()
if unparsed and tuple(unparsed) != ('--alsologtostderr',):
raise ValueError('Unknown argument: {}'.format(unparsed))
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 36.125984
| 86
| 0.735106
|
1fca9afba8d48cdecca99015602540e53f5b3848
| 34,935
|
py
|
Python
|
bert4keras/optimizers.py
|
EthanChen1234/bert4keras
|
149b8abe4f5696f7762f49547533873b935f85b9
|
[
"Apache-2.0"
] | 1
|
2020-05-13T05:56:14.000Z
|
2020-05-13T05:56:14.000Z
|
bert4keras/optimizers.py
|
ZhenHengDong/bert4keras
|
de66f9b66a57152816920a6b068a3f28648dd547
|
[
"Apache-2.0"
] | null | null | null |
bert4keras/optimizers.py
|
ZhenHengDong/bert4keras
|
de66f9b66a57152816920a6b068a3f28648dd547
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Optimizer-related utilities
import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, K, is_tf_keras
from bert4keras.snippets import is_string, string_matching
from bert4keras.snippets import is_one_of, insert_arguments
from bert4keras.backend import piecewise_linear
import re
class Adam(keras.optimizers.Optimizer):
"""重新定义Adam优化器,便于派生出新的优化器
(tensorflow的optimizer_v2类)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
bias_correction=True,
**kwargs
):
kwargs['name'] = kwargs.get('name') or 'Adam'
super(Adam, self).__init__(**kwargs)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
        self.epsilon = epsilon or K.epsilon()
self.bias_correction = bias_correction
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, 'm')
self.add_slot(var, 'v')
def _resource_apply(self, grad, var, indices=None):
        # Prepare variables
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
epsilon_t = K.cast(self.epsilon, var_dtype)
local_step = K.cast(self.iterations + 1, var_dtype)
beta_1_t_power = K.pow(beta_1_t, local_step)
beta_2_t_power = K.pow(beta_2_t, local_step)
        # Update rules
if indices is None:
m_t = K.update(m, beta_1_t * m + (1 - beta_1_t) * grad)
v_t = K.update(v, beta_2_t * v + (1 - beta_2_t) * grad**2)
else:
mv_ops = [K.update(m, beta_1_t * m), K.update(v, beta_2_t * v)]
with tf.control_dependencies(mv_ops):
m_t = self._resource_scatter_add(
m, indices, (1 - beta_1_t) * grad
)
v_t = self._resource_scatter_add(
v, indices, (1 - beta_2_t) * grad**2
)
        # Return the update op
with tf.control_dependencies([m_t, v_t]):
if self.bias_correction:
m_t = m_t / (1.0 - beta_1_t_power)
v_t = v_t / (1.0 - beta_2_t_power)
var_t = var - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
return K.update(var, var_t)
def _resource_apply_dense(self, grad, var):
return self._resource_apply(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
return self._resource_apply(grad, var, indices)
def get_config(self):
config = {
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
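# Illustrative usage of the Adam class above (a sketch; assumes tf.keras and an already-built
# `model`, which is not part of this module):
#   model.compile(optimizer=Adam(learning_rate=2e-5), loss='sparse_categorical_crossentropy')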
class AdaFactorBase(keras.optimizers.Optimizer):
"""AdaFactor优化器(基类)
论文链接:https://arxiv.org/abs/1804.04235
参考实现:https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py
"""
def __init__(
self,
        learning_rate=1e-3,  # may be None
beta1=0.0,
beta2=None,
epsilon1=1e-30,
epsilon2=1e-3,
multiply_by_parameter_scale=True,
clipping_threshold=1.0,
min_dim_size_to_factor=128,
**kwargs
):
super(AdaFactorBase, self).__init__(**kwargs)
self._learning_rate = learning_rate
self.beta1 = beta1
self._beta2 = beta2
self.epsilon1 = epsilon1
self.epsilon2 = epsilon2
self.multiply_by_parameter_scale = multiply_by_parameter_scale
self.clipping_threshold = clipping_threshold
self.min_dim_size_to_factor = min_dim_size_to_factor
@property
def learning_rate(self):
if self._learning_rate is None:
iterations = K.cast(self.iterations + 1, K.floatx())
learning_rate = K.minimum(1.0 / K.sqrt(iterations), 0.01)
if self.multiply_by_parameter_scale:
return learning_rate
else:
return learning_rate * 0.05
else:
if not hasattr(self, '__learning_rate'):
with K.name_scope(self.__class__.__name__):
self.__learning_rate = K.variable(
self._learning_rate, name='learning_rate'
)
return self.__learning_rate
@property
def beta2(self):
if self._beta2 is None:
iterations = K.cast(self.iterations + 1, K.floatx())
return 1.0 - K.pow(iterations, -0.8)
else:
return self._beta2
def factored_shape(self, shape):
if len(shape) < 2:
return None
shape = np.array(shape)
indices = shape.argpartition(-2)
        if shape[indices[-2]] < self.min_dim_size_to_factor:  # only factor when the second-largest dim is large enough
return None
shape1, shape2 = np.array(shape), np.array(shape)
shape1[indices[-1]] = 1
shape2[indices[-2]] = 1
return shape1, indices[-1], shape2, indices[-2]
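    # Illustrative example of the factoring above: for a weight of shape (1024, 768) with
    # min_dim_size_to_factor=128, the second moment is kept as two statistics of shapes
    # (1, 768) (largest axis averaged out) and (1024, 1) (second-largest axis averaged out).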
def get_config(self):
config = {
'learning_rate': self._learning_rate,
'beta1': self.beta1,
'beta2': self._beta2,
'epsilon1': self.epsilon1,
'epsilon2': self.epsilon2,
'multiply_by_parameter_scale': self.multiply_by_parameter_scale,
'clipping_threshold': self.clipping_threshold,
'min_dim_size_to_factor': self.min_dim_size_to_factor,
}
base_config = super(AdaFactorBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AdaFactorV1(AdaFactorBase):
"""AdaFactor优化器(纯Keras版)
论文链接:https://arxiv.org/abs/1804.04235
参考实现:https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py
"""
def __init__(self, *args, **kwargs):
super(AdaFactorV1, self).__init__(*args, **kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
@K.symbolic
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
self.weights = [self.iterations]
lr = self.learning_rate
for i, (p, g) in enumerate(zip(params, grads)):
g2 = K.square(g) + self.epsilon1
shape, dtype = K.int_shape(p), K.dtype(p)
factored_shape = self.factored_shape(shape)
if factored_shape is None:
                # Define parameters
v = K.zeros(shape, dtype=dtype, name='v_' + str(i))
self.weights.append(v)
                # Define updates
v_t = self.beta2 * v + (1.0 - self.beta2) * g2
self.updates.append(K.update(v, v_t))
else:
                # Define parameters
shape1, axis1, shape2, axis2 = factored_shape
vr = K.zeros(shape1, dtype=dtype, name='vr_' + str(i))
vc = K.zeros(shape2, dtype=dtype, name='vc_' + str(i))
self.weights.extend([vr, vc])
                # Define updates
vr_t = self.beta2 * vr + K.mean(g2, axis=axis1, keepdims=True)
vc_t = self.beta2 * vc + K.mean(g2, axis=axis2, keepdims=True)
self.updates.extend([K.update(vr, vr_t), K.update(vc, vc_t)])
                # Reconstruct the full second-moment matrix
v_t = vr_t * vc_t / K.mean(vr_t, axis=axis2, keepdims=True)
            # Main update term
u = g / K.sqrt(v_t)
            # Clip the update
if self.clipping_threshold is not None:
u_rms = K.mean(K.sum(K.square(u)))
d = self.clipping_threshold
u = u / K.maximum(1.0, u_rms / d)
            # Momentum (moving average) of the update
if self.beta1 > 0.0:
                # Define parameters
m = K.zeros(shape, dtype=dtype, name='m_' + str(i))
self.weights.append(m)
                # Define updates
m_t = self.beta1 * m + (1.0 - self.beta1) * u
self.updates.append(K.update(m, m_t))
u = m_t
            # Rescale the update by the parameter scale
if self.multiply_by_parameter_scale:
u = u * K.maximum(K.mean(K.sum(K.square(p))), self.epsilon2)
            # Apply the parameter update
self.updates.append(K.update(p, p - lr * u))
return self.updates
class AdaFactorV2(AdaFactorBase):
"""AdaFactor优化器(tf.keras版)
论文链接:https://arxiv.org/abs/1804.04235
参考实现:https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py
"""
def __init__(self, *args, **kwargs):
kwargs['name'] = kwargs.get('name') or 'AdaFactor'
super(AdaFactorV2, self).__init__(*args, **kwargs)
def _create_slots(self, var_list):
for var in var_list:
if self.beta1 > 0.0:
self.add_slot(var, 'm')
shape = K.int_shape(var)
factored_shape = self.factored_shape(shape)
if factored_shape is None:
self.add_slot(var, 'v')
else:
shape1, axis1, shape2, axis2 = factored_shape
value1, value2 = np.zeros(shape1), np.zeros(shape2)
self.add_slot(var, 'vr', value1)
self.add_slot(var, 'vc', value2)
def _resource_apply(self, grad, var, indices=None):
lr = self.learning_rate
g2 = K.square(grad) + self.epsilon1
shape = K.int_shape(var)
factored_shape = self.factored_shape(shape)
if factored_shape is None:
v = self.get_slot(var, 'v')
            # Define updates
v_t = self.beta2 * v + (1.0 - self.beta2) * g2
v_t = K.update(v, v_t)
else:
shape1, axis1, shape2, axis2 = factored_shape
vr = self.get_slot(var, 'vr')
vc = self.get_slot(var, 'vc')
            # Define updates
vr_t = self.beta2 * vr + K.mean(g2, axis=axis1, keepdims=True)
vc_t = self.beta2 * vc + K.mean(g2, axis=axis2, keepdims=True)
vr_t, vc_t = K.update(vr, vr_t), K.update(vc, vc_t)
            # Reconstruct the full second-moment matrix
v_t = vr_t * vc_t / K.mean(vr_t, axis=axis2, keepdims=True)
        # Main update term
u = grad / K.sqrt(v_t)
        # Clip the update
if self.clipping_threshold is not None:
u_rms = K.mean(K.sum(K.square(u)))
d = self.clipping_threshold
u = u / K.maximum(1.0, u_rms / d)
        # Momentum (moving average) of the update
if self.beta1 > 0.0:
m = self.get_slot(var, 'm')
            # Define updates
m_t = self.beta1 * m + (1.0 - self.beta1) * u
u = K.update(m, m_t)
        # Rescale the update by the parameter scale
if self.multiply_by_parameter_scale:
u = u * K.maximum(K.mean(K.sum(K.square(var))), self.epsilon2)
        # Apply the parameter update
return K.update(var, var - lr * u)
def _resource_apply_dense(self, grad, var):
return self._resource_apply(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
grad = tf.IndexedSlices(grad, indices, K.shape(var))
grad = tf.convert_to_tensor(grad)
return self._resource_apply_dense(grad, var)
def export_to_custom_objects(base_extend_with):
"""装饰器,用来将优化器放到custom_objects中
"""
def new_extend_with(BaseOptimizer, name=None):
NewOptimizer = base_extend_with(BaseOptimizer)
if is_string(name):
NewOptimizer.__name__ = name
name = NewOptimizer.__name__
keras.utils.get_custom_objects()[name] = NewOptimizer
return NewOptimizer
return new_extend_with
@export_to_custom_objects
def extend_with_weight_decay(BaseOptimizer):
"""返回新的优化器类,加入权重衰减
"""
class NewOptimizer(BaseOptimizer):
"""带有权重衰减的优化器
"""
@insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
if not hasattr(self, 'learning_rate'):
self.learning_rate = self.lr
@K.symbolic
def get_updates(self, loss, params):
old_update = K.update
def new_update(x, new_x):
if is_one_of(x, params) and self._do_weight_decay(x):
new_x = new_x - self.learning_rate * self.weight_decay_rate * x
return old_update(x, new_x)
K.update = new_update
updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
return updates
def _do_weight_decay(self, w):
return (not string_matching(w.name, self.exclude_from_weight_decay))
def get_config(self):
config = {
'weight_decay_rate': self.weight_decay_rate,
'exclude_from_weight_decay': self.exclude_from_weight_decay,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
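# A minimal sketch of how these factory functions are used (the name 'AdamW' is illustrative,
# not defined by this module):
#   AdamW = extend_with_weight_decay(Adam, 'AdamW')
#   optimizer = AdamW(learning_rate=1e-5, weight_decay_rate=0.01,
#                     exclude_from_weight_decay=['Norm', 'bias'])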
@export_to_custom_objects
def extend_with_weight_decay_v2(BaseOptimizer):
"""返回新的优化器类,加入权重衰减
"""
class NewOptimizer(BaseOptimizer):
"""带有权重衰减的优化器
"""
@insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _resource_apply(self, grad, var, indices=None):
old_update = K.update
def new_update(x, new_x):
if x is var and self._do_weight_decay(x):
lr_t = self._decayed_lr(x.dtype.base_dtype)
new_x = new_x - lr_t * self.weight_decay_rate * x
return old_update(x, new_x)
K.update = new_update
op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
K.update = old_update
return op
def _do_weight_decay(self, w):
return (not string_matching(w.name, self.exclude_from_weight_decay))
def get_config(self):
config = {
'weight_decay_rate': self.weight_decay_rate,
'exclude_from_weight_decay': self.exclude_from_weight_decay,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_layer_adaptation(BaseOptimizer):
"""返回新的优化器类,加入层自适应学习率
"""
class NewOptimizer(BaseOptimizer):
"""带有层自适应学习率的优化器
用每一层参数的模长来校正当前参数的学习率
https://arxiv.org/abs/1904.00962
"""
@insert_arguments(exclude_from_layer_adaptation=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
if not hasattr(self, 'learning_rate'):
self.learning_rate = self.lr
@K.symbolic
def get_updates(self, loss, params):
old_update = K.update
def new_update(x, new_x):
if is_one_of(x, params) and self._do_layer_adaptation(x):
dx = new_x - x
lr_t = K.clip(self.learning_rate, K.epsilon(), 1e10)
x_norm = tf.norm(x)
g_norm = tf.norm(dx / lr_t)
ratio = K.switch(
x_norm > 0.0,
K.switch(g_norm > K.epsilon(), x_norm / g_norm, 1.0),
1.0
)
new_x = x + dx * ratio
return old_update(x, new_x)
K.update = new_update
updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
return updates
def _do_layer_adaptation(self, w):
return (
not string_matching(w.name, self.exclude_from_layer_adaptation)
)
def get_config(self):
config = {
'exclude_from_layer_adaptation':
self.exclude_from_layer_adaptation,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_layer_adaptation_v2(BaseOptimizer):
"""返回新的优化器类,加入层自适应学习率
"""
class NewOptimizer(BaseOptimizer):
"""带有层自适应学习率的优化器
用每一层参数的模长来校正当前参数的学习率
https://arxiv.org/abs/1904.00962
"""
@insert_arguments(exclude_from_layer_adaptation=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _resource_apply(self, grad, var, indices=None):
old_update = K.update
def new_update(x, new_x):
if x is var and self._do_layer_adaptation(x):
dx = new_x - x
lr_t = self._decayed_lr(x.dtype.base_dtype)
lr_t = K.clip(lr_t, K.epsilon(), 1e10)
x_norm = tf.norm(x)
g_norm = tf.norm(dx / lr_t)
ratio = K.switch(
x_norm > 0.0,
K.switch(g_norm > K.epsilon(), x_norm / g_norm, 1.0),
1.0
)
new_x = x + dx * ratio
return old_update(x, new_x)
K.update = new_update
op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
K.update = old_update
return op
def _do_layer_adaptation(self, w):
return (
not string_matching(w.name, self.exclude_from_layer_adaptation)
)
def get_config(self):
config = {
'exclude_from_layer_adaptation':
self.exclude_from_layer_adaptation,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_piecewise_linear_lr(BaseOptimizer):
"""返回新的优化器类,加入分段线性学习率
"""
class NewOptimizer(BaseOptimizer):
"""带有分段线性学习率的优化器
其中schedule是形如{1000: 1, 2000: 0.1}的字典,
表示0~1000步内学习率线性地从零增加到100%,然后
1000~2000步内线性地降到10%,2000步以后保持10%
"""
@insert_arguments(lr_schedule={0: 1})
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()}
@K.symbolic
def get_updates(self, loss, params):
lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule)
old_update = K.update
def new_update(x, new_x):
if is_one_of(x, params):
new_x = x + (new_x - x) * lr_multiplier
return old_update(x, new_x)
K.update = new_update
updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
return updates
def get_config(self):
config = {
'lr_schedule': self.lr_schedule,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_piecewise_linear_lr_v2(BaseOptimizer):
"""返回新的优化器类,加入分段线性学习率
"""
class NewOptimizer(BaseOptimizer):
"""带有分段线性学习率的优化器
其中schedule是形如{1000: 1, 2000: 0.1}的字典,
表示0~1000步内学习率线性地从零增加到100%,然后
1000~2000步内线性地降到10%,2000步以后保持10%
"""
@insert_arguments(lr_schedule={0: 1})
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()}
def _decayed_lr(self, var_dtype):
lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule)
lr_t = super(NewOptimizer, self)._decayed_lr(var_dtype)
return lr_t * K.cast(lr_multiplier, var_dtype)
def get_config(self):
config = {
'lr_schedule': self.lr_schedule,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
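# Illustrative warmup/decay schedule (names are assumptions): ramp the learning rate up linearly
# over the first 1000 steps, then decay it to 10% by step 10000 and hold it there.
#   AdamLR = extend_with_piecewise_linear_lr(Adam, 'AdamLR')
#   optimizer = AdamLR(learning_rate=1e-3, lr_schedule={1000: 1, 10000: 0.1})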
@export_to_custom_objects
def extend_with_gradient_accumulation(BaseOptimizer):
"""返回新的优化器类,加入梯度累积
"""
class NewOptimizer(BaseOptimizer):
"""带有梯度累积的优化器
"""
@insert_arguments(grad_accum_steps=2)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
self._first_get_gradients = True
def get_gradients(self, loss, params):
if self._first_get_gradients:
self._first_get_gradients = False
return super(NewOptimizer, self).get_gradients(loss, params)
else:
return [ag / self.grad_accum_steps for ag in self.accum_grads]
@K.symbolic
def get_updates(self, loss, params):
            # Condition for applying the update
cond = K.equal(self.iterations % self.grad_accum_steps, 0)
cond = K.cast(cond, K.floatx())
            # Get gradients
grads = self.get_gradients(loss, params)
self.accum_grads = [
K.zeros(
K.int_shape(p), dtype=K.dtype(p), name='accum_grad_%s' % i
) for i, p in enumerate(params)
]
old_update = K.update
def new_update(x, new_x):
new_x = cond * new_x + (1 - cond) * x
return old_update(x, new_x)
K.update = new_update
updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
            # Accumulate gradients
with tf.control_dependencies(updates):
accum_updates = [
K.update(ag, g + (1 - cond) * ag)
for g, ag in zip(grads, self.accum_grads)
]
return accum_updates
def get_config(self):
config = {
'grad_accum_steps': self.grad_accum_steps,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_gradient_accumulation_v2(BaseOptimizer):
"""返回新的优化器类,加入梯度累积
"""
class NewOptimizer(BaseOptimizer):
"""带有梯度累积的优化器
"""
@insert_arguments(grad_accum_steps=2)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _create_slots(self, var_list):
super(NewOptimizer, self)._create_slots(var_list)
for var in var_list:
self.add_slot(var, 'ag')
def _resource_apply(self, grad, var, indices=None):
            # Condition for applying the update
cond = K.equal(self.iterations % self.grad_accum_steps, 0)
            # Get gradients
ag = self.get_slot(var, 'ag')
old_update = K.update
def new_update(x, new_x):
new_x = K.switch(cond, new_x, x)
return old_update(x, new_x)
K.update = new_update
ag_t = ag / self.grad_accum_steps
op = super(NewOptimizer, self)._resource_apply(ag_t, var)
K.update = old_update
            # Accumulate gradients
with tf.control_dependencies([op]):
ag_t = K.switch(cond, K.zeros_like(ag), ag)
with tf.control_dependencies([K.update(ag, ag_t)]):
if indices is None:
ag_t = K.update(ag, ag + grad)
else:
ag_t = self._resource_scatter_add(ag, indices, grad)
return ag_t
def get_config(self):
config = {
'grad_accum_steps': self.grad_accum_steps,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
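# Illustrative sketch (the name 'AdamAcc' is an assumption): emulate a 4x larger batch size by
# accumulating gradients for 4 steps before each real update.
#   AdamAcc = extend_with_gradient_accumulation(Adam, 'AdamAcc')
#   optimizer = AdamAcc(learning_rate=1e-5, grad_accum_steps=4)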
@export_to_custom_objects
def extend_with_lookahead(BaseOptimizer):
"""返回新的优化器类,加入look ahead
"""
class NewOptimizer(BaseOptimizer):
"""带有look ahead的优化器
https://arxiv.org/abs/1907.08610
steps_per_slow_update: 即论文中的k;
slow_step_size: 即论文中的alpha。
"""
@insert_arguments(steps_per_slow_update=5, slow_step_size=0.5)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
@K.symbolic
def get_updates(self, loss, params):
updates = super(NewOptimizer, self).get_updates(loss, params)
k, alpha = self.steps_per_slow_update, self.slow_step_size
cond = K.equal(self.iterations % k, 0)
slow_vars = [
K.zeros(
K.int_shape(p), dtype=K.dtype(p), name='slow_var_%s' % i
) for i, p in enumerate(params)
]
with tf.control_dependencies(updates):
slow_updates = [
K.update(q, K.switch(cond, q + alpha * (p - q), q))
for p, q in zip(params, slow_vars)
]
with tf.control_dependencies(slow_updates):
copy_updates = [
K.update(p, K.switch(cond, q, p))
for p, q in zip(params, slow_vars)
]
return copy_updates
def get_config(self):
config = {
'steps_per_slow_update': self.steps_per_slow_update,
'slow_step_size': self.slow_step_size,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_lookahead_v2(BaseOptimizer):
"""返回新的优化器类,加入look ahead
"""
class NewOptimizer(BaseOptimizer):
"""带有look ahead的优化器
https://arxiv.org/abs/1907.08610
steps_per_slow_update: 即论文中的k;
slow_step_size: 即论文中的alpha。
"""
@insert_arguments(steps_per_slow_update=5, slow_step_size=0.5)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _create_slots(self, var_list):
super(NewOptimizer, self)._create_slots(var_list)
for var in var_list:
self.add_slot(var, 'slow_var')
def _resource_apply(self, grad, var, indices=None):
op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
k, alpha = self.steps_per_slow_update, self.slow_step_size
cond = K.equal(self.iterations % k, 0)
slow_var = self.get_slot(var, 'slow_var')
slow_var_t = slow_var + alpha * (var - slow_var)
with tf.control_dependencies([op]):
slow_update = K.update(
slow_var, K.switch(cond, slow_var_t, slow_var)
)
with tf.control_dependencies([slow_update]):
copy_update = K.update(var, K.switch(cond, slow_var, var))
return copy_update
def get_config(self):
config = {
'steps_per_slow_update': self.steps_per_slow_update,
'slow_step_size': self.slow_step_size,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_lazy_optimization(BaseOptimizer):
"""返回新的优化器类,加入懒惰更新
"""
class NewOptimizer(BaseOptimizer):
"""带有懒惰更新的优化器
使得部分权重(尤其是embedding)只有在梯度不等于0时
才发生更新。
"""
@insert_arguments(include_in_lazy_optimization=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
self._first_get_gradients = True
def get_gradients(self, loss, params):
if self._first_get_gradients:
self._first_get_gradients = False
return super(NewOptimizer, self).get_gradients(loss, params)
else:
return [self.grads[p] for p in params]
@K.symbolic
def get_updates(self, loss, params):
self.grads = dict(zip(params, self.get_gradients(loss, params)))
old_update = K.update
def new_update(x, new_x):
if is_one_of(x, params) and self._do_lazy_optimization(x):
g = self.grads[x]
r = K.any(K.not_equal(g, 0.0), axis=-1, keepdims=True)
new_x = x + (new_x - x) * K.cast(r, K.floatx())
return old_update(x, new_x)
K.update = new_update
updates = super(NewOptimizer, self).get_updates(loss, params)
K.update = old_update
return updates
def _do_lazy_optimization(self, w):
return string_matching(w.name, self.include_in_lazy_optimization)
def get_config(self):
config = {
'include_in_lazy_optimization':
self.include_in_lazy_optimization,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_lazy_optimization_v2(BaseOptimizer):
"""返回新的优化器类,加入懒惰更新
"""
class NewOptimizer(BaseOptimizer):
"""带有懒惰更新的优化器
使得部分权重(尤其是embedding)只有在梯度不等于0时
才发生更新。
"""
@insert_arguments(include_in_lazy_optimization=[])
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def _resource_apply(self, grad, var, indices=None):
old_update = K.update
def new_update(x, new_x):
if x is var and self._do_lazy_optimization(x):
if indices is None:
r = K.any(
K.not_equal(grad, 0.0), axis=-1, keepdims=True
)
new_x = x + (new_x - x) * K.cast(r, K.floatx())
return old_update(x, new_x)
else:
return self._resource_scatter_add(
x, indices, K.gather(new_x - x, indices)
)
return old_update(x, new_x)
K.update = new_update
op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
K.update = old_update
return op
def _do_lazy_optimization(self, w):
return string_matching(w.name, self.include_in_lazy_optimization)
def get_config(self):
config = {
'include_in_lazy_optimization':
self.include_in_lazy_optimization,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
return NewOptimizer
@export_to_custom_objects
def extend_with_exponential_moving_average(BaseOptimizer):
"""返回新的优化器类,加入EMA(权重滑动平均)
"""
class NewOptimizer(BaseOptimizer):
"""带EMA(权重滑动平均)的优化器
"""
@insert_arguments(ema_momentum=0.999)
def __init__(self, *args, **kwargs):
super(NewOptimizer, self).__init__(*args, **kwargs)
def get_updates(self, loss, params):
updates = super(NewOptimizer, self).get_updates(loss, params)
self.model_weights = params
self.ema_weights = [K.zeros(K.shape(w)) for w in params]
self.old_weights = K.batch_get_value(params)
K.batch_set_value(zip(self.ema_weights, self.old_weights))
ema_updates, ema_momentum = [], self.ema_momentum
with tf.control_dependencies(updates):
for w1, w2 in zip(self.ema_weights, params):
new_w = ema_momentum * w1 + (1 - ema_momentum) * w2
ema_updates.append(K.update(w1, new_w))
return ema_updates
def get_config(self):
config = {
'ema_momentum': self.ema_momentum,
}
base_config = super(NewOptimizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def apply_ema_weights(self):
"""备份原模型权重,然后将平均权重应用到模型上去。
"""
self.old_weights = K.batch_get_value(self.model_weights)
ema_weights = K.batch_get_value(self.ema_weights)
K.batch_set_value(zip(self.model_weights, ema_weights))
def reset_old_weights(self):
"""恢复模型到旧权重。
"""
K.batch_set_value(zip(self.model_weights, self.old_weights))
return NewOptimizer
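# Illustrative sketch (names are assumptions): evaluate with the averaged weights, then restore
# the live weights before resuming training.
#   AdamEMA = extend_with_exponential_moving_average(Adam, 'AdamEMA')
#   optimizer = AdamEMA(learning_rate=1e-5, ema_momentum=0.999)
#   optimizer.apply_ema_weights(); model.evaluate(...); optimizer.reset_old_weights()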
@export_to_custom_objects
def extend_with_gradient_centralization(BaseOptimizer):
"""返回新的优化器类,将梯度零中心化
"""
class NewOptimizer(BaseOptimizer):
"""带梯度零中心化的优化器
"""
def get_gradients(self, loss, params):
grads = []
for g in super(NewOptimizer, self).get_gradients(loss, params):
if isinstance(g, tf.IndexedSlices):
g = tf.convert_to_tensor(g)
if K.ndim(g) > 1:
g = g - K.mean(g, axis=range(1, K.ndim(g)), keepdims=True)
grads.append(g)
return grads
return NewOptimizer
if is_tf_keras:
extend_with_weight_decay = extend_with_weight_decay_v2
extend_with_layer_adaptation = extend_with_layer_adaptation_v2
extend_with_piecewise_linear_lr = extend_with_piecewise_linear_lr_v2
extend_with_gradient_accumulation = extend_with_gradient_accumulation_v2
extend_with_lookahead = extend_with_lookahead_v2
extend_with_lazy_optimization = extend_with_lazy_optimization_v2
AdaFactor = AdaFactorV2
else:
Adam = keras.optimizers.Adam
AdaFactor = AdaFactorV1
custom_objects = {
'Adam': Adam,
'AdaFactor': AdaFactor,
}
keras.utils.get_custom_objects().update(custom_objects)
| 35.359312
| 83
| 0.571404
|
d1673ba767e34189800fabe08c9ddc36b30924f9
| 4,232
|
py
|
Python
|
bot/jobs/other_nodes_jobs.py
|
aasw0ng/thornode-telegram-bot
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
[
"MIT"
] | 15
|
2020-04-21T07:51:26.000Z
|
2021-11-02T05:45:48.000Z
|
bot/jobs/other_nodes_jobs.py
|
aasw0ng/thornode-telegram-bot
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
[
"MIT"
] | 78
|
2020-04-13T23:01:16.000Z
|
2021-05-09T11:46:25.000Z
|
bot/jobs/other_nodes_jobs.py
|
aasw0ng/thornode-telegram-bot
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
[
"MIT"
] | 5
|
2020-09-03T21:19:16.000Z
|
2021-11-20T00:17:56.000Z
|
from data.other_nodes_dao import OtherNodesDao
from handlers.chat_helpers import *
from models.nodes import *
def check_other_nodes_health(context):
for node in OtherNodesDao().get_all_nodes():
message = check_health(node, context)
if message:
try_message_to_all_users(context, text=message)
def check_health(node: Node, context) -> [str, None]:
try:
is_node_currently_healthy = node.is_healthy()
except UnauthorizedException:
return f"😱 Your {node.to_string()} returns 401 - Unauthorized! 😱\n" \
f" Please make sure the credentials you set are correct!"
except Exception as e:
logger.error(e)
return None
was_node_healthy = context.bot_data.setdefault(node.node_id, {}).setdefault('health', True)
if was_node_healthy != is_node_currently_healthy:
context.bot_data[node.node_id]['health'] = is_node_currently_healthy
if is_node_currently_healthy:
text = f'{node.to_string()} is healthy again! 👌\n'
else:
text = f'{node.to_string()} is not healthy anymore! 💀 \n' \
f'Please check your node immediately'
return text
else:
return None
def check_bitcoin_height_increase_job(context):
nodes = OtherNodesDao().get_nodes_by_network_names([BitcoinNode.network_name])
for node in nodes:
message = check_block_height_increase(context, node)
if message:
try_message_to_all_users(context, message)
def check_ethereum_height_increase_job(context):
nodes = OtherNodesDao().get_nodes_by_network_names([EthereumNode.network_name])
for node in nodes:
message = check_block_height_increase(context, node)
if message:
try_message_to_all_users(context, message)
def check_block_height_increase(context, node: Node) -> [str, None]:
try:
current_block_height = node.get_block_height()
except UnauthorizedException:
return f"😱 Your {node.to_string()} returns 401 - Unauthorized! 😱\n" \
f" Please make sure the credentials you set are correct!"
except Exception as e:
logger.error(e)
return None
# Stuck count:
# 0 == everything's alright
# 1 == just got stuck
# -1 == just got unstuck
# > 1 == still stuck
node_data = context.bot_data.setdefault(node.node_id, {})
last_block_height = node_data.get('block_height', float('-inf'))
message = None
if current_block_height <= last_block_height:
node_data['block_height_stuck_count'] = node_data.get('block_height_stuck_count', 0) + 1
elif node_data.get('block_height_stuck_count', 0) > 0:
message = f"Block height is increasing again! 👌\n" \
f"{node.to_string()}"
node_data['block_height_stuck_count'] = -1
if node_data.get('block_height_stuck_count', 0) == 1:
message = 'Block height is not increasing anymore! 💀\n' \
f"{node.to_string()}"
node_data['block_height'] = current_block_height
return message
def check_other_nodes_syncing_job(context):
"""
Check if node is syncing or not and send appropriate notification
"""
nodes = OtherNodesDao().get_nodes_by_network_names([EthereumNode.network_name, BitcoinNode.network_name])
for node in nodes:
message = check_other_nodes_syncing(node, context)
if message:
try_message_to_all_users(context, text=message)
def check_other_nodes_syncing(node: Node, context) -> [str, None]:
try:
is_synced = node.is_fully_synced()
except UnauthorizedException:
return f"😱 Your {node.to_string()} returns 401 - Unauthorized! 😱\n" \
f" Please make sure the credentials you set are correct!"
was_synced = context.bot_data.setdefault(node.node_id, {}).get('syncing', True)
if is_synced != was_synced:
if is_synced:
message = f"Your {node.to_string()} is fully synced again!👌\n"
else:
message = f"Your {node.to_string()} node is syncing with the network... 🚧\n"
context.bot_data[node.node_id]['syncing'] = is_synced
return message
else:
return None
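# --- Editor's sketch (assumption: not part of the original module) ---
# The checks above are edge-triggered: a notification is produced only when
# the value stored in context.bot_data flips, not on every poll. The same
# pattern in isolation, with a plain dict standing in for context.bot_data:
def _notify_on_change(state: dict, node_id: str, is_healthy: bool):
    was_healthy = state.setdefault(node_id, {}).setdefault('health', True)
    if was_healthy == is_healthy:
        return None
    state[node_id]['health'] = is_healthy
    return 'healthy again' if is_healthy else 'not healthy anymore'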
| 33.856
| 109
| 0.664461
|
d7cac83d5639a6437b9aaf5c4a599c81a3904cda
| 14,906
|
py
|
Python
|
gym_energyplus/envs/energyplus_env.py
|
tatsubori/rl-testbed-for-energyplus
|
5ea6580a7a45a12c341f5ce85f9945a016c65708
|
[
"MIT"
] | null | null | null |
gym_energyplus/envs/energyplus_env.py
|
tatsubori/rl-testbed-for-energyplus
|
5ea6580a7a45a12c341f5ce85f9945a016c65708
|
[
"MIT"
] | null | null | null |
gym_energyplus/envs/energyplus_env.py
|
tatsubori/rl-testbed-for-energyplus
|
5ea6580a7a45a12c341f5ce85f9945a016c65708
|
[
"MIT"
] | null | null | null |
# Copyright (c) IBM Corp. 2018. All Rights Reserved.
# Project name: Reinforcement Learning Testbed for Power Consumption Optimization
# This project is licensed under the MIT License, see LICENSE
from gym import Env
from gym import spaces
from gym.utils import seeding
import sys, os, subprocess, time, signal, stat
from glob import glob
import gzip
import shutil
import numpy as np
from scipy.special import expit
import pandas as pd
from argparse import ArgumentParser
from gym_energyplus.envs.pipe_io import PipeIo
from gym_energyplus.envs.energyplus_model import EnergyPlusModel
from gym_energyplus.envs.energyplus_build_model import build_ep_model
class EnergyPlusEnv(Env):
metadata = {'render.modes': ['human']}
def __init__(self,
energyplus_file=None,
model_file=None,
weather_file=None,
log_dir=None,
verbose=False):
self.energyplus_process = None
self.pipe_io = None
# Verify path arguments
if energyplus_file is None:
energyplus_file = os.getenv('ENERGYPLUS')
if energyplus_file is None:
print('energyplus_env: FATAL: EnergyPlus executable is not specified. Use environment variable ENERGYPLUS.')
return None
if model_file is None:
model_file = os.getenv('ENERGYPLUS_MODEL')
if model_file is None:
print('energyplus_env: FATAL: EnergyPlus model file is not specified. Use environment variable ENERGYPLUS_MODEL.')
return None
if weather_file is None:
weather_file = os.getenv('ENERGYPLUS_WEATHER')
if weather_file is None:
print('energyplus_env: FATAL: EnergyPlus weather file is not specified. Use environment variable ENERGYPLUS_WEATHER.')
return None
if log_dir is None:
log_dir = os.getenv('ENERGYPLUS_LOG')
if log_dir is None:
log_dir = 'log'
# Initialize paths
self.energyplus_file = energyplus_file
self.model_file = model_file
self.weather_files = weather_file.split(',')
self.log_dir = log_dir
# Create an EnergyPlus model
self.ep_model = build_ep_model(model_file = self.model_file, log_dir = self.log_dir)
self.action_space = self.ep_model.action_space
self.observation_space = self.ep_model.observation_space
# TODO: self.reward_space which defaults to [-inf,+inf]
self.pipe_io = PipeIo()
self.episode_idx = -1
self.verbose = verbose
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def __del__(self):
# In case you forget to call env.stop()
self.stop_instance()
def reset(self):
self.stop_instance()
self.episode_idx += 1
self.start_instance()
self.timestep1 = 0
self.ep_model.reset()
return self.step(None)[0]
def start_instance(self):
print('Starting new environment')
assert(self.energyplus_process is None)
output_dir = self.log_dir + '/output/episode-{:08}'.format(self.episode_idx)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.pipe_io.start()
print('start_instance(): idx={}, model_file={}'.format(self.episode_idx, self.model_file))
# Handling weather file override
weather_files_override = glob(self.log_dir + '/*.epw')
if len(weather_files_override) > 0:
weather_files_override.sort()
weather_files = weather_files_override
print('start_instance(): weather file override')
else:
weather_files = self.weather_files
# Handling of multiple weather files
weather_idx = self.episode_idx % len(weather_files)
weather_file = weather_files[weather_idx]
print('start_instance(): weather_files[{}]={}'.format(weather_idx, weather_file))
        # Make copies of the model file and weather file into the output dir, and use them for execution.
        # This allows these files to be updated without affecting active simulation instances.
shutil.copy(self.model_file, output_dir)
shutil.copy(weather_file, output_dir)
copy_model_file = output_dir + '/' + os.path.basename(self.model_file)
copy_weather_file = output_dir + '/' + os.path.basename(weather_file)
# Spawn a process
cmd = self.energyplus_file \
+ ' -r -x' \
+ ' -d ' + output_dir \
+ ' -w ' + copy_weather_file \
+ ' ' + copy_model_file
print('Starting EnergyPlus with command: %s' % cmd)
self.energyplus_process = subprocess.Popen(cmd.split(' '), shell=False)
def stop_instance(self):
if self.energyplus_process is not None:
self.energyplus_process.terminate()
self.energyplus_process = None
if self.pipe_io is not None:
self.pipe_io.stop()
if self.episode_idx >= 0:
def count_severe_errors(file):
if not os.path.isfile(file):
return -1 # Error count is unknown
# Sample: ' ************* EnergyPlus Completed Successfully-- 6214 Warning; 2 Severe Errors; Elapsed Time=00hr 00min 7.19sec'
fd = open(file)
lines = fd.readlines()
fd.close()
for line in lines:
if line.find('************* EnergyPlus Completed Successfully') >= 0:
tokens = line.split()
return int(tokens[6])
return -1
epsode_dir = self.log_dir + '/output/episode-{:08}'.format(self.episode_idx)
file_csv = epsode_dir + '/eplusout.csv'
file_csv_gz = epsode_dir + '/eplusout.csv.gz'
file_err = epsode_dir + '/eplusout.err'
files_to_preserve = ['eplusout.csv', 'eplusout.err', 'eplustbl.htm']
files_to_clean = ['eplusmtr.csv', 'eplusout.audit', 'eplusout.bnd',
'eplusout.dxf', 'eplusout.eio', 'eplusout.edd',
'eplusout.end', 'eplusout.eso', 'eplusout.mdd',
'eplusout.mtd', 'eplusout.mtr', 'eplusout.rdd',
'eplusout.rvaudit', 'eplusout.shd', 'eplusssz.csv',
'epluszsz.csv', 'sqlite.err']
# Check for any severe error
nerr = count_severe_errors(file_err)
if nerr != 0:
print('EnergyPlusEnv: Severe error(s) occurred. Error count: {}'.format(nerr))
print('EnergyPlusEnv: Check contents of {}'.format(file_err))
#sys.exit(1)
# Compress csv file and remove unnecessary files
            # If the csv file is not present for some reason, preserve all other files for inspection
if os.path.isfile(file_csv):
with open(file_csv, 'rb') as f_in:
with gzip.open(file_csv_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(file_csv)
if not os.path.exists("/tmp/verbose"):
for file in files_to_clean:
file_path = epsode_dir + '/' + file
if os.path.isfile(file_path):
os.remove(file_path)
def step(self, action):
self.timestep1 += 1
# Send action to the environment
if action is not None:
self.ep_model.set_action(action)
if not self.send_action():
print('EnergyPlusEnv.step(): Failed to send an action. Quitting.')
observation = (self.observation_space.low + self.observation_space.high) * 0.5
reward = 0.0
done = True
print('EnergyPlusEnv: (quit)')
return observation, reward, done, {}
# Receive observation from the environment
        # Note that in our co-simulation environment, the state value of the last time step cannot be retrieved from the EnergyPlus process,
        # because the EMS framework of EnergyPlus does not allow setting an EMS calling point after the last timestep is completed.
        # To remedy this, we assume the set_raw_state() method of each model handles the case where raw_state is None.
        raw_state, done = self.receive_observation() # raw_state will be None for the call at total_timestep + 1
self.ep_model.set_raw_state(raw_state)
observation = self.ep_model.get_state()
reward = self.ep_model.compute_reward()
if done:
print('EnergyPlusEnv: (done)')
return observation, reward, done, {}
def send_action(self):
num_data = len(self.ep_model.action)
if self.pipe_io.writeline('{0:d}'.format(num_data)):
return False
for i in range(num_data):
self.pipe_io.writeline('{0:f}'.format(self.ep_model.action[i]))
self.pipe_io.flush()
return True
def receive_observation(self):
line = self.pipe_io.readline()
if (line == ''):
            # This is the (usual) case when we send action data after all simulation timesteps have finished.
return None, True
num_data = int(line)
        # The number of data values received may not be the same as the size of observation_space
#assert(num_data == len(self.observation_space.low))
raw_state = np.zeros(num_data)
for i in range(num_data):
line = self.pipe_io.readline()
if (line == ''):
                # This is usually a system error
return None, True
val = float(line)
raw_state[i] = val
return raw_state, False
def render(self, mode='human'):
if mode == 'human':
return False
def close(self):
self.stop_instance()
def plot(self, log_dir='', csv_file=''):
self.ep_model.plot(log_dir=log_dir, csv_file=csv_file)
def dump_timesteps(self, log_dir='', csv_file='', reward_file=''):
self.ep_model.dump_timesteps(log_dir=log_dir, csv_file=csv_file)
def dump_episodes(self, log_dir='', csv_file='', reward_file=''):
self.ep_model.dump_episodes(log_dir=log_dir, csv_file=csv_file)
def parser():
usage = 'Usage: python {} [--verbose] [--energyplus <file>] [--model <file>] [--weather <file>] [--simulate] [--plot] [--help]'.format(__file__)
argparser = ArgumentParser(usage=usage)
#argparser.add_argument('fname', type=str,
# help='echo fname')
argparser.add_argument('-v', '--verbose',
action='store_true',
help='Show verbose message')
argparser.add_argument('-e', '--energyplus', type=str,
dest='energyplus',
help='EnergyPlus executable file')
argparser.add_argument('-m', '--model', type=str,
dest='model',
help='Model file')
argparser.add_argument('-w', '--weather', type=str,
dest='weather',
help='Weather file')
argparser.add_argument('-s', '--simulate',
action='store_true',
help='Do simulation')
argparser.add_argument('-p', '--plot',
action='store_true',
help='Do plotting')
return argparser.parse_args()
def easy_agent(next_state, target, hi, lo):
sensitivity_pos = 1.0
sensitivity_neg = 1.0
act_west_prev = 0
act_east_prev = 0
alpha = 0.4
delta_west = next_state[1] - target
if delta_west >= 0:
act_west = target - delta_west * sensitivity_pos
else:
act_west = target - delta_west * sensitivity_neg
act_west = act_west * alpha + act_west_prev * (1 - alpha)
act_west_prev = act_west
delta_east = next_state[2] - target
if delta_east >= 0:
act_east = target - delta_east * sensitivity_pos
else:
act_east = target - delta_east * sensitivity_neg
act_east = act_east * alpha + act_east_prev * (1 - alpha)
act_east_prev = act_east
act_west = max(lo, min(act_west, hi))
act_east = max(lo, min(act_east, hi))
action = np.array([act_west, act_west, act_west, act_west, act_east, act_east, act_east, act_east])
return action
if __name__ == '__main__':
args = parser()
print('args={}'.format(args))
lo = 0.0
hi = 40.0
target = 23.0
    # obs[0]: Environment:Site Outdoor Air Drybulb Temperature [C](TimeStep)
# obs[1]: Workload level (not implemented yet)
#obs_space = spaces.Box(np.array([-20.0, 0.0]),
# np.array([ 50.0, 1.0]))
# act[0]: WestZoneDECOutletNode_setpoint
# act[1]: WestZoneIECOutletNode_setpoint
# act[2]: WestZoneCCoilAirOutletNode_setpoint
# act[3]: WestAirLoopOutletNode_setpoint
# act[4]: EastZoneDECOutletNode_setpoint
# act[5]: EastZoneIECOutletNode_setpoint
# act[6]: EastZoneCCoilAirOutletNode_setpoint
# act[7]: EastAirLoopOutletNode_setpoint
#act_space = spaces.Box(np.array([ lo, lo, lo, lo, lo, lo, lo, lo]),
# np.array([ hi, hi, hi, hi, hi, hi, hi, hi]))
# just for testing
env = EnergyPlusEnv(verbose = args.verbose)
if env is None:
quit()
if (args.simulate):
for ep in range(1):
PUE_min = 100.
PUE_max = 0.
PUE_sum = 0.
PUE_count = 0
next_state = env.reset()
for i in range(1000000):
#if args.verbose:
# os.system('clear')
# print('Step {}'.format(i))
#action = env.action_space.sample()
action = easy_agent(next_state, target, hi, lo)
PUE = next_state[3]
PUE_sum += PUE
PUE_min = min(PUE, PUE_min)
PUE_max = max(PUE, PUE_max)
PUE_count += 1
next_state, reward, done, _ = env.step(action)
PUE_ave = PUE_sum / PUE_count
if args.verbose:
print('========= count={} PUE={} PUEave={} PUEmin={} PUEmax={}'.format(PUE_count, PUE, PUE_ave, PUE_min, PUE_max))
if done:
break
PUE_ave = PUE_sum / PUE_count
            print('============================= Episode done. count={} PUEave={} PUEmin={} PUEmax={}'.format(PUE_count, PUE_ave, PUE_min, PUE_max))
#env.close()
env.plot()
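# --- Editor's sketch (assumption: not part of the original module) ---
# send_action() and receive_observation() above exchange data with the
# EnergyPlus process as plain text over a pipe: one line holding the number
# of values, then one value per line. A minimal encode/decode pair showing
# that framing:
def _encode_values(values):
    lines = ['{0:d}'.format(len(values))]
    lines += ['{0:f}'.format(v) for v in values]
    return '\n'.join(lines) + '\n'
def _decode_values(text):
    lines = text.strip().split('\n')
    count = int(lines[0])
    return [float(v) for v in lines[1:1 + count]]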
| 40.395664
| 148
| 0.580035
|
cc2461ab8788f6531bd924d7f338d5dfd0d8d25d
| 1,650
|
py
|
Python
|
wagtail_localize/test/migrations/0011_testmodelwithinvalidforeignkey.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 6
|
2019-09-10T19:53:55.000Z
|
2019-11-14T16:57:07.000Z
|
wagtail_localize/test/migrations/0011_testmodelwithinvalidforeignkey.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 17
|
2019-07-11T11:17:37.000Z
|
2019-11-19T16:40:31.000Z
|
wagtail_localize/test/migrations/0011_testmodelwithinvalidforeignkey.py
|
th3hamm0r/wagtail-localize
|
e9e0ba9245060c65e3247b62739abbed71bc2516
|
[
"BSD-3-Clause"
] | 2
|
2019-09-30T20:23:39.000Z
|
2019-10-31T14:09:31.000Z
|
# Generated by Django 3.1.3 on 2020-11-30 20:41
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0059_apply_collection_ordering"),
("wagtail_localize_test", "0010_testoverridetranslatablefieldspage"),
]
operations = [
migrations.CreateModel(
name="TestModelWithInvalidForeignKey",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"translation_key",
models.UUIDField(default=uuid.uuid4, editable=False),
),
(
"fk",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="wagtailcore.site",
),
),
(
"locale",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="wagtailcore.locale",
),
),
],
options={
"abstract": False,
"unique_together": {("translation_key", "locale")},
},
),
]
| 30
| 77
| 0.418788
|
8a258262e455109304caf1e67879b046459ff1bf
| 5,066
|
py
|
Python
|
aws-regions.py
|
groorj/cloud-regions
|
f085491c71440d99000ad29a885e6090dfc9332a
|
[
"MIT"
] | null | null | null |
aws-regions.py
|
groorj/cloud-regions
|
f085491c71440d99000ad29a885e6090dfc9332a
|
[
"MIT"
] | 1
|
2021-07-22T01:25:14.000Z
|
2021-07-22T17:29:09.000Z
|
aws-regions.py
|
groorj/cloud-regions
|
f085491c71440d99000ad29a885e6090dfc9332a
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import inspect
import urllib
import urllib.request
from urllib.error import HTTPError
# logger
logger = logging.getLogger()
logger_level = logging.getLevelName(os.environ['LOGGER_LEVEL'])
logger.setLevel(logger_level)
# validate access
def validate_access(event, context):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
logger.debug("RESTRICTED_ACCESS_ENABLED: [%s]", os.environ['RESTRICTED_ACCESS_ENABLED'])
error_message = "You are not allowed, get out!"
if os.environ['RESTRICTED_ACCESS_ENABLED'] == 'true':
logger.info("Restricted access is enabled")
logger.info("Value for header [%s] is: [%s]", os.environ['RESTRICTED_ACCESS_HTTP_HEADER'], event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']])
if event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']] != os.environ['RESTRICTED_ACCESS_SECRET']:
logger.info("Key provided is not valid")
logger.debug("Error: [%s]", error_message)
http_code = 403
raise ValueError(http_code, error_message)
else:
logger.info("Key provided is valid")
else:
logger.info("Restricted access is NOT enabled")
# create response
def create_response_new(status_code, message_body):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
return {
'statusCode': str(status_code),
'body': json.dumps(message_body),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
}
# download json file
def get_json():
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
try:
response = urllib.request.urlopen(os.environ['AWS_REGIONS_JSON_URL'])
except HTTPError as err:
# catch HTTP error
logger.debug("HTTP error: [%s]", err)
raise
json_data = json.loads(response.read())
return json_data
# entry point -> return region info
def get_region_info(event, context):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
return_info_final = {}
# validate the access to this resource
try:
validate_access(event, context)
except ValueError as err:
return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] }
return create_response_new(err.args[0], return_info_final)
# get region info
region_code = event['pathParameters']['region_code']
logger.debug("region_code: [%s]", region_code)
try:
json_data = get_json()
except HTTPError as err:
# http_code = err.code
http_code = 500
return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code }
return create_response_new(http_code, return_info_final)
# logger.debug("json_data: [%s]", json_data)
# logger.debug("type(json_data): [%s]", type(json_data))
for element in json_data['data']:
# logger.debug("code: [%s] && region_code: [%s]", element['code'], region_code)
if element['code'] == region_code:
logger.info("region_code found")
http_code = 200
return_info_final['request'] = { "request_status": "Success" }
return_info_final['info'] = json_data['info']
return_info_final['data'] = element
break
else:
logger.info("region_code NOT found")
return_info = "Region code NOT found."
http_code = 404
return_info_final['request'] = { "request_status": "Fail", "error_message": "Region code NOT found.", "http_error_code": http_code }
return create_response_new(http_code, return_info_final)
# entry point -> return info for all regions
def get_all_regions_info(event, context):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
return_info_final = {}
# validate the access to this resource
try:
validate_access(event, context)
except ValueError as err:
return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] }
return create_response_new(err.args[0], return_info_final)
# get regions info
try:
json_data = get_json()
except HTTPError as err:
# http_code = err.code
http_code = 500
return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code }
return create_response_new(http_code, return_info_final)
logger.debug("json_data: [%s]", json_data)
http_code = 200
return_info_final['request'] = { "request_status": "Success" }
return_info_final['info'] = json_data['info']
return_info_final['data'] = json_data['data']
return create_response_new(http_code, return_info_final)
# End;
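# --- Editor's sketch (assumption: not part of the original module) ---
# A minimal local invocation of the handler above. It assumes LOGGER_LEVEL,
# RESTRICTED_ACCESS_*, and AWS_REGIONS_JSON_URL are already set in the
# environment and that restricted access is disabled; the event carries only
# the fields the handler actually reads, and "us-east-1" is just an example.
def _example_invoke():
    event = {
        "headers": {},
        "pathParameters": {"region_code": "us-east-1"},
    }
    return get_region_info(event, context=None)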
| 41.867769
| 161
| 0.66443
|
c64112f23899e7a6d539388dc97ab9b46be3c03e
| 331
|
py
|
Python
|
setup.py
|
sonvt1710/bigjson
|
b562d7be1e8de689cfaf44fdca7a636a8d21ca20
|
[
"MIT"
] | null | null | null |
setup.py
|
sonvt1710/bigjson
|
b562d7be1e8de689cfaf44fdca7a636a8d21ca20
|
[
"MIT"
] | null | null | null |
setup.py
|
sonvt1710/bigjson
|
b562d7be1e8de689cfaf44fdca7a636a8d21ca20
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup
setup(
name='bigjson',
version='1.0.9',
packages=['bigjson'],
description='Python library that reads JSON files of any size.',
author='Henrik Heino',
author_email='henrik.heino@gmail.com',
url='https://henu.fi/bigjson',
license='MIT',
)
| 22.066667
| 68
| 0.661631
|
8ac023e24dc3c11e8e67a36be232463c6c0a72d2
| 12,394
|
py
|
Python
|
python/ray/data/datasource/file_based_datasource.py
|
Phirefly9/ray
|
bbfb86c5130a1a6a11ba3cd6f928a7c4078788e1
|
[
"Apache-2.0"
] | null | null | null |
python/ray/data/datasource/file_based_datasource.py
|
Phirefly9/ray
|
bbfb86c5130a1a6a11ba3cd6f928a7c4078788e1
|
[
"Apache-2.0"
] | null | null | null |
python/ray/data/datasource/file_based_datasource.py
|
Phirefly9/ray
|
bbfb86c5130a1a6a11ba3cd6f928a7c4078788e1
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
from typing import Callable, Optional, List, Tuple, Union, Any, TYPE_CHECKING
from urllib.parse import urlparse
if TYPE_CHECKING:
import pyarrow
from ray.types import ObjectRef
from ray.data.block import Block, BlockAccessor
from ray.data.impl.arrow_block import (ArrowRow, DelegatingArrowBlockBuilder)
from ray.data.impl.block_list import BlockMetadata
from ray.data.datasource.datasource import Datasource, ReadTask, WriteResult
from ray.util.annotations import DeveloperAPI
from ray.data.impl.util import _check_pyarrow_version
from ray.data.impl.remote_fn import cached_remote_fn
logger = logging.getLogger(__name__)
@DeveloperAPI
class FileBasedDatasource(Datasource[Union[ArrowRow, Any]]):
"""File-based datasource, for reading and writing files.
This class should not be used directly, and should instead be subclassed
and tailored to particular file formats. Classes deriving from this class
must implement _read_file().
Current subclasses:
JSONDatasource, CSVDatasource, NumpyDatasource, BinaryDatasource
"""
def prepare_read(
self,
parallelism: int,
paths: Union[str, List[str]],
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
schema: Optional[Union[type, "pyarrow.lib.Schema"]] = None,
_block_udf: Optional[Callable[[Block], Block]] = None,
**reader_args) -> List[ReadTask]:
"""Creates and returns read tasks for a file-based datasource.
"""
_check_pyarrow_version()
import pyarrow as pa
import numpy as np
paths, filesystem = _resolve_paths_and_filesystem(paths, filesystem)
paths, file_infos = _expand_paths(paths, filesystem)
file_sizes = [file_info.size for file_info in file_infos]
read_file = self._read_file
filesystem = _wrap_s3_serialization_workaround(filesystem)
def read_files(
read_paths: List[str],
fs: Union["pyarrow.fs.FileSystem", _S3FileSystemWrapper]):
logger.debug(f"Reading {len(read_paths)} files.")
if isinstance(fs, _S3FileSystemWrapper):
fs = fs.unwrap()
builder = DelegatingArrowBlockBuilder()
for read_path in read_paths:
with fs.open_input_stream(read_path) as f:
data = read_file(f, read_path, **reader_args)
if isinstance(data, pa.Table) or isinstance(
data, np.ndarray):
builder.add_block(data)
else:
builder.add(data)
block = builder.build()
if _block_udf is not None:
block = _block_udf(block)
return block
read_tasks = []
for read_paths, file_sizes in zip(
np.array_split(paths, parallelism),
np.array_split(file_sizes, parallelism)):
if len(read_paths) <= 0:
continue
if self._rows_per_file() is None:
num_rows = None
else:
num_rows = len(read_paths) * self._rows_per_file()
read_task = ReadTask(
lambda read_paths=read_paths: read_files(
read_paths, filesystem),
BlockMetadata(
num_rows=num_rows,
size_bytes=sum(file_sizes),
schema=schema,
input_files=read_paths)
)
read_tasks.append(read_task)
return read_tasks
def _rows_per_file(self):
"""Returns the number of rows per file, or None if unknown.
"""
return None
def _read_file(self, f: "pyarrow.NativeFile", path: str, **reader_args):
"""Reads a single file, passing all kwargs to the reader.
This method should be implemented by subclasses.
"""
raise NotImplementedError(
"Subclasses of FileBasedDatasource must implement _read_files().")
def do_write(self,
blocks: List[ObjectRef[Block]],
metadata: List[BlockMetadata],
path: str,
dataset_uuid: str,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
_block_udf: Optional[Callable[[Block], Block]] = None,
**write_args) -> List[ObjectRef[WriteResult]]:
"""Creates and returns write tasks for a file-based datasource."""
path, filesystem = _resolve_paths_and_filesystem(path, filesystem)
path = path[0]
filesystem = _wrap_s3_serialization_workaround(filesystem)
_write_block_to_file = self._write_block
def write_block(write_path: str, block: Block):
logger.debug(f"Writing {write_path} file.")
fs = filesystem
if isinstance(fs, _S3FileSystemWrapper):
fs = fs.unwrap()
if _block_udf is not None:
block = _block_udf(block)
with fs.open_output_stream(write_path) as f:
_write_block_to_file(f, BlockAccessor.for_block(block))
write_block = cached_remote_fn(write_block)
file_format = self._file_format()
write_tasks = []
for block_idx, block in enumerate(blocks):
write_path = os.path.join(
path, f"{dataset_uuid}_{block_idx:06}.{file_format}")
write_task = write_block.remote(write_path, block)
write_tasks.append(write_task)
return write_tasks
def _write_block(self, f: "pyarrow.NativeFile", block: BlockAccessor,
**writer_args):
"""Writes a block to a single file, passing all kwargs to the writer.
This method should be implemented by subclasses.
"""
raise NotImplementedError(
"Subclasses of FileBasedDatasource must implement _write_files().")
def _file_format(self):
"""Returns the file format string, to be used as the file extension
when writing files.
This method should be implemented by subclasses.
"""
raise NotImplementedError(
"Subclasses of FileBasedDatasource must implement _file_format().")
# TODO(Clark): Add unit test coverage of _resolve_paths_and_filesystem and
# _expand_paths.
def _resolve_paths_and_filesystem(
paths: Union[str, List[str]],
filesystem: "pyarrow.fs.FileSystem" = None,
) -> Tuple[List[str], "pyarrow.fs.FileSystem"]:
"""
Resolves and normalizes all provided paths, infers a filesystem from the
paths and ensures that all paths use the same filesystem.
Args:
paths: A single file/directory path or a list of file/directory paths.
A list of paths can contain both files and directories.
filesystem: The filesystem implementation that should be used for
reading these files. If None, a filesystem will be inferred. If not
None, the provided filesystem will still be validated against all
filesystems inferred from the provided paths to ensure
compatibility.
"""
from pyarrow.fs import FileSystem, PyFileSystem, FSSpecHandler, \
_resolve_filesystem_and_path
import fsspec
if isinstance(paths, str):
paths = [paths]
elif (not isinstance(paths, list)
or any(not isinstance(p, str) for p in paths)):
raise ValueError(
"paths must be a path string or a list of path strings.")
elif len(paths) == 0:
raise ValueError("Must provide at least one path.")
if filesystem and not isinstance(filesystem, FileSystem):
if not isinstance(filesystem, fsspec.spec.AbstractFileSystem):
raise TypeError(f"The filesystem passed must either conform to "
f"pyarrow.fs.FileSystem, or "
f"fsspec.spec.AbstractFileSystem. The provided "
f"filesystem was: {filesystem}")
filesystem = PyFileSystem(FSSpecHandler(filesystem))
resolved_paths = []
for path in paths:
if filesystem is not None:
# If we provide a filesystem, _resolve_filesystem_and_path will not
# slice off the protocol from the provided URI/path when resolved.
path = _unwrap_protocol(path)
resolved_filesystem, resolved_path = _resolve_filesystem_and_path(
path, filesystem)
if filesystem is None:
filesystem = resolved_filesystem
resolved_path = filesystem.normalize_path(resolved_path)
resolved_paths.append(resolved_path)
return resolved_paths, filesystem
def _expand_paths(paths: Union[str, List[str]],
filesystem: "pyarrow.fs.FileSystem"):
"""
Expands all provided paths into concrete file paths by walking directories.
Also returns a sidecar of file infos.
This should be used on the output of _resolve_paths_and_filesystem.
Args:
paths: A single file/directory path or a list of file/directory paths.
A list of paths can contain both files and directories. These paths
should be properly resolved, e.g. the paths returned from
_resolve_paths_and_filesystem.
filesystem: The filesystem implementation that should be used for
reading these files.
"""
from pyarrow.fs import FileType
expanded_paths = []
file_infos = []
for path in paths:
file_info = filesystem.get_file_info(path)
if file_info.type == FileType.Directory:
paths, file_infos_ = _expand_directory(path, filesystem)
expanded_paths.extend(paths)
file_infos.extend(file_infos_)
elif file_info.type == FileType.File:
expanded_paths.append(path)
file_infos.append(file_info)
else:
raise FileNotFoundError(path)
return expanded_paths, file_infos
def _expand_directory(path: str,
filesystem: "pyarrow.fs.FileSystem",
exclude_prefixes: List[str] = [".", "_"]) -> List[str]:
"""
Expand the provided directory path to a list of file paths.
Args:
path: The directory path to expand.
filesystem: The filesystem implementation that should be used for
reading these files.
exclude_prefixes: The file relative path prefixes that should be
excluded from the returned file set. Default excluded prefixes are
"." and "_".
Returns:
        The file paths and corresponding file infos for the files contained in the provided directory.
"""
from pyarrow.fs import FileSelector
selector = FileSelector(path, recursive=True)
files = filesystem.get_file_info(selector)
base_path = selector.base_dir
filtered_paths = []
for file_ in files:
if not file_.is_file:
continue
file_path = file_.path
if not file_path.startswith(base_path):
continue
relative = file_path[len(base_path):]
        if any(relative.startswith(prefix) for prefix in exclude_prefixes):
continue
filtered_paths.append((file_path, file_))
# We sort the paths to guarantee a stable order.
return zip(*sorted(filtered_paths, key=lambda x: x[0]))
def _unwrap_protocol(path):
"""
Slice off any protocol prefixes on path.
"""
parsed = urlparse(path)
return parsed.netloc + parsed.path
def _wrap_s3_serialization_workaround(filesystem: "pyarrow.fs.FileSystem"):
# This is needed because pa.fs.S3FileSystem assumes pa.fs is already
# imported before deserialization. See #17085.
import pyarrow as pa
if isinstance(filesystem, pa.fs.S3FileSystem):
return _S3FileSystemWrapper(filesystem)
return filesystem
class _S3FileSystemWrapper:
def __init__(self, fs: "pyarrow.fs.S3FileSystem"):
self._fs = fs
def unwrap(self):
return self._fs
@classmethod
def _reconstruct(cls, fs_reconstruct, fs_args):
# Implicitly trigger S3 subsystem initialization by importing
# pyarrow.fs.
import pyarrow.fs # noqa: F401
return cls(fs_reconstruct(*fs_args))
def __reduce__(self):
return _S3FileSystemWrapper._reconstruct, self._fs.__reduce__()
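# --- Editor's sketch (assumption: not part of the original module) ---
# FileBasedDatasource is meant to be subclassed, with _read_file() (and, for
# writing, _write_block() and _file_format()) supplied by the subclass. A
# hypothetical line-oriented text datasource; returning a pyarrow Table lets
# the reader above add it via builder.add_block():
class _ExampleTextDatasource(FileBasedDatasource):
    def _read_file(self, f: "pyarrow.NativeFile", path: str, **reader_args):
        import pyarrow as pa
        lines = f.read().decode("utf-8").splitlines()
        return pa.table({"text": lines})
    def _file_format(self):
        return "txt"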
| 37.671733
| 79
| 0.638373
|
ec97c5894237fe8526984e5b816a7b1dc3e80ce2
| 546
|
py
|
Python
|
exercicios/ExerciciosCursoEmVideo/ex009.py
|
mylenacferreira/Exercicios-python
|
6df83590e73d0ac1fe183bbf514ff56963ea4d5b
|
[
"MIT"
] | null | null | null |
exercicios/ExerciciosCursoEmVideo/ex009.py
|
mylenacferreira/Exercicios-python
|
6df83590e73d0ac1fe183bbf514ff56963ea4d5b
|
[
"MIT"
] | null | null | null |
exercicios/ExerciciosCursoEmVideo/ex009.py
|
mylenacferreira/Exercicios-python
|
6df83590e73d0ac1fe183bbf514ff56963ea4d5b
|
[
"MIT"
] | null | null | null |
# Write a program that reads any integer and shows its multiplication table on the screen
num = int(input('Digite um numero de 1 a 10 para calcular sua tabuada: '))
print(f' {num} x 0 = 0\n {num} x 1 = {num}\n {num} x 2 = {num * 2}\n {num} x 3 = {num * 3}\n {num} x 4 = {num * 4}\n {num} x 5 = {num * 5}\n {num} x 6 = {num * 6}\n {num} x 7 = {num * 7}\n {num} x 8 = {num * 8}\n {num} x 9 = {num * 9}\n {num} x 10 = {num * 10}\n')
"""
another way to do it
print('{} x {} = {}' .format(num, 1, num*1))
print('{} x {} = {}' .format(num, 2, num*2))
"""
| 54.6
| 264
| 0.534799
|
64411077ba30b7ed49a9d0a75b79d506c489e8b6
| 3,131
|
py
|
Python
|
egs/ofuton_p_utagoe_db/voc1/local/dataset_split.py
|
A-Quarter-Mile/ParallelWaveGAN
|
f4724870afcdb92f70cb489dc9e9930dcc7d4957
|
[
"MIT"
] | 1
|
2022-02-19T10:59:00.000Z
|
2022-02-19T10:59:00.000Z
|
egs/ofuton_p_utagoe_db/voc1/local/dataset_split.py
|
A-Quarter-Mile/ParallelWaveGAN
|
f4724870afcdb92f70cb489dc9e9930dcc7d4957
|
[
"MIT"
] | null | null | null |
egs/ofuton_p_utagoe_db/voc1/local/dataset_split.py
|
A-Quarter-Mile/ParallelWaveGAN
|
f4724870afcdb92f70cb489dc9e9930dcc7d4957
|
[
"MIT"
] | null | null | null |
import argparse
import os
import shutil
UTT_PREFIX = "ofuton"
DEV_LIST = ["chatsumi", "my_grandfathers_clock_3_2", "haruyo_koi", "momiji", "tetsudou_shouka"]
TEST_LIST = ["usagito_kame", "my_grandfathers_clock_1_2", "antagata_dokosa", "momotarou", "furusato"]
def train_check(song):
return (song not in DEV_LIST) and (song not in TEST_LIST)
def dev_check(song):
return song in DEV_LIST
def test_check(song):
return song in TEST_LIST
def pack_zero(string, size=20):
if len(string) < size:
string = "0" * (size - len(string)) + string
return string
def makedir(data_url):
if os.path.exists(data_url):
shutil.rmtree(data_url)
os.makedirs(data_url)
def process_text_info(text):
info = open(text, "r", encoding="utf-8")
label_info = []
text_info = []
for line in info.readlines():
line = line.strip().split()
label_info.append(
"{} {} {}".format(
float(line[0]) / 1e7, float(line[1]) / 1e7, line[2].strip()
)
)
text_info.append(line[2].strip())
return " ".join(label_info), " ".join(text_info)
def process_subset(src_data, subset, check_func, fs):
subfolder = os.listdir(src_data)
makedir(subset)
wavscp = open(os.path.join(subset, "wav.scp"), "w", encoding="utf-8")
utt2spk = open(os.path.join(subset, "utt2spk"), "w", encoding="utf-8")
label_scp = open(os.path.join(subset, "label"), "w", encoding="utf-8")
fixed_data = os.path.join(subset, "fix_byte")
makedir(fixed_data)
for folder in subfolder:
if not os.path.isdir(os.path.join(src_data, folder)):
continue
if not check_func(folder):
continue
utt_id = "{}_{}".format(UTT_PREFIX, pack_zero(folder))
makedir(os.path.join(fixed_data, folder))
cmd = f"sox {os.path.join(src_data, folder, folder)}.wav -c 1 -t wavpcm -b 16 -r {fs} {os.path.join(fixed_data, folder, folder)}_bits16.wav"
print(f"cmd: {cmd}")
os.system(cmd)
wavscp.write(
"{} {}\n".format(
utt_id, os.path.join(fixed_data, folder, "{}_bits16.wav".format(folder))
)
)
utt2spk.write("{} {}\n".format(utt_id, UTT_PREFIX))
label_info, text_info = process_text_info(
os.path.join(src_data, folder, "{}.lab".format(folder))
)
label_scp.write("{} {}\n".format(utt_id, label_info))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepare Data for Ofuton Database")
parser.add_argument("src_data", type=str, help="source data directory")
parser.add_argument("train", type=str, help="train set")
parser.add_argument("dev", type=str, help="development set")
parser.add_argument("test", type=str, help="test set")
parser.add_argument("--fs", type=int, help="frame rate (Hz)")
args = parser.parse_args()
process_subset(args.src_data, args.train, train_check, args.fs)
process_subset(args.src_data, args.dev, dev_check, args.fs)
process_subset(args.src_data, args.test, test_check, args.fs)
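# --- Editor's sketch (assumption: not part of the original script) ---
# process_text_info() above divides the .lab start/end times by 1e7, i.e. it
# treats them as HTS-style 100-nanosecond units and converts them to seconds;
# pack_zero() left-pads utterance names to a fixed width. A small worked
# illustration with hypothetical values:
def _example_label_and_id():
    start, end, phone = "0", "2000000", "a"   # hypothetical .lab fields
    label = "{} {} {}".format(float(start) / 1e7, float(end) / 1e7, phone)   # '0.0 0.2 a'
    utt_id = "{}_{}".format(UTT_PREFIX, pack_zero("song01"))   # 'ofuton_00000000000000song01'
    return label, utt_id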
| 32.614583
| 148
| 0.630789
|
cc15581d796e9d79019a46dc019544d6f08ee56d
| 9,358
|
py
|
Python
|
docs/conf.py
|
SteelBall/StrutSkinn
|
0a2204140c5ca0658382c832c9fe3459a24846c1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
SteelBall/StrutSkinn
|
0a2204140c5ca0658382c832c9fe3459a24846c1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
SteelBall/StrutSkinn
|
0a2204140c5ca0658382c832c9fe3459a24846c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Server Listing documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 26 19:22:58 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Server Listing'
copyright = u'2013, Server Listing Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.1'
# The full version, including alpha/beta/rc tags.
release = '0.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ServerListingdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ServerListing.tex', u'Server Listing Documentation',
u'Server Listing Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'serverlisting', u'Server Listing Documentation',
[u'Server Listing Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ServerListing', u'Server Listing Documentation',
u'Server Listing Team', 'ServerListing', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Server Listing'
epub_author = u'Server Listing Team'
epub_publisher = u'Server Listing Team'
epub_copyright = u'2013, Server Listing Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 32.268966
| 148
| 0.716927
|
a17dd4cfc4fb9aa9f17d722da2124be638a8e7b1
| 20,206
|
py
|
Python
|
mypython/Lib/site-packages/pandas/core/computation/pytables.py
|
lilianatang/data-modelling-with-postgresql
|
4b5d057d23c346cc36695dc0548f11908aeb5431
|
[
"Apache-2.0"
] | null | null | null |
mypython/Lib/site-packages/pandas/core/computation/pytables.py
|
lilianatang/data-modelling-with-postgresql
|
4b5d057d23c346cc36695dc0548f11908aeb5431
|
[
"Apache-2.0"
] | null | null | null |
mypython/Lib/site-packages/pandas/core/computation/pytables.py
|
lilianatang/data-modelling-with-postgresql
|
4b5d057d23c346cc36695dc0548f11908aeb5431
|
[
"Apache-2.0"
] | 1
|
2021-04-26T22:41:56.000Z
|
2021-04-26T22:41:56.000Z
|
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
from typing import Any, Dict, Optional, Tuple
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas as pd
import pandas.core.common as com
from pandas.core.computation import expr, ops, scope as _scope
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.core.construction import extract_array
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
queryables: Dict[str, Any]
def __init__(
self,
level: int,
global_dict=None,
local_dict=None,
queryables: Optional[Dict[str, Any]] = None,
):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or {}
class Term(ops.Term):
env: PyTablesScope
def __new__(cls, name, env, side=None, encoding=None):
if isinstance(name, str):
klass = cls
else:
klass = Constant
return object.__new__(klass)
def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryables
if self.side == "left":
# Note: The behavior of __new__ ensures that self.name is a str here
if self.name not in self.env.queryables:
raise NameError(f"name {repr(self.name)} is not defined")
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore[misc]
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
assert isinstance(env, PyTablesScope), type(env)
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
op: str
queryables: Dict[str, Any]
condition: Optional[str]
def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self) -> bool:
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self) -> bool:
"""
return True if this is a valid column name for generation (e.g. an
actual column in the table)
"""
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v) -> str:
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return f"({self.lhs} {self.op} {val})"
def convert_value(self, v) -> "TermValue":
"""
convert the expression that is in the term to something that is
accepted by pytables
"""
def stringify(value):
if self.encoding is not None:
return pprint_thing_encoded(value, encoding=self.encoding)
return pprint_thing(value)
kind = ensure_decoded(self.kind)
meta = ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
if isinstance(v, str):
v = Timedelta(v).value
else:
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = extract_array(self.metadata, extract_numpy=True)
result = metadata.searchsorted(v, side="left")
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
def convert_values(self):
pass
class FilterBinOp(BinOp):
filter: Optional[Tuple[Any, Any, pd.Index]] = None
def __repr__(self) -> str:
if self.filter is None:
return "Filter: Not Initialized"
return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
def invert(self):
""" invert the filter """
if self.filter is not None:
self.filter = (
self.filter[0],
self.generate_filter_op(invert=True),
self.filter[2],
)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
rhs = self.conform(self.rhs)
values = list(rhs)
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
else:
raise TypeError(
f"passing a filterable condition to a non-table indexer [{self}]"
)
return self
def generate_filter_op(self, invert: bool = False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __repr__(self) -> str:
return pprint_thing(f"[Condition : [{self.condition}]]")
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when passing to numexpr"
)
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = f"({' | '.join(vs)})"
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None and (
issubclass(klass, ConditionBinOp)
and operand.condition is not None
or not issubclass(klass, ConditionBinOp)
and issubclass(klass, FilterBinOp)
and operand.filter is not None
):
return operand.invert()
return None
class PyTablesExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
if isinstance(slobj, Term):
# In py39 np.ndarray lookups with Term containing int raise
slobj = slobj.value
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
raise ValueError(
f"cannot subscript {repr(value)} with {repr(slobj)}"
) from err
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
            except AttributeError:
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: Optional[PyTablesExprVisitor]
env: PyTablesScope
def __init__(
self,
where,
queryables: Optional[Dict[str, Any]] = None,
encoding=None,
scope_level: int = 0,
):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: DeepChainMap[Any, Any] = DeepChainMap()
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif isinstance(where, (list, tuple)):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join(f"({w})" for w in com.flatten(where))
else:
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
) from err
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError as err:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
) from err
return self.condition, self.filter
class TermValue:
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind: str):
assert isinstance(kind, str), kind
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding) -> str:
""" quote the string if not encoded else encode and return """
if self.kind == "string":
if encoding is not None:
return str(self.converted)
return f'"{self.converted}"'
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return str(self.converted)
def maybe_expression(s) -> bool:
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
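# --- Added illustration (not part of the original pandas module) ---
# maybe_expression() is deliberately loose: it returns True as soon as any
# known operator token (or a bare '=') appears anywhere in the string, so it
# only screens out values that clearly cannot be expressions. A minimal
# sketch of the behaviour implied by the code above (kept as comments so the
# module itself is unchanged):
#
#     maybe_expression("index >= 5")     # True  -- '>' / '=' present
#     maybe_expression("columns=['A']")  # True  -- '=' present
#     maybe_expression(5)                # False -- not a string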
| 31.670846
| 89
| 0.550035
|
831cfdda40a62b4bad89121b9d0092ad7b268999
| 9,119
|
py
|
Python
|
myprojectenv/lib/python3.5/site-packages/ansible/modules/cloud/azure/azure_rm_resourcegroup.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
myprojectenv/lib/python3.5/site-packages/ansible/modules/cloud/azure/azure_rm_resourcegroup.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
myprojectenv/lib/python3.5/site-packages/ansible/modules/cloud/azure/azure_rm_resourcegroup.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup
version_added: "2.1"
short_description: Manage Azure resource groups.
description:
- Create, update and delete a resource group.
options:
force:
description:
- Remove a resource group and all associated resources. Use with state 'absent' to delete a resource
group that contains resources.
default: false
required: false
location:
description:
- Azure location for the resource group. Required when creating a new resource group. Cannot
be changed once resource group is created.
required: false
default: null
name:
description:
- Name of the resource group.
required: true
state:
description:
- Assert the state of the resource group. Use 'present' to create or update and
'absent' to delete. When 'absent' a resource group containing resources will not be removed unless the
force option is used.
default: present
choices:
- absent
- present
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a resource group
azure_rm_resourcegroup:
name: Testing
location: westus
tags:
testing: testing
delete: never
- name: Delete a resource group
azure_rm_resourcegroup:
name: Testing
state: absent
'''
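# --- Added, illustrative example (not part of the module's EXAMPLES above) ---
# The 'force' option documented in DOCUMENTATION removes a group even when it
# still contains resources; a task using it might look like the following
# (the resource group name is hypothetical):
#
# - name: Force delete a resource group that still contains resources
#   azure_rm_resourcegroup:
#     name: Testing
#     state: absent
#     force: true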
RETURN = '''
contains_resources:
description: Whether or not the resource group contains associated resources.
returned: always
type: bool
sample: True
state:
description: Current state of the resource group.
returned: always
type: dict
sample: {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"provisioning_state": "Succeeded",
"tags": {
"delete": "on-exit",
"testing": "no"
}
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.resource.resources.models import ResourceGroup
except ImportError:
pass
def resource_group_to_dict(rg):
return dict(
id=rg.id,
name=rg.name,
location=rg.location,
tags=rg.tags,
provisioning_state=rg.properties.provisioning_state
)
class AzureRMResourceGroup(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
force=dict(type='bool', default=False)
)
self.name = None
self.state = None
self.location = None
self.tags = None
self.force = None
self.results = dict(
changed=False,
contains_resources=False,
state=dict(),
)
super(AzureRMResourceGroup, self).__init__(self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
results = dict()
changed = False
rg = None
contains_resources = False
try:
self.log('Fetching resource group {0}'.format(self.name))
rg = self.rm_client.resource_groups.get(self.name)
self.check_provisioning_state(rg, self.state)
contains_resources = self.resources_exist()
results = resource_group_to_dict(rg)
if self.state == 'absent':
self.log("CHANGED: resource group {0} exists but requested state is 'absent'".format(self.name))
changed = True
elif self.state == 'present':
update_tags, results['tags'] = self.update_tags(results['tags'])
self.log("update tags %s" % update_tags)
self.log("new tags: %s" % str(results['tags']))
if update_tags:
changed = True
if self.location and self.location != results['location']:
self.fail("Resource group '{0}' already exists in location '{1}' and cannot be "
"moved.".format(self.name, results['location']))
except CloudError:
self.log('Resource group {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: resource group {0} does not exist but requested state is "
"'present'".format(self.name))
changed = True
self.results['changed'] = changed
self.results['state'] = results
self.results['contains_resources'] = contains_resources
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not rg:
# Create resource group
self.log("Creating resource group {0}".format(self.name))
if not self.location:
self.fail("Parameter error: location is required when creating a resource group.")
if self.name_exists():
self.fail("Error: a resource group with the name {0} already exists in your subscription."
.format(self.name))
params = ResourceGroup(
location=self.location,
tags=self.tags
)
else:
# Update resource group
params = ResourceGroup(
location=results['location'],
tags=results['tags']
)
self.results['state'] = self.create_or_update_resource_group(params)
elif self.state == 'absent':
if contains_resources and not self.force:
self.fail("Error removing resource group {0}. Resources exist within the group.".format(self.name))
self.delete_resource_group()
return self.results
def create_or_update_resource_group(self, params):
try:
result = self.rm_client.resource_groups.create_or_update(self.name, params)
except Exception as exc:
self.fail("Error creating or updating resource group {0} - {1}".format(self.name, str(exc)))
return resource_group_to_dict(result)
def delete_resource_group(self):
try:
poller = self.rm_client.resource_groups.delete(self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error delete resource group {0} - {1}".format(self.name, str(exc)))
# The delete operation doesn't return anything.
# If we got here, assume all is good
self.results['state']['status'] = 'Deleted'
return True
def resources_exist(self):
found = False
try:
response = self.rm_client.resource_groups.list_resources(self.name)
except Exception as exc:
self.fail("Error checking for resource existence in {0} - {1}".format(self.name, str(exc)))
for item in response:
found = True
break
return found
def name_exists(self):
try:
exists = self.rm_client.resource_groups.check_existence(self.name)
except Exception as exc:
self.fail("Error checking for existence of name {0} - {1}".format(self.name, str(exc)))
return exists
def main():
AzureRMResourceGroup()
if __name__ == '__main__':
main()
| 33.774074
| 119
| 0.586358
|
25dafc922adb2fd1a20bc7d87d02dbaa309a7ec2
| 464
|
py
|
Python
|
article/migrations/0004_article_article_image.py
|
kingbaberre/Study-buddy
|
808f96ef7cd08cd535154de6bcaadede0c36157a
|
[
"MIT",
"Unlicense"
] | null | null | null |
article/migrations/0004_article_article_image.py
|
kingbaberre/Study-buddy
|
808f96ef7cd08cd535154de6bcaadede0c36157a
|
[
"MIT",
"Unlicense"
] | 10
|
2020-02-12T01:26:33.000Z
|
2022-02-10T12:29:37.000Z
|
article/migrations/0004_article_article_image.py
|
vendari12/akwasite
|
27a0e5fca498e8711daa912ab6697d032d90d8d4
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2018-03-27 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0003_remove_article_article_image'),
]
operations = [
migrations.AddField(
model_name='article',
name='article_image',
field=models.FileField(blank=True, null=True, upload_to='', verbose_name='Makaleye Fotoğraf Ekleyin'),
),
]
| 24.421053
| 114
| 0.635776
|
dfff4de08f47e562ab986dcedae482d63b61a9ca
| 9,466
|
py
|
Python
|
pycle/bicycle-scrapes/chainreacton-v2/modelParser.py
|
fusuyfusuy/School-Projects
|
8e38f19da90f63ac9c9ec91e550fc5aaab3d0234
|
[
"MIT"
] | null | null | null |
pycle/bicycle-scrapes/chainreacton-v2/modelParser.py
|
fusuyfusuy/School-Projects
|
8e38f19da90f63ac9c9ec91e550fc5aaab3d0234
|
[
"MIT"
] | null | null | null |
pycle/bicycle-scrapes/chainreacton-v2/modelParser.py
|
fusuyfusuy/School-Projects
|
8e38f19da90f63ac9c9ec91e550fc5aaab3d0234
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import os
import csv
bicycles = []
save = open('bicycleSave2.py','a')
types = os.listdir('modelDownload/modelPages')
for t in types:
bType = '------'
if str(t)=='bmx':
bType = 'BMX'
elif str(t)=='cross':
bType = 'Cyclo Cross'
elif str(t)=='electric':
bType = 'Electric'
elif str(t)=='folding':
bType = 'Folding'
elif str(t)=='hybrid' :
bType = 'Hybrid - City'
elif str(t)=='kid' :
bType = 'Kids'
elif str(t)=='mountain' :
bType = 'Mountain'
elif str(t)=='road' :
bType = 'Road'
elif str(t)=='tt' :
bType = 'TT'
files = os.listdir('modelDownload/modelPages/'+t)
for f in files:
listFile = open('modelDownload/modelPages/'+str(t)+'/'+str(f))
print('parsing '+str(t)+'/'+str(f)+' ',end='\r')
parsed = BeautifulSoup(listFile, 'html.parser')
brand = parsed.find('h1').text.strip().replace('\n',' &&&& ').split(' &&&& ')[0]
model = parsed.find('h1').text.strip().replace('\n',' &&&& ').split(' &&&& ')[1]
try:
price = parsed.find(attrs={'class':'crcPDPPriceHidden'}).text.strip().replace('\n','')
except:
print('price error on '+str(f))
price = '------'
# features
try:
features = parsed.find(text="Features:").findNext('ul')
featureList = features.findAll('li')
except:
print('error on ' + str(f))
else:
bicycle = {'Price':price,'Brand':brand,'Model':model,'Type':bType,'Colour': '------', 'Wheel Size': '------', 'Frame Size': '------', 'Gender': '------', 'Speed': '------', 'Material': '------', 'Age Group': '------', 'Fork Travel': '------', 'Rear Travel': '------','Frame': '------', 'Forks': '------', 'Brake': '------', 'Cable': '------', 'Brake Levers': '------', 'Chainwheel': '------', 'Freewheel': '------', 'Chain': '------', 'Headset': '------', 'Crank': '------', 'Bottom Bracket': '------', 'Rims': '------', 'Front Hub': '------', 'Rear Hub': '------', 'Tyres': '------', 'Seat': '------', 'Handlebars': '------', 'Handlebar Stem': '------', 'Grips': '------', 'Pedal': '------', 'Fork': '------', 'Steerer': '------', 'Stem': '------', 'Handlebar': '------', 'Handlebar Tape': '------', 'Shifter/Brake Levers': '------', 'Brake System': '------', 'Front Derailleur': '------', 'Rear Derailleur': '------', 'Crankset': '------', 'Cassette': '------', 'Saddle': '------', 'Seatpost': '------', 'Tubeless Ready Tyres': '------', 'Tubeless Ready Wheels': '------', 'Pedals': '------', 'Dropouts/Axle Type': '------', 'Maximum Tyre Size': '------', 'Rear Pannier Rack Compatible': '------', 'Mudguards Compatible': '------', 'Replacement Gear Hanger': '------', 'Weight': '------', 'Drivetrain': '------', 'Chainset': '------', 'Shifters': '------', 'Wheelset': '------', 'Brakes': '------', 'Bars': '------', 'Use': '------', 'Brake Rotors': '------', 'Bar Tape': '------', 'Brake Caliper Mounts': '------', 'Wheels': '------', 'Axles': '------', 'Max Tyre Clearance': '------', 'Seat Clamp': '------', 'Cog': '------', 'Hub Spacing': '------', 'Gear/Brake Levers': '------', 'Tyre Clearance': '------', 'Seat Post': '------', 'Cable Routing': '------', 'Brake Fitment': '------', 'Components': '------', 'Disc Brakes': '------', 'Handle Bar': '------', 'Stem/Seatpost': '------', 'Fork Weight': '------', 'Frame Weight': '------', 'Chainring': '------', 'Hubs': '------', 'Spokes/Nipples': '------', 'Accessories': '------', 'Rear Shock': '------', 'ISCG Tabs': '------', 'Chainguide': '------', 'Spokes': '------', 'Front Tyre': '------', 'Rear Tyre': '------', 'Seat Post Clamp': '------', 'Warranty': '------', 'Maximum Tyre Sizes': '------', 'Carrier / Basket': '------', 'Mudguards': '------', 'Stand': '------', 'Additional': '------', 'Light Front': '------', 'Light Rear': '------', 'Front Light': '------', 'Rear Light': '------', 'Seatclamp': '------', 'Tyre': '------', 'Seatpost Clamp': '------', 'Kickstand': '------', 'Mudguard': '------', 'Bell': '------', 'Extras': '------', 'Includes': '------', 'Shift/Brake Levers': '------', 'Shift Brake Levers': '------', 'Manufacturer Part Numbers': '------', '17” Blue/Red': '------', '19” Blue/Red': '------', '21” Blue/Red': '------', '17” Black/Flash Yellow': '------', '19” Black/Flash Yellow': '------', '21” Black/Flash Yellow': '------', 'Carrier': '------', 'Carrier Rack': '------', 'Shock Hardware': '------', 'Front Derailluer': '------', 'Cranks': '------', 'Integrated Handlebar/Stem': '------', 'Shift/ Brake Levers': '------', 'Engine': '------', 'Battery': '------', 'Charger': '------', 'Display': '------', 'Shifter': '------', '50cm': '------', '53cm': '------', '56cm': '------', '59cm': '------', '62cm': '------', 'Chain Guide': '------', 'Front Tyres': '------', 'Rear Pannier Rack': '------', '54cm': '------', '58cm': '------', 'Lights': '------', 'Frameset': '------', 'Brake/Shift Levers': '------', 'Front Brake': '------', 'Rear Brake': '------', 
'Geometry': '------', 'BB': '------', 'Sprocket': '------', 'Front Wheel': '------', 'Rear Wheel': '------', 'Misc': '------', 'Brakeset': '------', 'Brakset': '------', 'Gear Hanger Model Number': '------', 'Chain Device': '------', 'Cog Set': '------', 'Handebar': '------', 'Rear Cog': '------', 'Rear Cogs': '------', 'Rear Cassette': '------', 'Frame Material': '------', 'Top Tube Length': '------', 'Brake Lever': '------', 'Brake Cable': '------', 'Driver': '------', 'Front Rim': '------', 'Rear Rim': '------', 'Gyro Tabs': '------', 'Stuntpegs': '------', 'Chain Stay Length': '------', 'Headtube Angle': '------', 'Seat Tube Angle': '------', 'Gearing': '------', 'Crankarms': '------', 'Chainrings': '------', 'Brake Calipers': '------', 'Front Brake Rotor': '------', 'Rear Brake Rotor': '------', 'B/b': '------', 'B/B': '------', 'Sitting Posture': '------', 'Hub Type': '------', 'Number of Gears': '------', 'Brake Type': '------', 'Luggage Carrier': '------', 'Child Seat Compatible': '------', 'Front Seat Compatible': '------', 'Rear Seat Compatible': '------', 'Light': '------', 'Lock Type': '------', 'Suspension Fork': '------', 'Tyre Sealant': '------', 'Discs': '------', 'Battery Charger': '------', 'Brake Rotor': '------', 'Axle to Crown': '------', 'Fork Offset': '------', 'Max Tyre Size': '------', 'Protection': '------', 'Front Rotor': '------', 'Rear Rotor': '------', 'Brakes/Shifter': '------', 'Stem Lengths': '------', 'Shock': '------', 'Rear Deraileur': '------', 'Derailleurs': '------', 'Levers': '------', 'Shifter L': '------', 'Shifter R': '------', 'Brakes Front': '------', 'Brakes Rear': '------', 'Tubes': '------', 'Frame/Fork': '------', 'Brake Discs': '------', 'Groupset': '------', 'Derailleur Rear': '------', 'Axle Front': '------', 'Axle Rear': '------', 'Front Axle': '------', 'Rear Axle': '------', 'Suggested Rider Size': '------', 'Barends': '------', 'Pegs': '------', 'Top Tube': '------', 'Chain Stay': '------', 'Head Tube': '------', 'Seat Tube': '------', 'Brake Mounts': '------', 'Bar Ends': '------', 'Stand Over': '------', 'BB Height': '------', 'Head Tube Angle': '------', 'Standover Height': '------', 'Bottom Bracket Height': '------', 'Rear Lever': '------', 'Adjustable Stem': '------', 'Suspension Forks': '------', 'Grip(Tape)': '------', 'Spoke': '------', 'Front Brake Set': '------', 'Rear Brake Set': '------', 'Seat Screw': '------', 'Rear Shifter': '------', 'Rotors': '------', 'Grip (Tape)': '------', 'Left Shifter': '------', 'Right Shifter': '------', 'Rear rim': '------', 'Rear hub': '------', 'Rear Bbrake Set': '------', 'Front hub': '------', 'Tyre Type': '------', 'Minimum Saddle Height': '------', 'Maximum Saddle Height': '------', 'F/rim': '------', 'R/rim': '------', 'F/hub': '------', 'R/hub': '------', 'F/tire': '------', 'R/tire': '------', 'F/brake Set': '------', 'R/brake Set': '------', 'R/derailleur': '------', 'R/shifter': '------', 'Ideal Rider Inside Leg Length': '------'}
for i in featureList:
try:
title = i.text.split(':', 1)[0].strip()
detail = i.text.split(':', 1)[1].strip()
bicycle[title] = detail
except:
print(' feature error on ' + str(f))
else:
try:
featuresSpec = parsed.find(text="Specs").findNext('ul')
featureListSpec = featuresSpec.findAll('li')
except:
print(' specs error on ' + str(f))
else:
for j in featureListSpec:
try:
titleSpec = j.text.split(':', 1)[0].strip()
detailSpec = j.text.split(':', 1)[1].strip()
bicycle[titleSpec] = detailSpec
except:
print(' specs error on ' + str(f))
bicycles.append(bicycle)
save.write(str(bicycle)+',\n')
# keys = bicycles[0].keys()
# with open('bicycles.csv', 'w', newline='') as output_file:
# dict_writer = csv.DictWriter(output_file, keys)
# dict_writer.writeheader()
# dict_writer.writerows(bicycles)
| 116.864198
| 6,549
| 0.414431
|
3c3bc11d6eceb2dec5fd79775c4f6ed794d50016
| 32,603
|
py
|
Python
|
sim-k227-quant/jmMC3.py
|
jrminter/dtsa2scripts
|
a7a4f3a63f47f0a8abe7ee13c72f5a27196c3a1b
|
[
"MIT"
] | 2
|
2018-04-19T12:25:29.000Z
|
2018-11-24T12:55:46.000Z
|
sim-k227-quant/jmMC3.py
|
jrminter/dtsa2Scripts
|
a7a4f3a63f47f0a8abe7ee13c72f5a27196c3a1b
|
[
"MIT"
] | null | null | null |
sim-k227-quant/jmMC3.py
|
jrminter/dtsa2Scripts
|
a7a4f3a63f47f0a8abe7ee13c72f5a27196c3a1b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# 1 2 3 4 5 6 7 |
# 3456789012345678901234567890123456789012345678901234567890123456789012
#
# jmMC3.py
#
# Wrapper scripts for MC3 Monte Carlo simulations with DTSA-II
# Developed for Iona version 2015-07-01
#
# Modifications
# Date Who Ver What
# ---------- --- ------ ---------------------------------------------
# 2015-07-13 JRM 0.0.10 Initial prototype: biLayerLineOnSubstrate,
# triLayerLineOnSubstrate, and
# simulateBulkStandard.
# 2015-07-14 JRM 0.0.11 Added simLineInMatrix and formatted.
# 2015-07-23 JRM 0.0.12 Added simLineInMatrixLimScan
# 2015-10-02 JRM 0.0.13 Added coatedSphereOnFilm
# 2015-10-03 JRM 0.0.14 Added coatedOverBlock
# 2016-05-14 JRM 0.0.15 Updated for Jupiter
# 2017-06-18 JRM 0.0.16 Added simCarbonCoatedStd
# 2017-06-20 JRM 0.0.17 Added simBulkStd to sim uncoated standard
# 2017-06-22 JRM 0.0.18 Added simCtdOxOnSi
# 2018-10-14 JRM 0.0.19 Added getSpecPath
__revision__ = "$Id: jmMC3.py John Minter $"
__version__ = "0.0.19"
import sys
sys.packageManager.makeJavaPackage("gov.nist.microanalysis.NISTMonte.Gen3", "CharacteristicXRayGeneration3, BremsstrahlungXRayGeneration3, FluorescenceXRayGeneration3, XRayTransport3", None)
import gov.nist.microanalysis.EPQLibrary as epq
import gov.nist.microanalysis.EPQLibrary.Detector as epd
import gov.nist.microanalysis.Utility as epu
import gov.nist.microanalysis.NISTMonte as nm
import gov.nist.microanalysis.NISTMonte.Gen3 as nm3
import gov.nist.microanalysis.EPQTools as et
import dtsa2.mcSimulate3 as mc3
import dtsa2.jmGen as jmg
import dtsa2 as dt2
import java.util as jutil
import java.io as jio
import java.nio.charset as cs
import os
import glob
import shutil
if 'defaultXtraParams' not in globals():
defaultXtraParams = {}
if 'defaultBremFluor' not in globals():
defaultBremFluor = False
if 'defaultCharFluor' not in globals():
defaultCharFluor = False
if 'defaultNumTraj' not in globals():
defaultNumTraj = 1000
if 'defaultDose' not in globals():
defaultDose = 120.0
def getSpecPath(baseName, baseDir, e0, nTraj):
"""
getSpecPath(baseName, baseDir, e0, nTraj)
Generate a file path for Monte Carlo simulated spectrum
Parameters
----------
baseName - a string. The base name for the simulation,
Example: "bulkC"
baseDir - a string. The path to the directory to write the spectrum
Example: "C:/username/Documents/work/project"
Note: no path separator!
e0 - a number. The voltage (kV) for the simulation
Example: 15
nTraj - a number. The number of trajectories for the simulation
Example: 20000
Returns
-------
path - A string. The path to the file to write
Example
-------
import dtsa2.jmMC3 as jm3
e0 = 15
nTraj = 20000
det = findDetector("Oxford p4 05eV 4K")
c = material("C", density=2.266)
a = jm3.simBulkStd(c, det, e0, nTraj, 100, 1.0, False)
a.display()
fi = jm3.getSpecPath("bulkC", "C:/username/Documents/work/project", e0, nTraj)
a.save(fi)
"""
sName = "%s-%g-kV-%g-Traj" % (baseName, e0, nTraj)
sPath = "%s/%s.msa" % (baseDir, sName)
return sPath
def simBulkStd(mat, det, e0, nTraj, lt=100, pc=1.0, ctd=True):
"""simBulkStd(mat, det, e0, nTraj, lt=100, pc=1.0)
Use mc3 simulation to simulate an uncoated standard specimen
Parameters
----------
mat - a dtsa material.
Note the material must have an associated density. It should have a useful name.
det - a dtsa detector
Here is where we get the detector properties and calibration
e0 - float
The accelerating voltage in kV
nTraj - integer
The number of trajectories to run
lt - integer (100)
The live time (sec)
pc - float (1.0)
The probe current in nA
ctd - Boolean (True) - is C coated
Returns
-------
sim - DTSA scriptable spectrum
The simulated standard spectrum
Example
-------
import dtsa2.jmMC3 as jm3
det = findDetector("Oxford p4 05eV 2K")
cu = material("Cu", density=8.92)
a = jm3.simBulkStd(cu, det, 20.0, 100, 100, 1.0)
a.display()
"""
    dose = pc * lt  # nA-sec
xrts = []
trs = mc3.suggestTransitions(mat, e0)
for tr in trs:
xrts.append(tr)
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts,True, True, True))
sim = mc3.simulate(mat, det, e0, dose, withPoisson=True, nTraj=nTraj,
sf=True, bf=True, xtraParams=xtraParams)
sName = "%s-%g-kV" % (mat, e0)
sim.rename(sName)
sim.setAsStandard(mat)
return sim
def simCtdOxOnSi(det, e0, nTraj, lt=100, pc=1.0, tox = 10.0, tc=20.0):
"""
simCtdOxOnSi(det, e0, nTraj, lt=100, pc=1.0, tox = 10.0, tc=20.0)
Use mc3 multilayer simulation to simulate a C-ctd silicon specimen
with a native oxide layer.
det - a dtsa detector
Here is where we get the detector properties and calibration
e0 - float
The accelerating voltage in kV
nTraj - integer
The number of trajectories to run
lt - integer (100)
The live time (sec)
pc - float (1.0)
The probe current in nA
tox - float (10.0)
The thickness of the native oxide in nm
tc - float (20.0)
C thickness in nm
Returns
-------
sim - DTSA scriptable spectrum
The simulated spectrum
Example
-------
import dtsa2.jmMC3 as jm3
det = findDetector("Oxford p4 05eV 2K")
a = jm3.simCtdOxOnSi(det, 3.0, 100, 100, 1.0, 10.0, 20.0)
a.display()
"""
c = dt2.material("C", density=2.1)
si = dt2.material("Si", density=2.329)
sio2 = dt2.material("SiO2", density=2.65)
    dose = pc * lt  # nA-sec
layers = [ [ c, tc*1.0e-9],
[sio2, tox*1.0e-9],
[si, 50.0e-6] ]
xrts = []
trs = mc3.suggestTransitions(c, e0)
for tr in trs:
xrts.append(tr)
trs = mc3.suggestTransitions(sio2, e0)
for tr in trs:
xrts.append(tr)
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts,True, True, True))
sim = mc3.multiFilm(layers, det, e0, withPoisson=True, nTraj=nTraj,
dose=dose, sf=True, bf=True, xtraParams=xtraParams)
sName = "%g-nm-C-on-%g-nm-SiO2-on-Si-%g-kV-%g-Traj" % (tc, tox, e0, nTraj)
sim.rename(sName)
return sim
def simCarbonCoatedStd(mat, det, e0, nTraj, lt=100, pc=1.0, tc=20.0):
"""simCarbonCoatedStd(mat, det, e0, nTraj, lt=100, pc=1.0, tc=20.0)
Use mc3 multilayer simulation to simulate a C-ctd standard specimen
Parameters
----------
mat - a dtsa material.
Note the material must have an associated density. It should have a useful name.
det - a dtsa detector
Here is where we get the detector properties and calibration
e0 - float
The accelerating voltage in kV
nTraj - integer
The number of trajectories to run
lt - integer (100)
The live time (sec)
pc - float (1.0)
The probe current in nA
tc - float (20.0)
C thickness in nm
Returns
-------
sim - DTSA scriptable spectrum
The simulated standard spectrum
Example
-------
import dtsa2.jmMC3 as jm3
det = findDetector("Oxford p4 05eV 2K")
mgo = material("MgO", density=3.58)
a = jm3.simCarbonCoatedStd(mgo, det, 20.0, 100, 100, 1.0, 20.0)
a.display()
"""
    dose = pc * lt  # nA-sec
c = dt2.material("C", density=2.1)
layers = [ [ c, tc*1.0e-9],
[mat, 50.0e-6]
]
xrts = []
trs = mc3.suggestTransitions(c, e0)
for tr in trs:
xrts.append(tr)
trs = mc3.suggestTransitions(mat, e0)
for tr in trs:
xrts.append(tr)
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts,True, True, True))
sim = mc3.multiFilm(layers, det, e0, withPoisson=True, nTraj=nTraj,
dose=dose, sf=True, bf=True, xtraParams=xtraParams)
sName = "%g-nm-C-on-%s-%g-kV-%g-Traj" % (tc, mat, e0, nTraj)
sim.rename(sName)
sim.setAsStandard(mat)
return sim
def coatedOverBlock(mat, height, width, coating, thickness, substrate, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams=defaultXtraParams):
"""coatedOverBlock(mat, height, width, coating, thickness, substrate, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=defaultCharFluor, bf=defaultBremFluor, substrate=None, xtraParams={})
    Monte Carlo simulate a spectrum from a block-shaped particle of the specified material (mat) and height (z in m) and width (x and y in m). \
    The block and substrate are coated in a material 'coating' of the specified thickness, which fully encapsulates the particle and covers the substrate too."""
def buildBlock(monte, chamber, origin, buildParams):
height = buildParams["Height"]
width = buildParams["Width"]
subMat = buildParams["Substrate"]
mat = buildParams["Material"]
coating = buildParams["Coating"]
thickness = buildParams["Thickness"]
coatedCube = nm.MultiPlaneShape.createBlock([width + 2.0 * thickness, width + 2.0 * thickness, height + thickness], epu.Math2.plus(origin, [0.0, 0.0, 0.5*(height + thickness)]), 0.0, 0.0, 0.0)
sr1 = monte.addSubRegion(chamber, coating, coatedCube)
cube = nm.MultiPlaneShape.createBlock([width, width, height], epu.Math2.plus(origin, [0.0, 0.0, thickness+0.5*height]), 0.0, 0.0, 0.0)
monte.addSubRegion(sr1, mat, cube)
sideSlabWidth = 2.5*1.0e-6 - (thickness + 0.5*width)
sideSlabDims = [sideSlabWidth, 5.*1.0e-6, thickness]
leftSidePt = epu.Math2.plus(origin, [0.5*(width+sideSlabWidth), 0.0, thickness+height])
leftSide = nm.MultiPlaneShape.createBlock(sideSlabDims, leftSidePt, 0.0, 0.0, 0.0)
monte.addSubRegion(chamber, coating, leftSide)
rightSidePt = epu.Math2.plus(origin, [-0.5*(width+sideSlabWidth), 0.0, thickness+height])
rightSide = nm.MultiPlaneShape.createBlock(sideSlabDims, rightSidePt, 0.0, 0.0, 0.0)
monte.addSubRegion(chamber, coating, rightSide)
fbSlabDims = [width, sideSlabWidth, thickness]
frontSidePt = epu.Math2.plus(origin, [0.0, 0.5*(width+sideSlabWidth), thickness+height])
frontSide = nm.MultiPlaneShape.createBlock(fbSlabDims, frontSidePt, 0.0, 0.0, 0.0)
monte.addSubRegion(chamber, coating, frontSide)
backSidePt = epu.Math2.plus(origin, [0.0, -0.5*(width+sideSlabWidth), thickness+height])
backSide = nm.MultiPlaneShape.createBlock(fbSlabDims, backSidePt, 0.0, 0.0, 0.0)
monte.addSubRegion(chamber, coating, backSide)
# no substrate - don't want film under block...
# monte.addSubRegion(chamber, coating, nm.MultiPlaneShape.createFilm([0.0, 0.0, -1.0], epu.Math2.plus(origin, [0.0, 0.0, height + thickness]), thickness))
monte.addSubRegion(chamber, subMat, nm.MultiPlaneShape.createSubstrate([0.0, 0.0, -1.0], epu.Math2.plus(origin, [0.0, 0.0, height + 2*thickness])))
s1 = u"MC simulation of a [%0.2f,%0.2f,%0.2f] micron block of %s%s" % (width * 1.0e6, width * 1.0e6, height * 1.0e6, mat, (" on %s" % substrate if substrate else ""))
s2 = u" coated with %0.2f microns of %s at %0.1f keV%s%s" % (thickness* 1.0e6, coating, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
tmp = s1 + s2
# tmp = u"MC simulation of a [%0.2f,%0.2f,%0.2f] micron block of %s%s coated with %0.2f microns of %s at %0.1f keV%s%s" % (width * 1.0e6, width * 1.0e6, height * 1.0e6, mat, (" on %s" % substrate if substrate else ""), coating, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
params = {"Substrate": substrate, "Width" : width, "Height" : height, "Material" : mat, "Coating" : coating, "Thickness" : thickness}
return mc3.base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBlock, params, xtraParams)
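# --- Added usage sketch (not part of the original script). It follows the
# Example sections in the docstrings above; the detector name and materials
# are assumptions, not values taken from the original document.
#
#     import dtsa2.jmMC3 as jm3
#     det = findDetector("Oxford p4 05eV 2K")
#     tio2 = material("TiO2", density=4.23)
#     c = material("C", density=2.1)
#     si = material("Si", density=2.329)
#     spc = jm3.coatedOverBlock(tio2, 0.5e-6, 1.0e-6, c, 25.0e-9, si, det,
#                               e0=15.0, nTraj=1000, dose=120.0)
#     spc.display()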
def coatedSphereOnFilm(mat, radius, coating, cThick, film, fThick, det, e0=20.0, withPoisson=True, nTraj=100, dose=100, sf=True, bf=True, xtraParams={}):
"""coatedSphereOnFilm(mat, radius, coating, cThick, film, fThick, det, e0=20.0, withPoisson=True, nTraj=100, dose=100, sf=True, bf=True, xtraParams={})
    Monte Carlo simulate a spectrum from a spherical particle of the specified material (mat) and radius (in m), \
    coated with a layer of 'coating' of thickness cThick (in m), on a film of material 'film' and thickness fThick immediately \
    below the particle."""
    if radius < 0.0:
        raise ValueError("The sphere radius must be larger than zero.")
    if cThick < 0.0:
        raise ValueError("The coating thickness must be larger than zero.")
    if fThick < 0.0:
        raise ValueError("The film thickness must be larger than zero.")
def buildSphere(monte, chamber, origin, buildParams):
mat = buildParams["Material"]
radius = buildParams["Radius"]
coating = buildParams["Coating"]
cThick = buildParams["Coating Thickness"]
film = buildParams["Film"]
fThick = buildParams["Film Thickness"]
coatSphere = nm.Sphere(epu.Math2.plus(origin, [0.0, 0.0, radius + cThick]), radius + cThick)
srC = monte.addSubRegion(chamber, coating, coatSphere)
sphere = nm.Sphere(epu.Math2.plus(origin, [0.0, 0.0, radius + cThick]), radius)
monte.addSubRegion(srC, mat, sphere)
monte.addSubRegion(chamber, film, nm.MultiPlaneShape.createFilm([0.0, 0.0, -1.0], epu.Math2.plus(origin, [0.0, 0.0, 2.0 * radius]), fThick))
tmp = u"MC simulation of a %0.3f micron sphere of %s coated with %0.3f microns of %s on %0.3f microns of %s at %0.1f keV%s%s" % (radius * 1.0e6, mat, cThick * 1.0e6, coating, fThick* 1.0e6, film, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return mc3.base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildSphere, {"Film": film, "Radius" : radius, "Material" : mat, "Coating" : coating, "Coating Thickness" : cThick, "Film Thickness" : fThick}, xtraParams)
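# --- Added usage sketch (not part of the original script); the detector name
# and materials are assumptions.
#
#     import dtsa2.jmMC3 as jm3
#     det = findDetector("Oxford p4 05eV 2K")
#     fe2o3 = material("Fe2O3", density=5.24)
#     c = material("C", density=2.1)
#     si3n4 = material("Si3N4", density=3.17)
#     spc = jm3.coatedSphereOnFilm(fe2o3, 0.25e-6, c, 20.0e-9, si3n4, 50.0e-9,
#                                  det, e0=20.0, nTraj=1000, dose=120.0)
#     spc.display()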
def biLayerLineOnSubstrate(matTopL, matBotL, matSub, htTopL, htBotL, width, length, det, title, e0=20.0, withPoisson=True, nTraj=100, dose=120.0, sf=True, bf=True, xtraParams={}, bVerbose=False):
"""biLayerLineOnSubstrate(matTopL, matBotL, matSub, htTopL, htBotL,
width, length, det, title, e0=20.0, withPoisson=True,
nTraj=100, dose=120.0, sf=True,
bf=True, xtraParams={}, bVerbose=False)
Monte Carlo simulate a spectrum from a bilayer line on a substrate of
the specified materials (matTopL, matBotL, matSub) and layer heights
( htTopL, htBotL; z in m ) and width and length (x and y in m)."""
def buildBlock(monte, chamber, origin, buildParams):
matSub = buildParams["Mat Subs"]
matTopL = buildParams["Mat Top"]
matBotL = buildParams["Mat Bot"]
htTopL = buildParams["Hei Top"]
htBotL = buildParams["Hei Bot"]
width = buildParams["Width"]
length = buildParams["Length"]
monte.addSubRegion(chamber, matTopL, nm.MultiPlaneShape.createBlock([width, length, htTopL], epu.Math2.plus(origin, [0.0, 0.0, 0.5*htTopL]), 0.0, 0.0, 0.0))
monte.addSubRegion(chamber, matBotL, nm.MultiPlaneShape.createBlock([width, length, htBotL], epu.Math2.plus(origin, [0.0, 0.0, 0.5*htBotL+htTopL]), 0.0, 0.0, 0.0))
monte.addSubRegion(chamber, matSub, nm.MultiPlaneShape.createSubstrate([0.0, 0.0, -1.0], epu.Math2.plus(origin, [0.0, 0.0, htTopL+htBotL])))
tmp = u"biLayerLineOnSub-%s" % title
params = {"Mat Top" : matTopL, "Mat Bot" : matBotL, "Mat Subs": matSub, "Hei Top" : htTopL, "Hei Bot" : htBotL, "Width" : width, "Length" : length}
    if bVerbose:
        print(params)
return mc3.base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBlock, params, xtraParams)
def triLayerLineOnSubstrate(matTopL, matMidL, matBotL, matSub, htTopL, htMidL, htBotL, width, length, det, title, e0=20.0, withPoisson=True, nTraj=100, dose=120.0, sf=True, bf=True, xtraParams={}, bVerbose=False):
"""triLayerLineOnSubstrate(matTopL, matMidL, matBotL, matSub, htTopL,
htMidL, htBotL, width, length, det, title, e0=20.0, withPoisson=True,
nTraj=100, dose=120.0, sf=True,
bf=True, xtraParams={})
Monte Carlo simulate a spectrum from a bilayer line on a substrate of
the specified materials (matTopL, matMid, matBotL, matSub) and layer
heights ( htTopL, htMidL, htBotL; z in m ) and width and length
(x and y in m)."""
def buildBlock(monte, chamber, origin, buildParams):
matSub = buildParams["Mat Subs"]
matTopL = buildParams["Mat Top"]
matMidL = buildParams["Mat Mid"]
matBotL = buildParams["Mat Bot"]
htTopL = buildParams["Hei Top"]
htMidL = buildParams["Hei Mid"]
htBotL = buildParams["Hei Bot"]
width = buildParams["Width"]
length = buildParams["Length"]
monte.addSubRegion(chamber, matTopL, nm.MultiPlaneShape.createBlock([width, length, htTopL], epu.Math2.plus(origin, [0.0, 0.0, 0.5*htTopL]), 0.0, 0.0, 0.0))
monte.addSubRegion(chamber, matMidL, nm.MultiPlaneShape.createBlock([width, length, htMidL], epu.Math2.plus(origin, [0.0, 0.0, 0.5*htMidL+htTopL]), 0.0, 0.0, 0.0))
monte.addSubRegion(chamber, matBotL, nm.MultiPlaneShape.createBlock([width, length, htBotL], epu.Math2.plus(origin, [0.0, 0.0, 0.5*htBotL+htTopL+htMidL]), 0.0, 0.0, 0.0))
monte.addSubRegion(chamber, matSub, nm.MultiPlaneShape.createSubstrate([0.0, 0.0, -1.0], epu.Math2.plus(origin, [0.0, 0.0, htTopL+htMidL+htBotL])))
tmp = u"triLayerLineOnSub-%s" % title
params = {"Mat Top" : matTopL, "Mat Mid" : matMidL, "Mat Bot" : matBotL, "Mat Subs": matSub, "Hei Top" : htTopL, "Hei Mid" : htMidL, "Hei Bot" : htBotL, "Width" : width, "Length" : length}
    if bVerbose:
        print(params)
return mc3.base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBlock, params, xtraParams)
def simulateBulkStandard(mat, name, det, e0, lt, pc, withPoisson=True, nTraj=100, sf=True, bf=True, xtraParams={}):
"""simulateBulkStandard(mat, name, det, e0, lt, pc, withPoisson=True,
nTraj=100, sf=True, bf=True, xtraParams={})"""
    std = mc3.simulate(mat, det, e0, lt*pc, withPoisson=withPoisson, nTraj=nTraj, sf=sf, bf=bf, xtraParams=xtraParams)
props=std.getProperties()
props.setNumericProperty(epq.SpectrumProperties.LiveTime, lt)
props.setNumericProperty(epq.SpectrumProperties.FaradayBegin, pc)
props.setNumericProperty(epq.SpectrumProperties.FaradayEnd, pc)
props.setNumericProperty(epq.SpectrumProperties.BeamEnergy, e0)
std.setAsStandard(mat)
return(std)
def simLineInMatrix(lin, linMat, blk, blkMat, nmLinWid, umBlock, nPts, trs, outDir, hdr, det, e0, lt, pc, withPoisson=True, nTraj=100, sf=True, bf=True, iDigits=5, bVerbose=False, xtraParams={}):
"""simLineInMatrix(lin, linMat, blk, blkMat, nmLinWid, umBlock, nPts,
trs, outDir, hdr, det, e0, lt, pc, withPoisson=True, nTraj=nTraj,
sf=True, bf=True, iDigits=5, bVerbose=False, xtraParams={})
Simulate a line of width `nmLinWid' nm at the center of a block of
`umBlock' microns. The line is of material `lin' with a name `linMat'.
The block is of material `blk' with a name `blkMat'.
    We analyze a list `trs' of transitions, writing the K-ratios to a
.csv file with a header `hdr'. We use the detector `det', voltage `e0'
(kV) and live time `lt' sec and probe current `pc' nA. This will
compute the standard spectra, compute the spectra for nPts+1 from
-nPts/2 ... 0 ...nPts/2 times the block size. It will then compute the
K-ratios for each spectrum and write them to a file `name' in outDir
with a header `hdr' that matches the transition order.
"""
# order is order of trs..
sc = 1.0e-6 # scale from microns to meters for positions
    lX = [] # an array for positions
    lKlin = [] # an array for the K-ratio of the line
    lKblk = [] # an array for the K-ratio of the block. Titles correspond to the hdr string
# start clean
dt2.DataManager.clearSpectrumList()
# create the standards
linStd = simulateBulkStandard(lin, linMat, det, e0, lt, pc, withPoisson=True, nTraj=nTraj, sf=True, bf=True, xtraParams={})
dt2.display(linStd)
blkStd = simulateBulkStandard(blk, blkMat, det, e0, lt, pc, withPoisson=True, nTraj=nTraj, sf=True, bf=True, xtraParams={})
dt2.display(blkStd)
lStd = {"El":dt2.element(linMat), "Spc":linStd}
bStd = {"El":dt2.element(blkMat), "Spc":blkStd}
stds = [lStd, bStd] # note: put the transitions in this order
iCount = 0
for x in range(-nPts/2, (nPts/2)+1, 1):
xv = sc*x*umBlock/nPts
lX.append(round(x*umBlock/nPts, iDigits))
monte=nm.MonteCarloSS()
monte.setBeamEnergy(epq.ToSI.keV(e0))
# use a 1 nm probe
beam=nm.GaussianBeam(1.0e-9)
monte.setElectronGun(beam)
beam.setCenter([xv, 0.0,-0.05])
# createBlock(double[] dims, double[] point, double phi, double theta, double psi)
# createBlock - Create a block of:
# dimensions specified in dims,
# centered at point,
# then rotated by the euler angles phi, theta, psi.
block = nm.MultiPlaneShape.createBlock([umBlock*1.0e-6, umBlock*1.0e-6, umBlock*1.0e-6],[0.0,0.0, 0.5*umBlock*1.0e-6],0.0,0.0,0.0)
matrix = monte.addSubRegion(monte.getChamber(), blk, block)
monte.addSubRegion(matrix, lin, nm.MultiPlaneShape.createBlock([1.0e-9*nmLinWid, umBlock*1.0e-6, umBlock*1.0e-6],[0.0, 0.0, 0.5*umBlock*1.0e-6],0.0,0.0,0.0))
det.reset()
# Add event listeners to model characteristic radiation
chXR = nm3.CharacteristicXRayGeneration3.create(monte)
xrel = nm3.XRayTransport3.create(monte, det, chXR)
brXR = nm3.BremsstrahlungXRayGeneration3.create(monte)
brem = nm3.XRayTransport3.create(monte, det, brXR)
fxg3 = nm3.FluorescenceXRayGeneration3.create(monte, chXR)
chSF = nm3.XRayTransport3.create(monte, det, fxg3)
brSF = nm3.XRayTransport3.create(monte, det, nm3.FluorescenceXRayGeneration3.create(monte, brXR))
# here is where we run the simulation
monte.runMultipleTrajectories(nTraj)
spec = det.getSpectrum((lt*pc*1.0e-9) / (nTraj * epq.PhysicalConstants.ElectronCharge))
props = spec.getProperties()
props.setNumericProperty(epq.SpectrumProperties.LiveTime, lt)
props.setNumericProperty(epq.SpectrumProperties.FaradayBegin, pc)
props.setNumericProperty(epq.SpectrumProperties.FaradayEnd, pc)
props.setNumericProperty(epq.SpectrumProperties.BeamEnergy, e0)
spcName = "x = %.3f um" % x
epq.SpectrumUtils.rename(spec, spcName)
spec = epq.SpectrumUtils.addNoiseToSpectrum(spec, 1.0)
# display(spec)
a = jmg.compKRs(spec, stds, trs, det, e0)
iCount += 1
print(iCount)
lKlin.append(round(a[0], iDigits))
lKblk.append(round(a[1], iDigits))
basFile ="%gnm-%s-in-%gum-%s-%gkV-%g-Traj.csv" % (nmLinWid, linMat, umBlock, blkMat, e0, nTraj)
strOutFile = outDir + "/" + basFile
f=open(strOutFile, 'w')
strLine = hdr + '\n'
f.write(strLine)
for i in range(iCount):
strLine = "%.5f" % lX[i] + ","
strLine = strLine + "%.5f" % lKlin[i] + ","
strLine = strLine + "%.5f" % lKblk[i] + "\n"
f.write(strLine)
f.close()
def simLineInMatrix3(lin, linMat, blk, blkMat, nmLinWid, umBlock, nPts, trs, outDir, hdr, det, e0, lt, pc, withPoisson=True, nTraj=100, sf=True, bf=True, iDigits=5, bVerbose=False, xtraParams={}):
"""simLineInMatrix3(lin, linMat, blk, blkMat, nmLinWid, umBlock, nPts,
trs, outDir, hdr, det, e0, lt, pc, withPoisson=True, nTraj=nTraj,
sf=True, bf=True, iDigits=5, bVerbose=False, xtraParams={})
Simulate a line of width `nmLinWid' nm at the center of a block of
`umBlock' microns. The line is of material `lin' with a name `linMat'.
The block is of material `blk' with a name `blkMat'.
    We analyze a list `trs' of transitions, writing the K-ratios to a
.csv file with a header `hdr'. We use the detector `det', voltage `e0'
(kV) and live time `lt' sec and probe current `pc' nA. This will
compute the standard spectra, compute the spectra for nPts+1 from
-nPts/2 ... 0 ...nPts/2 times the block size. It will then compute the
K-ratios for each spectrum and write them to a file `name' in outDir
with a header `hdr' that matches the transition order.
"""
# order is order of trs..
sc = 1.0e-6 # scale from microns to meters for positions
dose = lt*pc
    lX = [] # an array for positions
    lKlin = [] # an array for the K-ratio of the line
    lKblk = [] # an array for the K-ratio of the block. Titles correspond to the hdr string
umLine = nmLinWid * 1.0e-3
# start clean
dt2.DataManager.clearSpectrumList()
# create the standards
linStd = simulateBulkStandard(lin, linMat, det, e0, lt, pc, withPoisson=withPoisson, nTraj=nTraj, sf=sf, bf=bf, xtraParams={})
dt2.display(linStd)
    blkStd = simulateBulkStandard(blk, blkMat, det, e0, lt, pc, withPoisson=withPoisson, nTraj=nTraj, sf=sf, bf=bf, xtraParams={})
dt2.display(blkStd)
lStd = {"El":dt2.element(linMat), "Spc":linStd}
bStd = {"El":dt2.element(blkMat), "Spc":blkStd}
stds = [lStd, bStd] # note: put the transitions in this order
iCount = 0
for x in range(-nPts/2, (nPts/2)+1, 1):
xv = sc*x*umBlock/nPts
lX.append(round(x*umBlock/nPts, iDigits))
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(trs, charAccum=sf, charFluorAccum=sf, bremFluorAccum=bf))
xtraParams.update(mc3.configureOutput(outDir))
xtraParams.update(mc3.configureBeam(xv, 0, -0.099, 1.0))
spec = mc3.embeddedRectangle(lin, [umLine*sc, umBlock*sc, umBlock*sc], blk, 0, det, e0, withPoisson=withPoisson, nTraj=nTraj, dose=dose, sf=sf, bf=bf, xtraParams=xtraParams)
props = spec.getProperties()
props.setNumericProperty(epq.SpectrumProperties.LiveTime, lt)
props.setNumericProperty(epq.SpectrumProperties.FaradayBegin, pc)
props.setNumericProperty(epq.SpectrumProperties.FaradayEnd, pc)
props.setNumericProperty(epq.SpectrumProperties.BeamEnergy, e0)
spcName = "x = %.3f um" % x
epq.SpectrumUtils.rename(spec, spcName)
spec = epq.SpectrumUtils.addNoiseToSpectrum(spec, 1.0)
# display(spec)
a = jmg.compKRs(spec, stds, trs, det, e0)
iCount += 1
print(iCount)
lKlin.append(round(a[0], iDigits))
lKblk.append(round(a[1], iDigits))
basFile ="%gnm-%s-in-%gum-%s-%gkV-%g-Traj.csv" % (nmLinWid, linMat, umBlock, blkMat, e0, nTraj)
strOutFile = outDir + "/" + basFile
f=open(strOutFile, 'w')
strLine = hdr + '\n'
f.write(strLine)
for i in range(iCount):
strLine = "%.5f" % lX[i] + ","
strLine = strLine + "%.5f" % lKlin[i] + ","
strLine = strLine + "%.5f" % lKblk[i] + "\n"
f.write(strLine)
f.close()
def simLineInMatrixLimScan(lin, linMat, blk, blkMat, nmLinWid, umBlock,nmScan, nPts, trs, outDir, hdr, det, e0, lt, pc, withPoisson=True, nTraj=100, sf=True, bf=True, iDigits=5, bVerbose=False, xtraParams={}):
"""simLineInMatrixLimScan(lin, linMat, blk, blkMat, nmLinWid, umBlock,
nmScan, nPts, trs, outDir, hdr, det, e0, lt, pc, withPoisson=True,
nTraj=nTraj, sf=True, bf=True, iDigits=5, bVerbose=False,
xtraParams={})
Simulate a line of width `nmLinWid' nm at the center of a block of
`umBlock' microns. The line is of material `lin' with a name `linMat'.
The block is of material `blk' with a name `blkMat'. We step a total
distance of nmScan across the center of the line.
    We analyze a list `trs' of transitions, writing the K-ratios to a
.csv file with a header `hdr'. We use the detector `det', voltage `e0'
(kV) and live time `lt' sec and probe current `pc' nA. This will
compute the standard spectra, compute the spectra the scanned
region. It will then compute the K-ratios for each spectrum and write
them to a file `name' in outDir with a header `hdr' that matches the
transition order.
"""
# order is order of trs..
sc = 1.0e-6 # scale from microns to meters for positions
dose = lt*pc
    lX = [] # an array for positions
    lKlin = [] # an array for the K-ratio of the line
    lKblk = [] # an array for the K-ratio of the block. Titles correspond to the hdr string
umLine = nmLinWid * 1.0e-3
# start clean
dt2.DataManager.clearSpectrumList()
# create the standards
linStd = simulateBulkStandard(lin, linMat, det, e0, lt, pc, withPoisson=withPoisson, nTraj=nTraj, sf=sf, bf=bf, xtraParams={})
dt2.display(linStd)
    blkStd = simulateBulkStandard(blk, blkMat, det, e0, lt, pc, withPoisson=withPoisson, nTraj=nTraj, sf=sf, bf=bf, xtraParams={})
dt2.display(blkStd)
lStd = {"El":dt2.element(linMat), "Spc":linStd}
bStd = {"El":dt2.element(blkMat), "Spc":blkStd}
stds = [lStd, bStd] # note: put the transitions in this order
iCount = 0
for x in range(-nPts/2, (nPts/2)+1, 1):
xPosNm = x * nmScan / nPts
lX.append(round(xPosNm, iDigits))
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(trs, charAccum=sf, charFluorAccum=sf, bremFluorAccum=bf))
xtraParams.update(mc3.configureOutput(outDir))
xtraParams.update(mc3.configureBeam(xPosNm*1.0e-09, 0, -0.099, 1.0))
spec = mc3.embeddedRectangle(lin, [umLine*sc, umBlock*sc, umBlock*sc], blk, 0, det, e0, withPoisson=withPoisson, nTraj=nTraj, dose=dose, sf=sf, bf=bf, xtraParams=xtraParams)
props = spec.getProperties()
props.setNumericProperty(epq.SpectrumProperties.LiveTime, lt)
props.setNumericProperty(epq.SpectrumProperties.FaradayBegin, pc)
props.setNumericProperty(epq.SpectrumProperties.FaradayEnd, pc)
props.setNumericProperty(epq.SpectrumProperties.BeamEnergy, e0)
spcName = "x = %.3f um" % x
epq.SpectrumUtils.rename(spec, spcName)
spec = epq.SpectrumUtils.addNoiseToSpectrum(spec, 1.0)
# display(spec)
a = jmg.compKRs(spec, stds, trs, det, e0)
iCount += 1
print(iCount, xPosNm)
lKlin.append(round(a[0], iDigits))
lKblk.append(round(a[1], iDigits))
basFile ="%gnm-%s-in-%gum-%s-%gkV-%g-Traj.csv" % (nmLinWid, linMat, umBlock, blkMat, e0, nTraj)
strOutFile = outDir + "/" + basFile
f=open(strOutFile, 'w')
strLine = hdr + '\n'
f.write(strLine)
for i in range(iCount):
strLine = "%.3f" % lX[i] + ","
strLine = strLine + "%.5f" % lKlin[i] + ","
strLine = strLine + "%.5f" % lKblk[i] + "\n"
f.write(strLine)
f.close()
def lineInMatrix(lin, blk, nmLinWid, umBlock, det, e0=20.0, withPoisson=True, nTraj=100, dose=120.0, sf=True, bf=True, xtraParams={}):
"""lineInMatrix(lin, blk, nmLinWid, umBlock, det,
e0=20.0, withPoisson=True, nTraj=100, dose=120.0,
sf=True, bf=True,
xtraParams={}"""
def buildBlock(monte, chamber, origin, buildParams):
lin = buildParams["Line"]
blk = buildParams["Block"]
nmLinWid = buildParams["Width"]
umBlock = buildParams["Size"]
sc = 1.0e-6 # scale from microns to meters for positions
# createBlock(double[] dims, double[] point, double phi, double theta, double psi)
# createBlock - Create a block of:
# dimensions specified in dims,
# centered at point,
# then rotated by the euler angles phi, theta, psi.
block = nm.MultiPlaneShape.createBlock([umBlock*sc, umBlock*sc, umBlock*sc],[0.0,0.0, 0.5*umBlock*sc],0.0,0.0,0.0)
matrix = monte.addSubRegion(monte.getChamber(), blk, block)
monte.addSubRegion(matrix, lin, nm.MultiPlaneShape.createBlock([1.0e-9*nmLinWid, umBlock*sc, umBlock*sc],[0.0, 0.0, 0.5*umBlock*sc],0.0,0.0,0.0))
tmp = u"MC3-sim-%g-nm-%s-line-in-%g-um-%s-block-%0.1f-kV" % (nmLinWid, lin, umBlock, blk, e0)
params = { "Line": lin, "Width" : nmLinWid, "Block" : blk, "Size" : umBlock }
return (mc3.base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBlock, params, xtraParams))
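# --- Added usage sketch (not part of the original script); the detector name
# and materials are assumptions. Note that nmLinWid is in nm and umBlock in
# microns, as the parameter names above indicate.
#
#     import dtsa2.jmMC3 as jm3
#     det = findDetector("Oxford p4 05eV 2K")
#     cu = material("Cu", density=8.92)
#     si = material("Si", density=2.329)
#     spc = jm3.lineInMatrix(cu, si, 100.0, 5.0, det, e0=20.0,
#                            nTraj=1000, dose=120.0)
#     spc.display()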
| 48.15805
| 287
| 0.648069
|
17346ae17b624cebe10b617a49e3e56ee067e916
| 758
|
py
|
Python
|
korean/__main__.py
|
sublee/korean
|
99c7808300bcd90d9fb183c99b925f525df0fd79
|
[
"BSD-3-Clause"
] | 74
|
2015-01-24T16:39:51.000Z
|
2022-03-22T03:16:28.000Z
|
korean/__main__.py
|
lqez/korean
|
7e626d4b9dec181420e80c06bfce1a99fb098180
|
[
"BSD-3-Clause"
] | 6
|
2016-08-03T09:50:08.000Z
|
2021-02-13T16:24:14.000Z
|
korean/__main__.py
|
lqez/korean
|
7e626d4b9dec181420e80c06bfce1a99fb098180
|
[
"BSD-3-Clause"
] | 13
|
2015-03-10T18:52:44.000Z
|
2018-11-10T14:52:06.000Z
|
# -*- coding: utf-8 -*-
"""
korean.__main__
~~~~~~~~~~~~~~~
Command-line tools.
:copyright: (c) 2012-2013 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import contextlib
import sys
from baker import Baker
from . import l10n
baker = Baker()
@contextlib.contextmanager
def file_or_stdin(path):
f = open(path) if path is not None else sys.stdin
yield f
f.close()
@baker.command
def proofread(path=None, charset='utf-8'):
with file_or_stdin(path) as f:
for line in f.xreadlines():
print l10n.proofread(line.decode(charset)),
@baker.command
def validate(path=None, charset='utf-8'):
pass
if __name__ == '__main__':
baker.run()
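# --- Added usage note (not part of the original module). The invocations are
# illustrative; baker exposes the keyword argument as a --charset option.
#
#     python -m korean proofread input.txt --charset utf-8
#     cat input.txt | python -m korean proofread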
| 17.227273
| 55
| 0.653034
|
484ddb0bd4ca3ac1a4b3813351829a9b131c6392
| 5,042
|
py
|
Python
|
google/cloud/compute_v1/services/zones/transports/base.py
|
igor-solomatov/python-compute
|
16e7294cd536af9a8bf8e4e99219a883339aa955
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/zones/transports/base.py
|
igor-solomatov/python-compute
|
16e7294cd536af9a8bf8e4e99219a883339aa955
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/zones/transports/base.py
|
igor-solomatov/python-compute
|
16e7294cd536af9a8bf8e4e99219a883339aa955
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ZonesTransport(abc.ABC):
"""Abstract transport class for Zones."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
}
@property
def get(
self,
) -> typing.Callable[
[compute.GetZoneRequest],
typing.Union[compute.Zone, typing.Awaitable[compute.Zone]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> typing.Callable[
[compute.ListZonesRequest],
typing.Union[compute.ZoneList, typing.Awaitable[compute.ZoneList]],
]:
raise NotImplementedError()
__all__ = ("ZonesTransport",)
| 36.273381
| 86
| 0.653114
|
d6cd0357b3aa6c8d47c6abac2d99d3ea2afbdd62
| 37,251
|
py
|
Python
|
dcm/route.py
|
joshy/dcm
|
7ee44b93f2d3c3f3638244791da9fdf9c331a9bb
|
[
"MIT"
] | 11
|
2021-05-07T08:37:56.000Z
|
2022-03-23T17:05:08.000Z
|
dcm/route.py
|
joshy/dcm
|
7ee44b93f2d3c3f3638244791da9fdf9c331a9bb
|
[
"MIT"
] | 4
|
2021-08-05T02:18:09.000Z
|
2022-03-17T00:24:13.000Z
|
dcm/route.py
|
joshy/dcm
|
7ee44b93f2d3c3f3638244791da9fdf9c331a9bb
|
[
"MIT"
] | 2
|
2021-08-04T06:33:42.000Z
|
2022-01-12T12:09:22.000Z
|
"""Define static/dynamic routes for copying DICOM data between storage abstractions"""
from __future__ import annotations
import asyncio, logging
from copy import copy, deepcopy
from datetime import datetime
from dataclasses import dataclass, field
from typing import (
Optional,
Tuple,
Callable,
Dict,
List,
Union,
Iterable,
AsyncIterator,
AsyncContextManager,
Any,
cast,
)
from contextlib import asynccontextmanager
import janus
from pydicom import Dataset
from .lazyset import LazySet, FrozenLazySet
from .query import (
QueryLevel,
QueryResult,
DataNode,
InconsistentDataError,
get_uid,
minimal_copy,
)
from .filt import Filter, DataTransform, get_transform, Selector
from .report import (
CountableReport,
MultiListReport,
MultiDictReport,
MultiKeyedError,
ProgressHookBase,
)
from .util import DuplicateDataError, TomlConfigurable
from .net import DicomOpReport, IncomingDataError, IncomingErrorType
from .store import DataBucket, DataRepo, TransferMethod, LocalWriteReport
log = logging.getLogger(__name__)
class NoValidTransferMethodError(Exception):
"""Error raised when we are unable to select a valid transfer method"""
def __init__(
self,
src_dest_pair: Optional[
Tuple[DataBucket[Any, Any], DataBucket[Any, Any]]
] = None,
):
self.src_dest_pair = src_dest_pair
def __str__(self) -> str:
if self.src_dest_pair is None:
return "No valid transfer method for one or more routes"
else:
return f"No valid transfer method between {self.src_dest_pair[0]} and {self.src_dest_pair[1]}"
# TODO: Have been working under the assumption the filter would be applied
# before resolving dynamic routes, but it is more likely and common
# that we would want to route on the original data, since we may have
# a rather broad filter (i.e. anonymization) that screws up the elements
# used for routing.
#
# Any logic that would go into a pre-filter could just be placed in the
# dynamic routing function. We might just need to duplicate that logic
# into a filter if we also want to persist the changes which is an okay
# trade-off compared to the complexity of allowing both pre/post filters
#
# We do lose the ability to specify which elements might be
# modified, how they might be modified, and what their dependencies are.
# Do we implicitly disallow uninvertible shenanigans in the dynamic routing
# function?
@dataclass(frozen=True)
class Route:
"""Abstract base class for all Routes
The main functionality of routes is to map datasets to destinations.
Routes can have a filter associated with them, which take a dataset as
input and return one as output. The dataset can be modified and None can be
returned to reject the dataset.
"""
filt: Optional[Filter] = None
"""Streaming data filter for editing and rejecting data sets"""
def get_dests(
self, data_set: Dataset
) -> Optional[Tuple[DataBucket[Any, Any], ...]]:
"""Return the destintations for the `data set`
Must be implemented by all subclasses."""
raise NotImplementedError
def get_filtered(self, data_set: Dataset) -> Optional[Dataset]:
if self.filt is None:
return data_set
return self.filt(data_set)
@dataclass(frozen=True)
class _StaticBase:
dests: Tuple[DataBucket[Any, Any], ...]
"""Static tuple of destinations"""
methods: Tuple[TransferMethod, ...] = (TransferMethod.PROXY,)
"""The transfer methods to use, in order of preference
    This will automatically be pared down to the methods supported by all the
dests (or just allow PROXY if we have a filter). If no valid transfer
methods are given a `NoValidTransferMethodError` will be raised.
"""
@dataclass(frozen=True)
class StaticRoute(Route, _StaticBase, TomlConfigurable["StaticRoute"]):
"""Static route that sends all (unfiltered) data to same dests"""
def __post_init__(self) -> None:
if self.filt is not None:
if TransferMethod.PROXY not in self.methods:
raise NoValidTransferMethodError()
avail_methods = [TransferMethod.PROXY]
else:
avail_methods = []
for meth in self.methods:
if all(meth in d._supported_methods for d in self.dests):
avail_methods.append(meth)
if len(avail_methods) == 0:
raise NoValidTransferMethodError()
object.__setattr__(self, "dests", tuple(self.dests))
object.__setattr__(self, "methods", tuple(avail_methods))
@classmethod
def from_toml_dict(cls, toml_dict: Dict[str, Any]) -> StaticRoute:
kwargs = deepcopy(toml_dict)
methods = kwargs.get("methods")
if methods is not None:
kwargs["methods"] = tuple(TransferMethod[m.upper()] for m in methods)
return cls(**kwargs)
def get_dests(self, data_set: Dataset) -> Tuple[DataBucket[Any, Any], ...]:
return self.dests
def get_method(self, src: DataBucket[Any, Any]) -> TransferMethod:
for method in self.methods:
if method in src._supported_methods:
return method
raise NoValidTransferMethodError()
def __str__(self) -> str:
return "Static: %s" % ",".join(str(d) for d in self.dests)
@dataclass(frozen=True)
class _DynamicBase:
lookup: Callable[[Dataset], Optional[Tuple[DataBucket[Any, Any], ...]]]
"""Callable takes a dataset and returns destinations"""
route_level: QueryLevel = QueryLevel.STUDY
"""The level in the DICOM hierarchy we are making routing decisions at"""
required_elems: FrozenLazySet[str] = field(default_factory=FrozenLazySet)
"""DICOM elements that we require to make a routing decision"""
dest_methods: Optional[
Dict[Optional[DataBucket[Any, Any]], Tuple[TransferMethod, ...]]
] = None
"""Specify transfer methods for (some) dests
Use `None` as the key to specify the default transfer methods for all dests
not explicitly listed.
Only respected when pre-routing is used. Dynamic routing can only proxy.
"""
@dataclass(frozen=True)
class DynamicRoute(Route, _DynamicBase):
"""Dynamic route which determines destinations based on the data.
Routing decisions are made before applying the filter to the data.
"""
def __post_init__(self) -> None:
if self.dest_methods is not None:
avail_meths: Dict[
Optional[DataBucket[Any, Any]], Tuple[TransferMethod, ...]
] = {}
for dest, methods in self.dest_methods.items():
if self.filt is not None:
if TransferMethod.PROXY not in methods:
raise NoValidTransferMethodError()
avail_meths[dest] = (TransferMethod.PROXY,)
elif dest is None:
avail_meths[dest] = methods
else:
meths = tuple(m for m in methods if m in dest._supported_methods)
if len(meths) == 0:
raise NoValidTransferMethodError()
avail_meths[dest] = meths
object.__setattr__(self, "dest_methods", avail_meths)
if self.route_level not in QueryLevel:
raise ValueError("Invalid route_level: %s" % self.route_level)
if not isinstance(self.required_elems, FrozenLazySet):
object.__setattr__(
self, "required_elems", FrozenLazySet(self.required_elems)
)
def get_dests(
self, data_set: Dataset
) -> Optional[Tuple[DataBucket[Any, Any], ...]]:
dests = self.lookup(data_set)
if dests is None:
return None
return tuple(dests)
def get_static_routes(self, data_set: Dataset) -> Optional[Tuple[StaticRoute, ...]]:
"""Resolve this dynamic route into one or more static routes"""
dests = self.lookup(data_set)
if dests is None:
return dests
dests = tuple(dests)
if self.dest_methods is not None:
meths_dests_map: Dict[
Tuple[TransferMethod, ...], List[DataBucket[Any, Any]]
] = {}
default_methods = self.dest_methods.get(None)
if default_methods is None:
default_methods = (TransferMethod.PROXY,)
for dest in dests:
d_methods = self.dest_methods.get(dest)
if d_methods is None:
d_methods = default_methods
if d_methods not in meths_dests_map:
meths_dests_map[d_methods] = []
meths_dests_map[d_methods].append(dest)
return tuple(
StaticRoute(tuple(sub_dests), filt=deepcopy(self.filt), methods=meths)
for meths, sub_dests in meths_dests_map.items()
)
else:
return (StaticRoute(dests, filt=deepcopy(self.filt)),)
def __str__(self) -> str:
return "Dynamic on: %s" % self.required_elems
@dataclass(frozen=True)
class SelectorDestMap(TomlConfigurable["SelectorDestMap"]):
"""Allow construction of dynamic routes from static config"""
routing_map: Tuple[Tuple[Selector, Tuple[DataBucket[Any, Any], ...]], ...]
"""One or more tuples of (selector, dests) pairs"""
default_dests: Optional[Tuple[DataBucket[Any, Any], ...]] = None
"""The default destinations to use when no selectors match"""
exclude: Optional[Tuple[Selector, ...]] = None
"""Exclude data at routing step (versus `filt` which is applied to each image)"""
stop_on_first: bool = True
"""Just return dests associated with first selector that matches"""
route_level: QueryLevel = QueryLevel.STUDY
"""The level in the DICOM hierarchy we are making routing decisions at"""
dest_methods: Optional[
Dict[Optional[DataBucket[Any, Any]], Tuple[TransferMethod, ...]]
] = None
"""Specify transfer methods for (some) dests
Use `None` as the key to specify the default transfer methods for all dests
not explicitly listed.
Only respected when pre-routing is used. Dynamic routing can only proxy.
"""
required_elems: FrozenLazySet[str] = field(
default_factory=FrozenLazySet, init=False
)
"""DICOM elements that we require to make a routing decision"""
filt: Optional[Filter] = None
"""Steaming data filter for editing and rejecting data sets"""
def __post_init__(self) -> None:
req_elems: LazySet[str] = LazySet()
for sel, _ in self.routing_map:
req_elems |= sel.get_read_elems()
if self.exclude:
for sel in self.exclude:
req_elems |= sel.get_read_elems()
object.__setattr__(self, "required_elems", FrozenLazySet(req_elems))
@classmethod
def from_toml_dict(cls, toml_dict: Dict[str, Any]) -> SelectorDestMap:
kwargs = deepcopy(toml_dict)
route_level = kwargs.get("route_level")
if route_level is not None:
kwargs["route_level"] = QueryLevel[route_level.upper()]
return cls(**kwargs)
def get_dynamic_route(self) -> DynamicRoute:
"""Return equivalent DynamicRoute object"""
def lookup_func(ds: Dataset) -> Optional[Tuple[DataBucket[Any, Any], ...]]:
res: List[DataBucket[Any, Any]] = []
if self.exclude:
if any(sel.test_ds(ds) for sel in self.exclude):
return None
for sel, dests in self.routing_map:
if sel.test_ds(ds):
if self.stop_on_first:
return dests
else:
res += dests
if not res:
return self.default_dests
return tuple(res)
return DynamicRoute(
lookup_func,
route_level=self.route_level,
required_elems=self.required_elems,
dest_methods=self.dest_methods,
filt=self.filt,
)
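# Editor-added illustrative note: SelectorDestMap pairs Selector objects with
# destination tuples and compiles them into a DynamicRoute. Hypothetical shape
# (selector and bucket construction are not shown in this module):
#   sd_map = SelectorDestMap(routing_map=((ct_selector, (research,)),),
#                            default_dests=(archive,))
#   route = sd_map.get_dynamic_route()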
class ProxyTransferError(Exception):
def __init__(
self,
store_errors: Optional[MultiKeyedError] = None,
inconsistent: Optional[Dict[StaticRoute, List[Tuple[Dataset, Dataset]]]] = None,
duplicate: Optional[Dict[StaticRoute, List[Tuple[Dataset, Dataset]]]] = None,
incoming_error: Optional[IncomingDataError] = None,
):
self.store_errors = store_errors
self.inconsistent = inconsistent
self.duplicate = duplicate
self.incoming_error = incoming_error
def __str__(self) -> str:
res = ["ProxyTransferError:"]
if self.inconsistent is not None:
res.append("%d inconsistent data sets" % len(self.inconsistent))
if self.duplicate is not None:
res.append("%d duplicate data sets" % len(self.duplicate))
if self.store_errors is not None:
for err in self.store_errors.errors:
res.append(str(err))
if self.incoming_error is not None:
res.append(str(self.incoming_error))
return "\n\t".join(res)
# TODO: Some annoying overlap with IncomingDataReport here, but not clear we
# can do much about it since we need a RetrieveReport when the src is
# remote, and we need the `sent` dict here to track data transforms.
#
# Can we make sure the same (minimized) data set is used in all report
# structures? Does that alleviate all concerns about duplication?
# TODO: Update keep_errors handling here. I guess the `add` method should
# return a bool like with the IncomingDataReports? Also, this means that
#       we might end up sending erroneous data, which can't be captured in the
# DataTransforms under `sent` here. I guess this is okay and mimics what
# happens in a RetrieveReport
#
class ProxyReport(CountableReport):
"""Abstract base class for reports on proxy transfers"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
n_expected: Optional[int] = None,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
):
self.keep_errors = keep_errors # type: ignore
self.sent: Dict[StaticRoute, DataTransform] = {}
self.inconsistent: Dict[StaticRoute, List[Tuple[Dataset, Dataset]]] = {}
self.duplicate: Dict[StaticRoute, List[Tuple[Dataset, Dataset]]] = {}
self._n_success = 0
super().__init__(description, meta_data, depth, prog_hook, n_expected)
@property
def keep_errors(self) -> Tuple[IncomingErrorType, ...]:
"""Whether or not we are forwarding inconsistent/duplicate data"""
return self._keep_errors
@keep_errors.setter
def keep_errors(self, val: Union[bool, Tuple[IncomingErrorType, ...]]) -> None:
if val == True:
self._keep_errors = tuple(IncomingErrorType)
elif val == False:
self._keep_errors = tuple()
else:
val = cast(Tuple[IncomingErrorType, ...], val)
self._keep_errors = val
@property
def n_success(self) -> int:
return self._n_success
@property
def n_errors(self) -> int:
n_errors = 0
if not self.keep_errors:
n_errors += self.n_inconsistent + self.n_duplicate
return n_errors
@property
def n_warnings(self) -> int:
n_warn = 0
if self.keep_errors:
n_warn += self.n_inconsistent + self.n_duplicate
return n_warn
@property
def n_sent(self) -> int:
"""Number of times datasets were sent out"""
res = sum(len(trans.new) * len(sr.dests) for sr, trans in self.sent.items())
if self.keep_errors:
res += sum(len(x) * len(sr.dests) for sr, x in self.inconsistent.items())
res += sum(len(x) * len(sr.dests) for sr, x in self.duplicate.items())
return res
@property
def n_inconsistent(self) -> int:
return sum(len(x) for _, x in self.inconsistent.items())
@property
def n_duplicate(self) -> int:
return sum(len(x) for _, x in self.duplicate.items())
@property
def n_reported(self) -> int:
"""Number store results that have been reported so far"""
raise NotImplementedError
@property
def all_reported(self) -> bool:
"""True if all sent data sets have a reported result"""
assert self.n_reported <= self.n_sent
return self.n_sent == self.n_reported
def add(self, route: StaticRoute, old_ds: Dataset, new_ds: Dataset) -> bool:
"""Add the route with pre/post filtering dataset to the report"""
self.count_input()
if route not in self.sent:
self.sent[route] = get_transform(QueryResult(QueryLevel.IMAGE), route.filt)
try:
self.sent[route].add(old_ds, new_ds)
except InconsistentDataError:
if route not in self.inconsistent:
self.inconsistent[route] = []
self.inconsistent[route].append((old_ds, new_ds))
return IncomingErrorType.INCONSISTENT in self._keep_errors
except DuplicateDataError:
if route not in self.duplicate:
self.duplicate[route] = []
self.duplicate[route].append((old_ds, new_ds))
return IncomingErrorType.DUPLICATE in self._keep_errors
else:
self._n_success += 1
return True
def log_issues(self) -> None:
"""Produce log messages for any warning/error statuses"""
n_inconsist = self.n_inconsistent
if n_inconsist:
if self.keep_errors:
log.warning("Sent %d inconsistent data sets" % n_inconsist)
else:
log.error("Skipped %d inconsistent data sets" % n_inconsist)
n_duplicate = self.n_duplicate
if n_duplicate:
if self.keep_errors:
log.warning("Sent %d duplicate data sets" % n_duplicate)
else:
log.error("Skipped %d duplicate data sets" % n_duplicate)
def check_errors(self) -> None:
"""Raise an exception if any errors have occured so far"""
if self.n_errors:
inconsist = None
if self.inconsistent:
inconsist = self.inconsistent
dupes = None
if self.duplicate:
dupes = self.duplicate
raise ProxyTransferError(inconsistent=inconsist, duplicate=dupes)
def clear(self) -> None:
self.sent.clear()
self.inconsistent.clear()
self.duplicate.clear()
StoreReportType = Union[DicomOpReport, LocalWriteReport]
class DynamicTransferReport(ProxyReport):
"""Track what data is being routed where and any store results"""
def __init__(
self,
description: Optional[str] = None,
meta_data: Optional[Dict[str, Any]] = None,
depth: int = 0,
prog_hook: Optional[ProgressHookBase[Any]] = None,
n_expected: Optional[int] = None,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
):
self.store_reports: MultiDictReport[
DataBucket[Any, Any], MultiListReport[StoreReportType]
] = MultiDictReport(prog_hook=prog_hook)
super().__init__(
description, meta_data, depth, prog_hook, n_expected, keep_errors
)
@property
def n_success(self) -> int:
return super().n_success + self.store_reports.n_success
@property
def n_errors(self) -> int:
return super().n_errors + self.store_reports.n_errors
@property
def n_warnings(self) -> int:
return super().n_warnings + self.store_reports.n_warnings
@property
def n_reported(self) -> int:
return self.store_reports.n_input
def add_store_report(
self, dest: DataBucket[Any, Any], store_report: StoreReportType
) -> None:
"""Add a DicomOpReport to keep track of"""
if dest not in self.store_reports:
self.store_reports[dest] = MultiListReport(prog_hook=self._prog_hook)
self.store_reports[dest].append(store_report)
def log_issues(self) -> None:
"""Produce log messages for any warning/error statuses"""
super().log_issues()
self.store_reports.log_issues()
def check_errors(self) -> None:
"""Raise an exception if any errors have occured so far"""
if self.n_errors:
err = None
try:
super().check_errors()
except ProxyTransferError as e:
err = e
else:
err = ProxyTransferError()
try:
self.store_reports.check_errors()
except MultiKeyedError as e:
err.store_errors = e
raise err
def clear(self) -> None:
"""Clear current info about data sets we have results for"""
# TODO: If n_sent != n_reported here we will go out of sync. I guess
# this would need to be managed at a higher level if it is
# needed. Not clear if it makes sense to do anything about it
# here.
super().clear()
self.store_reports.clear()
@dataclass
class _CacheEntry:
"""Entry in a SendAssociationCache"""
ctx_mgr: AsyncContextManager["janus._AsyncQueueProxy[Dataset]"]
send_q: "janus._AsyncQueueProxy[Dataset]"
op_report: DicomOpReport
last_use: datetime
# TODO: Make generic association caching in `net` module supporting
# query/move/send. Could then use that everywhere, and use it to
# manage max association limits on any node.
class SendAssociationCache:
def __init__(self, timeout: float = 30.0):
"""Keeps cache of recent associations"""
self._timeout = timeout
self._cache: Dict[DataBucket[Any, Any], _CacheEntry] = {}
@property
def next_timeout(self) -> float:
"""Number of seconds until the next cache entry will timeout"""
next_timeout = self._timeout
now = datetime.now()
for cache_entry in self._cache.values():
td = now - cache_entry.last_use
timeout = max(self._timeout - td.total_seconds(), 0)
if timeout < next_timeout:
next_timeout = timeout
return next_timeout
async def send(
self, ds: Dataset, dest: DataBucket[Any, Any]
) -> Optional[DicomOpReport]:
"""Send a data set to dests, utilizing the cache of active associations"""
res = None
cache_entry = self._cache.get(dest, None)
if cache_entry is None:
op_report = dest.get_empty_send_report()
res = op_report
ctx_mgr = dest.send(op_report)
send_q = await ctx_mgr.__aenter__()
cache_entry = _CacheEntry(ctx_mgr, send_q, op_report, datetime.now())
self._cache[dest] = cache_entry
else:
cache_entry.last_use = datetime.now()
send_q = cache_entry.send_q
await send_q.put(ds)
return res
async def update_cache(self) -> Dict[DataBucket[Any, Any], DicomOpReport]:
"""Close associations that haven't been used in a while
Returns reports for all closed associations.
"""
curr_time = datetime.now()
reports = {}
for dest, cache_entry in self._cache.items():
age = curr_time - cache_entry.last_use
if age.total_seconds() > self._timeout:
await cache_entry.ctx_mgr.__aexit__(None, None, None)
reports[dest] = cache_entry.op_report
for dest in reports:
del self._cache[dest]
return reports
async def empty_cache(self) -> Dict[DataBucket[Any, Any], DicomOpReport]:
"""Close all associations
Returns dict of dest/op_report for all closed associations.
"""
reports = {}
for dest, cache_entry in self._cache.items():
await cache_entry.ctx_mgr.__aexit__(None, None, None)
reports[dest] = cache_entry.op_report
self._cache.clear()
return reports
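# Editor-added illustrative usage (placeholders: `dest` is a DataBucket and
# `ds` a pydicom Dataset; this must run inside an event loop):
#   cache = SendAssociationCache(timeout=30.0)
#   op_report = await cache.send(ds, dest)   # opens or reuses an association
#   await cache.update_cache()               # closes associations idle too long
#   reports = await cache.empty_cache()      # closes everything at shutdown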
class InsufficientElemsError(Exception):
"""We don't have the required DICOM elements for the operation"""
class Router:
"""Work with multiple dynamic/static routes"""
def __init__(self, routes: Iterable[Route], assoc_cache_time: int = 20):
self._routes = tuple(routes)
self._assoc_cache_time = assoc_cache_time
self._static: List[StaticRoute] = []
self._dynamic: List[DynamicRoute] = []
self._route_level = QueryLevel.PATIENT
req_elems: LazySet[str] = LazySet()
self._all_proxy = True
for route in routes:
if isinstance(route, DynamicRoute):
self._dynamic.append(route)
req_elems |= route.required_elems
self._route_level = max(self._route_level, route.route_level)
if route.dest_methods is not None:
for methods in route.dest_methods.values():
if TransferMethod.PROXY not in methods:
self._all_proxy = False
elif isinstance(route, StaticRoute):
self._static.append(route)
if TransferMethod.PROXY not in route.methods:
self._all_proxy = False
else:
raise ValueError("Unrecognized route type")
self._required_elems = FrozenLazySet(req_elems)
if len(self._dynamic) == 0:
self._route_level = QueryLevel.STUDY
elif not self.can_pre_route and not self.can_dyn_route:
raise NoValidTransferMethodError()
@property
def required_elems(self) -> FrozenLazySet[str]:
"""All required DICOM elements for making routing decisions"""
return self._required_elems
@property
def has_dynamic_routes(self) -> bool:
return len(self._dynamic) != 0
@property
def can_pre_route(self) -> bool:
return self._route_level != QueryLevel.IMAGE
@property
def can_dyn_route(self) -> bool:
return self._all_proxy
def get_filter_dest_map(
self, ds: Dataset
) -> Dict[Optional[Filter], Tuple[DataBucket[Any, Any], ...]]:
"""Get dict mapping filters to lists of destinations"""
selected: Dict[Optional[Filter], List[DataBucket[Any, Any]]] = {}
for route in self._routes:
dests = route.get_dests(ds)
if not dests:
continue
filt = route.filt
if filt not in selected:
selected[filt] = list(dests)
else:
selected[filt] += dests
return {k: tuple(v) for k, v in selected.items()}
async def pre_route(
self,
src: DataRepo[Any, Any, Any, Any],
query: Optional[Dataset] = None,
query_res: QueryResult = None,
) -> Dict[Tuple[StaticRoute, ...], QueryResult]:
"""Pre-calculate any dynamic routing for data on `src`
If DICOM elements needed for routing decisions can't be queried for, we
will retrieve an example data set for that study.
Parameters
----------
src
The data source
query
A query that defines the data to route
query_res
A QueryResult that defines the data to route
Returns
-------
result : dict
Maps tuples of StaticRoute objects to QueryResults defining all of
the data that should be sent to those routes.
"""
route_level = self._route_level
if route_level == QueryLevel.IMAGE:
raise ValueError("Can't pre-route at IMAGE level")
# Try to get required DICOM elements by doing a query if needed
query, query_res = await self._fill_qr(src, query, query_res)
# Nothing to do...
if len(self._dynamic) == 0:
return {tuple(self._static): query_res}
log.info("Trying to resolve dynamic routes with queries")
# Iteratively try to extract example data sets with all the elements
# needed for routing from our QueryResult, while also performing higher
# level-of-detail queries as needed. In the end the missing_qr will
# specify a single image for each chunk of data we don't have an
# example data set for
example_data: Dict[str, Dataset] = {}
missing_qr = query_res
while True:
new_missing_qr = QueryResult(level=missing_qr.level)
for pth, sub_uids in missing_qr.walk():
if pth.level < route_level:
continue
if pth.level != missing_qr.level:
# We only want to visit one sub-element
# TODO: Allow user defined sorting here?
del sub_uids[1:]
continue
lvl_uid = pth.uids[-1]
ds = deepcopy(missing_qr[lvl_uid])
for k in self.required_elems:
if k not in ds:
new_missing_qr.add(ds)
break
else:
route_uid = pth.uids[route_level]
assert route_uid not in example_data
example_data[route_uid] = ds
missing_qr = new_missing_qr
if len(missing_qr) == 0 or missing_qr.level == QueryLevel.IMAGE:
break
missing_qr = await src.query(
QueryLevel(missing_qr.level + 1), query, missing_qr
)
# For any studies where we don't have example data, fetch some
if len(missing_qr) != 0:
log.info("Fetching example data to resolve dynamic routes")
async for ds in src.retrieve(missing_qr):
route_uid = get_uid(route_level, ds)
assert route_uid not in example_data
example_data[route_uid] = ds
assert len(example_data) == query_res.get_count(route_level)
# Resolve all dynamic routes into data specific static routes
res: Dict[Tuple[StaticRoute, ...], QueryResult] = {}
for route_uid, ds in example_data.items():
sub_routes = copy(self._static)
for route in self._dynamic:
static_routes = route.get_static_routes(ds)
if static_routes:
sub_routes.extend(static_routes)
if sub_routes:
sub_routes_tup = tuple(sub_routes)
if sub_routes_tup not in res:
res[sub_routes_tup] = QueryResult(query_res.level)
sub_qr = query_res.sub_query(DataNode(route_level, route_uid))
res[sub_routes_tup] |= sub_qr
else:
log.info("Skipping chunk at routing stage: %s", route_uid)
# TODO: Track this in report
log.info("All dynamic routes have been resolved")
return res
@asynccontextmanager
async def route(
self,
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]] = False,
report: Optional[DynamicTransferReport] = None,
) -> AsyncIterator["asyncio.Queue[Dataset]"]:
"""Produces queue where datasets can be put for dynamic routing
Parameters
----------
keep_errors
Set to true to send all data, even if it is inconsistent/duplicate
report
Pass a DynamicTransferReport in to be filled out on the fly
Provides insight into what data is being routed where
"""
if not self.can_dyn_route:
raise NoValidTransferMethodError()
data_q: "asyncio.Queue[Optional[Dataset]]" = asyncio.Queue()
route_task = asyncio.create_task(self._route(data_q, keep_errors, report))
try:
yield data_q # type: ignore
finally:
if not route_task.done():
await data_q.put(None)
await route_task
async def _route(
self,
data_q: "asyncio.Queue[Optional[Dataset]]",
keep_errors: Union[bool, Tuple[IncomingErrorType, ...]],
report: Optional[DynamicTransferReport],
) -> None:
if report is None:
extern_report = False
report = DynamicTransferReport()
else:
extern_report = True
report.keep_errors = keep_errors # type: ignore
assoc_cache = SendAssociationCache(self._assoc_cache_time)
try:
n_pushed = 0
while True:
try:
ds = await asyncio.wait_for(
data_q.get(), min(assoc_cache.next_timeout, 5.0)
)
except asyncio.TimeoutError:
await assoc_cache.update_cache()
continue
# TODO: Do we want this? Or should we just use task canceling?
# What happens if a user pushes None accidentally? Just
# use a different sentinel value?
if ds is None:
break
filter_dest_map = self.get_filter_dest_map(ds)
n_filt = len([f for f in filter_dest_map if f is not None])
# Only make copy of the data set if needed
if n_filt > 1:
orig_ds = deepcopy(ds)
else:
orig_ds = ds
min_old_ds = minimal_copy(ds)
for filt, dests in filter_dest_map.items():
static_route = StaticRoute(dests, filt=filt)
# Update report
if filt is not None:
filt_ds = filt(orig_ds)
if filt_ds is not None:
min_new_ds = minimal_copy(filt_ds)
else:
filt_ds = orig_ds
min_new_ds = min_old_ds
if filt_ds is None:
continue
if not report.add(static_route, min_old_ds, min_new_ds):
continue
# Initiate the transfers
coros = [assoc_cache.send(filt_ds, dest) for dest in dests]
log.debug("Router forwarding data set to %d dests" % len(dests))
op_reports = await asyncio.gather(*coros)
for op_report, dest in zip(op_reports, dests):
if op_report is not None:
report.add_store_report(dest, op_report)
n_pushed += 1
# Periodically check to avoid association timeouts under high
# traffic
if n_pushed % 100 == 0:
await assoc_cache.update_cache()
finally:
await assoc_cache.empty_cache()
report.done = True
if not extern_report:
report.log_issues()
report.check_errors()
async def _fill_qr(
self,
src: DataRepo[Any, Any, Any, Any],
query: Optional[Dataset],
query_res: Optional[QueryResult],
) -> Tuple[Dataset, QueryResult]:
"""Perform a query against the src if needed"""
if query is None:
query = Dataset()
req_elems = self.required_elems
if query_res is None:
level = self._route_level
else:
level = query_res.level
if level < self._route_level:
level = self._route_level
elif not req_elems:
# Nothing we need to query for
return (query, query_res)
elif req_elems.is_enumerable():
if query_res.prov.queried_elems is not None and all(
e in query_res.prov.queried_elems for e in req_elems
):
# All required elems were already queried for
return (query, query_res)
# Check if all required elems already exist
# TODO: Iterating every data set seems wasteful...
needs_query = False
for ds in query_res:
for elem in req_elems:
if elem not in ds:
log.debug("Router needs to query due to missing elements")
needs_query = True
break
if not needs_query:
return (query, query_res)
if req_elems.is_enumerable():
for e in req_elems:
setattr(query, e, "")
log.info("The Router is perfoming an intial query against the source: %s", src)
return (query, await src.query(level, query, query_res))
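# Editor-added illustrative sketch of driving a Router (all names below are
# placeholders for configured sources, destinations, and query results):
#   router = Router([StaticRoute((archive,)), ct_route])
#   plan = await router.pre_route(src)        # {(StaticRoute, ...): QueryResult}
#   async with router.route() as q:           # or per-image dynamic routing
#       async for ds in src.retrieve(qr):
#           await q.put(ds)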
| 37.818274
| 106
| 0.603259
|
dbad52d0aff2477d9d512f8f9e1a028a2159b08f
| 517
|
py
|
Python
|
project/urls.py
|
aschokking/angular_django_template
|
50e92155a0e6c3e9926ee65d41bf8cbc0a7cc4cf
|
[
"MIT"
] | null | null | null |
project/urls.py
|
aschokking/angular_django_template
|
50e92155a0e6c3e9926ee65d41bf8cbc0a7cc4cf
|
[
"MIT"
] | null | null | null |
project/urls.py
|
aschokking/angular_django_template
|
50e92155a0e6c3e9926ee65d41bf8cbc0a7cc4cf
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers, serializers, viewsets
from django.views.generic import TemplateView
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="base.html"), name='app'),
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| 34.466667
| 83
| 0.746615
|
f993b64db1cfa67346a49dbe23b818504ab2b326
| 13,817
|
py
|
Python
|
specutils/tests/test_template_comparison.py
|
olebole/specutils
|
566de5cec00bd1198f1275ce74b1e261b61813af
|
[
"BSD-3-Clause"
] | null | null | null |
specutils/tests/test_template_comparison.py
|
olebole/specutils
|
566de5cec00bd1198f1275ce74b1e261b61813af
|
[
"BSD-3-Clause"
] | null | null | null |
specutils/tests/test_template_comparison.py
|
olebole/specutils
|
566de5cec00bd1198f1275ce74b1e261b61813af
|
[
"BSD-3-Clause"
] | null | null | null |
import astropy.units as u
import numpy as np
from astropy.nddata import StdDevUncertainty
from ..spectra.spectrum1d import Spectrum1D
from ..spectra.spectrum_collection import SpectrumCollection
from ..analysis import template_comparison
from astropy.tests.helper import quantity_allclose
def test_template_match_no_overlap():
"""
Test template_match when both observed and template spectra have no overlap on the wavelength axis.
"""
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
spec_axis_no_overlap = np.linspace(51, 102, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis_no_overlap,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec1)
# Create new spectrum for comparison
spec_result = Spectrum1D(spectral_axis=spec_axis,
flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
# assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
assert np.isnan(tm_result[1])
def test_template_match_minimal_overlap():
"""
Test template_match when both observed and template spectra have minimal overlap on the wavelength axis.
"""
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
spec_axis_min_overlap = np.linspace(50, 100, 50) * u.AA
spec_axis[49] = 51.0 * u.AA
spec_axis_min_overlap[0] = 51.0 * u.AA
spec = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis_min_overlap,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec1)
# Create new spectrum for comparison
spec_result = Spectrum1D(spectral_axis=spec_axis,
flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
# assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
    # TODO: investigate why all the elements in tm_result[1] are NaN even with overlap
assert np.isnan(tm_result[1])
def test_template_match_spectrum():
"""
Test template_match when both observed and template spectra have the same wavelength axis.
"""
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec1)
# Create new spectrum for comparison
spec_result = Spectrum1D(spectral_axis=spec_axis,
flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
assert tm_result[1] == 40093.28353756253
def test_template_match_with_resample():
"""
Test template_match when both observed and template spectra have different wavelength axis using resampling.
"""
np.random.seed(42)
# Create test spectra
spec_axis1 = np.linspace(0, 50, 50) * u.AA
spec_axis2 = np.linspace(0, 50, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis1,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis2,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec1)
# Create new spectrum for comparison
spec_result = Spectrum1D(spectral_axis=spec_axis1,
flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
np.testing.assert_almost_equal(tm_result[1], 40093.28353756253)
def test_template_match_list():
"""
Test template_match when template spectra are in a list.
"""
np.random.seed(42)
# Create test spectra
spec_axis1 = np.linspace(0, 50, 50) * u.AA
spec_axis2 = np.linspace(0, 50, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis1,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis2,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec2 = Spectrum1D(spectral_axis=spec_axis2,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Combine spectra into list
template_list = [spec1, spec2]
# Get result from template_match
tm_result = template_comparison.template_match(spec, template_list)
np.testing.assert_almost_equal(tm_result[1], 40093.28353756253)
# make sure that multiple template spectra will create a list of
# chi2 values, one per template.
assert len(tm_result) == 4
def test_template_match_spectrum_collection():
"""
Test template_match when template spectra are in a SpectrumCollection object.
"""
np.random.seed(42)
# Create test spectra
spec_axis1 = np.linspace(0, 50, 50) * u.AA
spec_axis2 = np.linspace(0, 50, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis1,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50)))
spec1 = Spectrum1D(spectral_axis=spec_axis2,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50)))
spec2 = Spectrum1D(spectral_axis=spec_axis2,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50)))
# Combine spectra into SpectrumCollection object
spec_coll = SpectrumCollection.from_spectra([spec1, spec2])
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec_coll)
np.testing.assert_almost_equal(tm_result[1], 40093.28353756253)
def test_template_match_multidim_spectrum():
"""
Test template matching with a multi-dimensional Spectrum1D object.
"""
np.random.seed(42)
# Create test spectra
spec_axis1 = np.linspace(0, 50, 50) * u.AA
spec_axis2 = np.linspace(0, 50, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis1,
flux=np.random.sample(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50)))
multidim_spec = Spectrum1D(spectral_axis=spec_axis2,
flux=np.random.sample((2, 50)) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample((2, 50))))
# Get result from template_match
tm_result = template_comparison.template_match(spec, multidim_spec)
np.testing.assert_almost_equal(tm_result[1], 250.26870401777543)
def test_template_unknown_redshift():
"""
Test template redshift when redshift is unknown.
"""
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
perm_flux = np.random.randn(50) * u.Jy
redshift = 2.5
# Observed spectrum
spec = Spectrum1D(spectral_axis=spec_axis * (1+redshift),
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Template spectrum
spec1 = Spectrum1D(spectral_axis=spec_axis,
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Test redshift parameters
min_redshift = .5
max_redshift = 5.5
delta_redshift = .25
redshift_trial_values = np.arange(min_redshift, max_redshift, delta_redshift)
tr_result = template_comparison.template_redshift(observed_spectrum=spec, template_spectrum=spec1,
redshift=redshift_trial_values)
assert len(tr_result) == 3
assert tr_result[0] == 2.5
def test_template_redshift_with_one_template_spectrum_in_match():
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
perm_flux = np.random.randn(50) * u.Jy
# Test redshift
redshift = 3
# Observed spectrum
spec = Spectrum1D(spectral_axis=spec_axis * (1+redshift),
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Template spectrum
spec1 = Spectrum1D(spectral_axis=spec_axis,
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Test redshift parameters
min_redshift = .5
max_redshift = 5.5
delta_redshift = .25
redshift_trial_values = np.arange(min_redshift, max_redshift+delta_redshift, delta_redshift)
tm_result = template_comparison.template_match(observed_spectrum=spec, spectral_templates=spec1,
resample_method="flux_conserving",
redshift=redshift_trial_values)
assert len(tm_result) == 4
np.testing.assert_almost_equal(tm_result[1], 73484.0053895151)
def test_template_redshift_with_multiple_template_spectra_in_match():
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
perm_flux = np.random.randn(50) * u.Jy
# Test redshift
redshift = 3
# Observed spectrum
spec = Spectrum1D(spectral_axis=spec_axis * (1+redshift),
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Template spectrum
spec1 = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50)))
spec2 = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50)))
# Combine spectra into SpectrumCollection object
spec_coll = SpectrumCollection.from_spectra([spec1, spec2])
# Test redshift parameters
min_redshift = .5
max_redshift = 5.5
delta_redshift = .25
redshift_trial_values = np.arange(min_redshift, max_redshift+delta_redshift, delta_redshift)
tm_result = template_comparison.template_match(observed_spectrum=spec, spectral_templates=spec_coll,
resample_method="flux_conserving",
redshift=redshift_trial_values)
assert len(tm_result) == 4
np.testing.assert_almost_equal(tm_result[1], 6803.922741644725)
# When a spectrum collection is matched with a redshift
# grid, a list-of-lists is returned with the trial chi2
# values computed for every combination redshift-template.
# The external list spans the templates in the collection,
# while each internal list contains all chi2 values
# for a given template.
assert len(tm_result[3]) == 2
def test_template_known_redshift():
"""
Test template match when the redshift is known.
"""
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
perm_flux = np.random.randn(50) * u.Jy
redshift = 3
# Observed spectrum
spec = Spectrum1D(spectral_axis=spec_axis * (1+redshift),
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Template spectrum
spec1 = Spectrum1D(spectral_axis=spec_axis,
flux=perm_flux,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
tm_result = template_comparison.template_match(observed_spectrum=spec, spectral_templates=spec1,
resample_method="flux_conserving",
redshift=redshift)
assert len(tm_result) == 4
np.testing.assert_almost_equal(tm_result[1], 1.9062409482056814e-31)
| 38.168508
| 113
| 0.650141
|
269f596da5d47509857efb6af229c1dc34c2ec22
| 2,853
|
py
|
Python
|
tests/test_ast_api.py
|
Ma233/olo
|
54eb3bd4e1330a0467159f9c968557d471537621
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ast_api.py
|
Ma233/olo
|
54eb3bd4e1330a0467159f9c968557d471537621
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ast_api.py
|
Ma233/olo
|
54eb3bd4e1330a0467159f9c968557d471537621
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from olo import select, select_, funcs
from .base import TestCase, Foo, Bar
class TestASTAPI(TestCase):
def is_same_q(self, q1, q2):
sql1, _ = q1.get_sql_and_params()
sql2, _ = q2.get_sql_and_params()
self.assertEqual(sql1, sql2)
def test_no_condition(self):
q1 = select(f for f in Foo)
q2 = Foo.query
self.is_same_q(q1, q2)
def test_entities(self):
q1 = select(f.age for f in Foo if f.id > 0)
q2 = Foo.query('age').filter(Foo.id > 0)
q3 = Foo.query(Foo.age).filter(Foo.id > 0)
self.is_same_q(q1, q2)
self.is_same_q(q1, q3)
q1 = select((f.age, f, f.id) for f in Foo if f.id > 0)
q2 = Foo.query('age', Foo, 'id').filter(Foo.id > 0)
q3 = Foo.query(Foo.age, Foo, Foo.id).filter(Foo.id > 0)
self.is_same_q(q1, q2)
self.is_same_q(q1, q3)
def test_multi_entities(self):
q1 = select((f, f.id, f.age) for f in Foo if f.id > 0)
q2 = Foo.query(Foo, Foo.id, Foo.age).filter(Foo.id > 0)
self.is_same_q(q1, q2)
def test_condition(self):
q1 = select(f for f in Foo if f.id == 1)
q2 = Foo.query.filter(id=1)
self.is_same_q(q1, q2)
def test_complex_condition(self):
q1 = select(
f for f in Foo
if f.id == 1 and f.age in [1, 2] or f.name == 'a'
)
q2 = Foo.query.filter(
(Foo.id == 1) & (Foo.age.in_([1, 2])) | (
Foo.name == 'a'
)
)
self.is_same_q(q1, q2)
def test_join(self):
q1 = select(
f for f in Foo
for b in Bar
if f.id == b.age and f.age in [1, 2] or b.name == 'a'
)
q2 = Foo.query.join(Bar).filter(
(Foo.id == Bar.age) & (Foo.age.in_([1, 2])) | (
Bar.name == 'a'
)
)
self.is_same_q(q1, q2)
def test_funcs(self):
q1 = select(
f for f in Foo
if f.id < max(f.id for f in Foo)
)
q2 = Foo.query.filter(
Foo.id < Foo.query(funcs.MAX(Foo.id))
)
self.is_same_q(q1, q2)
q1 = select_(
f for f in Foo
if f.id < max(f.id for f in Foo)
)
q2 = Foo.query.filter(
Foo.id < Foo.query(funcs.MAX(Foo.id))
)
self.is_same_q(q1, q2)
q1 = select(
f.id.distinct().count() for f in Foo
if f.id < 1 and f.name == 'foo'
)
q2 = select(
funcs.COUNT(funcs.DISTINCT(f.id)) for f in Foo
if f.id < 1 and f.name == 'foo'
)
q3 = Foo.query(funcs.COUNT(funcs.DISTINCT(Foo.id))).filter(
(Foo.id < 1) & (Foo.name == 'foo')
)
self.is_same_q(q1, q2)
self.is_same_q(q2, q3)
| 29.71875
| 67
| 0.48966
|
e699baf9ca2c13d83d4a7f030715f2de0a1a7fab
| 1,297
|
py
|
Python
|
Class Work/Testing-Homework/books_app/main/forms.py
|
Pondorasti/BEW-1.2
|
079d771f9c5e27a3b7871a689b3431bdab33af5e
|
[
"MIT"
] | null | null | null |
Class Work/Testing-Homework/books_app/main/forms.py
|
Pondorasti/BEW-1.2
|
079d771f9c5e27a3b7871a689b3431bdab33af5e
|
[
"MIT"
] | null | null | null |
Class Work/Testing-Homework/books_app/main/forms.py
|
Pondorasti/BEW-1.2
|
079d771f9c5e27a3b7871a689b3431bdab33af5e
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, DateField, SelectField, SubmitField, TextAreaField
from wtforms.ext.sqlalchemy.fields import QuerySelectField, QuerySelectMultipleField
from wtforms.validators import DataRequired, Length, ValidationError
from books_app.models import Audience, Book, Author, Genre, User
class BookForm(FlaskForm):
"""Form to create a book."""
title = StringField('Book Title',
validators=[DataRequired(), Length(min=3, max=80)])
publish_date = DateField('Date Published')
author = QuerySelectField('Author',
query_factory=lambda: Author.query, allow_blank=False)
audience = SelectField('Audience', choices=Audience.choices())
genres = QuerySelectMultipleField('Genres',
query_factory=lambda: Genre.query)
submit = SubmitField('Submit')
class AuthorForm(FlaskForm):
"""Form to create an author."""
name = StringField('Author Name',
validators=[DataRequired(), Length(min=3, max=80)])
biography = TextAreaField('Author Biography')
submit = SubmitField('Submit')
class GenreForm(FlaskForm):
"""Form to create a genre."""
name = StringField('Genre Name',
validators=[DataRequired(), Length(min=3, max=80)])
submit = SubmitField('Submit')
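# Editor-added illustrative view usage (route handling and model fields beyond
# those defined above are assumptions):
#   form = BookForm()
#   if form.validate_on_submit():
#       # form.title.data, form.author.data, etc. hold the submitted values
#       ...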
| 40.53125
| 98
| 0.723207
|
56689751ffd7455cb9a846e9ed85ea9750eda0d6
| 940
|
py
|
Python
|
API/PDPAPI/migrations/0002_auto_20181001_2019.py
|
almahdiy/IT_PDP_Conference
|
3470a1492899af3ebdd43837e361c247236180d1
|
[
"MIT"
] | null | null | null |
API/PDPAPI/migrations/0002_auto_20181001_2019.py
|
almahdiy/IT_PDP_Conference
|
3470a1492899af3ebdd43837e361c247236180d1
|
[
"MIT"
] | null | null | null |
API/PDPAPI/migrations/0002_auto_20181001_2019.py
|
almahdiy/IT_PDP_Conference
|
3470a1492899af3ebdd43837e361c247236180d1
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.1 on 2018-10-01 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PDPAPI', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='question_text',
),
migrations.AddField(
model_name='question',
name='body',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='question',
name='isFiltered',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='question',
name='title',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='question',
name='votes',
field=models.IntegerField(default=0),
),
]
| 24.736842
| 52
| 0.531915
|
8af4ff1ab77c7f2e8ed4ef3ca98c4210758ae56b
| 2,830
|
py
|
Python
|
demo/settings.py
|
acdha/django-queryset-transform
|
33eafc124b74ee83e19211e2860ac2010a447977
|
[
"BSD-3-Clause"
] | 18
|
2015-02-09T09:22:36.000Z
|
2021-05-26T07:06:10.000Z
|
demo/settings.py
|
acdha/django-queryset-transform
|
33eafc124b74ee83e19211e2860ac2010a447977
|
[
"BSD-3-Clause"
] | null | null | null |
demo/settings.py
|
acdha/django-queryset-transform
|
33eafc124b74ee83e19211e2860ac2010a447977
|
[
"BSD-3-Clause"
] | 2
|
2015-06-02T13:41:08.000Z
|
2017-07-27T12:12:21.000Z
|
# Django settings for demo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'data.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'hea-c%x=u^&2bypgp81_+tfmxkt3l-ni-3$(yml%d=!@9&+u2x'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'demo.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'demo_models',
)
| 34.512195
| 108
| 0.718021
|
007d21cbaf732e81d432a7f7f04f17cae065c9f2
| 4,562
|
py
|
Python
|
program_driver.py
|
comlor/Senior_Design_Movie_Project
|
f04b3d98c8d74c9399361dcc4b5f50ca7ac191c9
|
[
"Unlicense"
] | null | null | null |
program_driver.py
|
comlor/Senior_Design_Movie_Project
|
f04b3d98c8d74c9399361dcc4b5f50ca7ac191c9
|
[
"Unlicense"
] | null | null | null |
program_driver.py
|
comlor/Senior_Design_Movie_Project
|
f04b3d98c8d74c9399361dcc4b5f50ca7ac191c9
|
[
"Unlicense"
] | null | null | null |
import subprocess
import os
import sys
import time
from jpl_conf import FilePaths
import requests
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
        FilePaths().log_events('TIMING------: %s function took %0.3f ms' % (f.__name__, (time2 - time1)*1000.0) + "\n")
        return ret  # propagate the wrapped function's return value
    return wrap
@timing
def make_blend_file(job_dir, blend_file, texture_file, dtm_file, json):
FilePaths().log_events("CREATE SCENE\n")
# incoming args list
# args[0]
# args[1] json
# args[2] job_path
# args[3] output_dir
# args[4] randomid
blender = FilePaths().get_blender_exec()
FilePaths().log_events("Blender Exececutable: " + blender + "\n")
script = FilePaths().get_abs_path_project() + "job.py"
FilePaths().log_events("Script File: " + script)
# subprocess call arguments
# arg[0] blender
# arg[1] -b
# arg[2] -P
# arg[3] script
# arg[4] --
# arg[5] json
FilePaths().log_events("Creating Scene: " + blender + " -b -P " + script + " -- " + json + " " + job_dir + " " +
blend_file + " " + texture_file + " " + dtm_file + "\n")
sub = subprocess.Popen(
[blender + " -b -P " + script + " -- " + json + " " + job_dir + " " + blend_file + " " + texture_file + " " +
dtm_file], shell=True)
sub.communicate()
@timing
def render_scenes(hadoop_in):
FilePaths().log_events("RENDER SCENE\n")
hadoop = FilePaths().get_hadoop_exec()
FilePaths().log_events("Hadoop Executable: " + hadoop + "\n")
hadoop_streaming = FilePaths().get_hadoop_streaming()
FilePaths().log_events("Hadoop Streaming jar: " + hadoop_streaming + "\n")
cmd = hadoop
cmd += " jar "
cmd += hadoop_streaming
cmd += " -input "
cmd += hadoop_in + "/input/"
cmd += " -output "
cmd += hadoop_in + "/output/"
cmd += " -mapper "
cmd += FilePaths().get_abs_path_project() + "mapper.py"
cmd += " -reducer "
cmd += FilePaths().get_abs_path_project() + "reducer.py"
FilePaths().log_events("Execute Hadoop Process: " + cmd + "\n")
sub = subprocess.Popen([cmd], shell=True)
sub.communicate()
@timing
def animate_movie(job_dir, rid):
FilePaths().log_events("ANIMATE SCENE\n")
blender = FilePaths().get_blender_exec()
FilePaths().log_events("Blender Executable: " + blender + "\n")
output = FilePaths().get_final_output_dir() + rid + "/"
FilePaths().log_events("Movie Output Location: " + output + "\n")
cmd = blender
cmd += " -b -P "
cmd += FilePaths().get_abs_path_project() + "animate_scene.py"
cmd += " -- "
cmd += job_dir + "/temp/ "
cmd += output
FilePaths().log_events("Execute Animation: " + cmd + "\n")
sub = subprocess.Popen([cmd], shell=True)
sub.communicate()
def main():
FilePaths().log_events("MAIN PROGRAM\n")
FilePaths().log_events("System Args: " + str(sys.argv) + "\n")
job_dir = sys.argv[2]
# Create Absolute file path variables used to create the job directory structure
FilePaths().log_events("Creating Directory Structure\n")
job_hadoop = job_dir + "/hadoop"
job_hadoop_in = job_hadoop + "/input"
job_hadoop_out = job_hadoop + "/output"
job_temp = job_dir + "/temp"
job_assets = job_dir + "/assets"
# Name of the blend file
blend_file = sys.argv[4] + ".blend"
# For Future Implementation
texture_file = None
dtm_file = "my_image.IMG"
# Create Directory Structure for The Current Job
if not os.path.isdir(job_dir):
os.makedirs(job_dir)
FilePaths().log_events("Current Job: " + job_dir + "\n")
if not os.path.isdir(job_hadoop):
os.makedirs(job_hadoop)
FilePaths().log_events("Hadoop Job: " + job_hadoop + "\n")
if not os.path.isdir(job_hadoop_in):
os.makedirs(job_hadoop_in)
FilePaths().log_events("Hadoop Input: " + job_hadoop_in + "\n")
if not os.path.isdir(job_temp):
os.makedirs(job_temp)
FilePaths().log_events("Current Job Temp File: " + job_temp + "\n")
if not os.path.isdir(job_assets):
os.makedirs(job_assets)
FilePaths().log_events("Current Job Assets: " + job_assets + "\n")
make_blend_file(job_dir, blend_file, str(texture_file), dtm_file, sys.argv[1])
render_scenes(job_hadoop)
animate_movie(job_dir, sys.argv[4])
#CALL POST SEND EMAIL
FilePaths().log_events("Send Email to User\n")
r = requests.post("http://et-etb10c-x02:8281/completed", str(sys.argv[4]))
if __name__ == "__main__":
main()
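# Usage sketch (not part of the original script; the file and job names below are
# hypothetical). The argument layout follows the comments in make_blend_file:
# argv[1] json, argv[2] job_path, argv[3] output_dir, argv[4] randomid, e.g.
#
#   python program_driver.py scene.json /jobs/job_0001 /jobs/out 1a2b3c4d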
| 32.35461
| 119
| 0.621438
|
8c451afe513bca9a828aaa04ebdd972270747210
| 4,231
|
py
|
Python
|
isi_sdk/models/cloud_job.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/cloud_job.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/cloud_job.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class CloudJob(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
CloudJob - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'all': 'bool',
'state': 'str'
}
self.attribute_map = {
'all': 'all',
'state': 'state'
}
self._all = None
self._state = None
@property
def all(self):
"""
Gets the all of this CloudJob.
Whether to apply to the given operation type or to all jobs of the given operation type
:return: The all of this CloudJob.
:rtype: bool
"""
return self._all
@all.setter
def all(self, all):
"""
Sets the all of this CloudJob.
Whether to apply to the given operation type or to all jobs of the given operation type
:param all: The all of this CloudJob.
:type: bool
"""
self._all = all
@property
def state(self):
"""
Gets the state of this CloudJob.
The desired state of the job or operation
:return: The state of this CloudJob.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this CloudJob.
The desired state of the job or operation
:param state: The state of this CloudJob.
:type: str
"""
allowed_values = ["resume", "pause", "cancel"]
if state is not None and state not in allowed_values:
raise ValueError(
"Invalid value for `state`, must be one of {0}"
.format(allowed_values)
)
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 27.296774
| 95
| 0.548334
|
fb3709a072d24607c0931a43b41b70827b81655e
| 2,792
|
py
|
Python
|
examples/python/reactor/count-randomly.py
|
stewnorriss/https-github.com-apache-qpid-proton
|
4d25d88f5c96165c7c6aa24c28d214ad599caff8
|
[
"Apache-2.0"
] | 2
|
2020-04-28T13:33:06.000Z
|
2020-06-01T14:51:05.000Z
|
examples/python/reactor/count-randomly.py
|
stewnorriss/https-github.com-apache-qpid-proton
|
4d25d88f5c96165c7c6aa24c28d214ad599caff8
|
[
"Apache-2.0"
] | null | null | null |
examples/python/reactor/count-randomly.py
|
stewnorriss/https-github.com-apache-qpid-proton
|
4d25d88f5c96165c7c6aa24c28d214ad599caff8
|
[
"Apache-2.0"
] | 4
|
2015-10-17T20:44:45.000Z
|
2021-06-08T19:00:56.000Z
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import time, random
from proton.reactor import Reactor
# Let's try to modify our counter example. In addition to counting to
# 10 in quarter second intervals, let's also print out a random number
# every half second. This is not a super easy thing to express in a
# purely sequential program, but not so difficult using events.
class Counter:
def __init__(self, limit):
self.limit = limit
self.count = 0
def on_timer_task(self, event):
self.count += 1
print(self.count)
if not self.done():
event.reactor.schedule(0.25, self)
# add a public API to check for doneness
def done(self):
return self.count >= self.limit
class Program:
def on_reactor_init(self, event):
self.start = time.time()
print("Hello, World!")
# Save the counter instance in an attribute so we can refer to
# it later.
self.counter = Counter(10)
event.reactor.schedule(0.25, self.counter)
# Now schedule another event with a different handler. Note
# that the timer tasks go to separate handlers, and they don't
# interfere with each other.
event.reactor.schedule(0.5, self)
def on_timer_task(self, event):
# keep on shouting until we are done counting
print("Yay, %s!" % random.randint(10, 100))
if not self.counter.done():
event.reactor.schedule(0.5, self)
def on_reactor_final(self, event):
print("Goodbye, World! (after %s long seconds)" % (time.time() - self.start))
# In hello-world.py we said the reactor exits when there are no more
# events to process. While this is true, it's not actually complete.
# The reactor exits when there are no more events to process and no
# possibility of future events arising. For that reason the reactor
# will keep running until there are no more scheduled events and then
# exit.
r = Reactor(Program())
r.run()
| 35.341772
| 85
| 0.698782
|
cf4818ae08e1e8776acd5a98b7a278c82f7f18b0
| 625
|
py
|
Python
|
lucky.py
|
terrameijar/useful_scripts
|
3395068009f98c16a0c762a74383b4aed97c0a40
|
[
"MIT"
] | 1
|
2017-04-19T11:27:08.000Z
|
2017-04-19T11:27:08.000Z
|
lucky.py
|
terrameijar/useful_scripts
|
3395068009f98c16a0c762a74383b4aed97c0a40
|
[
"MIT"
] | 2
|
2017-01-25T08:17:11.000Z
|
2017-01-26T09:14:15.000Z
|
lucky.py
|
terrameijar/useful_scripts
|
3395068009f98c16a0c762a74383b4aed97c0a40
|
[
"MIT"
] | 3
|
2017-10-02T16:00:58.000Z
|
2019-10-12T13:23:29.000Z
|
#! /usr/bin/env python
# lucky.py -- Opens several Google search results.
import sys
import requests
import webbrowser
from bs4 import BeautifulSoup
print "Googling....." # Display text while searching
res = requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))
res.raise_for_status()
# Retrieve top search result links.
soup = BeautifulSoup(res.text, "lxml")
# Open a browser tab for each result.
link_elements = soup.select('.r a') # Select anchor under class r
num_open = min(5, len(link_elements))
for i in range(num_open):
webbrowser.open('http://google.com' + link_elements[i].get('href'))
| 31.25
| 74
| 0.7184
|
0843899d9e30aab698ad2551c5379b7b302e53bc
| 666
|
py
|
Python
|
pyglet4.py
|
diallog/GCPpy
|
dabd55ece1c12c1a390a228cd04cb7eb110e564b
|
[
"Unlicense"
] | null | null | null |
pyglet4.py
|
diallog/GCPpy
|
dabd55ece1c12c1a390a228cd04cb7eb110e564b
|
[
"Unlicense"
] | null | null | null |
pyglet4.py
|
diallog/GCPpy
|
dabd55ece1c12c1a390a228cd04cb7eb110e564b
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# PURPOSE: inital forray into windowed applications / event-driven programming
import pyglet
window = pyglet.window.Window(width=400, height=300, caption='Test Window')
label = pyglet.text.Label('Nothing pressed yet.', font_name='Times New Roman',
font_size=18, x=50, y=200)
@window.event
def on_key_press(symbol, modifier):
global label
key_pressed = pyglet.window.key.symbol_string(symbol)
label = pyglet.text.Label('You pressed the ' + key_pressed + ' key.',
font_name='Times New Roman',
font_size=18, x=50, y=200)
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.app.run()
| 26.64
| 78
| 0.629129
|
896f9d80e504a36fedb86d46bc8c9b2f217c82f6
| 70,864
|
py
|
Python
|
parsimony/functions/penalties.py
|
nguigs/pylearn-parsimony
|
f712d2828823d6d55a2470ce060bcaeda2d0589a
|
[
"BSD-3-Clause"
] | null | null | null |
parsimony/functions/penalties.py
|
nguigs/pylearn-parsimony
|
f712d2828823d6d55a2470ce060bcaeda2d0589a
|
[
"BSD-3-Clause"
] | null | null | null |
parsimony/functions/penalties.py
|
nguigs/pylearn-parsimony
|
f712d2828823d6d55a2470ce060bcaeda2d0589a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.functions.penalties` module contains the penalties used to
constrain the loss functions. These represent mathematical functions and
should thus have properties used by the corresponding algorithms. These
properties are defined in :mod:`parsimony.functions.properties`.
Penalties should be stateless. Penalties may be shared and copied and should
therefore not hold anything that cannot be recomputed the next time it is
called.
Created on Mon Apr 22 10:54:29 2013
Copyright (c) 2013-2017, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: Tommy Löfstedt, Vincent Guillemot, Edouard Duchesnay and
Fouad Hadj-Selem
@email: lofstedt.tommy@gmail.com, edouard.duchesnay@cea.fr
@license: BSD 3-clause.
"""
import numpy as np
import scipy.optimize as optimize
import scipy.sparse as sparse
try:
from . import properties # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.functions.properties as properties # Run as a script.
import parsimony.utils.maths as maths
import parsimony.utils.consts as consts
import parsimony.utils.linalgs as linalgs
__all__ = ["ZeroFunction", "L1", "L0", "LInf", "L2", "L2Squared",
"L1L2Squared", "GraphNet",
"QuadraticConstraint", "RGCCAConstraint", "RidgeSquaredError",
"LinearConstraint",
"LinearVariableConstraint",
"SufficientDescentCondition",
"KernelL2Squared"]
class ZeroFunction(properties.AtomicFunction,
properties.Gradient,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
def __init__(self, l=1.0, c=0.0, penalty_start=0):
"""
Parameters
----------
l : float
A non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : float
The limit of the constraint. The function is feasible if
||\beta||_1 <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : int
A non-negative integer. The number of columns, variables etc., to
be exempt from penalisation. Equivalently, the first index to be
penalised. Default is 0, all columns are included.
"""
self.l = max(0.0, float(l))
self.c = float(c)
if self.c < 0.0:
raise ValueError("A negative constraint parameter does not make "
"sense, since the function is always zero.")
self.penalty_start = max(0, int(penalty_start))
self.reset()
def reset(self):
self._zero = None
def f(self, x):
"""Function value.
"""
return 0.0
def grad(self, x):
"""Gradient of the function.
From the interface "Gradient".
"""
if self._zero is None:
self._zero = np.zeros(x.shape)
return self._zero
def prox(self, x, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
return x
def proj(self, x, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
"""
return x
def feasible(self, x):
"""Feasibility of the constraint.
From the interface "Constraint".
"""
return self.c >= 0.0
class L1(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator,
properties.SubGradient):
"""The L1 function in a penalty formulation has the form
f(\beta) = l * (||\beta||_1 - c),
where ||\beta||_1 is the L1 loss function. The constrained version has the
form
||\beta||_1 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||\beta||_1 <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = float(l)
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (maths.norm1(beta_) - self.c)
def subgrad(self, beta, clever=True, random_state=None, **kwargs):
if random_state is None:
random_state = np.random.RandomState()
izero = np.abs(beta) < 10.0 * consts.FLOAT_EPSILON
inonzero = np.logical_not(izero)
grad = np.zeros(beta.shape)
grad[inonzero] = np.sign(beta[inonzero])
if clever:
# The "clever" part here is that since we are already at the
# minimum of the penalty, we have no reason to move away from here.
# Hence, the subgradient is zero at this point.
grad[izero] = np.zeros(np.sum(izero))
else:
grad[izero] = random_state.uniform(-1, 1, np.sum(izero))
return self.l * grad
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
prox = (np.abs(beta_) > l) * (beta_ - l * np.sign(beta_ - l))
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :], prox))
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
p = beta_.shape[0]
abs_beta = np.absolute(beta_)
norm1 = np.sum(abs_beta)
if norm1 <= self.c: # Feasible?
return beta
a = np.flipud(np.sort(abs_beta, axis=0)).ravel()
suma = np.cumsum(a)
phi = np.zeros((p + 1,))
np.multiply(a, np.arange(-1, -p - 1, -1), phi[:p])
phi[:p] += (suma - self.c)
phi[p] = suma[p - 1] - self.c
# TODO: BUG: i may be equal to p => IndexError: list index out of range
i = np.searchsorted(phi, 0.0) # First positive (or zero).
if phi[i] < 0.0:
# TODO: This should not be able to happen! Do we know it doesn't?
return self.__proj_old(beta)
i -= 1 # The last negative phi before positive (or zero).
if phi[i] >= 0.0:
# TODO: This should not be able to happen! Do we know it doesn't?
return self.__proj_old(beta)
l = a[i] + phi[i] / float(i + 1) # Find the Lagrange multiplier.
# The correction by eps is to nudge the L1 norm just below self.c.
eps = consts.FLOAT_EPSILON
l += eps
return (np.abs(beta_) > l) * (beta_ - l * np.sign(beta_ - l))
def __proj_old(self, beta):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
abs_beta = np.absolute(beta_)
norm1 = np.sum(abs_beta)
if norm1 <= self.c: # Feasible?
return beta
from parsimony.algorithms.utils import Bisection
bisection = Bisection(force_negative=True,
parameter_positive=True,
parameter_negative=False,
parameter_zero=False,
eps=1e-8)
class F(properties.Function):
def __init__(self, beta, c):
self.beta = beta
self.c = c
def f(self, l):
beta = (abs_beta > l) \
* (self.beta - l * np.sign(self.beta - l))
return maths.norm1(beta) - self.c
func = F(beta_, self.c)
l = bisection.run(func, [0.0, np.max(np.abs(beta_))])
return (abs_beta > l) * (beta_ - l * np.sign(beta_ - l))
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return maths.norm1(beta_) <= self.c
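# Illustration (a sketch, not part of the original module): L1.prox is element-wise
# soft-thresholding by l. For example, with l = 0.5,
#
#   import numpy as np
#   L1(l=0.5).prox(np.array([[1.0], [-0.2], [0.8]]))  # -> [[0.5], [0.0], [0.3]]
#
# entries with |beta_i| <= l are set to zero and the rest are shrunk towards zero by l.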
class L0(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the "pseudo" L0 function
f(x) = l * (||x||_0 - c),
where ||x||_0 is the L0 loss function. The constrained version has the
form
||x||_0 <= c.
Warning: Note that this function is not convex, and the regular assumptions
when using it in e.g. ISTA or FISTA will not apply. Nevertheless, it will
still converge to a local minimum if we can guarantee that we obtain a
reduction of the smooth part in each step. See e.g.:
http://eprints.soton.ac.uk/142499/1/BD_NIHT09.pdf
http://people.ee.duke.edu/~lcarin/blumensath.pdf
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||x||_0 <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, x):
"""Function value.
From the interface "Function".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> l0 = L0(l=0.5)
>>> maths.norm0(x)
10
>>> l0.f(x) - 0.5 * maths.norm0(x)
0.0
>>> x[0, 0] = 0.0
>>> maths.norm0(x)
9
>>> l0.f(x) - 0.5 * maths.norm0(x)
0.0
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
return self.l * (maths.norm0(x_) - self.c)
def prox(self, x, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> l0 = L0(l=0.5)
>>> maths.norm0(x)
10
>>> np.linalg.norm(l0.prox(x) - np.array([[0. ],
... [0.95071431],
... [0.73199394],
... [0.59865848],
... [0. ],
... [0. ],
... [0. ],
... [0.86617615],
... [0.60111501],
... [0.70807258]])) < 5e-8
True
>>> l0.f(l0.prox(x))
3.0
>>> 0.5 * maths.norm0(l0.prox(x))
3.0
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
l = self.l * factor
prox = x_ * (np.abs(x_) > l) # Hard thresholding.
prox = np.vstack((x[:self.penalty_start, :], # Unregularised variables
prox))
return prox
def proj(self, x):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> l0 = L0(c=5.0)
>>> l0.proj(x)
array([[ 0. ],
[ 0.90142861],
[ 0. ],
[ 0. ],
[-0.68796272],
[-0.68801096],
[-0.88383278],
[ 0.73235229],
[ 0. ],
[ 0. ]])
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
if maths.norm0(x_) <= self.c:
return x
K = int(np.floor(self.c) + 0.5)
ind = np.abs(x_.ravel()).argsort()[:K]
y = np.copy(x_)
y[ind] = 0.0
if self.penalty_start > 0:
# Add the unregularised variables.
y = np.vstack((x[:self.penalty_start, :],
y))
return y
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> l0 = L0(c=5.0)
>>> l0.feasible(x)
False
>>> l0.feasible(l0.proj(x))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return maths.norm0(beta_) <= self.c
class LInf(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the L-infinity function
f(x) = l * (||x||_inf - c),
where ||x||_inf is the L-infinity loss function. The constrained version
has the form
||x||_inf <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||x||_inf <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = float(l)
self.c = float(c)
self.penalty_start = int(penalty_start)
def f(self, x):
"""Function value.
From the interface "Function".
Parameters
----------
x : Numpy array. The point at which to evaluate the function.
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> linf = LInf(l=1.1)
>>> linf.f(x) - 1.1 * maths.normInf(x)
0.0
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
return self.l * (maths.normInf(x_) - self.c)
def prox(self, x, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> linf = LInf(l=1.45673045, c=0.5)
>>> linf_prox = linf.prox(x)
>>> np.linalg.norm(linf_prox - np.asarray([[0.37454012],
... [0.5 ],
... [0.5 ],
... [0.5 ],
... [0.15601864],
... [0.15599452],
... [0.05808361],
... [0.5 ],
... [0.5 ],
... [0.5 ]])) < 5e-8
True
>>> linf_proj = linf.proj(x)
>>> np.linalg.norm(linf_proj - np.asarray([[0.37454012],
... [0.5 ],
... [0.5 ],
... [0.5 ],
... [0.15601864],
... [0.15599452],
... [0.05808361],
... [0.5 ],
... [0.5 ],
... [0.5 ]])) < 5e-8
True
>>> np.linalg.norm(linf_prox - linf_proj) < 5e-8
True
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
l = self.l * factor
l1 = L1(c=l) # Project onto an L1 ball with radius c=l.
y = x_ - l1.proj(x_)
# TODO: Check if this is correct!
# Put the unregularised variables back.
if self.penalty_start > 0:
y = np.vstack((x[:self.penalty_start, :],
y))
return y
def proj(self, x):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> linf = LInf(c=0.618)
>>> linf.proj(x)
array([[-0.25091976],
[ 0.618 ],
[ 0.46398788],
[ 0.19731697],
[-0.618 ],
[-0.618 ],
[-0.618 ],
[ 0.618 ],
[ 0.20223002],
[ 0.41614516]])
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
if maths.normInf(x_) <= self.c:
return x
y = np.copy(x_)
y[y > self.c] = self.c
y[y < -self.c] = -self.c
# Put the unregularised variables back.
if self.penalty_start > 0:
y = np.vstack((x[:self.penalty_start, :],
y))
return y
def feasible(self, x):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
x : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> linf = LInf(c=0.618)
>>> linf.feasible(x)
False
>>> linf.feasible(linf.proj(x))
True
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
return maths.normInf(x_) <= self.c
class L2(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the L2 function with a penalty formulation
f(\beta) = l * (||\beta||_2 - c),
where ||\beta||_2 is the L2 loss function. The constrained version has
the form
||\beta||_2 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||\beta||_2 <= c. The default value is c=0, i.e. the
default is a regularised formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
From the interface "Function".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (maths.norm(beta_) - self.c)
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
norm = maths.norm(beta_)
if norm >= l:
beta_ *= 1.0 - l / norm
else:
beta_ *= 0.0
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :], beta_))
else:
prox = beta_
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2
>>> np.random.seed(42)
>>> l2 = L2(c=0.3183098861837907)
>>> y1 = l2.proj(np.random.rand(100, 1) * 2.0 - 1.0)
>>> np.linalg.norm(y1) # doctest: +ELLIPSIS
0.31830988...
>>> y2 = np.random.rand(100, 1) * 2.0 - 1.0
>>> l2.feasible(y2)
False
>>> l2.feasible(l2.proj(y2))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
norm = maths.norm(beta_)
# Feasible?
if norm <= self.c:
return beta
# The correction by eps is to nudge the norm just below self.c.
eps = consts.FLOAT_EPSILON
beta_ *= self.c / (norm + eps)
proj = beta_
if self.penalty_start > 0:
proj = np.vstack((beta[:self.penalty_start, :], beta_))
return proj
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2
>>> np.random.seed(42)
>>> l2 = L2(c=0.3183098861837907)
>>> y1 = 0.01 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y1)
True
>>> y2 = 10.0 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y2)
False
>>> y3 = l2.proj(50.0 * np.random.rand(100, 1) * 2.0 - 1.0)
>>> l2.feasible(y3)
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return maths.norm(beta_) <= self.c + consts.FLOAT_EPSILON
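# Illustration (a sketch, not part of the original module): L2.prox shrinks the
# whole vector by the factor (1 - l / ||beta||_2), or returns zero when
# ||beta||_2 < l. For example, with l = 0.5 and beta = [[3.0], [4.0]] (norm 5.0),
#
#   import numpy as np
#   L2(l=0.5).prox(np.array([[3.0], [4.0]]))  # -> [[2.7], [3.6]], i.e. 0.9 * beta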
class L2Squared(properties.AtomicFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the squared L2 function with a penalty
formulation
f(\beta) = l * (0.5 * ||\beta||²_2 - c),
where ||\beta||²_2 is the squared L2 loss function. The constrained
version has the form
0.5 * ||\beta||²_2 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
0.5 * ||\beta||²_2 <= c. The default value is c=0, i.e. the
default is a regularised formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
From the interface "Function".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (0.5 * np.dot(beta_.T, beta_)[0, 0] - self.c)
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>>
>>> np.random.seed(42)
>>> beta = np.random.rand(100, 1)
>>> l2 = L2Squared(l=3.14159, c=2.71828)
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
>>>
>>> l2 = L2Squared(l=3.14159, c=2.71828, penalty_start=5)
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
grad = np.vstack((np.zeros((self.penalty_start, 1)),
self.l * beta_))
else:
beta_ = beta
grad = self.l * beta_
# approx_grad = utils.approx_grad(self.f, beta, eps=1e-4)
# print maths.norm(grad - approx_grad)
return grad
def L(self):
"""Lipschitz constant of the gradient.
"""
return self.l
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :],
beta_ * (1.0 / (1.0 + l))))
else:
prox = beta_ * (1.0 / (1.0 + l))
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> np.random.seed(42)
>>> l2 = L2Squared(c=0.3183098861837907)
>>> y1 = l2.proj(np.random.rand(100, 1) * 2.0 - 1.0)
>>> 0.5 * np.linalg.norm(y1) ** 2 # doctest: +ELLIPSIS
0.31830988...
>>> y2 = np.random.rand(100, 1) * 2 - 1.0
>>> l2.feasible(y2)
False
>>> l2.feasible(l2.proj(y2))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
sqnorm = np.dot(beta_.T, beta_)[0, 0]
# Feasible?
if 0.5 * sqnorm <= self.c:
return beta
# The correction by eps is to nudge the squared norm just below
# self.c.
eps = consts.FLOAT_EPSILON
if self.penalty_start > 0:
proj = np.vstack((beta[:self.penalty_start, :],
beta_ * np.sqrt((2.0 * self.c - eps) / sqnorm)))
else:
proj = beta_ * np.sqrt((2.0 * self.c - eps) / sqnorm)
return proj
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> np.random.seed(42)
>>> l2 = L2Squared(c=0.3183098861837907)
>>> y1 = 0.1 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y1)
True
>>> y2 = 10.0 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y2)
False
>>> y3 = l2.proj(50.0 * np.random.rand(100, 1) * 2.0 - 1.0)
>>> l2.feasible(y3)
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
sqnorm = np.dot(beta_.T, beta_)[0, 0]
return 0.5 * sqnorm <= self.c + consts.FLOAT_EPSILON
class L1L2Squared(properties.AtomicFunction,
properties.Penalty,
properties.ProximalOperator):
"""The proximal operator of the L1 function with an L2 constraint.
The function is
f(x) = l1 * ||x||_1 + Indicator(||x||²_2 <= l2),
where ||.||_1 is the L1 norm and ||.||²_2 is the squared L2 norm.
Parameters
----------
l1 : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the L1 norm penalty.
l2 : Non-negative float. The limit of the constraint of the squared L2
norm penalty.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l1=1.0, l2=1.0, penalty_start=0):
self.l1 = max(0.0, float(l1))
self.l2 = max(0.0, float(l2))
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if maths.norm(beta_) ** 2 > self.l2:
return consts.FLOAT_INF
return self.l1 * maths.norm1(beta_)
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
l1 = self.l1 * factor
prox = (np.abs(beta_) > l1) * (beta_ - l1 * np.sign(beta_ - l1))
prox *= np.sqrt(self.l2 / np.dot(prox.T, prox)[0, 0])
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :], prox))
return prox
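# Note (added for clarity): the prox above first soft-thresholds beta_ by l1 and
# then rescales the result so that ||prox||^2_2 = l2, i.e. it maps the thresholded
# vector onto the boundary of the squared-L2 constraint set.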
class QuadraticConstraint(properties.AtomicFunction,
properties.Gradient,
properties.Penalty,
properties.Constraint):
"""The proximal operator of the quadratic function
f(x) = l * (x'Mx - c),
or
f(x) = l * (x'M'Nx - c),
where M or M'N is a given symmetric positive-definite matrix. The
constrained version has the form
x'Mx <= c,
or
x'M'Nx <= c
if two matrices are given.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
x'Mx <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
M : Numpy array. The given positive definite matrix. It is assumed that
the first penalty_start columns must be excluded.
N : Numpy array. The second matrix if the factors of the positive-definite
matrix are given. It is assumed that the first penalty_start
columns must be excluded.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, M=None, N=None, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
if self.penalty_start > 0:
self.M = M[:, self.penalty_start:] # NOTE! We slice M here!
self.N = N[:, self.penalty_start:] if N is not None else None # NOTE! We slice N here!
else:
self.M = M
self.N = N
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.N is None:
#val = self.l * (np.dot(beta_.T, np.dot(self.M, beta_)) - self.c)
val = self.l * (np.dot(beta_.T, self.M.dot(beta_)) - self.c)
else:
val = self.l * (np.dot(beta_.T, self.M.T.dot(self.N.dot(beta_)))
- self.c)
#val = self.l * (np.dot(beta_.T, np.dot(self.M.T,
# np.dot(self.N, beta_))) \
# - self.c)
return val
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.N is None:
grad = (2.0 * self.l) * self.M.dot(beta_)
#grad = (2.0 * self.l) * np.dot(self.M, beta_)
else:
grad = (2.0 * self.l) * self.M.T.dot(self.N.dot(beta_))
#grad = (2.0 * self.l) * np.dot(self.M.T, np.dot(self.N, beta_))
if self.penalty_start > 0:
grad = np.vstack((np.zeros((self.penalty_start, 1)), grad))
return grad
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.N is None:
#bMb = np.dot(beta_.T, np.dot(self.M, beta_))
bMb = np.dot(beta_.T, self.M.dot(beta_))
else:
#bMb = np.dot(beta_.T, np.dot(self.M.T, np.dot(self.N, beta_)))
bMb = np.dot(beta_.T, self.M.T.dot(self.N.dot(beta_)))
return bMb <= self.c
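# Usage sketch (an assumption for illustration, not from the original module):
# a covariance-type penalty l * x'(X'X)x can be set up either from the product
# or from its factors:
#
#   M = np.dot(X.T, X)
#   qc = QuadraticConstraint(l=0.5, M=M)         # penalises l * x'Mx
#   qc = QuadraticConstraint(l=0.5, M=X, N=X)    # same penalty written as x'M'Nx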
class GraphNet(QuadraticConstraint,
properties.LipschitzContinuousGradient):
"""The proximal operator of the GraphNet function.
f(x) = l * sum_{(i, j) \in G}(b_i - b_j)^2,
Where nodes (i, j) are connected in the Graph G and A is a (sparse) matrix
of P columns where each line contains a pair of (-1, +1) for 2 connected
nodes, and zero elsewhere.
f(x) = l * x'A'Ax.
= l * sum((Ax)^2)
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
A : Numpy or (usually) scipy.sparse array. The matrix, with entries in (-1, +1),
that computes all the differences between connected nodes of the graph.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, A=None, penalty_start=0):
self.l = float(l)
self.c = 0
self.M = A # for QuadraticConstraint
self.N = A # for QuadraticConstraint
self.A = A
self.penalty_start = penalty_start
self._lambda_max = None
# TODO: Redefine grad and f, without inheritance from QuadraticConstraint
# to speed up computing of f matrix-vector multiplication only needs to be
# performed once,
def L(self):
""" Lipschitz constant of the gradient.
From the interface "LipschitzContinuousGradient".
"""
if self.l < consts.TOLERANCE:
return 0.0
lmaxA = self.lambda_max()
# The Lipschitz constant of the gradient 2 * l * A'A.x is l times the
# operator norm of 2 * A'A, i.e. twice the largest eigenvalue of A'A
# (the squared largest singular value of A), which lambda_max() returns.
return self.l * 2.0 * lmaxA
def lambda_max(self):
""" Largest eigenvalue of the corresponding covariance matrix.
From the interface "Eigenvalues".
"""
# From functions.nesterov.tv.TotalVariation.L
# Note that we can save the state here since lmax(A) does not change.
# TODO: This only work if the elements of self._A are scipy.sparse. We
# should allow dense matrices as well.
if self._lambda_max is None:
from parsimony.algorithms.nipals import RankOneSparseSVD
A = self.A
# TODO: Add max_iter here!
v = RankOneSparseSVD().run(A) # , max_iter=max_iter)
us = A.dot(v)
self._lambda_max = np.sum(us ** 2)
return self._lambda_max
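# Construction sketch (an assumption for illustration, not from the original
# module): for a chain graph over p variables, the operator A described in the
# class docstring has one row per edge with a (-1, +1) pair in the columns of
# the two connected nodes:
#
#   import scipy.sparse as sparse
#   p = 5
#   data = [-1.0] * (p - 1) + [1.0] * (p - 1)
#   rows = list(range(p - 1)) * 2
#   cols = list(range(p - 1)) + list(range(1, p))
#   A = sparse.csr_matrix((data, (rows, cols)), shape=(p - 1, p))
#   gn = GraphNet(l=0.1, A=A)  # penalises l * sum_i (x_i - x_{i+1})^2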
class RGCCAConstraint(QuadraticConstraint,
properties.ProjectionOperator):
"""Represents the quadratic function
f(x) = l * (x'(tau * I + ((1 - tau) / n) * X'X)x - c),
where tau is a given regularisation constant. The constrained version has
the form
x'(tau * I + ((1 - tau) / n) * X'X)x <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
x'(tau * I + ((1 - tau) / n) * X'X)x <= c. The default value is
c=0, i.e. the default is a regularisation formulation.
tau : Non-negative float. The regularisation constant.
X : Numpy array, n-by-p. The associated data matrix. The first
penalty_start columns will be excluded.
unbiased : Boolean. Whether the sample variance should be unbiased or not.
Default is True, i.e. unbiased.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, tau=1.0, X=None, unbiased=True,
penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.tau = max(0.0, min(float(tau), 1.0))
if penalty_start > 0:
self.X = X[:, penalty_start:] # NOTE! We slice X here!
else:
self.X = X
self.unbiased = bool(unbiased)
self.penalty_start = max(0, int(penalty_start))
self.reset()
def reset(self):
self._U = None
self._S = None
self._V = None
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
xtMx = self._compute_value(beta_)
return self.l * (xtMx - self.c)
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.unbiased:
n = float(self.X.shape[0] - 1.0)
else:
n = float(self.X.shape[0])
if self.tau < 1.0:
XtXbeta = np.dot(self.X.T, np.dot(self.X, beta_))
grad = (self.tau * 2.0) * beta_ \
+ ((1.0 - self.tau) * 2.0 / n) * XtXbeta
else:
grad = (self.tau * 2.0) * beta_
if self.penalty_start > 0:
grad = np.vstack((np.zeros((self.penalty_start, 1)),
grad))
# approx_grad = utils.approx_grad(self.f, beta, eps=1e-4)
# print maths.norm(grad - approx_grad)
return grad
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
xtMx = self._compute_value(beta_)
return xtMx <= self.c
def proj(self, beta, **kwargs):
"""The projection operator corresponding to the function.
From the interface "ProjectionOperator".
Examples
--------
>>> import parsimony.functions.penalties as penalties
>>> import numpy as np
>>> np.random.seed(42)
>>>
>>> X = np.random.randn(10, 10)
>>> x = np.random.randn(10, 1)
>>> L2 = penalties.RGCCAConstraint(c=1.0, tau=1.0, X=X, unbiased=True)
>>> np.abs(L2.f(x) - 5.7906381220390024) < 5e-16
True
>>> y = L2.proj(x)
>>> abs(L2.f(y)) <= 2.0 * consts.FLOAT_EPSILON
True
>>> np.abs(np.linalg.norm(y) - 0.99999999999999989) < 5e-16
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
xtMx = self._compute_value(beta_)
if xtMx <= self.c + consts.FLOAT_EPSILON:
return beta
n, p = self.X.shape
if self.unbiased:
n_ = float(n - 1.0)
else:
n_ = float(n)
if self.tau == 1.0:
sqnorm = np.dot(beta_.T, beta_)
eps = consts.FLOAT_EPSILON
y = beta_ * np.sqrt((self.c - eps) / sqnorm)
else:
if self._U is None or self._S is None or self._V is None:
# self._U, self._S, self._V = np.linalg.svd(X_, full_matrices=0)
# numpy.linalg.svd runs faster on the transpose.
self._V, self._S, self._U = np.linalg.svd(self.X.T,
full_matrices=0)
self._V = self._V.T
self._U = self._U.T
self._S = ((1.0 - self.tau) / n_) * (self._S ** 2) + self.tau
self._S = self._S.reshape((min(n, p), 1))
atilde = np.dot(self._V, beta_)
atilde2 = atilde ** 2
ssdiff = np.dot(beta_.T, beta_)[0, 0] - np.sum(atilde2)
atilde2lambdas = atilde2 * self._S
atilde2lambdas2 = atilde2lambdas * self._S
tau2 = self.tau ** 2
from parsimony.algorithms.utils import NewtonRaphson
newton = NewtonRaphson(force_negative=True,
parameter_positive=True,
parameter_negative=False,
parameter_zero=True,
eps=consts.TOLERANCE,
max_iter=30)
class F(properties.Function,
properties.Gradient):
def __init__(self, tau, S, c):
self.tau = tau
self.S = S
self.c = c
self.precomp = None
self.precomp_mu = None
def f(self, mu):
term1 = (self.tau / ((1.0 + 2.0 * mu * self.tau) ** 2)) \
* ssdiff
self.precomp = 1.0 + (2.0 * mu) * self.S
self.precomp_mu = mu
term2 = np.sum(atilde2lambdas * (self.precomp ** -2))
return term1 + term2 - self.c
def grad(self, mu):
term1 = (-4.0 * tau2 \
/ ((1.0 + 2.0 * mu * self.tau) ** 3.0)) * ssdiff
if self.precomp is None or self.precomp_mu != mu:
self.precomp = 1.0 + (2.0 * mu) * self.S
term2 = -4.0 * np.sum(atilde2lambdas2 \
* (self.precomp ** -3.0))
self.precomp = None
self.precomp_mu = None
return term1 + term2
# if max(n, p) >= 1000:
# # A rough heuristic for finding a start value. Works well in
# # many cases, and when it does not work we have only lost one
# # iteration and restart at 0.0.
# start_mu = np.sqrt(min(n, p)) \
# / max(1.0, self.c) \
# / max(0.1, self.tau)
# elif max(n, p) >= 100:
# start_mu = 1.0
# else:
start_mu = 0.0
mu = newton.run(F(self.tau, self._S, self.c), start_mu)
# Seems to be possible because of machine precision.
if mu <= consts.FLOAT_EPSILON:
return beta
if p > n:
l2 = ((self._S - self.tau) \
* (1.0 / ((1.0 - self.tau) / n_))).reshape((n,))
a = 1.0 + 2.0 * mu * self.tau
b = 2.0 * mu * (1.0 - self.tau) / n_
y = (beta_ - np.dot(self.X.T, np.dot(self._U,
(np.reciprocal(l2 + (a / b)) \
* np.dot(self._U.T,
np.dot(self.X, beta_)).T).T))) * (1. / a)
else: # The case when n >= p
l2 = ((self._S - self.tau)
* (1.0 / ((1.0 - self.tau) / n_))).reshape((p,))
a = 1.0 + 2.0 * mu * self.tau
b = 2.0 * mu * (1.0 - self.tau) / n_
y = np.dot(self._V.T, (np.reciprocal(a + b * l2) * atilde.T).T)
if self.penalty_start > 0:
y = np.vstack((beta[:self.penalty_start, :],
y))
return y
def _compute_value(self, beta):
"""Helper function to compute the function value.
Note that beta must already be sliced!
"""
if self.unbiased:
n = float(self.X.shape[0] - 1.0)
else:
n = float(self.X.shape[0])
Xbeta = np.dot(self.X, beta)
val = self.tau * np.dot(beta.T, beta) \
+ ((1.0 - self.tau) / n) * np.dot(Xbeta.T, Xbeta)
return val[0, 0]
class RidgeSquaredError(properties.CompositeFunction,
properties.Gradient,
properties.StronglyConvex,
properties.Penalty,
properties.ProximalOperator):
"""Represents a ridge squared error penalty, i.e. a representation of
f(x) = l.((1 / (2 * n)) * ||Xb - y||²_2 + (k / 2) * ||b||²_2),
where ||.||²_2 is the L2 norm.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
X : Numpy array (n-by-p). The regressor matrix.
y : Numpy array (n-by-1). The regressand vector.
k : Non-negative float. The ridge parameter.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first
index to be penalised. Default is 0, all columns are included.
mean : Boolean. Whether to compute the squared loss or the mean
squared loss. Default is True, the mean squared loss.
"""
def __init__(self, X, y, k, l=1.0, penalty_start=0, mean=True):
self.l = max(0.0, float(l))
self.X = X
self.y = y
self.k = max(0.0, float(k))
self.penalty_start = max(0, int(penalty_start))
self.mean = bool(mean)
self.reset()
def reset(self):
"""Free any cached computations from previous use of this Function.
From the interface "Function".
"""
self._Xty = None
self._s2 = None
self._V = None
self._lambda_max = None
self._lambda_min = None
def f(self, x):
"""Function value.
From the interface "Function".
Parameters
----------
x : Numpy array. Regression coefficient vector. The point at which to
evaluate the function.
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
# Xx = np.dot(self.X[:, self.penalty_start:], x_)
Xx_ = np.dot(self.X, x) \
- np.dot(self.X[:, :self.penalty_start],
x[:self.penalty_start, :])
# print "penalties.RidgeSquaredError, DIFF:", \
# np.linalg.norm(Xx - Xx_)
else:
x_ = x
Xx_ = np.dot(self.X, x_)
if self.mean:
d = 2.0 * float(self.X.shape[0])
else:
d = 2.0
f = (1.0 / d) * np.sum((Xx_ - self.y) ** 2) \
+ (self.k / 2.0) * np.sum(x_ ** 2)
return self.l * f
def grad(self, x):
"""Gradient of the function at beta.
From the interface "Gradient".
Parameters
----------
x : Numpy array. The point at which to evaluate the gradient.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import RidgeRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.rand(100, 1)
>>> rr = RidgeRegression(X=X, y=y, k=3.14159265)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(rr.grad(beta)
... - rr.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
X_ = self.X[:, self.penalty_start:]
grad = np.dot(X_.T, np.dot(X_, x_) - self.y)
del X_
else:
x_ = x
grad = np.dot((np.dot(self.X, x_) - self.y).T, self.X).T
if self.mean:
grad *= 1.0 / float(self.X.shape[0])
grad += self.k * x_
if self.penalty_start > 0:
grad = np.vstack((np.zeros((self.penalty_start, 1)),
self.l * grad))
else:
grad = self.l * grad
return grad
def L(self):
"""Lipschitz constant of the gradient.
From the interface "LipschitzContinuousGradient".
"""
if self._lambda_max is None:
s = np.linalg.svd(self.X, full_matrices=False, compute_uv=False)
self._lambda_max = np.max(s) ** 2
if len(s) < self.X.shape[1]:
self._lambda_min = 0.0
else:
self._lambda_min = np.min(s) ** 2
if self.mean:
self._lambda_max /= float(self.X.shape[0])
self._lambda_min /= float(self.X.shape[0])
return self.l * (self._lambda_max + self.k)
def parameter(self):
"""Returns the strongly convex parameter for the function.
From the interface "StronglyConvex".
"""
if self._lambda_min is None:
self._lambda_max = None
self.L() # Precompute
return self.l * (self._lambda_min + self.k)
def prox(self, x, factor=1.0, eps=consts.TOLERANCE, max_iter=100):
"""The proximal operator associated to this function.
Parameters
----------
x : Numpy array (p-by-1). The point at which to apply the proximal
operator.
factor : Positive float. A factor by which the Lagrange multiplier is
scaled. This is usually the step size.
eps : Positive float. This is the stopping criterion for inexact
proximal methods, where the proximal operator is approximated
numerically.
max_iter : Positive integer. This is the maximum number of iterations
for inexact proximal methods, where the proximal operator is
approximated numerically.
index : Non-negative integer. For multivariate functions, this
identifies the variable for which the proximal operator is
associated.
From the interface "ProximalOperator".
"""
# y = inv(X'.X + (k + 1 / l).I).((1 / l).x + X'.v)
n, p = self.X.shape
rho = 1.0 / self.l
if self._Xty is None:
self._Xty = np.dot(self.X.T, self.y)
v = rho * x + self._Xty
c = self.k + rho
if n >= p:
if self._s2 is None or self._V is None:
# # Ridge solution
# XtX_klI = np.dot(self.X.T, self.X)
# index = np.arange(min(XtX_klI.shape))
# XtX_klI[index, index] += c
# self._inv_XtX_klI = np.linalg.inv(XtX_klI)
_, self._s2, self._V = np.linalg.svd(self.X)
self._V = self._V.T
self._s2 = self._s2.reshape((p, 1)) ** 2
# _inv_XtX_klI = np.dot(V, np.reciprocal(c + s ** 2) * V.T)
# y = np.dot(self._inv_XtX_klI, v)
y = np.dot(self._V,
np.reciprocal(c + self._s2) * np.dot(self._V.T, v))
else: # If n < p
if self._s2 is None or self._V is None:
# # Ridge solution using the Woodbury matrix identity.
# XXt_klI = np.dot(self.X, self.X.T)
# index = np.arange(min(XXt_klI.shape))
# XXt_klI[index, index] += c
# self._inv_XtX_klI = np.linalg.inv(XXt_klI)
_, self._s2, self._V = np.linalg.svd(self.X.T)
self._V = self._V.T
self._s2 = self._s2.reshape((n, 1)) ** 2
# _inv_XtX_klI = np.dot(V, np.reciprocal(c + s ** 2) * V.T)
# y = (v - np.dot(self.X.T, np.dot(self._inv_XtX_klI,
# np.dot(self.X, v)))) / c
Xv = np.dot(self.X, v)
y = (v - np.dot(self.X.T, np.dot(self._V,
np.reciprocal(c + self._s2) \
* np.dot(self._V.T, Xv)))) \
* (1.0 / c)
return y
class LinearConstraint(properties.IndicatorFunction,
properties.Constraint,
properties.ProjectionOperator):
"""Represents a linear constraint
a'x + c = b,
where x is the variable.
Parameters
----------
a : numpy array
The linear operator.
b : float
The response.
c : float
The offset.
"""
def __init__(self, a, b, c, penalty_start=0):
self.a = a
self.b = float(b)
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
self.reset()
def reset(self):
pass
def f(self, x):
"""The function value of this indicator function. The function value is
0 if the constraint is feasible and infinite otherwise.
Parameters
----------
x : numpy array
The point at which to evaluate the function.
"""
if self.feasible(x):
return 0.0
else:
return np.inf
def feasible(self, x):
"""Feasibility of the constraint at point x.
From the interface Constraint.
Parameters
----------
x : numpy array
The point at which to evaluate the feasibility.
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
ax = np.dot(self.a.T, x_)
return maths.norm((ax + self.c) - self.b) < consts.TOLERANCE
def proj(self, x, **kwargs):
"""The projection operator corresponding to the function.
From the interface ProjectionOperator.
Parameters
----------
x : numpy array
The point to project onto the feasible set.
"""
# Check feasibility
if self.feasible(x):
return x
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
def _f(t):
xx = x_ - t * self.a
return (np.dot(xx.T, self.a)[0, 0] + self.c) - self.b
# tmin = 0.0
# tmax = tmax1 = tmax2 = 1.0
# fmin = _f(tmin)
# sgn_fmin = np.sign(fmin)
# fmax1 = _f(tmax1)
# if np.sign(fmax1) == sgn_fmin:
# it = 0
# while True:
# tmax1 /= 2.0
# fmax1 = _f(tmax1)
# if np.sign(fmax1) != sgn_fmin:
# tmax = tmax1
# break
#
# tmax2 *= 2.0
# fmax2 = _f(tmax2)
# if np.sign(fmax2) != sgn_fmin:
# tmax = tmax2
# break
# it += 1
# if it > 1000:
# asdf = 1
#
# t = optimize.bisect(_f, tmin, tmax)
t = optimize.fsolve(_f, 0.5)
return x_ - t * self.a
class LinearVariableConstraint(properties.IndicatorFunction,
properties.Constraint,
properties.ProjectionOperator):
"""Represents a linear constraint
r = Ax,
where both x and r are variables.
Parameters
----------
A : Numpy or sparse scipy array. The linear map between x and r.
"""
def __init__(self, A, penalty_start=0, solver=linalgs.SparseSolver()):
self.A = A
self.penalty_start = max(0, int(penalty_start))
self.solver = solver
self.reset()
def reset(self):
self._inv_AtA_I = None
def f(self, xr):
"""The function value of this indicator function. The function value is
0 if the constraint is feasible and infinite otherwise.
Parameters
----------
xr : List or tuple with two elements, numpy arrays. The first element
is x and the second is r.
"""
if self.feasible(xr):
return 0.0
else:
return np.inf
def feasible(self, xr):
"""Feasibility of the constraint at points x and r.
From the interface Constraint.
Parameters
----------
xr : List or tuple with two elements, numpy arrays. The first element
is x and the second is r.
"""
if isinstance(xr, linalgs.MultipartArray):
xr = xr.get_parts()
x = xr[0]
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
r = xr[1]
Ax = [0.0] * len(self.A)
for i in range(len(self.A)):
Ax[i] = self.A[i].dot(x_)
Ax = np.vstack(Ax)
return maths.norm(Ax - r) < consts.TOLERANCE
def proj(self, xr):
"""The projection operator corresponding to the function.
From the interface ProjectionOperator.
Parameters
----------
xr : List or tuple with two elements, numpy arrays. The first element
is x and the second is r.
"""
if isinstance(xr, linalgs.MultipartArray):
xr = xr.get_parts()
x = xr[0]
p = x.shape[0]
# The inverse of a 1000-by-1000 matrix takes roughly 1 second.
# This is the cut-off point on my computer for where it is no more
# feasible to compute the inverse. After this, the time to compute the
# inverse grows very quickly.
p_limit = 1000
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
r = xr[1]
# Save a few calls to __getitem__.
A = self.A
# Check feasibility
Ax = [0.0] * len(A)
for i in range(len(A)):
Ax[i] = A[i].dot(x_)
Ax = np.vstack(Ax)
if maths.norm(Ax - r) < consts.TOLERANCE:
return xr
# Precompute
if self._inv_AtA_I is None:
AtA = A[0].T.dot(A[0])
if len(A) >= 2:
AtA = AtA + A[1].T.dot(A[1])
if len(A) >= 3:
AtA = AtA + A[2].T.dot(A[2])
if len(A) >= 4:
AtA = AtA + A[3].T.dot(A[3])
if len(A) > 4:
for i in range(4, len(A)):
AtA = AtA + A[i].T.dot(A[i])
AtA_I = AtA + sparse.eye(*AtA.shape, format=AtA.format)
if p >= p_limit:
self._inv_AtA_I = AtA_I.todia()
else:
self._inv_AtA_I = np.linalg.inv(AtA_I.toarray())
Atr = 0.0
start = 0
end = 0
for i in range(len(A)):
end += A[i].shape[0]
Atr += A[i].T.dot(r[start:end])
start = end
if p >= p_limit:
z = self.solver.solve(self._inv_AtA_I, Atr + x_)
else:
z = np.dot(self._inv_AtA_I, Atr + x_)
Az = [0.0] * len(A)
for i in range(len(A)):
Az[i] = A[i].dot(z)
s = np.vstack(Az)
return linalgs.MultipartArray([z, s], vertical=True)
class SufficientDescentCondition(properties.Function,
properties.Constraint):
def __init__(self, function, p, c=1e-4):
"""The sufficient condition
f(x + a * p) <= f(x) + c * a * grad(f(x))'p
for descent. This condition is sometimes called the Armijo condition.
Parameters
----------
p : numpy.ndarray
The descent direction.
c : float
A float satisfying 0 < c < 1. A constant for the condition. Should
be "small".
"""
self.function = function
self.p = p
        self.c = min(max(0.0, float(c)), 1.0)  # clamp c into [0, 1]; the condition assumes 0 < c < 1
def f(self, x, a):
return self.function.f(x + a * self.p)
def feasible(self, xa):
"""Feasibility of the constraint at point x with step a.
From the interface "Constraint".
"""
x = xa[0]
a = xa[1]
f_x_ap = self.function.f(x + a * self.p)
f_x = self.function.f(x)
grad_p = np.dot(self.function.grad(x).T, self.p)[0, 0]
# print "f_x_ap = %.10f, f_x = %.10f, grad_p = %.10f, feas = %.10f" \
# % (f_x_ap, f_x, grad_p, f_x + self.c * a * grad_p)
# if grad_p >= 0.0:
# pass
feasible = f_x_ap <= f_x + self.c * a * grad_p
return feasible
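# --- Illustrative sketch (added for clarity; not part of the original
# module). The Armijo condition checked above,
#     f(x + a * p) <= f(x) + c * a * grad(f(x))'p,
# demonstrated on the simple quadratic f(x) = 0.5 * ||x||^2 with the
# steepest-descent direction p = -grad(f(x)). All numbers are made up.
import numpy as np

_x0 = np.array([[2.0], [-1.0]])
_p0 = -_x0                                   # p = -grad(f(x0)) for this f
_c0 = 1e-4
_grad_p0 = float(np.dot(_x0.T, _p0))         # grad(f(x0))'p = -||x0||^2

for _a0 in (1.0, 0.5, 0.1):
    _lhs = 0.5 * float(np.dot((_x0 + _a0 * _p0).T, _x0 + _a0 * _p0))
    _rhs = 0.5 * float(np.dot(_x0.T, _x0)) + _c0 * _a0 * _grad_p0
    # For this quadratic, every step size in (0, 2) satisfies the condition.
    assert _lhs <= _rhs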
#class WolfeCondition(Function, Constraint):
#
# def __init__(self, function, p, c1=1e-4, c2=0.9):
# """
# Parameters:
# ----------
# c1 : Float. 0 < c1 < c2 < 1. A constant for the condition. Should be
# small.
# c2 : Float. 0 < c1 < c2 < 1. A constant for the condition. Depends on
# the minimisation algorithms. For Newton or quasi-Newton
# descent directions, 0.9 is a good choice. 0.1 is appropriate
# for nonlinear conjugate gradient.
# """
# self.function = function
# self.p = p
# self.c1 = c1
# self.c2 = c2
#
# def f(self, x, a):
#
# return self.function.f(x + a * self.p)
#
# """Feasibility of the constraint at point x.
#
# From the interface "Constraint".
# """
# def feasible(self, x, a):
#
# grad_p = np.dot(self.function.grad(x).T, self.p)[0, 0]
# cond1 = self.function.f(x + a * self.p) \
# <= self.function.f(x) + self.c1 * a * grad_p
# cond2 = np.dot(self.function.grad(x + a * self.p).T, self.p)[0, 0] \
# >= self.c2 * grad_p
#
# return cond1 and cond2
#
#
#class StrongWolfeCondition(Function, Constraint):
#
# def __init__(self, function, p, c1=1e-4, c2=0.9):
# """
# Parameters:
# ----------
# c1 : Float. 0 < c1 < c2 < 1. A constant for the condition. Should be
# small.
# c2 : Float. 0 < c1 < c2 < 1. A constant for the condition. Depends on
# the minimisation algorithms. For Newton or quasi-Newton
# descent directions, 0.9 is a good choice. 0.1 is appropriate
# for nonlinear conjugate gradient.
# """
# self.function = function
# self.p = p
# self.c1 = c1
# self.c2 = c2
#
# def f(self, x, a):
#
# return self.function.f(x + a * self.p)
#
# """Feasibility of the constraint at point x.
#
# From the interface "Constraint".
# """
# def feasible(self, x, a):
#
# grad_p = np.dot(self.function.grad(x).T, self.p)[0, 0]
# cond1 = self.function.f(x + a * self.p) \
# <= self.function.f(x) + self.c1 * a * grad_p
# grad_x_ap = self.function.grad(x + a * self.p)
# cond2 = abs(np.dot(grad_x_ap.T, self.p)[0, 0]) \
# <= self.c2 * abs(grad_p)
#
# return cond1 and cond2
class KernelL2Squared(properties.AtomicFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.Penalty,
properties.ProximalOperator):
"""The proximal operator of the squared L2 function with a penalty
formulation
f(\beta) = (l / 2).\beta'.K.\beta,
where K is a Mercer kernel.
Parameters
----------
l : float
Must be non-negative. The Lagrange multiplier, or regularisation
constant, of the function.
kernel : kernel object, optional
A Mercer kernel of type algorithms.utils.Kernel. Default (when None) is
a linear kernel.
penalty_start : int, optional
Must be non-negative. The number of columns, variables etc., to be
exempt from penalisation. Equivalently, the first index to be
penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, kernel=None, penalty_start=0):
self.l = max(0.0, float(l))
if kernel is None:
import parsimony.algorithms.utils as alg_utils
self.kernel = alg_utils.LinearKernel()
else:
self.kernel = kernel
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
From the interface "Function".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return (self.l / 2.0) * np.dot(beta_.T,
self.kernel.dot(beta_))[0, 0]
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import KernelL2Squared
>>> from parsimony.algorithms.utils import LinearKernel
>>> np.random.seed(42)
>>>
>>> X = np.random.randn(5, 10)
>>> beta = np.random.rand(5, 1)
>>> l2 = KernelL2Squared(l=3.14159, kernel=LinearKernel(X=X))
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
>>>
>>> np.random.seed(42)
>>>
>>> X = np.random.randn(50, 100)
>>> beta = np.random.rand(50, 1)
>>> l2 = KernelL2Squared(l=2.71828, kernel=LinearKernel(X=X))
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
if self.penalty_start > 0:
beta_ = beta.copy()
beta_[:self.penalty_start, :] = 0.0
grad = self.l * self.kernel.dot(beta_)
grad[:self.penalty_start, :] = 0.0
else:
grad = self.l * self.kernel.dot(beta)
return grad
def L(self):
"""Lipschitz constant of the gradient.
"""
# TODO: Implement this!
raise RuntimeError("Not implemented yet!")
# return self.l
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
# TODO: Implement this!
raise RuntimeError("Not implemented yet!")
# l = self.l * factor
# if self.penalty_start > 0:
# beta_ = beta[self.penalty_start:, :]
# else:
# beta_ = beta
#
# if self.penalty_start > 0:
# prox = np.vstack((beta[:self.penalty_start, :],
# beta_ * (1.0 / (1.0 + l))))
# else:
# prox = beta_ * (1.0 / (1.0 + l))
#
# return prox
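# --- Illustrative sketch (added for clarity; not part of the original
# module). For a linear kernel, K = X.X', so the penalty above reduces to
#     f(beta) = (l / 2) * beta'.K.beta = (l / 2) * ||X'.beta||^2.
# Plain NumPy is used here instead of LinearKernel to keep the example
# self-contained; the data are made up.
import numpy as np

_rng = np.random.RandomState(0)
_X_ex = _rng.randn(5, 10)
_beta_ex = _rng.rand(5, 1)
_K_ex = _X_ex.dot(_X_ex.T)                     # linear (Gram) kernel
_l_ex = 2.5

_f_kernel = (_l_ex / 2.0) * float(_beta_ex.T.dot(_K_ex).dot(_beta_ex))
_f_direct = (_l_ex / 2.0) * float(np.linalg.norm(_X_ex.T.dot(_beta_ex)) ** 2)
assert abs(_f_kernel - _f_direct) < 1e-8
# The gradient is l * K.beta, which is what grad() above computes when
# penalty_start == 0.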
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30.999125
| 80
| 0.507423
|
e197bd0ba3e1b16c9dbe492d194b0b5faff388a8
| 4,104
|
py
|
Python
|
purity_fb/purity_fb_1dot6/models/array_response.py
|
unixtreme/purity_fb_python_client
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot6/models/array_response.py
|
unixtreme/purity_fb_python_client
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot6/models/array_response.py
|
unixtreme/purity_fb_python_client
|
e836afe9804ffa99f74bf4b5202f181c3c04d9df
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Purity//FB REST Client
    Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.6
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ArrayResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[PureArray]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
ArrayResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this ArrayResponse.
pagination information, only available in GET requests
:return: The pagination_info of this ArrayResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this ArrayResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this ArrayResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this ArrayResponse.
a list of array objects
:return: The items of this ArrayResponse.
:rtype: list[PureArray]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this ArrayResponse.
a list of array objects
:param items: The items of this ArrayResponse.
:type: list[PureArray]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ArrayResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 26.649351
| 203
| 0.56847
|
6157065b8203f67e23f1148cfedff2871ac963ca
| 137
|
py
|
Python
|
Mundo 1_Fundamentos/Desafio_16.py
|
VictorOliveira02/Desafios-Python3-Curso-em-Video
|
53ee8bd814b816f3a21936677ef3f155b582843f
|
[
"MIT"
] | null | null | null |
Mundo 1_Fundamentos/Desafio_16.py
|
VictorOliveira02/Desafios-Python3-Curso-em-Video
|
53ee8bd814b816f3a21936677ef3f155b582843f
|
[
"MIT"
] | null | null | null |
Mundo 1_Fundamentos/Desafio_16.py
|
VictorOliveira02/Desafios-Python3-Curso-em-Video
|
53ee8bd814b816f3a21936677ef3f155b582843f
|
[
"MIT"
] | null | null | null |
from math import trunc
n = float(input('Digite um numero com virgula: '))
print(f'O numero {n} tem a parte inteira igual a {trunc(n)}')
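# --- Illustrative sketch (added for clarity; not part of the original
# exercise). math.trunc() drops the fractional part towards zero, unlike
# math.floor(), which rounds towards negative infinity:
from math import floor, trunc
assert trunc(3.7) == 3 and trunc(-3.7) == -3
assert floor(-3.7) == -4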
| 27.4
| 61
| 0.70073
|
998b62851b6a5a0ec83a35ebc2c6bae6e74d3f8e
| 2,244
|
py
|
Python
|
catalyst/contrib/models/cv/segmentation/encoder/unet.py
|
stjordanis/catalyst-1
|
93eedf0b9520bf1f83f63b13d6818df2a1e85b33
|
[
"Apache-2.0"
] | 4
|
2019-12-14T07:27:09.000Z
|
2021-03-23T14:34:37.000Z
|
catalyst/contrib/models/cv/segmentation/encoder/unet.py
|
Ran485/catalyst
|
84bc7576c981278f389279d87dda85dd66a758b6
|
[
"Apache-2.0"
] | null | null | null |
catalyst/contrib/models/cv/segmentation/encoder/unet.py
|
Ran485/catalyst
|
84bc7576c981278f389279d87dda85dd66a758b6
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# @TODO: code formatting issue for 20.07 release
from typing import List
import torch
from torch import nn
from catalyst.contrib.models.cv.segmentation.blocks.unet import EncoderDownsampleBlock
from catalyst.contrib.models.cv.segmentation.encoder.core import _take, EncoderSpec # noqa: E501
class UnetEncoder(EncoderSpec):
"""@TODO: Docs. Contribution is welcome."""
def __init__(
self,
in_channels: int,
num_channels: int,
num_blocks: int,
layers_indices: List[int] = None,
**kwargs,
):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
self.num_filters = num_channels
self.num_blocks = num_blocks
self._layers_indices = layers_indices or list(range(num_blocks))
self._channels = [self.num_filters * 2 ** i for i in range(self.num_blocks)]
self._strides = [2 ** (i) for i in range(self.num_blocks)]
self._channels = _take(self._channels, self._layers_indices)
self._strides = _take(self._strides, self._layers_indices)
for i in range(num_blocks):
in_channels = in_channels if not i else num_channels * 2 ** (i - 1)
out_channels = num_channels * 2 ** i
self.add_module(
f"block{i + 1}",
EncoderDownsampleBlock(in_channels, out_channels, first_stride=1, **kwargs),
)
if i != self.num_blocks - 1:
self.add_module(f"pool{i + 1}", nn.MaxPool2d(2, 2))
@property
def out_channels(self) -> List[int]:
"""Number of channels produced by the block."""
return self._channels
@property
def out_strides(self) -> List[int]:
"""@TODO: Docs. Contribution is welcome."""
return self._strides
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
"""Forward call."""
output = []
for i in range(self.num_blocks):
x = self.__getattr__(f"block{i + 1}")(x)
output.append(x)
if i != self.num_blocks - 1:
x = self.__getattr__(f"pool{i + 1}")(x)
output = _take(output, self._layers_indices)
return output
__all__ = ["UnetEncoder"]
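# --- Illustrative sketch (added for clarity; not part of the original
# module). The channel/stride bookkeeping above, worked out for a
# hypothetical configuration num_channels=64, num_blocks=4: block i
# produces num_channels * 2**i feature maps and its output is downsampled
# by a factor of 2**i relative to the input.
_example_channels = [64 * 2 ** i for i in range(4)]
_example_strides = [2 ** i for i in range(4)]
assert _example_channels == [64, 128, 256, 512]
assert _example_strides == [1, 2, 4, 8]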
| 33
| 97
| 0.607398
|
cc18450ad8570f5ccb8a091de6a58e219e1897b1
| 5,266
|
py
|
Python
|
pymongo/server_selectors.py
|
ldennis/mongo-python-driver
|
cc029a1e6208863eaab453777363d3935b927f32
|
[
"Apache-2.0"
] | 2,593
|
2015-01-02T10:53:55.000Z
|
2022-03-28T15:42:47.000Z
|
pymongo/server_selectors.py
|
ldennis/mongo-python-driver
|
cc029a1e6208863eaab453777363d3935b927f32
|
[
"Apache-2.0"
] | 356
|
2015-02-05T15:57:18.000Z
|
2022-03-31T19:12:30.000Z
|
pymongo/server_selectors.py
|
ldennis/mongo-python-driver
|
cc029a1e6208863eaab453777363d3935b927f32
|
[
"Apache-2.0"
] | 774
|
2015-01-05T09:30:07.000Z
|
2022-03-30T03:36:25.000Z
|
# Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Criteria to select some ServerDescriptions from a TopologyDescription."""
from pymongo.server_type import SERVER_TYPE
class Selection(object):
"""Input or output of a server selector function."""
@classmethod
def from_topology_description(cls, topology_description):
known_servers = topology_description.known_servers
primary = None
for sd in known_servers:
if sd.server_type == SERVER_TYPE.RSPrimary:
primary = sd
break
return Selection(topology_description,
topology_description.known_servers,
topology_description.common_wire_version,
primary)
def __init__(self,
topology_description,
server_descriptions,
common_wire_version,
primary):
self.topology_description = topology_description
self.server_descriptions = server_descriptions
self.primary = primary
self.common_wire_version = common_wire_version
def with_server_descriptions(self, server_descriptions):
return Selection(self.topology_description,
server_descriptions,
self.common_wire_version,
self.primary)
def secondary_with_max_last_write_date(self):
secondaries = secondary_server_selector(self)
if secondaries.server_descriptions:
return max(secondaries.server_descriptions,
key=lambda sd: sd.last_write_date)
@property
def primary_selection(self):
primaries = [self.primary] if self.primary else []
return self.with_server_descriptions(primaries)
@property
def heartbeat_frequency(self):
return self.topology_description.heartbeat_frequency
@property
def topology_type(self):
return self.topology_description.topology_type
def __bool__(self):
return bool(self.server_descriptions)
def __getitem__(self, item):
return self.server_descriptions[item]
def any_server_selector(selection):
return selection
def readable_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if s.is_readable])
def writable_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if s.is_writable])
def secondary_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions
if s.server_type == SERVER_TYPE.RSSecondary])
def arbiter_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions
if s.server_type == SERVER_TYPE.RSArbiter])
def writable_preferred_server_selector(selection):
"""Like PrimaryPreferred but doesn't use tags or latency."""
return (writable_server_selector(selection) or
secondary_server_selector(selection))
def apply_single_tag_set(tag_set, selection):
"""All servers matching one tag set.
A tag set is a dict. A server matches if its tags are a superset:
A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}.
The empty tag set {} matches any server.
"""
def tags_match(server_tags):
for key, value in tag_set.items():
if key not in server_tags or server_tags[key] != value:
return False
return True
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if tags_match(s.tags)])
def apply_tag_sets(tag_sets, selection):
"""All servers match a list of tag sets.
tag_sets is a list of dicts. The empty tag set {} matches any server,
and may be provided at the end of the list as a fallback. So
[{'a': 'value'}, {}] expresses a preference for servers tagged
{'a': 'value'}, but accepts any server if none matches the first
preference.
"""
for tag_set in tag_sets:
with_tag_set = apply_single_tag_set(tag_set, selection)
if with_tag_set:
return with_tag_set
return selection.with_server_descriptions([])
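# --- Illustrative sketch (added for clarity; not part of the original
# module). The superset semantics described in apply_single_tag_set(),
# shown with plain dicts (the server tags below are made up):
def _tags_match_example(server_tags, tag_set):
    return all(server_tags.get(k) == v for k, v in tag_set.items())

_server_tags = {'dc': 'ny', 'rack': '1'}
assert _tags_match_example(_server_tags, {'dc': 'ny'})      # subset matches
assert _tags_match_example(_server_tags, {})                # {} matches any server
assert not _tags_match_example(_server_tags, {'dc': 'sf'})  # value mismatch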
def secondary_with_tags_server_selector(tag_sets, selection):
"""All near-enough secondaries matching the tag sets."""
return apply_tag_sets(tag_sets, secondary_server_selector(selection))
def member_with_tags_server_selector(tag_sets, selection):
"""All near-enough members matching the tag sets."""
return apply_tag_sets(tag_sets, readable_server_selector(selection))
| 33.974194
| 76
| 0.695404
|
008214753749e467da3f3482027bdf00e79b4049
| 7,296
|
py
|
Python
|
fairmodels/plotnine/geoms/geom_text.py
|
Locust2520/python-fairmodels
|
0572f7c205b67c148bdc83b8dc4eaf70c06468a5
|
[
"MIT"
] | null | null | null |
fairmodels/plotnine/geoms/geom_text.py
|
Locust2520/python-fairmodels
|
0572f7c205b67c148bdc83b8dc4eaf70c06468a5
|
[
"MIT"
] | 1
|
2020-10-02T21:43:06.000Z
|
2020-10-15T22:52:39.000Z
|
fairmodels/plotnine/geoms/geom_text.py
|
Locust2520/python-fairmodels
|
0572f7c205b67c148bdc83b8dc4eaf70c06468a5
|
[
"MIT"
] | null | null | null |
from contextlib import suppress
from matplotlib.text import Text
try:
from adjustText import adjust_text
except ImportError:
HAS_ADJUST_TEXT = False
else:
HAS_ADJUST_TEXT = True
from ..utils import to_rgba
from ..doctools import document
from ..positions import position_nudge
from ..exceptions import PlotnineError
from .geom import geom
# Note: hjust & vjust are parameters instead of aesthetics
# due to a limitation imposed by MPL
# see: https://github.com/matplotlib/matplotlib/pull/1181
@document
class geom_text(geom):
"""
Textual annotations
{usage}
Parameters
----------
{common_parameters}
parse : bool (default: False)
If :py:`True`, the labels will be rendered with
`latex <http://matplotlib.org/users/usetex.html>`_.
family : str (default: None)
Font family.
fontweight : int or str (default: normal)
Font weight.
fontstyle : str (default: normal)
Font style. One of *normal*, *italic* or *oblique*
nudge_x : float (default: 0)
Horizontal adjustment to apply to the text
nudge_y : float (default: 0)
Vertical adjustment to apply to the text
adjust_text: dict (default: None)
        Parameters passed to :class:`adjustText.adjust_text`, which repels
        overlapping texts. This parameter takes priority over
``nudge_x`` and ``nudge_y``.
See https://github.com/Phlya/adjustText/wiki .
format_string : str (default: None)
If not :py:`None`, then the text if formatted with this
string using :meth:`str.format`
path_effects : list (default: None)
If not :py:`None`, then the text will use these effects.
See `path_effects
<https://matplotlib.org/tutorials/advanced/patheffects_guide.html>`_
documentation for more details.
See Also
--------
matplotlib.text.Text
matplotlib.patheffects
"""
_aesthetics_doc = """
{aesthetics_table}
.. rubric:: Aesthetics Descriptions
ha
Horizontal alignment. One of *left*, *center* or *right.*
va
Vertical alignment. One of *top*, *center*, *bottom*, *baseline*.
"""
DEFAULT_AES = {'alpha': 1, 'angle': 0, 'color': 'black',
'size': 11, 'lineheight': 1.2, 'ha': 'center',
'va': 'center'}
REQUIRED_AES = {'label', 'x', 'y'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'parse': False,
'family': None, 'fontweight': 'normal',
'fontstyle': 'normal', 'nudge_x': 0, 'nudge_y': 0,
'adjust_text': None,
'format_string': None,
'path_effects': None}
def __init__(self, mapping=None, data=None, **kwargs):
nudge_kwargs = {}
adjust_text = kwargs.get('adjust_text', None)
if adjust_text is None:
with suppress(KeyError):
nudge_kwargs['x'] = kwargs['nudge_x']
with suppress(KeyError):
nudge_kwargs['y'] = kwargs['nudge_y']
if nudge_kwargs:
kwargs['position'] = position_nudge(**nudge_kwargs)
elif not HAS_ADJUST_TEXT:
raise PlotnineError(
"To use adjust_text you must install the adjustText "
"package."
)
        # Accommodate the old names
if mapping and 'hjust' in mapping:
mapping['ha'] = mapping.pop('hjust')
if mapping and 'vjust' in mapping:
mapping['va'] = mapping.pop('vjust')
geom.__init__(self, mapping, data, **kwargs)
def setup_data(self, data):
parse = self.params['parse']
fmt = self.params['format_string']
# format
if fmt:
data['label'] = [fmt.format(l) for l in data['label']]
# Parse latex
if parse:
data['label'] = ['${}$'.format(l) for l in data['label']]
return data
def draw_panel(self, data, panel_params, coord, ax, **params):
super().draw_panel(data, panel_params, coord, ax, **params)
@staticmethod
def draw_group(data, panel_params, coord, ax, **params):
data = coord.transform(data, panel_params)
# Bind color and alpha
color = to_rgba(data['color'], data['alpha'])
# Create a dataframe for the plotting data required
# by ax.text
df = data[['x', 'y', 'size']].copy()
df['s'] = data['label']
df['rotation'] = data['angle']
df['linespacing'] = data['lineheight']
df['color'] = color
df['ha'] = data['ha']
df['va'] = data['va']
df['family'] = params['family']
df['fontweight'] = params['fontweight']
df['fontstyle'] = params['fontstyle']
df['zorder'] = params['zorder']
df['clip_on'] = True
# 'boxstyle' indicates geom_label so we need an MPL bbox
draw_label = 'boxstyle' in params
if draw_label:
fill = to_rgba(data.pop('fill'), data['alpha'])
if isinstance(fill, tuple):
fill = [list(fill)] * len(data['x'])
df['facecolor'] = fill
if params['boxstyle'] in ('round', 'round4'):
boxstyle = '{},pad={},rounding_size={}'.format(
params['boxstyle'],
params['label_padding'],
params['label_r'])
elif params['boxstyle'] in ('roundtooth', 'sawtooth'):
boxstyle = '{},pad={},tooth_size={}'.format(
params['boxstyle'],
params['label_padding'],
params['tooth_size'])
else:
boxstyle = '{},pad={}'.format(
params['boxstyle'],
params['label_padding'])
bbox = {'linewidth': params['label_size'],
'boxstyle': boxstyle}
else:
bbox = {}
# For labels add a bbox
for i in range(len(data)):
kw = df.iloc[i].to_dict()
if draw_label:
kw['bbox'] = bbox
kw['bbox']['edgecolor'] = params['boxcolor'] or kw['color']
kw['bbox']['facecolor'] = kw.pop('facecolor')
text_elem = ax.text(**kw)
if params['path_effects']:
text_elem.set_path_effects(params['path_effects'])
if params['adjust_text']:
adjust_text(list(ax.texts), ax=ax, **params['adjust_text'])
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw letter 'a' in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
key = Text(x=0.5*da.width,
y=0.5*da.height,
text='a',
alpha=data['alpha'],
size=data['size'],
family=lyr.geom.params['family'],
color=data['color'],
rotation=data['angle'],
horizontalalignment='center',
verticalalignment='center')
da.add_artist(key)
return da
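# --- Illustrative sketch (added for clarity; not part of the original
# module). A minimal usage example of geom_text, assuming the standard
# top-level plotnine API (ggplot, aes) is available; the data frame and
# format string below are made up:
#
# import pandas as pd
# from plotnine import ggplot, aes, geom_text
#
# df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 4, 9], 'label': list('abc')})
# p = (ggplot(df, aes('x', 'y', label='label'))
#      + geom_text(nudge_y=0.5, format_string='point {}'))
# print(p)  # each label is rendered as "point a", "point b", "point c"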
| 33.013575
| 76
| 0.538514
|
08ab0ba72b9a466a09ba03242e5864993a00dfc7
| 2,504
|
py
|
Python
|
andinopy/__main__.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/__main__.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/__main__.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
import logging
import multiprocessing
import resource
import threading
import time
import andinopy
import gpiozero
import sys
from gpiozero.pins.mock import MockFactory
from andinopy.tcp.andino_tcp import andino_tcp
if len(sys.argv) == 1:
andinopy.initialize_cfg("default.cfg")
elif len(sys.argv) == 2:
# with open(sys.argv[1], encoding="utf8") as fp:
# print(fp.read())
andinopy.initialize_cfg(sys.argv[1])
else:
print("Usage: python3 __main__.py <configfile.cfg")
sys.exit(-1)
log = logging.getLogger("andinopy")
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
if sys.platform.startswith("win"):
gpiozero.Device.pin_factory = MockFactory()
server = andino_tcp()
try:
server.start()
print("andino server started on port 9999")
cores = multiprocessing.cpu_count()
usage = resource.getrusage(resource.RUSAGE_SELF)
user_time = usage.ru_utime
total_user_time = user_time
system_time = usage.ru_stime
total_system_time = system_time
debug_timer = 10
while 1:
time.sleep(debug_timer)
if andinopy.andinopy_logger.isEnabledFor(logging.DEBUG):
usage = resource.getrusage(resource.RUSAGE_SELF)
user_time = usage.ru_utime - total_user_time
total_user_time = usage.ru_utime
system_time = usage.ru_stime - total_system_time
total_system_time = usage.ru_stime
available_time = debug_timer * cores
andinopy.andinopy_logger.debug(f"In {debug_timer}s elapsed times:"
f" User-Time: {user_time:06.4f}s"
f", System-Time: {system_time:06.4f}s"
f", Available-Time: {available_time:06.4f}s (elapsed*cores)"
f", %-Time total used: {(user_time + system_time) / available_time:07.4%}"
f", Max Memory Used: {usage.ru_maxrss / 1024}mb"
f", Active threads: {threading.active_count()}")
except SystemExit as ex:
print("sys exit")
finally:
# Keyboard interrupt ...
print("stopped")
# Stop the server to free the socket in all cases
server.stop()
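# --- Illustrative sketch (added for clarity; not part of the original
# script). The debug block above reports CPU usage as the fraction of time
# that was available across all cores during the sleep window:
#     (user_time + system_time) / (debug_timer * cores).
# With made-up numbers, 0.8s user + 0.2s system over a 10s window on 4
# cores gives 1.0 / 40.0 = 2.5% utilisation.
assert abs((0.8 + 0.2) / (10 * 4) - 0.025) < 1e-12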
| 36.289855
| 118
| 0.613419
|
3f2acbb295cf3c18d638e61cc3e162ba99711b52
| 6,406
|
py
|
Python
|
xblobs/blob.py
|
gregordecristoforo/xblobs
|
961beb1df5ce237a7e99c35b5dfffc1cf8312062
|
[
"MIT"
] | null | null | null |
xblobs/blob.py
|
gregordecristoforo/xblobs
|
961beb1df5ce237a7e99c35b5dfffc1cf8312062
|
[
"MIT"
] | null | null | null |
xblobs/blob.py
|
gregordecristoforo/xblobs
|
961beb1df5ce237a7e99c35b5dfffc1cf8312062
|
[
"MIT"
] | null | null | null |
import numpy as np
class Blob():
"""
A single blob.
"""
def __init__(self, variable, id, n_var = 'n', t_dim = 'time', rad_dim = 'radial',
pol_dim = 'binormal', allow_length_one=True,
):
"""
variable : xbout Dataset containing blob_labels
id : integer between 0 and number of detected blobs
0: refers to the background
1-n: detected blobs
allow_length_one : Bool, default True
If changed to False, raise an exception if the 'Blob' only exists at one
time-point.
        The remaining parameters should be chosen to match those used in the find_blobs() function.
"""
self.variable = variable
self.id = id
self.n_var = n_var
self.t_dim = t_dim
self.rad_dim = rad_dim
self.pol_dim = pol_dim
self.label_field = self.variable['blob_labels'].where(self.variable['blob_labels'] == self.id, drop=True)
if not allow_length_one and self.label_field.sizes[t_dim] == 1:
raise ValueError("Blob only exists at one time point")
self.n_field = self.variable[self.n_var].where(self.variable['blob_labels'] == self.id, drop=True)
com_radial_field = self.n_field[self.rad_dim]*self.n_field
com_binormal_field = self.n_field[self.pol_dim]*self.n_field
total_mass_unnormalized = self.n_field.sum(dim=(self.pol_dim,self.rad_dim))
self.com_radial = com_radial_field.sum(dim=(self.pol_dim,self.rad_dim)).values/total_mass_unnormalized.values
self.com_binormal = com_binormal_field.sum(dim=(self.pol_dim,self.rad_dim)).values/total_mass_unnormalized.values
def t_init(self):
"""
Returns
-------
time when blob is detected : np.scalar
"""
return self.label_field[self.t_dim].values[0]
def lifetime(self):
"""
Returns
-------
lifetime : np.scalar
"""
return self.label_field[self.t_dim].values[-1] - self.label_field[self.t_dim].values[0]
def com(self):
"""
Returns
-------
centre of mass for each time step : 2d np.array
"""
try:
return np.vstack((np.concatenate(self.com_radial), np.concatenate(self.com_binormal)))
except:
return np.vstack(((self.com_radial), (self.com_binormal)))
def velocity(self):
"""
Returns
-------
absolute velocity for each time step : np.array
"""
if(self.com_radial.size == 1):
#print('blob only detected in one frame')
return 0
else:
try:
return ((np.diff(np.concatenate(self.com_radial))/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0]))**2 + \
(np.diff(np.concatenate(self.com_binormal))/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0]))**2)**0.5
except:
return ((np.diff(self.com_radial)/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0]))**2 + \
(np.diff(self.com_binormal)/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0]))**2)**0.5
def velocity_x(self):
"""
Returns
-------
radial velocity for each time step : np.array
"""
if(self.com_radial.size == 1):
#print('blob only detected in one frame')
return 0
else:
try:
return np.diff(np.concatenate(self.com_radial))/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0])
except:
return np.diff((self.com_radial))/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0])
def velocity_y(self):
"""
Returns
-------
poloidal velocity for each time step : np.array
"""
if(self.com_binormal.size == 1):
#print('blob only detected in one frame')
return 0
else:
try:
return np.diff(np.concatenate(self.com_binormal))/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0])
except:
return np.diff(self.com_binormal)/(self.label_field[self.t_dim].values[1] - self.label_field[self.t_dim].values[0])
def amplitude(self):
"""
Returns
-------
array of amplitudes of blob for each timestep : np.array
"""
try:
return np.concatenate(self.n_field.max(dim=(self.rad_dim,self.pol_dim)).values)
except:
return self.n_field.max(dim=(self.rad_dim,self.pol_dim)).values
def max_amplitude(self):
"""
Returns
-------
maximum amplitude in blob's lifetime : np.scalar
"""
return self.n_field.max(dim=(self.t_dim,self.rad_dim,self.pol_dim)).values
def mass(self):
"""
Returns
-------
array of mass of blob for each timestep : np.array
"""
try:
return np.concatenate(self.n_field.sum(dim=(self.rad_dim,self.pol_dim)).values*self.variable[self.rad_dim].values[1]*self.variable[self.pol_dim].values[1])
except:
return self.n_field.sum(dim=(self.rad_dim,self.pol_dim)).values*self.variable[self.rad_dim].values[1]*self.variable[self.pol_dim].values[1]
def average_mass(self):
"""
Returns
-------
time averaged mass of blob : np.scalar
"""
return self.n_field.sum(dim=(self.t_dim,self.rad_dim,self.pol_dim)).values*self.variable[self.rad_dim].values[1]*self.variable[self.pol_dim].values[1] \
/ self.n_field.sum(dim=(self.rad_dim,self.pol_dim)).values.size
def size(self):
"""
Returns
-------
array of size of blob for each timestep : np.array
"""
try:
return np.concatenate(self.label_field.sum(dim=(self.rad_dim,self.pol_dim)).values*self.variable[self.rad_dim].values[1]*self.variable[self.pol_dim].values[1] / self.id)
except:
return self.label_field.sum(dim=(self.rad_dim,self.pol_dim)).values*self.variable[self.rad_dim].values[1]*self.variable[self.pol_dim].values[1] / self.id
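# --- Illustrative sketch (added for clarity; not part of the original
# module). The centre of mass computed in __init__ is a density-weighted
# mean of the coordinates, sum(coord * n) / sum(n). A made-up 1D example:
import numpy as np

_coords = np.array([0.0, 1.0, 2.0])
_density = np.array([1.0, 2.0, 1.0])      # blob symmetric around coord 1
_com = (_coords * _density).sum() / _density.sum()
assert abs(_com - 1.0) < 1e-12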
| 36.816092
| 181
| 0.589135
|
8b6a02295c184c3caa83cf73ff28418dc9d57851
| 1,720
|
py
|
Python
|
openpathsampling/tests/analysis/conftest.py
|
sroet/openpathsampling
|
97c2d51ada941b952189da3deb61cd71b0e5e4a3
|
[
"MIT"
] | 64
|
2016-07-06T13:38:51.000Z
|
2022-03-30T15:58:01.000Z
|
openpathsampling/tests/analysis/conftest.py
|
sroet/openpathsampling
|
97c2d51ada941b952189da3deb61cd71b0e5e4a3
|
[
"MIT"
] | 601
|
2016-06-13T10:22:01.000Z
|
2022-03-25T00:10:40.000Z
|
openpathsampling/tests/analysis/conftest.py
|
dwhswenson/openpathsampling
|
72fedad9ba8bc60d17c7cc73c641129898d5d530
|
[
"MIT"
] | 45
|
2016-11-10T11:17:53.000Z
|
2022-02-13T11:50:26.000Z
|
# TODO: Currently this is located in tests/analysis/conftest.py. However,
# this might be more suitable for a higher-level conftest.py, or perhaps it
# should be moved into `fixtures` subdirectory of tests with the necessary
# objects imported into the main tests/conftest.py, for use across the test
# suite.
import openpathsampling as paths
from openpathsampling.tests.analysis.utils.fixture_classes import (
TISSystemFixture, TPSSystemFixture, make_fixture, DEFAULT_CV
)
def unidirectional_tis_network():
r"""Fixture for unidirectional TIS with the default (RETIS) scheme.
This has states defined as initial state :math:`x < 0` and final state
:math:`x \ge 10`. The interfaces are at :math:`x=0`, :math:`x=3`, and
:math:`x=6`.
"""
paths.InterfaceSet._reset()
state_A = paths.CVDefinedVolume(DEFAULT_CV, float("-inf"), 0)
state_B = paths.CVDefinedVolume(DEFAULT_CV, 10, float("inf"))
interfaces = paths.VolumeInterfaceSet(DEFAULT_CV, float("-inf"),
[0, 3, 6])
network = paths.MISTISNetwork([(state_A, interfaces, state_B)])
return network
def two_state_tps_network():
state_A = paths.CVDefinedVolume(DEFAULT_CV, float("-inf"), 0)
state_B = paths.CVDefinedVolume(DEFAULT_CV, 10, float("inf"))
network = paths.TPSNetwork(state_A, state_B)
return network
default_unidirectional_tis = make_fixture(
fixture_type=TISSystemFixture,
make_network=unidirectional_tis_network,
scheme_type=paths.DefaultScheme,
state_bounds=(0, 10)
)
default_two_state_tps = make_fixture(
fixture_type=TPSSystemFixture,
make_network=two_state_tps_network,
scheme_type=paths.OneWayShootingMoveScheme,
)
| 35.102041
| 75
| 0.725
|
74d7e6a904d4122a24494458ae9a9b7df1f66474
| 726
|
py
|
Python
|
jsonschema_extractor/__init__.py
|
toumorokoshi/attrs-jsonschema
|
8c6de53d4dad192a1a2cbd826762a8eeeb442ad0
|
[
"MIT"
] | null | null | null |
jsonschema_extractor/__init__.py
|
toumorokoshi/attrs-jsonschema
|
8c6de53d4dad192a1a2cbd826762a8eeeb442ad0
|
[
"MIT"
] | null | null | null |
jsonschema_extractor/__init__.py
|
toumorokoshi/attrs-jsonschema
|
8c6de53d4dad192a1a2cbd826762a8eeeb442ad0
|
[
"MIT"
] | null | null | null |
from .extractor_set import SchemaExtractorSet
from .typing_extractor import TypingExtractor
DEFAULT_EXTRACTOR_LIST = [TypingExtractor()]
try:
from .attrs_extractor import AttrsExtractor
DEFAULT_EXTRACTOR_LIST.insert(0, AttrsExtractor())
except ImportError: # pragma: no cover
pass # pragma: no cover
def extract_jsonschema(typ):
return DEFAULT_EXTRACTOR.extract(typ)
def init_default_extractor():
"""
create a new extractor, providing the default (all available
extractors)
"""
return SchemaExtractorSet(DEFAULT_EXTRACTOR_LIST)
DEFAULT_EXTRACTOR = init_default_extractor()
from .exceptions import UnextractableSchema
def extract(cls):
return DEFAULT_EXTRACTOR.extract(cls)
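# --- Illustrative sketch (added for clarity; not part of the original
# module). Minimal usage, assuming the typing extractor handles plain
# typing types; the exact schema returned is not asserted here:
#
# from typing import List
#
# schema = extract_jsonschema(List[int])
# print(schema)  # expected to be a JSON-schema-style dict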
| 22.6875
| 64
| 0.775482
|
395369f6fc3957d23c3252646d69074c96d91aff
| 1,304
|
py
|
Python
|
https/server.py
|
bridgesign/Mini-tweet
|
97e13afda73b816c953bd93baba31c4686621fdd
|
[
"Apache-2.0"
] | null | null | null |
https/server.py
|
bridgesign/Mini-tweet
|
97e13afda73b816c953bd93baba31c4686621fdd
|
[
"Apache-2.0"
] | null | null | null |
https/server.py
|
bridgesign/Mini-tweet
|
97e13afda73b816c953bd93baba31c4686621fdd
|
[
"Apache-2.0"
] | null | null | null |
import socket
from concurrent.futures import ThreadPoolExecutor
from . import handler
from . import views
import re
from .settings import NOT_FOUND_TEMPLATE
class server:
"""docstring for ClassName"""
def __init__(self, host:str ='', port:int =8080, timeout:int =60, threads:int =10):
self.port = port
self.host = host
self.timeout = timeout
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.thread_pool = ThreadPoolExecutor(max_workers=threads)
def handle(self, conn, addr):
# Create request
request = handler.httprequest(conn, addr)
ret = request.handle()
if ret:
handler.httpresponse(request, '', ret).handle()
conn.close()
return
for pattern in views.patterns:
if bool(re.match(pattern[0], request.headers['url'])):
pattern[1](request).handle()
break
else:
handler.httpresponse(request, NOT_FOUND_TEMPLATE, 404).handle()
if request.headers['connection']:
try:
self.handle(conn, addr)
except:
conn.close()
else:
conn.close()
def serve(self):
self.sock.bind((self.host, self.port))
print("Starting Server on", self.port)
self.sock.listen()
while True:
conn, addr = self.sock.accept()
conn.settimeout(self.timeout)
self.thread_pool.submit(self.handle, conn, addr)
#self.handle(conn, addr)
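# --- Illustrative sketch (added for clarity; not part of the original
# module). Minimal usage of the server class above; serve() blocks, so it
# is shown commented out (the port value is made up):
#
# srv = server(host='', port=8080, timeout=60, threads=10)
# srv.serve()  # accepts connections and hands them to the thread pool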
| 25.076923
| 84
| 0.705521
|
6447b1c91c0cd691d47e9a790e2d21c4b16ba494
| 29,946
|
py
|
Python
|
PS5 Price Collector.py
|
Realsaleh/PS5-Price-Collector
|
797234a5f36656a3ba5a3ab2fd2d49d7144082a8
|
[
"MIT"
] | null | null | null |
PS5 Price Collector.py
|
Realsaleh/PS5-Price-Collector
|
797234a5f36656a3ba5a3ab2fd2d49d7144082a8
|
[
"MIT"
] | null | null | null |
PS5 Price Collector.py
|
Realsaleh/PS5-Price-Collector
|
797234a5f36656a3ba5a3ab2fd2d49d7144082a8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import re
import os
import time
from unidecode import unidecode
def start():
print ("[+] PS5 Price Collector v1.0.0")
print ("[-] Coded By RealSalehn ")
print ("[-] https://salehn.ir")
global headers
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def tilno():
r = requests.get('https://tilno.ir/shop/buy-ps5-console', headers=headers)
if r.status_code == 200:
try:
global tilno_price
soup = BeautifulSoup(r.text, 'html.parser')
section_value = soup.find_all('div', attrs={'class': 'summary-inner'})
price_value = section_value
price_value = soup.find_all('p', attrs={'class': 'price'})
tilno_price = price_value[0].text.strip()
if tilno_price == "تماس بگیرید":
tilno_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Tilno Mojod Nist!")
else:
tilno_price = re.sub(r'\D+', '', tilno_price)
tilno_price = unidecode(tilno_price)
print("Gheymat PS5 Dar Tilno:", tilno_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://tilno.ir/shop/buy-ps5-digital-console', headers=headers)
if r.status_code == 200:
try:
global tilno_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
section_digital_value = soup.find_all('div', attrs={'class': 'summary-inner'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('p', attrs={'class': 'price'})
tilno_digital_price = price_digital_value[0].text.strip()
if tilno_digital_price == "تماس بگیرید":
tilno_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Tilno Mojod Nist!")
else:
tilno_digital_price = re.sub(r'\D+', '', tilno_digital_price)
tilno_digital_price = unidecode(tilno_digital_price)
print("Gheymat PS5 Digital Dar Tilno:", tilno_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def dragonshop():
r = requests.get('https://dragon-shop.ir/product/%d8%ae%d8%b1%db%8c%d8%af-playstation-5-%d9%be%d9%84%db%8c-%d8%a7%d8%b3%d8%aa%db%8c%d8%b4%d9%86-5', headers=headers)
if r.status_code == 200:
try:
global dragonshop_price
soup = BeautifulSoup(r.text, 'html.parser')
section_value = soup.find_all('div', attrs={'class': 'row'})
price_value = section_value
price_value = soup.find_all('p', attrs={'class': 'price'})
if soup.find_all('ins'):
price_value = soup.find_all('ins')
dragonshop_price = price_value[0].text.strip()
dragonshop_price = re.sub(r'\D+', '', dragonshop_price)
dragonshop_price = unidecode(dragonshop_price)
print("Gheymat PS5 Dar Dragon-Shop:", dragonshop_price)
else:
dragonshop_price = price_value[0].text.strip()
if dragonshop_price == "تماس بگیرید":
dragonshop_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Dragon-Shop Mojod Nist!")
else:
dragonshop_price = re.sub(r'\D+', '', dragonshop_price)
dragonshop_price = unidecode(dragonshop_price)
print("Gheymat PS5 Dar Dragon-Shop:", dragonshop_price)
except:
print("Gheymat PS5 Dar Dragon-Shop Mojod Nist!")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://dragon-shop.ir/product/%D8%AE%D8%B1%DB%8C%D8%AF-playstation-5-%D9%BE%D9%84%DB%8C-%D8%A7%D8%B3%D8%AA%DB%8C%D8%B4%D9%86-5-%D8%AF%DB%8C%D8%AC%DB%8C%D8%AA%D8%A7%D9%84', headers=headers)
if r.status_code == 200:
try:
global dragonshop_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
section_digital_value = soup.find_all('div', attrs={'class': 'summary entry-summary col-lg-24 col-md-24 col-sm-21 col-xs-36'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('p', attrs={'class': 'price'})
if soup.find_all('ins'):
dragonshop_digital_price = price_digital_value[0].text.strip()
dragonshop_digital_price = re.sub(r'\D+', '', dragonshop_digital_price)
dragonshop_digital_price = unidecode(dragonshop_digital_price)
print("Gheymat PS5 Digital Dar Dragon-Shop:", dragonshop_digital_price)
else:
dragonshop_digital_price = price_digital_value[0].text.strip()
if dragonshop_digital_price == "تماس بگیرید":
dragonshop_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Dragon-Shop Mojod Nist!")
else:
dragonshop_digital_price = re.sub(r'\D+', '', dragonshop_digital_price)
dragonshop_digital_price = unidecode(dragonshop_digital_price)
print("Gheymat PS5 Digital Dar Dragon-Shop:", dragonshop_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def pspro():
r = requests.get('https://pspro.ir/playstation-5-standard-edition-white', headers=headers)
if r.status_code == 200:
try:
global pspro_price
soup = BeautifulSoup(r.text, 'html.parser')
if soup.find_all('a', {'class': 'btn btn-lg btn-block red-btn'}):
pspro_price = "قیمت ناموجود"
print("Gheymat PS5 Dar PSPro Mojod Nist!")
else:
section_value = soup.find_all('div', attrs={'class': 'col-6 d-flex flex-column justify-content-between px-4'})
price_value = section_value
price_value = soup.find_all('button', attrs={'class': 'btn btn-lg btn-block green-btn'})
pspro_price = price_value[0].text.strip()
pspro_price = re.sub(r'(\D+)', '', pspro_price)
pspro_price = unidecode(pspro_price)
print("Gheymat PS5 Dar PSPro:", pspro_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://pspro.ir/PlayStation-5-Digital-Edition-825GB-R2-CFI-1016B', headers=headers)
if r.status_code == 200:
try:
global pspro_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
if soup.find_all('a', {'class': 'btn btn-lg btn-block red-btn'}):
pspro_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar PSPro Mojod Nist!")
else:
section_digital_value = soup.find_all('div', attrs={'class': 'col-6 d-flex flex-column justify-content-between px-4'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('button', attrs={'class': 'btn btn-lg btn-block green-btn'})
pspro_digital_price = price_digital_value[0].text.strip()
pspro_digital_price = re.sub(r'(\D+)', '', pspro_digital_price)
pspro_digital_price = unidecode(pspro_digital_price)
print("Gheymat PS5 Dar PSPro:", pspro_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def techsiro():
r = requests.get('https://techsiro.com/product/ps5-standard-edition-825gb-white-cfi-1015a', headers=headers)
if r.status_code == 200:
try:
global techsiro_price
soup = BeautifulSoup(r.text, 'html.parser')
section_value = soup.find_all('div', attrs={'class': 'summary entry-summary'})
price_value = section_value
if soup.find_all('p', attrs={'class': 'stock out-of-stock'}):
techsiro_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Techsiro Mojod Nist!")
else:
price_value = soup.find_all('p', attrs={'class': 'price'})
techsiro_price = price_value[0].text.strip()
if techsiro_price == "تماس بگیرید":
techsiro_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Techsiro Mojod Nist!")
else:
techsiro_price = re.sub(r'\D+', '', techsiro_price)
techsiro_price = unidecode(techsiro_price)
print("Gheymat PS5 Dar Techsiro:", techsiro_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://techsiro.com/product/ps5-digital-edition-825gb-white-cfi-1016b', headers=headers)
if r.status_code == 200:
try:
global techsiro_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
section_digital_value = soup.find_all('div', attrs={'class': 'summary-inner'})
price_digital_value = section_digital_value
if soup.find_all('p', attrs={'class': 'stock out-of-stock'}):
techsiro_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Techsiro Mojod Nist!")
else:
price_digital_value = soup.find_all('p', attrs={'class': 'price'})
techsiro_digital_price = price_digital_value[0].text.strip()
if techsiro_digital_price == "تماس بگیرید":
techsiro_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Techsiro Mojod Nist!")
else:
techsiro_digital_price = re.sub(r'\D+', '', techsiro_digital_price)
techsiro_digital_price = unidecode(techsiro_digital_price)
print("Gheymat PS5 Digital Dar Techsiro:", techsiro_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def gamiha():
r = requests.get('https://gamiha.net/shop/playstation5/P335-ps5.html', headers=headers)
if r.status_code == 200:
try:
global gamiha_price
soup = BeautifulSoup(r.text, 'html.parser')
if soup.find_all('div', {'class': 'wz-shop-product-out-stock'}):
gamiha_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Gamiha Mojod Nist!")
else:
section_value = soup.find_all('div', attrs={'class': 'wz-shop-product-section'})
price_value = section_value
if soup.find_all('span', {'class': 'wz-shop-product-sale-price'}):
price_value = soup.find_all('span', attrs={'class': 'wz-shop-product-sale-price'})
gamiha_price = price_value[0].text.strip()
gamiha_price = re.sub(r'\D+', '', gamiha_price)
gamiha_price = unidecode(gamiha_price)
print("Gheymat PS5 Dar Gamiha:", gamiha_price)
else:
price_value = soup.find_all('div', attrs={'class': 'wz-shop-product-price'})
gamiha_price = price_value[0].text.strip()
gamiha_price = re.sub(r'\D+', '', gamiha_price)
gamiha_price = unidecode(gamiha_price)
print("Gheymat PS5 Dar Gamiha:", gamiha_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://gamiha.net/shop/playstation-5-digital/P605-%DA%A9%D9%86%D8%B3%D9%88%D9%84-%D8%A8%D8%A7%D8%B2%DB%8C-%D8%B3%D9%88%D9%86%DB%8C-%D9%85%D8%AF%D9%84-%D9%BE%D9%84%DB%8C-%D8%A7%D8%B3%D8%AA%DB%8C%D8%B4%D9%86-%DB%B5-%D8%AF%DB%8C%D8%AC%DB%8C%D8%AA%D8%A7%D9%84.html', headers=headers)
if r.status_code == 200:
try:
global gamiha_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
if soup.find_all('div', {'class': 'wz-shop-product-out-stock'}):
gamiha_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Gamiha Mojod Nist!")
else:
section_digital_value = soup.find_all('div', attrs={'class': 'wz-shop-product-section'})
price_digital_value = section_digital_value
if soup.find_all('span', {'class': 'wz-shop-product-sale-price'}):
price_digital_value = soup.find_all('span', attrs={'class': 'wz-shop-product-sale-price'})
gamiha_digital_price = price_digital_value[0].text.strip()
gamiha_digital_price = re.sub(r'\D+', '', gamiha_digital_price)
gamiha_digital_price = unidecode(gamiha_digital_price)
print("Gheymat PS5 Digital Dar Gamiha:", gamiha_digital_price)
else:
price_digital_value = soup.find_all('div', attrs={'class': 'wz-shop-product-price'})
gamiha_digital_price = price_digital_value[0].text.strip()
gamiha_digital_price = re.sub(r'\D+', '', gamiha_digital_price)
gamiha_digital_price = unidecode(gamiha_digital_price)
print("Gheymat PS5 Digital Dar Gamiha:", gamiha_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def timcheh():
r = requests.get('https://timcheh.com/product/tpi-8378', headers=headers)
if r.status_code == 200:
try:
global timcheh_price
soup = BeautifulSoup(r.text, 'html.parser')
if soup.find_all('h4', {'class': 'product_styles_unavailable_title__2XMnW product_styles_unavailable__GKiyV'}):
timcheh_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Timcheh Mojod Nist!")
else:
section_value = soup.find_all('div', attrs={'class': 'product_styles_product_info_holder__9IC6k'})
price_value = section_value
price_value = soup.find_all('span', attrs={'class': 'product_styles_price__3Ws3t'})
timcheh_price = price_value[0].text.strip()
timcheh_price = re.sub(r'\D+', '', timcheh_price)
print("Gheymat PS5 Dar timcheh:", timcheh_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://timcheh.com/product/tpi-8374', headers=headers)
if r.status_code == 200:
try:
global timcheh_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
if soup.find_all('h4', {'class': 'product_styles_unavailable_title__2XMnW product_styles_unavailable__GKiyV'}):
timcheh_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar timcheh Mojod Nist!")
else:
section_digital_value = soup.find_all('div', attrs={'class': 'product_styles_product_info_holder__9IC6k'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('span', attrs={'class': 'product_styles_price__3Ws3t'})
timcheh_digital_price = price_digital_value[0].text.strip()
timcheh_digital_price = re.sub(r'\D+', '', timcheh_digital_price)
print("Gheymat PS5 Digital Dar timcheh:", timcheh_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def lioncomputer():
r = requests.get('https://www.lioncomputer.com/product/48766/Sony-PlayStation-5-With-Blu-Ray-Drive-Console-PS5', headers=headers)
if r.status_code == 200:
try:
global lioncomputer_price
soup = BeautifulSoup(r.text, 'html.parser')
section_value = soup.find_all('div', attrs={'class': 'col-lg-6 col-sm-12'})
price_value = section_value
price_value = soup.find_all('strong', attrs={'class': 'text-success font-size-large font-weight-bold mt-2'})
lioncomputer_price = price_value[0].text.strip()
if lioncomputer_price == "ناموجود":
lioncomputer_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Lioncomputer Mojod Nist!")
else:
lioncomputer_price = re.sub(r'\D+', '', lioncomputer_price)
lioncomputer_price = unidecode(lioncomputer_price)
print("Gheymat PS5 Dar Lioncomputer:", lioncomputer_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://www.lioncomputer.com/product/gqlm1/Sony-PlayStation-5-CFI-1015B-Digital-Edition-PS5-Console', headers=headers)
if r.status_code == 200:
try:
global lioncomputer_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
section_digital_value = soup.find_all('div', attrs={'class': 'col-lg-6 col-sm-12'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('strong', attrs={'class': 'text-success font-size-large font-weight-bold mt-2'})
lioncomputer_digital_price = price_digital_value[0].text.strip()
if lioncomputer_digital_price == "ناموجود":
lioncomputer_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Lioncomputer Mojod Nist!")
else:
lioncomputer_digital_price = re.sub(r'\D+', '', lioncomputer_digital_price)
lioncomputer_digital_price = unidecode(lioncomputer_digital_price)
print("Gheymat PS5 Digital Dar Lioncomputer:", lioncomputer_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def nakhlmarket():
r = requests.get('https://nakhlmarket.com/product/playstation-5', headers=headers)
if r.status_code == 200:
try:
global nakhlmarket_price
soup = BeautifulSoup(r.text, 'html.parser')
section_value = soup.find_all('div', attrs={'class': 'summary-inner'})
section_value = soup.find_all('div', attrs={'class': 'single_variation_wrap'})
price_value = section_value
price_value = soup.find_all('span', attrs={'class': 'price'})
price_value = soup.find_all('bdi', attrs={'class': ''})
nakhlmarket_price = price_value[2].text.strip()
if nakhlmarket_price == "تماس بگیرید":
nakhlmarket_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Nakhlmarket Mojod Nist!")
else:
nakhlmarket_price = re.sub(r'\D+', '', nakhlmarket_price)
print("Gheymat PS5 Dar Nakhlmarket:", nakhlmarket_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://nakhlmarket.com/product/buy-ps5-digital-edition', headers=headers)
if r.status_code == 200:
try:
global nakhlmarket_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
section_digital_value = soup.find_all('div', attrs={'class': 'summary-inner'})
section_digital_value = soup.find_all('div', attrs={'class': 'single_variation_wrap'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('span', attrs={'class': 'price'})
price_digital_value = soup.find_all('bdi', attrs={'class': ''})
nakhlmarket_digital_price = price_digital_value[2].text.strip()
if nakhlmarket_digital_price == "تماس بگیرید":
nakhlmarket_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Nakhlmarket Mojod Nist!")
else:
nakhlmarket_digital_price = re.sub(r'\D+', '', nakhlmarket_digital_price)
print("Gheymat PS5 Digital Dar Nakhlmarket:", nakhlmarket_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def zirpele():
r = requests.get('https://zirpele.ir/product/%D8%AE%D8%B1%DB%8C%D8%AF-ps5-%D9%BE%D9%84%DB%8C-%D8%A7%D8%B3%D8%AA%DB%8C%D8%B4%D9%86-playstation-5', headers=headers)
if r.status_code == 200:
try:
global zirpele_price
soup = BeautifulSoup(r.text, 'html.parser')
section_value = soup.find_all('div', attrs={'class': 'col-lg-28 col-md-28 col-sm-36 col-xs-36'})
price_value = section_value
price_value = soup.find_all('p', attrs={'class': 'price'})
zirpele_price = price_value[0].text.strip()
if zirpele_price == "تماس بگیرید":
zirpele_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Zirpele Mojod Nist!")
else:
zirpele_price = re.sub(r'\D+', '', zirpele_price)
print("Gheymat PS5 Dar Zirpele:", zirpele_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://zirpele.ir/product/ps5-%d9%be%d9%84%db%8c-%d8%a7%d8%b3%d8%aa%db%8c%d8%b4%d9%86-playstation-5-digital', headers=headers)
if r.status_code == 200:
try:
global zirpele_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
section_digital_value = soup.find_all('div', attrs={'class': 'col-lg-28 col-md-28 col-sm-36 col-xs-36'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('p', attrs={'class': 'price'})
zirpele_digital_price = price_digital_value[0].text.strip()
if zirpele_digital_price == "تماس بگیرید":
zirpele_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Zirpele Mojod Nist!")
else:
zirpele_digital_price = re.sub(r'\D+', '', zirpele_digital_price)
print("Gheymat PS5 Digital Dar Zirpele:", zirpele_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def digikala():
r = requests.get('https://www.digikala.com/product/dkp-3737956/%DA%A9%D9%86%D8%B3%D9%88%D9%84-%D8%A8%D8%A7%D8%B2%DB%8C-%D8%B3%D9%88%D9%86%DB%8C-%D9%85%D8%AF%D9%84-playstation-5-%D8%B8%D8%B1%D9%81%DB%8C%D8%AA-825-%DA%AF%DB%8C%DA%AF%D8%A7%D8%A8%D8%A7%DB%8C%D8%AA', headers=headers)
if r.status_code == 200:
try:
global digikala_price
soup = BeautifulSoup(r.text, 'html.parser')
find_ava = soup.find_all('div', attrs={'class': 'c-product__summary js-product-summary'})
if soup.find_all('i', attrs={'class': 'c-product-stock__action--alarm-icon'}):
digikala_price = "قیمت ناموجود"
print("Gheymat PS5 Dar Digikala Mojod Nist!")
else:
section_value = soup.find_all('div', attrs={'class': 'c-product__summary js-product-summary'})
price_value = section_value
price_value = soup.find_all('div', attrs={'class': 'c-product__seller-price-pure js-price-value'})
digikala_price = price_value[0].text.strip()
digikala_price = re.sub(r'\D+', '', digikala_price)
digikala_price = unidecode(digikala_price)
print("Gheymat PS5 Dar Digikala:", digikala_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
r = requests.get('https://www.digikala.com/product/dkp-3738470/%DA%A9%D9%86%D8%B3%D9%88%D9%84-%D8%A8%D8%A7%D8%B2%DB%8C-%D8%B3%D9%88%D9%86%DB%8C-%D9%85%D8%AF%D9%84-playstation-5-digital-edition-%D8%B8%D8%B1%D9%81%DB%8C%D8%AA-825-%DA%AF%DB%8C%DA%AF%D8%A7%D8%A8%D8%A7%DB%8C%D8%AA', headers=headers)
if r.status_code == 200:
try:
global digikala_digital_price
soup = BeautifulSoup(r.text, 'html.parser')
find_ava = soup.find_all('div', attrs={'class': 'c-product__summary js-product-summary'})
if soup.find_all('i', attrs={'class': 'c-product-stock__action--alarm-icon'}):
digikala_digital_price = "قیمت ناموجود"
print("Gheymat PS5 Digital Dar Digikala Mojod Nist!")
else:
section_digital_value = soup.find_all('div', attrs={'class': 'c-product__summary js-product-summary'})
price_digital_value = section_digital_value
price_digital_value = soup.find_all('div', attrs={'class': 'c-product__seller-price-pure js-price-value'})
digikala_digital_price = price_digital_value[0].text.strip()
digikala_digital_price = re.sub(r'\D+', '', digikala_digital_price)
digikala_digital_price = unidecode(digikala_digital_price)
print("Gheymat PS5 Digital Dar Digikala:", digikala_digital_price)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
def save():
    f = open("PS5.txt", "a", encoding="utf-8")
    f.write("تاریخ قیمت ها:")
    f.write(date)
    f.write("\n")
    shops = [
        ("Tilno.ir", tilno_price, tilno_digital_price),
        ("Dragon-shop.ir", dragonshop_price, dragonshop_digital_price),
        ("PSPro.ir", pspro_price, pspro_digital_price),
        ("Techsiro.com", techsiro_price, techsiro_digital_price),
        ("Gamiha.net", gamiha_price, gamiha_digital_price),
        ("Timcheh.com", timcheh_price, timcheh_digital_price),
        ("Lioncomputer.com", lioncomputer_price, lioncomputer_digital_price),
        ("Nakhlmarket.com", nakhlmarket_price, nakhlmarket_digital_price),
        ("Zirpele.ir", zirpele_price, zirpele_digital_price),
        ("Digikala.com", digikala_price, digikala_digital_price),
    ]
    for name, price, digital_price in shops:
        f.write(f"======== {name} ========\n")
        f.write("معمولی:\n")
        f.write(price)
        f.write("\n")
        f.write("دیجیتال:\n")
        f.write(digital_price)
        f.write("\n")
        f.write("=======================\n")
    f.close()
def get_date():
r = requests.get('https://www.time.ir', headers=headers)
if r.status_code == 200:
try:
global date
soup = BeautifulSoup(r.text, 'html.parser')
find_tag = soup.find_all('span', attrs={'id': 'ctl00_cphTop_Sampa_Web_View_TimeUI_ShowDate00cphTop_3734_lblShamsiNumeral'})
date = find_tag
date = date[0].text.strip()
date = unidecode(date)
print(date)
except:
print("Error! Something is wrong.")
else:
print("Connection Error! Please Try Again.")
if __name__ == '__main__':
start()
get_date()
tilno()
dragonshop()
pspro()
techsiro()
gamiha()
timcheh()
lioncomputer()
nakhlmarket()
zirpele()
digikala()
save()
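# --- Editor's note (sketch, not part of the original script) ---------------
# Translation of the Persian strings above: "معمولی" = standard edition,
# "دیجیتال" = digital edition, "قیمت ناموجود" = price unavailable,
# "تماس بگیرید" = "call us".
# Every shop function repeats the same fetch/parse/clean steps; a helper along
# these lines could remove most of the duplication. The tag/class arguments are
# illustrative placeholders and are not verified against each site's markup.
#
# def fetch_price(url, tag, css_class, index=0):
#     r = requests.get(url, headers=headers)
#     if r.status_code != 200:
#         print("Connection Error! Please Try Again.")
#         return None
#     soup = BeautifulSoup(r.text, 'html.parser')
#     nodes = soup.find_all(tag, attrs={'class': css_class})
#     price = nodes[index].text.strip()
#     if price == "تماس بگیرید":
#         return "قیمت ناموجود"
#     return re.sub(r'\D+', '', price)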
| 49.091803
| 303
| 0.575837
|
3384ba3998bc75178b9d06dd2f885f23e7045b23
| 4,578
|
py
|
Python
|
configs/gdrn/lmPbrSingleObj/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e_SO/resnest50d_a6_AugCosyAAEGary_BG05_mlBCE_lm_pbr_100e_holepuncher.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/gdrn/lmPbrSingleObj/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e_SO/resnest50d_a6_AugCosyAAEGary_BG05_mlBCE_lm_pbr_100e_holepuncher.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/gdrn/lmPbrSingleObj/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e_SO/resnest50d_a6_AugCosyAAEGary_BG05_mlBCE_lm_pbr_100e_holepuncher.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
_base_ = ["../../../_base_/gdrn_base.py"]
OUTPUT_DIR = "output/gdrn/lm_pbr/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e/holepuncher"
INPUT = dict(
DZI_PAD_SCALE=1.5,
TRUNCATE_FG=False,
CHANGE_BG_PROB=0.5,
COLOR_AUG_PROB=0.8,
COLOR_AUG_TYPE="code",
COLOR_AUG_CODE=(
"Sequential(["
# Sometimes(0.5, PerspectiveTransform(0.05)),
# Sometimes(0.5, CropAndPad(percent=(-0.05, 0.1))),
# Sometimes(0.5, Affine(scale=(1.0, 1.2))),
"Sometimes(0.5, CoarseDropout( p=0.2, size_percent=0.05) ),"
"Sometimes(0.4, GaussianBlur((0., 3.))),"
"Sometimes(0.3, pillike.EnhanceSharpness(factor=(0., 50.))),"
"Sometimes(0.3, pillike.EnhanceContrast(factor=(0.2, 50.))),"
"Sometimes(0.5, pillike.EnhanceBrightness(factor=(0.1, 6.))),"
"Sometimes(0.3, pillike.EnhanceColor(factor=(0., 20.))),"
"Sometimes(0.5, Add((-25, 25), per_channel=0.3)),"
"Sometimes(0.3, Invert(0.2, per_channel=True)),"
"Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),"
"Sometimes(0.5, Multiply((0.6, 1.4))),"
"Sometimes(0.1, AdditiveGaussianNoise(scale=10, per_channel=True)),"
"Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 2.2), per_channel=0.3)),"
"Sometimes(0.5, Grayscale(alpha=(0.0, 1.0)))," # maybe remove for det
"], random_order=True)"
# cosy+aae
),
)
SOLVER = dict(
IMS_PER_BATCH=24,
TOTAL_EPOCHS=100,
LR_SCHEDULER_NAME="flat_and_anneal",
ANNEAL_METHOD="cosine", # "cosine"
ANNEAL_POINT=0.72,
# REL_STEPS=(0.3125, 0.625, 0.9375),
OPTIMIZER_CFG=dict(_delete_=True, type="Ranger", lr=1e-4, weight_decay=0),
WEIGHT_DECAY=0.0,
WARMUP_FACTOR=0.001,
WARMUP_ITERS=1000,
)
DATASETS = dict(
TRAIN=("lm_pbr_holepuncher_train",),
TEST=("lm_real_holepuncher_test",),
DET_FILES_TEST=(
"datasets/BOP_DATASETS/lm/test/test_bboxes/yolov4x_640_test672_augCosyAAEGray_ranger_lm_pbr_lm_test_16e.json",
),
)
MODEL = dict(
LOAD_DETS_TEST=True,
PIXEL_MEAN=[0.0, 0.0, 0.0],
PIXEL_STD=[255.0, 255.0, 255.0],
POSE_NET=dict(
NAME="GDRN",
XYZ_ONLINE=True,
BACKBONE=dict(
FREEZE=False,
PRETRAINED="timm",
INIT_CFG=dict(
type="timm/resnest50d",
pretrained=True,
in_chans=3,
features_only=True,
out_indices=(4,),
),
),
## geo head: Mask, XYZ, Region
GEO_HEAD=dict(
FREEZE=False,
INIT_CFG=dict(
type="TopDownMaskXyzRegionHead",
in_dim=2048, # this is num out channels of backbone conv feature
),
NUM_REGIONS=64,
),
PNP_NET=dict(
INIT_CFG=dict(norm="GN", act="gelu"),
REGION_ATTENTION=True,
WITH_2D_COORD=True,
ROT_TYPE="allo_rot6d",
TRANS_TYPE="centroid_z",
),
LOSS_CFG=dict(
# xyz loss ----------------------------
XYZ_LOSS_TYPE="L1", # L1 | CE_coor
XYZ_LOSS_MASK_GT="visib", # trunc | visib | obj
XYZ_LW=1.0,
# mask loss ---------------------------
MASK_LOSS_TYPE="BCE", # L1 | BCE | CE
MASK_LOSS_GT="trunc", # trunc | visib | gt
MASK_LW=1.0,
# region loss -------------------------
REGION_LOSS_TYPE="CE", # CE
REGION_LOSS_MASK_GT="visib", # trunc | visib | obj
REGION_LW=1.0,
# pm loss --------------
PM_LOSS_SYM=True, # NOTE: sym loss
PM_R_ONLY=True, # only do R loss in PM
PM_LW=1.0,
# centroid loss -------
CENTROID_LOSS_TYPE="L1",
CENTROID_LW=1.0,
# z loss -----------
Z_LOSS_TYPE="L1",
Z_LW=1.0,
),
),
)
TEST = dict(EVAL_PERIOD=0, VIS=False, TEST_BBOX_TYPE="est") # gt | est
# bbnc4
# objects holepuncher Avg(1)
# ad_2 1.33 1.33
# ad_5 16.75 16.75
# ad_10 41.86 41.86
# rete_2 18.93 18.93
# rete_5 88.49 88.49
# rete_10 99.05 99.05
# re_2 31.11 31.11
# re_5 89.15 89.15
# re_10 99.14 99.14
# te_2 61.08 61.08
# te_5 98.57 98.57
# te_10 99.52 99.52
# proj_2 58.42 58.42
# proj_5 99.05 99.05
# proj_10 99.52 99.52
# re 3.20 3.20
# te 0.02 0.02
| 33.661765
| 118
| 0.526212
|
d668fe7e346d2ebf2aa0125bd33eab9e4ab0b1e5
| 29
|
py
|
Python
|
solution/retrieval/elastic_engine/__init__.py
|
taeukkkim/temp
|
91c90fe5da4678424d8aacacbf15773dc624021d
|
[
"MIT"
] | 5
|
2021-11-10T09:44:42.000Z
|
2022-03-20T06:14:42.000Z
|
solution/retrieval/elastic_engine/__init__.py
|
taeukkkim/temp
|
91c90fe5da4678424d8aacacbf15773dc624021d
|
[
"MIT"
] | null | null | null |
solution/retrieval/elastic_engine/__init__.py
|
taeukkkim/temp
|
91c90fe5da4678424d8aacacbf15773dc624021d
|
[
"MIT"
] | 7
|
2021-11-10T23:54:03.000Z
|
2022-01-03T02:55:50.000Z
|
from .api import ESRetrieval
| 14.5
| 28
| 0.827586
|
92434c73eaea77520ca2c2cab5d762518af7020f
| 2,080
|
py
|
Python
|
notifications/telegram/posts.py
|
dubadub/vas3k.club
|
45e8e7e198f7b139dadd50d877205463e64e1fb8
|
[
"MIT"
] | 1
|
2022-01-10T14:19:17.000Z
|
2022-01-10T14:19:17.000Z
|
notifications/telegram/posts.py
|
dubadub/vas3k.club
|
45e8e7e198f7b139dadd50d877205463e64e1fb8
|
[
"MIT"
] | 6
|
2021-09-22T18:55:27.000Z
|
2022-03-12T01:04:53.000Z
|
notifications/telegram/posts.py
|
dubadub/vas3k.club
|
45e8e7e198f7b139dadd50d877205463e64e1fb8
|
[
"MIT"
] | 2
|
2022-01-10T20:01:58.000Z
|
2022-01-11T09:42:41.000Z
|
import telegram
from django.template import TemplateDoesNotExist
from notifications.telegram.common import Chat, CLUB_CHANNEL, send_telegram_message, render_html_message, send_telegram_image, CLUB_CHAT
def announce_in_club_channel(post, announce_text=None, image=None):
if not announce_text:
announce_text = render_html_message("channel_post_announce.html", post=post)
if image:
send_telegram_image(
chat=CLUB_CHANNEL,
image_url=image,
text=announce_text,
)
else:
send_telegram_message(
chat=CLUB_CHANNEL,
text=announce_text,
disable_preview=False,
parse_mode=telegram.ParseMode.HTML,
)
def announce_in_club_chats(post):
if post.topic and post.topic.chat_id:
# announce to the topic chat
send_telegram_message(
chat=Chat(id=post.topic.chat_id),
text=render_html_message("channel_post_announce.html", post=post),
parse_mode=telegram.ParseMode.HTML,
disable_preview=True,
)
else:
# announce to public chat
send_telegram_message(
chat=CLUB_CHAT,
text=render_html_message("channel_post_announce.html", post=post),
parse_mode=telegram.ParseMode.HTML,
disable_preview=True,
)
def notify_post_approved(post):
if post.author.telegram_id:
send_telegram_message(
chat=Chat(id=post.author.telegram_id),
text=render_html_message("post_approved.html", post=post),
parse_mode=telegram.ParseMode.HTML,
)
def notify_post_rejected(post, reason):
try:
text = render_html_message(f"post_rejected/{reason.value}.html", post=post)
except TemplateDoesNotExist:
text = render_html_message(f"post_rejected/draft.html", post=post)
if post.author.telegram_id:
send_telegram_message(
chat=Chat(id=post.author.telegram_id),
text=text,
parse_mode=telegram.ParseMode.HTML,
)
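# Usage sketch (editor's note, not part of the module): these helpers are
# fire-and-forget senders, e.g.
#   announce_in_club_channel(post, image=post.image_url)  # image_url is a hypothetical attribute
#   notify_post_rejected(post, reason)                     # reason.value selects the template file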
| 31.515152
| 136
| 0.658654
|
0bf58a279a34695f1f373e75c219a2894ff48fa6
| 1,645
|
py
|
Python
|
10-Logical_Vulnerabilities/q4/q4.py
|
galtoubul/Introduction_to_Information_Security
|
565181ef2ec9530de8e4af43d97da454ddcc77be
|
[
"MIT"
] | null | null | null |
10-Logical_Vulnerabilities/q4/q4.py
|
galtoubul/Introduction_to_Information_Security
|
565181ef2ec9530de8e4af43d97da454ddcc77be
|
[
"MIT"
] | null | null | null |
10-Logical_Vulnerabilities/q4/q4.py
|
galtoubul/Introduction_to_Information_Security
|
565181ef2ec9530de8e4af43d97da454ddcc77be
|
[
"MIT"
] | null | null | null |
import json
def generate_example():
# WARNING: DON'T EDIT THIS FUNCTION!
return json.dumps({'command': 'echo cool', 'signature': '007152ab1a65e9e864928d0f5cc6f47e8ce6217c09f7e7518d3d15f901e33df7e4bd760e2538929bdf2c5bf710b2babbcb2f268f458551ecbee78df22f3bb039696a2fccf58ccdeeb0c235e36593aa4b8be3d62e4ae6a59f6aebf78e3aec5b1685672bff191969731a096fa8f9ef29926bbee303d0673482410a25d00c46bdc1'})
def generate_exploit() -> str:
"""This function returns the payload that will print `hacked`.
Our payload should cause `run.py` to print out `hacked`.
Warnings:
1. `run.py` should print `hacked`, and the testing will be case *sensitive*
2. This time, `run.py` must NOT crash
Returns:
The string of the payload.
"""
r = """{"command": "echo hacked", "command": "echo cool", "signature": "007152ab1a65e9e864928d0f5cc6f47e8ce6217c09f7e7518d3d15f901e33df7e4bd760e2538929bdf2c5bf710b2babbcb2f268f458551ecbee78df22f3bb039696a2fccf58ccdeeb0c235e36593aa4b8be3d62e4ae6a59f6aebf78e3aec5b1685672bff191969731a096fa8f9ef29926bbee303d0673482410a25d00c46bdc1" }"""
return r
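# Editor's note (assumption, since run.py is not shown here): the payload abuses
# duplicate JSON keys. json.loads() keeps the *last* "command" ('echo cool'), so
# a signature check over the parsed object still passes, while the verifier and
# the executor presumably disagree on which duplicate to use, which is how
# 'echo hacked' ends up being run without crashing the script.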
def main(argv):
# WARNING: DON'T EDIT THIS FUNCTION!
if not 2 <= len(argv) <= 3:
print('USAGE: %s [--example] <script-path>' % argv[0])
return 1
if len(argv) == 2:
example, path = False, argv[1]
else:
example, path = True, argv[2]
if example:
script = generate_example()
else:
script = generate_exploit()
with open(path, 'w') as writer:
writer.write(script)
print('done')
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| 35.76087
| 338
| 0.717933
|
6d475ef1f98906b7d95066881c51a7f18a09f2a9
| 1,758
|
py
|
Python
|
tfx/experimental/templates/taxi/models/estimator/model_test.py
|
nex3z/tfx
|
1b6a36c223db79e2acb9c85da398746f9f7888be
|
[
"Apache-2.0"
] | null | null | null |
tfx/experimental/templates/taxi/models/estimator/model_test.py
|
nex3z/tfx
|
1b6a36c223db79e2acb9c85da398746f9f7888be
|
[
"Apache-2.0"
] | null | null | null |
tfx/experimental/templates/taxi/models/estimator/model_test.py
|
nex3z/tfx
|
1b6a36c223db79e2acb9c85da398746f9f7888be
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_metadata.proto.v0 import schema_pb2
from tfx.components.trainer import executor as trainer_executor
from tfx.experimental.templates.taxi.models.estimator import model
class ModelTest(tf.test.TestCase):
def testTrainerFn(self):
trainer_fn_args = trainer_executor.TrainerFnArgs(
train_files='/path/to/train.file',
transform_output='/path/to/transform_output',
serving_model_dir='/path/to/model_dir',
eval_files='/path/to/eval.file',
schema_file='/path/to/schema_file',
train_steps=1000,
eval_steps=100,
)
schema = schema_pb2.Schema()
result = model.trainer_fn(trainer_fn_args, schema)
self.assertIsInstance(result['estimator'], tf.estimator.Estimator)
self.assertIsInstance(result['train_spec'], tf.estimator.TrainSpec)
self.assertIsInstance(result['eval_spec'], tf.estimator.EvalSpec)
self.assertTrue(callable(result['eval_input_receiver_fn']))
if __name__ == '__main__':
tf.test.main()
| 36.625
| 74
| 0.754835
|
3d691cf0a2774c588ea4ee749ebc44fc210a49cc
| 1,346
|
py
|
Python
|
actions/is_valid_ip_port.py
|
StackStorm-Exchange/stackstorm-networking_utils
|
88a691c0bd4509355710cf5c36b438c7fdf9c2a6
|
[
"Apache-2.0"
] | 3
|
2019-08-26T02:39:04.000Z
|
2020-03-13T13:51:57.000Z
|
actions/is_valid_ip_port.py
|
StackStorm-Exchange/stackstorm-networking_utils
|
88a691c0bd4509355710cf5c36b438c7fdf9c2a6
|
[
"Apache-2.0"
] | 11
|
2017-02-24T12:47:47.000Z
|
2021-10-01T13:46:40.000Z
|
actions/is_valid_ip_port.py
|
StackStorm-Exchange/stackstorm-networking_utils
|
88a691c0bd4509355710cf5c36b438c7fdf9c2a6
|
[
"Apache-2.0"
] | 8
|
2017-02-23T16:36:16.000Z
|
2021-01-28T17:45:51.000Z
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.runners.base_action import Action
class IsValidIpPortAction(Action):
def run(self, port):
"""
args:
          - port (int): an IP port number to check if valid.
raises:
- ValueError: For invalid ports.
returns:
- True: If a valid port.
"""
if port < 0:
raise ValueError("Invalid port: {} is less than 0.".format(port))
elif port > 65535:
raise ValueError("Invalid port: {} is greater than 65535.".format(port))
else:
return True
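# Usage sketch (editor's note): normally this action is invoked by StackStorm via
# the pack's action metadata; called directly it behaves roughly like this (the
# config argument of st2common's Action base class is an assumption here):
#   IsValidIpPortAction(config={}).run(port=8080)   # -> True
#   IsValidIpPortAction(config={}).run(port=70000)  # raises ValueError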
| 35.421053
| 84
| 0.681278
|
836f38b4ae21a2677ec9bdc4152bb7b0ea1c77b5
| 11,946
|
py
|
Python
|
tests/functional/test_routes.py
|
ropable/archivy
|
3fcd8032825a6b475c12cfa32637084240e30414
|
[
"MIT"
] | null | null | null |
tests/functional/test_routes.py
|
ropable/archivy
|
3fcd8032825a6b475c12cfa32637084240e30414
|
[
"MIT"
] | null | null | null |
tests/functional/test_routes.py
|
ropable/archivy
|
3fcd8032825a6b475c12cfa32637084240e30414
|
[
"MIT"
] | null | null | null |
import os
import re
from flask.testing import FlaskClient
from flask import request
from flask_login import current_user
from responses import RequestsMock, GET
from werkzeug.security import generate_password_hash
from archivy.helpers import get_max_id, get_db
from archivy.data import get_dirs, create_dir, get_items, get_item
def test_get_index(test_app, client: FlaskClient):
response = client.get("/")
assert response.status_code == 200
def test_get_custom_css(test_app, client: FlaskClient):
test_app.config["THEME_CONF"]["use_custom_css"] = True
css_file = "custom.css"
css_contents = """
body {
color: red
}
"""
os.mkdir(f"{test_app.config['USER_DIR']}/css/")
with open(f"{test_app.config['USER_DIR']}/css/{css_file}", "w") as f:
f.write(css_contents)
test_app.config["THEME_CONF"]["custom_css_file"] = css_file
resp = client.get("/static/custom.css")
assert css_contents.encode("utf-8") in resp.data
test_app.config["THEME_CONF"]["use_custom_css"] = False
def test_get_new_bookmark(test_app, client: FlaskClient):
response = client.get("/bookmarks/new")
assert response.status_code == 200
def test_post_new_bookmark_missing_fields(test_app, client: FlaskClient):
response = client.post("/bookmarks/new", data={"submit": True})
assert response.status_code == 200
assert b"This field is required" in response.data
def test_get_new_note(test_app, client: FlaskClient):
response = client.get("/notes/new")
assert response.status_code == 200
def test_get_dataobj_not_found(test_app, client: FlaskClient):
response = client.get("/dataobj/1")
assert response.status_code == 302
def test_get_dataobj(test_app, client: FlaskClient, note_fixture):
response = client.get("/dataobj/1")
assert response.status_code == 200
def test_get_delete_dataobj_not_found(test_app, client: FlaskClient):
response = client.get("/dataobj/delete/1")
assert response.status_code == 302
def test_get_delete_dataobj(test_app, client: FlaskClient, note_fixture):
response = client.get("/dataobj/delete/1")
assert response.status_code == 302
def test_create_new_bookmark(
test_app, client: FlaskClient, mocked_responses: RequestsMock
):
mocked_responses.add(
GET,
"https://example.com/",
body="""<html>
<head><title>Random</title></head><body><p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit
</p></body></html>
""",
)
bookmark_data = {
"url": "https://example.com",
"tags": "testing,bookmark",
"path": "",
"submit": "true",
}
resp = client.post("/bookmarks/new", data=bookmark_data)
assert resp.status_code == 302
assert not b"invalid" in resp.data
resp = client.post("/bookmarks/new", data=bookmark_data, follow_redirects=True)
assert resp.status_code == 200
assert b'<span class="post-tag">bookmark</span>' in resp.data
assert b'<span class="post-tag">testing</span>' in resp.data
assert b"https://example.com" in resp.data
assert b"Random" in resp.data
def test_creating_bookmark_without_passing_path_saves_to_default_dir(
test_app, client, mocked_responses
):
mocked_responses.add(GET, "http://example.org", body="Example\n")
bookmarks_dir = "bookmarks"
test_app.config["DEFAULT_BOOKMARKS_DIR"] = bookmarks_dir
create_dir(bookmarks_dir)
resp = client.post(
"/bookmarks/new",
data={
"url": "http://example.org",
"submit": "true",
},
)
bookmark = get_items(structured=False)[0]
assert (
"bookmarks" in bookmark["path"]
) # verify it was saved to default bookmark dir
def test_create_note(test_app, client: FlaskClient):
note_data = {
"title": "Testing the create route",
"tags": "testing,note",
"path": "",
"submit": "true",
}
resp = client.post("/notes/new", data=note_data)
assert resp.status_code == 302
assert not b"invalid" in resp.data
resp = client.post("/notes/new", data=note_data, follow_redirects=True)
assert resp.status_code == 200
assert b'<span class="post-tag">note</span>' in resp.data
assert b'<span class="post-tag">testing</span>' in resp.data
assert b"Testing the create route" in resp.data
def test_logging_in(test_app, client: FlaskClient):
resp = client.post(
"/login",
data={"username": "halcyon", "password": "password"},
follow_redirects=True,
)
assert resp.status_code == 200
assert request.path == "/"
assert current_user
def test_logging_in_with_invalid_creds(test_app, client: FlaskClient):
resp = client.post(
"/login",
data={"username": "invalid", "password": "dasdasd"},
follow_redirects=True,
)
assert resp.status_code == 200
assert request.path == "/login"
assert b"Invalid credentials" in resp.data
def test_edit_user(test_app, client: FlaskClient):
"""Tests editing a user's info, logging out and then logging in with new info."""
new_user = "new_halcyon"
new_pass = "password2"
resp = client.post(
"/user/edit",
data={"username": new_user, "password": new_pass},
follow_redirects=True,
)
assert request.path == "/"
client.delete("/logout")
resp = client.post(
"/login",
data={"username": new_user, "password": new_pass},
follow_redirects=True,
)
assert resp.status_code == 200
assert request.path == "/"
# check information has updated.
def test_logging_out(test_app, client: FlaskClient):
"""Tests logging out and then accessing restricted views"""
client.delete("/logout")
resp = client.get("/", follow_redirects=True)
assert request.path == "/login"
def test_create_dir(test_app, client: FlaskClient):
"""Tests /folders/create endpoint"""
resp = client.post(
"/folders/create",
data={"parent_dir": "", "new_dir": "testing"},
follow_redirects=True,
)
assert resp.status_code == 200
assert request.args.get("path") == "testing"
assert "testing" in get_dirs()
assert b"Folder successfully created" in resp.data
def test_creating_without_dirname_fails(test_app, client: FlaskClient):
resp = client.post(
"/folders/create", data={"parent_dir": ""}, follow_redirects=True
)
assert resp.status_code == 200
assert request.path == "/"
assert b"Could not create folder." in resp.data
def test_visiting_nonexistent_dir_fails(test_app, client: FlaskClient):
resp = client.get("/?path=nonexistent_dir", follow_redirects=True)
assert b"Directory does not exist." in resp.data
def test_deleting_dir(test_app, client: FlaskClient):
create_dir("testing")
assert "testing" in get_dirs()
resp = client.post(
"/folders/delete", data={"dir_name": "testing"}, follow_redirects=True
)
assert not "testing" in get_dirs()
assert b"Folder successfully deleted." in resp.data
def test_deleting_nonexisting_folder_fails(test_app, client: FlaskClient):
resp = client.post("/folders/delete", data={"dir_name": "testing"})
assert resp.status_code == 404
def test_bookmarklet(test_app, client: FlaskClient):
resp = client.get("/bookmarklet")
assert resp.status_code == 200
def test_backlinks_are_saved(
test_app, client: FlaskClient, note_fixture, bookmark_fixture
):
test_app.config["SEARCH_CONF"]["enabled"] = 1
test_app.config["SEARCH_CONF"]["engine"] = "ripgrep"
resp = client.put(
f"/api/dataobjs/{note_fixture.id}",
json={"content": f"[[{bookmark_fixture.title}|{bookmark_fixture.id}]]"},
)
assert resp.status_code == 200
resp = client.get(f"/dataobj/{bookmark_fixture.id}")
assert b"Backlinks" in resp.data # backlink was detected
test_app.config["SEARCH_CONF"]["enabled"] = 0
def test_bookmark_with_long_title_gets_truncated(test_app, client, mocked_responses):
long_title = "a" * 300
# check that our mock title is indeed longer than the limit
# and would cause an error, without our truncating
assert os.pathconf("/", "PC_NAME_MAX") < len(long_title)
mocked_responses.add(GET, "https://example.com", f"<title>{long_title}</title>")
bookmark_data = {
"url": "https://example.com",
"submit": "true",
}
resp = client.post("/bookmarks/new", data=bookmark_data)
assert resp.status_code == 200
def test_move_data(test_app, note_fixture, client):
create_dir("random")
resp = client.post(
"/dataobj/move/1",
data={"path": "random", "submit": "true"},
follow_redirects=True,
)
assert resp.status_code == 200
assert b"Data successfully moved to random." in resp.data
assert get_item(1)["dir"] == "random"
def test_invalid_inputs_fail_move_data(test_app, note_fixture, client):
resp = client.post("/dataobj/move/1", follow_redirects=True)
assert b"No path specified." in resp.data
resp = client.post(
"/dataobj/move/2", data={"path": "aaa", "submit": "true"}, follow_redirects=True
)
assert b"Data not found" in resp.data
resp = client.post(
"/dataobj/move/1", data={"path": "", "submit": "true"}, follow_redirects=True
)
assert b"Data already in target directory" in resp.data
faulty_paths = ["../adarnad", "~/adasd", "ssss"]
for p in faulty_paths:
resp = client.post(
"/dataobj/move/1", data={"path": p, "submit": "true"}, follow_redirects=True
)
assert b"Data could not be moved to " + bytes(p, "utf-8") in resp.data
def test_rename_dir(test_app, client):
create_dir("random")
resp = client.post(
"/folders/rename",
data={"current_path": "random", "new_name": "renamed_random"},
follow_redirects=True,
)
assert resp.status_code == 200
assert b"Renamed successfully" in resp.data
def test_invalid_inputs_fail_renaming(test_app, client):
create_dir("random")
create_dir("random2")
resp = client.post(
"/folders/rename",
data={"current_path": "inexisting", "new_name": "random3"},
follow_redirects=True,
)
assert b"Directory not found" in resp.data
resp = client.post(
"/folders/rename",
data={"current_path": "random", "new_name": "random2"},
follow_redirects=True,
)
assert b"Target directory exists." in resp.data
faulty_paths = ["../adarnad", "~/adasd", "/illegal_dir", "."]
for p in faulty_paths:
print(p)
resp = client.post(
"/folders/rename",
data={"current_path": "random", "new_name": p},
follow_redirects=True,
)
assert b"Invalid input" in resp.data
def test_get_config_page(test_app, client):
resp = client.get("/config")
assert resp.status_code == 200
assert b"Edit Config" in resp.data
def test_post_updated_config(test_app, client):
# use dark theme as random conf value to change
dark_theme = test_app.config["THEME_CONF"]["use_theme_dark"]
resp = client.post(
"/config", data={"submit": True, "THEME_CONF-use_theme_dark": not dark_theme}
)
assert test_app.config["THEME_CONF"]["use_theme_dark"] == (not dark_theme)
def test_getting_all_tags(test_app, client, bookmark_fixture):
# bookmark fixture has embedded tags
resp = client.get("/tags")
bookmark_tags = ["embedded-tag", "tag2"]
assert resp.status_code == 200
for tag in bookmark_tags:
assert f"#{tag}" in str(resp.data)
def test_getting_matches_for_specific_tag(test_app, client, bookmark_fixture):
resp = client.get("/tags/tag2")
assert resp.status_code == 200
assert bookmark_fixture.title in str(resp.data)
assert str(bookmark_fixture.id) in str(resp.data)
| 30.55243
| 88
| 0.663904
|
a7325b49f0b2774b889c10e24335441dcb3e7ac8
| 469
|
py
|
Python
|
infra/bots/recipe_modules/build/__init__.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 8
|
2019-04-20T09:12:17.000Z
|
2019-09-06T20:04:22.000Z
|
infra/bots/recipe_modules/build/__init__.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 3
|
2019-04-29T06:34:00.000Z
|
2019-08-18T11:56:32.000Z
|
infra/bots/recipe_modules/build/__init__.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 57
|
2016-12-29T02:00:25.000Z
|
2021-11-16T01:22:50.000Z
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'depot_tools/gclient',
'env',
'infra',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/python',
'recipe_engine/step',
'run',
'vars',
]
from recipe_engine.recipe_api import Property
PROPERTIES = {
'buildername': Property(default=None),
}
| 20.391304
| 72
| 0.710021
|
71a933a13987b674871af6c00436440807f4898f
| 331
|
py
|
Python
|
tests/test_pandas.py
|
JessvLS/project_spring_2020
|
ae5387afce3faabba1d8ab579de2dd8c80f6ffa7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pandas.py
|
JessvLS/project_spring_2020
|
ae5387afce3faabba1d8ab579de2dd8c80f6ffa7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pandas.py
|
JessvLS/project_spring_2020
|
ae5387afce3faabba1d8ab579de2dd8c80f6ffa7
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import patch, Mock
import pandas as pd
@patch("path.to.file.pandas.read_sql")
def test_get_df(read_sql_mock: Mock):
read_sql_mock.return_value = pd.DataFrame({"foo_id": [1, 2, 3]})
results = get_df()
read_sql_mock.assert_called_once()
    pd.testing.assert_frame_equal(results, pd.DataFrame({"bar_id": [1, 2, 3]}))
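# Editor's note: "path.to.file" above is a placeholder for the module under test
# that defines get_df; patch the name where it is *looked up* (that module), not
# where pandas defines read_sql. get_df is assumed to be imported from the same
# placeholder module, which is why no real import is added here.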
| 33.1
| 78
| 0.70997
|
b1703074a3c56637efdbbe50573ed45ca7569a2d
| 7,649
|
py
|
Python
|
trulioo_sdk/model/business_search_response.py
|
Trulioo/sdk-python
|
3bf0530e2ba1a3ec93d89b967b2e257e7401d5c2
|
[
"RSA-MD"
] | 1
|
2022-01-11T12:08:45.000Z
|
2022-01-11T12:08:45.000Z
|
trulioo_sdk/model/business_search_response.py
|
Trulioo/sdk-python
|
3bf0530e2ba1a3ec93d89b967b2e257e7401d5c2
|
[
"RSA-MD"
] | null | null | null |
trulioo_sdk/model/business_search_response.py
|
Trulioo/sdk-python
|
3bf0530e2ba1a3ec93d89b967b2e257e7401d5c2
|
[
"RSA-MD"
] | 1
|
2021-05-17T08:33:15.000Z
|
2021-05-17T08:33:15.000Z
|
"""
Trulioo Python SDK
Package version: 1.0.4
Trulioo OpenAPI version: v1
Generated by OpenAPI Generator version: 5.0.1
"""
import re # noqa: F401
import sys # noqa: F401
from trulioo_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from trulioo_sdk.model.business_record import BusinessRecord
from trulioo_sdk.model.service_error import ServiceError
globals()['BusinessRecord'] = BusinessRecord
globals()['ServiceError'] = ServiceError
class BusinessSearchResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'transaction_id': (str,), # noqa: E501
'uploaded_dt': (datetime,), # noqa: E501
'country_code': (str,), # noqa: E501
'product_name': (str,), # noqa: E501
'record': (BusinessRecord,), # noqa: E501
'errors': ([ServiceError],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'transaction_id': 'TransactionID', # noqa: E501
'uploaded_dt': 'UploadedDt', # noqa: E501
'country_code': 'CountryCode', # noqa: E501
'product_name': 'ProductName', # noqa: E501
'record': 'Record', # noqa: E501
'errors': 'Errors', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BusinessSearchResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
transaction_id (str): The id for the transaction it will be a GUID. [optional] # noqa: E501
uploaded_dt (datetime): Time in UTC. [optional] # noqa: E501
country_code (str): The country code for which the verification was performed.. [optional] # noqa: E501
product_name (str): Product Name. [optional] # noqa: E501
record (BusinessRecord): [optional] # noqa: E501
errors ([ServiceError]): Collection of record errors. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
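# Construction sketch (editor's note, not generated code): keyword arguments use
# the pythonic keys from attribute_map above; unrecognised keys are only dropped
# when the configuration sets discard_unknown_keys (see the loop above).
#   resp = BusinessSearchResponse(transaction_id="abc-123", country_code="US")
#   resp.country_code  # -> "US"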
| 40.903743
| 116
| 0.588966
|
e4e7640d1b2a7c42fbd4bda3554e9d92485d1c8d
| 593
|
py
|
Python
|
torchcmh/dataset/base/__init__.py
|
chrisbyd/deep-cross-modal-hashing
|
b67bed412a499fdb619769d5132d897f5910c433
|
[
"MIT"
] | 65
|
2019-12-08T12:11:53.000Z
|
2022-03-10T09:25:45.000Z
|
torchcmh/dataset/base/__init__.py
|
silencelzx/deep-cross-modal-hashing
|
9784397c1076c81b43ebd856cb24b8a67cf8f41e
|
[
"MIT"
] | 17
|
2020-05-07T09:22:20.000Z
|
2022-03-02T02:05:18.000Z
|
torchcmh/dataset/base/__init__.py
|
silencelzx/deep-cross-modal-hashing
|
9784397c1076c81b43ebd856cb24b8a67cf8f41e
|
[
"MIT"
] | 17
|
2020-04-02T06:38:49.000Z
|
2022-01-11T12:41:49.000Z
|
# coding: utf-8
# @Time :
# @Author : Godder
# @Github : https://github.com/WangGodder
from __future__ import absolute_import
from __future__ import print_function
from .base import CrossModalTrainBase, CrossModalValidBase
from .pairwise import CrossModalPairwiseTrain
from .single import CrossModalSingleTrain
from .triplet import CrossModalTripletTrain
from .quadruplet import CrossModalQuadrupletTrain
__all__ = ['CrossModalTrainBase', 'CrossModalValidBase', 'CrossModalSingleTrain',
'CrossModalPairwiseTrain', 'CrossModalTripletTrain', 'CrossModalQuadrupletTrain']
| 32.944444
| 92
| 0.801012
|
9e1fea5d2d0682da5e87db620f0ff4a7cd2a11c7
| 13,445
|
py
|
Python
|
src/pretix/control/views/main.py
|
NorDULaN/pretix
|
e2b9fe8e71f3852721a42c594047d88f5181fd29
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-25T00:11:00.000Z
|
2020-04-25T00:11:00.000Z
|
src/pretix/control/views/main.py
|
NorDULaN/pretix
|
e2b9fe8e71f3852721a42c594047d88f5181fd29
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/control/views/main.py
|
NorDULaN/pretix
|
e2b9fe8e71f3852721a42c594047d88f5181fd29
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.contrib import messages
from django.db import transaction
from django.db.models import (
F, IntegerField, Max, Min, OuterRef, Prefetch, Subquery, Sum,
)
from django.db.models.functions import Coalesce, Greatest
from django.http import JsonResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.translation import gettext, gettext_lazy as _
from django.views import View
from django.views.generic import ListView
from i18nfield.strings import LazyI18nString
from pretix.base.forms import SafeSessionWizardView
from pretix.base.i18n import language
from pretix.base.models import Event, EventMetaValue, Organizer, Quota, Team
from pretix.control.forms.event import (
EventWizardBasicsForm, EventWizardCopyForm, EventWizardFoundationForm,
)
from pretix.control.forms.filter import EventFilterForm
from pretix.control.permissions import OrganizerPermissionRequiredMixin
from pretix.control.views import PaginationMixin
class EventList(PaginationMixin, ListView):
model = Event
context_object_name = 'events'
template_name = 'pretixcontrol/events/index.html'
def get_queryset(self):
qs = self.request.user.get_events_with_any_permission(self.request).prefetch_related(
'organizer', '_settings_objects', 'organizer___settings_objects', 'organizer__meta_properties',
Prefetch(
'meta_values',
EventMetaValue.objects.select_related('property'),
to_attr='meta_values_cached'
)
).order_by('-date_from')
qs = qs.annotate(
min_from=Min('subevents__date_from'),
max_from=Max('subevents__date_from'),
max_to=Max('subevents__date_to'),
max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from'))
).annotate(
order_from=Coalesce('min_from', 'date_from'),
order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),
)
sum_tickets_paid = Quota.objects.filter(
event=OuterRef('pk'), subevent__isnull=True
).order_by().values('event').annotate(
s=Sum('cached_availability_paid_orders')
).values(
's'
)
qs = qs.annotate(
sum_tickets_paid=Subquery(sum_tickets_paid, output_field=IntegerField())
).prefetch_related(
Prefetch('quotas',
queryset=Quota.objects.filter(subevent__isnull=True).annotate(s=Coalesce(F('size'), 0)).order_by('-s'),
to_attr='first_quotas')
)
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
orga_c = Organizer.objects.filter(
pk__in=self.request.user.teams.values_list('organizer', flat=True)
).count()
ctx['hide_orga'] = orga_c <= 1
ctx['meta_fields'] = [
self.filter_form[k] for k in self.filter_form.fields if k.startswith('meta_')
]
for s in ctx['events']:
s.first_quotas = s.first_quotas[:4]
for q in s.first_quotas:
q.cached_avail = (
(q.cached_availability_state, q.cached_availability_number)
if q.cached_availability_time is not None
else q.availability(allow_cache=True)
)
if q.size is not None:
q.percent_paid = min(
100,
round(q.cached_availability_paid_orders / q.size * 100) if q.size > 0 else 100
)
return ctx
@cached_property
def filter_form(self):
return EventFilterForm(data=self.request.GET, request=self.request)
def condition_copy(wizard):
return (
not wizard.clone_from and
EventWizardCopyForm.copy_from_queryset(wizard.request.user, wizard.request.session).exists()
)
class EventWizard(SafeSessionWizardView):
form_list = [
('foundation', EventWizardFoundationForm),
('basics', EventWizardBasicsForm),
('copy', EventWizardCopyForm),
]
templates = {
'foundation': 'pretixcontrol/events/create_foundation.html',
'basics': 'pretixcontrol/events/create_basics.html',
'copy': 'pretixcontrol/events/create_copy.html',
}
condition_dict = {
'copy': condition_copy
}
def get_form_initial(self, step):
initial = super().get_form_initial(step)
if self.clone_from:
if step == 'foundation':
initial['organizer'] = self.clone_from.organizer
initial['locales'] = self.clone_from.settings.locales
initial['has_subevents'] = self.clone_from.has_subevents
elif step == 'basics':
initial['name'] = self.clone_from.name
initial['slug'] = self.clone_from.slug + '-2'
initial['currency'] = self.clone_from.currency
initial['date_from'] = self.clone_from.date_from
initial['date_to'] = self.clone_from.date_to
initial['geo_lat'] = self.clone_from.geo_lat
initial['geo_lon'] = self.clone_from.geo_lon
initial['presale_start'] = self.clone_from.presale_start
initial['presale_end'] = self.clone_from.presale_end
initial['location'] = self.clone_from.location
initial['timezone'] = self.clone_from.settings.timezone
initial['locale'] = self.clone_from.settings.locale
if self.clone_from.settings.tax_rate_default:
initial['tax_rate'] = self.clone_from.settings.tax_rate_default.rate
if 'organizer' in self.request.GET:
if step == 'foundation':
try:
qs = Organizer.objects.all()
if not self.request.user.has_active_staff_session(self.request.session.session_key):
qs = qs.filter(
id__in=self.request.user.teams.filter(can_create_events=True).values_list('organizer', flat=True)
)
initial['organizer'] = qs.get(slug=self.request.GET.get('organizer'))
except Organizer.DoesNotExist:
pass
return initial
def dispatch(self, request, *args, **kwargs):
self.clone_from = None
if 'clone' in self.request.GET:
try:
clone_from = Event.objects.select_related('organizer').get(pk=self.request.GET.get("clone"))
except Event.DoesNotExist:
allow = False
else:
allow = (
request.user.has_event_permission(clone_from.organizer, clone_from,
'can_change_event_settings', request)
and request.user.has_event_permission(clone_from.organizer, clone_from,
'can_change_items', request)
)
if not allow:
messages.error(self.request, _('You do not have permission to clone this event.'))
else:
self.clone_from = clone_from
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, form, **kwargs):
ctx = super().get_context_data(form, **kwargs)
ctx['has_organizer'] = self.request.user.teams.filter(can_create_events=True).exists()
if self.steps.current == 'basics':
ctx['organizer'] = self.get_cleaned_data_for_step('foundation').get('organizer')
return ctx
def render(self, form=None, **kwargs):
if self.steps.current != 'foundation':
fdata = self.get_cleaned_data_for_step('foundation')
if fdata is None:
return self.render_goto_step('foundation')
return super().render(form, **kwargs)
def get_form_kwargs(self, step=None):
kwargs = {
'user': self.request.user,
'session': self.request.session,
}
if step != 'foundation':
fdata = self.get_cleaned_data_for_step('foundation')
if fdata is None:
fdata = {
'organizer': Organizer(slug='_nonexisting'),
'has_subevents': False,
'locales': ['en']
}
# The show must go on, we catch this error in render()
kwargs.update(fdata)
return kwargs
def get_template_names(self):
return [self.templates[self.steps.current]]
def done(self, form_list, form_dict, **kwargs):
foundation_data = self.get_cleaned_data_for_step('foundation')
basics_data = self.get_cleaned_data_for_step('basics')
copy_data = self.get_cleaned_data_for_step('copy')
with transaction.atomic(), language(basics_data['locale']):
event = form_dict['basics'].instance
event.organizer = foundation_data['organizer']
event.plugins = settings.PRETIX_PLUGINS_DEFAULT
event.has_subevents = foundation_data['has_subevents']
event.testmode = True
form_dict['basics'].save()
if not EventWizardBasicsForm.has_control_rights(self.request.user, event.organizer):
if basics_data["team"] is not None:
t = basics_data["team"]
t.limit_events.add(event)
elif event.organizer.settings.event_team_provisioning:
t = Team.objects.create(
organizer=event.organizer, name=_('Team {event}').format(event=event.name),
can_change_event_settings=True, can_change_items=True,
can_view_orders=True, can_change_orders=True, can_view_vouchers=True,
can_change_vouchers=True
)
t.members.add(self.request.user)
t.limit_events.add(event)
if event.has_subevents:
se = event.subevents.create(
name=event.name,
date_from=event.date_from,
date_to=event.date_to,
presale_start=event.presale_start,
presale_end=event.presale_end,
location=event.location,
geo_lat=event.geo_lat,
geo_lon=event.geo_lon,
active=True
)
logdata = {}
for f in form_list:
logdata.update({
k: v for k, v in f.cleaned_data.items()
})
event.log_action('pretix.event.settings', user=self.request.user, data=logdata)
if copy_data and copy_data['copy_from_event']:
from_event = copy_data['copy_from_event']
event.copy_data_from(from_event)
elif self.clone_from:
event.copy_data_from(self.clone_from)
else:
if event.has_subevents:
event.checkin_lists.create(
name=str(se),
all_products=True,
subevent=se
)
else:
event.checkin_lists.create(
name=_('Default'),
all_products=True
)
event.set_defaults()
if basics_data['tax_rate']:
if not event.settings.tax_rate_default or event.settings.tax_rate_default.rate != basics_data['tax_rate']:
event.settings.tax_rate_default = event.tax_rules.create(
name=LazyI18nString.from_gettext(gettext('VAT')),
rate=basics_data['tax_rate']
)
event.settings.set('timezone', basics_data['timezone'])
event.settings.set('locale', basics_data['locale'])
event.settings.set('locales', foundation_data['locales'])
if (copy_data and copy_data['copy_from_event']) or self.clone_from or event.has_subevents:
return redirect(reverse('control:event.settings', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
}) + '?congratulations=1')
else:
return redirect(reverse('control:event.quick', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
}) + '?congratulations=1')
class SlugRNG(OrganizerPermissionRequiredMixin, View):
def get(self, request, *args, **kwargs):
# See Order.assign_code
charset = list('abcdefghjklmnpqrstuvwxyz3789')
for i in range(100):
val = get_random_string(length=settings.ENTROPY['order_code'], allowed_chars=charset)
if not self.request.organizer.events.filter(slug__iexact=val).exists():
break
return JsonResponse({'slug': val})
| 42.279874
| 125
| 0.587802
|
6c583d0385401de72251fdd260c4f81acc7033bd
| 2,598
|
py
|
Python
|
src/orqviz/scans/evals.py
|
n17/orqviz
|
76a3f9855515583d9a59ed3f11cef506b4f993af
|
[
"Apache-2.0"
] | 57
|
2021-11-09T03:21:36.000Z
|
2022-03-29T08:48:00.000Z
|
src/orqviz/scans/evals.py
|
n17/orqviz
|
76a3f9855515583d9a59ed3f11cef506b4f993af
|
[
"Apache-2.0"
] | 18
|
2021-11-09T10:58:40.000Z
|
2022-03-09T16:19:22.000Z
|
src/orqviz/scans/evals.py
|
n17/orqviz
|
76a3f9855515583d9a59ed3f11cef506b4f993af
|
[
"Apache-2.0"
] | 8
|
2021-11-09T11:55:52.000Z
|
2022-02-07T20:35:37.000Z
|
from typing import Callable, List, Optional
import numpy as np
from ..aliases import (
ArrayOfParameterVectors,
GridOfParameterVectors,
LossFunction,
ParameterVector,
)
def eval_points_on_path(
all_points: ArrayOfParameterVectors,
loss_function: LossFunction,
n_reps: int = 1,
verbose: bool = False,
) -> np.ndarray:
"""Function to evaluate loss function on a 1D path of parameters.
Args:
all_parameters: Array of parameters with shape (len, *(parameters.shape))
loss_function: Function to evaluate the parameters on. It must receive only a
numpy.ndarray of parameters, and return a real number.
If your function requires more arguments, consider using the
'LossFunctionWrapper' class from 'orqviz.loss_function'.
n_reps: Repetitions to average the output in noisy cases. Defaults to 1.
verbose: Flag for verbosity of progress. Defaults to False.
"""
n_points = len(all_points)
    values: List[List[Optional[float]]] = [[None] * n_points for _ in range(n_reps)]
for rep in range(n_reps):
for idx, point in enumerate(all_points):
if idx % 10 == 0 and verbose:
print("Progress: {:.1f}%".format(round(idx / n_points * 100)))
values[rep][idx] = loss_function(point)
return np.array(np.mean(np.asarray(values), axis=0))
def eval_points_on_grid(
all_parameters: GridOfParameterVectors,
loss_function: LossFunction,
n_reps: int = 1,
verbose: bool = False,
) -> np.ndarray:
"""Function to evaluate loss function on a 2D grid of parameters.
Args:
all_parameters:
Grid of parameters with shape (len_y, len_x, *(parameters.shape))
        loss_function: Function to evaluate the parameters on. It must receive only a
numpy.ndarray of parameters, and return a real number.
If your function requires more arguments, consider using the
'LossFunctionWrapper' class from 'orqviz.loss_function'.
n_reps: Repetitions to average the output in noisy cases. Defaults to 1.
verbose: Flag for verbosity of progress. Defaults to False.
"""
shape = np.shape(all_parameters)
(size_x, size_y), params_shape = shape[:2], shape[2:]
vector_of_parameters = all_parameters.reshape((size_x * size_y, *params_shape))
vector_of_values = eval_points_on_path(
all_points=vector_of_parameters,
loss_function=loss_function,
n_reps=n_reps,
verbose=verbose,
)
return vector_of_values.reshape((size_x, size_y))
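# Minimal usage sketch (editor's note; assumes a toy quadratic loss, not part of
# the module):
#   import numpy as np
#   path = np.linspace(-1.0, 1.0, 5).reshape(5, 1)   # 5 parameter vectors of dim 1
#   vals = eval_points_on_path(path, lambda p: float(np.sum(p ** 2)))     # shape (5,)
#   grid = np.zeros((3, 4, 2))                        # (len_y, len_x, n_params)
#   surface = eval_points_on_grid(grid, lambda p: float(np.sum(p ** 2)))  # shape (3, 4)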
| 34.64
| 85
| 0.676674
|
5af98f890810fb8a8f3430cd13240fd3a4500857
| 1,243
|
py
|
Python
|
roomai/common/AbstractActionChance.py
|
1696012928/RoomAI
|
37be09590489ab5f7c85083173e83ea31c40b76c
|
[
"MIT"
] | 1
|
2018-03-02T00:49:31.000Z
|
2018-03-02T00:49:31.000Z
|
roomai/common/AbstractActionChance.py
|
1696012928/RoomAI
|
37be09590489ab5f7c85083173e83ea31c40b76c
|
[
"MIT"
] | null | null | null |
roomai/common/AbstractActionChance.py
|
1696012928/RoomAI
|
37be09590489ab5f7c85083173e83ea31c40b76c
|
[
"MIT"
] | null | null | null |
#!/bin/python
#coding=utf8
import roomai
import roomai.common
logger = roomai.get_logger()
class AbstractActionChance(object):
'''
    The abstract class of a chance action. The chance action is used by the chance player.
'''
def __init__(self, key):
self.__key__ = key
def __get_key__(self):
return self.__key__
key = property(__get_key__, doc="The key of the chance action. Every chance action in RoomAI has a key as its identification."
" We strongly recommend you to use the lookup function to get an chance action with the specified key")
@classmethod
def lookup(self, key):
'''
Get an action with the specified key. \n
We strongly recommend you to use the lookup function to get an action with the specified key, rather than use the constructor function.\n
:param key: the specified key
:return: the action with the specified key
'''
raise NotImplementedError("Not implemented")
def __deepcopy__(self, memodict={}, newinstance=None):
if newinstance is None:
            newinstance = AbstractActionChance(self.__key__)
newinstance.__key__ = self.__key__
return newinstance
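# Usage sketch (editor's note): concrete games subclass this and implement
# lookup(), typically returning pre-built singleton actions keyed by `key`, e.g.
#   action = SomeGameActionChance.lookup("heart_A")  # hypothetical subclass and key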
| 31.075
| 145
| 0.662108
|
c7bf488bca71058618f9c9d64cada4b56b1805ca
| 54,146
|
py
|
Python
|
main/export/sbml.py
|
TeselaGen/jbei-edd
|
92792fb30bbd504143b2f75bf08d05b141a7ef6f
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
main/export/sbml.py
|
TeselaGen/jbei-edd
|
92792fb30bbd504143b2f75bf08d05b141a7ef6f
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
main/export/sbml.py
|
TeselaGen/jbei-edd
|
92792fb30bbd504143b2f75bf08d05b141a7ef6f
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Backend for exporting SBML files. """
# FIXME need to track intracellular and extracellular measurements separately
# (and assign to SBML species differently)
import libsbml
import logging
import math
import re
import sys
from bisect import bisect
from collections import defaultdict, namedtuple, OrderedDict
from copy import copy
from decimal import Decimal
from django import forms
from django.core.exceptions import ValidationError
from django.db.models import Max, Min, Prefetch, Q
from django.http import QueryDict
from django.template.defaulttags import register
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from functools import partial, reduce
from future.utils import viewitems, viewvalues
from itertools import chain
from six import string_types
from threadlocals.threadlocals import get_current_request
from .. import models
from ..forms import (
MetadataTypeAutocompleteWidget, SbmlExchangeAutocompleteWidget, SbmlSpeciesAutocompleteWidget
)
from ..utilities import interpolate_at
logger = logging.getLogger(__name__)
Range = namedtuple('Range', ['min', 'max'])
Point = namedtuple('Point', ['x', 'y'])
class SbmlForm(forms.Form):
def __init__(self, *args, **kwargs):
kwargs.setdefault('label_suffix', '')
super(SbmlForm, self).__init__(*args, **kwargs)
self._sbml_warnings = []
@property
def sbml_warnings(self):
self.is_valid() # trigger validation if needed
return self._sbml_warnings
class SbmlExport(object):
""" Controller class handling the data coming from SbmlForm objects, creating further SbmlForm
objects based on previous input, and exporting an SBML file based on the inputs. """
def __init__(self, selection, *args, **kwargs):
self._sbml_template = None
self._selection = selection
self._from_study_page = False
self._forms = {}
self._match_fields = {}
self._match_sbml_warnings = []
self._export_errors = []
self._max = self._min = None
self._points = None
self._density = []
self._measures = defaultdict(list)
self._omics = defaultdict(list)
self._values_by_type = defaultdict(list)
def add_density(self, density_measurements):
""" Collect biomass density measurements to calculate final SBML values.
:param density_measurements: an initialized SbmlExportOdForm """
measurements = density_measurements.cleaned_data.get('measurement', [])
interpolate = density_measurements.cleaned_data.get('interpolate', [])
default_factor = density_measurements.cleaned_data.get('gcdw_default', 0.65)
factor_meta = density_measurements.cleaned_data.get('gcdw_conversion', None)
measurement_qs = self.load_measurement_queryset(density_measurements)
# try to load factor metadata for each assay
for m in measurement_qs:
if factor_meta is None:
factor = default_factor
else:
# check for factor on line first
factor = m.assay.line.metadata_get(factor_meta, default_factor)
# allow for factor on assay to override the one on line
factor = m.assay.metadata_get(factor_meta, factor)
for v in m.values:
# storing as arrays to keep compatibility with interpolate_at
self._density.append(Point([v.x[0]], [v.y[0] * factor]))
# make sure it's sorted; potentially out-of-order from multiple measurements
        self._density.sort(key=lambda p: p.x[0])
# capture lower/upper bounds of t values for all measurements
self._update_range_bounds(measurements, interpolate)
def add_measurements(self, sbml_measurements):
""" Add measurements to the export from a SbmlExportMeasurementsForm.
:param sbml_measurements: an initialized SbmlExportMeasurementsForm """
measurements = sbml_measurements.cleaned_data.get('measurement', [])
interpolate = sbml_measurements.cleaned_data.get('interpolate', [])
# process all the scalar measurements
types_qs = models.MeasurementType.objects.filter(
measurement__in=measurements,
measurement__measurement_format=models.Measurement.Format.SCALAR,
).distinct()
types_list = list(types_qs)
# add fields matching species/exchange for every scalar measurement type
self._build_match_fields(types_list)
# store measurements keyed off type
for m in measurements:
self._measures['%s' % m.measurement_type_id].append(m)
# capture lower/upper bounds of t values for all measurements
self._update_range_bounds(measurements, interpolate)
def add_omics(self, sbml_measurements):
""" Collect omics measurements to calculate final SBML values.
:param sbml_measurements: an initialized SbmlExportOmicsForm """
measurements = sbml_measurements.cleaned_data.get('measurement', [])
interpolate = sbml_measurements.cleaned_data.get('interpolate', [])
# store measurements keyed off type_name
# TODO: should probably link off another identifier mapping types to SBML names
for m in measurements:
self._omics[m.measurement_type.type_name].append(m)
# capture lower/upper bounds of t values for all measurements
self._update_range_bounds(measurements, interpolate)
def create_export_form(self, payload, **kwargs):
""" Constructs an SbmlExportSettingsForm based on data contained in a POST.
:param payload: the QueryDict from POST attribute of a request
:param kwargs: any additional kwargs to pass to the form; see Django Forms
documentation.
:return: a SbmlExportSettingsForm """
export_settings_form = SbmlExportSettingsForm(
data=payload,
initial={'sbml_template': self._selection.studies[0].metabolic_map, },
**kwargs
)
self._from_study_page = export_settings_form.add_prefix('sbml_template') not in payload
if self._from_study_page: # coming from study page, make sure bound data has default value
export_settings_form.update_bound_data_with_defaults()
self._forms.update(export_settings_form=export_settings_form)
if export_settings_form.is_valid():
self._sbml_template = export_settings_form.cleaned_data['sbml_template']
self._sbml_obj = self._sbml_template.parseSBML()
self._sbml_model = self._sbml_obj.getModel()
return export_settings_form
def create_match_form(self, payload, **kwargs):
""" Constructs an SbmlMatchReactions form, linking SBML reaction elements to specific
measurements.
:param payload: the QueryDict from POST attribute of a request
:param kwargs: any additional kwargs to pass to the form; see Django Forms
documentation.
:return: a SbmlMatchReactions form """
# create the form
match = SbmlMatchReactions(
data=payload,
sbml_template=self._sbml_template,
match_fields=self._match_fields,
**kwargs
)
# if payload does not have keys for some fields, make sure form uses default initial
replace_data = QueryDict(mutable=True)
# loop the fields
for key, field in self._match_fields.items():
base_name = match.add_prefix(key)
# then loop the values in the field
for i0, value in enumerate(field.initial):
# finally, loop the decompressed parts of the value
for i1, part in enumerate(field.widget.widgets[i0].decompress(value)):
part_key = '%s_%d_%d' % (base_name, i0, i1)
if part_key not in payload:
replace_data[part_key] = part
replace_data.update(payload)
match.data = replace_data
match.sbml_warnings.extend(self._match_sbml_warnings)
return match
def create_measurement_forms(self, payload, **kwargs):
""" Constructs a series of forms used to select which measurements to include in an SBML
export.
:param payload: the QueryDict from POST attribute of a request
:param kwargs: any additional kwargs to pass to ALL forms; see Django Forms
documentation. """
line = self._selection.lines[0]
m_forms = {
'od_select_form': SbmlExportOdForm(
data=payload, prefix='od', line=line,
qfilter=(Q(measurement_type__short_name='OD') &
Q(assay__protocol__categorization=models.Protocol.CATEGORY_OD)),
**kwargs
),
'hplc_select_form': SbmlExportMeasurementsForm(
data=payload, prefix='hplc', line=line,
qfilter=Q(assay__protocol__categorization=models.Protocol.CATEGORY_HPLC),
**kwargs
),
'ms_select_form': SbmlExportMeasurementsForm(
data=payload, prefix='ms', line=line,
qfilter=Q(assay__protocol__categorization=models.Protocol.CATEGORY_LCMS),
**kwargs
),
'ramos_select_form': SbmlExportMeasurementsForm(
data=payload, prefix='ramos', line=line,
qfilter=Q(assay__protocol__categorization=models.Protocol.CATEGORY_RAMOS),
**kwargs
),
'omics_select_form': SbmlExportOmicsForm(
data=payload, prefix='omics', line=line,
qfilter=Q(assay__protocol__categorization=models.Protocol.CATEGORY_TPOMICS),
**kwargs
),
}
for m_form in m_forms.values():
if self._from_study_page:
m_form.update_bound_data_with_defaults()
if m_form.is_valid() and self._sbml_template:
is_density = isinstance(m_form, SbmlExportOdForm)
is_omics = isinstance(m_form, SbmlExportOmicsForm)
if is_density:
self.add_density(m_form)
elif is_omics:
self.add_omics(m_form)
else:
self.add_measurements(m_form)
self._forms.update(m_forms)
def create_output_forms(self, payload, **kwargs):
""" Create forms altering output of SBML; depends on measurement forms already existing.
:param payload: the QueryDict from POST attribute of a request
:param kwargs: any additional kwargs to pass to ALL forms; see Django Forms
documentation. """
if all(map(lambda f: f.is_valid(), self._forms.values())):
match_form = self.create_match_form(payload, prefix='match', **kwargs)
time_form = self.create_time_select_form(payload, prefix='time', **kwargs)
self._forms.update({
'match_form': match_form,
'time_form': time_form,
})
def create_time_select_form(self, payload, **kwargs):
""" Constructs a form to select the timepoint of data to export to SBML and the output
filename. Depends on measurement forms already existing.
:param payload: the QueryDict from POST attribute of a request
:param kwargs: any additional kwargs to pass to ALL forms; see Django Forms
documentation.
:return: a SbmlExportSelectionForm """
# error if no range or if max < min
if self._min is None or self._max is None or self._max < self._min:
return None
points = self._points
t_range = Range(min=self._min, max=self._max)
if points is not None:
points = sorted(points)
time_form = SbmlExportSelectionForm(
t_range=t_range, points=points, line=self._selection.lines[0], data=payload, **kwargs
)
time_form.sbml_warnings.extend(self._export_errors)
return time_form
def init_forms(self, payload, context):
""" Constructs all the forms used in an SBML export based on data from a POST request.
:param payload: the QueryDict from POST attribute of a request
:param context: the view context object, for passing information to templates
:return: an updated context """
self.create_export_form(payload)
self.create_measurement_forms(payload)
self.create_output_forms(payload)
return self.update_view_context(context)
def load_measurement_queryset(self, m_form):
""" Creates a queryset from the IDs in the measurements parameter, prefetching values to a
values attr on each measurement.
:param m_form: an SbmlExportMeasurementsForm
:return: a QuerySet of measurements referenced in the form """
# TODO: change to .order_by('x__0') once Django supports ordering on transform
# https://code.djangoproject.com/ticket/24747
values_qs = models.MeasurementValue.objects.filter(x__len=1, y__len=1).order_by('x')
return m_form.measurement_qs.filter(
measurement_format=models.Measurement.Format.SCALAR
).select_related(
'assay__line',
).prefetch_related(
Prefetch('measurementvalue_set', queryset=values_qs, to_attr='values'),
)
def output(self, time, matches):
""" Writes the output SBML as a string.
:param time: the selected time from a SbmlExportSelectionForm
:param matches: the selected reaction<->measurement matches from a SbmlMatchReactions
form
:return: a SBML document serialized to a string """
# TODO: make matches param match_form instead of match_form.cleaned_data
# map species / reaction IDs to measurement IDs
our_species = {}
our_reactions = {}
for mtype, match in matches.items():
if match: # when not None, match[0] == species and match[1] == reaction
if match[0] and match[0] not in our_species:
our_species[match[0]] = mtype
if match[1] and match[1] not in our_reactions:
our_reactions[match[1]] = mtype
builder = SbmlBuilder()
self._update_biomass(builder, time)
self._update_species(builder, our_species, time)
self._update_reaction(builder, our_reactions, time)
self._update_carbon_ratio(builder, time)
return builder.write_to_string(self._sbml_obj)
def update_view_context(self, context):
""" Adds additional display information to the view context, to be used by the template
processor.
:param context: the view context object, for passing information to templates
:return: an updated context """
# collect all the warnings together for counting
forms = [f for f in self._forms.values() if isinstance(f, SbmlForm)]
sbml_warnings = chain(*[f.sbml_warnings if f else [] for f in forms])
context.update(self._forms)
context.update(sbml_warnings=list(sbml_warnings))
return context
def _build_match_fields(self, types_list):
species_qs = models.MetaboliteSpecies.objects.filter(
measurement_type__in=types_list,
sbml_template=self._sbml_template,
)
species_match = {s.measurement_type_id: s for s in species_qs}
exchange_qs = models.MetaboliteExchange.objects.filter(
measurement_type__in=types_list,
sbml_template=self._sbml_template,
)
exchange_match = {x.measurement_type_id: x for x in exchange_qs}
for t in types_list:
key = '%s' % t.pk
if key not in self._match_fields:
i_species = species_match.get(t.pk, self._guess_species(t))
i_exchange = exchange_match.get(t.pk, self._guess_exchange(t))
self._match_fields[key] = SbmlMatchReactionField(
initial=(i_species, i_exchange),
label=t.type_name,
required=False,
template=self._sbml_template,
)
def _guess_exchange(self, measurement_type):
mname = measurement_type.short_name
mname_transcoded = generate_transcoded_metabolite_name(mname)
guesses = [
mname,
mname_transcoded,
"M_" + mname + "_e",
"M_" + mname_transcoded + "_e",
"M_" + mname_transcoded + "_e_",
]
lookup = defaultdict(list)
exchanges = models.MetaboliteExchange.objects.filter(
reactant_name__in=guesses,
sbml_template=self._sbml_template,
)
for x in exchanges:
lookup[x.reactant_name].append(x)
for guess in guesses:
match = lookup.get(guess, None)
if match:
if len(match) > 1:
self._match_sbml_warnings.append(
_('Multiple exchanges found for %(type)s using %(guess)s. Selected '
'exchange %(match)s') % {
'guess': guess,
'match': match[0],
'type': measurement_type.type_name,
}
)
return match[0]
return None
def _guess_species(self, measurement_type):
guesses = generate_species_name_guesses_from_metabolite_name(measurement_type.short_name)
lookup = {
s.species: s
for s in models.MetaboliteSpecies.objects.filter(
sbml_template=self._sbml_template,
species__in=guesses,
)
}
for guess in guesses:
if guess in lookup:
return lookup[guess]
return None
def _update_biomass(self, builder, time):
biomass = self._sbml_template.biomass_exchange_name
reaction = self._sbml_model.getReaction(biomass)
flux = 0
try:
times = [p.x[0] for p in self._density]
next_index = bisect(times, time)
# already converted with gCDW in SbmlExport#addDensity()
if next_index == len(times) and time == times[-1]:
# calculate flux based on second-to-last for last element
y_0 = self._density[-2].y[0]
y_next = self._density[-1].y[0]
time_delta = float(time - times[-2])
elif next_index == len(times):
logger.warning('tried to calculate biomass flux beyond upper range of data')
return
elif next_index == 0 and times[0] != time:
logger.warning('tried to calculate biomass flux beyond lower range of data')
return
else:
# calculate flux to next value for all but last value
y_0 = interpolate_at(self._density, time)
y_next = float(self._density[next_index].y[0])
time_delta = float(times[next_index] - time)
flux = math.log(y_next / y_0) / time_delta
kinetic_law = reaction.getKineticLaw()
# NOTE: libsbml calls require use of 'bytes' CStrings
upper_bound = kinetic_law.getParameter("UPPER_BOUND")
lower_bound = kinetic_law.getParameter("LOWER_BOUND")
upper_bound.setValue(flux)
lower_bound.setValue(flux)
except Exception as e:
logger.exception('hit an error calculating biomass flux: %s', e)
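# Worked example of the flux formula above (hypothetical numbers): with a
# gCDW-converted density of 0.5 at the export time and 1.0 at the next
# measured time 2 hours later, flux = ln(1.0 / 0.5) / 2 ≈ 0.347 per hour,
# and that value is written to both the UPPER_BOUND and LOWER_BOUND
# parameters of the biomass exchange reaction.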
def _update_carbon_ratio(self, builder, time):
notes = defaultdict(list)
for mlist in viewvalues(self._measures):
for m in mlist:
if m.is_carbon_ratio():
points = models.MeasurementValue.objects.filter(measurement=m, x__0=time)
if points.exists():
# only get first value object, unwrap values_list tuple to get y-array
magnitudes = points.values_list('y')[0][0]
combined = ['%s(0.02)' % v for v in magnitudes]
# pad out to 13 elements
combined += ['-'] * (13 - len(magnitudes))
name = m.measurement_type.short_name
value = '\t'.join(combined)
# TODO: find a better way to store/update this magic string
notes['LCMSLabelData'].append(' %s\tM-0\t%s' % (name, value))
else:
logger.warning(
"No vector data found for %(measurement)s at %(time)s",
{'measurement': m, 'time': time}
)
if self._sbml_model.isSetNotes():
notes_obj = self._sbml_model.getNotes()
else:
notes_obj = builder.create_note_body()
notes_obj = builder.update_note_body(notes_obj, **notes)
self._sbml_model.setNotes(notes_obj)
def _update_omics(self, builder, reaction, time):
transcripts = []
p_copies = []
if reaction.isSetNotes():
reaction_note_body = reaction.getNotes()
else:
reaction_note_body = builder.create_note_body()
notes = builder.parse_note_body(reaction_note_body)
for name in builder.read_note_associations(notes):
values = models.MeasurementValue.objects.filter(
measurement__in=self._omics.get(name, []),
x__0=time,
).select_related('measurement__measurement_type')
for v in values:
text = '%s=%d' % (name, v.y[0])
if v.measurement.measurement_type.is_gene():
transcripts.append(text)
elif v.measurement.measurement_type.is_protein():
p_copies.append(text)
reaction_note_body = builder.update_note_body(
reaction_note_body,
GENE_TRANSCRIPTION_VALUES=' '.join(transcripts),
PROTEIN_COPY_VALUES=' '.join(p_copies),
)
reaction.setNotes(reaction_note_body)
def _update_range_bounds(self, measurements, interpolate):
measurement_qs = models.Measurement.objects.filter(pk__in=measurements)
values_qs = models.MeasurementValue.objects.filter(x__len=1).order_by('x')
# capture lower/upper bounds of t values for all measurements
trange = measurement_qs.aggregate(
max_t=Max('measurementvalue__x'), min_t=Min('measurementvalue__x'),
)
if trange['max_t']:
self._max = min(trange['max_t'][0], self._max or sys.maxsize)
if trange['min_t']:
self._min = max(trange['min_t'][0], self._min or -sys.maxsize)
# iff no interpolation, capture intersection of t values bounded by max & min
m_inter = measurement_qs.exclude(assay__protocol__in=interpolate).prefetch_related(
Prefetch('measurementvalue_set', queryset=values_qs, to_attr='values'),
)
for m in m_inter:
points = {p.x[0] for p in m.values if self._min <= p.x[0] <= self._max}
if self._points is None:
self._points = points
elif self._points:
self._points.intersection_update(points)
if not self._points:
# Adding warning as soon as no valid timepoints found
self._export_errors.append(
_('Including measurement %(type_name)s results in no valid export '
'timepoints; consider excluding this measurement, or enable '
'interpolation for the %(protocol)s protocol.') % {
'type_name': m.measurement_type.type_name,
'protocol': m.assay.protocol.name,
}
)
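# Illustrative example (hypothetical t-values): two non-interpolated
# measurements with x in {0, 4, 8} and {4, 8, 12} give a combined range of
# (0, 12), but the intersection of their timepoints is {4, 8}, so only
# t = 4 or t = 8 would be offered as valid export times by the time-select form.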
def _update_reaction(self, builder, our_reactions, time):
# loop over all template reactions, if in our_reactions set bounds, notes, etc
for reaction_sid, mtype in viewitems(our_reactions):
type_key = '%s' % mtype
reaction = self._sbml_model.getReaction(reaction_sid)
if reaction is None:
logger.warning(
'No reaction found in %(template)s with ID %(id)s' % {
'template': self._sbml_template,
'id': reaction_sid,
}
)
continue
else:
logger.info("working on reaction %s", reaction_sid)
self._update_omics(builder, reaction, time)
try:
values = self._values_by_type[type_key]
times = [v.x[0] for v in values]
next_index = bisect(times, time)
if time > times[-1]:
logger.warning('tried to calculate reaction flux beyond upper range of data')
continue
elif time < times[0]:
logger.warning('tried to calculate reaction flux beyond lower range of data')
continue
elif next_index == len(times):
# calculate flux based on second-to-last for last element
y_0 = float(values[-1].y[0])
y_prev = float(values[-2].y[0])
y_delta = y_0 - y_prev
time_delta = float(time - times[-2])
else:
# calculate flux to next value for all but last value
y_0 = interpolate_at(values, time) # interpolate_at returns a float
y_next = float(values[next_index].y[0])
y_delta = y_next - y_0
time_delta = float(times[next_index] - time)
# NOTE: arithmetic operators do not work between float and Decimal
density = interpolate_at(self._density, time)
start_density = interpolate_at(self._density, time - time_delta)
# TODO: find better way to detect ratio units
if values[0].measurement.y_units.unit_name.endswith('/hr'):
flux_end = y_0 / density
flux_start = flux_end
else:
flux_start = (y_delta / time_delta) / start_density
flux_end = (y_delta / time_delta) / density
kinetic_law = reaction.getKineticLaw()
# NOTE: libsbml calls require use of 'bytes' CStrings
upper_bound = kinetic_law.getParameter("UPPER_BOUND")
lower_bound = kinetic_law.getParameter("LOWER_BOUND")
upper_bound.setValue(max(flux_start, flux_end))
lower_bound.setValue(min(flux_start, flux_end))
except Exception as e:
logger.exception('hit an error calculating reaction values: %s', type(e))
def _update_species(self, builder, our_species, time):
# loop over all template species, if in our_species set the notes section
# TODO: keep MeasurementType in match_form, remove need to re-query Metabolite
for species_sid, mtype in viewitems(our_species):
type_key = '%s' % mtype
metabolite = None
try:
metabolite = models.Metabolite.objects.get(pk=type_key)
except models.Metabolite.DoesNotExist:
logger.warning('Type %s is not a Metabolite', type_key)
species = self._sbml_model.getSpecies(species_sid)
if species is None:
logger.warning(
'No species found in %(template)s with ID %(id)s' % {
'template': self._sbml_template,
'id': species_sid,
}
)
continue
# collected all measurement_id matching type in add_measurements()
measurements = self._measures.get(type_key, [])
current = minimum = maximum = ''
try:
# TODO: change to .order_by('x__0') once Django supports ordering on transform
# https://code.djangoproject.com/ticket/24747
values = list(models.MeasurementValue.objects.filter(
measurement__in=measurements
).select_related('measurement__y_units').order_by('x'))
# convert units
for v in values:
units = v.measurement.y_units
f = models.MeasurementUnit.conversion_dict.get(units.unit_name, None)
if f is not None:
v.y = [f(y, metabolite) for y in v.y]
else:
logger.warning('unrecognized unit %s', units)
# save here so _update_reaction does not need to re-query
self._values_by_type[type_key] = values
minimum = float(min(values, key=lambda v: v.y[0]).y[0])
maximum = float(max(values, key=lambda v: v.y[0]).y[0])
current = interpolate_at(values, time)
except Exception as e:
logger.exception('hit an error calculating species values: %s', type(e))
else:
if species.isSetNotes():
species_notes = species.getNotes()
else:
species_notes = builder.create_note_body()
species_notes = builder.update_note_body(
species_notes,
CONCENTRATION_CURRENT='%s' % current,
CONCENTRATION_HIGHEST='%s' % maximum,
CONCENTRATION_LOWEST='%s' % minimum,
)
species.setNotes(species_notes)
class SbmlExportSettingsForm(SbmlForm):
""" Form used for selecting settings on SBML exports. """
sbml_template = forms.ModelChoiceField(
# TODO: potentially narrow options based on current user?
models.SBMLTemplate.objects.exclude(biomass_exchange_name=''),
label=_('SBML Template'),
)
def update_bound_data_with_defaults(self):
""" Forces data bound to the form to update to default values. """
if self.is_bound:
# create mutable copy of QueryDict
replace_data = QueryDict(mutable=True)
replace_data.update(self.data)
# set initial measurementId values
field = self.fields['sbml_template']
if field.initial:
replace_data[self.add_prefix('sbml_template')] = '%s' % field.initial
else:
self._sbml_warnings.append(
_('No SBML template set for this study; a template must be selected to '
'export data as SBML.')
)
self.data = replace_data
@register.filter(name='scaled_x')
def scaled_x(point, x_range):
""" Template filter calculates the relative X value for SVG sparklines. """
return ((point.x[0] / x_range[1]) * 450) + 10
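# e.g. a point with x == [5] and x_range == (0, 10) maps to
# (5 / 10) * 450 + 10 == 235.0 SVG units from the left edge.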
class MeasurementChoiceField(forms.ModelMultipleChoiceField):
""" Custom ModelMultipleChoiceField that changes the display of measurement labels. """
def label_from_instance(self, obj):
return obj.full_name
class SbmlExportMeasurementsForm(SbmlForm):
""" Form used for selecting measurements to include in SBML exports. """
measurement = MeasurementChoiceField(
queryset=models.Measurement.objects.none(), # this is overridden in __init__()
required=False,
widget=forms.CheckboxSelectMultiple,
)
interpolate = forms.ModelMultipleChoiceField(
label=_('Allow interpolation for'),
queryset=models.Protocol.objects.none(), # this is overridden in __init__()
required=False,
widget=forms.CheckboxSelectMultiple,
)
def __init__(self, line, *args, **kwargs):
"""
Required:
line = a main.models.Line object defining the items for export
Optional:
qfilter = arguments to filter a measurement queryset from
main.export.table.ExportSelection
"""
qfilter = kwargs.pop('qfilter', None)
super(SbmlExportMeasurementsForm, self).__init__(*args, **kwargs)
self._line = line
self._init_fields(qfilter)
def _init_fields(self, qfilter):
f = self.fields['measurement']
f.queryset = models.Measurement.objects.filter(
assay__line=self._line,
).order_by(
'assay__protocol__name', 'assay__name',
).select_related(
# including these to cut down on additional queries later
'assay',
'assay__protocol',
'y_units',
'measurement_type',
).prefetch_related(
'measurementvalue_set',
)
if qfilter is not None:
f.queryset = f.queryset.filter(qfilter)
if f.queryset.count() == 0:
self._sbml_warnings.append(_('No protocols have usable data.'))
f.initial = []
del self.fields['interpolate']
else:
f.initial = f.queryset
# Add in warnings for any Metabolite measurements that have no defined molar_mass
missing_mass = models.Metabolite.objects.filter(
Q(measurement__in=f.queryset),
Q(molar_mass__isnull=True) | Q(molar_mass=0),
).order_by('type_name')
for metabolite in missing_mass:
self._sbml_warnings.append(
_('Measurement type %(type_name)s has no defined molar mass.') % {
'type_name': metabolite.type_name,
}
)
self.fields['interpolate'].queryset = models.Protocol.objects.filter(
assay__measurement__in=f.queryset
).distinct()
return f.queryset
def form_without_measurements(self):
""" Returns a copy of the form without the measurement field; this allows rendering in
templates as, e.g. `form_var.form_without_measurements.as_p`. """
fles = copy(self)
fles.fields = OrderedDict(self.fields)
del fles.fields['measurement']
return fles
def measurement_split(self):
""" Generator which yields a Measurement object and the widget used to select the same. """
for index, measurement in enumerate(self.measurement_list):
yield (measurement, self.measurement_widgets[index])
def protocol_split(self):
""" Generator which yields a Protocol name and a list of
(Measurement object, Measurement select widget) tuples. """
prev_protocol = None
items = []
# loop over all the choices in the queryset
for index, measurement in enumerate(self.measurement_list):
protocol = measurement.assay.protocol
# when the protocol changes, yield the protocol and the choices using it
if protocol != prev_protocol:
if prev_protocol is not None:
yield (prev_protocol, items)
prev_protocol = protocol
items = []
items.append((measurement, self.measurement_widgets[index]))
# at the end, yield the final choices
if prev_protocol is not None:
yield (prev_protocol, items)
def x_range(self):
""" Returns the bounding range of X-values used for all Measurements in the form. """
f = self.fields['measurement']
x_range = f.queryset.aggregate(
max=Max('measurementvalue__x'), min=Min('measurementvalue__x')
)
# can potentially get None if there are no values; use __getitem__ default AND `or [0]`
x_max = x_range.get('max', [0]) or [0]
x_min = x_range.get('min', [0]) or [0]
# max and min are both still arrays, grab the first element
return (x_min[0], x_max[0])
def update_bound_data_with_defaults(self):
""" Forces data bound to the form to update to default values. """
if self.is_bound:
# create mutable copy of QueryDict
replace_data = QueryDict(mutable=True)
replace_data.update(self.data)
# set initial measurement values
mfield = self.fields['measurement']
replace_data.setlist(
self.add_prefix('measurement'),
['%s' % v.pk for v in mfield.initial]
)
self.data = replace_data
def _get_measurements(self):
# lazy eval and try not to query more than once
# NOTE: still gets evaled at least three times: populating choices, here, and validation
if not hasattr(self, '_measures'):
field = self.fields['measurement']
self._measures = list(field.queryset)
return self._measures
measurement_list = property(_get_measurements,
doc='A list of Measurements included in the form')
def _get_measurement_qs(self):
field = self.fields.get('measurement', None)
return field.queryset if field else models.Measurement.objects.none()
measurement_qs = property(_get_measurement_qs,
doc='A queryset of the Measurements included in the form')
def _get_measurement_widgets(self):
# lazy eval and try not to query more than once
if not hasattr(self, '_measure_widgets'):
widgets = self['measurement']
self._measure_widgets = list(widgets)
return self._measure_widgets
measurement_widgets = property(_get_measurement_widgets,
doc='A list of widgets used to select Measurements')
class SbmlExportOmicsForm(SbmlExportMeasurementsForm):
""" Specific named class for selection of Omics measurements. """
pass
class SbmlExportOdForm(SbmlExportMeasurementsForm):
""" Specific class for selection of density measurements. """
DEFAULT_GCDW_FACTOR = Decimal('0.65')
PREF_GCDW_META = 'export.sbml.gcdw_metadata'
gcdw_conversion = forms.ModelChoiceField(
empty_label=None,
help_text=_('Select the metadata containing the conversion factor for Optical Density '
'to grams carbon dry-weight per liter.'),
label=_('gCDW/L/OD factor metadata'),
queryset=models.MetadataType.objects.filter(),
required=False,
widget=MetadataTypeAutocompleteWidget,
)
gcdw_default = forms.DecimalField(
help_text=_('Override the default conversion factor used if no metadata value is found.'),
initial=DEFAULT_GCDW_FACTOR,
label=_('Default gCDW/L/OD factor'),
min_value=Decimal(0),
required=True,
)
field_order = ['gcdw_default', 'gcdw_conversion', 'interpolate', ]
def clean(self):
data = super(SbmlExportOdForm, self).clean()
gcdw_default = data.get('gcdw_default', self.DEFAULT_GCDW_FACTOR)
conversion_meta = data.get('gcdw_conversion', None)
if conversion_meta is None:
self._sbml_warnings.append(mark_safe(
_('No gCDW/L/OD metadata selected, all measurements will be converted with the '
'default factor of <b>%(factor)f</b>.') % {'factor': gcdw_default}
))
else:
self._clean_check_for_gcdw(data, gcdw_default, conversion_meta)
# make sure that at least some OD measurements are selected
if len(data.get('measurement', [])) == 0:
raise ValidationError(
_('No Optical Data measurements were selected. Biomass measurements are essential '
'for flux balance analysis.'),
code='OD-required-for-FBA'
)
return data
def _clean_check_for_curve(self, data):
""" Ensures that each unique selected line has at least two points to calculate a
growth curve. """
for line in viewvalues(self._clean_collect_data_lines(data)):
count = 0
for m in self._measures_by_line[line.pk]:
count += len(m.measurementvalue_set.all())
if count > 1:
break
if count < 2:
raise ValidationError(
_('Optical Data for %(line)s contains fewer than two data points. Biomass '
'measurements are essential for FBA, and at least two are needed to define '
'a growth rate.') % {'line': line.name},
code='growth-rate-required-for-FBA'
)
def _clean_check_for_gcdw(self, data, gcdw_default, conversion_meta):
""" Ensures that each unique selected line has a gCDW/L/OD factor. """
# warn for any lines missing the selected metadata type
for line in viewvalues(self._clean_collect_data_lines(data)):
factor = line.metadata_get(conversion_meta)
# TODO: also check that the factor in metadata is a valid value
if factor is None:
self._sbml_warnings.append(
_('Could not find metadata %(meta)s on %(line)s; using default factor '
'of <b>%(factor)f</b>.') % {
'factor': gcdw_default,
'line': line.name,
'meta': conversion_meta.type_name,
}
)
def _clean_collect_data_lines(self, data):
""" Collects all the lines included in a data selection. """
if not hasattr(self, '_lines'):
self._lines = {}
self._measures_by_line = defaultdict(list)
# get unique lines first
for m in data.get('measurement', []):
self._lines[m.assay.line.pk] = m.assay.line
self._measures_by_line[m.assay.line.pk].append(m)
return self._lines
def _init_conversion(self):
""" Attempt to load a default initial value for gcdw_conversion based on user. """
request = get_current_request()
if request and request.user:
prefs = request.user.profile.prefs
try:
return models.MetadataType.objects.get(pk=prefs[self.PREF_GCDW_META])
except models.MetadataType.DoesNotExist:
return None
# TODO: load preferences from the system user if no request user
return None
def update_bound_data_with_defaults(self):
""" Forces data bound to the form to update to default values. """
super(SbmlExportOdForm, self).update_bound_data_with_defaults()
if self.is_bound:
# create mutable copy of QueryDict
replace_data = QueryDict(mutable=True)
replace_data.update(self.data)
# set initial gcdw_conversion values
cfield = self.fields['gcdw_conversion']
if cfield.initial:
name = self.add_prefix('gcdw_conversion')
for i, part in enumerate(cfield.widget.decompress(cfield.initial)):
replace_data['%s_%s' % (name, i)] = part
# set initial gcdw_default value
dfield = self.fields['gcdw_default']
replace_data[self.add_prefix('gcdw_default')] = '%s' % dfield.initial
self.data = replace_data
class SbmlMatchReactionWidget(forms.widgets.MultiWidget):
""" Widget combining both SBML species selection and SBML reaction selection for a particular
MeasurementType. """
def __init__(self, template, attrs=None):
widgets = (
SbmlSpeciesAutocompleteWidget(template),
SbmlExchangeAutocompleteWidget(template),
)
super(SbmlMatchReactionWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value is None:
return ['', '']
return value # value is a tuple anyway
def format_output(self, rendered_widgets):
return '</td><td>'.join(rendered_widgets)
class SbmlMatchReactionField(forms.MultiValueField):
""" A form Field combining the selected values of SBML species and SBML reaction. """
def __init__(self, template, *args, **kwargs):
fields = (forms.CharField(), forms.CharField()) # these are only placeholders
self.widget = SbmlMatchReactionWidget(template)
super(SbmlMatchReactionField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# TODO validation
return (data_list[0], data_list[1])
return None
class SbmlMatchReactions(SbmlForm):
""" A form to match selected MeasurementTypes to species and reactions contained in an
SBMLTemplate. """
def __init__(self, sbml_template, match_fields, *args, **kwargs):
super(SbmlMatchReactions, self).__init__(*args, **kwargs)
self._sbml_template = sbml_template
self.fields.update(match_fields)
def clean(self):
# TODO validate the choices
return super(SbmlMatchReactions, self).clean()
class SbmlExportSelectionForm(SbmlForm):
""" Form determining output timepoint and filename for an SBML download. """
time_select = forms.DecimalField(
help_text=_('Select the time to compute fluxes for embedding in SBML template'),
label=_('Time for export'),
)
filename = forms.CharField(
help_text=_('Choose the filename for the downloaded SBML file'),
initial=_('changeme.sbml'),
label=_('SBML Filename'),
max_length=255,
required=False,
)
def __init__(self, t_range, points=None, line=None, *args, **kwargs):
super(SbmlExportSelectionForm, self).__init__(*args, **kwargs)
time_field = self.fields['time_select']
if points is not None:
initial = points[0] if points else None
self.fields['time_select'] = forms.TypedChoiceField(
choices=[('%s' % t, '%s hr' % t) for t in points],
coerce=Decimal,
empty_value=None,
help_text=time_field.help_text,
initial=initial,
label=time_field.label,
)
else:
time_field.max_value = t_range.max
time_field.min_value = t_range.min
time_field.initial = t_range.min
time_field.help_text = _(
'Select the time to compute fluxes for embedding in SBML template (in the range '
'%(min)s to %(max)s)'
) % t_range._asdict()
if line is not None:
self.fields['filename'].initial = '%s.sbml' % line.name
# update self.data with defaults for fields
replace_data = QueryDict(mutable=True)
for fn in ['time_select', 'filename']:
fk = self.add_prefix(fn)
if fk not in self.data:
replace_data[fk] = self.fields[fn].initial
replace_data.update(self.data)
self.data = replace_data
class SbmlBuilder(object):
""" A little facade class to provide better interface to libsbml and some higher-level
utilities to work with SBML files. """
def create_note_body(self):
""" Creates an empty notes element.
:return: an empty notes XMLNode """
notes_node = libsbml.XMLNode()
body_tag = libsbml.XMLTriple("body", "", "")
attributes = libsbml.XMLAttributes()
namespace = libsbml.XMLNamespaces()
namespace.add("http://www.w3.org/1999/xhtml", "")
body_token = libsbml.XMLToken(body_tag, attributes, namespace)
body_node = libsbml.XMLNode(body_token)
notes_node.addChild(body_node)
return notes_node
def parse_note_body(self, node):
""" Reads a notes element into an OrderedDict (keys will iterate in order read).
:param node: the notes SBML node
:return: an OrderedDict of contents of the notes element """
notes = OrderedDict()
if node is None:
return notes
note_body = node
if note_body.hasChild('body'):
note_body = note_body.getChild(0)
# API not very pythonic, cannot just iterate over children
for index in range(note_body.getNumChildren()):
p = note_body.getChild(index)
if p.getNumChildren() > 1:
text = p.getChild(0).toXMLString()
key, value = text.split(':')
key = key.strip()
notes[key] = p.getChild(1)
elif p.getNumChildren() == 1:
text = p.getChild(0).toXMLString()
key, value = text.split(':')
notes[key.strip()] = value.strip()
return notes
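# Sketch of the expected note layout (hypothetical content): a body such as
#
#     <body>
#       <p>GENE_ASSOCIATION: b0001 or b0002</p>
#       <p>SUBSYSTEM: Glycolysis</p>
#     </body>
#
# parses to OrderedDict([('GENE_ASSOCIATION', 'b0001 or b0002'),
#                        ('SUBSYSTEM', 'Glycolysis')]).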
def read_note_associations(self, notes):
""" Parses gene and protein associations from SBML notes.
:param notes: a dict parsed from SbmlBuilder#parse_note_body
:return: an iterable of names (strings) associated with a reaction """
# previous code tried to parse out based on boolean operators, but that info was never
# used; now using simpler method of finding 'word' tokens, discarding matches to:
# 'and', 'or', 'None', and 'N.A.'
ignore = {'and', 'or', 'None', 'N.A.'}
pattern = re.compile(r'\b\w+\b')
g_assoc = notes.get('GENE_ASSOCIATION', '')
p_assoc = notes.get('PROTEIN_ASSOCIATION', '')
return chain(
[name for name in pattern.findall(g_assoc) if name not in ignore],
[name for name in pattern.findall(p_assoc) if name not in ignore],
)
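# Illustrative sketch: for notes {'GENE_ASSOCIATION': '(b0001 and b0002) or b0003'}
# the \w+ pattern yields ['b0001', 'and', 'b0002', 'or', 'b0003'] and the
# ignore set trims that to the association names ['b0001', 'b0002', 'b0003'].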
def update_note_body(self, _note_node, **kwargs):
""" Writes keys to a notes element.
:param _note_node: a notes XMLNode
:param kwargs: arbitrary key-values to add to the notes
:return: the notes element passed in """
# ensure adding to the <body> node
body = _note_node
if _note_node.hasChild('body'):
body = _note_node.getChild(0)
notes = self.parse_note_body(body)
notes.update(**kwargs)
body.removeChildren()
for key, value in viewitems(notes):
if isinstance(value, string_types):
self._add_p_tag(body, '%s: %s' % (key, value))
else:
try:
# add a p-tag for every element in list
for line in value:
self._add_p_tag(body, '%s:%s' % (key, line))
except TypeError:
# add p-tag and append any XML contained in value
p_node = self._add_p_tag(body, '%s: ' % (key, ))
if isinstance(value, libsbml.XMLNode):
p_node.addChild(value)
return _note_node
def write_to_string(self, document):
""" Writes an in-memory SBML document to a string.
:return: a string serialization of an SBML document """
return libsbml.writeSBMLToString(document)
def _add_p_tag(self, body, text):
attributes = libsbml.XMLAttributes()
namespace = libsbml.XMLNamespaces()
p_tag = libsbml.XMLTriple("p", "", "")
p_token = libsbml.XMLToken(p_tag, attributes, namespace)
text_token = libsbml.XMLToken(text)
text_node = libsbml.XMLNode(text_token)
p_node = libsbml.XMLNode(p_token)
p_node.addChild(text_node)
body.addChild(p_node)
return p_node
def compose(*args):
""" Composes argument functions and returns resulting function;
e.g. compose(f, g)(x) == f(g(x)) """
return reduce(lambda f, g: lambda x: f(g(x)), args, lambda x: x)
# functions to substitute character sequences in a string
dash_sub = partial(re.compile(r'-').sub, '_DASH_')
lparen_sub = partial(re.compile(r'\(').sub, '_LPAREN_')
rparen_sub = partial(re.compile(r'\)').sub, '_RPAREN_')
lsqbkt_sub = partial(re.compile(r'\[').sub, '_LSQBKT_')
rsqbkt_sub = partial(re.compile(r'\]').sub, '_RSQBKT_')
transcode = compose(dash_sub, lparen_sub, rparen_sub, lsqbkt_sub, rsqbkt_sub)
# "Transcoded" means that we make it friendlier for SBML species names,
# which means we translate symbols that are allowed in EDD metabolite names
# like "-" to things like "_DASH_".
def generate_transcoded_metabolite_name(mname):
# This is a hack to adapt two specific metabolites from their
# "-produced" and "-consumed" variants to their ordinary names.
# It's needed here because the original variants are considered
# "rates", while the metabolites we're matching to are not.
if (mname == "CO2p"):
mname = "co2"
elif (mname == "O2c"):
mname = "o2"
return transcode(mname)
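# Illustrative examples (hypothetical metabolite short names):
#     generate_transcoded_metabolite_name('ac-A(B)')   # -> 'ac_DASH_A_LPAREN_B_RPAREN_'
#     generate_transcoded_metabolite_name('CO2p')      # -> 'co2' (special-cased above)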
# This returns an array of possible SBML species names from a metabolite name.
# FIXME should this distinguish between compartments?
def generate_species_name_guesses_from_metabolite_name(mname):
mname_transcoded = generate_transcoded_metabolite_name(mname)
return [
mname,
mname_transcoded,
"M_" + mname + "_c",
"M_" + mname_transcoded + "_c",
"M_" + mname_transcoded + "_c_",
]
########################################################################
# ADMIN FEATURES
#
def validate_sbml_attachment(file_data):
sbml = libsbml.readSBMLFromString(file_data)
errors = sbml.getErrorLog()
if (errors.getNumErrors() > 0):
raise ValueError(errors.getError(1).getMessage())
model = sbml.getModel()
assert (model is not None)
return sbml
| 45.046589
| 99
| 0.606471
|
d18cd6e6385708901c4cd46ebfd3610f6292b8e5
| 18,870
|
py
|
Python
|
falcon_mcmc/falcon.py
|
samuelgoh1525/falcon-blockchain
|
1480c1e71b624a147dc0a18aa043f1101435ba85
|
[
"MIT"
] | 2
|
2021-06-19T16:57:09.000Z
|
2021-06-22T08:04:17.000Z
|
falcon_mcmc/falcon.py
|
samuelgoh1525/falcon-blockchain
|
1480c1e71b624a147dc0a18aa043f1101435ba85
|
[
"MIT"
] | null | null | null |
falcon_mcmc/falcon.py
|
samuelgoh1525/falcon-blockchain
|
1480c1e71b624a147dc0a18aa043f1101435ba85
|
[
"MIT"
] | null | null | null |
"""
Python implementation of Falcon:
https://falcon-sign.info/.
"""
from common import q
from numpy import set_printoptions
from math import sqrt, exp, floor, ceil, log
from fft import fft, ifft, sub, neg, add_fft, mul_fft
from ntt import sub_zq, mul_zq, div_zq
from ffsampling import gram, ffldl_fft, ffsampling_fft, ffsampling_round
from ntrugen import ntru_gen
from encoding import compress, decompress
# https://pycryptodome.readthedocs.io/en/latest/src/hash/shake256.html
from Crypto.Hash import SHAKE256
# Randomness
from os import urandom
from rng import ChaCha20
# For debugging purposes
import sys
if sys.version_info >= (3, 4):
from importlib import reload # Python 3.4+ only.
from random import uniform
from copy import deepcopy
from numpy import round as np_round
from timeit import default_timer as timer
set_printoptions(linewidth=200, precision=5, suppress=True)
logn = {
2: 1,
4: 2,
8: 3,
16: 4,
32: 5,
64: 6,
128: 7,
256: 8,
512: 9,
1024: 10
}
# Bytelength of the signing salt and header
HEAD_LEN = 1
SALT_LEN = 40
SEED_LEN = 56
# Parameter sets for Falcon:
# - n is the dimension/degree of the cyclotomic ring
# - sigma is the std. dev. of signatures (Gaussians over a lattice)
# - sigmin is a lower bound on the std. dev. of each Gaussian over Z
# - sig_bound is the upper bound on ||s0||^2 + ||s1||^2
# - sig_bytelen is the bytelength of signatures
Params = {
# FalconParam(2, 2)
2: {
"n": 2,
"sigma": 144.81253976308423,
"sigmin": 1.1165085072329104,
"sig_bound": 101498,
"sig_bytelen": 44,
},
# FalconParam(4, 2)
4: {
"n": 4,
"sigma": 146.83798833523608,
"sigmin": 1.1321247692325274,
"sig_bound": 208714,
"sig_bytelen": 47,
},
# FalconParam(8, 2)
8: {
"n": 8,
"sigma": 148.83587593064718,
"sigmin": 1.147528535373367,
"sig_bound": 428865,
"sig_bytelen": 52,
},
# FalconParam(16, 4)
16: {
"n": 16,
"sigma": 151.78340713845503,
"sigmin": 1.170254078853483,
"sig_bound": 892039,
"sig_bytelen": 63,
},
# FalconParam(32, 8)
32: {
"n": 32,
"sigma": 154.6747794602761,
"sigmin": 1.1925466358390344,
"sig_bound": 1852696,
"sig_bytelen": 82,
},
# FalconParam(64, 16)
64: {
"n": 64,
"sigma": 157.51308555044122,
"sigmin": 1.2144300507766141,
"sig_bound": 3842630,
"sig_bytelen": 122,
},
# FalconParam(128, 32)
128: {
"n": 128,
"sigma": 160.30114421975344,
"sigmin": 1.235926056771981,
"sig_bound": 7959734,
"sig_bytelen": 200,
},
# FalconParam(256, 64)
256: {
"n": 256,
"sigma": 163.04153322607107,
"sigmin": 1.2570545284063217,
"sig_bound": 16468416,
"sig_bytelen": 356,
},
# FalconParam(512, 128)
512: {
"n": 512,
"sigma": 165.7366171829776,
"sigmin": 1.2778336969128337,
"sig_bound": 34034726,
"sig_bytelen": 666,
},
# FalconParam(1024, 256)
1024: {
"n": 1024,
"sigma": 168.38857144654395,
"sigmin": 1.298280334344292,
"sig_bound": 70265242,
"sig_bytelen": 1280,
},
}
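# The table values appear to follow the bound used later in SecretKey.sign(),
# i.e. sig_bound ~= floor(1.1 ** 2 * 2 * n * sigma ** 2); an illustrative
# check (not executed here):
#
#     from math import floor
#     n = 512
#     floor(1.1 ** 2 * 2 * n * Params[n]["sigma"] ** 2)   # -> 34034726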
def print_tree(tree, pref=""):
"""
Display a LDL tree in a readable form.
Args:
tree: an LDL tree
Format: coefficient or fft
"""
leaf = "|_____> "
top = "|_______"
son1 = "| "
son2 = " "
width = len(top)
a = ""
if len(tree) == 3:
if (pref == ""):
a += pref + str(tree[0]) + "\n"
else:
a += pref[:-width] + top + str(tree[0]) + "\n"
a += print_tree(tree[1], pref + son1)
a += print_tree(tree[2], pref + son2)
return a
else:
return (pref[:-width] + leaf + str(tree) + "\n")
def normalize_tree(tree, sigma):
"""
Normalize leaves of a LDL tree (from values ||b_i||**2 to sigma/||b_i||).
Args:
tree: an LDL tree
sigma: a standard deviation
Format: coefficient or fft
"""
if len(tree) == 3:
normalize_tree(tree[1], sigma)
normalize_tree(tree[2], sigma)
else:
tree[0] = sigma / sqrt(tree[0].real)
tree[1] = 0
class PublicKey:
"""
This class contains methods for performing public key operations in Falcon.
"""
def __init__(self, sk):
"""Initialize a public key."""
self.n = sk.n
self.h = sk.h
self.hash_to_point = sk.hash_to_point
self.signature_bound = sk.signature_bound
self.verify = sk.verify
def __repr__(self):
"""Print the object in readable form."""
rep = "Public for n = {n}:\n\n".format(n=self.n)
rep += "h = {h}\n\n".format(h=self.h)
return rep
class SecretKey:
"""
This class contains methods for performing
secret key operations (and also public key operations) in Falcon.
One can:
- initialize a secret key for:
- n = 128, 256, 512, 1024,
- phi = x ** n + 1,
- q = 12 * 1024 + 1
- find a preimage t of a point c (both in ( Z[x] mod (Phi,q) )**2 ) such that t*B0 = c
- hash a message to a point of Z[x] mod (Phi,q)
- sign a message
- verify the signature of a message
"""
def __init__(self, n, polys=None):
"""Initialize a secret key."""
# Public parameters
self.n = n
#TODO: change sigma and signature_bound
self.sigma = Params[n]["sigma"]
self.sigmin = Params[n]["sigmin"]
self.signature_bound = floor(Params[n]["sig_bound"])
self.sig_bytelen = Params[n]["sig_bytelen"]
# Compute NTRU polynomials f, g, F, G verifying fG - gF = q mod Phi
if polys is None:
self.f, self.g, self.F, self.G = ntru_gen(n)
else:
[f, g, F, G] = polys
assert all((len(poly) == n) for poly in [f, g, F, G])
self.f = f[:]
self.g = g[:]
self.F = F[:]
self.G = G[:]
# From f, g, F, G, compute the basis B0 of a NTRU lattice
# as well as its Gram matrix and their fft's.
B0 = [[self.g, neg(self.f)], [self.G, neg(self.F)]]
G0 = gram(B0)
self.B0_fft = [[fft(elt) for elt in row] for row in B0]
G0_fft = [[fft(elt) for elt in row] for row in G0]
self.T_fft = ffldl_fft(G0_fft)
'''
store original T_fft
'''
self.orig_T_fft = deepcopy(self.T_fft)
# Normalize Falcon tree
normalize_tree(self.T_fft, self.sigma)
# The public key is a polynomial such that h*f = g mod (Phi,q)
self.h = div_zq(self.g, self.f)
def __repr__(self, verbose=False):
"""Print the object in readable form."""
rep = "Private key for n = {n}:\n\n".format(n=self.n)
rep += "f = {f}\n\n".format(f=self.f)
rep += "g = {g}\n\n".format(g=self.g)
rep += "F = {F}\n\n".format(F=self.F)
rep += "G = {G}\n\n".format(G=self.G)
if verbose:
rep += "\nFFT tree\n"
rep += print_tree(self.T_fft, pref="")
return rep
def hash_to_point(self, message, salt):
"""
Hash a message to a point in Z[x] mod(Phi, q).
Inspired by the Parse function from NewHope.
"""
n = self.n
if q > (1 << 16):
raise ValueError("The modulus is too large")
k = (1 << 16) // q
# Create a SHAKE object and hash the salt and message.
shake = SHAKE256.new()
shake.update(salt)
shake.update(message)
# Output pseudorandom bytes and map them to coefficients.
hashed = [0 for i in range(n)]
i = 0
j = 0
while i < n:
# Take 2 bytes and combine them into a 16-bit integer
twobytes = shake.read(2)
elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x
# Implicit rejection sampling
if elt < k * q:
hashed[i] = elt % q
i += 1
j += 1
return hashed
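# With q = 12 * 1024 + 1 = 12289, k = 65536 // q = 5, so a 16-bit draw is kept
# only when it is below k * q = 61445; accepted draws are uniform over
# {0, ..., 5 * q - 1}, hence (elt % q) is uniform mod q, and the roughly 6% of
# draws at or above 61445 are rejected and replaced by fresh SHAKE output.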
def sample_preimage(self, point, type_in, sigma_new, i_mix_sym, overwrite, seed=None):
"""
Sample a short vector s such that s[0] + s[1] * h = point.
"""
[[a, b], [c, d]] = self.B0_fft
# We compute a vector t_fft such that:
#     t_fft * B0_fft = (fft(point), fft(0))
# Because fft(0) = 0 and the inverse of B has a very specific form,
# we can do several optimizations.
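# Sketch of that optimization: with B0 = [[g, -f], [G, -F]] and fG - gF = q,
# det(B0) = q and B0^{-1} = (1/q) * [[-F, f], [-G, g]], so for the target
# (point, 0):
#     t = (point, 0) * B0^{-1} = (point * d / q, -point * b / q)
# with b = -f and d = -F, which is exactly what the two lines below compute
# coefficient-wise in the FFT domain.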
point_fft = fft(point)
t0_fft = [(point_fft[i] * d[i]) / q for i in range(self.n)]
t1_fft = [(-point_fft[i] * b[i]) / q for i in range(self.n)]
t_fft = [t0_fft, t1_fft]
# We now compute v such that:
# v = z * B0 for an integral vector z
# v is close to (point, 0)
'''
MCMC sampling
'''
# Get initial state z_0 and i_mix
# If no MCMC sampling, this is the solution for original FALCON
i_mix = None
if seed is None:
# If no seed is defined, use urandom as the pseudo-random source.
z_0, sum_log_prob_0, i_mix = ffsampling_fft(t_fft, self.T_fft, self.sigmin, 0, 1, False, urandom)
else:
# If a seed is defined, initialize a ChaCha20 PRG
# that is used to generate pseudo-randomness.
chacha_prng = ChaCha20(seed)
z_0, sum_log_prob_0, i_mix = ffsampling_fft(t_fft, self.T_fft, self.sigmin, 0, 1, False,
chacha_prng.randombytes)
'''
# When initiating with round(t_fft) instead of ffsampling for symmetric MCMC
z_round = np_round(t_fft)
z_test, _, _ = ffsampling_fft(t_fft, self.T_fft, self.sigmin, 0, 1, urandom)
v0_test, v1_test = self.calc_v(z_test)
s_test = [sub(point, v0_test), neg(v1_test)]
test_norm = self.calc_norm(s_test)
v0_round, v1_round = self.calc_v(z_round)
s_round = [sub(point, v0_round), neg(v1_round)]
round_norm = self.calc_norm(s_round)
print("z_0: ", og_squared_norm)
print("z_round: ", round_norm)
print("z_test: ", test_norm)
'''
#print("i_mix for IMHK: ", i_mix)
'''Testing'''
'''
v0_og, v1_og = self.calc_v(z_0)
s_og = [sub(point, v0_og), neg(v1_og)]
og_squared_norm = self.calc_norm(s_og)
og_sum_log_prob = sum_log_prob_0
num_moves = 0
num_good_moves = 0
'''
'''End Test'''
if type_in == 'i':
if overwrite:
i_mix = 1
for i in range(ceil(i_mix) - 1):
if seed is None:
# If no seed is defined, use urandom as the pseudo-random source.
z_fft, sum_log_prob_1, _ = ffsampling_fft(t_fft, self.T_fft, self.sigmin, 0, 1, True, urandom)
else:
# If a seed is defined, initialize a ChaCha20 PRG
# that is used to generate pseudo-randomness.
chacha_prng = ChaCha20(seed)
z_fft, sum_log_prob_1, _ = ffsampling_fft(t_fft, self.T_fft, self.sigmin, 0, 1, True,
chacha_prng.randombytes)
old_new_ratio = sum_log_prob_1 - sum_log_prob_0
acceptance_ratio = min(0, old_new_ratio)
u = uniform(0, 1)
# cannot be 0 due to log
while u == 0:
u = uniform(0, 1)
#print("[", i+1, "]: new_sum: ", sum_log_prob_1, ", old_sum: ", sum_log_prob_0)
if log(u) <= acceptance_ratio:
#print("\naccepted -- ", "ratio: ", acceptance_ratio, ", log(u): ", log(u), "\n")
#num_moves += 1
z_0 = z_fft
sum_log_prob_0 = sum_log_prob_1
'''
if old_new_ratio >= 0:
num_good_moves += 1
'''
elif type_in == 's':
i_mix = i_mix_sym
for i in range(i_mix):
if seed is None:
# If no seed is defined, use urandom as the pseudo-random source.
z_fft, sum_log_prob, _ = ffsampling_fft(z_0, self.T_fft, self.sigmin, 0, 1, True, urandom)
else:
# If a seed is defined, initialize a ChaCha20 PRG
# that is used to generate pseudo-randomness.
chacha_prng = ChaCha20(seed)
z_fft, sum_log_prob, _ = ffsampling_fft(z_0, self.T_fft, self.sigmin, 0, 1, True,
chacha_prng.randombytes)
v0_new, v1_new = self.calc_v(z_fft)
v0_old, v1_old = self.calc_v(z_0)
# The difference s = (point, 0) - v is such that:
# s is short
# s[0] + s[1] * h = point
s_new = [sub(point, v0_new), neg(v1_new)]
s_old = [sub(point, v0_old), neg(v1_old)]
new_squared_norm = self.calc_norm(s_new)
old_squared_norm = self.calc_norm(s_old)
old_new_ratio = exp( (1 / (2 * (self.sigma ** 2) ) ) * (old_squared_norm - new_squared_norm) )
acceptance_ratio = min(1, old_new_ratio)
u = uniform(0, 1)
#print("[", i+1, "]: new_squared_norm: ", new_squared_norm, ", old_squared_norm: ", old_squared_norm)
if u <= acceptance_ratio:
#print("\naccepted -- ", "ratio: ", acceptance_ratio, ", u: ", u, "\n")
#num_moves += 1
z_0 = z_fft
'''
if old_new_ratio >= 1:
num_good_moves += 1
'''
v0, v1 = self.calc_v(z_0)
s = [sub(point, v0), neg(v1)]
'''
Testing
'''
'''
final_squared_norm = self.calc_norm(s)
print("\nOriginal squared norm: ", og_squared_norm, "; Final squared norm: ", final_squared_norm, "\n")
#print("\nOriginal sum log prob: ", og_sum_log_prob, "; Final sum log prob: ", sum_log_prob_0, "\n")
print("\nNumber of Markov moves: ", num_moves, "\n")
print("\nNumber of 'Good' Markov moves: ", num_good_moves, "\n")
'''
return s
def calc_v(self, z_fft):
[[a, b], [c, d]] = self.B0_fft
v0_fft = add_fft(mul_fft(z_fft[0], a), mul_fft(z_fft[1], c))
v1_fft = add_fft(mul_fft(z_fft[0], b), mul_fft(z_fft[1], d))
v0 = [int(round(elt)) for elt in ifft(v0_fft)]
v1 = [int(round(elt)) for elt in ifft(v1_fft)]
return v0, v1
@staticmethod
def calc_norm(s):
norm_sign = sum(coef ** 2 for coef in s[0])
norm_sign += sum(coef ** 2 for coef in s[1])
return norm_sign
def sign(self, message, type_in='', sigma_og=None, sigma_new=30, i_mix_sym=1000, overwrite=False, randombytes=urandom):
"""
Sign a message. The message MUST be a byte string or byte array.
Optionally, one can select the source of (pseudo-)randomness used
(default: urandom).
"""
start = timer()
int_header = 0x30 + logn[self.n]
header = int_header.to_bytes(1, "little")
salt = randombytes(SALT_LEN)
hashed = self.hash_to_point(message, salt)
# We repeat the signing procedure until we find a signature that is
# short enough (both the Euclidean norm and the bytelength)
'''Set the original sigma to sample'''
if sigma_og is not None:
self.sigma = float(sigma_og)
self.signature_bound = (1.1 ** 2) * 2 * self.n * (self.sigma ** 2)
self.T_fft = deepcopy(self.orig_T_fft)
normalize_tree(self.T_fft, self.sigma)
while(1):
if (randombytes == urandom):
s = self.sample_preimage(hashed, type_in, sigma_new, i_mix_sym, overwrite)
else:
seed = randombytes(SEED_LEN)
s = self.sample_preimage(hashed, type_in, sigma_new, i_mix_sym, overwrite, seed=seed)
norm_sign = self.calc_norm(s)
# Check the Euclidean norm
if norm_sign <= self.signature_bound:
enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)
# Check that the encoding is valid (sometimes it fails)
if (enc_s is not False):
'''
Restore T_fft
'''
self.sigma = Params[self.n]["sigma"]
self.signature_bound = floor(Params[self.n]["sig_bound"])
self.T_fft = deepcopy(self.orig_T_fft)
normalize_tree(self.T_fft, self.sigma)
end = timer()
#print("Time elapsed for sign (inside falcon.py): ", end-start, "\n")
return header + salt + enc_s
def verify(self, message, signature):
"""
Verify a signature.
"""
# Unpack the salt and the short polynomial s1
salt = signature[HEAD_LEN:HEAD_LEN + SALT_LEN]
enc_s = signature[HEAD_LEN + SALT_LEN:]
s1 = decompress(enc_s, self.sig_bytelen - HEAD_LEN - SALT_LEN, self.n)
# Check that the encoding is valid
if (s1 is False):
print("Invalid encoding")
return False
# Compute s0 and normalize its coefficients in (-q/2, q/2]
hashed = self.hash_to_point(message, salt)
s0 = sub_zq(hashed, mul_zq(s1, self.h))
s0 = [(coef + (q >> 1)) % q - (q >> 1) for coef in s0]
# Check that the (s0, s1) is short
norm_sign = sum(coef ** 2 for coef in s0)
norm_sign += sum(coef ** 2 for coef in s1)
if norm_sign > self.signature_bound:
print("Squared norm of signature is too large:", norm_sign)
return False
# If all checks are passed, accept
return True
| 33.39823
| 124
| 0.519396
|
6cb7648cf150c3f9e94b351f2b9af86fbe790b1e
| 175
|
py
|
Python
|
KenyaDelivers/wsgi.py
|
Sajeyks/Kenya-Delivers-1
|
9ef176603ea8f312e00b7ec061c5d2cc4e7db7e0
|
[
"MIT"
] | null | null | null |
KenyaDelivers/wsgi.py
|
Sajeyks/Kenya-Delivers-1
|
9ef176603ea8f312e00b7ec061c5d2cc4e7db7e0
|
[
"MIT"
] | null | null | null |
KenyaDelivers/wsgi.py
|
Sajeyks/Kenya-Delivers-1
|
9ef176603ea8f312e00b7ec061c5d2cc4e7db7e0
|
[
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'KenyaDelivers.settings')
application = get_wsgi_application()
| 19.444444
| 73
| 0.828571
|
55358eef2e9b56e9349dff7fa86ee6773bf84c73
| 4,362
|
py
|
Python
|
nchs_mortality/delphi_nchs_mortality/pull.py
|
jingjtang/covidcast-indicators
|
34cb8786f78fbea2710b810a9500ee02c2379241
|
[
"MIT"
] | 8
|
2020-10-12T04:27:04.000Z
|
2022-03-08T16:56:57.000Z
|
nchs_mortality/delphi_nchs_mortality/pull.py
|
jingjtang/covidcast-indicators
|
34cb8786f78fbea2710b810a9500ee02c2379241
|
[
"MIT"
] | 666
|
2020-09-30T21:18:41.000Z
|
2022-03-31T22:37:12.000Z
|
nchs_mortality/delphi_nchs_mortality/pull.py
|
jingjtang/covidcast-indicators
|
34cb8786f78fbea2710b810a9500ee02c2379241
|
[
"MIT"
] | 13
|
2020-10-01T14:25:06.000Z
|
2022-02-12T08:31:19.000Z
|
# -*- coding: utf-8 -*-
"""Functions for pulling NCHS mortality data API."""
from typing import Optional
import numpy as np
import pandas as pd
from sodapy import Socrata
from delphi_utils.geomap import GeoMapper
from .constants import METRICS, RENAME, NEWLINE
def standardize_columns(df):
"""Rename columns to comply with a standard set.
NCHS has changed column names a few times, so this will help us maintain
backwards-compatibility without the processing code getting all gnarly.
"""
rename_pairs = [(from_col, to_col) for (from_col, to_col) in RENAME
if from_col in df.columns]
return df.rename(columns=dict(rename_pairs))
def pull_nchs_mortality_data(token: str, test_file: Optional[str]=None):
"""Pull the latest NCHS Mortality data, and conforms it into a dataset.
The output dataset has:
- Each row corresponds to (State, Week), denoted (geo_id, timestamp)
- Each row additionally has columns 'covid_deaths', 'total_deaths',
'percent_of_expected_deaths', 'pneumonia_deaths',
'pneumonia_and_covid_deaths', 'influenza_deaths',
'pneumonia_influenza_or_covid_19_deaths', corresponding to the aggregate
metric from Feb. 1st until the latest date.
# New York City would be included in New York State
Parameters
----------
token: str
My App Token for pulling the NCHS mortality data
test_file: Optional[str]
When not null, name of file from which to read test data
Returns
-------
pd.DataFrame
Dataframe as described above.
"""
# Constants
keep_columns = METRICS.copy()
type_dict = {key: float for key in keep_columns}
type_dict["timestamp"] = 'datetime64[ns]'
if test_file:
df = pd.read_csv("./test_data/%s"%test_file)
else:
# Pull data from Socrata API
client = Socrata("data.cdc.gov", token)
results = client.get("r8kw-7aab", limit=10**10)
df = pd.DataFrame.from_records(results)
# drop "By Total" rows
df = df[df["group"].transform(str.lower) == "by week"]
df = standardize_columns(df)
if "end_date" in df.columns:
# Check missing week_ending_date == end_date
try:
assert all(df["week_ending_date"] == df["end_date"])
except AssertionError as exc:
raise ValueError(
"week_ending_date is not always the same as end_date, check the raw file"
) from exc
else:
# Check missing start_week == end_week
try:
assert all(df["timestamp"] == df["end_week"])
except AssertionError as exc:
raise ValueError(
"end_week is not always the same as start_week, check the raw file"
) from exc
try:
df = df.astype(type_dict)
except KeyError as exc:
raise ValueError(f"""
Expected column(s) missed, The dataset schema may
have changed. Please investigate and amend the code.
Columns needed:
{NEWLINE.join(type_dict.keys())}
Columns available:
{NEWLINE.join(df.columns)}
""") from exc
# Drop rows for locations outside US
df = df[df["state"] != "United States"]
df = df[keep_columns + ["timestamp", "state"]].set_index("timestamp")
# NCHS considers NYC as an individual state, however, we want it included
    # in NY. If values are nan for both NYC and NY, the aggregation should
# also have NAN.
df_ny = df.loc[df["state"] == "New York", :].drop("state", axis=1)
df_nyc = df.loc[df["state"] == "New York City", :].drop("state", axis=1)
# Get mask df to ignore cells where both of them have NAN values
mask = (df_ny[keep_columns].isnull().values \
& df_nyc[keep_columns].isnull().values)
    # pandas.DataFrame.append was removed in pandas 2.x; concat is the equivalent here.
    df_ny = pd.concat([df_ny, df_nyc]).groupby("timestamp").sum().where(~mask, np.nan)
df_ny["state"] = "New York"
# Drop NYC and NY in the full dataset
df = df.loc[~df["state"].isin(["New York", "New York City"]), :]
    df = pd.concat([df, df_ny]).reset_index().sort_values(["state", "timestamp"])
# Add population info
keep_columns.extend(["timestamp", "geo_id", "population"])
gmpr = GeoMapper()
df = gmpr.add_population_column(df, "state_name", geocode_col="state")
df = gmpr.add_geocode(df, "state_name", "state_id", from_col="state", new_col="geo_id")
return df[keep_columns]
| 35.754098
| 91
| 0.653599
|
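A minimal usage sketch for pull_nchs_mortality_data above. It assumes the delphi_nchs_mortality package and its dependencies (pandas, sodapy, delphi_utils) are installed; the SOCRATA_TOKEN environment variable and the choice of printed columns are illustrative assumptions.
# Minimal usage sketch (assumptions noted above).
import os
from delphi_nchs_mortality.pull import pull_nchs_mortality_data

token = os.environ["SOCRATA_TOKEN"]     # hypothetical env var holding the data.cdc.gov app token
df = pull_nchs_mortality_data(token)    # pass test_file="..." to read local test data instead
print(df[["geo_id", "timestamp", "covid_deaths", "population"]].head())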
d047c1c84f9453f40b944e51c087ecd45443b00b
| 4,919
|
py
|
Python
|
clip/simple_tokenizer.py
|
mapmeld/CLIP
|
0848ed71e5bb5b4eaaa4048364001877630345dc
|
[
"MIT"
] | null | null | null |
clip/simple_tokenizer.py
|
mapmeld/CLIP
|
0848ed71e5bb5b4eaaa4048364001877630345dc
|
[
"MIT"
] | null | null | null |
clip/simple_tokenizer.py
|
mapmeld/CLIP
|
0848ed71e5bb5b4eaaa4048364001877630345dc
|
[
"MIT"
] | null | null | null |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    It also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.vocab = vocab
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def extend(self, tokens):
self.vocab.extend(tokens)
self.encoder = dict(zip(self.vocab, range(len(self.vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
for token in tokens:
self.cache[token] = token
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` no longer occurs in the rest of `word`
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
| 34.886525
| 144
| 0.560683
|
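A minimal usage sketch for SimpleTokenizer above, assuming the clip package and its bundled bpe_simple_vocab_16e6.txt.gz vocabulary file are available locally; the sample sentence is illustrative.
# Minimal usage sketch (assumptions noted above).
from clip.simple_tokenizer import SimpleTokenizer

tokenizer = SimpleTokenizer()
ids = tokenizer.encode("a photo of a cat")   # BPE token ids; text is cleaned and lowercased first
text = tokenizer.decode(ids)                 # recovers "a photo of a cat " (trailing space comes from </w>)
print(ids)
print(text)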
60bdd1d81837b78b24485fc3d5e07d475013091a
| 217
|
py
|
Python
|
saleor/userprofile/urls.py
|
mscharm/saleor_heroku
|
2b16901978c94d9444eb97526c21a6d6d25d54a1
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/userprofile/urls.py
|
mscharm/saleor_heroku
|
2b16901978c94d9444eb97526c21a6d6d25d54a1
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T10:40:41.000Z
|
2022-02-10T10:40:41.000Z
|
saleor/userprofile/urls.py
|
mscharm/saleor_heroku
|
2b16901978c94d9444eb97526c21a6d6d25d54a1
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.details, name='details'),
url(r'^address/(?P<pk>\d+)/delete/$',
views.address_delete, name='address-delete'),
]
| 19.727273
| 53
| 0.631336
|
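A minimal sketch of how the app urlconf above could be mounted from a project-level urls.py in the same django.conf.urls style; the 'profile/' prefix is an illustrative assumption.
# Minimal sketch (the URL prefix is an illustrative assumption).
from django.conf.urls import include, url

urlpatterns = [
    url(r'^profile/', include('saleor.userprofile.urls')),
]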