Dataset columns (one row per source file):
hexsha: string (len 40) | size: int64 (5 to 2.06M) | ext: string (10 classes) | lang: string (1 class: Python)
max_stars_repo_path: string (len 3 to 248) | max_stars_repo_name: string (len 5 to 125) | max_stars_repo_head_hexsha: string (len 40 to 78) | max_stars_repo_licenses: list (len 1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime / max_datetime: string (len 24, nullable)
max_issues_repo_path: string (len 3 to 248) | max_issues_repo_name: string (len 5 to 125) | max_issues_repo_head_hexsha: string (len 40 to 78) | max_issues_repo_licenses: list (len 1 to 10) | max_issues_count: int64 (1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime / max_datetime: string (len 24, nullable)
max_forks_repo_path: string (len 3 to 248) | max_forks_repo_name: string (len 5 to 125) | max_forks_repo_head_hexsha: string (len 40 to 78) | max_forks_repo_licenses: list (len 1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime / max_datetime: string (len 24, nullable)
content: string (len 5 to 2.06M) | avg_line_length: float64 (1 to 1.02M) | max_line_length: int64 (3 to 1.03M) | alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M) | score_classes: float64 (0 to 1) | count_generators: int64 (0 to 651k) | score_generators: float64 (0 to 1) | count_decorators: int64 (0 to 990k) | score_decorators: float64 (0 to 1) | count_async_functions: int64 (0 to 235k) | score_async_functions: float64 (0 to 1) | count_documentation: int64 (0 to 1.04M) | score_documentation: float64 (0 to 1)
97d3d479f4d7bb607ee11ef3af9de4bcb2b193c7 | 12,781 | py | Python | tests/helpers/test_file.py | Centaurioun/PyFunceble @ 59b809f3322118f7824195752c6015220738d4a0 | ["Apache-2.0"] | stars: null | issues: null | forks: null |
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Tests of the file helper.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import secrets
import tempfile
import unittest
from PyFunceble.helpers.file import FileHelper
from PyFunceble.utils.platform import PlatformUtility
class TestFileHelper(unittest.TestCase):
"""
Tests of the file helpers.
"""
def test_set_path_return(self) -> None:
"""
Tests the response from the method which lets us set the path to work with.
"""
given = tempfile.NamedTemporaryFile()
file_helper = FileHelper()
actual = file_helper.set_path(given.name)
self.assertIsInstance(actual, FileHelper)
def test_set_path(self) -> None:
"""
Tests the method which lets us set the path to work with.
"""
given = tempfile.NamedTemporaryFile()
expected = given.name
file_helper = FileHelper()
file_helper.set_path(given.name)
actual = file_helper.path
self.assertEqual(expected, actual)
file_helper = FileHelper(given.name)
actual = file_helper.path
self.assertEqual(expected, actual)
def test_set_path_not_str(self) -> None:
"""
Tests the method which lets us set the path to work with for the case
that it's not a string.
"""
given = ["Hello", "World"]
file_helper = FileHelper()
self.assertRaises(TypeError, lambda: file_helper.set_path(given))
def test_join_path(self) -> None:
"""
Tests the method which lets us join paths.
"""
given = "/hello/world"
if PlatformUtility.is_windows():
expected = "/hello/world\\hello\\world"
else:
expected = "/hello/world/hello/world"
actual = FileHelper(given).join_path("hello", "world")
self.assertEqual(expected, actual)
def test_exists(self) -> None:
"""
Tests the method which lets us check if the given file exists.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
with open(file_helper.path, "w") as file_stream:
file_stream.write("Hello, World!")
expected = True
actual = file_helper.exists()
self.assertEqual(expected, actual)
os.remove(file_helper.path)
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
def test_get_size(self) -> None:
"""
Tests the method which lets us get the size of a file.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
with open(file_helper.path, "w") as file_stream:
file_stream.write("Hello, World!")
expected = True
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = 13
actual = file_helper.get_size()
self.assertEqual(expected, actual)
os.remove(file_helper.path)
def test_is_empty(self) -> None:
"""
Tests the method which lets us check if a file is empty.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
with open(file_helper.path, "w") as file_stream:
file_stream.write("")
expected = True
actual = file_helper.is_empty()
self.assertEqual(expected, actual)
with open(file_helper.path, "w") as file_stream:
file_stream.write("Hello, World!")
expected = False
actual = file_helper.is_empty()
self.assertEqual(expected, actual)
os.remove(file_helper.path)
def test_delete(self) -> None:
"""
Tests the method which lets us delete a file.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
with open(file_helper.path, "w") as file_stream:
file_stream.write("")
expected = True
actual = file_helper.exists()
self.assertEqual(expected, actual)
file_helper.delete()
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
def test_write(self) -> None:
"""
Tests the method which lets us write to a file.
"""
given = tempfile.NamedTemporaryFile(delete=False)
file_helper = FileHelper(given.name)
file_helper.write("Hello, World!")
given.seek(0)
expected = b"Hello, World!"
actual = given.read()
self.assertEqual(expected, actual)
file_helper.write("Hello, this is Funilrys!")
given.seek(0)
expected = b"Hello, World!Hello, this is Funilrys!"
actual = given.read()
self.assertEqual(expected, actual)
file_helper.write("Hello, World!", overwrite=True)
given.seek(0)
expected = b"Hello, World!"
actual = given.read()
self.assertEqual(expected, actual)
def test_read(self) -> None:
"""
Tests the method which lets us read a file.
"""
given = tempfile.NamedTemporaryFile(delete=False)
file_helper = FileHelper(given.name)
file_helper.write("Hello, World!")
given.seek(0)
expected = "Hello, World!"
actual = file_helper.read()
self.assertEqual(expected, actual)
def test_read_file_does_not_exists(self) -> None:
"""
Tests the method which lets us read a file for the case that the given
file does not exist.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = None
actual = file_helper.read()
self.assertEqual(expected, actual)
def test_read_bytes(self) -> None:
"""
Tests the method which lets us read a file (as bytes).
"""
given = tempfile.NamedTemporaryFile(delete=False)
file_helper = FileHelper(given.name)
file_helper.write("Hello, World!")
given.seek(0)
expected = b"Hello, World!"
actual = file_helper.read_bytes()
self.assertEqual(expected, actual)
def test_read_bytes_file_does_not_exists(self) -> None:
"""
Tests the method which lets us read a file (as bytes) for the case that
the given file does not exist.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = None
actual = file_helper.read_bytes()
self.assertEqual(expected, actual)
def test_open(self) -> None:
"""
Tests the method which lets us open the given file as we want.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
with file_helper.open("w") as file_stream:
file_stream.write("Hello, World!")
expected = True
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = "Hello, World!"
actual = file_helper.read()
self.assertEqual(expected, actual)
def test_copy(self) -> None:
"""
Tests the method which lets us copy a file to another place.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
copy_file_helper = FileHelper(tempfile.gettempdir())
copy_file_helper.set_path(copy_file_helper.join_path(secrets.token_hex(8)))
expected = False
actual = file_helper.exists()
actual_copy = copy_file_helper.exists()
self.assertEqual(expected, actual)
self.assertEqual(expected, actual_copy)
file_helper.write("Hello, World!")
expected = True
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = False
actual_copy = copy_file_helper.exists()
self.assertEqual(expected, actual_copy)
file_helper.copy(copy_file_helper.path)
expected = True
actual_copy = copy_file_helper.exists()
self.assertEqual(expected, actual_copy)
expected = "Hello, World!"
actual = copy_file_helper.read()
self.assertEqual(expected, actual)
expected = True
actual = file_helper.exists()
actual_copy = copy_file_helper.exists()
self.assertEqual(expected, actual)
self.assertEqual(expected, actual_copy)
def test_move(self) -> None:
"""
Tests the method which lets us move a file to another location.
"""
file_helper = FileHelper(tempfile.gettempdir())
file_helper.set_path(file_helper.join_path(secrets.token_hex(8)))
destination_file_helper = FileHelper(tempfile.gettempdir())
destination_file_helper.set_path(
destination_file_helper.join_path(secrets.token_hex(8))
)
expected = False
actual = file_helper.exists()
actual_destination = destination_file_helper.exists()
self.assertEqual(expected, actual)
self.assertEqual(expected, actual_destination)
file_helper.write("Hello, World!")
expected = True
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = False
actual_destination = destination_file_helper.exists()
self.assertEqual(expected, actual_destination)
file_helper.move(destination_file_helper.path)
expected = True
actual_destination = destination_file_helper.exists()
self.assertEqual(expected, actual_destination)
expected = "Hello, World!"
actual = destination_file_helper.read()
self.assertEqual(expected, actual)
expected = False
actual = file_helper.exists()
self.assertEqual(expected, actual)
expected = True
actual_destination = destination_file_helper.exists()
self.assertEqual(expected, actual_destination)
if __name__ == "__main__":
unittest.main()
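# Illustrative usage sketch, not part of the original test module: outside of
# unittest, the FileHelper methods exercised above are used roughly like this
# (the path below is a made-up example).
#
#   helper = FileHelper("/tmp/example.txt")
#   helper.write("Hello, World!", overwrite=True)
#   print(helper.read())   # -> "Hello, World!"
#   helper.delete()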
| avg_line_length: 26.627083 | max_line_length: 88 | alphanum_fraction: 0.606838 | classes: 10,905 (0.804026) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 4,333 (0.319472) |
97d574a37c2dcf1ccbae57ff4f4d838393dd694f | 1,938 | py | Python | malaya_speech/supervised/unet.py | ishine/malaya-speech @ fd34afc7107af1656dff4b3201fa51dda54fde18 | ["MIT"] | stars: 111 (2020-08-31T04:58:54.000Z to 2022-03-29T15:44:18.000Z) | issues: 14 (2020-12-16T07:27:22.000Z to 2022-03-15T17:39:01.000Z) | forks: 29 (2021-02-09T08:57:15.000Z to 2022-03-12T14:09:19.000Z) |
from malaya_speech.utils import (
check_file,
load_graph,
generate_session,
nodes_session,
)
from malaya_speech.model.tf import UNET, UNETSTFT, UNET1D
def load(model, module, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = ['logits']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNET(
input_nodes=input_nodes,
output_nodes=output_nodes,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
def load_stft(model, module, instruments, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = [f'logits_{i}' for i in range(len(instruments))]
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNETSTFT(
input_nodes=input_nodes,
output_nodes=output_nodes,
instruments=instruments,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
def load_1d(model, module, quantized=False, **kwargs):
path = check_file(
file=model,
module=module,
keys={'model': 'model.pb'},
quantized=quantized,
**kwargs,
)
g = load_graph(path['model'], **kwargs)
inputs = ['Placeholder']
outputs = ['logits']
input_nodes, output_nodes = nodes_session(g, inputs, outputs)
return UNET1D(
input_nodes=input_nodes,
output_nodes=output_nodes,
sess=generate_session(graph=g, **kwargs),
model=model,
name=module,
)
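# Illustrative sketch, not part of the original module: these loaders are
# normally reached through malaya_speech's public model APIs, which pass in the
# registered model and module names. Called directly it would look roughly like
# the following; 'unet' and 'noise-reduction' are hypothetical placeholder
# names, not guaranteed to exist in the model registry.
#
#   model = load(model='unet', module='noise-reduction', quantized=False)
#   stft_model = load_stft(model='unet', module='speech-enhancement',
#                          instruments=['voice', 'noise'], quantized=False)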
| avg_line_length: 25.168831 | max_line_length: 69 | alphanum_fraction: 0.609391 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 140 (0.072239) |
97d6b1b1207de186f313949afee6fd694df16691 | 4,618 | py | Python | scripts/GUI_restart.py | zainamir-98/bioradar @ b826ed869a58778a321153dae3c93f17f40d2f7a | ["MIT"] | stars: null | issues: null | forks: null |
# Use this command if numpy import fails: sudo apt-get install python-dev libatlas-base-dev
# If this doesn't work, uninstall both numpy and scipy. Thonny will keep an older default version of numpy.
# Install an older version of scipy that corresponds to the correct version of numpy.
from guizero import App, PushButton, Slider, Text, ButtonGroup, Picture, Box, CheckBox
import sys
import time
import subprocess
import os
DEBUG_MODE = False
#CONT_REALTIME_MONITORING = False
def gui_open_rr_hr():
app.destroy()
#os.system("cmd /c py final.py -u")
process = subprocess.run('python3 scripts/run_rr_hr.py -u', shell=True)
def gui_open_hrv_hr():
app.destroy()
process = subprocess.run('python3 scripts/run_hrv_hr.py -u', shell=True)
def gui_go_to_connect():
print("Connecting...")
start_menu_box.hide()
connect_menu_box.show()
start_footer_box.hide()
other_footer_box.show()
connect_menu_text2.hide()
# Connection function
connect_menu_text.after(1000, gui_check_connection)
def gui_go_to_manual():
start_menu_box.hide()
manual_menu_box.show()
start_footer_box.hide()
other_footer_box.show()
def gui_check_connection():
connect_menu_text.value = "Connected!"
connect_menu_text2.show()
def gui_go_back_to_menu():
connect_menu_box.hide()
manual_menu_box.hide()
if connect_menu_text.value == "Connected!":
connect_menu_text.value = "Connecting to MyVitals..."
start_menu_box.show()
other_footer_box.hide()
start_footer_box.show()
app = App(title="BioRadar (Prototype)", width=480, height=320, bg="#141414")
if not DEBUG_MODE:
app.full_screen = True
start_menu_box = Box(app, width="fill")
pad_1 = Box(start_menu_box, width="fill", height=20)
box_1 = Box(start_menu_box, width="fill")
pad_1_2 = Box(box_1, width=140, height=1, align="left")
picture = Picture(box_1, image="images/brlogo.png", width=51, height=40, align="left") # W:H = 1.277
pad_1_2 = Box(box_1, width=10, height=1, align="left")
message = Text(box_1, text="BioRadar", color="#FFFFFF", size=20, align="left")
pad_2 = Box(start_menu_box, width="fill", height=40)
message = Text(start_menu_box, text="Select how you want to monitor your vitals.", color="#FFFFFF", size=15)
pad_3 = Box(start_menu_box, width="fill", height=18)
button1 = PushButton(start_menu_box, text="Online mode", command=gui_go_to_connect)
button1.bg = "#6ED3A9"
pad_4 = Box(start_menu_box, width="fill", height=10)
button2 = PushButton(start_menu_box, text="Manual mode", command=gui_go_to_manual)
button2.bg = "#6ED3A9"
start_menu_box.hide()
connect_menu_box = Box(app, width="fill")
pad_1 = Box(connect_menu_box, width="fill", height=100)
connect_menu_text = Text(connect_menu_box, text="Connecting to MyVitals...", color="#FFFFFF", size=20)
pad_2 = Box(connect_menu_box, width="fill", height=30)
connect_menu_text2 = Text(connect_menu_box, text="Waiting for online commands...", color="#FFFFFF", size=16)
connect_menu_box.hide()
# Manual mode
manual_menu_box = Box(app, width="fill")
pad = Box(manual_menu_box, width="fill", height=20)
manual_menu_text = Text(manual_menu_box, text="Manual Mode", color="#FFFFFF", size=20)
pad = Box(manual_menu_box, width="fill", height=50)
button_box = Box(manual_menu_box, width=460, height=90)
button1 = PushButton(button_box, text="Respiration Rate\nHeart Rate", command=gui_open_rr_hr, align="left")
pad = Box(button_box, width=10, height=90, align="left")
button2 = PushButton(button_box, text="Heart Rate Variability\nHeart Rate*", command=gui_open_hrv_hr, align="right")
button1.text_size = 16
button2.text_size = 16
button1.bg = "#6ED3A9"
button2.bg = "#6ED3A9"
pad = Box(manual_menu_box, width="fill", height=30)
pad = Box(manual_menu_box, width="fill", height=6)
txt = Text(manual_menu_box, text="* You will need to hold your breath for 10 seconds for\nheart rate variability measurements.", color="#C8C8C8", size=11)
# Footers
start_footer_box = Box(app, width="fill", align="bottom")
fyp_text = Text(start_footer_box, text=" © 2021 Final-Year Project, SEECS, NUST", color="#C8C8C8", size=11, align="left")
exit_button = PushButton(start_footer_box, text="Exit", align="right", command=exit)
exit_button.bg = "#6ED3A9"
start_footer_box.hide()
other_footer_box = Box(app, width="fill", align="bottom")
exit_button = PushButton(other_footer_box, text="Exit", align="right", command=exit)
exit_button.bg = "#6ED3A9"
back_button = PushButton(other_footer_box, text="Back", align="right", command=gui_go_back_to_menu)
back_button.bg = "#6ED3A9"
app.display()
| avg_line_length: 39.470085 | max_line_length: 154 | alphanum_fraction: 0.731919 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 1,274 (0.275817) |
97da085bfcfa86877a3a5eae743b983ac785a5f4 | 1,182 | py | Python | pyFileFixity/lib/distance/distance/_lcsubstrings.py | hadi-f90/pyFileFixity @ 2cb3dd6225a6b062a98fa2d61c4a0a29d8010428 | ["MIT"] | stars: null | issues: 1 (2022-01-19T13:46:55.000Z to 2022-01-19T13:46:55.000Z) | forks: null |
# -*- coding: utf-8 -*-
from array import array
def lcsubstrings(seq1, seq2, positions=False):
"""Find the longest common substring(s) in the sequences `seq1` and `seq2`.
If positions evaluates to `True` only their positions will be returned,
together with their length, in a tuple:
(length, [(start pos in seq1, start pos in seq2)..])
Otherwise, the substrings themselves will be returned, in a set.
Example:
>>> lcsubstrings("sedentar", "dentist")
{'dent'}
>>> lcsubstrings("sedentar", "dentist", positions=True)
(4, [(2, 0)])
"""
L1, L2 = len(seq1), len(seq2)
ms = []
mlen = last = 0
if L1 < L2:
seq1, seq2 = seq2, seq1
L1, L2 = L2, L1
column = array('L', range(L2))
for i in range(L1):
for j in range(L2):
old = column[j]
if seq1[i] == seq2[j]:
if i == 0 or j == 0:
column[j] = 1
else:
column[j] = last + 1
if column[j] > mlen:
mlen = column[j]
ms = [(i, j)]
elif column[j] == mlen:
ms.append((i, j))
else:
column[j] = 0
last = old
if positions:
return (mlen, tuple((i - mlen + 1, j - mlen + 1) for i, j in ms if ms))
return {seq1[i - mlen + 1:i + 1] for i, _ in ms if ms}
| avg_line_length: 22.730769 | max_line_length: 76 | alphanum_fraction: 0.583756 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 488 (0.41286) |
97db509debe2b8503920910c68f09fde1efdca62 | 6,072 | py | Python | colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py | JGoldstone/colour @ 6829b363d5f0682bff0f4826995e7ceac189ff28 | ["BSD-3-Clause"] | stars: null | issues: null | forks: null |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.rgb.transfer_functions.\
panasonic_vlog` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_VLog,
log_decoding_VLog,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestLogEncoding_VLog',
'TestLogDecoding_VLog',
]
class TestLogEncoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition unit tests methods.
"""
def test_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition.
"""
self.assertAlmostEqual(log_encoding_VLog(0.0), 0.125, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18), 0.423311448760136, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 12), 0.423311448760136, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 10, False), 0.421287228403675, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 10, False, False),
0.409009628526078,
places=7)
self.assertAlmostEqual(
log_encoding_VLog(1.0), 0.599117700158146, places=7)
def test_n_dimensional_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition n-dimensional arrays support.
"""
L_in = 0.18
V_out = log_encoding_VLog(L_in)
L_in = np.tile(L_in, 6)
V_out = np.tile(V_out, 6)
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3))
V_out = np.reshape(V_out, (2, 3))
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3, 1))
V_out = np.reshape(V_out, (2, 3, 1))
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
def test_domain_range_scale_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition domain and range scale support.
"""
L_in = 0.18
V_out = log_encoding_VLog(L_in)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_encoding_VLog(L_in * factor),
V_out * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition nan support.
"""
log_encoding_VLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLogDecoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition unit tests methods.
"""
def test_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition.
"""
self.assertAlmostEqual(log_decoding_VLog(0.125), 0.0, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136, 12), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.421287228403675, 10, False), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.409009628526078, 10, False, False),
0.18,
places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.599117700158146), 1.0, places=7)
def test_n_dimensional_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition n-dimensional arrays support.
"""
V_out = 0.423311448760136
L_in = log_decoding_VLog(V_out)
V_out = np.tile(V_out, 6)
L_in = np.tile(L_in, 6)
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3))
L_in = np.reshape(L_in, (2, 3))
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3, 1))
L_in = np.reshape(L_in, (2, 3, 1))
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
def test_domain_range_scale_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition domain and range scale support.
"""
V_out = 0.423311448760136
L_in = log_decoding_VLog(V_out)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_decoding_VLog(V_out * factor),
L_in * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition nan support.
"""
log_decoding_VLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == '__main__':
unittest.main()
| avg_line_length: 31.138462 | max_line_length: 78 | alphanum_fraction: 0.640316 | classes: 5,320 (0.876153) | generators: 0 (0) | decorators: 568 (0.093544) | async_functions: 0 (0) | documentation: 1,783 (0.293643) |
97db587e34c2af72ba15568d5a03261d228ebb29 | 3,546 | py | Python | test/IECoreRI/All.py | stars: gcodebackups/cortex-vfx @ 72fa6c6eb3327fce4faf01361c8fcc2e1e892672, ["BSD-3-Clause"], 5 (2016-07-26T06:09:28.000Z to 2022-03-07T03:58:51.000Z) | issues: turbosun/cortex @ 4bdc01a692652cd562f3bfa85f3dae99d07c0b15, ["BSD-3-Clause"], null | forks: turbosun/cortex @ 4bdc01a692652cd562f3bfa85f3dae99d07c0b15, ["BSD-3-Clause"], 3 (2015-03-25T18:45:24.000Z to 2020-02-15T15:37:18.000Z) |
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import unittest
import IECore
import IECoreRI
from SLOReader import *
from Renderer import *
from Instancing import *
from PTCParticleReader import *
from PTCParticleWriter import *
from ArchiveRecord import *
from DoubleSided import *
from Orientation import *
from MultipleContextsTest import *
from Camera import *
from CurvesTest import *
from TextureOrientationTest import *
from ArrayPrimVarTest import *
from CoordinateSystemTest import *
from IlluminateTest import *
from SubsurfaceTest import *
from PatchMeshTest import *
from RIBWriterTest import *
from ParameterisedProcedural import *
from MotionTest import MotionTest
from PythonProceduralTest import PythonProceduralTest
from DetailTest import DetailTest
from ProceduralThreadingTest import ProceduralThreadingTest
from StringArrayParameterTest import StringArrayParameterTest
from CoshaderTest import CoshaderTest
from GroupTest import GroupTest
from DspyTest import DspyTest
from RerenderingTest import RerenderingTest
if hasattr( IECoreRI, "SXRenderer" ) :
from SXRendererTest import SXRendererTest
if hasattr( IECoreRI, "GXEvaluator" ) :
from GXEvaluatorTest import GXEvaluatorTest
if hasattr( IECoreRI, "DTEXDeepImageReader" ) :
from DTEXDeepImageReaderTest import TestDTEXDeepImageReader
from DTEXDeepImageWriterTest import TestDTEXDeepImageWriter
if hasattr( IECoreRI, "SHWDeepImageReader" ) :
from SHWDeepImageReaderTest import TestSHWDeepImageReader
from SHWDeepImageWriterTest import TestSHWDeepImageWriter
if IECore.withFreeType() :
from TextTest import *
unittest.TestProgram(
testRunner = unittest.TextTestRunner(
stream = IECore.CompoundStream(
[
sys.stderr,
open( "test/IECoreRI/resultsPython.txt", "w" )
]
),
verbosity = 2
)
)
| avg_line_length: 36.183673 | max_line_length: 76 | alphanum_fraction: 0.758037 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 1,857 (0.523689) |
97dd0689130d6bd5ed6a18fd645d0dcff177ddd3 | 2,164 | py | Python | molecool/tests/test_measure.py | pavankum/molecool @ 0aa4fe5423aa91cb59fb603e3293d89741cb87a6 | ["MIT"] | stars: null | issues: null | forks: null |
"""
Unit tests for measure
"""
# Import package, test suite, and other packages as needed
import numpy as np
import molecool
import pytest
def test_calculate_distance():
"""Sample test to check calculate_distance is working """
r1 = np.array([1, 0, 0])
r2 = np.array([3, 0, 0])
expected_distance = 2
calculated_distance = molecool.calculate_distance(r1, r2)
assert calculated_distance == expected_distance
def test_calculate_distance_typeerror():
r1 = [1, 0, 0]
r2 = [2, 0, 0]
with pytest.raises(TypeError):
calculated_distance = molecool.calculate_distance(r1, r2)
def test_calculate_angle():
"""Sample test to check calculate_anlge is working"""
r1 = np.array([1, 0, 0])
r2 = np.array([0, 0, 0])
r3 = np.array([0, 1, 0])
expected_angle = 90
calculated_angle = molecool.calculate_angle(r1, r2, r3, degrees=True)
assert calculated_angle == expected_angle
@pytest.mark.parametrize("p1, p2, p3, expected_angle", [
(np.array([np.sqrt(2)/2, np.sqrt(2)/2, 0]), np.array([0, 0, 0]), np.array([1, 0 , 0]), 45),
(np.array([0, 0, -1]), np.array([0, 1, 0]), np.array([1, 0, 0]), 60),
])
def test_calculate_angle_many(p1, p2, p3, expected_angle):
calculated_angle = molecool.calculate_angle(p1, p2, p3, degrees=True)
assert pytest.approx(calculated_angle) == expected_angle
def test_molecular_mass():
symbols = ['C', 'H', 'H', 'H', 'H']
calculated_mass = molecool.calculate_molecular_mass(symbols)
actual_mass = molecool.atom_data.atomic_weights['C'] + molecool.atom_data.atomic_weights['H'] +\
molecool.atom_data.atomic_weights['H'] + molecool.atom_data.atomic_weights['H'] + molecool.atom_data.atomic_weights['H']
assert actual_mass == calculated_mass
def test_center_of_mass():
symbols = np.array(['C', 'H', 'H', 'H', 'H'])
coordinates = np.array([[1,1,1], [2.4,1,1], [-0.4, 1, 1], [1, 1, 2.4], [1, 1, -0.4]])
center_of_mass = molecool.calculate_center_of_mass(symbols, coordinates)
expected_center = np.array([1,1,1])
assert np.allclose(center_of_mass, expected_center)
| avg_line_length: 30.914286 | max_line_length: 129 | alphanum_fraction: 0.652033 | classes: 0 (0) | generators: 0 (0) | decorators: 428 (0.197782) | async_functions: 0 (0) | documentation: 271 (0.125231) |
97dd106f5157a62375f9741a6b7c0edb0c3a8dee | 1,240 | py | Python | tests/test_util_matrix.py | PeerHerholz/pyrsa @ 994007086c59de93d86b982f1fff73fe6a8ea929 | ["MIT"] | stars: 4 (2015-08-10T18:34:21.000Z to 2018-05-15T20:43:15.000Z) | issues: null | forks: 2 (2018-03-26T03:02:07.000Z to 2021-11-10T21:09:48.000Z) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_util_matrix
@author: jdiedrichsen
"""
import unittest
import pyrsa.util as rsu
import numpy as np
class TestIndicator(unittest.TestCase):
def test_indicator(self):
a = np.array(range(0, 5))
a = np.concatenate((a, a))
X = rsu.matrix.indicator(a)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 5)
self.assertEqual(X[0, 0], 1.0)
def test_indicator_pos(self):
a = np.array(range(0, 5))
a = np.concatenate((a, a))
X = rsu.matrix.indicator(a, positive=True)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 4)
self.assertEqual(X[0, 0], 0.0)
def test_pairwise(self):
a = np.array(range(0, 5))
X = rsu.matrix.pairwise_contrast(a)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 5)
self.assertEqual(X[0, 0], 1.0)
def test_centering(self):
X = rsu.matrix.centering(10)
n_row, n_col = X.shape
self.assertEqual(n_row, 10)
self.assertEqual(n_col, 10)
if __name__ == '__main__':
unittest.main()
| avg_line_length: 24.313725 | max_line_length: 50 | alphanum_fraction: 0.592742 | classes: 1,032 (0.832258) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 102 (0.082258) |
97de7958e0a043ea00870086f0a3a9e86192755c | 6,999 | py | Python | custom_components/smartthinq_washer/wideq/washer.py | Golab/ha-smartthinq-washer @ 92e4589a9be143f9b167853e2b5a1607631c1c42 | ["Apache-2.0"] | stars: 1 (2020-04-13T14:09:28.000Z to 2020-04-13T14:09:28.000Z) | issues: null | forks: null |
"""------------------for Washer"""
import datetime
import enum
import time
import logging
from typing import Optional
from .device import (
Device,
DeviceStatus,
STATE_UNKNOWN,
STATE_OPTIONITEM_ON,
STATE_OPTIONITEM_OFF,
)
from .washer_states import (
STATE_WASHER,
STATE_WASHER_ERROR,
WASHERSTATES,
WASHERWATERTEMPS,
WASHERSPINSPEEDS,
WASHREFERRORS,
WASHERERRORS,
)
_LOGGER = logging.getLogger(__name__)
class WasherDevice(Device):
"""A higher-level interface for a washer."""
def poll(self) -> Optional["WasherStatus"]:
"""Poll the device's current state."""
res = self.device_poll("washerDryer")
if not res:
return None
return WasherStatus(self, res)
class WasherStatus(DeviceStatus):
"""Higher-level information about a washer's current status.
:param device: The Device instance.
:param data: JSON data from the API.
"""
def __init__(self, device, data):
super().__init__(device, data)
self._run_state = None
self._pre_state = None
self._error = None
def _get_run_state(self):
if not self._run_state:
state = self.lookup_enum(["State", "state"])
self._run_state = self._set_unknown(
state=WASHERSTATES.get(state, None), key=state, type="status"
)
return self._run_state
def _get_pre_state(self):
if not self._pre_state:
state = self.lookup_enum(["PreState", "preState"])
self._pre_state = self._set_unknown(
state=WASHERSTATES.get(state, None), key=state, type="status"
)
return self._pre_state
def _get_error(self):
if not self._error:
error = self.lookup_reference(["Error", "error"])
self._error = self._set_unknown(
state=WASHREFERRORS.get(error, None), key=error, type="error_status"
)
return self._error
@property
def is_on(self):
run_state = self._get_run_state()
return run_state != STATE_WASHER.POWER_OFF
@property
def is_wash_completed(self):
run_state = self._get_run_state()
pre_state = self._get_pre_state()
if run_state == STATE_WASHER.END or (
run_state == STATE_WASHER.POWER_OFF and pre_state == STATE_WASHER.END
):
return True
return False
@property
def is_error(self):
error = self._get_error()
if error != STATE_WASHER_ERROR.NO_ERROR and error != STATE_WASHER_ERROR.OFF:
return True
return False
@property
def run_state(self):
run_state = self._get_run_state()
return run_state.value
@property
def pre_state(self):
pre_state = self._get_pre_state()
return pre_state.value
@property
def error_state(self):
error = self._get_error()
return error.value
# error = self.lookup_reference('Error')
# if error == '-':
# return 'OFF'
# elif error == 'No Error':
# return 'NO_ERROR'
# else:
# return WASHERERROR(error)
@property
def spin_option_state(self):
spinspeed = self.lookup_enum(["SpinSpeed", "spin"])
if spinspeed == "-":
return "OFF"
return self._set_unknown(
state=WASHERSPINSPEEDS.get(spinspeed, None),
key=spinspeed,
type="spin_option",
).value
@property
def water_temp_option_state(self):
water_temp = self.lookup_enum(["WTemp", "WaterTemp", "temp"])
if water_temp == "-":
return "OFF"
return self._set_unknown(
state=WASHERWATERTEMPS.get(water_temp, None),
key=water_temp,
type="water_temp",
).value
@property
def current_course(self):
course = self.lookup_reference(
["APCourse", "Course", "courseFL24inchBaseTitan"]
)
if course == "-":
return "OFF"
return course
@property
def current_smartcourse(self):
smartcourse = self.lookup_reference(
["SmartCourse", "smartCourseFL24inchBaseTitan"]
)
if smartcourse == "-":
return "OFF"
else:
return smartcourse
@property
def remaintime_hour(self):
if self.is_api_v2:
return str(int(self._data.get("remainTimeHour")))
return self._data.get("Remain_Time_H")
@property
def remaintime_min(self):
if self.is_api_v2:
return str(int(self._data.get("remainTimeMinute")))
return self._data.get("Remain_Time_M")
@property
def initialtime_hour(self):
if self.is_api_v2:
return str(int(self._data.get("initialTimeHour")))
return self._data.get("Initial_Time_H")
@property
def initialtime_min(self):
if self.is_api_v2:
return str(int(self._data.get("initialTimeMinute")))
return self._data.get("Initial_Time_M")
@property
def reservetime_hour(self):
if self.is_api_v2:
return str(int(self._data.get("reserveTimeHour")))
return self._data.get("Reserve_Time_H")
@property
def reservetime_min(self):
if self.is_api_v2:
return str(int(self._data.get("reserveTimeMinute")))
return self._data.get("Reserve_Time_M")
@property
def creasecare_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("creaseCare")
return self.lookup_bit("Option1", 1)
@property
def childlock_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("childLock")
return self.lookup_bit("Option2", 7)
@property
def steam_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("steam")
return self.lookup_bit("Option1", 7)
@property
def steam_softener_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("steamSoftener")
return self.lookup_bit("Option1", 2)
@property
def doorlock_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("doorLock")
return self.lookup_bit("Option2", 6)
@property
def prewash_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("preWash")
return self.lookup_bit("Option1", 6)
@property
def remotestart_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("remoteStart")
return self.lookup_bit("Option2", 1)
@property
def turbowash_state(self):
if self.is_api_v2:
return self.lookup_bit_v2("turboWash")
return self.lookup_bit("Option1", 0)
@property
def tubclean_count(self):
if self.is_api_v2:
return str(int(self._data.get("TCLCount", -1)))
return self._data.get("TCLCount")
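# Illustrative sketch, not part of the original module: once the surrounding
# integration has built a WasherDevice (its constructor arguments come from
# wideq's Device base class and are not shown here), the status is consumed
# roughly like this.
#
#   status = washer_device.poll()
#   if status is not None and status.is_on:
#       _LOGGER.info(
#           "state=%s remaining=%s:%s",
#           status.run_state,
#           status.remaintime_hour,
#           status.remaintime_min,
#       )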
| avg_line_length: 27.555118 | max_line_length: 84 | alphanum_fraction: 0.603658 | classes: 6,538 (0.934133) | generators: 0 (0) | decorators: 4,631 (0.661666) | async_functions: 0 (0) | documentation: 1,120 (0.160023) |
97df4a022eaff541facbf55fa41d937b36722e9a | 375 | py | Python | year2020/day17/reader.py | Sebaestschjin/advent-of-code @ 5fd708efa355483fc0ccddf7548b62682662bcc8 | ["MIT"] | stars: null | issues: null | forks: null |
from pathlib import Path
def read(filename='in'):
file_path = Path(__file__).parent / filename
with file_path.open('r') as file:
return read_lines(file.readlines())
def read_lines(lines):
cells = {}
for y in range(len(lines)):
line = lines[y].strip()
for x in range(len(line)):
cells[(x, y)] = line[x]
return cells
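# Illustrative sketch, not part of the original file: read_lines maps every
# grid character to its (x, y) coordinate, which is what the day-17 solver
# consumes. The three-line grid below is a made-up example.
if __name__ == '__main__':
    example = ['.#.', '..#', '###']
    cells = read_lines(example)
    print(cells[(1, 0)], cells[(2, 1)])  # '#' '#'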
| avg_line_length: 22.058824 | max_line_length: 48 | alphanum_fraction: 0.597333 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 7 (0.018667) |
97e1339259b947d5c260266bb5a742c74a8323da | 4,644 | py | Python | squad/base/argument_parser.py | stars: uwnlp/piqa @ e18f2189c93965c94655d5cc943dcecdc2c1ea57, ["Apache-2.0"], 89 (2018-08-25T07:59:07.000Z to 2021-05-04T06:37:27.000Z) | issues: seominjoon/piqa @ e18f2189c93965c94655d5cc943dcecdc2c1ea57, ["Apache-2.0"], 11 (2018-09-28T17:33:27.000Z to 2019-11-27T23:34:45.000Z) | forks: uwnlp/piqa @ e18f2189c93965c94655d5cc943dcecdc2c1ea57, ["Apache-2.0"], 10 (2018-09-19T06:48:06.000Z to 2020-04-14T20:42:06.000Z) |
import argparse
import os
class ArgumentParser(argparse.ArgumentParser):
def __init__(self, description='base', **kwargs):
super(ArgumentParser, self).__init__(description=description)
def add_arguments(self):
home = os.path.expanduser('~')
self.add_argument('model', type=str)
self.add_argument('--mode', type=str, default='train')
self.add_argument('--iteration', type=str, default='0')
self.add_argument('--pause', type=int, default=0) # ignore this argument.
# Data (input) paths
self.add_argument('--train_path', type=str, default=os.path.join(home, 'data', 'squad', 'train-v1.1.json'),
help='location of the training data')
self.add_argument('--test_path', type=str, default=os.path.join(home, 'data', 'squad', 'dev-v1.1.json'),
help='location of the test data')
# Output paths
self.add_argument('--output_dir', type=str, default='/tmp/piqa/squad/', help='Output directory')
self.add_argument('--save_dir', type=str, default=None, help='location for saving the model')
self.add_argument('--load_dir', type=str, default=None, help='location for loading the model')
self.add_argument('--dump_dir', type=str, default=None, help='location for dumping outputs')
self.add_argument('--report_path', type=str, default=None, help='location for report')
self.add_argument('--pred_path', type=str, default=None, help='location for prediction json file during `test`')
self.add_argument('--cache_path', type=str, default=None)
self.add_argument('--question_emb_dir', type=str, default=None)
self.add_argument('--context_emb_dir', type=str, default=None)
# Training arguments
self.add_argument('--epochs', type=int, default=20)
self.add_argument('--train_steps', type=int, default=0)
self.add_argument('--eval_steps', type=int, default=1000)
self.add_argument('--eval_save_period', type=int, default=500)
self.add_argument('--report_period', type=int, default=100)
# Similarity search (faiss, pysparnn) arguments
self.add_argument('--metric', type=str, default='ip', help='ip|l2')
self.add_argument('--nlist', type=int, default=1)
self.add_argument('--nprobe', type=int, default=1)
self.add_argument('--bpv', type=int, default=None, help='bytes per vector (e.g. 8)')
self.add_argument('--num_train_mats', type=int, default=100)
# Demo arguments
self.add_argument('--port', type=int, default=8080)
# Other arguments
self.add_argument('--draft', default=False, action='store_true')
self.add_argument('--cuda', default=False, action='store_true')
self.add_argument('--preload', default=False, action='store_true')
self.add_argument('--cache', default=False, action='store_true')
self.add_argument('--archive', default=False, action='store_true')
self.add_argument('--dump_period', type=int, default=20)
self.add_argument('--emb_type', type=str, default='dense', help='dense|sparse')
self.add_argument('--metadata', default=False, action='store_true')
self.add_argument('--mem_info', default=False, action='store_true')
def parse_args(self, **kwargs):
args = super().parse_args()
if args.draft:
args.batch_size = 2
args.eval_steps = 1
args.eval_save_period = 2
args.train_steps = 2
if args.save_dir is None:
args.save_dir = os.path.join(args.output_dir, 'save')
if args.load_dir is None:
args.load_dir = os.path.join(args.output_dir, 'save')
if args.dump_dir is None:
args.dump_dir = os.path.join(args.output_dir, 'dump')
if args.question_emb_dir is None:
args.question_emb_dir = os.path.join(args.output_dir, 'question_emb')
if args.context_emb_dir is None:
args.context_emb_dir = os.path.join(args.output_dir, 'context_emb')
if args.report_path is None:
args.report_path = os.path.join(args.output_dir, 'report.csv')
if args.pred_path is None:
args.pred_path = os.path.join(args.output_dir, 'pred.json')
if args.cache_path is None:
args.cache_path = os.path.join(args.output_dir, 'cache.b')
args.load_dir = os.path.abspath(args.load_dir)
args.context_emb_dir = os.path.abspath(args.context_emb_dir)
args.question_emb_dir = os.path.abspath(args.question_emb_dir)
return args
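# Illustrative sketch, not part of the original module: typical construction,
# assuming the positional `model` argument (e.g. `baseline`) is supplied on the
# command line together with any optional flags.
#
#   parser = ArgumentParser(description='piqa')
#   parser.add_arguments()
#   args = parser.parse_args()      # e.g. `python main.py baseline --cuda`
#   print(args.save_dir, args.load_dir, args.pred_path)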
| avg_line_length: 50.478261 | max_line_length: 120 | alphanum_fraction: 0.644703 | classes: 4,615 (0.993755) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 1,143 (0.246124) |
97e32ebe567c88c97e005c959868e8ed6406d1eb | 2,210 | py | Python | getml/loss_functions.py | srnnkls/getml-python-api @ 032b2fec19a0e0a519eab480ee61e0d422d63993 | ["MIT"] | stars: null | issues: null | forks: null |
# Copyright 2019 The SQLNet Company GmbH
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
This module contains the loss functions for the getml library.
"""
# ------------------------------------------------------------------------------
class _LossFunction(object):
"""
Base class. Should not ever be directly initialized!
"""
def __init__(self):
self.thisptr = dict()
self.thisptr["type_"] = "none"
# ------------------------------------------------------------------------------
class CrossEntropyLoss(_LossFunction):
"""
Cross entropy function.
Recommended loss function for classification problems.
"""
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.thisptr["type_"] = "CrossEntropyLoss"
# ------------------------------------------------------------------------------
class SquareLoss(_LossFunction):
"""
Square loss function.
Recommended loss function for regression problems.
"""
def __init__(self):
super(SquareLoss, self).__init__()
self.thisptr["type_"] = "SquareLoss"
# ------------------------------------------------------------------------------
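# Illustrative sketch, not part of the original module: the loss objects only
# carry a serializable type tag in `thisptr`, which the getml engine reads.
if __name__ == "__main__":
    loss = CrossEntropyLoss()
    print(loss.thisptr["type_"])  # prints "CrossEntropyLoss"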
| avg_line_length: 32.985075 | max_line_length: 80 | alphanum_fraction: 0.619005 | classes: 705 (0.319005) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 1,783 (0.806787) |
97e4ff9556a184829362cc46861ffd16d6689ddb | 870 | py | Python | transit/helpers.py | moredatarequired/python-stitch-client @ 222ba24e34614d3acecab41cd78a5c78ab8ea782 | ["Apache-2.0"] | stars: 71 (2015-01-03T07:55:33.000Z to 2021-10-30T16:52:09.000Z) | issues: 27 (2015-01-02T06:10:25.000Z to 2022-02-20T21:54:13.000Z) | forks: 20 (2015-01-05T04:07:52.000Z to 2022-02-20T19:08:15.000Z) |
## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import itertools
from transit.pyversion import imap, izip
def mapcat(f, i):
return itertools.chain.from_iterable(imap(f, i))
def pairs(i):
return izip(*[iter(i)] * 2)
cycle = itertools.cycle
def take(n, i):
return itertools.islice(i, 0, n)
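# Illustrative sketch, not part of the original module: small demonstrations of
# the iterator helpers defined above.
if __name__ == "__main__":
    print(list(mapcat(lambda x: (x, x), [1, 2, 3])))  # [1, 1, 2, 2, 3, 3]
    print(list(pairs([1, 2, 3, 4])))                  # [(1, 2), (3, 4)]
    print(list(take(3, cycle("ab"))))                 # ['a', 'b', 'a']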
| avg_line_length: 27.1875 | max_line_length: 75 | alphanum_fraction: 0.725287 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 596 (0.685057) |
97e737d9c2d51a5e35ef3bbd28e5bc15aadb06de | 1,779 | py | Python | part4/matplotlib/seoul_to_cn_gb_kw.py | tls1403/PythonTest @ 069f23b25ec655aa199d13aef9c14d2e33366861 | ["MIT"] | stars: null | issues: null | forks: null |
import pandas as pd
import matplotlib.pyplot as plt
# Remove Korean (Hangul) font rendering errors
from matplotlib import font_manager,rc
font_path ="D:/5674-833_4th/part4/malgun.ttf"
font_name = font_manager.FontProperties(fname=font_path).get_name()
rc('font',family = font_name)
df = pd.read_excel('D:/5674-833_4th/part4/시도별 전출입 인구수.xlsx',engine = 'openpyxl',header =0)
df = df.fillna(method='ffill') # fill missing values forward from the previous row
# Extract and tidy only the rows for moves from Seoul to other regions
mask = (df['전출지별'] == '서울특별시') & (df['전입지별'] != '서울특별시')
df_seoul = df[mask]
df_seoul = df_seoul.drop(['전출지별'],axis= 1) # drop the '전출지별' (departure region) column
df_seoul.rename({'전입지별':'전입지'},axis=1,inplace=True) # rename the '전입지별' column to '전입지' (destination)
df_seoul.set_index('전입지',inplace = True)
print(df_seoul)
# Select the population data for moves from Seoul to Chungcheongnam-do, Gyeongsangbuk-do and Gangwon-do
col_years = list(map(str,range(1970,2018)))
df_3 = df_seoul.loc[['충청남도','경상북도','강원도'],col_years]
# Set the plot style
plt.style.use('ggplot')
# Create the graph object (one subplot in the figure)
fig = plt.figure(figsize=(20,5))
ax =fig.add_subplot(1,1,1)
# Draw the graphs on the axes object using the plot function
ax.plot(col_years,df_3.loc['충청남도',:],marker = 'o',markerfacecolor = 'green',
markersize = 10,color = 'olive',linewidth = 2, label = '서울 -> 충남')
ax.plot(col_years,df_3.loc['경상북도',:],marker = 'o',markerfacecolor = 'blue',
markersize = 10, color = 'skyblue', linewidth = 2 , label = '서울 -> 경북')
ax.plot(col_years,df_3.loc['강원도',:],marker = 'o',markerfacecolor = 'red',
markersize =10, color = 'magenta',linewidth = 2, label = '서울 -> 강원')
# Show the legend
ax.legend(loc = 'best')
# Add the chart title
ax.set_title('서울 -> 충남, 경북 , 강원 인구 이동',size = 20 )
# Add the axis labels
ax.set_xlabel('기간',size =12)
ax.set_ylabel('이동 인구수',size =12)
# Set the x-axis tick labels and rotate them 90 degrees
ax.set_xticklabels(col_years,rotation = 90)
# Set the tick label size
ax.tick_params(axis = "x", labelsize =10)
ax.tick_params(axis = "y", labelsize= 10)
plt.show()
| avg_line_length: 30.672414 | max_line_length: 90 | alphanum_fraction: 0.675098 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 1,060 (0.466755) |
97e73f20826e580f553c50fa8510c0e35ee9a048 | 365 | py | Python | blsqpy/query.py | BLSQ/blsqpy @ 52fcbd655780e78eccceb2a61280262194c2416c | ["MIT"] | stars: null | issues: 7 (2018-12-18T10:11:34.000Z to 2019-03-27T07:09:38.000Z) | forks: 2 (2018-12-12T12:31:40.000Z to 2019-02-25T12:34:48.000Z) |
import os
from jinja2 import Environment, FileSystemLoader
QUERIES_DIR = os.path.dirname(os.path.abspath(__file__))
def get_query(query_name, params):
j2_env = Environment(loader=FileSystemLoader(QUERIES_DIR+"/queries"),
trim_blocks=True)
return j2_env.get_template(query_name+'.sql').render(**params)+"\n -- query : "+query_name
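# Illustrative sketch, not part of the original module: get_query renders a
# Jinja2 template stored under blsqpy/queries/<query_name>.sql. The template
# name and parameters below are hypothetical placeholders.
#
#   sql = get_query('data_values', {'dataelement_uid': 'abc123'})
#   print(sql)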
| avg_line_length: 36.5 | max_line_length: 94 | alphanum_fraction: 0.715068 | classes: 0 (0) | generators: 0 (0) | decorators: 0 (0) | async_functions: 0 (0) | documentation: 32 (0.087671) |
97e7b0008c9dde06dac12b270121649a12a1ff61 | 8,507 | py | Python | SINE.py | EduardoMCF/SINE @ 061960b65164ae612a5cb63c540eb8a488505073 | ["MIT"] | stars: null | issues: null | forks: null |
import matplotlib.pyplot as plt
import pyaudio, wave
import numpy as np
from collections import OrderedDict as OD
from struct import pack
from math import fmod
from os import system
def getNoteAndDuration(chord : str, defaultDuration : float):
if ',' in chord:
note,duration = chord.strip('()').split(',')
return note,float(duration)
return chord,defaultDuration
def generateSineWave(samplingFreq : int = 44100, freq : float = 440.0, amplitude : float = 0.4, duration : float = 1.0, phase : float = 0, chunk : int = 0):
t = np.arange(samplingFreq*duration)/samplingFreq if not chunk else np.arange(chunk)/samplingFreq
sineWave = amplitude*np.sin(2 * pi * freq * t + phase)
return sineWave
def generateSong(keysOfChords : [str], samplingFreq : int = 44100, amplitude : float = 0.4, defaultDuration : float = 0.5, phase : float = 0):
song = np.array([])
for chord in keysOfChords:
note, duration = getNoteAndDuration(chord,defaultDuration)
noteFreq = octaves[note]
sineWave = generateSineWave(samplingFreq,noteFreq,amplitude,duration,phase)
phase = fmod(2.0 * pi * noteFreq * duration + phase, 2.0*pi)
song = np.concatenate((song,sineWave))
return song
def playAudio(samples,samplingFreq : int = 44100):
stream = p.open(format = pyaudio.paFloat32, channels = 1, rate = samplingFreq, output = True)
stream.write(samples.astype(np.float32).tostring())
stream.close()
def playAudioFromFile(path : str):
wf = wave.open(path,'rb')
stream = p.open(format = p.get_format_from_width(wf.getsampwidth()), channels = wf.getnchannels(), rate = wf.getframerate(), output = True)
chunk = 4096
data = wf.readframes(chunk)
while data:
stream.write(data)
data = wf.readframes(chunk)
stream.close()
wf.close()
def pad(data : [float]):
nextPowerOf2 = lambda x: 1 << (x-1).bit_length()
return np.concatenate((data,np.zeros(nextPowerOf2(len(data))-len(data))))
def plot(data : [float], nchannels : int = 1, samplingFreq : int = 44100):
formerLen,data = len(data),pad(data)
channels = [[] for channel in range(nchannels)]
for index, channelData in enumerate(data):
channels[index%len(channels)].append(channelData)
t=np.linspace(0, int(formerLen/len(channels)/samplingFreq), num=int(formerLen/len(channels)))
fig,ax = plt.subplots(nrows=2,ncols=2)
fig.tight_layout()
for idx in range(len(channels)):
ax[0,idx].plot(t,channels[idx][:formerLen//nchannels],color='C'+str(idx))
ax[0,idx].set_title('Signal (channel %i)' %(idx+1))
ax[0,idx].set_xlabel('Time')
ax[0,idx].set_ylabel('Amplitude')
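    # frequency-domain view: one-sided magnitude spectrum built from the first n/2 FFT bins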
n = len(data)
T = n/samplingFreq
frq = np.arange(n)/T
frq = frq[range(n//2)]
for idx in range(len(channels)):
FFT = (np.fft.fft(channels[idx])/n)[range(n//2)]
ax[1,idx].plot(frq,abs(FFT),color='C'+str(idx+2))
ax[1,idx].set_title('Spectrum (channel %i)' %(idx+1))
ax[1,idx].set_xlabel('Freq (Hz)')
ax[1,idx].set_ylabel('Magnitude')
plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.5)
plt.show()
def plotFromFile(path : str):
wf = wave.open(path,'rb')
data = np.frombuffer(wf.readframes(wf.getnframes()), np.int16)/32767
plot(data, wf.getnchannels(),wf.getframerate())
wf.close()
def groupByChunk(n, iterable):
l = len(iterable)
for idx in range(0,l,n):
yield iterable[idx:min(idx+n,l)]
def saveFile(fileName : str, samples : [float], sampleFreq : int = 44100):
wf=wave.open(fileName,"w")
nchannels = 1; sampwidth = 2
wf.setparams((nchannels, sampwidth, sampleFreq, len(samples), "NONE", "not compressed"))
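    # pack float samples in [-1, 1] into signed 16-bit PCM frames, written in chunks of 4096 samples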
for chunk in groupByChunk(4096,samples):
wf.writeframes(b''.join(map(lambda sample : pack('<h', int(sample * 32767)),chunk)))
wf.close()
def getParamsSineWave():
parameters = OD()
inputs = [input('Sampling Frequency (Hz | default = 44100): '),input('Sinewave Frequency (Hz | default = 440.0): '),input('Amplitude ( float (0,1] | default = 0.4): '),input('Duration ( s | default = 1): '),input('Phase ( radians | default = 0): ')]
parameters['samplingFreq'] = int(inputs[0]) if inputs[0] else 44100
parameters['freq'] = float(inputs[1]) if inputs[1] else 440.0
parameters['amplitude'] = float(inputs[2]) if inputs[2] else 0.4
parameters['duration'] = float(inputs[3]) if inputs[3] else 1
parameters['phase'] = eval(inputs[4]) if inputs[4] else 0
return parameters
def getParamsSong():
parameters = OD()
inputs = [input('Insert the path to a txt file with keys of chords (more info in help.txt): '), input('Sampling Frequency (Hz | default = 44100): '),input('Amplitude ( float (0,1] | default = 0.4): '),input('Duration ( s | default = 0.4): '),input('Phase ( radians | default = 0): ')]
f = open(inputs[0],'r')
parameters['keysOfChords'] = f.read().split()
    parameters['samplingFreq'] = int(inputs[1]) if inputs[1] else 44100
parameters['amplitude'] = float(inputs[2]) if inputs[2] else 0.4
parameters['duration'] = float(inputs[3]) if inputs[3] else 0.4
parameters['phase'] = eval(inputs[4]) if inputs[4] else 0
f.close()
return parameters
def getParamsFile():
return input('Path to a wav file: ')
pi = np.pi
p = pyaudio.PyAudio()
octaves = {
'C0': 16.35, 'C#0': 17.32, 'D0': 18.35, 'D#0': 19.45, 'E0': 20.6, 'F0': 21.83, 'F#0': 23.12, 'G0': 24.5, 'G#0': 25.96, 'A0': 27.5, 'A#0': 29.14, 'B0': 30.87,
'C1': 32.70, 'C#1': 34.65, 'D1': 36.71, 'D#1': 38.89, 'E1': 41.20, 'F1': 43.65, 'F#1': 46.25, 'G1': 49.0, 'G#1': 51.91, 'A1': 55.0, 'A#1': 58.27, 'B1': 61.74,
'C2': 65.41, 'C#2': 69.3, 'D2': 73.42, 'D#2': 77.78, 'E2': 82.41, 'F2': 87.31, 'F#2': 92.5, 'G2': 98.0, 'G#2': 103.83, 'A2': 110.0, 'A#2': 116.54, 'B2': 123.47,
'C3': 130.81, 'C#3': 138.59, 'D3': 146.83, 'D#3': 155.56, 'E3': 164.81, 'F3': 174.62, 'F#3': 185.0, 'G3': 196.0, 'G#3': 207.65, 'A3': 220.0, 'A#3': 233.08, 'B3': 246.94,
'C4': 261.62, 'C#4': 277.19, 'D4': 293.67, 'D#4': 311.12, 'E4': 329.62, 'F4': 349.23, 'F#4': 370.0, 'G4': 392.0, 'G#4': 415.31, 'A4': 440.0, 'A#4': 466.17, 'B4': 493.88,
'C5': 523.25, 'C#5': 554.37, 'D5': 587.33, 'D#5': 622.25, 'E5': 659.25, 'F5': 698.46, 'F#5': 739.99, 'G5': 783.99, 'G#5': 830.61, 'A5': 880.0, 'A#5': 932.33, 'B5': 987.77,
'C6': 1046.5, 'C#6': 1108.74, 'D6': 1174.66, 'D#6': 1244.5, 'E6': 1318.5, 'F6': 1396.92, 'F#6': 1479.98, 'G6': 1567.98, 'G#6': 1661.22, 'A6': 1760.0, 'A#6': 1864.66,'B6': 1975.54,
'C7': 2093.0, 'C#7': 2217.48, 'D7': 2349.32, 'D#7': 2489.0, 'E7': 2637.0, 'F7': 2793.84, 'F#7': 2959.96, 'G7': 3135.96, 'G#7': 3322.44,'A7': 3520.0, 'A#7': 3729.32, 'B7': 3951.08,
'C8': 4186.0, 'C#8': 4434.96, 'D8': 4698.64, 'D#8': 4978.0, 'E8': 5274.0, 'F8': 5587.68, 'F#8': 5919.92, 'G8': 6271.92, 'G#8': 6644.88, 'A8': 7040.0, 'A#8': 7458.64, 'B8': 7902.16,
'.': 0
}
choice1 = int(input('Select an option:\n1 - Generate sine wave\n2 - Generate song\n3 - Load wav file\n\nYour choice (1,2 or 3): '))
if choice1 not in [1,2,3]: raise ValueError('Invalid choice: %i' %choice1)
options = {1: getParamsSineWave, 2:getParamsSong, 3:getParamsFile}
param = options[choice1]()
system('cls||clear')
dialog = 'Select an option:\n1 - Play\n2 - Plot\n3 - Save\n4 - Exit\n\nYour choice (1,2,3 or 4): '
dialog2 = 'Select an option:\n1 - Play\n2 - Plot\n3 - Exit\n\nYour choice (1,2 or 3): '
while True:
choice2 = int(input(dialog)) if choice1 in [1,2] else int(input(dialog2))
if choice1 in [1,2]:
dataSine = generateSineWave(*param.values()) if choice1 == 1 else None
dataSong = generateSong(*param.values()) if choice1 == 2 else None
if choice2 == 1:
playAudio(dataSine, param['samplingFreq']) if choice1 == 1 else playAudio(dataSong,param['samplingFreq'])
elif choice2 == 2:
plot(dataSine, samplingFreq = param['samplingFreq']) if choice1 == 1 else plot(dataSong, samplingFreq = param['samplingFreq'])
elif choice2 == 3:
fileName = input('File name: ')
saveFile(fileName,dataSine if choice1 == 1 else dataSong,param['samplingFreq'])
elif choice2 == 4:
break
elif choice1 == 3:
if choice2 == 1:
playAudioFromFile(param)
elif choice2 == 2:
plotFromFile(param)
elif choice2 == 3:
break
system("cls||clear")
p.terminate()
| 48.611429
| 288
| 0.611379
| 0
| 0
| 122
| 0.014341
| 0
| 0
| 0
| 0
| 1,582
| 0.185964
|
97e7c3ef3fb80b92eda0926518e235c327df3ae0
| 1,603
|
py
|
Python
|
setup.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
setup.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
setup.py
|
lkylych/lagom
|
64777be7f09136072a671c444b5b3fbbcb1b2f18
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from setuptools import find_packages
from lagom.version import __version__
# Read content of README.md
with open('README.md', 'r') as f:
long_description = f.read()
setup(name='lagom',
version=__version__,
author='Xingdong Zuo',
author_email='zuoxingdong@hotmail.com',
description='lagom: A light PyTorch infrastructure to quickly prototype reinforcement learning algorithms.',
# Long description of README markdown, shows in Python Package Index
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/zuoxingdong/lagom',
# Install dependencies
install_requires=['numpy',
'scipy',
'pandas',
'matplotlib',
'seaborn',
'scikit-image',
'jupyterlab',
'gym',
'cma'],
tests_require=['pytest'],
# Only Python 3+
python_requires='>=3',
# List all lagom packages (folder with __init__.py), useful to distribute a release
packages=find_packages(),
# tell pip some metadata (e.g. Python version, OS etc.)
classifiers=['Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Topic :: Scientific/Engineering :: Artificial Intelligence']
)
| 37.27907
| 114
| 0.5733
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 773
| 0.482221
|
97e804ef9c7c1c0635aab0477304f63f5daafe96
| 2,046
|
py
|
Python
|
plugins_inactive/plugin_wikipediasearch.py
|
ademaro/Irene-Voice-Assistant
|
34a71892258d993dc227e6653281444f091e86ae
|
[
"MIT"
] | null | null | null |
plugins_inactive/plugin_wikipediasearch.py
|
ademaro/Irene-Voice-Assistant
|
34a71892258d993dc227e6653281444f091e86ae
|
[
"MIT"
] | null | null | null |
plugins_inactive/plugin_wikipediasearch.py
|
ademaro/Irene-Voice-Assistant
|
34a71892258d993dc227e6653281444f091e86ae
|
[
"MIT"
] | null | null | null |
import os
import time
import pyautogui
# from voiceassmain import play_voice_assistant_speech
from vacore import VACore
# based on EnjiRouz realization https://github.com/EnjiRouz/Voice-Assistant-App/blob/master/app.py
# function executed at plugin startup
def start(core: VACore):
manifest = {
"name": "Википедия (поиск)",
"version": "1.0",
"require_online": True,
"commands": {
"википедия|вики": run_wiki,
},
}
return manifest
def run_wiki(core: VACore, phrase: str):
# if core != None:
# core.play_voice_assistant_speech("Ищу на вики {}".format(phrase))
import wikipediaapi
wiki = wikipediaapi.Wikipedia("ru")
    # look up the page for the query, read its summary, and open the page link for more details
wiki_page = wiki.page(phrase)
try:
if wiki_page.exists():
core.play_voice_assistant_speech(
"Вот что я нашла для {} в википедии".format(phrase)
)
# webbrowser.get().open(wiki_page.fullurl)
            # the assistant reads the first two sentences of the Wikipedia page summary
            # (multilingual support may be problematic)
            core.play_voice_assistant_speech(". ".join(wiki_page.summary.split(".")[:2]))
else:
            # if nothing was found on Wikipedia, open a search-engine link in the browser instead
# play_voice_assistant_speech(translator.get(
# "Can't find {} on Wikipedia. But here is what I found on google").format(search_term))
# url = "https://google.com/search?q=" + search_term
# webbrowser.get().open(url)
core.play_voice_assistant_speech("Не нашла {} в википедии".format(phrase))
    # since it is hard to anticipate every possible error, catch them all, report, and keep the program running
except:
import traceback
core.play_voice_assistant_speech("Проблемы с поиском в Википедии")
traceback.print_exc()
return
| 33.540984
| 118
| 0.655425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,598
| 0.638434
|
97e922fd511e37dd6ba6caa81bbded4c80d22dc7
| 316
|
py
|
Python
|
todo/management/urls.py
|
Sanguet/todo-challenge
|
8eabc02081e7ce6b33408558d4a4a39edee3944c
|
[
"MIT"
] | null | null | null |
todo/management/urls.py
|
Sanguet/todo-challenge
|
8eabc02081e7ce6b33408558d4a4a39edee3944c
|
[
"MIT"
] | null | null | null |
todo/management/urls.py
|
Sanguet/todo-challenge
|
8eabc02081e7ce6b33408558d4a4a39edee3944c
|
[
"MIT"
] | null | null | null |
# Django
from django.urls import include, path
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from .views import tasks as task_views
router = DefaultRouter()
router.register(r'tasks', task_views.TaskViewSet, basename='task')
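# DefaultRouter generates the standard ViewSet routes for the registered basename,
# e.g. /tasks/ (list, create) and /tasks/<pk>/ (retrieve, update, destroy).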
urlpatterns = [
path('', include(router.urls))
]
| 19.75
| 66
| 0.762658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.170886
|
97e9830408b6514215e19bea044829eb96f15f7c
| 7,936
|
py
|
Python
|
dnd5e/items.py
|
MegophrysNasuta/dnd5e
|
431c0c219052ddf5c62a500bd14f17fab3574648
|
[
"MIT"
] | null | null | null |
dnd5e/items.py
|
MegophrysNasuta/dnd5e
|
431c0c219052ddf5c62a500bd14f17fab3574648
|
[
"MIT"
] | null | null | null |
dnd5e/items.py
|
MegophrysNasuta/dnd5e
|
431c0c219052ddf5c62a500bd14f17fab3574648
|
[
"MIT"
] | null | null | null |
import enum
from typing import Any, List, Optional, Tuple
class ArmorType(enum.Enum):
LIGHT = enum.auto()
MEDIUM = enum.auto()
HEAVY = enum.auto()
def __repr__(self):
return '%s.%s' % (self.__class__.__name__, self.name)
class Armor:
def __init__(self, name: str, armor_class: int,
armor_type: Optional[ArmorType] = None,
min_str_requirement: Optional[int] = None,
disadvantages_stealth: bool = False):
self.name = str(name)
self.armor_class = int(armor_class)
self.armor_type = armor_type
self.min_str_requirement = (min_str_requirement and
int(min_str_requirement))
self.disadvantages_stealth = bool(disadvantages_stealth)
if self.armor_type == ArmorType.HEAVY:
self.disadvantages_stealth = True
@property
def max_dex_modifier(self) -> Optional[int]:
if self.armor_type == ArmorType.LIGHT:
return None
elif self.armor_type == ArmorType.MEDIUM:
return 2
else:
return 0
def __eq__(self, other: Any) -> bool:
if not isinstance(other, self.__class__):
return False
return (
self.name == other.name and
self.armor_class == other.armor_class and
self.armor_type == other.armor_type and
self.min_str_requirement == other.min_str_requirement and
self.disadvantages_stealth == other.disadvantages_stealth
)
def __repr__(self):
return ('Armor(%r, %r, armor_type=%r, '
'min_str_requirement=%r, disadvantages_stealth=%r)') % (
self.name, self.armor_class, self.armor_type,
self.min_str_requirement, self.disadvantages_stealth)
def __str__(self):
return '<%sArmor: %s (AC %i)>' % (self.armor_type.name.title(),
self.name, self.armor_class)
RangeIncrement = Tuple[int, int]
class WeaponType(enum.Enum):
SIMPLE = enum.auto()
MARTIAL = enum.auto()
def __repr__(self):
return '%s.%s' % (self.__class__.__name__, self.name)
class WeaponDamageType(enum.Enum):
PIERCING = 'P'
SLASHING = 'S'
BLUDGEONING = 'B'
def __repr__(self):
return '%s.%s' % (self.__class__.__name__, self.name)
class Weapon:
def __init__(self, name: str, damage: Optional[str] = None,
two_handed_damage: Optional[str] = None,
damage_type: Optional[WeaponDamageType] = None,
range_increment: Optional[RangeIncrement] = None,
requires_ammo: bool = False, finesse_weapon: bool = False,
is_heavy: bool = False, is_light: bool = False,
slow_loading: bool = False, has_reach: bool = False,
can_be_thrown: bool = False, requires_two_hands: bool = False,
versatile: bool = False):
self.name = str(name)
self.damage = damage and str(damage)
self.two_handed_damage = two_handed_damage and str(two_handed_damage)
self.damage_type = damage_type
self.range_increment = range_increment and tuple(map(int,
range_increment))
self.__requires_ammo = None
self.requires_ammo = bool(requires_ammo)
self.finesse_weapon = bool(finesse_weapon)
self.__is_heavy = None
self.__is_light = None
self.is_heavy = bool(is_heavy)
self.is_light = bool(is_light)
self.slow_loading = bool(slow_loading)
self.has_reach = bool(has_reach)
self.__can_be_thrown = None
self.can_be_thrown = bool(can_be_thrown)
if self.can_be_thrown:
self.range_increment = (20, 60)
self.__requires_two_hands = None
self.__versatile = None
self.requires_two_hands = bool(requires_two_hands)
self.versatile = bool(versatile)
if self.damage and self.two_handed_damage:
self.versatile = True
if self.versatile:
assert self.two_handed_damage is not None
@property
def can_be_thrown(self):
return bool(self.__can_be_thrown)
@can_be_thrown.setter
def can_be_thrown(self, value):
self.__can_be_thrown = bool(value)
if self.__can_be_thrown:
self.__requires_ammo = False
@property
def has_range(self) -> bool:
return self.range_increment is not None
@property
def is_heavy(self):
return bool(self.__is_heavy)
@is_heavy.setter
def is_heavy(self, value):
self.__is_heavy = bool(value)
if self.__is_heavy:
self.__is_light = False
@property
def is_light(self):
return bool(self.__is_light)
@is_light.setter
def is_light(self, value):
self.__is_light = bool(value)
if self.__is_light:
self.__is_heavy = False
@property
def requires_ammo(self):
return bool(self.__requires_ammo)
@requires_ammo.setter
def requires_ammo(self, value):
self.__requires_ammo = bool(value)
if self.__requires_ammo:
self.__can_be_thrown = False
@property
def requires_two_hands(self):
return bool(self.__requires_two_hands)
@requires_two_hands.setter
def requires_two_hands(self, value):
self.__requires_two_hands = bool(value)
if self.__requires_two_hands:
self.__versatile = False
@property
def versatile(self):
return bool(self.__versatile)
@versatile.setter
def versatile(self, other):
self.__versatile = bool(other)
if self.__versatile:
self.__requires_two_hands = False
@property
def properties(self) -> List[str]:
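        # Assemble the 5e-style property strings (Ammunition, Finesse, Heavy, ...) from this weapon's flags.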
prop_list = []
if self.requires_ammo:
assert self.range_increment is not None
prop_list.append('Ammunition (range %i/%i)' % self.range_increment)
if self.finesse_weapon:
prop_list.append('Finesse')
if self.is_heavy:
prop_list.append('Heavy')
if self.is_light:
prop_list.append('Light')
if self.slow_loading:
prop_list.append('Loading')
if self.has_reach:
prop_list.append('Reach')
if self.can_be_thrown:
assert self.range_increment is not None
prop_list.append('Thrown (range %i/%i)' % self.range_increment)
if self.requires_two_hands:
prop_list.append('Two-handed')
if self.versatile:
prop_list.append('Versatile (%s)' % self.two_handed_damage)
return prop_list
def __repr__(self):
return ('Weapon("%s", %r, two_handed_damage=%r, '
'damage_type=%r, range_increment=%r, is_light=%r, '
'requires_ammo=%r, finesse_weapon=%r, is_heavy=%r, '
'slow_loading=%r, has_reach=%r, can_be_thrown=%r, '
'requires_two_hands=%r, versatile=%r)') % (
self.name, self.damage, self.two_handed_damage,
self.damage_type, self.range_increment, self.is_light,
self.requires_ammo, self.finesse_weapon, self.is_heavy,
self.slow_loading, self.has_reach, self.can_be_thrown,
self.requires_two_hands, self.versatile,
)
def __str__(self):
str_rep = ['<%s: %s']
str_rep_contents = [self.__class__.__name__, self.name]
if self.has_range:
str_rep.append(' %s')
str_rep_contents.append(self.range_increment)
str_rep.append(' %s (%s)>')
str_rep_contents.extend([self.damage, self.damage_type.value])
return ''.join(str_rep) % tuple(str_rep_contents)
class SimpleWeapon(Weapon):
pass
class MartialWeapon(Weapon):
pass
| 33.344538
| 79
| 0.606981
| 7,822
| 0.985635
| 0
| 0
| 2,717
| 0.342364
| 0
| 0
| 510
| 0.064264
|
97eb5eb44132b5d87929c59ff9f174afa27e84b4
| 7,094
|
py
|
Python
|
dbd/cli/dbdcli.py
|
AlexRogalskiy/dbd
|
ac2c6fb673861321b23fbf2a57d9e39fa5cb5352
|
[
"BSD-3-Clause"
] | 33
|
2022-01-09T09:32:17.000Z
|
2022-03-05T18:52:11.000Z
|
dbd/cli/dbdcli.py
|
zsvoboda/dbd
|
ac2c6fb673861321b23fbf2a57d9e39fa5cb5352
|
[
"BSD-3-Clause"
] | 2
|
2022-02-16T19:14:13.000Z
|
2022-02-16T19:14:34.000Z
|
dbd/cli/dbdcli.py
|
zsvoboda/dbd
|
ac2c6fb673861321b23fbf2a57d9e39fa5cb5352
|
[
"BSD-3-Clause"
] | null | null | null |
import importlib.metadata
import logging
import os
import shutil
from typing import Dict, Any, List
import click
from sqlalchemy import text
from dbd.log.dbd_exception import DbdException
from dbd.config.dbd_profile import DbdProfile
from dbd.config.dbd_project import DbdProject
from dbd.executors.model_executor import ModelExecutor, InvalidModelException
from dbd.log.dbd_logger import setup_logging
log = logging.getLogger(__name__)
this_script_dir = os.path.dirname(__file__)
class Dbd(object):
"""
Top level CLI object
"""
def __init__(self, debug: bool = False, logfile: str = 'dbd.log', profile: str = 'dbd.profile',
project: str = 'dbd.project'):
"""
Constructor
:param bool debug: debug flag
:param str logfile: log file
:param str profile: profile file
:param str project: project file
"""
self.__debug = debug
self.__logfile = logfile
self.__profile = profile
self.__project = project
def debug(self) -> bool:
"""
Debug flag getter
:return: debug flag
:rtype: bool
"""
return self.__debug
def logfile(self) -> str:
"""
Logfile getter
:return: logfile
:rtype: str
"""
return self.__logfile
def profile(self) -> str:
"""
Profile getter
:return: profile
:rtype: str
"""
return self.__profile
def project(self) -> str:
"""
Project getter
:return: project
:rtype: str
"""
return self.__project
def print_version():
"""
Prints DBD version
"""
click.echo(f"You're using DBD version {importlib.metadata.version('dbd')}.")
@click.group(invoke_without_command=True)
@click.option('--debug/--no-debug', envvar='DBD_DEBUG', default=False, help='Sets debugging on/off')
@click.option('--version', help="Print the DBD version and exit.", is_flag=True, is_eager=True)
@click.option('--logfile', envvar='DBD_LOG_FILE', default='dbd.log', help='Log file location')
@click.option('--profile', envvar='DBD_PROFILE', default='dbd.profile', help='Profile configuration file')
@click.option('--project', envvar='DBD_PROJECT', default='dbd.project', help='Project configuration file')
@click.pass_context
def cli(ctx, debug, logfile, version, profile, project):
if debug:
click.echo(f"Logging DEBUG info to '{logfile}'")
setup_logging(logging.DEBUG, logfile)
if version:
print_version()
ctx.exit(0)
ctx.obj = Dbd(debug, logfile, profile, project)
# noinspection PyUnusedLocal
@cli.command(help='Initializes a new DBD project.')
@click.argument('dest', required=False, default='my_new_dbd_project')
@click.pass_obj
def init(dbd, dest):
try:
src = os.path.join(this_script_dir, '..', 'resources', 'template')
if os.path.exists(dest):
log.error(f"Can't overwrite directory '{dest}'")
raise DbdException(f"Can't overwrite directory '{dest}'")
shutil.copytree(src, dest)
click.echo(f"New project {dest} generated. Do cd {dest}; dbd run .")
except DbdException as d:
click.echo(f"ERROR: '{d}'")
@cli.command(help='Executes project.')
@click.option('--only', envvar='DBD_ONLY', default=None, help='Comma separated list of fully qualified table names '
'(<schema>.<table-name-no suffix>) to execute.')
@click.option('--deps/--no-deps', envvar='DBD_DEPS', default=True, help='Ignores dependencies for the --only list.')
@click.argument('dest', required=False, default='.')
@click.pass_obj
def run(dbd, only, deps, dest):
try:
log.debug("Loading configuration.")
prf = DbdProfile.load(os.path.join('.', dbd.profile()))
prj = DbdProject.load(prf, os.path.join(dest, dbd.project()))
log.debug("Creating model.")
model = ModelExecutor(prj)
log.debug("Connecting database.")
engine = prj.alchemy_engine_from_project()
# engine.execution_options(supports_statement_cache=False)
log.debug("Executing model.")
if not deps and only is None:
log.error("You must specify --only list for --no-deps.")
raise DbdException("You must specify --only list for --no-deps.")
if only is not None:
only_list = only.split(',')
try:
model.execute(engine, only_list, deps)
except InvalidModelException as e:
log.error(f"Can't run {only_list}: {e}")
raise DbdException(f"Can't run {only_list}: {e}")
else:
model.execute(engine)
log.debug("Finished.")
click.echo("All tasks finished!")
except DbdException as d:
click.echo(f"ERROR: '{d}'")
@cli.command(help='Validates project.')
@click.argument('dest', required=False, default='.')
@click.pass_obj
def validate(dbd, dest):
try:
prf = DbdProfile.load(os.path.join('.', dbd.profile()))
prj = DbdProject.load(prf, os.path.join(dest, dbd.project()))
model = ModelExecutor(prj)
engine = prj.alchemy_engine_from_project()
# noinspection PyBroadException
try:
engine.execute(text("SELECT 1"))
except Exception:
click.echo(
f"Can't connect to the target database. Check profile configuration in "
f"'{os.path.normpath(os.path.join(dest, dbd.profile()))}'.")
validation_result, validation_errors = model.validate()
if validation_result:
click.echo("No errors found. Model is valid.")
else:
click.echo("Model isn't valid. Please fix the following errors:")
__echo_validation_errors(validation_errors)
except DbdException as d:
click.echo(f"ERROR: '{d}'")
def __echo_validation_errors(validation_errors: Dict[str, Any]):
"""
Top level function for printing validation errors
:param validation_errors:
:return:
"""
__echo_validation_level(validation_errors)
class InvalidValidationErrorStructure(DbdException):
pass
def __echo_validation_level(level_validation_errors: Dict[str, Any], indent: int = 0):
"""
Echo validation error line (called recursively on all Dict values)
:param level_validation_errors: Dict with validation result
:param indent: indentation level
"""
for (k, v) in level_validation_errors.items():
if isinstance(v, str):
msg = f"{k}:{v}"
click.echo(msg.rjust(indent * 2 + len(msg), ' '))
elif isinstance(v, Dict):
msg = f"{k}:"
click.echo(msg.rjust(indent * 2 + len(msg), ' '))
__echo_validation_level(v, indent + 1)
elif isinstance(v, List):
msg = f"{k}:{str(v)}"
click.echo(msg.rjust(indent * 2 + len(msg), ' '))
else:
raise InvalidValidationErrorStructure(f"Invalid validation result: '{v}' isn't supported type.")
| 34.436893
| 116
| 0.623203
| 1,214
| 0.171131
| 0
| 0
| 4,102
| 0.578235
| 0
| 0
| 2,532
| 0.356921
|
97eb87e8a632182f8518b1d3afd5e6530ac981a5
| 9,901
|
py
|
Python
|
bestiary/serializers.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 66
|
2017-09-11T04:46:00.000Z
|
2021-03-13T00:02:42.000Z
|
bestiary/serializers.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 133
|
2017-09-24T21:28:59.000Z
|
2021-04-02T10:35:31.000Z
|
bestiary/serializers.py
|
Itori/swarfarm
|
7192e2d8bca093b4254023bbec42b6a2b1887547
|
[
"Apache-2.0"
] | 28
|
2017-08-30T19:04:32.000Z
|
2020-11-16T04:09:00.000Z
|
from rest_framework import serializers
from bestiary import models
class GameItemSerializer(serializers.ModelSerializer):
category = serializers.SerializerMethodField()
class Meta:
model = models.GameItem
fields = [
'id',
'com2us_id',
'url',
'name',
'category',
'icon',
'description',
'sell_value',
]
extra_kwargs = {
'url': {
'view_name': 'bestiary/items-detail',
},
}
def get_category(self, instance):
return instance.get_category_display()
class SourceSerializer(serializers.ModelSerializer):
class Meta:
model = models.Source
fields = ['id', 'url', 'name', 'description', 'farmable_source']
extra_kwargs = {
'url': {
'view_name': 'bestiary/monster-sources-detail',
},
}
class SkillUpgradeSerializer(serializers.ModelSerializer):
effect = serializers.SerializerMethodField()
class Meta:
model = models.SkillUpgrade
fields = ('effect', 'amount')
def get_effect(self, instance):
return instance.get_effect_display()
class SkillEffectSerializer(serializers.ModelSerializer):
type = serializers.CharField(source='get_type_display')
class Meta:
model = models.SkillEffect
fields = ('id', 'url', 'name', 'is_buff', 'type', 'description', 'icon_filename')
extra_kwargs = {
'url': {
'view_name': 'bestiary/skill-effects-detail',
},
}
class SkillEffectDetailSerializer(serializers.ModelSerializer):
effect = SkillEffectSerializer()
class Meta:
model = models.SkillEffectDetail
fields = [
'effect',
'aoe', 'single_target', 'self_effect',
'chance', 'on_crit', 'on_death', 'random',
'quantity', 'all', 'self_hp', 'target_hp', 'damage',
'note',
]
class SkillSerializer(serializers.HyperlinkedModelSerializer):
level_progress_description = serializers.SerializerMethodField()
upgrades = SkillUpgradeSerializer(many=True, read_only=True)
effects = SkillEffectDetailSerializer(many=True, read_only=True, source='skilleffectdetail_set')
scales_with = serializers.SerializerMethodField()
used_on = serializers.PrimaryKeyRelatedField(source='monster_set', many=True, read_only=True)
class Meta:
model = models.Skill
fields = (
'id', 'com2us_id', 'name', 'description', 'slot', 'cooltime', 'hits', 'passive', 'aoe',
'max_level', 'upgrades', 'effects', 'multiplier_formula', 'multiplier_formula_raw',
'scales_with', 'icon_filename', 'used_on', 'level_progress_description',
)
def get_level_progress_description(self, instance):
if instance.level_progress_description:
return instance.level_progress_description.rstrip().split('\n')
else:
return []
def get_scales_with(self, instance):
# TODO: Fix N+1 query in API response caused by this
return instance.scaling_stats.values_list('stat', flat=True)
class LeaderSkillSerializer(serializers.ModelSerializer):
attribute = serializers.SerializerMethodField('get_stat')
area = serializers.SerializerMethodField()
element = serializers.SerializerMethodField()
class Meta:
model = models.LeaderSkill
fields = ('id', 'url', 'attribute', 'amount', 'area', 'element')
extra_kwargs = {
'url': {
'view_name': 'bestiary/leader-skills-detail',
},
}
def get_stat(self, instance):
return instance.get_attribute_display()
def get_area(self, instance):
return instance.get_area_display()
def get_element(self, instance):
return instance.get_element_display()
class HomunculusSkillCraftCostSerializer(serializers.ModelSerializer):
item = GameItemSerializer(read_only=True)
class Meta:
model = models.HomunculusSkillCraftCost
fields = ['item', 'quantity']
class HomunculusSkillSerializer(serializers.ModelSerializer):
craft_materials = HomunculusSkillCraftCostSerializer(source='homunculusskillcraftcost_set', many=True, read_only=True)
used_on = serializers.PrimaryKeyRelatedField(source='monsters', many=True, read_only=True)
class Meta:
model = models.HomunculusSkill
fields = ['id', 'url', 'skill', 'craft_materials', 'prerequisites', 'used_on']
extra_kwargs = {
'url': {
'view_name': 'bestiary/homunculus-skills-detail',
},
}
class MonsterCraftCostSerializer(serializers.ModelSerializer):
item = GameItemSerializer(read_only=True)
class Meta:
model = models.MonsterCraftCost
fields = ['item', 'quantity']
class AwakenCostSerializer(serializers.ModelSerializer):
item = GameItemSerializer(read_only=True)
class Meta:
model = models.AwakenCost
fields = ['item', 'quantity']
class MonsterSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='bestiary/monsters-detail')
element = serializers.SerializerMethodField()
archetype = serializers.SerializerMethodField()
source = SourceSerializer(many=True, read_only=True)
leader_skill = LeaderSkillSerializer(read_only=True)
awaken_cost = AwakenCostSerializer(source='awakencost_set', many=True, read_only=True)
homunculus_skills = serializers.PrimaryKeyRelatedField(source='homunculusskill_set', read_only=True, many=True)
craft_materials = MonsterCraftCostSerializer(many=True, source='monstercraftcost_set', read_only=True)
class Meta:
model = models.Monster
fields = (
'id', 'url', 'bestiary_slug', 'com2us_id', 'family_id',
'name', 'image_filename', 'element', 'archetype', 'base_stars', 'natural_stars',
'obtainable', 'can_awaken', 'awaken_level', 'awaken_bonus',
'skills', 'skill_ups_to_max', 'leader_skill', 'homunculus_skills',
'base_hp', 'base_attack', 'base_defense', 'speed', 'crit_rate', 'crit_damage', 'resistance', 'accuracy',
'raw_hp', 'raw_attack', 'raw_defense', 'max_lvl_hp', 'max_lvl_attack', 'max_lvl_defense',
'awakens_from', 'awakens_to', 'awaken_cost',
'source', 'fusion_food',
'homunculus', 'craft_cost', 'craft_materials',
)
def get_element(self, instance):
return instance.get_element_display()
def get_archetype(self, instance):
return instance.get_archetype_display()
class FusionSerializer(serializers.ModelSerializer):
class Meta:
model = models.Fusion
fields = ['id', 'url', 'product', 'cost', 'ingredients']
extra_kwargs = {
'url': {
'view_name': 'bestiary/fusions-detail',
},
}
class BuildingSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='bestiary/buildings-detail')
area = serializers.SerializerMethodField()
affected_stat = serializers.SerializerMethodField()
element = serializers.SerializerMethodField()
class Meta:
model = models.Building
fields = [
'id',
'url',
'area',
'affected_stat',
'element',
'com2us_id',
'name',
'max_level',
'stat_bonus',
'upgrade_cost',
'description',
'icon_filename',
]
def get_area(self, instance):
return instance.get_area_display()
def get_affected_stat(self, instance):
return instance.get_affected_stat_display()
def get_element(self, instance):
return instance.get_element_display()
class DungeonSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='bestiary/dungeons-detail')
levels = serializers.PrimaryKeyRelatedField(source='level_set', read_only=True, many=True)
category = serializers.SerializerMethodField()
class Meta:
model = models.Dungeon
fields = [
'id',
'url',
'enabled',
'name',
'slug',
'category',
'icon',
'levels',
]
def get_category(self, instance):
return instance.get_category_display()
class EnemySerializer(serializers.ModelSerializer):
class Meta:
model = models.Enemy
fields = [
'id',
'monster',
'stars',
'level',
'hp',
'attack',
'defense',
'speed',
'resist',
'crit_bonus',
'crit_damage_reduction',
'accuracy_bonus',
]
class WaveSerializer(serializers.ModelSerializer):
enemies = EnemySerializer(source='enemy_set', many=True, read_only=True)
class Meta:
model = models.Wave
fields = [
'enemies',
]
class LevelSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='bestiary/levels-detail')
difficulty = serializers.SerializerMethodField()
waves = WaveSerializer(source='wave_set', many=True, read_only=True)
class Meta:
model = models.Level
fields = [
'id',
'url',
'dungeon',
'floor',
'difficulty',
'energy_cost',
'xp',
'frontline_slots',
'backline_slots',
'total_slots',
'waves',
]
def get_difficulty(self, instance):
return instance.get_difficulty_display()
| 31.233438
| 122
| 0.620644
| 9,779
| 0.987678
| 0
| 0
| 0
| 0
| 0
| 0
| 2,238
| 0.226038
|
97ec6821afa2d1990aea0fcfa7884edc560b6cc4
| 56,761
|
py
|
Python
|
Code/ConvNetAbel.py
|
abel-gr/AbelNN
|
e9f54a6a3844a504ff82e4bae97d43064834e90a
|
[
"MIT"
] | 1
|
2021-11-05T16:01:15.000Z
|
2021-11-05T16:01:15.000Z
|
Code/ConvNetAbel.py
|
abel-gr/AbelNN
|
e9f54a6a3844a504ff82e4bae97d43064834e90a
|
[
"MIT"
] | null | null | null |
Code/ConvNetAbel.py
|
abel-gr/AbelNN
|
e9f54a6a3844a504ff82e4bae97d43064834e90a
|
[
"MIT"
] | null | null | null |
# Copyright Abel Garcia. All Rights Reserved.
# https://github.com/abel-gr/AbelNN
import numpy as np
import copy as copy
import random
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pylab import text
import math
class ConvNetAbel:
version = 1.2
def __init__(self, hidden = [1], nEpochs = 1, learningRate=0.1, manualWeights=[],
debugLevel=1, rangeRandomWeight=None, showLogs=False, softmax=False,
activationFunction='leakyrelu', verbose = False, use='classification',
batch_size=1, batch_gradient='average', batch_mult=1, dropout=0, pre_norm=False,
shuffle=True, iterationDrop=0, convFilters = [32, 64, 128], convStride=2,
convFilterSizes=3, learningRateConv=0.001, convEpochs=10, kernel_initializer='he_normal'):
self.hiddenL = copy.deepcopy(hidden)
self.hiddenL2 = copy.deepcopy(hidden)
self.learningRate = learningRate
self.numEpochs = nEpochs
self.costs = [] # Costs list to check performance
self.debugWeights = []
self.meanCostByEpoch = []
self.hiddenWeights = []
self.manualWeights = manualWeights
self.debugMode = debugLevel
self.rangeRandomWeight = rangeRandomWeight
self.showLogs = showLogs
self.softmax = softmax
self.n_layer0 = -1
self.activationFunction = activationFunction
self.verbose = verbose
self.use = use
self.batch_size = batch_size
self.batch_gradient = batch_gradient
self.batch_mult = batch_mult
self.dropout = dropout
self.pre_norm = pre_norm
self.shuffle = shuffle
self.iterationDrop = iterationDrop
self.XavierInitialization = '1'
self.lastLayerNeurons = -1
# ConvNet:
self.convFilters = convFilters
self.filtersValues = [None] * len(convFilters)
self.convStride = convStride
self.convFilterSizes = convFilterSizes
self.learningRateConv = learningRateConv
self.convEpochs = convEpochs
self.kernel_initializer = kernel_initializer
# Conv2 with only one kernel
def conv2(self, x, kernel, stride=1):
output = [] #np.zeros((kernel.shape), dtype=np.float32)
kernel_l = kernel.shape[0]
kernel_size = kernel.shape[0] * kernel.shape[1]
c = int(kernel_l / 2)
for i in range(c, x.shape[0] - c, stride):
o = []
for j in range(c, x.shape[1] - c, stride):
i0 = i - c
j0 = j - c
i1 = i + c + 1
j1 = j + c + 1
o.append(np.sum(x[i0:i1, j0:j1] * kernel))
output.append(o)
output = np.asarray(output)
return output
# Convolution with multi-filters
def conv_filters(self, x, filters, stride=1, relu=False, mode='same'):
lex = len(x.shape)
lef = len(filters.shape)
if lex > lef:
print('conv_filters: The input array cannot have more dimensions than the filter array.')
return 0
output = []
kernel_l = filters.shape[0]
kernel_size = filters.shape[0] * filters.shape[1]
if lef == 2:
num_filters = 1
else:
num_filters = filters.shape[-1]
c = int(kernel_l / 2)
dim3 = False
evenShapeKernel = (kernel_l % 2 == 0)
if lex == 2:
dim2 = True
p0 = x.shape[0]
p1 = x.shape[1]
else:
# x parameter was the output of this method previously called
if lex == lef:
num_new_filters = int(num_filters / x.shape[-1])
if (num_new_filters % 2 != 0) and (num_filters % 2 == 0):
num_new_filters = num_new_filters - 1
if (num_new_filters == 0):
num_new_filters = 1
else: # It is the first convolutional layer of a color image
num_new_filters = num_filters
dim3 = True
dim2 = False
p0 = x.shape[0]
p1 = x.shape[1]
if mode == 'full':
fs0 = int(filters.shape[0] / 2)
fs1 = int(filters.shape[1] / 2)
max0 = p0 + fs0
max1 = p1 + fs1
ini0 = -1 * fs0
ini1 = -1 * fs1
elif mode == 'same':
max0 = p0
max1 = p1
ini0 = 0
ini1 = 0
elif mode == 'valid':
fs0 = int(filters.shape[0] / 2)
fs1 = int(filters.shape[1] / 2)
max0 = p0 - fs0
max1 = p1 - fs1
ini0 = fs0
ini1 = fs1
else:
print('Mode must be same, valid or full')
return 0
if evenShapeKernel and mode == 'valid':
max0 = max0 + 1
max1 = max1 + 1
for i in range(ini0, max0, stride):
o = []
for j in range(ini1, max1, stride):
i0 = i - c
j0 = j - c
i1 = i + c + 1
j1 = j + c + 1
if evenShapeKernel:
i0 = i0 + 1
j0 = j0 + 1
zero_padding_top = 0
zero_padding_bottom = 0
zero_padding_left = 0
zero_padding_right = 0
if i0 < 0:
zero_padding_top = abs(i0)
i0 = 0
if j0 < 0:
zero_padding_left = abs(j0)
j0 = 0
if i1 > p0:
zero_padding_bottom = i1 - p0
i1 = p0
if j1 > p1:
zero_padding_right = j1 - p1
j1 = p1
if dim2:
m = x[i0:i1, j0:j1]
#print('mshape:', m.shape, kernel_size, zero_padding_top, zero_padding_left)
# Zero padding:
m = np.pad(m, ((zero_padding_top,zero_padding_bottom),(zero_padding_left,zero_padding_right)), 'constant')
if lef != 2:
m = np.expand_dims(m, axis=-1)
m = np.repeat(m, num_filters, axis=-1)
else:
xi = x[i0:i1, j0:j1, :]
# Zero padding:
xi = np.pad(xi, ((zero_padding_top,zero_padding_bottom),(zero_padding_left,zero_padding_right),(0,0)), 'constant')
if dim3:
xi = np.expand_dims(xi, axis=-1)
m = np.repeat(xi, num_new_filters, axis=-1)
#print('M,F\n', m[:,:,0], filters[:,:,0])
#print(m.shape, filters.shape)
m = m * filters
#print('m*f\n', m[:,:,0])
m = np.sum(m, axis=0)
m = np.sum(m, axis=0)
if dim3:
m = np.sum(m, axis=0)
o.append(m)
output.append(o)
output = np.asarray(output)
if relu:
output[output < 0] = 0
return output
def kernelInitializer(self, i, ksize, inSize, outSize):
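        # fan-in / fan-out based random init: the 'he_*' variants scale by fan-in only,
        # the 'xavier_*' variants by fan-in + fan-out (normal or uniform distribution)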
if 'xavier' in self.kernel_initializer:
if self.kernel_initializer == 'xavier_normal':
if len(ksize) == 4:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2],ksize[3]) * math.sqrt(2.0 / (inSize + outSize))
else:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2]) * math.sqrt(2.0 / (inSize + outSize))
elif self.kernel_initializer == 'xavier_uniform':
highVal = math.sqrt(6.0 / (inSize + outSize))
lowVal = -1 * highVal
self.filtersValues[i] = np.random.uniform(low=lowVal, high=highVal, size=ksize)
else:
if self.kernel_initializer == 'he_normal':
if len(ksize) == 4:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2],ksize[3]) * math.sqrt(2.0 / inSize)
else:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2]) * math.sqrt(2.0 / inSize)
elif self.kernel_initializer == 'he_uniform':
highVal = math.sqrt(6.0 / inSize)
lowVal = -1 * highVal
self.filtersValues[i] = np.random.uniform(low=lowVal, high=highVal, size=ksize)
def convLayersFeedForward(self, im):
self.convInputs = []
len_m = len(im.shape)
#print('len_m:', len_m)
for i, cl in enumerate(self.convFilters):
self.convInputs.append(im)
if (self.filtersValues[i] is None):
if (type(self.convFilterSizes) == list):
ks = self.convFilterSizes[i]
else:
ks = self.convFilterSizes
inSize = np.prod(im.shape)
if 'xavier' in self.kernel_initializer:
if self.batch_size == 1:
imshape = np.asarray([im.shape[0], im.shape[1]])
else:
imshape = np.asarray([im.shape[1], im.shape[2]])
extraShape = int((ks % 2) == 0)
ks2 = int(ks / 2) * 2
outSize = np.prod((imshape - ks2 + extraShape)) * cl
else:
outSize = 0
if i == 0 and len_m == 3:
if self.batch_size == 1:
self.kernelInitializer(i, (ks,ks,im.shape[2],cl), inSize, outSize)
else:
self.kernelInitializer(i, (ks,ks,cl), inSize, outSize)
else:
self.kernelInitializer(i, (ks,ks,cl), inSize, outSize)
k_filters = self.filtersValues[i]
if (type(self.convStride) == list):
stride_par = self.convStride[i]
else:
stride_par = self.convStride
#print('Convolutional layer', i, '\n')
#print('Layer input shape:', im.shape)
#print('Layer filters array shape:', k_filters.shape)
# Start of convolutions
#im = self.conv_filters(im, k_filters, relu=True, stride=stride_par, mode='valid')
filtersValues_shape01 = np.asarray([k_filters.shape[0], k_filters.shape[1]])
filtersValues_shape_d2 = (filtersValues_shape01 / 2).astype(int)
extraShape = (filtersValues_shape01 % 2) == 0
eS0 = extraShape[0].astype(int)
eS1 = extraShape[1].astype(int)
posYf = eS0
posXf = eS1
filter_shape0 = k_filters.shape[0]
filter_shape1 = k_filters.shape[1]
if (len(k_filters.shape) >= 3):
num_filters = k_filters.shape[-1]
else:
num_filters = 1
if self.batch_size == 1:
xshape = np.asarray([im.shape[0], im.shape[1]])
else:
xshape = np.asarray([im.shape[1], im.shape[2]])
output_shape = xshape - filtersValues_shape_d2*2 + eS0
if ((len(im.shape) < len(k_filters.shape)) or (len(im.shape) == 2 and num_filters == 1)):
Xr = np.expand_dims(im, axis=-1)
Xr = np.repeat(Xr, num_filters, axis=-1)
else:
if (len(im.shape) == len(k_filters.shape)):
if self.batch_size == 1:
new_filters = int(im.shape[-1] / num_filters)
Xr = np.repeat(im, new_filters, axis=-1)
else:
Xr = np.expand_dims(im, axis=-1)
Xr = np.repeat(Xr, num_filters, axis=-1)
else:
Xr = im
if (len(Xr.shape) == 2):
npad = ((0,eS0), (0,eS1))
out_s = [output_shape[0], output_shape[1], 1]
elif (len(Xr.shape) == 3):
npad = ((0,eS0), (0,eS1), (0,0))
out_s = [output_shape[0], output_shape[1], num_filters]
elif (len(Xr.shape) == 4):
if self.batch_size == 1:
npad = ((0,eS0), (0,eS1), (0,0), (0,0))
out_s = [output_shape[0], output_shape[1], im.shape[2], num_filters]
else:
npad = ((0,0), (0,eS0), (0,eS1), (0,0))
out_s = [im.shape[0], output_shape[0], output_shape[1], num_filters]
X_pad = np.pad(Xr, npad, 'constant')
out_s[0 if self.batch_size == 1 else 1] = int(np.ceil(out_s[0 if self.batch_size == 1 else 1] / stride_par))
out_s[1 if self.batch_size == 1 else 2] = int(np.ceil(out_s[1 if self.batch_size == 1 else 2] / stride_par))
conv_output = np.zeros(out_s)
if self.batch_size != 1:
k_filters = np.expand_dims(k_filters, axis=0)
k_filters = np.repeat(k_filters, im.shape[0], axis=0)
#print(Xr.shape, X_pad.shape, k_filters.shape, conv_output.shape, output_shape)
for posY in range(0, filter_shape0):
for posX in range(0, filter_shape1):
# valid convolution
if self.batch_size == 1:
conv_output += X_pad[posYf:posYf+output_shape[0]:stride_par, posXf:posXf+output_shape[1]:stride_par] * k_filters[posY, posX]
else:
conv_output += X_pad[:, posYf:posYf+output_shape[0]:stride_par, posXf:posXf+output_shape[1]:stride_par] * k_filters[:, posY, posX].reshape(k_filters.shape[0],1,1,k_filters.shape[3])
posXf = posXf + 1
posYf = posYf + 1
posXf = eS1
# End of convolutions
if self.pre_norm:
ax_f = tuple(range(0,len(conv_output.shape)))
if self.batch_size == 1:
ax_f = ax_f[0:-1]
conv_output = (conv_output - np.mean(conv_output, axis=ax_f)) / (np.std(conv_output, axis=ax_f) + 1e-7)
else:
ax_f = ax_f[1:-1]
conv_output = (conv_output - np.mean(conv_output, axis=ax_f).reshape(conv_output.shape[0],1,1,conv_output.shape[3])) / (np.std(conv_output, axis=ax_f).reshape(conv_output.shape[0],1,1,conv_output.shape[3]) + 1e-7)
#conv_output = (conv_output - conv_output.mean()) / (conv_output.std() + 1e-7)
im = self.ActivationFunction(conv_output, 'relu')
#print('Layer output shape:', im.shape, '\n---------------------\n')
return im
def convLayersBackpropagation(self, last_layer_output, prev_cost):
i = len(self.filtersValues) - 1
last_shape = list(last_layer_output.shape)
if self.batch_size != 1:
batch_el = last_shape[0]
last_shape = last_shape[1:] + [batch_el]
error_by_x = np.reshape(prev_cost, last_shape)
"""
if self.batch_size == 1:
num_filters = last_layer_output.shape[2]
else:
num_filters = last_layer_output.shape[3]
"""
self.log('Start of convLayersBackpropagation:', '\n')
#self.log('prev_cost:', prev_cost.shape, prev_cost, '\n')
#self.log('last_layer_output:', last_layer_output.shape, last_layer_output, '\n')
#self.log('error_by_x:', error_by_x.shape, error_by_x, '\n')
#if self.batch_size != 1:
#error_by_x = np.mean(error_by_x, axis=0)
for k_filters in self.filtersValues[::-1]:
X = self.convInputs[i]
if self.batch_size != 1:
X_batchshape = list(X.shape)
X_batch_elements = X_batchshape[0]
X_batchshape = X_batchshape[1:] + [X_batch_elements]
X = np.reshape(X, X_batchshape)
#X = np.mean(X, axis=0)
# to dilate gradient if needed because of stride
if (type(self.convStride) == list):
stride_par = self.convStride[i]
else:
stride_par = self.convStride
if stride_par != 1:
#erShape = error_by_x.shape[0] * stride_par
erShape = (X.shape[0])
if self.batch_size == 1:
error_by_output = np.zeros((erShape, erShape, self.convFilters[i]), dtype=float)
else:
error_by_output = np.zeros((erShape, erShape, self.convFilters[i], batch_el), dtype=float)
#print(error_by_output.shape, error_by_x.shape)
posI = 0
posJ = 0
erx1 = (error_by_x.shape[0])
erx2 = (error_by_x.shape[1])
# Zero-interweave:
for pe_i in range(0, erx1):
for pe_j in range(0, erx2):
error_by_output[posI, posJ] = error_by_x[pe_i, pe_j]
if (posJ + 2) < erShape:
posJ = posJ + 2
else:
posJ = posJ + 1
if (posI + 2) < erShape:
posI = posI + 2
else:
posI = posI + 1
posJ = 0
else:
# dE/dO
error_by_output = error_by_x
f_rotated = np.flip(self.filtersValues[i], 0)
f_rotated = np.flip(f_rotated, 1)
# dE/dF
#error_by_filter = self.conv_filters(X, error_by_output, relu=False, stride=1, mode='valid')
# dE/dX
#error_by_x = self.conv_filters(f_rotated, error_by_output, relu=False, stride=1, mode='full')
# Start of convolutions
err_output_shape01 = np.asarray([error_by_output.shape[0], error_by_output.shape[1]])
err_out_shape_d2 = (err_output_shape01 / 2).astype(int)
xshape = np.asarray([X.shape[0], X.shape[1]])
fshape = np.asarray([f_rotated.shape[0], f_rotated.shape[1]])
extraShape = (err_output_shape01 % 2) == 0
eS0 = extraShape[0].astype(int)
eS1 = extraShape[1].astype(int)
err_filt_shape = xshape - err_out_shape_d2*2 + eS0
err_x_shape = fshape + err_out_shape_d2*2 + eS0
num_filters = self.filtersValues[i].shape[-1]
#print(error_by_output.shape, xshape, err_output_shape01, err_out_shape_d2*2, eS0, err_filt_shape)
if self.batch_size == 1:
error_by_filter = np.zeros((err_filt_shape[0], err_filt_shape[1], num_filters))
error_by_x = np.zeros((err_x_shape[0], err_x_shape[1], num_filters))
else:
error_by_filter = np.zeros((err_filt_shape[0], err_filt_shape[1], num_filters, X_batch_elements))
error_by_x = np.zeros((err_x_shape[0], err_x_shape[1], num_filters, X_batch_elements))
err_out_shape0 = error_by_output.shape[0]
err_out_shape1 = error_by_output.shape[1]
fil_shape0 = error_by_filter.shape[0]
fil_shape1 = error_by_filter.shape[1]
ex_shape0 = self.filtersValues[i].shape[0]
ex_shape1 = self.filtersValues[i].shape[1]
posYf = eS0
posXf = eS1
if (len(X.shape) < 3):
Xr = np.expand_dims(X, axis=-1)
Xr = np.repeat(Xr, num_filters, axis=-1)
else:
Xr = X
if (len(Xr.shape) == 3):
X_pad = np.pad(Xr, ((0,eS0), (0,eS1), (0,0)), 'constant')
elif (len(Xr.shape) == 4):
X_pad = np.pad(Xr, ((0,eS0), (0,eS1), (0,0), (0,0)), 'constant')
else: # color image with batch
X_pad = np.pad(Xr, ((0,0), (0,eS0), (0,eS1), (0,0), (0,0)), 'constant')
layer_filters = self.filtersValues[i]
if self.batch_size != 1:
layer_filters = np.expand_dims(layer_filters, axis=-1)
layer_filters = np.repeat(layer_filters, X_batch_elements, axis=-1)
#print(X_pad.shape, error_by_output.shape, error_by_filter.shape, self.filtersValues[i].shape, error_by_output.shape, error_by_x.shape)
for posY in range(0, err_out_shape0):
for posX in range(0, err_out_shape1):
# valid convolution (dE/dF)
error_by_filter += X_pad[posYf:posYf+fil_shape0, posXf:posXf+fil_shape1] * error_by_output[posY, posX]
# full convolution (dE/dX)
error_by_x[posYf:posYf+ex_shape0, posXf:posXf+ex_shape1] += layer_filters * error_by_output[posY, posX]
posXf = posXf + 1
posYf = posYf + 1
posXf = eS1
error_by_x = np.flip(error_by_x, 0)
error_by_x = np.flip(error_by_x, 1)
# End of convolutions
#print(X.shape, X_pad.shape, self.filtersValues[i].shape, error_by_filter.shape, error_by_x.shape, error_by_output.shape)
#self.log('error_by_filter:', error_by_filter[:,:,0], '\n\n')
#self.log('prev filtersValues[i]:', self.filtersValues[i][:,:,0], '\n\n')
#self.log('error_by_x:', error_by_x[:,:,0], '\n\n')
if self.batch_size != 1:
error_by_filter = np.mean(error_by_filter, axis=-1)
#if self.pre_norm:
#ax_f = tuple(range(0,len(error_by_filter[i].shape)))[0:-1]
#error_by_filter = (error_by_filter - np.mean(error_by_filter, axis=ax_f)) / (np.std(error_by_filter, axis=ax_f) + 1e-7)
#error_by_filter = (error_by_filter - error_by_filter.mean()) / (error_by_filter.std() + 1e-7)
# Filters update
self.filtersValues[i] = self.filtersValues[i] - self.learningRateConv * error_by_filter
if self.pre_norm:
ax_f = tuple(range(0,len(self.filtersValues[i].shape)))[0:-1]
self.filtersValues[i] = (self.filtersValues[i] - np.mean(self.filtersValues[i], axis=ax_f)) / (np.std(self.filtersValues[i], axis=ax_f) + 1e-7)
#self.log('filtersValues[i] updated:', self.filtersValues[i][:,:,0], '\n\n')
#self.log('\n-----------------------\n')
i = i - 1
self.log('End of convLayersBackpropagation')
def draw(self, showWeights=False, textSize=9, customRadius=0):
plt.figure(figsize=(10,8))
fig = plt.gcf()
ax = fig.gca()
ax.set_xlim(xmin=0, xmax=1)
ax.set_ylim(ymin=0, ymax=1)
xmin, xmax, ymin, ymax = ax.axis()
xdim = xmax - xmin
ydim = ymax - ymin
space_per_layer = xdim / (len(self.hiddenL) + 1)
x0 = xmin
x1 = xmin + space_per_layer
medio_intervalo = space_per_layer / 2
if customRadius <= 0:
radio = 1 / ((sum(self.hiddenL) + self.n_layer0) * 5)
else:
radio = customRadius
lista_lineas_xy = []
lasth = self.n_layer0
for capa,h in enumerate([self.n_layer0] + self.hiddenL):
space_per_neuron = ydim / h
y0 = ymin
y1 = ymin + space_per_neuron
medio_intervalo_n = space_per_neuron / 2
lista_lineas_xy_pre = []
ne = (lasth * h) - 1
neY = h - 1
for j in range(0, h):
ax.add_patch(plt.Circle(((medio_intervalo + x0), (medio_intervalo_n + y0)), radio, color='r'))
neX = lasth - 1
for xy in lista_lineas_xy:
if True: #j == 2:
plt.plot([xy[0],(medio_intervalo + x0)],[xy[1], (medio_intervalo_n + y0)])
#print(capa, ne, self.hiddenWeights[capa-1][ne])
my = ((medio_intervalo_n + y0) - xy[1])
mx = ((medio_intervalo + x0) - xy[0])
pendiente = my / mx
ordenada_origen = xy[1] - pendiente * xy[0]
margen_ord = 0.015
if pendiente < 0:
margen_ord = -0.045 # para compensar la rotacion del texto
ordenada_origen = ordenada_origen + margen_ord # para evitar que el texto salga encima de la linea no sobre ella
# aleatorio entre las x del segmento de la recta (menos un margen para que no salga demasiado cerca de la neurona)
mx2 = random.uniform(xy[0] + 0.04, (medio_intervalo + x0) - 0.04)
my2 = pendiente*mx2 + ordenada_origen
alfa = math.degrees(math.atan(pendiente))
if showWeights:
#print(h, capa-1, neX, neY)
text(mx2, my2, round(self.hiddenWeights[capa-1][neX][neY],3), rotation = alfa, fontsize = textSize)
ne = ne - 1
neX = neX - 1
lista_lineas_xy_pre.append([(medio_intervalo + x0), (medio_intervalo_n + y0)])
neY = neY - 1
y0 = y0 + space_per_neuron
y1 = y1 + space_per_neuron
lasth = h
#print('\n')
x0 = x0 + space_per_layer
x1 = x1 + space_per_layer
#print('-------------\n')
lista_lineas_xy = lista_lineas_xy_pre
plt.show()
def importModel(self, path='', filename='ConvNetAbel_model'):
self.hiddenWeights = np.load(path + filename + '_weights.npy', allow_pickle=True)
mConfig = np.load(path + filename + '_config.npy', allow_pickle=True)
self.n_layer0 = int(mConfig[0])
self.showLogs = bool(mConfig[1])
self.lastLayerNeurons = int(mConfig[2])
self.numEpochs = int(mConfig[3])
self.learningRate = float(mConfig[4])
self.debugMode = int(mConfig[5])
self.softmax = bool(mConfig[6])
self.activationFunction = str(mConfig[7])
self.verbose = bool(mConfig[8])
self.use = str(mConfig[9])
self.batch_size = int(mConfig[10])
self.batch_gradient = str(mConfig[11])
self.batch_mult = int(mConfig[12])
self.dropout = float(mConfig[13])
self.pre_norm = bool(mConfig[14])
self.shuffle = bool(mConfig[15])
self.iterationDrop = float(mConfig[16])
self.version_importedModel = mConfig[17]
self.hiddenL2 = mConfig[18]
self.hiddenL = mConfig[19]
convConfig = np.load(path + filename + '_convConfig.npy', allow_pickle=True)
self.convFilters = convConfig[0]
self.convStride = convConfig[1]
self.convFilterSizes = convConfig[2]
self.kernel_initializer = str(convConfig[3])
self.convEpochs = int(convConfig[4])
self.learningRateConv = float(convConfig[5])
self.filtersValues = np.load(path + filename + '_filtersValues.npy', allow_pickle=True)
if self.debugMode > 0:
self.meanCostByEpoch = np.load(path + filename + '_meanCostByEpoch.npy', allow_pickle=True).tolist()
if self.debugMode > 1:
self.debugWeights = np.load(path + filename + '_debugWeights.npy', allow_pickle=True).tolist()
def exportModel(self, path='', filename='ConvNetAbel_model'):
np.save(path + filename + '_weights.npy', np.asarray(self.hiddenWeights, dtype=object))
mConfig = []
mConfig.append(self.n_layer0)
mConfig.append(self.showLogs)
mConfig.append(self.lastLayerNeurons)
mConfig.append(self.numEpochs)
mConfig.append(self.learningRate)
mConfig.append(self.debugMode)
mConfig.append(self.softmax)
mConfig.append(self.activationFunction)
mConfig.append(self.verbose)
mConfig.append(self.use)
mConfig.append(self.batch_size)
mConfig.append(self.batch_gradient)
mConfig.append(self.batch_mult)
mConfig.append(self.dropout)
mConfig.append(self.pre_norm)
mConfig.append(self.shuffle)
mConfig.append(self.iterationDrop)
mConfig.append(self.version)
mConfig.append(self.hiddenL2)
mConfig.append(self.hiddenL)
mConfig = np.asarray(mConfig, dtype=object)
np.save(path + filename + '_config.npy', mConfig)
convConfig = []
convConfig.append(self.convFilters)
convConfig.append(self.convStride)
convConfig.append(self.convFilterSizes)
convConfig.append(self.kernel_initializer)
convConfig.append(self.convEpochs)
convConfig.append(self.learningRateConv)
convConfig = np.asarray(convConfig, dtype=object)
np.save(path + filename + '_convConfig.npy', convConfig)
np.save(path + filename + '_filtersValues.npy', np.asarray(self.filtersValues, dtype=np.float32))
if self.debugMode > 0:
np.save(path + filename + '_meanCostByEpoch.npy', self.meanCostByEpoch)
if self.debugMode > 1:
np.save(path + filename + '_debugWeights.npy', np.asarray(self.debugWeights, dtype=object))
def log(self, *m):
if self.showLogs:
print(*m)
def printVerbose(self, *m):
if self.verbose:
print(*m)
def initializeWeight(self, n, i, lastN):
if len(self.manualWeights) == 0:
numW = n * lastN
if self.rangeRandomWeight is None:
if self.activationFunction == 'sigmoid':
if self.XavierInitialization == 'normalized': # Normalized Xavier initialization
highVal = math.sqrt(6.0) / math.sqrt(lastN + n)
lowVal = -1 * highVal
mnar = np.random.uniform(low=lowVal, high=highVal, size=(numW,1))
else: # Xavier initialization
mnar = np.random.randn(numW, 1) * math.sqrt(1.0 / lastN)
else:
mnar = np.random.randn(numW, 1) * math.sqrt(2.0 / lastN) # He initialization
else:
highVal = self.rangeRandomWeight[1]
lowVal = self.rangeRandomWeight[0]
mnar = np.random.uniform(low=lowVal, high=highVal, size=(numW,1))
else:
mnar = np.asarray(self.manualWeights[i])
#mnar = mnar.reshape(mnar.shape[0], 1)
#ns = int(mnar.shape[0] / lastN)
#print('ns: ', ns)
mnar = mnar.reshape(lastN, n, order='F')
return mnar
def ActivationFunction(self, x, activ_type='sigmoid'):
if activ_type=='sigmoid':
return 1.0/(1 + np.exp(-1*x))
elif activ_type=='relu':
return np.where(x > 0, x, 0)
elif activ_type=='softplus':
return np.log(1 + np.exp(x))
elif activ_type=='leakyrelu':
return np.where(x > 0, x, 0.01 * x)
elif activ_type=='identity':
return np.copy(x)
else:
x[x>0.5] = 1
x[x<=0.5] = 0
return x
def functionDerivative(self, x, activ_type='sigmoid'):
if activ_type=='sigmoid':
return self.ActivationFunction(x,activ_type) * (1-self.ActivationFunction(x,activ_type))
elif activ_type=='relu':
return np.where(x >= 0, 1, 0)
elif activ_type=='softplus':
return 1.0/(1 + np.exp(-1*x))
elif activ_type=='leakyrelu':
return np.where(x >= 0, 1, 0.01)
elif activ_type=='identity':
return 1
else:
return 1
def cost(self, y_true, y_pred):
c = y_true - y_pred
return c
def softmaxF(self, x):
if np.max(np.abs(x)) < 500: # prevent overflow
expX = np.exp(x)
return expX / np.sum(expX, axis=-1).reshape(-1, 1)
else:
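            # values too large for exp(): fall back to a plain sum-normalisation to avoid overflow
            # (an approximation, not a true softmax)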
return x / np.maximum(1, np.sum(x, axis=-1).reshape(-1, 1))
def pre_norm_forward_FC(self, v_layer):
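        # normalise pre-activations to zero mean / unit variance (per sample when a batch dimension is present)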
if self.batch_size == 1 or len(v_layer.shape) == 1:
v_layer_norm = (v_layer - v_layer.mean()) / (v_layer.std() + 1e-7)
else:
v_layer_norm = ((v_layer.T - np.mean(v_layer, axis=1)) / (np.std(v_layer, axis=1) + 1e-7)).T
return v_layer_norm
def fit(self, x, y):
n_layer0 = -1
self.hiddenL = copy.deepcopy(self.hiddenL2)
hiddenW = [None] * (len(self.hiddenL) + 1)
self.lastLayerNeurons = y.shape[1]
self.hiddenL.append(y.shape[1])
self.convOutputs = []
self.printVerbose('Training started with', x.shape[0], 'samples')
if self.batch_size == 1:
numIterations = x.shape[0]
else:
numIterations = math.ceil(x.shape[0] / self.batch_size)
numIterations = int(numIterations * (1 - self.iterationDrop))
for epochs in range(0, self.numEpochs):
meanCostByEpochE = 0
batch_pos = 0
if epochs < self.convEpochs:
xy_ind = np.arange(x.shape[0])
else:
xy_ind = np.arange(len(self.convOutputs))
if self.shuffle:
np.random.shuffle(xy_ind)
for x_pos in range(0, numIterations):
if epochs < self.convEpochs:
if self.batch_size == 1:
c_positions = xy_ind[x_pos]
else:
if (batch_pos + self.batch_size) < xy_ind.shape[0]:
c_positions = xy_ind[batch_pos:batch_pos+self.batch_size]
else:
c_positions = xy_ind[batch_pos:]
x_val = x[c_positions]
x_val_batch_s = x_val.shape[0]
last_layer_output = self.convLayersFeedForward(x_val)
x_val = last_layer_output.flatten()
if self.batch_size != 1:
x_val = x_val.reshape(x_val_batch_s, int(x_val.shape[0] / x_val_batch_s))
if epochs == (self.convEpochs - 1):
self.convOutputs.append([x_val, c_positions])
else:
x_val, c_positions = self.convOutputs[xy_ind[x_pos]]
#self.log('x_val:', x_val.shape, x_val)
#print(x_val.shape)
if n_layer0 == -1:
if self.batch_size == 1:
n_layer0 = x_val.shape[0]
else:
n_layer0 = x_val.shape[1]
self.n_layer0 = n_layer0
v_layer = x_val
lastN = n_layer0
layerValues = []
preActivateValues = []
f_vlayer = self.ActivationFunction(v_layer, 'identity')
layerValues.append(f_vlayer)
preActivateValues.append(v_layer)
f_vlayer = v_layer
dropout_values = []
for i, hiddenLayer in enumerate(self.hiddenL):
entries = hiddenLayer * lastN
if hiddenW[i] is None:
hiddenW[i] = self.initializeWeight(hiddenLayer, i, lastN) # Initialize weights
valuesForPerc = int(entries / hiddenLayer)
firstPos = 0
lastPos = valuesForPerc
self.log('x_j: ', f_vlayer)
self.log('w_j: ', hiddenW[i])
v_layer = f_vlayer.dot(hiddenW[i])
if self.pre_norm and (i < (len(self.hiddenL) - 1)):
v_layer = self.pre_norm_forward_FC(v_layer)
if self.dropout != 0 and (i < (len(self.hiddenL) - 1)):
dropout_v = np.random.binomial(1, 1-self.dropout, size=hiddenLayer) / (1-self.dropout)
v_layer = v_layer * dropout_v
dropout_values.append(dropout_v)
self.log('net_j:', v_layer, '\n')
if (i == (len(self.hiddenL) - 1)):
if(self.softmax):
f_vlayer = self.softmaxF(v_layer).reshape(-1)
else:
if self.use == 'classification':
f_vlayer = self.ActivationFunction(v_layer, 'sigmoid') # use sigmoid on last layer if classification
else:
f_vlayer = self.ActivationFunction(v_layer, 'identity') # use identity on last layer if regression
else:
f_vlayer = self.ActivationFunction(v_layer, self.activationFunction)#.reshape(-1)
layerValues.append(f_vlayer)
preActivateValues.append(v_layer)
v_layer = f_vlayer
self.log('f(net_j):', f_vlayer, '\n')
lastN = hiddenLayer
coste_anterior = None
i = len(self.hiddenL) - 1
#print(f_vlayer)
"""
if(self.softmax):
f_vlayer = self.softmaxF(f_vlayer).reshape(-1)
self.log('f_vlayer (Softmax output):', f_vlayer)
#print(f_vlayer)
"""
#print(f_vlayer, '\n\n')
self.log('-----------------\nBackPropagation: \n')
# backpropagation:
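                # Walk the layers in reverse: the output-layer delta is computed first, then each
                # hidden layer's delta is derived from the next layer's delta and weights while the
                # weights themselves are updated with the learning rate.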
for hiddenLayer in ([n_layer0] + self.hiddenL)[::-1]:
self.log('Neurons in this layer: ', hiddenLayer)
#print('i: ', i, '\n')
if coste_anterior is None:
if(self.softmax):
derivf_coste = self.functionDerivative(v_layer, self.activationFunction)
else:
if self.use == 'classification':
derivf_coste = self.functionDerivative(v_layer, 'sigmoid')
else:
derivf_coste = self.functionDerivative(v_layer, 'identity')
f_cost = self.cost(y[c_positions], f_vlayer)
#if self.batch_size != 1:
#f_cost = f_cost / v_layer.shape[0]
coste = f_cost * derivf_coste
if self.batch_size != 1:
batch_pos = batch_pos + self.batch_size
#coste = coste.reshape(-1)
#coste = coste.reshape(coste.shape[0], 1)
#if self.batch_size != 1:
#coste = np.sum(coste, axis=0)
#derivf_coste = np.sum(derivf_coste, axis=0)
if self.debugMode > 0:
meanCostByEpochE = meanCostByEpochE + (abs(coste) if self.batch_size == 1 else np.mean(np.absolute(coste), axis=0))
if self.debugMode > 2:
self.costs.append(coste)
self.log('derivf_coste: ', derivf_coste, 'cost: ', coste, '\n')
else:
entries = hiddenLayer * nextN
valuesForPerc = int(entries / hiddenLayer)
firstPos = 0
lastPos = valuesForPerc
#coste = []
#coste = np.zeros(shape=(hiddenLayer))
self.log('prev_error: ', coste_anterior)
pesos_salientes = hiddenW[i+1].T
#print('hiddenW[i+1][j::hiddenLayer]: ', pesos_salientes)
preActivateValueM = preActivateValues[i+1]
preDeriv = self.functionDerivative(preActivateValueM, self.activationFunction)
self.log('preDeriv: ', preDeriv)
                        costeA = coste_anterior.dot(pesos_salientes) # error propagated back through the weights leaving this neuron
#costeA = np.asarray(costeA)
self.log("preCostA: ", costeA)
costeA = costeA * (preDeriv)
#costeA = costeA.reshape(-1)
#costeA = costeA.T
if self.dropout != 0 and i > -1: # dropout is not done on input layer
costeA = costeA * dropout_values[i]
self.log('costA: ', costeA)
layerValueM = layerValues[i+1]
#print("coste_anterior.shape: ", coste_anterior.shape)
self.log("layer values: ", layerValueM)
if self.batch_gradient == 'sum':
preT1 = coste_anterior.reshape((1 if self.batch_size==1 else coste_anterior.shape[0]), (coste_anterior.shape[0] if self.batch_size==1 else coste_anterior.shape[1]))
preT2 = layerValueM.reshape((layerValueM.shape[0] if self.batch_size==1 else layerValueM.shape[1]), (1 if self.batch_size==1 else layerValueM.shape[0]))
elif self.batch_size == 1:
preT1 = coste_anterior.reshape(1, coste_anterior.shape[0])
preT2 = layerValueM.reshape(layerValueM.shape[0], 1)
else:
preT1 = np.mean(coste_anterior, axis=0)
preT1 = preT1.reshape(1, preT1.shape[0])
preT2 = np.mean(layerValueM, axis=0)
preT2 = preT2.reshape(preT2.shape[0], 1)
pre = preT2.dot(preT1)
#if self.batch_size != 1:
#pre = pre * (1.0 / layerValueM.shape[0])
pre = pre * self.learningRate
self.log('pre: ', pre, '\n')
self.log('Old weight: ', hiddenW[i+1])
hiddenW[i+1] = (hiddenW[i+1] + pre)
self.log('New weight: ', hiddenW[i+1], '\n\n')
coste = costeA
self.log('\n\n')
#coste = coste.reshape(-1)
#print(coste.shape)
#if len(coste.shape) == 3:
#coste = coste.reshape(coste.shape[0] * coste.shape[1], coste.shape[2])
#print('Coste: ' , coste, coste.shape)
#print("\n\n")
coste_anterior = coste
nextN = hiddenLayer
i = i - 1
#print('------------------')
#print('\n\nNuevos pesos: ', hiddenW)
#print('Coste anterior shape: ', coste_anterior.shape)
if epochs < self.convEpochs: # because of resources limitations
self.convLayersBackpropagation(last_layer_output, coste_anterior)
self.printVerbose('\nEpoch', str(epochs+1) + '/' + str(self.numEpochs), 'completed')
if self.debugMode > 0:
self.meanCostByEpoch.append(meanCostByEpochE / numIterations)
self.printVerbose('--- Epoch loss:', round(np.mean(self.meanCostByEpoch[-1]),4))
if self.debugMode > 1:
self.debugWeights.append(copy.deepcopy(hiddenW))
self.batch_size = int(self.batch_size * self.batch_mult)
self.hiddenWeights = hiddenW
#print('\n\nNuevos pesos: ', hiddenW)
self.printVerbose('\n\nTraining finished\n\n')
return self
def predict(self, x, noProba=1):
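        # Inference: forward pass only (convolutional layers followed by the fully connected
        # layers) using the weights learned in fit().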
n_layer0 = -1
layerValues = np.zeros(shape=(x.shape[0],self.lastLayerNeurons))
batch_pos = 0
if self.batch_size == 1:
numIterations = x.shape[0]
else:
numIterations = math.ceil(x.shape[0] / self.batch_size)
for x_pos in range(0, numIterations):
if self.batch_size == 1:
x_val = x[x_pos]
else:
if (batch_pos + self.batch_size) < x.shape[0]:
x_val = x[batch_pos:batch_pos+self.batch_size]
else:
x_val = x[batch_pos:]
x_val_batch_s = x_val.shape[0]
#for x_pos, x_val in enumerate(x):
x_val = self.convLayersFeedForward(x_val).flatten()
if self.batch_size != 1:
x_val = x_val.reshape(x_val_batch_s, int(x_val.shape[0] / x_val_batch_s))
if n_layer0 == -1:
n_layer0 = x_val.shape[0]
self.n_layer0 = n_layer0
v_layer = x_val
lastN = n_layer0
f_vlayer = self.ActivationFunction(v_layer, 'identity')
for i, hiddenLayer in enumerate(self.hiddenL):
entries = hiddenLayer * lastN
valuesForPerc = int(entries / hiddenLayer)
firstPos = 0
lastPos = valuesForPerc
v_layer = f_vlayer.dot(self.hiddenWeights[i])
if self.pre_norm and (i < (len(self.hiddenL) - 1)):
v_layer = self.pre_norm_forward_FC(v_layer)
if (i == (len(self.hiddenL) - 1)):
if(self.softmax):
f_vlayer = self.softmaxF(v_layer).reshape(-1)
else:
if self.use == 'classification':
f_vlayer = self.ActivationFunction(v_layer, 'sigmoid') # use sigmoid on last layer if classification
else:
f_vlayer = self.ActivationFunction(v_layer, 'identity') # use identity on last layer if regression
else:
f_vlayer = self.ActivationFunction(v_layer, self.activationFunction)#.reshape(-1)
v_layer = f_vlayer
lastN = hiddenLayer
if self.batch_size == 1:
layerValues[x_pos] = f_vlayer
else:
if (batch_pos + self.batch_size) < x.shape[0]:
layerValues[batch_pos:batch_pos+self.batch_size] = f_vlayer
else:
layerValues[batch_pos:] = f_vlayer
batch_pos = batch_pos + self.batch_size
"""
if(self.softmax):
layerValues = self.softmaxF(layerValues)
"""
if noProba==1:
if self.use == 'classification':
                return self.ActivationFunction(layerValues, 2).astype(int) # activ_type 2 matches no named activation, so outputs are thresholded at 0.5
else:
return layerValues
else:
return layerValues
def predict_proba(self, x):
return self.predict(x, 0)
def plot_mean_error_last_layer(self, customLabels=[], byClass=False):
if self.debugMode > 0:
meancost = np.asarray(self.meanCostByEpoch)
if len(meancost.shape) > 1 and not byClass:
meancost = np.mean(meancost, axis=1)
ptitle = 'Last layer mean error by epoch'
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(range(0, meancost.shape[0]), meancost)
ax.set(xlabel='Epoch', ylabel='Mean error', title=ptitle)
ax.grid()
if len(meancost.shape) > 1:
if meancost.shape[1] > 1:
if len(customLabels) == 0:
neur = [("Neuron " + str(i)) for i in range(0, meancost.shape[1])]
else:
neur = customLabels
plt.legend(neur, loc="upper right")
plt.show()
else:
print('ConvNet debug mode must be level 1 or higher')
def plot_weights_by_epoch(self, max_weights=-1):
if self.debugMode > 1:
dw = self.debugWeights
dwx = dw[0][len(dw[0]) - 1][:]
fig, ax = plt.subplots(figsize=(8,6))
ygrafico = {}
for jposH, posH in enumerate(range(0, len(dw))): # for each epoch
dwF = dw[jposH][len(dw[0]) - 1][:]
#print(dwF.shape)
for posg, neu in enumerate(dwF):
#print(neu.shape)
if posg in ygrafico:
ygrafico[posg].append(neu[0])
else:
ygrafico[posg] = [neu[0]]
if max_weights == -1:
for ygrafico2 in ygrafico.values():
ax.plot(range(0, len(ygrafico2)), ygrafico2)
else:
if max_weights < 1:
print('max_weights must be bigger than 0')
elif max_weights > len(ygrafico.values()):
                    print('max_weights must not exceed the total number of weights in the last layer')
else:
ygrafico3 = []
# Gets the weights that have changed the most from beginning to end.
for yi, ygrafico2 in enumerate(ygrafico.values()):
a = abs(ygrafico[yi][0] - ygrafico[yi][-1])
#print(ygrafico[yi][0], a)
ygrafico3.append([ygrafico2, a])
for ygrafico4 in sorted(ygrafico3, key=lambda tupval: -1*tupval[1])[0:max_weights]:
#print(ygrafico4)
plt.plot(range(0, len(ygrafico4[0])), ygrafico4[0])
ax.set(xlabel='Epoch', ylabel='Weight', title='Last layer weights by epoch')
ax.grid()
plt.show()
else:
print('ConvNet debug mode must be level 2 or higher')
| 37.590066
| 233
| 0.448847
| 56,511
| 0.995596
| 0
| 0
| 0
| 0
| 0
| 0
| 7,020
| 0.123676
|
97ef61709a2ecbbabd5edf5fdc1f79875ed56c5b
| 1,365
|
py
|
Python
|
trading_ig/config.py
|
schwankner/ig-markets-api-python-library
|
7a6add860e0abefcc252da232524e8ad0be86692
|
[
"BSD-3-Clause"
] | 1
|
2021-03-01T09:51:59.000Z
|
2021-03-01T09:51:59.000Z
|
trading_ig/config.py
|
schwankner/ig-markets-api-python-library
|
7a6add860e0abefcc252da232524e8ad0be86692
|
[
"BSD-3-Clause"
] | null | null | null |
trading_ig/config.py
|
schwankner/ig-markets-api-python-library
|
7a6add860e0abefcc252da232524e8ad0be86692
|
[
"BSD-3-Clause"
] | 1
|
2022-01-04T21:17:10.000Z
|
2022-01-04T21:17:10.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import logging
ENV_VAR_ROOT = "IG_SERVICE"
CONFIG_FILE_NAME = "trading_ig_config.py"
logger = logging.getLogger(__name__)
class ConfigEnvVar(object):
def __init__(self, env_var_base):
self.ENV_VAR_BASE = env_var_base
def _env_var(self, key):
return(self.ENV_VAR_BASE + "_" + key.upper())
def get(self, key, default_value=None):
env_var = self._env_var(key)
return(os.environ.get(env_var, default_value))
def __getattr__(self, key):
env_var = self._env_var(key)
try:
return(os.environ[env_var])
except KeyError:
raise Exception("Environment variable '%s' doesn't exist"
% env_var)
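# Configuration lookup order: a local trading_ig_config.py module first, then the
# IG_SERVICE_* environment variables.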
try:
from trading_ig_config import config
logger.info("import config from %s" % CONFIG_FILE_NAME)
except Exception:
logger.warning("can't import config from config file")
try:
config = ConfigEnvVar(ENV_VAR_ROOT)
logger.info("import config from environment variables '%s_...'"
% ENV_VAR_ROOT)
except Exception:
logger.warning("can't import config from environment variables")
raise("""Can't import config - you might create a '%s' filename or use
environment variables such as '%s_...'""" % (CONFIG_FILE_NAME, ENV_VAR_ROOT))
| 29.673913
| 78
| 0.650549
| 584
| 0.427839
| 0
| 0
| 0
| 0
| 0
| 0
| 387
| 0.283516
|
97ef67beb062520b730797c508d9465eec545451
| 6,434
|
py
|
Python
|
train.py
|
jmlipman/MedicDeepLabv3Plus
|
4eb5c6c21765db24502d434d01c0ee9b9fd66b27
|
[
"MIT"
] | 1
|
2021-11-23T16:41:24.000Z
|
2021-11-23T16:41:24.000Z
|
train.py
|
jmlipman/MedicDeepLabv3Plus
|
4eb5c6c21765db24502d434d01c0ee9b9fd66b27
|
[
"MIT"
] | null | null | null |
train.py
|
jmlipman/MedicDeepLabv3Plus
|
4eb5c6c21765db24502d434d01c0ee9b9fd66b27
|
[
"MIT"
] | 1
|
2021-09-08T02:02:11.000Z
|
2021-09-08T02:02:11.000Z
|
# Example usage:
# python train.py --gpu 0 --epochs 10 --input /home/miguelv/data/in/train/ --output /home/miguelv/data/out/delete/test/25/
import os, time, argparse, torch, json
import numpy as np
import nibabel as nib
from lib.utils import *
from lib.losses import Loss
from torch.utils.data import DataLoader
from datetime import datetime
from lib.models.MedicDeepLabv3Plus import MedicDeepLabv3Plus
from lib.data.DataWrapper import DataWrapper
def get_arguments():
"""Gets (and parses) the arguments from the command line.
Args:
`args`: If None, it takes the arguments from the command line.
Else, it will parse `args` (used for testing with sacred)
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
# Data
parser.add_argument("--input", type=str, required=True,
help="Directory with the data for optimizing MedicDeepLabv3+")
# Training
parser.add_argument("--epochs", type=int, default=300,
help="Epochs. If 0: only evaluate")
parser.add_argument("--batch_size", type=int, default=1,
help="Batch size")
parser.add_argument("--lr", type=float, default="1e-4",
help="Learning rate")
parser.add_argument("--wd", type=float, default="0",
help="Weight decay")
parser.add_argument("--filters", type=int, default=32,
help="Number of filters (fewer filters -> lower GPU requirements)")
# Validation
parser.add_argument("--validation", type=str, default="",
help="Directory with the data for validation")
parser.add_argument("--val_interval", type=int, default=1,
help="After how many epochs data is validated")
parser.add_argument("--val_metrics", type=str, default="dice",
help="List of metrics to measure during validation")
# Other
parser.add_argument("--output", type=str, required=True,
help="Output directory (if it doesn't exist, it will create it)")
parser.add_argument("--gpu", type=int, default=0, dest="device",
help="GPU Device. Write -1 if no GPU is available")
parser.add_argument("--model_state", type=str, default="",
help="File that contains the saved parameters of the model")
parsed = parser.parse_args()
# --input
if not os.path.isdir(parsed.input):
raise Exception("The input folder `" + parsed.input + "` does not exist")
# --output
if os.path.exists(parsed.output):
if os.path.isfile(parsed.output):
raise Exception("The provided path for the --output `" + parsed.output + "` corresponds to an existing file. Provide a non-existing path or a folder.")
elif os.path.isdir(parsed.output):
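            # Reuse an existing output folder by creating the next numbered run subfolder
            # (1, 2, 3, ...) inside it.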
files = [int(f) for f in os.listdir(parsed.output) if f.isdigit()]
parsed.output = os.path.join(parsed.output, str(len(files)+1), "")
os.makedirs(parsed.output)
else:
raise Exception("The provided path for the --output `" + parsed.output + "` is invalid. Provide a non-existing path or a folder.")
else:
parsed.output = os.path.join(parsed.output, "1", "")
os.makedirs(parsed.output)
# --validation
if parsed.validation != "" and not os.path.isdir(parsed.validation):
raise Exception("The validaiton folder `" + parsed.validation + "` does not exist")
if parsed.validation == "":
print("> Note: No validation data was provided, so validation won't be done during MedicDeepLabv3+ optimization")
# --gpu
if parsed.device >= torch.cuda.device_count():
if torch.cuda.device_count() == 0:
print("> No available GPUs. Add --gpu -1 to not use GPU. NOTE: This may take FOREVER to run.")
else:
print("> Available GPUs:")
for i in range(torch.cuda.device_count()):
print(" > GPU #"+str(i)+" ("+torch.cuda.get_device_name(i)+")")
raise Exception("The GPU #"+str(parsed.device)+" does not exist. Check available GPUs.")
if parsed.device > -1:
parsed.device = "cuda:"+str(parsed.device)
else:
parsed.device = "cpu"
# Metrics to be evaluated during evaluation
allowed_metrics = ["dice", "HD", "compactness"]
# Metrics to be evaluated during validation
parsed.val_metrics = parsed.val_metrics.split(",")
for m in parsed.val_metrics:
if not m in allowed_metrics:
raise Exception("Wrong --val_metrics: "+str(m)+". Only allowed: "+str(allowed_metrics))
return parsed
def main(args):
log("Start training MedicDeepLabv3+", args.output)
# Creates the folder where the models will be saved
os.makedirs(args.output + "model")
# Parameters required to initialize the model
model = MedicDeepLabv3Plus(modalities=1, n_classes=3, first_filters=args.filters)
model.initialize(device=args.device, output=args.output,
model_state=args.model_state)
# Dataloaders
tr_data = DataWrapper(args.input, "train")
val_data = DataWrapper(args.validation, "val")
if len(tr_data) > 0 and args.epochs > 0:
# DataLoaders
tr_loader = DataLoader(tr_data, batch_size=args.batch_size,
shuffle=True, pin_memory=False, num_workers=6)
if len(val_data) > 0:
val_loader = DataLoader(val_data, batch_size=args.batch_size,
shuffle=False, pin_memory=False, num_workers=6)
else:
val_loader = [] # So that len(val_loader) = 0
# Loss function
loss = Loss("CrossEntropyDiceLoss_multiple") # Deep supervision
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
weight_decay=args.wd)
# Train the model
model.fit(tr_loader=tr_loader, val_loader=val_loader,
epochs=args.epochs, val_interval=args.val_interval,
loss=loss, val_metrics=args.val_metrics, opt=optimizer)
log("End", args.output)
if __name__ == "__main__":
# Get command-line arguments
args = get_arguments()
# Train MedicDeepLabv3+
main(args)
| 38.526946
| 164
| 0.63553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,249
| 0.349549
|
97efd3b3f7f5f7bf285460221c0433426399a499
| 2,053
|
py
|
Python
|
src/graph_util.py
|
oonat/inverse-distance-weighted-trust-based-recommender
|
3f559f3e7dbc565da373f6297362ddf307b2d0ec
|
[
"BSD-3-Clause"
] | null | null | null |
src/graph_util.py
|
oonat/inverse-distance-weighted-trust-based-recommender
|
3f559f3e7dbc565da373f6297362ddf307b2d0ec
|
[
"BSD-3-Clause"
] | null | null | null |
src/graph_util.py
|
oonat/inverse-distance-weighted-trust-based-recommender
|
3f559f3e7dbc565da373f6297362ddf307b2d0ec
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from toml_parser import Parser
from scipy.sparse.csgraph import dijkstra, csgraph_from_dense
from sklearn.metrics.pairwise import nan_euclidean_distances
from math import sqrt
class Graph(object):
def __init__(self, transactions, weighted=True):
config = Parser("config.toml").load()
self._max_distance = \
config["graph"]["max_distance"]
self._transactions = transactions
self._weighted = weighted
self._create_customer_trust_matrix()
def _create_adjacency_matrix(self):
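        # Weighted mode: edge weights are 1 + the Euclidean distance between two customers'
        # transaction vectors (zeros are treated as missing values). Unweighted mode: a boolean
        # edge links any two customers that share at least one purchased item.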
if self._weighted:
self._adjacency_matrix = nan_euclidean_distances(self._transactions, self._transactions, missing_values=0)
"""
self._adjacency_matrix /= sqrt(self._transactions.shape[1])
"""
self._adjacency_matrix[~np.isnan(self._adjacency_matrix)] += 1
else:
self._adjacency_matrix = np.zeros(
(self._transactions.shape[0], self._transactions.shape[0]),
                dtype=bool,
)
list_of_neighbour_customers = [ np.nonzero(t)[0] for t in self._transactions.T ]
for neighbour_customers in list_of_neighbour_customers:
for i in range(neighbour_customers.shape[0]):
self._adjacency_matrix[neighbour_customers[i], neighbour_customers[i+1:]] = \
self._adjacency_matrix[neighbour_customers[i+1:], neighbour_customers[i]] = True
def _create_distance_matrix(self):
self._create_adjacency_matrix()
if self._weighted:
adjacency_csgraph = csgraph_from_dense(self._adjacency_matrix, null_value=np.nan)
self._distance_matrix = \
dijkstra(csgraph=adjacency_csgraph,
directed=False,
limit=self._max_distance)
else:
self._distance_matrix = \
dijkstra(csgraph=self._adjacency_matrix,
directed=False,
unweighted= True,
limit=self._max_distance)
self._distance_matrix[~np.isfinite(self._distance_matrix)] = 0
def _create_customer_trust_matrix(self):
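        # Trust between two customers is the reciprocal of their shortest-path distance;
        # pairs with no path within max_distance (distance 0) keep a trust of 0.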
self._create_distance_matrix()
self._customer_trust_matrix = \
np.reciprocal(self._distance_matrix, out=np.zeros_like(self._distance_matrix), where=self._distance_matrix!=0)
| 26.320513
| 113
| 0.754506
| 1,856
| 0.904043
| 0
| 0
| 0
| 0
| 0
| 0
| 107
| 0.052119
|
97efd442d5baa89669000d346b5c499ecd9f4c0b
| 203
|
py
|
Python
|
qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py
|
mike0164/scikit-rf
|
0af25754b097ee24089ea7e0eacde426a51df563
|
[
"BSD-3-Clause"
] | 379
|
2015-01-25T12:19:19.000Z
|
2022-03-29T14:01:07.000Z
|
qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py
|
mike0164/scikit-rf
|
0af25754b097ee24089ea7e0eacde426a51df563
|
[
"BSD-3-Clause"
] | 456
|
2015-01-06T19:15:55.000Z
|
2022-03-31T06:42:57.000Z
|
qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py
|
mike0164/scikit-rf
|
0af25754b097ee24089ea7e0eacde426a51df563
|
[
"BSD-3-Clause"
] | 211
|
2015-01-06T17:14:06.000Z
|
2022-03-31T01:36:00.000Z
|
from skrf.vi.vna import rs_zva
class Analyzer(rs_zva.ZVA):
DEFAULT_VISA_ADDRESS = "GPIB::16::INSTR"
NAME = "Rhode & Schwartz ZVA"
NPORTS = 4
NCHANNELS = 32
SCPI_VERSION_TESTED = ''
| 20.3
| 44
| 0.665025
| 169
| 0.832512
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.20197
|
97efe95631dbd9f43d8fc44a21511eb903a34116
| 1,507
|
py
|
Python
|
rules/taxonomic_classification/utils.py
|
dahak-metagenomics/taco-taxonomic-classification
|
854cae4f1b2427746a1faa6a0e0aefbfb11c5523
|
[
"BSD-3-Clause"
] | null | null | null |
rules/taxonomic_classification/utils.py
|
dahak-metagenomics/taco-taxonomic-classification
|
854cae4f1b2427746a1faa6a0e0aefbfb11c5523
|
[
"BSD-3-Clause"
] | null | null | null |
rules/taxonomic_classification/utils.py
|
dahak-metagenomics/taco-taxonomic-classification
|
854cae4f1b2427746a1faa6a0e0aefbfb11c5523
|
[
"BSD-3-Clause"
] | null | null | null |
def container_image_is_external(biocontainers, app):
"""
Return a boolean: is this container going to be run
using an external URL (quay.io/biocontainers),
or is it going to use a local, named Docker image?
"""
d = biocontainers[app]
if (('use_local' in d) and (d['use_local'] is True)):
# This container does not use an external url
return False
else:
# This container uses a quay.io url
return True
def container_image_name(biocontainers, app):
"""
Get the name of a container image for app,
using params dictionary biocontainers.
Verification:
- Check that the user provides 'local' if 'use_local' is True
- Check that the user provides both 'quayurl' and 'version'
"""
    if container_image_is_external(biocontainers, app):
        try:
            qurl = biocontainers[app]['quayurl']
            qvers = biocontainers[app]['version']
            return qurl + ":" + qvers
        except KeyError:
            err = "Error: quay.io URL for %s biocontainer "%(app)
            err += "could not be determined"
            raise Exception(err)
else:
try:
return biocontainers[app]['local']
except KeyError:
err = "Error: the parameters provided specify a local "
err += "container image should be used for %s, but none "%(app)
err += "was specified using the 'local' key."
raise Exception(err)
| 33.488889
| 75
| 0.606503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 756
| 0.501659
|
97f060a2b95bbc614a022bf67e45afe532ebb45d
| 37,531
|
py
|
Python
|
Contents/Libraries/Shared/guessit/rules/properties/episodes.py
|
slvxstar/Kinopoisk.bundle
|
dcb96c870c3a96fcf33b8d13d79d47f0a7cbf5fb
|
[
"MIT"
] | 7
|
2021-02-11T08:03:00.000Z
|
2022-01-23T22:33:32.000Z
|
Contents/Libraries/Shared/guessit/rules/properties/episodes.py
|
slvxstar/Kinopoisk.bundle
|
dcb96c870c3a96fcf33b8d13d79d47f0a7cbf5fb
|
[
"MIT"
] | null | null | null |
Contents/Libraries/Shared/guessit/rules/properties/episodes.py
|
slvxstar/Kinopoisk.bundle
|
dcb96c870c3a96fcf33b8d13d79d47f0a7cbf5fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
episode, season, disc, episode_count, season_count and episode_details properties
"""
import copy
from collections import defaultdict
from rebulk import Rebulk, RemoveMatch, Rule, AppendMatch, RenameMatch
from rebulk.match import Match
from rebulk.remodule import re
from rebulk.utils import is_iterable
from .title import TitleFromPosition
from ..common import dash, alt_dash, seps, seps_no_fs
from ..common.formatters import strip
from ..common.numeral import numeral, parse_numeral
from ..common.pattern import is_disabled
from ..common.validators import compose, seps_surround, seps_before, int_coercable
from ...reutils import build_or_pattern
def episodes(config):
"""
Builder for rebulk object.
:param config: rule configuration
:type config: dict
:return: Created Rebulk object
:rtype: Rebulk
"""
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
def is_season_episode_disabled(context):
"""Whether season and episode rules should be enabled."""
return is_disabled(context, 'episode') or is_disabled(context, 'season')
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'])
episode_max_range = config['episode_max_range']
season_max_range = config['season_max_range']
def episodes_season_chain_breaker(matches):
"""
        Break chains if the gap between two neighboring episode or season values exceeds the configured maximum range.
:param matches:
:type matches:
:return:
:rtype:
"""
eps = matches.named('episode')
if len(eps) > 1 and abs(eps[-1].value - eps[-2].value) > episode_max_range:
return True
seasons = matches.named('season')
if len(seasons) > 1 and abs(seasons[-1].value - seasons[-2].value) > season_max_range:
return True
return False
rebulk.chain_defaults(chain_breaker=episodes_season_chain_breaker)
def season_episode_conflict_solver(match, other):
"""
Conflict solver for episode/season patterns
:param match:
:param other:
:return:
"""
if match.name != other.name:
if match.name == 'episode' and other.name == 'year':
return match
if match.name in ('season', 'episode'):
if other.name in ('video_codec', 'audio_codec', 'container', 'date'):
return match
if (other.name == 'audio_channels' and 'weak-audio_channels' not in other.tags
and not match.initiator.children.named(match.name + 'Marker')) or (
other.name == 'screen_size' and not int_coercable(other.raw)):
return match
if other.name in ('season', 'episode') and match.initiator != other.initiator:
if (match.initiator.name in ('weak_episode', 'weak_duplicate')
and other.initiator.name in ('weak_episode', 'weak_duplicate')):
return '__default__'
for current in (match, other):
if 'weak-episode' in current.tags or 'x' in current.initiator.raw.lower():
return current
return '__default__'
season_words = config['season_words']
episode_words = config['episode_words']
of_words = config['of_words']
all_words = config['all_words']
season_markers = config['season_markers']
season_ep_markers = config['season_ep_markers']
disc_markers = config['disc_markers']
episode_markers = config['episode_markers']
range_separators = config['range_separators']
weak_discrete_separators = list(sep for sep in seps_no_fs if sep not in range_separators)
strong_discrete_separators = config['discrete_separators']
discrete_separators = strong_discrete_separators + weak_discrete_separators
max_range_gap = config['max_range_gap']
def ordering_validator(match):
"""
Validator for season list. They should be in natural order to be validated.
episode/season separated by a weak discrete separator should be consecutive, unless a strong discrete separator
or a range separator is present in the chain (1.3&5 is valid, but 1.3-5 is not valid and 1.3.5 is not valid)
"""
values = match.children.to_dict()
if 'season' in values and is_iterable(values['season']):
# Season numbers must be in natural order to be validated.
if not list(sorted(values['season'])) == values['season']:
return False
if 'episode' in values and is_iterable(values['episode']):
# Season numbers must be in natural order to be validated.
if not list(sorted(values['episode'])) == values['episode']:
return False
def is_consecutive(property_name):
"""
Check if the property season or episode has valid consecutive values.
:param property_name:
:type property_name:
:return:
:rtype:
"""
previous_match = None
valid = True
for current_match in match.children.named(property_name):
if previous_match:
match.children.previous(current_match,
lambda m: m.name == property_name + 'Separator')
separator = match.children.previous(current_match,
lambda m: m.name == property_name + 'Separator', 0)
if separator.raw not in range_separators and separator.raw in weak_discrete_separators:
if not 0 < current_match.value - previous_match.value <= max_range_gap + 1:
valid = False
if separator.raw in strong_discrete_separators:
valid = True
break
previous_match = current_match
return valid
return is_consecutive('episode') and is_consecutive('season')
# S01E02, 01x02, S01S02S03
rebulk.chain(formatter={'season': int, 'episode': int},
tags=['SxxExx'],
abbreviations=[alt_dash],
children=True,
private_parent=True,
validate_all=True,
validator={'__parent__': ordering_validator},
conflict_solver=season_episode_conflict_solver,
disabled=is_season_episode_disabled) \
.regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)@?' +
build_or_pattern(episode_markers + disc_markers, name='episodeMarker') + r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}).repeater('+') \
.regex(build_or_pattern(episode_markers + disc_markers + discrete_separators + range_separators,
name='episodeSeparator',
escape=True) +
r'(?P<episode>\d+)').repeater('*') \
.chain() \
.regex(r'(?P<season>\d+)@?' +
build_or_pattern(season_ep_markers, name='episodeMarker') +
r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.chain() \
.regex(r'(?P<season>\d+)@?' +
build_or_pattern(season_ep_markers, name='episodeMarker') +
r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.regex(build_or_pattern(season_ep_markers + discrete_separators + range_separators,
name='episodeSeparator',
escape=True) +
r'(?P<episode>\d+)').repeater('*') \
.chain() \
.regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.regex(build_or_pattern(season_markers + discrete_separators + range_separators,
name='seasonSeparator',
escape=True) +
r'(?P<season>\d+)').repeater('*')
# episode_details property
for episode_detail in ('Special', 'Pilot', 'Unaired', 'Final'):
rebulk.string(episode_detail, value=episode_detail, name='episode_details',
disabled=lambda context: is_disabled(context, 'episode_details'))
def validate_roman(match):
"""
Validate a roman match if surrounded by separators
:param match:
:type match:
:return:
:rtype:
"""
if int_coercable(match.raw):
return True
return seps_surround(match)
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'],
validate_all=True, validator={'__parent__': seps_surround}, children=True, private_parent=True,
conflict_solver=season_episode_conflict_solver)
rebulk.chain(abbreviations=[alt_dash],
formatter={'season': parse_numeral, 'count': parse_numeral},
validator={'__parent__': compose(seps_surround, ordering_validator),
'season': validate_roman,
'count': validate_roman},
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'season')) \
.defaults(validator=None) \
.regex(build_or_pattern(season_words, name='seasonMarker') + '@?(?P<season>' + numeral + ')') \
.regex(r'' + build_or_pattern(of_words) + '@?(?P<count>' + numeral + ')').repeater('?') \
.regex(r'@?' + build_or_pattern(range_separators + discrete_separators + ['@'],
name='seasonSeparator', escape=True) +
r'@?(?P<season>\d+)').repeater('*')
rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>\d+)' +
r'(?:v(?P<version>\d+))?' +
r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4
abbreviations=[dash], formatter={'episode': int, 'version': int, 'count': int},
disabled=lambda context: context.get('type') == 'episode' or is_disabled(context, 'episode'))
rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>' + numeral + ')' +
r'(?:v(?P<version>\d+))?' +
r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4
abbreviations=[dash],
validator={'episode': validate_roman},
formatter={'episode': parse_numeral, 'version': int, 'count': int},
disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode'))
rebulk.regex(r'S?(?P<season>\d+)-?(?:xE|Ex|E|x)-?(?P<other>' + build_or_pattern(all_words) + ')',
tags=['SxxExx'],
abbreviations=[dash],
validator=None,
formatter={'season': int, 'other': lambda match: 'Complete'},
disabled=lambda context: is_disabled(context, 'season'))
# 12, 13
rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d{2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{2})').repeater('*')
# 012, 013
rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'0(?P<episode>\d{1,2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])0(?P<episode>\d{1,2})').repeater('*')
# 112, 113
rebulk.chain(tags=['weak-episode'],
formatter={'episode': int, 'version': int},
name='weak_episode',
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d{3,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{3,4})').repeater('*')
# 1, 2, 3
rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d)') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{1,2})').repeater('*')
# e112, e113, 1e18, 3e19
# TODO: Enhance rebulk for validator to be used globally (season_episode_validator)
rebulk.chain(formatter={'season': int, 'episode': int, 'version': int},
disabled=lambda context: is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'(?P<season>\d{1,2})?(?P<episodeMarker>e)(?P<episode>\d{1,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('*')
# ep 112, ep113, ep112, ep113
rebulk.chain(abbreviations=[dash], formatter={'episode': int, 'version': int},
disabled=lambda context: is_disabled(context, 'episode')) \
.defaults(validator=None) \
.regex(r'ep-?(?P<episode>\d{1,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>ep|e|x|-)(?P<episode>\d{1,4})').repeater('*')
# cap 112, cap 112_114
rebulk.chain(abbreviations=[dash],
tags=['see-pattern'],
formatter={'season': int, 'episode': int},
disabled=is_season_episode_disabled) \
.defaults(validator=None) \
.regex(r'(?P<seasonMarker>cap)-?(?P<season>\d{1,2})(?P<episode>\d{2})') \
.regex(r'(?P<episodeSeparator>-)(?P<season>\d{1,2})(?P<episode>\d{2})').repeater('?')
# 102, 0102
rebulk.chain(tags=['weak-episode', 'weak-duplicate'],
formatter={'season': int, 'episode': int, 'version': int},
name='weak_duplicate',
conflict_solver=season_episode_conflict_solver,
disabled=lambda context: (context.get('episode_prefer_number', False) or
context.get('type') == 'movie') or is_season_episode_disabled(context)) \
.defaults(validator=None) \
.regex(r'(?P<season>\d{1,2})(?P<episode>\d{2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>x|-)(?P<episode>\d{2})').repeater('*')
rebulk.regex(r'v(?P<version>\d+)', children=True, private_parent=True, formatter=int,
disabled=lambda context: is_disabled(context, 'version'))
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'])
# TODO: List of words
# detached of X count (season/episode)
rebulk.regex(r'(?P<episode>\d+)-?' + build_or_pattern(of_words) +
r'-?(?P<count>\d+)-?' + build_or_pattern(episode_words) + '?',
abbreviations=[dash], children=True, private_parent=True, formatter=int,
disabled=lambda context: is_disabled(context, 'episode'))
rebulk.regex(r'Minisodes?', name='episode_format', value="Minisode",
disabled=lambda context: is_disabled(context, 'episode_format'))
rebulk.rules(WeakConflictSolver, RemoveInvalidSeason, RemoveInvalidEpisode,
SeePatternRange(range_separators + ['_']),
EpisodeNumberSeparatorRange(range_separators),
SeasonSeparatorRange(range_separators), RemoveWeakIfMovie, RemoveWeakIfSxxExx,
RemoveWeakDuplicate, EpisodeDetailValidator, RemoveDetachedEpisodeNumber, VersionValidator,
RemoveWeak, RenameToAbsoluteEpisode, CountValidator, EpisodeSingleDigitValidator, RenameToDiscMatch)
return rebulk
class WeakConflictSolver(Rule):
"""
Rule to decide whether weak-episode or weak-duplicate matches should be kept.
If an anime is detected:
- weak-duplicate matches should be removed
- weak-episode matches should be tagged as anime
Otherwise:
- weak-episode matches are removed unless they're part of an episode range match.
"""
priority = 128
consequence = [RemoveMatch, AppendMatch]
def enabled(self, context):
return context.get('type') != 'movie'
@classmethod
def is_anime(cls, matches):
"""Return True if it seems to be an anime.
Anime characteristics:
- version, crc32 matches
- screen_size inside brackets
- release_group at start and inside brackets
"""
if matches.named('version') or matches.named('crc32'):
return True
for group in matches.markers.named('group'):
if matches.range(group.start, group.end, predicate=lambda m: m.name == 'screen_size'):
return True
if matches.markers.starting(group.start, predicate=lambda m: m.name == 'path'):
hole = matches.holes(group.start, group.end, index=0)
if hole and hole.raw == group.raw:
return True
return False
def when(self, matches, context):
to_remove = []
to_append = []
anime_detected = self.is_anime(matches)
for filepart in matches.markers.named('path'):
weak_matches = matches.range(filepart.start, filepart.end, predicate=(
lambda m: m.initiator.name == 'weak_episode'))
weak_dup_matches = matches.range(filepart.start, filepart.end, predicate=(
lambda m: m.initiator.name == 'weak_duplicate'))
if anime_detected:
if weak_matches:
to_remove.extend(weak_dup_matches)
for match in matches.range(filepart.start, filepart.end, predicate=(
lambda m: m.name == 'episode' and m.initiator.name != 'weak_duplicate')):
episode = copy.copy(match)
episode.tags = episode.tags + ['anime']
to_append.append(episode)
to_remove.append(match)
elif weak_dup_matches:
episodes_in_range = matches.range(filepart.start, filepart.end, predicate=(
lambda m:
m.name == 'episode' and m.initiator.name == 'weak_episode'
and m.initiator.children.named('episodeSeparator')
))
if not episodes_in_range and not matches.range(filepart.start, filepart.end,
predicate=lambda m: 'SxxExx' in m.tags):
to_remove.extend(weak_matches)
else:
for match in episodes_in_range:
episode = copy.copy(match)
episode.tags = []
to_append.append(episode)
to_remove.append(match)
if to_append:
to_remove.extend(weak_dup_matches)
return to_remove, to_append
class CountValidator(Rule):
"""
Validate count property and rename it
"""
priority = 64
consequence = [RemoveMatch, RenameMatch('episode_count'), RenameMatch('season_count')]
properties = {'episode_count': [None], 'season_count': [None]}
def when(self, matches, context):
to_remove = []
episode_count = []
season_count = []
for count in matches.named('count'):
previous = matches.previous(count, lambda match: match.name in ['episode', 'season'], 0)
if previous:
if previous.name == 'episode':
episode_count.append(count)
elif previous.name == 'season':
season_count.append(count)
else:
to_remove.append(count)
return to_remove, episode_count, season_count
class SeePatternRange(Rule):
"""
Create matches for episode range for SEE pattern. E.g.: Cap.102_104
"""
priority = 128
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators):
super(SeePatternRange, self).__init__()
self.range_separators = range_separators
def when(self, matches, context):
to_remove = []
to_append = []
for separator in matches.tagged('see-pattern', lambda m: m.name == 'episodeSeparator'):
previous_match = matches.previous(separator, lambda m: m.name == 'episode' and 'see-pattern' in m.tags, 0)
next_match = matches.next(separator, lambda m: m.name == 'season' and 'see-pattern' in m.tags, 0)
if not next_match:
continue
next_match = matches.next(next_match, lambda m: m.name == 'episode' and 'see-pattern' in m.tags, 0)
if previous_match and next_match and separator.value in self.range_separators:
to_remove.append(next_match)
for episode_number in range(previous_match.value + 1, next_match.value + 1):
match = copy.copy(next_match)
match.value = episode_number
to_append.append(match)
to_remove.append(separator)
return to_remove, to_append
class AbstractSeparatorRange(Rule):
"""
Remove separator matches and create matches for season range.
"""
priority = 128
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators, property_name):
super(AbstractSeparatorRange, self).__init__()
self.range_separators = range_separators
self.property_name = property_name
def when(self, matches, context):
to_remove = []
to_append = []
for separator in matches.named(self.property_name + 'Separator'):
previous_match = matches.previous(separator, lambda m: m.name == self.property_name, 0)
next_match = matches.next(separator, lambda m: m.name == self.property_name, 0)
initiator = separator.initiator
if previous_match and next_match and separator.value in self.range_separators:
to_remove.append(next_match)
for episode_number in range(previous_match.value + 1, next_match.value):
match = copy.copy(next_match)
match.value = episode_number
initiator.children.append(match)
to_append.append(match)
to_append.append(next_match)
to_remove.append(separator)
previous_match = None
for next_match in matches.named(self.property_name):
if previous_match:
separator = matches.input_string[previous_match.initiator.end:next_match.initiator.start]
if separator not in self.range_separators:
separator = strip(separator)
if separator in self.range_separators:
initiator = previous_match.initiator
for episode_number in range(previous_match.value + 1, next_match.value):
match = copy.copy(next_match)
match.value = episode_number
initiator.children.append(match)
to_append.append(match)
to_append.append(Match(previous_match.end, next_match.start - 1,
name=self.property_name + 'Separator',
private=True,
input_string=matches.input_string))
to_remove.append(next_match) # Remove and append match to support proper ordering
to_append.append(next_match)
previous_match = next_match
return to_remove, to_append
class RenameToAbsoluteEpisode(Rule):
"""
Rename episode to absolute_episodes.
Absolute episodes are only used if two groups of episodes are detected:
S02E04-06 25-27
25-27 S02E04-06
2x04-06 25-27
28. Anime Name S02E05
The matches in the group with higher episode values are renamed to absolute_episode.
"""
consequence = RenameMatch('absolute_episode')
def when(self, matches, context): # pylint:disable=inconsistent-return-statements
initiators = {match.initiator for match in matches.named('episode')
if len(match.initiator.children.named('episode')) > 1}
if len(initiators) != 2:
ret = []
for filepart in matches.markers.named('path'):
if matches.range(filepart.start + 1, filepart.end, predicate=lambda m: m.name == 'episode'):
ret.extend(
matches.starting(filepart.start, predicate=lambda m: m.initiator.name == 'weak_episode'))
return ret
initiators = sorted(initiators, key=lambda item: item.end)
if not matches.holes(initiators[0].end, initiators[1].start, predicate=lambda m: m.raw.strip(seps)):
first_range = matches.named('episode', predicate=lambda m: m.initiator == initiators[0])
second_range = matches.named('episode', predicate=lambda m: m.initiator == initiators[1])
if len(first_range) == len(second_range):
if second_range[0].value > first_range[0].value:
return second_range
if first_range[0].value > second_range[0].value:
return first_range
class EpisodeNumberSeparatorRange(AbstractSeparatorRange):
"""
Remove separator matches and create matches for episoderNumber range.
"""
def __init__(self, range_separators):
super(EpisodeNumberSeparatorRange, self).__init__(range_separators, "episode")
class SeasonSeparatorRange(AbstractSeparatorRange):
"""
Remove separator matches and create matches for season range.
"""
def __init__(self, range_separators):
super(SeasonSeparatorRange, self).__init__(range_separators, "season")
class RemoveWeakIfMovie(Rule):
"""
Remove weak-episode tagged matches if it seems to be a movie.
"""
priority = 64
consequence = RemoveMatch
def enabled(self, context):
return context.get('type') != 'episode'
def when(self, matches, context):
to_remove = []
to_ignore = set()
remove = False
for filepart in matches.markers.named('path'):
year = matches.range(filepart.start, filepart.end, predicate=lambda m: m.name == 'year', index=0)
if year:
remove = True
next_match = matches.range(year.end, filepart.end, predicate=lambda m: m.private, index=0)
if (next_match and not matches.holes(year.end, next_match.start, predicate=lambda m: m.raw.strip(seps))
and not matches.at_match(next_match, predicate=lambda m: m.name == 'year')):
to_ignore.add(next_match.initiator)
to_ignore.update(matches.range(filepart.start, filepart.end,
predicate=lambda m: len(m.children.named('episode')) > 1))
to_remove.extend(matches.conflicting(year))
if remove:
to_remove.extend(matches.tagged('weak-episode', predicate=(
lambda m: m.initiator not in to_ignore and 'anime' not in m.tags)))
return to_remove
class RemoveWeak(Rule):
"""
Remove weak-episode matches which appears after video, source, and audio matches.
"""
priority = 16
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
weaks = matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-episode' in m.tags)
if weaks:
previous = matches.previous(weaks[0], predicate=lambda m: m.name in (
'audio_codec', 'screen_size', 'streaming_service', 'source', 'video_profile',
'audio_channels', 'audio_profile'), index=0)
if previous and not matches.holes(
previous.end, weaks[0].start, predicate=lambda m: m.raw.strip(seps)):
to_remove.extend(weaks)
return to_remove
class RemoveWeakIfSxxExx(Rule):
"""
Remove weak-episode tagged matches if SxxExx pattern is matched.
Weak episodes at beginning of filepart are kept.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
if matches.range(filepart.start, filepart.end,
predicate=lambda m: not m.private and 'SxxExx' in m.tags):
for match in matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-episode' in m.tags):
if match.start != filepart.start or match.initiator.name != 'weak_episode':
to_remove.append(match)
return to_remove
class RemoveInvalidSeason(Rule):
"""
Remove invalid season matches.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
strong_season = matches.range(filepart.start, filepart.end, index=0,
predicate=lambda m: m.name == 'season'
and not m.private and 'SxxExx' in m.tags)
if strong_season:
if strong_season.initiator.children.named('episode'):
for season in matches.range(strong_season.end, filepart.end,
predicate=lambda m: m.name == 'season' and not m.private):
# remove weak season or seasons without episode matches
if 'SxxExx' not in season.tags or not season.initiator.children.named('episode'):
if season.initiator:
to_remove.append(season.initiator)
to_remove.extend(season.initiator.children)
else:
to_remove.append(season)
return to_remove
class RemoveInvalidEpisode(Rule):
"""
Remove invalid episode matches.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
strong_episode = matches.range(filepart.start, filepart.end, index=0,
predicate=lambda m: m.name == 'episode'
and not m.private and 'SxxExx' in m.tags)
if strong_episode:
strong_ep_marker = RemoveInvalidEpisode.get_episode_prefix(matches, strong_episode)
for episode in matches.range(strong_episode.end, filepart.end,
predicate=lambda m: m.name == 'episode' and not m.private):
ep_marker = RemoveInvalidEpisode.get_episode_prefix(matches, episode)
if strong_ep_marker and ep_marker and strong_ep_marker.value.lower() != ep_marker.value.lower():
if episode.initiator:
to_remove.append(episode.initiator)
to_remove.extend(episode.initiator.children)
else:
to_remove.append(ep_marker)
to_remove.append(episode)
return to_remove
@staticmethod
def get_episode_prefix(matches, episode):
"""
Return episode prefix: episodeMarker or episodeSeparator
"""
return matches.previous(episode, index=0,
predicate=lambda m: m.name in ('episodeMarker', 'episodeSeparator'))
class RemoveWeakDuplicate(Rule):
"""
    Remove weak-duplicate tagged matches when the same pattern occurs more than once in a filepart, for example The 100.109
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
patterns = defaultdict(list)
for match in reversed(matches.range(filepart.start, filepart.end,
predicate=lambda m: 'weak-duplicate' in m.tags)):
if match.pattern in patterns[match.name]:
to_remove.append(match)
else:
patterns[match.name].append(match.pattern)
return to_remove
class EpisodeDetailValidator(Rule):
"""
Validate episode_details if they are detached or next to season or episode.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for detail in matches.named('episode_details'):
if not seps_surround(detail) \
and not matches.previous(detail, lambda match: match.name in ['season', 'episode']) \
and not matches.next(detail, lambda match: match.name in ['season', 'episode']):
ret.append(detail)
return ret
class RemoveDetachedEpisodeNumber(Rule):
"""
If multiple episode are found, remove those that are not detached from a range and less than 10.
Fairy Tail 2 - 16-20, 2 should be removed.
"""
priority = 64
consequence = RemoveMatch
dependency = [RemoveWeakIfSxxExx, RemoveWeakDuplicate]
def when(self, matches, context):
ret = []
episode_numbers = []
episode_values = set()
for match in matches.named('episode', lambda m: not m.private and 'weak-episode' in m.tags):
if match.value not in episode_values:
episode_numbers.append(match)
episode_values.add(match.value)
episode_numbers = list(sorted(episode_numbers, key=lambda m: m.value))
if len(episode_numbers) > 1 and \
episode_numbers[0].value < 10 and \
episode_numbers[1].value - episode_numbers[0].value != 1:
parent = episode_numbers[0]
while parent: # TODO: Add a feature in rebulk to avoid this ...
ret.append(parent)
parent = parent.parent
return ret
class VersionValidator(Rule):
"""
Validate version if previous match is episode or if surrounded by separators.
"""
priority = 64
dependency = [RemoveWeakIfMovie, RemoveWeakIfSxxExx]
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for version in matches.named('version'):
episode_number = matches.previous(version, lambda match: match.name == 'episode', 0)
if not episode_number and not seps_surround(version.initiator):
ret.append(version)
return ret
class EpisodeSingleDigitValidator(Rule):
"""
Remove single digit episode when inside a group that doesn't own title.
"""
dependency = [TitleFromPosition]
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for episode in matches.named('episode', lambda match: len(match.initiator) == 1):
group = matches.markers.at_match(episode, lambda marker: marker.name == 'group', index=0)
if group:
if not matches.range(*group.span, predicate=lambda match: match.name == 'title'):
ret.append(episode)
return ret
class RenameToDiscMatch(Rule):
"""
Rename episodes detected with `d` episodeMarkers to `disc`.
"""
consequence = [RenameMatch('disc'), RenameMatch('discMarker'), RemoveMatch]
def when(self, matches, context):
discs = []
markers = []
to_remove = []
disc_disabled = is_disabled(context, 'disc')
for marker in matches.named('episodeMarker', predicate=lambda m: m.value.lower() == 'd'):
if disc_disabled:
to_remove.append(marker)
to_remove.extend(marker.initiator.children)
continue
markers.append(marker)
discs.extend(sorted(marker.initiator.children.named('episode'), key=lambda m: m.value))
return discs, markers, to_remove
| 43.640698
| 119
| 0.588154
| 20,727
| 0.552263
| 0
| 0
| 1,109
| 0.029549
| 0
| 0
| 8,688
| 0.231489
|
97f09a874f39695917154d611858caf14ea0be1a
| 76,767
|
py
|
Python
|
cwinpy/heterodyne/heterodyne.py
|
nigeltrc72/cwinpy
|
f90cf46e20c4d5abd09dc0540d4694ca6d5d9b42
|
[
"MIT"
] | 5
|
2021-02-25T13:04:43.000Z
|
2022-01-15T22:37:33.000Z
|
cwinpy/heterodyne/heterodyne.py
|
nigeltrc72/cwinpy
|
f90cf46e20c4d5abd09dc0540d4694ca6d5d9b42
|
[
"MIT"
] | 4
|
2021-02-24T12:17:50.000Z
|
2021-12-09T16:41:33.000Z
|
cwinpy/heterodyne/heterodyne.py
|
nigeltrc72/cwinpy
|
f90cf46e20c4d5abd09dc0540d4694ca6d5d9b42
|
[
"MIT"
] | 1
|
2021-02-24T11:40:32.000Z
|
2021-02-24T11:40:32.000Z
|
"""
Run heterodyne pre-processing of gravitational-wave data.
"""
import ast
import configparser
import copy
import os
import shutil
import signal
import sys
import tempfile
from argparse import ArgumentParser
import cwinpy
import numpy as np
from bilby_pipe.bilbyargparser import BilbyArgParser
from bilby_pipe.job_creation.dag import Dag
from bilby_pipe.utils import (
BilbyPipeError,
check_directory_exists_and_if_not_mkdir,
parse_args,
)
from configargparse import ArgumentError
from ..condor.hetnodes import HeterodyneInput, HeterodyneNode, MergeHeterodyneNode
from ..data import HeterodynedData
from ..info import (
ANALYSIS_SEGMENTS,
CVMFS_GWOSC_DATA_SERVER,
CVMFS_GWOSC_DATA_TYPES,
CVMFS_GWOSC_FRAME_CHANNELS,
HW_INJ,
HW_INJ_RUNTIMES,
HW_INJ_SEGMENTS,
RUNTIMES,
)
from ..parfile import PulsarParameters
from ..utils import (
LAL_BINARY_MODELS,
LAL_EPHEMERIS_TYPES,
check_for_tempo2,
initialise_ephemeris,
sighandler,
)
from .base import Heterodyne, generate_segments, remote_frame_cache
def create_heterodyne_parser():
"""
Create the argument parser.
"""
description = """\
A script to heterodyne raw gravitational-wave strain data based on the \
expected evolution of the gravitational-wave signal from a set of pulsars."""
parser = BilbyArgParser(
prog=sys.argv[0],
description=description,
ignore_unknown_config_file_keys=False,
allow_abbrev=False,
)
parser.add("--config", type=str, is_config_file=True, help="Configuration ini file")
parser.add(
"--version",
action="version",
version="%(prog)s {version}".format(version=cwinpy.__version__),
)
parser.add(
"--periodic-restart-time",
default=14400,
type=int,
help=(
"Time after which the job will be self-evicted with code 130. "
"After this, condor will restart the job. Default is 14400s. "
"This is used to decrease the chance of HTCondor hard evictions."
),
)
parser.add(
"--overwrite",
action="store_true",
default=False,
help=(
"Set this flag to make sure any previously generated heterodyned "
'files are overwritten. By default the analysis will "resume" '
"from where it left off (by checking whether output files, as set "
'using "--output" and "--label" arguments, already exist), such '
"as after forced Condor eviction for checkpointing purposes. "
"Therefore, this flag is needs to be explicitly given (the "
"default is False) if not wanting to use resume and overwrite "
"existing files."
),
)
dataparser = parser.add_argument_group("Data inputs")
dataparser.add(
"--starttime",
required=True,
type=int,
help=("The start time of the data to be heterodyned in GPS seconds."),
)
dataparser.add(
"--endtime",
required=True,
type=int,
help=("The end time of the data to be heterodyned in GPS seconds."),
)
dataparser.add(
"--stride",
default=3600,
type=int,
help=(
"The number of seconds to stride through the data (i.e., this "
"number of seconds of data will be read in in one go), Defaults "
"to 3600."
),
)
dataparser.add(
"--detector",
required=True,
type=str,
help=("The name of the detectors for which the data is to be heterodyned."),
)
dataparser.add(
"--frametype",
type=str,
help=(
'The "frame type" name of the data to be heterodyned. If this '
"is not given the correct data set will be attempted to be found "
"using the channel name."
),
)
dataparser.add(
"--channel",
required=True,
type=str,
help=(
'The "channel" within the gravitational-wave data file(s) '
'(either a GW frame ".gwf", or HDF5 file) containing the strain '
"data to be heterodyned. The channel name should contain the "
"detector name prefix as the first two characters followed by a "
'colon, e.g., "L1:GWOSC-4KHZ_R1_STRAIN"'
),
)
dataparser.add(
"--host",
type=str,
help=(
"The server name for finding the gravitational-wave data files. "
'Use "datafind.ligo.org:443" for open data available via CVMFS. '
"To use open data available from the GWOSC use "
'"https://www.gw-openscience.org".'
),
)
dataparser.add(
"--outputframecache",
type=str,
help=(
"If given this should give a file path to which a list of "
"gravitational-wave data file paths, as found by the code, will "
"be written. If not given then the file list will not be output."
),
)
dataparser.add(
"--appendframecache",
action="store_true",
default=False,
help=(
"If writing out the frame cache to a file, set this to True to "
"append to the file rather than overwriting. Default is False."
),
)
dataparser.add(
"--framecache",
help=(
"Provide a pregenerated cache of gravitational-wave files, either "
"as a single file, or a list of files. Alternatively, you can "
"supply a directory containing the files (which will be "
"searched recursively for gwf and then hdf5 files), which should "
'be used in conjunction with the "frametype" argument. If giving '
"a list, this should be in the form of a Python list, surrounded "
"by quotation marks, e.g., \"['file1.lcf','file2.lcf']\"."
),
)
dataparser.add(
"--heterodyneddata",
help=(
"A string, or dictionary of strings, containing the full file "
"path, or directory path, pointing the the location of "
"pre-heterodyned data. For a single pulsar a file path can be "
"given. For multiple pulsars a directory containing heterodyned "
"files (in HDF5 or txt format) can be given provided that within "
"it the file names contain the pulsar names as supplied in the "
'file input with "--pulsarfiles". Alternatively, a dictionary '
"can be supplied, keyed on the pulsar name, containing a single "
"file path or a directory path as above. If supplying a "
"directory, it can contain multiple heterodyned files for a each "
"pulsar and all will be used. If giving a dictionary it should be "
"surrounded by quotation marks."
),
)
segmentparser = parser.add_argument_group("Analysis segment inputs")
segmentparser.add(
"--segmentlist",
help=(
"Provide a list of data segment start and end times, as "
"list/tuple pairs in the list, or an ASCII text file containing "
"the segment start and end times in two columns. If a list, this "
"should be in the form of a Python list, surrounded by quotation "
'marks, e.g., "[(900000000,900086400),(900100000,900186400)]".'
),
)
segmentparser.add(
"--includeflags",
help=(
"If not providing a segment list then give a string, or list of "
"strings, giving the data DQ flags that will be used to generate "
"a segment list. Lists should be surrounded by quotation marks, "
"e.g., \"['L1:DMT-ANALYSIS_READY:1']\"."
),
)
segmentparser.add(
"--excludeflags",
help=(
"A string, or list of strings, giving the data DQ flags to "
"when generating a segment list. Lists should be surrounded by "
"quotation marks."
),
)
segmentparser.add(
"--outputsegmentlist",
type=str,
help=(
"If generating a segment list it will be output to the file "
"specified by this argument."
),
)
segmentparser.add(
"--appendsegmentlist",
action="store_true",
default=False,
help=(
"If generating a segment list set this to True to append to the "
'file specified by "--outputsegmentlist" rather than '
"overwriting. Default is False."
),
)
segmentparser.add("--segmentserver", type=str, help=("The segment database URL."))
pulsarparser = parser.add_argument_group("Pulsar inputs")
pulsarparser.add(
"--pulsarfiles",
action="append",
help=(
"This specifies the pulsars for which to heterodyne the data. It "
"can be either i) a string giving the path to an individual "
"pulsar Tempo(2)-style parameter file, ii) a string giving the "
"path to a directory containing multiple Tempo(2)-style parameter "
"files (the path will be recursively searched for any file with "
'the extension ".par"), iii) a list of paths to individual '
"pulsar parameter files, iv) a dictionary containing paths to "
"individual pulsars parameter files keyed to their names. If "
"instead, pulsar names are given rather than parameter files it "
"will attempt to extract an ephemeris for those pulsars from the "
"ATNF pulsar catalogue. If such ephemerides are available then "
"they will be used (notification will be given when this is "
"these cases). If providing a list or dictionary it should be "
"surrounded by quotation marks."
),
)
pulsarparser.add(
"--pulsars",
action="append",
help=(
"You can analyse only particular pulsars from those specified by "
'parameter files found through the "--pulsarfiles" argument by '
"passing a string, or list of strings, with particular pulsars "
"names to use."
),
)
outputparser = parser.add_argument_group("Data output inputs")
outputparser.add(
"--output",
help=(
"The base directory into which the heterodyned results will be "
"output. To specify explicit directory paths for individual "
"pulsars this can be a dictionary of directory paths keyed to the "
'pulsar name (in which case the "--label" argument will be used '
"to set the file name), or full file paths, which will be used in "
'place of the "--label" argument. If not given then'
" the current working directory will be used."
),
)
outputparser.add(
"--label",
help=(
"The output format for the heterodyned data files. These can be "
'format strings containing the keywords "psr" for the pulsar '
'name, "det" for the detector, "freqfactor" for the rotation '
'frequency scale factor used, "gpsstart" for the GPS start '
'time, and "gpsend" for the GPS end time. The extension should '
'be given as ".hdf", ".h5", or ".hdf5". E.g., the default '
'is "heterodyne_{psr}_{det}_{freqfactor}_{gpsstart}-{gpsend}.hdf".'
),
)
heterodyneparser = parser.add_argument_group("Heterodyne inputs")
heterodyneparser.add(
"--filterknee",
type=float,
help=(
"The knee frequency (Hz) of the low-pass filter applied after "
"heterodyning the data. This should only be given when "
"heterodying raw strain data and not if re-heterodyning processed "
"data. Default is 0.5 Hz."
),
)
heterodyneparser.add(
"--resamplerate",
type=float,
required=True,
help=(
"The rate in Hz at which to resample the data (via averaging) "
"after application of the heterodyne (and filter if applied)."
),
)
heterodyneparser.add(
"--freqfactor",
type=float,
help=(
"The factor applied to the pulsars rotational parameters when "
"defining the gravitational-wave phase evolution. For example, "
"the default value of 2 multiplies the phase evolution by 2 under "
"the assumption of a signal emitted from the l=m=2 quadrupole "
"mode of a rigidly rotating triaxial neutron star."
),
)
heterodyneparser.add(
"--crop",
type=int,
help=(
"The number of seconds to crop from the start and end of data "
"segments to remove filter impulse effects and issues prior to "
"lock-loss. Default is 60 seconds."
),
)
heterodyneparser.add(
"--includessb",
action="store_true",
default=False,
help=(
"Set this flag to include removing the modulation of the signal due to "
"Solar System motion and relativistic effects (e.g., Roemer, "
"Einstein, and Shapiro delay) during the heterodyne."
),
)
heterodyneparser.add(
"--includebsb",
action="store_true",
default=False,
help=(
"Set this flag to include removing the modulation of the signal "
"due to binary system motion and relativistic effects during the "
'heterodyne. To use this "--includessb" must also be set.'
),
)
heterodyneparser.add(
"--includeglitch",
action="store_true",
default=False,
help=(
"Set this flag to include removing the effects of the phase "
"evolution of any modelled pulsar glitches during the heterodyne."
),
)
heterodyneparser.add(
"--includefitwaves",
action="store_true",
default=False,
help=(
"Set this to True to include removing the phase evolution of a "
"series of sinusoids designed to model low-frequency timing noise "
"in the pulsar signal during the heterodyne."
),
)
heterodyneparser.add(
"--usetempo2",
action="store_true",
default=False,
help=(
"Set this to True to use Tempo2 (via libstempo) to calculate the "
"signal phase evolution. For this to be used v2.4.2 or greater of "
"libstempo must be installed. When using Tempo2 the "
'"--earthephemeris", "--sunephemeris" and "--timeephemeris" '
"arguments do not need to be supplied. This can only be used when "
"running the full heterodyne in one stage, but not for "
're-heterodyning previous data, as such all the "--include..." '
"arguments will be assumed to be True."
),
)
ephemerisparser = parser.add_argument_group("Solar system ephemeris inputs")
ephemerisparser.add(
"--earthephemeris",
help=(
'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing '
"to the location of a file containing that ephemeris for the "
"Earth. The dictionary must be supplied within quotation marks, "
"e.g., \"{'DE436':'earth_DE436.txt'}\". If a pulsar requires a "
"specific ephemeris that is not provided in this dictionary, then "
"the code will automatically attempt to find or download the "
"required file if available."
),
)
ephemerisparser.add(
"--sunephemeris",
help=(
'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing '
"to the location of a file containing that ephemeris for the "
"Sun. If a pulsar requires a specific ephemeris that is not "
"provided in this dictionary, then the code will automatically "
"attempt to find or download the required file if available."
),
)
ephemerisparser.add(
"--timeephemeris",
help=(
"A dictionary, keyed to time system name, which can be either "
'"TCB" or "TDB", pointing to the location of a file containing '
"that ephemeris for that time system. If a pulsar requires a "
"specific ephemeris that is not provided in this dictionary, then "
"the code will automatically attempt to find or download the "
"required file if available."
),
)
cfparser = parser.add_argument_group("Configuration inputs")
cfparser.add(
"--cwinpy-heterodyne-dag-config-file",
help=(
"A path to the cwinpy_heterodyne_dag configuration file can be "
"supplied if this was has been used to setup the heterodyne job."
),
)
return parser
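# Illustrative sketch of using the parser above (the ini file name is a
# hypothetical example, not part of this module): options can be read from a
# configuration file via the "--config" argument.
#
#     parser = create_heterodyne_parser()
#     args, _ = parse_args(["--config", "heterodyne.ini"], parser)
#     print(args.detector, args.starttime, args.endtime)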
def heterodyne(**kwargs):
"""
Run heterodyne within Python. See the
:class:`~cwinpy.heterodyne.Heterodyne` class for the required arguments.
Returns
-------
het: :class:`~cwinpy.heterodyne.Heterodyne`
The heterodyning class object.
"""
if "cli" in kwargs or "config" in kwargs:
if "cli" in kwargs:
kwargs.pop("cli")
# get command line arguments
parser = create_heterodyne_parser()
# parse config file or command line arguments
if "config" in kwargs:
cliargs = ["--config", kwargs["config"]]
else:
cliargs = sys.argv[1:]
try:
args, _ = parse_args(cliargs, parser)
except BilbyPipeError as e:
raise IOError("{}".format(e))
# convert args to a dictionary
hetkwargs = vars(args)
if "config" in kwargs:
# update with other keyword arguments
hetkwargs.update(kwargs)
else:
hetkwargs = kwargs
# check non-standard arguments that could be Python objects
nsattrs = [
"framecache",
"heterodyneddata",
"segmentlist",
"includeflags",
"excludeflags",
"pulsarfiles",
"pulsars",
"output",
"earthephemeris",
"sunephemeris",
"timeephemeris",
]
for attr in nsattrs:
value = hetkwargs.pop(attr, None)
if isinstance(value, str):
# check whether the value can be evaluated as a Python object
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
# if the value was a string within a string, e.g., '"[2.3]"',
# evaluate again just in case it contains a Python object!
if isinstance(value, str):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
hetkwargs[attr] = value
elif value is not None:
hetkwargs[attr] = value
# check if pulsarfiles is a single entry list containing a dictionary
if isinstance(hetkwargs["pulsarfiles"], list):
if len(hetkwargs["pulsarfiles"]) == 1:
try:
value = ast.literal_eval(hetkwargs["pulsarfiles"][0])
if isinstance(value, dict):
# switch to passing the dictionary
hetkwargs["pulsarfiles"] = value
except (ValueError, SyntaxError):
pass
signal.signal(signal.SIGALRM, handler=sighandler)
signal.alarm(hetkwargs.pop("periodic_restart_time", 14400))
# remove any None values
for key in hetkwargs.copy():
if hetkwargs[key] is None:
hetkwargs.pop(key)
# convert "overwrite" to "resume"
hetkwargs["resume"] = not hetkwargs.pop("overwrite", False)
# remove "config" from hetkwargs
if "config" in hetkwargs:
hetkwargs.pop("config")
# set up the run
het = Heterodyne(**hetkwargs)
# heterodyne the data
het.heterodyne()
return het
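# Illustrative usage sketch for the heterodyne() function above (all file
# names and values here are hypothetical examples): the heterodyne can be run
# directly from Python rather than through the command-line interface.
#
#     from cwinpy.heterodyne import heterodyne
#
#     het = heterodyne(
#         starttime=1000000000,
#         endtime=1000086400,
#         detector="H1",
#         channel="H1:GWOSC-4KHZ_R1_STRAIN",
#         pulsarfiles="J0123+4567.par",
#         resamplerate=1.0 / 60.0,
#         output="heterodyned_data",
#     )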
def heterodyne_cli(**kwargs): # pragma: no cover
"""
Entry point to ``cwinpy_heterodyne`` script. This just calls
:func:`cwinpy.heterodyne.heterodyne`, but does not return any objects.
"""
kwargs["cli"] = True # set to show use of CLI
_ = heterodyne(**kwargs)
def create_heterodyne_merge_parser():
"""
Create the argument parser for merging script.
"""
description = "A script to merge multiple heterodyned data files."
parser = BilbyArgParser(
prog=sys.argv[0],
description=description,
ignore_unknown_config_file_keys=False,
allow_abbrev=False,
)
parser.add("--config", type=str, is_config_file=True, help="Configuration ini file")
parser.add(
"--version",
action="version",
version="%(prog)s {version}".format(version=cwinpy.__version__),
)
parser.add(
"--heterodynedfiles",
action="append",
type=str,
help=("A path, or list of paths, to heterodyned data files to merge together."),
)
parser.add(
"--output",
type=str,
help=("The output file for the merged heterodyned data."),
)
parser.add(
"--overwrite",
action="store_true",
help=("Set if wanting to overwrite an existing merged file."),
)
parser.add(
"--remove",
action="store_true",
help=("Set if wanting to delete individual files being merged."),
)
return parser
def heterodyne_merge(**kwargs):
"""
Merge the output of multiple heterodynes for a specific pulsar.
Parameters
----------
heterodynedfiles: str, list
A string, or list of strings, giving the paths to heterodyned data
files to be read in and merged
output: str
The output file name to write the data to. If not given then the data
will not be output.
overwrite: bool
Set whether to overwrite an existing file. Defaults to False.
remove: bool
Set whether to remove the individual files that form the merged file.
Defaults to False.
Returns
-------
het: :class:`~cwinpy.heterodyne.Heterodyne`
The merged heterodyning class object.
"""
if "cli" in kwargs:
# get command line arguments
parser = create_heterodyne_merge_parser()
cliargs = sys.argv[1:]
try:
args, _ = parse_args(cliargs, parser)
except BilbyPipeError as e:
raise IOError("{}".format(e))
# convert args to a dictionary
mergekwargs = vars(args)
else:
mergekwargs = kwargs
if "heterodynedfiles" not in mergekwargs:
raise ArgumentError("'heterodynedfiles' is a required argument")
heterodynedfiles = mergekwargs["heterodynedfiles"]
filelist = (
heterodynedfiles if isinstance(heterodynedfiles, list) else [heterodynedfiles]
)
filelist = [hf for hf in filelist if os.path.isfile(hf)]
if len(filelist) == 0:
raise ValueError("None of the heterodyned files given exists!")
# read in and merge all the files
het = HeterodynedData.read(filelist)
# write out the merged data file
if "output" in mergekwargs:
het.write(mergekwargs["output"], overwrite=mergekwargs.get("overwrite", False))
if mergekwargs.get("remove", False):
# remove the individual files
for hf in filelist:
os.remove(hf)
return het
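# Illustrative usage sketch for heterodyne_merge() (the file names are
# hypothetical examples): merge several per-segment heterodyned files for a
# single pulsar into one output file, keeping the originals.
#
#     het = heterodyne_merge(
#         heterodynedfiles=["het_J0123+4567_seg1.hdf5", "het_J0123+4567_seg2.hdf5"],
#         output="het_J0123+4567_merged.hdf5",
#         remove=False,
#     )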
def heterodyne_merge_cli(**kwargs): # pragma: no cover
"""
Entry point to ``cwinpy_heterodyne_merge`` script. This just calls
:func:`cwinpy.heterodyne.heterodyne_merge`, but does not return any
objects.
"""
kwargs["cli"] = True # set to show use of CLI
_ = heterodyne_merge(**kwargs)
class HeterodyneDAGRunner(object):
"""
Set up and run the heterodyne DAG.
Parameters
----------
config: :class:`configparser.ConfigParser`
A :class:`configparser.ConfigParser` object with the analysis setup
parameters.
"""
def __init__(self, config, **kwargs):
# create and build the dag
self.create_dag(config, **kwargs)
def create_dag(self, config, **kwargs):
"""
Create the HTCondor DAG from the configuration parameters.
Parameters
----------
config: :class:`configparser.ConfigParser`
A :class:`configparser.ConfigParser` object with the analysis setup
parameters.
"""
if not isinstance(config, configparser.ConfigParser):
raise TypeError("'config' must be a ConfigParser object")
inputs = HeterodyneInput(config)
dagsection = "heterodyne_dag" if config.has_section("heterodyne_dag") else "dag"
if "dag" in kwargs:
# get a previously created DAG if given (for example for a full
# analysis pipeline)
self.dag = kwargs["dag"]
# get whether to automatically submit the dag
self.dag.inputs.submit = config.getboolean(
dagsection, "submitdag", fallback=False
)
else:
self.dag = Dag(inputs)
# get whether to build the dag
self.build = config.getboolean(dagsection, "build", fallback=True)
# get any additional submission options
self.submit_options = config.get(dagsection, "submit_options", fallback=None)
# get the base directory
self.basedir = config.get("run", "basedir", fallback=os.getcwd())
# create configurations for each cwinpy_heterodyne job
if not config.has_section("heterodyne"):
raise IOError("Configuration file must have a [heterodyne] section.")
# detectors to use
detectors = self.eval(config.get("heterodyne", "detectors", fallback=None))
if isinstance(detectors, str):
detectors = [detectors] # make into a list
elif detectors is None:
raise ValueError("At least one detector must be supplied")
# get pulsar information
pulsarfiles = self.eval(config.get("ephemerides", "pulsarfiles", fallback=None))
pulsars = self.eval(config.get("ephemerides", "pulsars", fallback=None))
if pulsarfiles is None:
raise ValueError("A set of pulsar parameter files must be supplied")
# output information
outputdirs = self.eval(config.get("heterodyne", "outputdir", fallback=None))
if not isinstance(outputdirs, list):
outputdirs = [outputdirs]
for i, outputdir in enumerate(copy.deepcopy(outputdirs)):
if isinstance(outputdir, str):
outputdirs[i] = {det: outputdir for det in detectors}
elif isinstance(outputdir, dict):
if sorted(outputdir.keys()) != sorted(detectors):
raise KeyError(
"outputdirs dictionary must have same keys as the given "
"detectors"
)
for det in detectors:
if not isinstance(outputdir[det], str):
raise TypeError("outputdirs must be a string")
else:
raise TypeError("outputdirs must be a string or dictionary")
label = self.eval(config.get("heterodyne", "label", fallback=None))
if label is not None:
if isinstance(label, str):
label = [label]
elif not isinstance(label, list):
raise TypeError("label must be a string or a list")
freqfactors = self.eval(
config.get("heterodyne", "freqfactors", fallback="[2.0]")
)
if isinstance(freqfactors, (int, float)):
freqfactors = [freqfactors] # make into a list
# get times of data to analyse
fullstarttimes = self.eval(
config.get("heterodyne", "starttimes", fallback=None)
)
if isinstance(fullstarttimes, dict):
if sorted(detectors) != sorted(fullstarttimes.keys()):
raise ValueError("Start times must be specified for all detectors")
for key, value in fullstarttimes.copy().items():
if isinstance(value, int):
fullstarttimes[key] = [value] # convert values to lists
elif not isinstance(value, list):
raise TypeError("Must have a list of start times for a detector")
elif isinstance(fullstarttimes, int):
fullstarttimes = {
det: [fullstarttimes] for det in detectors
} # convert to dict
else:
raise ValueError("Start times must be given")
fullendtimes = self.eval(config.get("heterodyne", "endtimes", fallback=None))
if isinstance(fullendtimes, dict):
if sorted(detectors) != sorted(fullendtimes.keys()):
raise ValueError("End times must be specified for all detectors")
for key, value in fullendtimes.copy().items():
if isinstance(value, int):
fullendtimes[key] = [value] # convert values to lists
elif not isinstance(value, list):
raise TypeError("Must have a list of end times for a detector")
elif isinstance(fullendtimes, int):
fullendtimes = {det: [fullendtimes] for det in detectors} # convert to dict
else:
raise ValueError("End times must be given")
for det in detectors:
if len(fullendtimes[det]) != len(fullstarttimes[det]):
raise ValueError("Inconsistent numbers of start and end times")
stride = config.getint("heterodyne", "stride", fallback=None)
joblength = config.getint("heterodyne", "joblength", fallback=86400)
# get frame data information
frametypes = self.eval(config.get("heterodyne", "frametypes", fallback=None))
if isinstance(frametypes, str) and len(detectors) == 1:
frametypes = {det: frametypes for det in detectors}
framecaches = self.eval(config.get("heterodyne", "framecaches", fallback=None))
if isinstance(framecaches, str) and len(detectors) == 1:
framecaches = {det: framecaches for det in detectors}
channels = self.eval(config.get("heterodyne", "channels", fallback=None))
if isinstance(channels, str) and len(detectors) == 1:
channels = {det: channels for det in detectors}
host = config.get("heterodyne", "host", fallback=None)
heterodyneddata = self.eval(
config.get("heterodyne", "heterodyneddata", fallback=None)
)
framedata = {det: [] for det in detectors}
if frametypes is None and framecaches is None and heterodyneddata is None:
raise ValueError(
"Frame types, frame cache files, or heterodyned data information must "
"be supplied"
)
if heterodyneddata is None:
for fname, finfo in dict(
frametypes=frametypes, framecaches=framecaches, channels=channels
).items():
if finfo is not None:
# set frame types/caches
if isinstance(finfo, dict):
for key, value in finfo.copy().items():
if isinstance(value, str):
finfo[key] = [value] * len(fullstarttimes[key])
elif isinstance(value, list):
if len(value) != len(fullstarttimes[key]):
raise ValueError(
"{} lists must be consistent with the number of start and end times".format(
fname
)
)
else:
raise TypeError("Must have a list of {}".format(fname))
else:
raise TypeError("{} should be a dictionary".format(fname))
# get segment information
segmentserver = config.get("heterodyne", "segmentserver", fallback=None)
segmentlists = self.eval(
config.get("heterodyne", "segmentlists", fallback=None)
)
if isinstance(segmentlists, str) and len(detectors) == 1:
segmentlists = {det: segmentlists for det in detectors}
includeflags = self.eval(
config.get("heterodyne", "includeflags", fallback=None)
)
if isinstance(includeflags, str) and len(detectors) == 1:
includeflags = {det: includeflags for det in detectors}
excludeflags = self.eval(
config.get("heterodyne", "excludeflags", fallback=None)
)
if isinstance(excludeflags, str) and len(detectors) == 1:
excludeflags = {det: excludeflags for det in detectors}
segmentdata = {det: [] for det in detectors}
if segmentlists is None and includeflags is None and heterodyneddata is None:
raise ValueError(
"Segment lists of segment data quality flags must be supplied"
)
for sname, sinfo in dict(
includeflags=includeflags,
excludeflags=excludeflags,
segmentlists=segmentlists,
).items():
if sinfo is not None:
if isinstance(sinfo, dict):
for key, value in sinfo.copy().items():
if isinstance(value, str):
sinfo[key] = [value] * len(fullstarttimes[key])
elif isinstance(value, list):
if len(value) != len(fullstarttimes[key]):
raise ValueError(
"{} lists must be consistent with the number of start and end times".format(
sname
)
)
else:
raise TypeError("Must have a list of {}".format(sname))
else:
raise TypeError("{} should be a dictionary".format(sname))
# get ephemeris information
earthephemeris = self.eval(config.get("ephemerides", "earth", fallback=None))
sunephemeris = self.eval(config.get("ephemerides", "sun", fallback=None))
timeephemeris = self.eval(config.get("ephemerides", "time", fallback=None))
# get all the split segment times and frame caches
if joblength == 0:
starttimes = fullstarttimes
endtimes = fullendtimes
for det in detectors:
for i in range(len(fullstarttimes[det])):
frinfo = {}
if frametypes is not None:
# generate the frame caches now rather than relying on
# each job doing it
frcachedir = os.path.join(self.basedir, "cache")
check_directory_exists_and_if_not_mkdir(frcachedir)
frinfo["framecache"] = os.path.join(
frcachedir,
"frcache_{0:d}-{1:d}_{2}.txt".format(
starttimes[det][i], endtimes[det][i], frametypes[det][i]
),
)
_ = remote_frame_cache(
starttimes[det][i],
endtimes[det][i],
channels[det][i],
frametype=frametypes[det][i],
host=config.get("heterodyne", "host", fallback=None),
write=frinfo["framecache"],
)
else:
frinfo["framecache"] = framecaches[det][i]
frinfo["channel"] = channels[det][i]
framedata[det].append(frinfo.copy())
seginfo = {}
if segmentlists is not None:
seginfo["segmentlist"] = segmentlists[det][i]
else:
# GWOSC segments look like DET_DATA, DET_CW* or DET_*_CAT*
usegwosc = False
if (
"{}_DATA".format(det) == includeflags[det][i]
or "{}_CW".format(self.detector) in self.includeflags[0]
or "CBC_CAT" in includeflags[det][i]
or "BURST_CAT" in includeflags[det][i]
):
usegwosc = True
inputs.require_gwosc = True
# if segment list files are not provided create the lists
# now rather than relying on each job doing it
segdir = os.path.join(self.basedir, "segments")
check_directory_exists_and_if_not_mkdir(segdir)
seginfo["segmentlist"] = os.path.join(
segdir,
"segments_{0:d}-{1:d}_{2}.txt".format(
starttimes[det][i],
endtimes[det][i],
includeflags[det][i].replace(":", "_"),
),
)
_ = generate_segments(
starttime=starttimes[det][i],
endtime=endtimes[det][i],
includeflags=includeflags[det][i],
excludeflags=(
None
if excludeflags is None
else excludeflags[det][i].split(",")
),
writesegments=seginfo["segmentlist"],
usegwosc=usegwosc,
server=segmentserver,
)
segmentdata[det].append(seginfo.copy())
elif joblength > 0:
starttimes = {det: [] for det in detectors}
endtimes = {det: [] for det in detectors}
for det in detectors:
idx = 0
for starttime, endtime in zip(fullstarttimes[det], fullendtimes[det]):
# if segment list files are not provided create the lists
# now rather than relying on each job doing it
seginfo = {}
if segmentlists is not None:
seginfo["segmentlist"] = segmentlists[det][idx]
segmentlist = generate_segments(
starttime=starttime,
endtime=endtime,
segmentfile=seginfo["segmentlist"],
)
else:
# GWOSC segments look like DET_DATA or DET_*_CAT*
usegwosc = False
if (
"{}_DATA".format(det) == includeflags[det][idx]
or "CBC_CAT" in includeflags[det][idx]
or "BURST_CAT" in includeflags[det][idx]
):
usegwosc = True
inputs.require_gwosc = True
# if segment list files are not provided create the lists
# now rather than relying on each job doing it
segdir = os.path.join(self.basedir, "segments")
check_directory_exists_and_if_not_mkdir(segdir)
seginfo["segmentlist"] = os.path.join(
segdir,
"segments_{0:d}-{1:d}_{2}.txt".format(
starttime,
endtime,
includeflags[det][idx].replace(":", "_"),
),
)
segmentlist = generate_segments(
starttime=starttime,
endtime=endtime,
includeflags=includeflags[det][idx],
excludeflags=(
None
if excludeflags is None
else excludeflags[det][idx].split(",")
),
writesegments=seginfo["segmentlist"],
usegwosc=usegwosc,
server=segmentserver,
)
if len(segmentlist) == 0:
raise ValueError(
f"No science data segments exist for {det}"
)
# make segment list a list of lists, so values are not immutable
segmentlist = [list(seg) for seg in segmentlist]
frinfo = {}
if frametypes is not None:
# generate the frame caches now rather than relying on
# each job doing it
frcachedir = os.path.join(self.basedir, "cache")
check_directory_exists_and_if_not_mkdir(frcachedir)
frinfo["framecache"] = os.path.join(
frcachedir,
"frcache_{0:d}-{1:d}_{2}.txt".format(
starttime, endtime, frametypes[det][idx]
),
)
_ = remote_frame_cache(
starttime,
endtime,
channels[det][idx],
frametype=frametypes[det][idx],
host=config.get("heterodyne", "host", fallback=None),
write=frinfo["framecache"],
)
else:
frinfo["framecache"] = framecaches[det][idx]
frinfo["channel"] = channels[det][idx]
segidx = 0
while segidx < len(segmentlist):
curstart = segmentlist[segidx][0]
# get segments containing up to joblength of data
sumseg = 0
while sumseg < joblength:
sumseg += segmentlist[segidx][1] - segmentlist[segidx][0]
segidx += 1
if segidx == len(segmentlist):
break
if segidx < len(segmentlist):
overlap = sumseg - joblength
segidx -= 1
curend = segmentlist[segidx][1] - overlap
segmentlist[segidx][0] = curend
else:
# ignore final segment if it's less than 30 mins
if sumseg < 30 * 60:
break
# use end value
curend = segmentlist[-1][1]
starttimes[det].append(int(curstart))
endtimes[det].append(int(curend))
# append frame data for jobs
framedata[det].append(frinfo.copy())
segmentdata[det].append(seginfo.copy())
idx += 1
else:
raise ValueError("Length of each job must be a positive integer")
# create Heterodyne object to get pulsar parameter file information
het = Heterodyne(
pulsarfiles=pulsarfiles,
pulsars=pulsars,
heterodyneddata=heterodyneddata,
)
# get number over which to split up pulsars
npulsarjobs = config.getint("heterodyne", "npulsarjobs", fallback=1)
pulsargroups = []
if npulsarjobs == 1 or len(het.pulsars) == 1:
pulsargroups.append(het.pulsars)
else:
pstep = int(np.ceil(len(het.pulsars) / npulsarjobs))
for i in range(npulsarjobs):
pulsargroups.append(het.pulsars[pstep * i : pstep * (i + 1)])
# set whether to perform the heterodyne in 1 or two stages
stages = config.getint("heterodyne", "stages", fallback=1)
if stages not in [1, 2]:
raise ValueError("Stages must either be 1 or 2")
# get the resample rate(s)
if stages == 1:
resamplerate = [
self.eval(
config.get("heterodyne", "resamplerate", fallback="1.0 / 60.0")
)
]
else:
resamplerate = self.eval(
config.get("heterodyne", "resamplerate", fallback="[1.0, 1.0 / 60.0]")
)
# set the components of the signal modulation, i.e., solar system,
# binary system, to include in the heterodyne stages. By default a
# single stage heterodyne will include all components and a two stage
# heterodyne will include no components in the first stage, but all
# components in the second stage. If supplying different values for a
# two stage process use lists
if stages == 1:
includessb = [config.getboolean("heterodyne", "includessb", fallback=True)]
includebsb = [config.getboolean("heterodyne", "includebsb", fallback=True)]
includeglitch = [
config.getboolean("heterodyne", "includeglitch", fallback=True)
]
includefitwaves = [
config.getboolean("heterodyne", "includefitwaves", fallback=True)
]
# filter knee frequency (default to 0.1 Hz for single stage heterodyne)
filterknee = config.getfloat("heterodyne", "filterknee", fallback=0.1)
else:
includessb = self.eval(
config.getboolean("heterodyne", "includessb", fallback="[False, True]")
)
includebsb = self.eval(
config.getboolean("heterodyne", "includebsb", fallback="[False, True]")
)
includeglitch = self.eval(
config.get(
"heterodyne", "includeglitch", fallback="[False, True]"
)
)
includefitwaves = self.eval(
config.get(
"heterodyne", "includefitwaves", fallback="[False, True]"
)
)
# filter knee frequency (default to 0.5 Hz for two stage heterodyne)
filterknee = config.getfloat("heterodyne", "filterknee", fallback=0.5)
# get whether using Tempo2 or not and check it's availability
usetempo2 = config.getboolean("heterodyne", "usetempo2", fallback=False)
if usetempo2 and not check_for_tempo2():
raise ImportError(
"libstempo is not installed so 'usetempo2' option cannot be used"
)
# get the required solar system ephemeris types and binary model for
# the given pulsars
etypes = []
binarymodels = []
for pf in het.pulsarfiles:
par = PulsarParameters(het.pulsarfiles[pf])
etypes.append(par["EPHEM"] if par["EPHEM"] is not None else "DE405")
if par["BINARY"] is not None:
binarymodels.append(par["BINARY"])
self.pulsar_files = het.pulsarfiles.copy()
# remove duplicates
etypes = set(etypes)
binarymodels = set(binarymodels)
# if ephemeris information is None download/extract information
if earthephemeris is None or sunephemeris is None:
earthephemeris = {} if earthephemeris is None else earthephemeris
sunephemeris = {} if sunephemeris is None else sunephemeris
for etype in LAL_EPHEMERIS_TYPES:
if etype not in earthephemeris:
edat = initialise_ephemeris(ephem=etype, ssonly=True)
earthephemeris[etype] = edat.filenameE
sunephemeris[etype] = edat.filenameS
if timeephemeris is None:
timeephemeris = {} if timeephemeris is None else timeephemeris
for unit in ["TCB", "TDB"]:
if unit not in timeephemeris:
_, fnames = initialise_ephemeris(
units=unit, timeonly=True, filenames=True
)
timeephemeris[unit] = fnames[0]
# create a copy of each file with a unique name in case of identical filenames
# from the astropy cache, which causes problems if files need to be
# transferred
if inputs.transfer_files or inputs.osg:
for edat, ename in zip(
[earthephemeris, sunephemeris, timeephemeris], ["earth", "sun", "time"]
):
if (
len(set([os.path.basename(edat[etype]) for etype in edat])) == 1
and len(edat) > 1
):
for etype in edat:
tmpephem = os.path.join(
tempfile.gettempdir(), f"{ename}_{etype}"
)
shutil.copy(edat[etype], tmpephem)
edat[etype] = tmpephem
# check that ephemeris files exist for all required types
if not usetempo2:
for etype in etypes:
if etype not in earthephemeris or etype not in sunephemeris:
raise ValueError(
f"Pulsar(s) require ephemeris '{etype}' which has not been supplied"
)
# check that binary models exist for all required types
if not usetempo2:
for bmodel in binarymodels:
if bmodel not in LAL_BINARY_MODELS:
raise ValueError(
f"Pulsar(s) require binary model type '{bmodel}' "
"which is not available in LALSuite. Try the "
"usetempo2 option."
)
# check output directories and labels lists are correct length
if stages == 1:
if label is not None:
if len(label) == 0:
raise ValueError("A label must be supplied")
if len(outputdirs) == 0:
raise ValueError("An output directory must be supplied")
else:
if label is not None:
if len(label) != 2:
raise ValueError(
"Two labels must be supplied, one for each heterodyne stage"
)
if len(outputdirs) != 2:
raise ValueError(
"Two output directories must be supplied, one for each heterodyne stage"
)
interpolationstep = config.get("heterodyne", "interpolationstep", fallback=60)
crop = config.getint("heterodyne", "crop", fallback=60)
overwrite = config.getboolean("heterodyne", "overwrite", fallback=False)
merge = config.getboolean("merge", "merge", fallback=True) and joblength > 0
# create jobs
self.hetnodes = []
# dictionary to contain all nodes for a given pulsar (for passing on to
# cwinpy_pe if required)
self.pulsar_nodes = {psr: {det: [] for det in detectors} for psr in het.pulsars}
if merge:
# dictionary containing child nodes for each merge job
mergechildren = {
det: {ff: {psr: [] for psr in het.pulsars} for ff in freqfactors}
for det in detectors
}
# dictionary containing the output files for the merge results
self.mergeoutputs = {
det: {ff: {psr: None for psr in het.pulsars} for ff in freqfactors}
for det in detectors
}
# dictionary to contain all the heterodyned data files for each pulsar
self.heterodyned_files = {
det: {ff: {psr: [] for psr in het.pulsars} for ff in freqfactors}
for det in detectors
}
# loop over sets of pulsars
for pgroup in pulsargroups:
self.hetnodes.append([])
# loop over frequency factors
for ff in freqfactors:
# loop over each detector
for det in detectors:
# loop over times
idx = 0
for starttime, endtime in zip(starttimes[det], endtimes[det]):
configdict = {}
configdict["starttime"] = starttime
configdict["endtime"] = endtime
configdict["detector"] = det
configdict["freqfactor"] = ff
configdict["resamplerate"] = resamplerate[0]
configdict["filterknee"] = filterknee
configdict["crop"] = crop
configdict["overwrite"] = overwrite
# set frame data/heterodyned data info
configdict.update(framedata[det][idx])
configdict["host"] = host
configdict["stride"] = stride
configdict["heterodyneddata"] = (
heterodyneddata
if heterodyneddata is None
else {psr: het.heterodyneddata[psr] for psr in pgroup}
)
# set segment data info
configdict.update(segmentdata[det][idx])
configdict["pulsarfiles"] = {
psr: het.pulsarfiles[psr] for psr in pgroup
}
configdict["pulsars"] = copy.deepcopy(pgroup)
# set whether to include modulations
configdict["includessb"] = includessb[0]
configdict["includebsb"] = includebsb[0]
configdict["includeglitch"] = includeglitch[0]
configdict["includefitwaves"] = includefitwaves[0]
configdict["interpolationstep"] = interpolationstep
configdict["usetempo2"] = usetempo2
# include ephemeris files
configdict["earthephemeris"] = earthephemeris
configdict["sunephemeris"] = sunephemeris
configdict["timeephemeris"] = timeephemeris
# temporary Heterodyne object to get the output file names
tmphet = Heterodyne(
starttime=starttime,
endtime=endtime,
detector=det,
freqfactor=ff,
output=outputdirs[0][det],
label=label[0] if label is not None else None,
pulsars=copy.deepcopy(pgroup),
pulsarfiles=pulsarfiles,
)
# get lists of set of output heterodyned files for each pulsar/detector
for psr in pgroup:
self.heterodyned_files[det][ff][psr].append(
copy.deepcopy(tmphet.outputfiles[psr])
)
# set the final merged output files
for psr in pgroup:
if merge and self.mergeoutputs[det][ff][psr] is None:
# use full start and end times
tmphet.starttime = starttimes[det][0]
tmphet.endtime = endtimes[det][-1]
self.mergeoutputs[det][ff][psr] = os.path.join(
outputdirs[0][det],
tmphet.outputfiles[psr],
)
configdict["output"] = outputdirs[0][det]
configdict["label"] = label[0] if label is not None else None
self.hetnodes[-1].append(
HeterodyneNode(
inputs,
{
key: copy.deepcopy(value)
for key, value in configdict.items()
if value is not None
},
self.dag,
)
)
# put nodes into dictionary for each pulsar
if stages == 1:
for psr in pgroup:
self.pulsar_nodes[psr][det].append(
self.hetnodes[-1][-1]
)
if merge:
for psr in pgroup:
mergechildren[det][ff][psr].append(
self.hetnodes[-1][-1]
)
idx += 1
# if performing the fine (second stage) heterodyne, new jobs need to be created on a per-pulsar basis
if stages == 2:
for i, pgroup in enumerate(pulsargroups):
for psr in pgroup:
for ff in freqfactors:
for det in detectors:
configdict = {}
configdict["starttime"] = starttimes[det][0]
configdict["endtime"] = endtimes[det][-1]
configdict["detector"] = det
configdict["freqfactor"] = ff
configdict["pulsars"] = psr
configdict["pulsarfiles"] = pulsarfiles
configdict["resamplerate"] = resamplerate[-1]
# include all modulations
configdict["includessb"] = includessb[-1]
configdict["includebsb"] = includebsb[-1]
configdict["includeglitch"] = includeglitch[-1]
configdict["includefitwaves"] = includefitwaves[-1]
# include ephemeris files
configdict["earthephemeris"] = earthephemeris
configdict["sunephemeris"] = sunephemeris
configdict["timeephemeris"] = timeephemeris
# input the data
configdict["heterodyneddata"] = {
psr: self.heterodyned_files[det][ff][psr]
}
# output structure
configdict["output"] = outputdirs[1][det]
configdict["label"] = (
label[1] if label is not None else None
)
self.pulsar_nodes[psr][det].append(
HeterodyneNode(
inputs,
{
key: copy.deepcopy(value)
for key, value in configdict.items()
if value is not None
},
self.dag,
generation_node=self.hetnodes[i],
)
)
elif merge:
# set output merge jobs
for i, pgroup in enumerate(pulsargroups):
for psr in pgroup:
for ff in freqfactors:
for det in detectors:
if len(self.heterodyned_files[det][ff][psr]) > 1:
self.pulsar_nodes[psr][det].append(
MergeHeterodyneNode(
inputs,
{
"heterodynedfiles": copy.deepcopy(
self.heterodyned_files[det][ff][psr]
),
"freqfactor": ff,
"detector": det,
"pulsar": psr,
"output": copy.deepcopy(
self.mergeoutputs[det][ff][psr]
),
},
self.dag,
generation_node=mergechildren[det][ff][psr],
)
)
if self.build:
self.dag.build()
def eval(self, arg):
"""
Try and evaluate a string using :func:`ast.literal_eval`.
Parameters
----------
arg: str
A string to be evaluated.
Returns
-------
object:
The evaluated object, or original string, if not able to be evaluated.
"""
# copy of string
newobj = str(arg)
try:
newobj = ast.literal_eval(newobj)
except (ValueError, SyntaxError):
# try evaluating expressions such as "1/60" or "[1., 1./60.]"",
# which fail for recent versions of ast in Python 3.7+
# if expression contains a list strip the brackets to start
objlist = newobj.strip("[").strip("]").split(",")
issafe = False
for obj in objlist:
try:
# check if value is just a number
_ = float(obj)
issafe = True
except ValueError:
issafe = False
for op in ["/", "*", "+", "-"]:
if op in obj:
if len(obj.split(op)) == 2:
try:
_ = [float(val) for val in obj.split(op)]
issafe = True
except ValueError:
break
# object is "safe", use eval
if issafe:
newobj = eval(newobj)
return newobj
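# Illustrative behaviour sketch for the eval() method above (the inputs are
# made-up examples): literals are converted to Python objects, simple
# arithmetic strings are evaluated, and anything else is returned unchanged.
#
#     runner.eval("[1.0, 2.0]")   # -> [1.0, 2.0]
#     runner.eval("1/60")         # -> 0.0166...
#     runner.eval("H1")           # -> "H1" (returned as the original string)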
def heterodyne_dag(**kwargs):
"""
Run heterodyne_dag within Python. This will create a `HTCondor <https://htcondor.readthedocs.io/>`_
DAG for running multiple ``cwinpy_heterodyne`` instances on a computer cluster. Optional
parameters that can be used instead of a configuration file (for "quick setup") are given in
the "Other parameters" section.
Parameters
----------
config: str
A configuration file, or :class:`configparser.ConfigParser` object,
for the analysis.
Other parameters
----------------
run: str
The name of an observing run for which open data exists, which will be
heterodyned, e.g., "O1".
detector: str, list
The detector, or list of detectors, for which the data will be
heterodyned. If not set then all detectors available for a given run
will be used.
hwinj: bool
Set this to True to analyse the continuous hardware injections for a
given run. No ``pulsar`` argument is required in this case.
samplerate: str
Select the sample rate of the data to use. This can either be 4k or
16k for data sampled at 4096 or 16384 Hz, respectively. The default
is 4k, except if running on hardware injections for O1 or later, for
which 16k will be used due to being required for the highest frequency
source. For the S5 and S6 runs only 4k data is available from GWOSC,
so if 16k is chosen it will be ignored.
pulsar: str, list
The path to, or list of paths to, a Tempo(2)-style pulsar parameter
file(s), or directory containing multiple parameter files, to
heterodyne. If a pulsar name is given instead of a parameter file
then an attempt will be made to find the pulsar's ephemeris from the
ATNF pulsar catalogue, which will then be used.
osg: bool
Set this to True to run on the Open Science Grid rather than a local
computer cluster.
output: str
The location for outputting the heterodyned data. By default the
current directory will be used. Within this directory, subdirectories
for each detector will be created.
joblength: int
The length of data (in seconds) into which to split the individual
analysis jobs. By default this is set to 86400, i.e., one day. If this
is set to 0, then the whole dataset is treated as a single job.
accounting_group_tag: str
For LVK users this sets the computing accounting group tag.
usetempo2: bool
Set this flag to use Tempo2 (if installed) for calculating the signal
phase evolution rather than the default LALSuite functions.
Returns
-------
dag:
An object containing a pycondor :class:`pycondor.Dagman` object.
"""
if "config" in kwargs:
configfile = kwargs.pop("config")
else: # pragma: no cover
parser = ArgumentParser(
description=(
"A script to create a HTCondor DAG to process GW strain data "
"by heterodyning it based on the expected phase evolution for "
"a selection of pulsars."
)
)
parser.add_argument(
"config",
nargs="?",
help=("The configuration file for the analysis"),
default=None,
)
optional = parser.add_argument_group(
"Quick setup arguments (this assumes CVMFS open data access)."
)
optional.add_argument(
"--run",
help=(
"Set an observing run name for which to heterodyne the data. "
"This can be one of {} for which open data exists".format(
list(RUNTIMES.keys())
)
),
)
optional.add_argument(
"--detector",
action="append",
help=(
"The detector for which the data will be heterodyned. This can "
"be used multiple times to specify multiple detectors. If not "
"set then all detectors available for a given run will be "
"used."
),
)
optional.add_argument(
"--hwinj",
action="store_true",
help=(
"Set this flag to analyse the continuous hardware injections "
"for a given run. No '--pulsar' arguments are required in "
"this case."
),
)
optional.add_argument(
"--samplerate",
help=(
"Select the sample rate of the data to use. This can either "
"be 4k or 16k for data sampled at 4096 or 16384 Hz, "
"respectively. The default is 4k, except if running on "
"hardware injections for O1 or later, for which 16k will be "
"used due to being requred for the highest frequency source. "
"For the S5 and S6 runs only 4k data is avaialble from GWOSC, "
"so if 16k is chosen it will be ignored."
),
default="4k",
)
optional.add_argument(
"--pulsar",
action="append",
help=(
"The path to a Tempo(2)-style pulsar parameter file, or "
"directory containing multiple parameter files, to "
"heterodyne. This can be used multiple times to specify "
"multiple pulsar inputs. If a pulsar name is given instead "
"of a parameter file then an attempt will be made to find the "
"pulsar's ephemeris from the ATNF pulsar catalogue, which "
"will then be used."
),
)
optional.add_argument(
"--osg",
action="store_true",
help=(
"Set this flag to run on the Open Science Grid rather than a "
"local computer cluster."
),
)
optional.add_argument(
"--output",
help=(
"The location for outputting the heterodyned data. By default "
"the current directory will be used. Within this directory, "
"subdirectories for each detector will be created."
),
default=os.getcwd(),
)
optional.add_argument(
"--joblength",
type=int,
help=(
"The length of data (in seconds) into which to split the "
"individual analysis jobs. By default this is set to 86400, "
"i.e., one day. If this is set to 0, then the whole dataset "
"is treated as a single job."
),
)
optional.add_argument(
"--accounting-group-tag",
dest="accgroup",
help=("For LVK users this sets the computing accounting group tag"),
)
optional.add_argument(
"--usetempo2",
action="store_true",
help=(
"Set this flag to use Tempo2 (if installed) for calculating "
"the signal phase evolution rather than the default LALSuite "
"functions."
),
)
args = parser.parse_args()
if args.config is not None:
configfile = args.config
else:
# use the "Quick setup" arguments
configfile = configparser.ConfigParser()
run = kwargs.get("run", args.run)
if run not in RUNTIMES:
raise ValueError(f"Requested run '{run}' is not available")
pulsars = []
if kwargs.get("hwinj", args.hwinj):
# use hardware injections for the run
runtimes = HW_INJ_RUNTIMES
segments = HW_INJ_SEGMENTS
pulsars.extend(HW_INJ[run]["hw_inj_files"])
# set sample rate to 16k, except for S runs
srate = "16k" if run[0] == "O" else "4k"
else:
# use pulsars provided
runtimes = RUNTIMES
segments = ANALYSIS_SEGMENTS
pulsar = kwargs.get("pulsar", args.pulsar)
if pulsar is None:
raise ValueError("No pulsar parameter files have be provided")
pulsars.extend(pulsar if isinstance(pulsar, list) else [pulsar])
# get sample rate
srate = (
"16k" if (args.samplerate[0:2] == "16" and run[0] == "O") else "4k"
)
detector = kwargs.get("detector", args.detector)
if detector is None:
detectors = list(runtimes[run].keys())
else:
detector = detector if isinstance(detector, list) else [detector]
detectors = [det for det in detector if det in runtimes[run]]
if len(detectors) == 0:
raise ValueError(
f"Provided detectors '{detector}' are not valid for the given run"
)
# create required settings
configfile["run"] = {}
configfile["run"]["basedir"] = kwargs.get("output", args.output)
configfile["heterodyne_dag"] = {}
configfile["heterodyne_dag"]["submitdag"] = "True"
if kwargs.get("osg", args.osg):
configfile["heterodyne_dag"]["osg"] = "True"
configfile["heterodyne_job"] = {}
configfile["heterodyne_job"]["getenv"] = "True"
if args.accgroup is not None:
configfile["heterodyne_job"]["accounting_group"] = kwargs.get(
"accounting_group_tag", args.accgroup
)
# add pulsars/pulsar ephemerides
configfile["ephemerides"] = {}
configfile["ephemerides"]["pulsarfiles"] = str(pulsars)
# add heterodyne settings
configfile["heterodyne"] = {}
configfile["heterodyne"]["detectors"] = str(detectors)
configfile["heterodyne"]["starttimes"] = str(
{det: runtimes[run][det][0] for det in detectors}
)
configfile["heterodyne"]["endtimes"] = str(
{det: runtimes[run][det][1] for det in detectors}
)
configfile["heterodyne"]["frametypes"] = str(
{det: CVMFS_GWOSC_DATA_TYPES[run][srate][det] for det in detectors}
)
configfile["heterodyne"]["channels"] = str(
{det: CVMFS_GWOSC_FRAME_CHANNELS[run][srate][det] for det in detectors}
)
configfile["heterodyne"]["host"] = CVMFS_GWOSC_DATA_SERVER
if kwargs.get("hwinj", args.hwinj):
configfile["heterodyne"]["includeflags"] = str(
{det: segments[run][det]["includesegments"] for det in detectors}
)
configfile["heterodyne"]["excludeflags"] = str(
{det: segments[run][det]["excludesegments"] for det in detectors}
)
else:
configfile["heterodyne"]["includeflags"] = str(
{det: segments[run][det] for det in detectors}
)
configfile["heterodyne"]["outputdir"] = str(
{
det: os.path.join(kwargs.get("output", args.output), det)
for det in detectors
}
)
configfile["heterodyne"]["overwrite"] = "False"
# set whether to use Tempo2 for phase evolution
if kwargs.get("usetempo2", args.usetempo2):
configfile["heterodyne"]["usetempo2"] = "True"
# split the analysis into on average day long chunks
if kwargs.get("joblength", args.joblength) is None:
configfile["heterodyne"]["joblength"] = "86400"
else:
configfile["heterodyne"]["joblength"] = str(
kwargs.get("joblength", args.joblength)
)
# merge the resulting files and remove individual files
configfile["merge"] = {}
configfile["merge"]["merge"] = "True"
configfile["merge"]["remove"] = "True"
configfile["merge"]["overwrite"] = "True"
if isinstance(configfile, configparser.ConfigParser):
config = configfile
else:
config = configparser.ConfigParser()
try:
config.read_file(open(configfile, "r"))
except Exception as e:
raise IOError(f"Problem reading configuration file '{configfile}'\n: {e}")
return HeterodyneDAGRunner(config, **kwargs)
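# Illustrative "quick setup" sketch (the pulsar file and output path are
# hypothetical examples): the corresponding command-line script can build and
# submit a DAG for open O1 data without a full configuration file, e.g.,
#
#     cwinpy_heterodyne_dag --run O1 --detector H1 \
#         --pulsar J0537-6910.par --output /home/user/heterodyne_O1
#
# which uses the quick setup branch above to write the equivalent
# configuration before creating the HeterodyneDAGRunner.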
def heterodyne_dag_cli(**kwargs): # pragma: no cover
"""
Entry point to the cwinpy_heterodyne_dag script. This just calls
:func:`cwinpy.heterodyne.heterodyne_dag`, but does not return any objects.
"""
_ = heterodyne_dag(**kwargs)
| 40.746815
| 116
| 0.524809
| 39,712
| 0.517306
| 0
| 0
| 0
| 0
| 0
| 0
| 28,777
| 0.374862
|
97f1c05811bbe3176ddd3d2e0d9d3415c269f3fe
| 5,787
|
py
|
Python
|
timpani/webserver/webhelpers.py
|
ollien/Timpani
|
0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb
|
[
"MIT"
] | 3
|
2015-10-16T11:26:53.000Z
|
2016-08-28T19:28:52.000Z
|
timpani/webserver/webhelpers.py
|
ollien/timpani
|
0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb
|
[
"MIT"
] | 22
|
2015-09-14T23:00:07.000Z
|
2016-07-22T08:39:39.000Z
|
timpani/webserver/webhelpers.py
|
ollien/timpani
|
0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb
|
[
"MIT"
] | null | null | null |
import flask
import functools
import bs4
import urllib.parse
from .. import auth
from .. import themes
from .. import settings
INVALID_PERMISSIONS_FLASH_MESSAGE = "Sorry, you don't have permission to view that page."
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session is not None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return flask.redirect(path)
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
#Decorator which checks if a user is logged in and capable of using the specified permissions.
#If redirectPage is equal to None,
#the target function MUST have the arguments authed and authMessage defined.
def checkUserPermissions(redirectPage=None, saveRedirect=True, redirectMessage=INVALID_PERMISSIONS_FLASH_MESSAGE, requiredPermissions=None):
def decorator(function):
def decorated(*args, **kwargs):
session = checkForSession()
if session is not None:
username = session.user.username
result = True
#If we don't have any permissions necessary, a login is enough.
#Otherwise, we're going to check to make sure that all necessary permissions are in place.
if requiredPermissions is not None:
if type(requiredPermissions) == str:
result = auth.userHasPermission(username, requiredPermissions)
else:
for permission in requiredPermissions:
if not auth.userHasPermission(username, permission):
result = False
#If all permissions are valid call the function, otherwise redirect as needed.
if result:
if redirectPage is not None:
return function(*args, **kwargs)
else:
return function(authed=True, authMessage=redirectMessage, *args, **kwargs)
else:
#We don't want to flash on things like ajax routes, so we use redirectPage is not None
willFlash = redirectPage is not None
return _permissionRedirect(redirectPage, saveRedirect, redirectMessage, willFlash, function, *args, **kwargs)
else:
return _permissionRedirect(redirectPage, saveRedirect, redirectMessage, False, function, *args, **kwargs)
return functools.update_wrapper(decorated, function)
return decorator
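#Illustrative usage sketch (the route, permission name and template are
#hypothetical examples, not part of this module): protecting a Flask view with
#the decorator above so that only users with the "admin" permission can see it.
#
#   @app.route("/admin")
#   @checkUserPermissions(redirectPage="/login", requiredPermissions="admin")
#   def adminPanel():
#       return flask.render_template("admin.html")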
def _permissionRedirect(redirectPage, saveRedirect, redirectMessage, flash, function, *args, **kwargs):
if flash:
flask.flash(redirectMessage)
if redirectPage is not None:
if not saveRedirect:
return flask.redirect(redirectPage)
else:
return redirectAndSave(redirectPage)
else:
return function(authed=False, authMessage=redirectMessage, *args, **kwargs)
#Will return all information that is needed to render a post.
#Prevents fragmentation in various post display methods
def getPostsParameters():
title = settings.getSettingValue("title")
subtitle = settings.getSettingValue("subtitle")
displayName = settings.getSettingValue("display_name")
return {
"blogTitle": title,
"blogSubtitle": subtitle,
"displayName": displayName,
}
#Renders the theme's template if the theme contains one
#Otherwise, it renders the default template
def renderPosts(defaultPath, pageTitle, pageNumber, pageCount, nextPageExists, basePageUrl="", *args, **kwargs):
theme = themes.getCurrentTheme()
template = theme["template"]
postParams = getPostsParameters()
#Merge postParams and kwargs
#Anything in kwargs will overwrite postParams (which is why we use these two lines)
postParams.update(kwargs)
kwargs = postParams
if template is None:
templateFile = open(defaultPath, "r")
template = templateFile.read()
templateFile.close()
return flask.render_template_string(template, pageTitle=pageTitle,
pageNumber=pageNumber, pageCount=pageCount,
nextPageExists=nextPageExists, basePageUrl=basePageUrl,
*args, **kwargs)
def xssFilter(postBody):
whitelistedTags = ["div", "span", "b", "i", "u", "a", "p", "img", "code",
"ul", "li", "h1", "h2", "h3", "h4", "h5", "h6", "pre",
"br"]
#src and href must be checked separately
whitelistedAttributes = ["id", "class", "style"]
soupedBody = bs4.BeautifulSoup(postBody, "html.parser")
blockedTags = soupedBody.findAll(lambda tag: tag.name not in whitelistedTags)
#Check if element has any attributes that are not allowed, but only if
#they are not already in blockedTags. Those will be escaped, anyway.
blockedAttrs = soupedBody.findAll(lambda tag:
len(set(tag.attrs.keys()) - set(whitelistedAttributes)) != 0
and tag.name in whitelistedTags)
for tag in blockedTags:
#Beautiful soup will escape HTML strings
tag.replace_with(str(tag))
for tag in blockedAttrs:
allowedAttrs = {}
for attr in tag.attrs:
if attr in whitelistedAttributes:
allowedAttrs[attr] = tag.attrs[attr]
elif attr == "src" or attr == "href":
scheme = urllib.parse.urlparse(tag.attrs[attr]).scheme
if scheme != "data" and scheme != "javascript":
allowedAttrs[attr] = tag.attrs[attr]
tag.attrs = allowedAttrs
return str(soupedBody)
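#Illustrative behaviour sketch for xssFilter (the input is a made-up example):
#   xssFilter('<p onclick="alert(1)">hi</p><script>bad()</script>')
#would keep the <p> element but strip its onclick attribute, while the
#<script> element, which is not whitelisted, is escaped to plain text.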
| 43.511278
| 140
| 0.644375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,346
| 0.23259
|
97f1ce8901d8660f5836035727b480380b3d1fc2
| 1,542
|
py
|
Python
|
bot/plugins/keyboard/__init__.py
|
grahamtito/TelegramFiletoCloud
|
63ac4a173102ee73615aa5bcf996e545746a1c27
|
[
"Unlicense"
] | null | null | null |
bot/plugins/keyboard/__init__.py
|
grahamtito/TelegramFiletoCloud
|
63ac4a173102ee73615aa5bcf996e545746a1c27
|
[
"Unlicense"
] | null | null | null |
bot/plugins/keyboard/__init__.py
|
grahamtito/TelegramFiletoCloud
|
63ac4a173102ee73615aa5bcf996e545746a1c27
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# This is bot coded by Abhijith N T and used for educational purposes only
# https://github.com/AbhijithNT
# Copyright ABHIJITH N T
# Thank you https://github.com/pyrogram/pyrogram
from pyrogram.types import (
InlineKeyboardMarkup,
InlineKeyboardButton
)
def server_select():
upload_selection = [
[
InlineKeyboardButton(
"transfer.sh",
callback_data="transfersh"
),
InlineKeyboardButton(
"File.io",
callback_data="File.io"
)
],
[
InlineKeyboardButton(
"gofile.io",
callback_data="gofileio"
),
InlineKeyboardButton(
"anonymfiles.com",
callback_data="anonymfiles"
)
],
[
InlineKeyboardButton(
"aparat",
callback_data="aparat"
),
InlineKeyboardButton(
"splus",
callback_data="splus"
)
]
]
return InlineKeyboardMarkup(upload_selection)
def completedKeyboard(dl):
replayMarkup = InlineKeyboardMarkup(
[[
InlineKeyboardButton(
"DOWNLOAD URL",
url=f"{dl}"
)
],
[
InlineKeyboardButton(
"🗂 SOURCE",
url="https://github.com/AbhijithNT/"
)
]])
return replayMarkup
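# --- A minimal usage note (not part of the original plugin): the markups above are meant
# to be passed as reply_markup when sending a message from an async Pyrogram handler.
# The handler context, `message` and `download_url` below are assumptions for illustration:
#     await message.reply_text("Choose an upload server:", reply_markup=server_select())
#     await message.reply_text("Uploaded!", reply_markup=completedKeyboard(download_url))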
| 23.723077
| 74
| 0.485084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.25178
|
97f1fff136972b7db73eca847e9e3cb4870be823
| 4,022
|
py
|
Python
|
django_storymarket/forms.py
|
jacobian/django-storymarket
|
ec43318ddb9964e67220f6fa9675389b637422ce
|
[
"BSD-3-Clause"
] | 1
|
2019-01-12T10:05:59.000Z
|
2019-01-12T10:05:59.000Z
|
django_storymarket/forms.py
|
jacobian/django-storymarket
|
ec43318ddb9964e67220f6fa9675389b637422ce
|
[
"BSD-3-Clause"
] | null | null | null |
django_storymarket/forms.py
|
jacobian/django-storymarket
|
ec43318ddb9964e67220f6fa9675389b637422ce
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import operator
import storymarket
from django import forms
from django.core.cache import cache
from django.conf import settings
from .models import SyncedObject
# Timeout for choices cached from Storymarket. 5 minutes.
CHOICE_CACHE_TIMEOUT = 600
log = logging.getLogger('django_storymarket')
class StorymarketSyncForm(forms.ModelForm):
"""
A form allowing the choice of sync options for a given model instance.
"""
class Meta:
model = SyncedObject
fields = ['org', 'category', 'tags', 'pricing', 'rights']
def __init__(self, *args, **kwargs):
super(StorymarketSyncForm, self).__init__(*args, **kwargs)
# Override some fields. Tags is left alone; the default is fine.
self.fields['org'] = forms.TypedChoiceField(label='Org',
choices=self._choices('orgs'),
coerce=int)
self.fields['category'] = forms.TypedChoiceField(label='Category',
choices=self._choices('subcategories'),
coerce=int)
self.fields['pricing'] = forms.TypedChoiceField(label='Pricing',
choices=self._choices('pricing'),
coerce=int)
self.fields['rights'] = forms.TypedChoiceField(label='Rights',
choices=self._choices('rights'),
coerce=int)
def _choices(self, manager_name):
"""
Generate a list of choices from a given storymarket manager type.
These choices are cached to save API hits, sorted, and an empty
choice is included.
"""
cache_key = 'storymarket_choice_cache:%s' % manager_name
choices = cache.get(cache_key)
if choices is None:
manager = getattr(self._api, manager_name)
try:
objs = sorted(manager.all(), key=operator.attrgetter('name'))
except storymarket.exceptions.StorymarketError as e:
log.exception('Storymarket API call failed: %s' % e)
return [(u'', u'--- Storymarket Unavailable ---')]
# If there's only a single object, just select it -- don't offer
# an empty choice. Otherwise, offer an empty.
if len(objs) == 1:
empty_choice = []
else:
empty_choice = [(u'', u'---------')]
choices = empty_choice + [(o.id, o.name) for o in objs]
cache.set(cache_key, choices, CHOICE_CACHE_TIMEOUT)
return choices
@property
def _api(self):
return storymarket.Storymarket(settings.STORYMARKET_API_KEY)
class StorymarketOptionalSyncForm(StorymarketSyncForm):
"""
Like a StorymarketSyncForm, but with an extra boolean field indicating
whether syncing should take place or not.
"""
sync = forms.BooleanField(initial=False, required=False,
label="Upload to Storymarket")
def __init__(self, *args, **kwargs):
super(StorymarketOptionalSyncForm, self).__init__(*args, **kwargs)
# Make fields optional; we'll validate them in clean()
for field in ('org', 'category', 'tags'):
self.fields[field].required = False
def clean(self):
if self.cleaned_data['sync']:
for field in ('org', 'category', 'tags'):
if not self.cleaned_data.get(field, None):
message = self.fields[field].error_messages['required']
self._errors[field] = self.error_class([message])
del self.cleaned_data[field]
return self.cleaned_data
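# --- A minimal sketch (not from the original app) of the cache-aside pattern used by
# _choices above: look the list up in Django's cache, rebuild it on a miss, and store it
# with a timeout so repeated form renders do not hit the remote API. `fetch` is an
# assumed callable returning objects that expose `id` and `name`.
def cached_choices(cache_key, fetch, timeout=CHOICE_CACHE_TIMEOUT):
    choices = cache.get(cache_key)
    if choices is None:
        objs = sorted(fetch(), key=operator.attrgetter('name'))
        choices = [(o.id, o.name) for o in objs]
        cache.set(cache_key, choices, timeout)
    return choices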
| 43.717391
| 96
| 0.544008
| 3,701
| 0.920189
| 0
| 0
| 98
| 0.024366
| 0
| 0
| 1,061
| 0.263799
|
97f201e4bc64fac90fde4b3a05b02b6bc4e482f8
| 5,773
|
py
|
Python
|
revisum/snippet.py
|
medariox/revisum
|
e92afa047ec66ef80bf3f27e6be81b1505f7151e
|
[
"MIT"
] | null | null | null |
revisum/snippet.py
|
medariox/revisum
|
e92afa047ec66ef80bf3f27e6be81b1505f7151e
|
[
"MIT"
] | null | null | null |
revisum/snippet.py
|
medariox/revisum
|
e92afa047ec66ef80bf3f27e6be81b1505f7151e
|
[
"MIT"
] | null | null | null |
import pickle
from collections import OrderedDict
from datetime import datetime
from .chunk import Chunk
from .review import Review
from .tokenizer import LineTokenizer
from .utils import norm_path
from .database.snippet import maybe_init, Snippet as DataSnippet
class Snippet(object):
def __init__(self, snippet_id, merged, chunks, source, target):
self.snippet_id = snippet_id
self.merged = merged
self._chunks = chunks
self._chunk_ids = []
self.start = chunks[0].start
self.length = self.total_len(chunks[0].start, chunks[-1].end)
self.source_file = norm_path(str(source))
self.target_file = norm_path(str(target))
self._target_lines = []
self._source_lines = []
self._target_tokens = []
self._source_tokens = []
def __str__(self):
return '\n-------------------------\n'.join(self.to_text())
def to_json(self):
snippet = OrderedDict()
snippet['snippet_id'] = self.snippet_id
reviews = Review.load(self.pr_number(self.snippet_id),
self.repo_id(self.snippet_id))
snippet['reviews'] = [review.to_json() for review in reviews]
snippet['chunk_ids'] = self.chunk_ids
return snippet
@property
def chunks(self):
return self._chunks
@property
def chunk_ids(self):
if not self._chunk_ids:
self._chunk_ids = [c.chunk_id for c in self._chunks]
return self._chunk_ids
@staticmethod
def repo_id(snippet_id):
return snippet_id.split('-')[3]
@staticmethod
def pr_number(snippet_id):
return snippet_id.split('-')[2]
@classmethod
def make_id(cls, hunk_no, file_no, pr_number, repo_id):
return '-'.join([str(hunk_no), str(file_no),
str(pr_number), str(repo_id)])
@staticmethod
def total_len(start, end):
length = end - start + 1
return length
def to_tokens(self):
chunks = []
for chunk in self._chunks:
chunks.append(chunk.as_tokens())
return chunks
def to_text(self):
chunks = []
for chunk in self._chunks:
chunks.append(chunk.as_text(pretty=True))
return chunks
@classmethod
def as_tokens(cls, code):
if not isinstance(code, list):
code = [code]
tokens = LineTokenizer(code).tokens
lines = []
for line in tokens:
lines += line
return lines
@classmethod
def as_elements(cls, code):
if not isinstance(code, list):
code = [code]
tokens = LineTokenizer(code).elements
lines = []
for line in tokens:
lines += line
return lines
@classmethod
def load(cls, snippet_id, path=None):
repo_id = cls.repo_id(snippet_id)
maybe_init(repo_id, path=path)
db_snippet = DataSnippet.get_or_none(snippet_id=snippet_id)
if db_snippet:
chunks = []
chunk_ids = pickle.loads(db_snippet.chunk_ids)
for chunk_id in chunk_ids:
chunks.append(Chunk.load(chunk_id))
merged = db_snippet.merged
source = db_snippet.source
target = db_snippet.target
snippet = cls(snippet_id, merged, chunks, source, target)
return snippet
@classmethod
def load_all(cls, repo_id, merged_only=False, path=None):
maybe_init(repo_id, path=path)
query = DataSnippet.select(
DataSnippet.snippet_id,
DataSnippet.chunk_ids,
DataSnippet.source,
DataSnippet.target)
if merged_only:
query = query.where(DataSnippet.merged == 1)
query = query.order_by(DataSnippet.last_mod.desc())
for db_snippet in query:
snippet_id = db_snippet.snippet_id
chunks = []
chunk_ids = pickle.loads(db_snippet.chunk_ids)
for chunk_id in chunk_ids:
chunks.append(Chunk.load(chunk_id))
merged = db_snippet.merged
source = db_snippet.source
target = db_snippet.target
snippet = cls(snippet_id, merged, chunks, source, target)
print('Finished loading snippet with ID: {0}'.format(snippet_id))
yield snippet
def _serialize_ids(self):
return pickle.dumps(self.chunk_ids, pickle.HIGHEST_PROTOCOL)
def exists(self):
repo_id = self.repo_id(self.snippet_id)
maybe_init(repo_id)
snippet = DataSnippet.get_or_none(snippet_id=self.snippet_id)
return bool(snippet)
def save(self):
repo_id = self.repo_id(self.snippet_id)
maybe_init(repo_id)
snippet = DataSnippet.get_or_none(snippet_id=self.snippet_id)
if snippet:
(DataSnippet
.update(snippet_id=self.snippet_id,
merged=self.merged,
last_mod=datetime.now(),
start=self.start,
length=self.length,
source=self.source_file,
target=self.target_file,
chunk_ids=self._serialize_ids())
.where(DataSnippet.snippet_id == self.snippet_id)
.execute())
else:
(DataSnippet
.create(snippet_id=self.snippet_id,
merged=self.merged,
last_mod=datetime.now(),
start=self.start,
length=self.length,
source=self.source_file,
target=self.target_file,
chunk_ids=self._serialize_ids()))
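# --- A small worked example (values invented) of the snippet-ID scheme above: make_id
# joins hunk number, file number, PR number and repository id with dashes, and
# pr_number()/repo_id() recover the last two fields by splitting on '-'.
example_id = Snippet.make_id(hunk_no=2, file_no=0, pr_number=1337, repo_id=42)
# example_id == '2-0-1337-42'
# Snippet.pr_number(example_id) == '1337'
# Snippet.repo_id(example_id) == '42'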
| 29.01005
| 77
| 0.580634
| 5,506
| 0.95375
| 960
| 0.166291
| 2,761
| 0.478261
| 0
| 0
| 111
| 0.019227
|
97f20ba0590c9d144a0c17683ec4a0a88ea21ea6
| 46
|
py
|
Python
|
ainnovation_dcim/workflow/__init__.py
|
ltxwanzl/ainnovation_dcim
|
b065489e2aa69729c0fd5142cf75d8caa7788b31
|
[
"Apache-2.0"
] | null | null | null |
ainnovation_dcim/workflow/__init__.py
|
ltxwanzl/ainnovation_dcim
|
b065489e2aa69729c0fd5142cf75d8caa7788b31
|
[
"Apache-2.0"
] | null | null | null |
ainnovation_dcim/workflow/__init__.py
|
ltxwanzl/ainnovation_dcim
|
b065489e2aa69729c0fd5142cf75d8caa7788b31
|
[
"Apache-2.0"
] | null | null | null |
# default_app_config = '.apps.WorkflowConfig'
| 23
| 45
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.978261
|
97f2191d807924b9920f7ca4379d337e4f2f9d92
| 6,361
|
py
|
Python
|
examples/api-samples/inc_samples/sample33.py
|
groupdocs-legacy-sdk/python
|
80e5ef5a9a14ac4a7815c6cf933b5b2997381455
|
[
"Apache-2.0"
] | null | null | null |
examples/api-samples/inc_samples/sample33.py
|
groupdocs-legacy-sdk/python
|
80e5ef5a9a14ac4a7815c6cf933b5b2997381455
|
[
"Apache-2.0"
] | null | null | null |
examples/api-samples/inc_samples/sample33.py
|
groupdocs-legacy-sdk/python
|
80e5ef5a9a14ac4a7815c6cf933b5b2997381455
|
[
"Apache-2.0"
] | null | null | null |
####This sample shows how to convert several HTML documents to PDF and merge them into one document
#Import of classes from libraries
import base64
import os
import shutil
import random
import time
from pyramid.renderers import render_to_response
from groupdocs.StorageApi import StorageApi
from groupdocs.AsyncApi import AsyncApi
from groupdocs.ApiClient import ApiClient
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
from groupdocs.models.JobInfo import JobInfo
# Check that a value is neither None nor empty
def IsNotNull(value):
return value is not None and len(value) > 0
####Set variables and get POST data
def sample33(request):
clientId = request.POST.get('client_id')
privateKey = request.POST.get('private_key')
firstUrl = request.POST.get('url1')
secondUrl = request.POST.get('url2')
thirdUrl = request.POST.get('url3')
basePath = request.POST.get('server_type')
message = ""
iframe = ""
# Check that clientId and privateKey were provided
if IsNotNull(clientId) == False or IsNotNull(privateKey) == False:
return render_to_response('__main__:templates/sample33.pt',
{ 'error' : 'You do not enter all parameters' })
####Create Signer, ApiClient and Storage Api objects
#Create signer object
signer = GroupDocsRequestSigner(privateKey)
#Create apiClient object
apiClient = ApiClient(signer)
#Create Storage Api object
storageApi = StorageApi(apiClient)
#Create Async api object
asyncApi = AsyncApi(apiClient)
#Set base Path
if basePath == "":
basePath = "https://api.groupdocs.com/v2.0"
storageApi.basePath = basePath
asyncApi.basePath = basePath
#Create list of URL's
urlList = [firstUrl, secondUrl, thirdUrl]
#Create empty list for uploaded files GUID's
guidList = []
for url in urlList:
try:
#Upload file
upload = storageApi.UploadWeb(clientId, url)
if upload.status == "Ok":
#Add GUID of uploaded file to list
guidList.append(upload.result.guid)
else:
raise Exception(upload.error_message)
except Exception as e:
return render_to_response('__main__:templates/sample33.pt',
{ 'error' : str(e) })
####Make a request to the Async API using clientId
try:
#Create list of result document type
convertType = []
convertType.append("pdf")
#Create JobInfo object and set attributes
jobInfo = JobInfo()
jobInfo.actions = "convert, combine"
jobInfo.out_formats = convertType
jobInfo.status = "-1"
jobInfo.email_results = True
rand = random.randint(0, 500)
jobInfo.name = "test" + str(rand)
#Create job
createJob = asyncApi.CreateJob(clientId, jobInfo)
if createJob.status == "Ok":
for guid in guidList:
try:
#Add all uploaded files to created job
addJobDocument = asyncApi.AddJobDocument(clientId, createJob.result.job_id, guid, False)
if addJobDocument.status != "Ok":
raise Exception(addJobDocument.error_message)
except Exception as e:
return render_to_response('__main__:templates/sample33.pt',
{ 'error' : str(e) })
#Change job status
jobInfo.status = "0"
try:
#Update job with new status
updateJob = asyncApi.UpdateJob(clientId,createJob.result.job_id, jobInfo)
if updateJob.status == "Ok":
time.sleep(5)
try:
#Get result file from job by it's ID
getJobDocument = asyncApi.GetJobDocuments(clientId, createJob.result.job_id)
if getJobDocument.status == "Ok":
fileGuid = getJobDocument.result.outputs[0].guid
#Generate the iframe URL using the result file GUID
#iframe to prodaction server
if basePath == "https://api.groupdocs.com/v2.0":
iframe = 'https://apps.groupdocs.com/document-viewer/embed/' + fileGuid
#iframe to dev server
elif basePath == "https://dev-api.groupdocs.com/v2.0":
iframe = 'https://dev-apps.groupdocs.com/document-viewer/embed/' + fileGuid
#iframe to test server
elif basePath == "https://stage-api.groupdocs.com/v2.0":
iframe = 'https://stage-apps.groupdocs.com/document-viewer/embed/' + fileGuid
elif basePath == "http://realtime-api.groupdocs.com":
iframe = 'http://realtime-apps.groupdocs.com/document-viewer/embed/' + fileGuid
iframe = signer.signUrl(iframe)
else:
raise Exception(getJobDocument.error_message)
except Exception as e:
return render_to_response('__main__:templates/sample33.pt',
{ 'error' : str(e) })
else:
raise Exception(updateJob.error_message)
except Exception as e:
return render_to_response('__main__:templates/sample33.pt',
{ 'error' : str(e) })
else:
raise Exception(createJob.error_message)
except Exception as e:
return render_to_response('__main__:templates/sample33.pt',
{ 'error' : str(e) })
#If the request was successful, set the message variable for the template
return render_to_response('__main__:templates/sample33.pt',
{ 'userId' : clientId,
'privateKey' : privateKey,
'url1' : firstUrl,
'url2' : secondUrl,
'url3' : thirdUrl,
'iframe' : iframe,
'message' : message },
request=request)
| 43.868966
| 111
| 0.562962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,828
| 0.287376
|
97f2ebb10db5b5ba4727a38411b745fbfd41201b
| 2,503
|
py
|
Python
|
silver/api/pagination.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 222
|
2017-01-15T10:30:57.000Z
|
2022-03-08T20:34:46.000Z
|
silver/api/pagination.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 141
|
2017-01-11T10:56:49.000Z
|
2021-10-12T11:51:00.000Z
|
silver/api/pagination.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 76
|
2017-01-10T13:50:27.000Z
|
2022-03-25T21:37:00.000Z
|
# Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import replace_query_param, remove_query_param
class LinkHeaderPagination(PageNumberPagination):
page_size = api_settings.PAGE_SIZE or 30
page_size_query_param = 'page_size'
max_page_size = 100
def get_last_link(self):
url = self.request.build_absolute_uri()
page_number = self.page.paginator.num_pages
return replace_query_param(url, self.page_query_param, page_number)
def get_first_link(self, display_page_query_param=True):
url = self.request.build_absolute_uri()
if display_page_query_param:
page_number = self.page.paginator.validate_number(1)
return replace_query_param(url, self.page_query_param, page_number)
else:
return remove_query_param(url, self.page_query_param)
def get_paginated_response(self, data):
next_url = self.get_next_link()
previous_url = self.get_previous_link()
first_url = self.get_first_link()
last_url = self.get_last_link()
if next_url is not None and previous_url is not None:
link = '<{next_url}>; rel="next", <{previous_url}>; rel="prev"'
elif next_url is not None:
link = '<{next_url}>; rel="next"'
elif previous_url is not None:
link = '<{previous_url}>; rel="prev"'
else:
link = ''
if link:
link += ', '
link += '<{first_url}>; rel="first", <{last_url}>; rel="last"'
link = link.format(next_url=next_url, previous_url=previous_url,
first_url=first_url, last_url=last_url)
headers = {'Link': link} if link else {}
return Response(data, headers=headers)
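# --- For illustration only (URLs invented): for a page with both neighbours the class
# above emits an RFC 5988 style Link header shaped like this.
example_link_header = (
    '<https://api.example.com/items?page=5>; rel="next", '
    '<https://api.example.com/items?page=3>; rel="prev", '
    '<https://api.example.com/items?page=1>; rel="first", '
    '<https://api.example.com/items?page=9>; rel="last"'
)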
| 37.924242
| 79
| 0.691171
| 1,648
| 0.65841
| 0
| 0
| 0
| 0
| 0
| 0
| 755
| 0.301638
|
97f305739c9556bc7a629078425a1949c86c0361
| 3,117
|
py
|
Python
|
process_filing_headers.py
|
jsfenfen/fec2file
|
541a7dc40eb4ebf51d1c610ee19fdefc030fc7e3
|
[
"MIT"
] | 1
|
2019-04-24T16:45:07.000Z
|
2019-04-24T16:45:07.000Z
|
process_filing_headers.py
|
jsfenfen/fec2file
|
541a7dc40eb4ebf51d1c610ee19fdefc030fc7e3
|
[
"MIT"
] | null | null | null |
process_filing_headers.py
|
jsfenfen/fec2file
|
541a7dc40eb4ebf51d1c610ee19fdefc030fc7e3
|
[
"MIT"
] | null | null | null |
import os
import fecfile
import json
import csv
import sys
from settings import RAW_ELECTRONIC_DIR, MASTER_HEADER_ROW, HEADER_DUMP_FILE
START_YEAR = 2019
ERROR_HEADERS = ['path', 'error', ]
def readfile(filepath, writer):
filename = os.path.basename(filepath)
filename = filename.replace(".fec", "")
file_number = int(filename)
file = open(filepath, encoding = "ISO-8859-1")
#file = open(filepath)
firstline = file.readline()
secondline = file.readline()
firstline = firstline.replace("\n", "")
raw_results = fecfile.parse_header(firstline)
results = raw_results[0]
results["filing_number"] = file_number
version = raw_results[1]
lines = None
if len(raw_results)==3:
lines = raw_results[1]
original_report = results.get('report_id', None)
report_number = results.get('report_number', None)
if original_report:
original_report = original_report.replace("FEC-", "")
original_report_number = int(original_report)
results["amends"] = original_report_number
#print("Found amended filing %s amends %s # %s" % (file_number, original_report_number, report_number))
secondlineparsed = fecfile.parse_line(secondline, version)
#print(secondlineparsed)
results["form_type"] = secondlineparsed.get('form_type', '')
results["filer_committee_id_number"] = secondlineparsed.get('filer_committee_id_number', '')
results["committee_name"] = secondlineparsed.get('committee_name', '')
results["date_signed"] = secondlineparsed.get('date_signed', '')
results["coverage_from_date"] = secondlineparsed.get('coverage_from_date', '')
results["coverage_through_date"] = secondlineparsed.get('coverage_through_date', '')
writer.writerow(results)
if __name__ == '__main__':
outfile = open(HEADER_DUMP_FILE, 'w')
dw = csv.DictWriter(outfile, fieldnames=MASTER_HEADER_ROW, extrasaction='ignore')
dw.writeheader()
print("Writing output to %s" % HEADER_DUMP_FILE)
errorfile = open("header_read_errors.csv", 'w')
error_writer = csv.DictWriter(errorfile, fieldnames=ERROR_HEADERS, extrasaction='ignore')
error_writer.writeheader()
for dirName, subdirList, fileList in os.walk(RAW_ELECTRONIC_DIR, topdown=False):
try:
directory_year = int(dirName.split("/")[-1][0:4])
if directory_year < START_YEAR:
print("Ignoring directory %s" % dirName)
continue
except ValueError:
continue
for fname in fileList:
if fname.endswith(".fec"):
full_path = os.path.join(dirName, fname)
#readfile(full_path, dw)
#print("Found file %s" % full_path)
try:
readfile(full_path, dw)
except Exception as e:
print("error reading %s: %s" % (full_path, e))
error_writer.writerow({
'path':full_path,
'error':e
})
| 33.159574
| 111
| 0.62881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 681
| 0.218479
|
97f379ae1f9f041646342228c2bcfc62e5962980
| 331
|
py
|
Python
|
src/python/collector/urls.py
|
swqqn/django-collector
|
014e5974c8c6dda38682a7ae7eb1d4f0295679b8
|
[
"MIT"
] | 3
|
2015-11-05T13:42:15.000Z
|
2020-01-15T08:00:58.000Z
|
src/python/collector/urls.py
|
rentalita/django-collector
|
8646e514d26820e317b2b59828dc0e506a19c780
|
[
"MIT"
] | null | null | null |
src/python/collector/urls.py
|
rentalita/django-collector
|
8646e514d26820e317b2b59828dc0e506a19c780
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('collector.views',
url(r'^blob404/$', 'blob404'),
url(r'^deleted/$', 'deleted'),
url(r'^$', 'create'),
url(r'^(?P<uid>\w+)/$', 'delete'),
)
# Local Variables:
# indent-tabs-mode: nil
# End:
# vim: ai et sw=4 ts=4
| 20.6875
| 51
| 0.586103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.58006
|
97f5869664190ff99134b09c60ba7139b7a21527
| 7,658
|
py
|
Python
|
cdisp/core.py
|
felippebarbosa/cdisp
|
d9a612c252495ab017bffccdd7e82bbb555e07dd
|
[
"BSL-1.0"
] | null | null | null |
cdisp/core.py
|
felippebarbosa/cdisp
|
d9a612c252495ab017bffccdd7e82bbb555e07dd
|
[
"BSL-1.0"
] | null | null | null |
cdisp/core.py
|
felippebarbosa/cdisp
|
d9a612c252495ab017bffccdd7e82bbb555e07dd
|
[
"BSL-1.0"
] | null | null | null |
#-*- coding: utf-8 -*-
"""
Dispersion calculation functions
"""
import numpy as np # module for array manipulation (used below as np)
import pandas # module for general data analysis
import os # module for general OS manipulation
import scipy.constants # physical constants (speed of light) used below
##
def set_transverse_mode(data_frame, order_tag, neff_tag = 'neff', complex_neff = False):
""" Function for classification of transverse modes
For this function to work, the frequency and polarization must be the same.
Also, the input has to be a Pandas data frame.
"""
if not isinstance(data_frame, pandas.DataFrame): raise ValueError("The object MUST be a Pandas data frame")
####
No = len(data_frame) # number of modes
order_list = np.array(['%1d' % x for x in np.arange(1, No + 1)][::-1]) # list with the transversal order
neffs = np.array(data_frame[neff_tag]) # neffs of the modes
if complex_neff:
neffs = np.abs(np.array([complex(s.replace('i' , 'j ')) for s in neffs])) # for complex neff
inds = neffs.argsort(kind = 'mergesort') # neff sorting
inds2 = np.array(inds).argsort(kind = 'mergesort') # index resorting (reverse sorting)
order_list_sorted = order_list[inds2] # list with the right (sorted) transversal order
data_frame[order_tag] = order_list_sorted
return data_frame
#######
def data_classification(data_frame, wavelength_tag = 'wlength', frequency_tag = 'freq',
input_tags = ['eig', 'Ptm', 'Ppml', 'Pcore', 'Pbus'],
class_tags = ['polarization', 'ring_bus', 'transverse_mode']):
""" Function for filtering quality factor, losses and classification of polarization and transverse modes
The input has to be a Pandas data frame.
"""
## limits setting
pml_thre = 0.5 # threshold for power in the PMLs
bus_thre = 1.0 # threshold for power in the bus waveguide relative to the ring
tm_thre = 1.0 # threshold for power in the TM mode
## tags for classification
[eigenval_tag, TM_tag, pml_tag, ring_tag, bus_tag] = input_tags
[pol_tag, ring_bus_tag, order_tag] = class_tags
## list of columns
list_col = list(data_frame.columns) # columns names
Neig = list_col.index(eigenval_tag) # index before
list_par = list_col[:Neig] # list of parameters
## create wavelength or frequency colunm
if frequency_tag not in list_col: data_frame[frequency_tag] = scipy.constants.c/data_frame[wavelength_tag]
if wavelength_tag not in list_col: data_frame[wavelength_tag] = scipy.constants.c/data_frame[frequency_tag]
## setting frequency column as the standard for internal use
if frequency_tag not in list_par:
list_par.remove(wavelength_tag)
list_par.append(frequency_tag)
## PML filtering
data_frame = data_frame[data_frame[pml_tag] < pml_thre] # Filter the light that goes to the Pml
## TE and TM modes separation
data_frame[pol_tag] = np.array(pandas.cut(np.array(data_frame[TM_tag]), [0, tm_thre, data_frame[TM_tag].max()], labels = ['TE', 'TM']))
list_tag = [pol_tag]
## waveguide and bus separation
if bus_tag in list_col:
data_frame[ring_bus_tag] = np.array(pandas.cut((np.array(data_frame[bus_tag])/np.array(data_frame[ring_tag]))**(1./4), [0, bus_thre, 1000000], labels = ['ring', 'bus']))
# data_frame[ring_bus_tag] = np.array(pandas.cut(np.array(data_frame[ring_tag]), [0, ring_thre, 100000], labels = ['','ring']))
list_tag = list_tag + [ring_bus_tag]
## transverse mode separation
list_group = list_par + list_tag # list to filter the first time
data_frame = data_frame.groupby(list_group, as_index = False).apply(set_transverse_mode, order_tag) # transverse order
return data_frame, list_group + [order_tag]
####
def find_idx_nearest_val(array, value):
'''Find the index of the array element nearest to the given value'''
idx_sorted = np.argsort(array)
sorted_array = np.array(array[idx_sorted])
idx = np.searchsorted(sorted_array, value, side="left")
if idx >= len(array):
idx_nearest = idx_sorted[len(array)-1]
elif idx == 0:
idx_nearest = idx_sorted[0]
else:
if abs(value - sorted_array[idx-1]) < abs(value - sorted_array[idx]):
idx_nearest = idx_sorted[idx-1]
else:
idx_nearest = idx_sorted[idx]
return idx_nearest
###
def dispersion_calculation(data_frame, frequency_tag = 'freq', wavelength_tag = 'wlength',
neff_tag = 'neff', wlength0 = None):
""" functions for dispersion calculation """
## initial definitions
wlength = np.array(data_frame[wavelength_tag]) # wavelength
omega = 2*np.pi*np.array(data_frame[frequency_tag]) # angular frequency
beta = np.array(data_frame[neff_tag])*omega/scipy.constants.c # propagation constant
## dealing with circular waveguides
if 'r0' in data_frame.columns:
rad0 = np.array(data_frame['r0'])
beta = beta/rad0
else: rad0 = 1.0
## dispersion calculations
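# NOTE: Df is not defined in this file; it is assumed to be a numerical derivative
# with respect to omega provided elsewhere in the cdisp package.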
beta1 = Df(beta*rad0, omega)/rad0 # beta 1
beta2 = Df(beta1*rad0, omega)/rad0 # beta 2
beta3 = Df(beta2*rad0, omega)/rad0 # beta 3
beta4 = Df(beta3*rad0, omega)/rad0 # beta 4
D = -2*np.pi*scipy.constants.c/wlength*beta2 # D parameter
## set up the wlength for phase matching
if wlength0 is None: wlength0 = wlength[int(wlength.shape[0]/2)]
elif wlength0 < min(wlength): wlength0 = min(wlength)
elif wlength0 > max(wlength): wlength0 = max(wlength)
omega0 = 2*np.pi*scipy.constants.c/wlength0
## phase matching calculation
idx0 = find_idx_nearest_val(omega, omega0)
Dbeta = calculate_Dbeta(beta, idx0) # propagation constant in
Dbeta2 = beta2[idx0]*(omega - omega[idx0])**2 + beta4[idx0]/12*(omega - omega[idx0])**4
norm_gain = calculate_gain(Dbeta, 1.0e4)
## outputs
n_clad, n_core = 1.0, 3.5
output_tags = ['beta', 'beta1', 'beta2', 'beta3', 'beta4', 'D', 'Dbeta', 'Dbeta_approx', 'beta_norm', 'beta_clad', 'beta_core',
'n_clad', 'n_core', 'gain', 'ng', 'fsr']
outputs = [beta, beta1, beta2, beta3, beta4, D, Dbeta, Dbeta2, beta/scipy.constants.c, n_clad*omega/scipy.constants.c, n_core*omega/scipy.constants.c,
n_clad, n_core, norm_gain, beta1*scipy.constants.c, 1/(2*np.pi*rad0*beta1)]
for m, output in enumerate(outputs):
data_frame[output_tags[m]] = output
return data_frame
###
def dispersion_analysis(data_frame, list0, frequency_tag = 'freq'):
## list of columns
list0.remove(frequency_tag)
## remove short data_frames
Lmin = 3
data_frame = data_frame.groupby(list0, as_index = False).filter(lambda x: len(x) >= Lmin)
## calculate dispersion
data_frame = data_frame.groupby(list0, as_index = False).apply(dispersion_calculation)
return data_frame
##
def calculate_Dbeta(x, idx0):
'''calculate Dbeta for a set of date with equally spaced frequencies'''
d = x.shape[0] # array dimension
Dx = np.full(d, np.nan)
idxm = max(-idx0, idx0 - d + 1) # minimum index
idxp = min(idx0 + 1, d - idx0) # maximum index
for idx in range(idxm, idxp):
xm, xp = np.roll(x, idx), np.roll(x, -idx)
Dx[idx0 + idx] = xm[idx0] + xp[idx0] - 2*x[idx0]
return Dx
##
def calculate_gain(Dbeta, Pn):
'''calculate the gain of the 4 wave mixing
** here Pn is normalized such as Pn = gamma*P0'''
return np.sqrt(Pn**2 - (Dbeta/2 + Pn)**2)
| 48.77707
| 177
| 0.657482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,536
| 0.331157
|
97f8adb75c2bfb4df0070282016a4be3b8f42059
| 1,280
|
py
|
Python
|
appname/predict.py
|
Lambda-ds-31/build_week_spotify
|
ba5c77b457f8180f80883c61a5011eb3b38ffc95
|
[
"MIT"
] | null | null | null |
appname/predict.py
|
Lambda-ds-31/build_week_spotify
|
ba5c77b457f8180f80883c61a5011eb3b38ffc95
|
[
"MIT"
] | 1
|
2021-10-20T20:50:04.000Z
|
2021-10-20T20:50:04.000Z
|
appname/predict.py
|
Lambda-ds-31/build_week_spotify
|
ba5c77b457f8180f80883c61a5011eb3b38ffc95
|
[
"MIT"
] | 1
|
2022-02-18T13:51:29.000Z
|
2022-02-18T13:51:29.000Z
|
import numpy as np
from data_prep import data
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from os import getenv
client_id = getenv('CLIENT_ID')
client_id_secret = getenv('CLIENT_ID_SECRET')
manager = SpotifyClientCredentials(
client_id = client_id,
client_secret= client_id_secret)
sp = spotipy.Spotify(client_credentials_manager=manager)
def find_knn(track_id, df, k=6):
"""
Takes in the user input song's track_id and the prepped dataframe.
Outputs a list of k-1 nearest neighbors based on audio features
"""
features = sp.audio_features(track_id)[0]
df = data()
user_track = np.array(
[
features['acousticness'],
features['danceability'],
features['duration_ms'],
features['energy'],
features['instrumentalness'],
features['liveness'],
features['loudness'],
features['speechiness'],
features['tempo'],
features['valence']
]
)
df['distances'] = np.linalg.norm(df - user_track, axis=1)
nn_ids = df.sort_values(by='distances').index.to_list()[:k]
if nn_ids[0] == track_id:
nn_ids = nn_ids[1:]
else:
nn_ids = nn_ids[:-1]
return nn_ids
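# --- A self-contained sketch (toy numbers, not Spotify data) of the distance step used
# above: compute the Euclidean distance from each row of a feature matrix to the query
# vector with np.linalg.norm and keep the k smallest.
toy_features = np.array([[0.10, 0.80], [0.20, 0.70], [0.90, 0.10]])
toy_query = np.array([0.15, 0.75])
toy_distances = np.linalg.norm(toy_features - toy_query, axis=1)
toy_nearest = toy_distances.argsort()[:2]  # indices of the two closest rows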
| 27.826087
| 71
| 0.630469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 318
| 0.248438
|
97f988da234108443107eea262cb4a176c0459c9
| 176
|
py
|
Python
|
tests/cpydiff/modules_array_deletion.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 692
|
2016-12-19T23:25:35.000Z
|
2022-03-31T14:20:48.000Z
|
tests/cpydiff/modules_array_deletion.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 509
|
2017-03-28T19:37:18.000Z
|
2022-03-31T20:31:43.000Z
|
tests/cpydiff/modules_array_deletion.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 228
|
2016-12-19T05:03:30.000Z
|
2022-03-22T18:13:00.000Z
|
"""
categories: Modules,array
description: Array deletion not implemented
cause: Unknown
workaround: Unknown
"""
import array
a = array.array('b', (1, 2, 3))
del a[1]
print(a)
| 16
| 43
| 0.715909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.653409
|
97fa4f4535ac67853dbadcc3ffdf0124a1fb7efd
| 10,001
|
py
|
Python
|
jaysblog/models.py
|
cRiii/jaysblog
|
f96ecd82f17750a47147ae3c5e72cf1320be21e5
|
[
"MIT"
] | 5
|
2019-10-14T01:51:02.000Z
|
2019-11-07T15:01:14.000Z
|
jaysblog/models.py
|
cRiii/jaysblog
|
f96ecd82f17750a47147ae3c5e72cf1320be21e5
|
[
"MIT"
] | 1
|
2019-11-07T06:58:26.000Z
|
2019-11-07T06:58:26.000Z
|
jaysblog/models.py
|
cRiii/jaysblog
|
f96ecd82f17750a47147ae3c5e72cf1320be21e5
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Time : 2019/9/17 15:07
@Author : Jay Chen
@FileName: models.py
@GitHub : https://github.com/cRiii
"""
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from jaysblog.extensions import db
from flask_login import UserMixin
class BaseModel(object):
# Base model class: adds creation and update timestamps to all models
create_time = db.Column(db.DateTime, default=datetime.utcnow)
update_time = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
class User(BaseModel, db.Model, UserMixin):
"""
UserMixin represents an authenticated user.
is_authenticated: True if the user has been authenticated, otherwise False
is_active: True if the user is allowed to log in, otherwise False
is_anonymous: True if no user is currently logged in (anonymous user), otherwise False
get_id(): returns the user's unique identifier as a unicode string
"""
__tablename__ = 'b_users'
id = db.Column(db.Integer, primary_key=True)  # user id
nick_name = db.Column(db.String(32), nullable=False)  # username
password_hash = db.Column(db.String(128), nullable=False)  # user password (stored as a hash)
mobile = db.Column(db.String(11), unique=True)  # mobile phone number
email = db.Column(db.String(64), unique=True, nullable=True)  # email address
desc = db.Column(db.Text)  # personal profile
location = db.Column(db.String(128))  # address
avatar_url = db.Column(db.String(256))  # user avatar path
is_admin = db.Column(db.Boolean, default=False)  # whether the user is an administrator
last_login_time = db.Column(db.DateTime, default=datetime.utcnow)  # last login time
is_delete = db.Column(db.Integer, default=1)  # whether the user is deleted: 1 normal, 0 deleted
gender = db.Column(
db.Enum(
'MAN',  # male
'WOMAN'  # female
), default='MAN'
)
@property
def password(self):
raise AttributeError(u'该属性不可读')
@password.setter
def password(self, value):
"""
generate_password_hash(password,method='pbkdf2:sha256',salt_length=8)
method specifies the hashing method used to compute the hash
salt_length specifies the length of the salt
"""
self.password_hash = generate_password_hash(value)
def check_password(self, password):
"""
Compares the given password against the stored hash and returns a boolean.
check_password_hash(pwhash,password)
"""
return check_password_hash(self.password_hash, password)
def to_dict(self):
res_dict = {
"id": self.id,
"nick_name": self.nick_name,
"email": self.email,
"desc": self.desc,
"avatar_url": self.avatar_url,
"gender": self.gender,
"is_admin": self.is_admin,
}
return res_dict
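# --- A small standalone illustration of the werkzeug helpers the password property above
# relies on (example values, not part of the model): only a salted hash is ever stored.
#     hashed = generate_password_hash('s3cret')   # e.g. 'pbkdf2:sha256:...'
#     check_password_hash(hashed, 's3cret')       # True
#     check_password_hash(hashed, 'wrong')        # False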
class Post(BaseModel, db.Model):
__tablename__ = 'b_posts'
id = db.Column(db.Integer, primary_key=True)  # post id
post_title = db.Column(db.String(256), nullable=False)  # post title
post_user_id = db.Column(db.Integer, nullable=False)  # id of the user who created the post
post_digest = db.Column(db.String(512), nullable=True)  # post summary
post_content = db.Column(db.Text, nullable=False)  # post content
post_clicks = db.Column(db.Integer, default=0)  # number of views
post_like_num = db.Column(db.Integer, default=0)  # number of likes
post_index_image_url = db.Column(db.String(256))  # image URL for the home page list
post_status = db.Column(db.Integer, default=1)  # post status
post_can_comment = db.Column(db.Integer, default=1)  # whether the post can currently be commented on
post_comments = db.relationship('Comment', backref='comment_post')  # comments on this post
post_category = db.relationship('Category', back_populates='cg_posts')
post_category_id = db.Column(db.Integer, db.ForeignKey('b_category.id'), nullable=False)  # post category
def get_comment_length(self):
comments = []
if self.post_comments:
for comment in self.post_comments:
if comment.comment_status == 1:
comments.append(comment)
return len(comments)
def to_dict(self):
res_dict = {
"id": self.id,
"post_title": self.post_title,
"post_user_id": self.post_user_id,
"post_digest": self.post_digest if self.post_digest else "",
"post_clicks": self.post_clicks,
"post_like_num": self.post_like_num,
"post_index_image_url": self.post_index_image_url if self.post_index_image_url else "",
"post_category": self.post_category.to_dict() if self.post_category else None,
"post_comments_count": self.get_comment_length(),
"post_create_time": self.create_time,
"post_update_time": self.update_time,
}
return res_dict
def to_dict_details(self):
res_dict = {
"id": self.id,
"post_title": self.post_title,
"post_user_id": self.post_user_id,
"post_content": self.post_content,
"post_clicks": self.post_clicks,
"post_like_num": self.post_like_num,
"post_can_comment": self.post_can_comment,
"post_create_time": self.create_time,
"post_category": self.post_category.to_dict() if self.post_category else None,
"post_comments_count": self.get_comment_length(),
}
return res_dict
class Category(BaseModel, db.Model):
__tablename__ = 'b_category'
id = db.Column(db.Integer, primary_key=True)  # category id
cg_name = db.Column(db.String(64), nullable=False, unique=True)  # category name
cg_posts = db.relationship('Post', back_populates='post_category')  # posts in this category
def to_dict(self):
res_dict = {
"id": self.id,
"cg_name": self.cg_name,
"cg_posts_count": len(self.cg_posts) if self.cg_posts else 0
}
return res_dict
class Comment(BaseModel, db.Model):
__tablename__ = 'b_comments'
id = db.Column(db.Integer, primary_key=True)  # comment id
comment_user_id = db.Column(db.Integer, nullable=False)  # id of the commenting user
comment_content = db.Column(db.Text, nullable=False)  # comment content
comment_from_admin = db.Column(db.Integer, default=0)  # whether the comment is from an administrator
comment_status = db.Column(db.Integer, default=0)  # review status: -1 unavailable, 0 under review, 1 approved
comment_post_id = db.Column(db.Integer, db.ForeignKey('b_posts.id'), nullable=False)  # id of the post this comment belongs to
comment_reply = db.relationship('Reply', backref='reply_comment')  # replies to this comment
def to_dict(self):
comment_replies = []
if self.comment_reply:
for reply in self.comment_reply:
if reply.reply_status == 1:
comment_replies.append(reply.to_dict())
user = User.query.filter_by(id=self.comment_user_id).first()
res_dict = {
"id": self.id,
"comment_user_name": user.nick_name,
"comment_user_avatar_url": user.avatar_url,
"comment_content": self.comment_content,
"comment_from_admin": user.is_admin,
"comment_post_id": self.comment_post_id,
"comment_replies": comment_replies,
"comment_create_time": self.create_time,
"comment_update_time": self.update_time,
}
return res_dict
class Reply(BaseModel, db.Model):
__tablename__ = 'b_reply'
id = db.Column(db.Integer, primary_key=True)  # reply id
reply_from_user = db.Column(db.String(32), nullable=False)  # user who wrote the reply
reply_to_user = db.Column(db.String(32), nullable=False)  # user the reply is addressed to
reply_content = db.Column(db.Text, nullable=False)  # reply content
reply_status = db.Column(db.Integer, default=0)  # review status: -1 unavailable, 0 under review, 1 approved
reply_comment_id = db.Column(db.Integer, db.ForeignKey('b_comments.id'), nullable=False)  # id of the comment this reply belongs to
def to_dict(self):
user = User.query.filter_by(nick_name=self.reply_from_user).first()
res_dict = {
"id": self.id,
"reply_from_user": self.reply_from_user,
"reply_to_user": self.reply_to_user,
"reply_content": self.reply_content,
"reply_comment_id": self.reply_comment_id,
"reply_create_time": self.create_time,
"reply_update_time": self.update_time,
"reply_user_is_admin": user.is_admin,
"reply_user_avatar_url": user.avatar_url,
}
return res_dict
class Journey(BaseModel, db.Model):
__tablename__ = 'b_journey'
id = db.Column(db.Integer, primary_key=True)  # journey id
journey_title = db.Column(db.String(32), nullable=False)  # journey title
journey_desc = db.Column(db.Text, nullable=False)  # journey details
journey_time = db.Column(db.DateTime, default=datetime.utcnow)  # journey time
def to_dict(self):
res_dict = {
"id": self.id,
"journey_title": self.journey_title,
"journey_desc": self.journey_desc,
"journey_time": self.journey_time
}
return res_dict
class MessageBoard(BaseModel, db.Model):
__tablename__ = 'b_board'
id = db.Column(db.Integer, primary_key=True)  # message board entry id
board_user = db.Column(db.String(32), nullable=False)  # user who left the message
board_desc = db.Column(db.Text, nullable=False)  # message content
board_status = db.Column(db.Integer, nullable=False, default=0)  # message status: -1 unavailable, 0 under review, 1 approved
board_email = db.Column(db.String(50), nullable=False)  # email address for replies
def to_dict(self):
res_dict = {
"id": self.id,
"board_user": self.board_user,
"board_desc": self.board_desc,
"board_status": self.board_status,
"board_create_time": self.create_time,
"board_update_time": self.update_time,
"board_email": self.board_email,
}
return res_dict
class UsersLikePosts(BaseModel, db.Model):
__tablename__ = 'b_users_like_posts'
id = db.Column(db.Integer, primary_key=True)  # primary key
user_id = db.Column(db.Integer, nullable=False)
user_like_post_id = db.Column(db.Integer, nullable=False)
def to_dict(self):
res_dict = {
"id": self.id,
"user_id": self.user_id,
"user_like_post_id": self.user_like_post_id,
}
return res_dict
| 35.97482
| 107
| 0.633137
| 10,450
| 0.966072
| 0
| 0
| 404
| 0.037349
| 0
| 0
| 3,054
| 0.282333
|
97fa5c7d0604d6e2fc363a4c15650e9b99bf74f3
| 602
|
py
|
Python
|
112_Path Sum.py
|
Alvin1994/leetcode-python3-
|
ba2bde873c925554cc39f2bd13be81967713477d
|
[
"Apache-2.0"
] | null | null | null |
112_Path Sum.py
|
Alvin1994/leetcode-python3-
|
ba2bde873c925554cc39f2bd13be81967713477d
|
[
"Apache-2.0"
] | null | null | null |
112_Path Sum.py
|
Alvin1994/leetcode-python3-
|
ba2bde873c925554cc39f2bd13be81967713477d
|
[
"Apache-2.0"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def hasPathSum(self, root: 'TreeNode', sum: 'int') -> 'bool':
if not root:
return False
def helper(node,val):
if not node:
return False
val -= node.val
if node.left is None and node.right is None:
return val == 0
return helper(node.left, val) or helper(node.right, val)
return helper(root,sum)
| 28.666667
| 68
| 0.521595
| 429
| 0.712625
| 0
| 0
| 0
| 0
| 0
| 0
| 177
| 0.29402
|
97faabe77e17c6e2ce8553519c92f2c578ef3f08
| 1,509
|
py
|
Python
|
telemanom/_globals.py
|
tonyzeng2019/telemanom
|
ee1c9252c6ffc9581995aaf479f0d79cf0a2e914
|
[
"Apache-2.0"
] | null | null | null |
telemanom/_globals.py
|
tonyzeng2019/telemanom
|
ee1c9252c6ffc9581995aaf479f0d79cf0a2e914
|
[
"Apache-2.0"
] | null | null | null |
telemanom/_globals.py
|
tonyzeng2019/telemanom
|
ee1c9252c6ffc9581995aaf479f0d79cf0a2e914
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import yaml
import json
import sys
import os
sys.path.append('../venv/lib/python3.5/site-packages')
from elasticsearch import Elasticsearch
sys.path.append('../telemanom')
class Config:
'''Loads parameters from config.yaml into global object'''
def __init__(self, path_to_config):
if not os.path.isfile(path_to_config):
path_to_config = '../%s' % path_to_config
setattr(self, "path_to_config", path_to_config)
dictionary = None
with open(path_to_config, "r") as f:
dictionary = yaml.load(f.read())
try:
for k,v in dictionary.items():
setattr(self, k, v)
except:
for k,v in dictionary.iteritems():
setattr(self, k, v)
def build_group_lookup(self, path_to_groupings):
channel_group_lookup = {}
with open(path_to_groupings, "r") as f:
groupings = json.loads(f.read())
for subsystem in groupings.keys():
for subgroup in groupings[subsystem].keys():
for chan in groupings[subsystem][subgroup]:
channel_group_lookup[chan["key"]] = {}
channel_group_lookup[chan["key"]]["subsystem"] = subsystem
channel_group_lookup[chan["key"]]["subgroup"] = subgroup
return channel_group_lookup
| 27.944444
| 82
| 0.561299
| 1,294
| 0.857522
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.139165
|
97fbc7c518483b22e3bd3fb0a4313e038f0a4e05
| 508
|
py
|
Python
|
nanome/_internal/_network/_commands/_serialization/_open_url.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
nanome/_internal/_network/_commands/_serialization/_open_url.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
nanome/_internal/_network/_commands/_serialization/_open_url.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
from nanome._internal._util._serializers import _StringSerializer
from nanome._internal._util._serializers import _TypeSerializer
class _OpenURL(_TypeSerializer):
def __init__(self):
self.string = _StringSerializer()
def version(self):
return 0
def name(self):
return "OpenURL"
def serialize(self, version, value, context):
context.write_using_serializer(self.string, value)
def deserialize(self, version, context):
raise NotImplementedError
| 25.4
| 65
| 0.720472
| 375
| 0.738189
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.017717
|
97fd1501d115786d6770847e5c0def668bf7ecbe
| 196
|
py
|
Python
|
questoes/questao1.py
|
raulbarcelos/Lista-de-Exercicios-PO
|
70933896108b5f9fbdbf541c389ab9354d6ceaf2
|
[
"MIT"
] | null | null | null |
questoes/questao1.py
|
raulbarcelos/Lista-de-Exercicios-PO
|
70933896108b5f9fbdbf541c389ab9354d6ceaf2
|
[
"MIT"
] | null | null | null |
questoes/questao1.py
|
raulbarcelos/Lista-de-Exercicios-PO
|
70933896108b5f9fbdbf541c389ab9354d6ceaf2
|
[
"MIT"
] | null | null | null |
print("********************************")
print("********** QUESTÃO 01 **********")
print("********************************")
print("******** RAUL BARCELOS *********")
print()
print("Olá mundo")
| 24.5
| 41
| 0.30102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.752525
|
97fdbd42de4debdf4f69ae07026eb489c9f50129
| 2,772
|
py
|
Python
|
CorpusToolkit/ply_parser/test.py
|
howl-anderson/tools_for_corpus_of_people_daily
|
8178d9a62c356f83723d42ced60f8269eed84861
|
[
"Apache-2.0"
] | 243
|
2018-09-12T01:05:03.000Z
|
2022-03-30T11:25:59.000Z
|
CorpusToolkit/ply_parser/test.py
|
nkkkyyy/tools_for_corpus_of_people_daily
|
8178d9a62c356f83723d42ced60f8269eed84861
|
[
"Apache-2.0"
] | 3
|
2018-10-18T10:13:07.000Z
|
2020-09-10T06:34:40.000Z
|
CorpusToolkit/ply_parser/test.py
|
nkkkyyy/tools_for_corpus_of_people_daily
|
8178d9a62c356f83723d42ced60f8269eed84861
|
[
"Apache-2.0"
] | 56
|
2018-09-11T12:56:20.000Z
|
2021-11-09T04:02:00.000Z
|
import logging
from CorpusToolkit.ply_parser import make_parser, lexer
logging.basicConfig(
level=logging.DEBUG,
filename="parselog.txt",
filemode="w",
format="%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
test_data = (
"19980101-01-001-002/m 中共中央/nt 总书记/n 、/wu 国家/n 主席/n 江/nrf 泽民/nrg",
"19980101-01-001-006/m 在/p 1998年/t 来临/vi 之际/f ,/wd 我/rr 十分/dc 高兴/a 地/ui 通过/p [中央/n 人民/n 广播/vn 电台/n]nt 、/wu [中国/ns 国际/n 广播/vn 电台/n]nt 和/c [中央/n 电视台/n]nt ,/wd 向/p 全国/n 各族/rz 人民/n ,/wd 向/p [香港/ns 特别/a 行政区/n]ns 同胞/n 、/wu 澳门/ns 和/c 台湾/ns 同胞/n 、/wu 海外/s 侨胞/n ,/wd 向/p 世界/n 各国/rz 的/ud 朋友/n 们/k ,/wd 致以/vt 诚挚/a 的/ud 问候/vn 和/c 良好/a 的/ud 祝愿/vn !/wt",
"19980131-04-013-024/m 那{na4}/rz 音韵/n 如/vt 轻柔/a 的/ud 夜风/n ,/wd ",
"19980103-04-003-007/m 图文/n 兼/vt 重/a 的/ud 中国/ns 文明史/n ,/wd 就/p 方向/n 言/Vg 有利于/vt 历史学/n 和/c 考古学/n 的/ud 进一步/d 结合/vt 。/wj 考古学/n 本身/rz 是/vl 具有/vt 独立/a 的/ud 理论/n 和/c 方法/n 的/ud 学科/n ,/wd 然而/c 中国/ns 考古学/n 从/p 一/d 开始/vt 便/d 以/p 同/p 历史学/n 的/ud 密切/ad 结合/vt 为/vl 特点/n 。/wj 大家/rr 知道/vt ,/wd 王/nrf 国维/nrg 先生/n 二十/m 年代/n 在/p [清华/jn 国学/n 研究院/n]nt 的/ud 讲义/n 《/wkz 古史/n 新/a 证/n 》/wky 中/f 提出/vt 的/ud “/wyz 二/m 重/qc 证据法/n ”/wyy ,/wd 在/p 方法论/n 上{shang5}/f 为{wei4}/p 考古学/n 的/ud 建立/vn 发展/vn 开拓/vt 了/ul 道路/n 。/wj “/wyz 二/m 重/qc 证据法/n ”/wyy 指/vt 文献/n 同/p 文物/n 的/ud 互相/d 印证/vt ,/wd 即/vl 蕴涵/vt 着/uz 历史/n 、/wu 考古/n 的/ud 结合/vn 。/wj 亲手/d 在/p 中国/ns 开展/vt 考古学/n 工作/vn 的/ud 考古学家/n ,/wd 都/d 以/p 探索/vt 和/c 重建/vt 古史/n 为/vl 职/Ng 志/n 。/wj 最/dc 早/a 得到/vt 大规模/d 系统/ad 发掘/vt 的/ud 遗址/n 殷墟/ns ,/wd 其/rz 被/p 选定/vt 正是/vl 出于/vt 这样/rz 的/ud 要求/n 。/wj 长期/d 领导/vt [中国/ns 科学院/n (/wkz 后/f 属/vl [中国/ns 社会/n 科学院/n]nt )/wky 考古/vn 研究所/n]nt 的/ud 夏/nrf 鼐/nrg 先生/n ,/wd 1984年/t 在/p 《/wkz 什么/ryw 是/vl 考古学/n 》/wky 文/Ng 中/f 说/vt ,/wd 考古学/n 和/p 利用/vt 文献/n 记载/vn 进行/vx 研究/vn 的/ud 狭义/b 历史学/n 不/df 同/vt ,/wd 研究/vt 的/ud 对象/n 只/d 是/vl 物质/n 的/ud 遗存/vn ,/wd 但/c 两者/rz 同/d 以/p 恢复/vt 人类/n 历史/n 的/ud 本来面目/in 为/vl 目标/n ,/wd 如/vt 车{che1}/n 之/u 两/m 轮/Ng ,/wd 鸟/n 之/u 两翼/n 。/wj 对于/p 了解/vt 中国/ns 有着/vt 悠久/a 的/ud 文明/n 和/c 丰富/a 的/ud 文献/n 传统/n 的/ud 人们/n 来说/u ,/wd 中国/ns 考古学/n 的/ud 这种/r 特点/n 乃是/vl 自然/a 的/ud 。/wj"
)
s = test_data[3]
def test_lexer():
lexer.input(s)
while True:
tok = lexer.token()
if not tok:
break # No more input
print(tok.type, tok.value, tok.lineno, tok.lexpos)
def test_parser():
parser = make_parser()
result = parser.parse(s)
for token in result:
print(token.token, token.pinyin, token.pos)
| 72.947368
| 1,579
| 0.533911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,224
| 0.846639
|
97fdbe6160aa3872cb3be14af73e7667fe00624c
| 978
|
py
|
Python
|
homeassistant/components/hue/v2/helpers.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/hue/v2/helpers.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/hue/v2/helpers.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Helper functions for Philips Hue v2."""
from __future__ import annotations
def normalize_hue_brightness(brightness: float | None) -> float | None:
"""Return calculated brightness values."""
if brightness is not None:
# Hue uses a range of [0, 100] to control brightness.
brightness = float((brightness / 255) * 100)
return brightness
def normalize_hue_transition(transition: float | None) -> float | None:
"""Return rounded transition values."""
if transition is not None:
# hue transition duration is in milliseconds and round them to 100ms
transition = int(round(transition, 1) * 1000)
return transition
def normalize_hue_colortemp(colortemp: int | None) -> int | None:
"""Return color temperature within Hue's ranges."""
if colortemp is not None:
# Hue only accepts a range between 153..500
colortemp = min(colortemp, 500)
colortemp = max(colortemp, 153)
return colortemp
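# Worked values (computed from the functions above) for quick reference:
#     normalize_hue_brightness(255) -> 100.0  (255 / 255 * 100)
#     normalize_hue_transition(1.5) -> 1500   (seconds rounded to 0.1 s, then in milliseconds)
#     normalize_hue_colortemp(600)  -> 500    (clamped into Hue's 153..500 range)
#     normalize_hue_colortemp(100)  -> 153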
| 32.6
| 76
| 0.682004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.345603
|
97fe866f84f325af30eccf7ed7f76920a2b9b84a
| 186
|
py
|
Python
|
incapsula/__init__.py
|
zanachka/incapsula-cracker-py3
|
be1738d0e649e91de75583b694372bc04547fa85
|
[
"Unlicense"
] | null | null | null |
incapsula/__init__.py
|
zanachka/incapsula-cracker-py3
|
be1738d0e649e91de75583b694372bc04547fa85
|
[
"Unlicense"
] | null | null | null |
incapsula/__init__.py
|
zanachka/incapsula-cracker-py3
|
be1738d0e649e91de75583b694372bc04547fa85
|
[
"Unlicense"
] | null | null | null |
from .errors import IncapBlocked, MaxRetriesExceeded, RecaptchaBlocked
from .parsers import ResourceParser, WebsiteResourceParser, IframeResourceParser
from .session import IncapSession
| 46.5
| 80
| 0.876344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
97feddd1f63ca0959b0312d053d59692a6f28e9d
| 3,646
|
py
|
Python
|
sdk/python/pulumi_civo/get_network.py
|
dirien/pulumi-civo
|
f75eb1482bade0d21fb25c9e20e6838791518226
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-04T12:27:02.000Z
|
2022-03-14T13:16:43.000Z
|
sdk/python/pulumi_civo/get_network.py
|
dirien/pulumi-civo
|
f75eb1482bade0d21fb25c9e20e6838791518226
|
[
"ECL-2.0",
"Apache-2.0"
] | 85
|
2020-08-17T19:03:57.000Z
|
2022-03-25T19:17:57.000Z
|
sdk/python/pulumi_civo/get_network.py
|
dirien/pulumi-civo
|
f75eb1482bade0d21fb25c9e20e6838791518226
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2020-08-04T12:27:03.000Z
|
2022-03-24T00:56:24.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetNetworkResult',
'AwaitableGetNetworkResult',
'get_network',
]
@pulumi.output_type
class GetNetworkResult:
"""
A collection of values returned by getNetwork.
"""
def __init__(__self__, default=None, id=None, label=None, name=None, region=None):
if default and not isinstance(default, bool):
raise TypeError("Expected argument 'default' to be a bool")
pulumi.set(__self__, "default", default)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if label and not isinstance(label, str):
raise TypeError("Expected argument 'label' to be a str")
pulumi.set(__self__, "label", label)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
@property
@pulumi.getter
def default(self) -> bool:
"""
If is the default network.
"""
return pulumi.get(self, "default")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
A unique ID that can be used to identify and reference a Network.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
The label used in the configuration.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the network.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def region(self) -> Optional[str]:
return pulumi.get(self, "region")
class AwaitableGetNetworkResult(GetNetworkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkResult(
default=self.default,
id=self.id,
label=self.label,
name=self.name,
region=self.region)
def get_network(id: Optional[str] = None,
label: Optional[str] = None,
region: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult:
"""
Use this data source to access information about an existing resource.
:param str id: The unique identifier of an existing Network.
:param str label: The label of an existing Network.
:param str region: The region of an existing Network.
"""
__args__ = dict()
__args__['id'] = id
__args__['label'] = label
__args__['region'] = region
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('civo:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value
return AwaitableGetNetworkResult(
default=__ret__.default,
id=__ret__.id,
label=__ret__.label,
name=__ret__.name,
region=__ret__.region)
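# --- A hedged usage sketch (label value invented) of the data source defined above,
# as it would be called from a Pulumi program:
#     import pulumi_civo as civo
#     net = civo.get_network(label="my-network")
#     pulumi.export("network_id", net.id)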
| 31.162393
| 120
| 0.620954
| 2,155
| 0.591059
| 242
| 0.066374
| 1,836
| 0.503566
| 0
| 0
| 1,167
| 0.320077
|
97ff07ce80697d0e69e6e48e82606287cb5eb7ee
| 744
|
py
|
Python
|
Hard/longest_valid_parentheses.py
|
BrynjarGeir/LeetCode
|
dbd57e645c5398dec538b6466215b61491c8d1d9
|
[
"MIT"
] | null | null | null |
Hard/longest_valid_parentheses.py
|
BrynjarGeir/LeetCode
|
dbd57e645c5398dec538b6466215b61491c8d1d9
|
[
"MIT"
] | null | null | null |
Hard/longest_valid_parentheses.py
|
BrynjarGeir/LeetCode
|
dbd57e645c5398dec538b6466215b61491c8d1d9
|
[
"MIT"
] | null | null | null |
from collections import deque
class Solution:
def longestValidParentheses(self, s: str) -> int:
if len(s) == 1 or s == '': return 0
opened = deque()
for i,p in enumerate(s):
if p == '(': opened.append(i)
else:
if opened:
if s[opened[-1]] == '(': opened.pop()
else: opened.append(i)
else:
opened.append(i)
if not opened: return len(s)
else:
longest = 0
a, b = len(s), 0
while opened:
b = opened.pop()
longest = max(longest, a-b-1)
a = b
longest = max(longest, a)
return longest
| 32.347826
| 57
| 0.424731
| 714
| 0.959677
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.010753
|
97ff3603368750b9661b92eb04ae9042db5bd4fc
| 2,358
|
py
|
Python
|
IMFlask/config.py
|
iml1111/IMFlask
|
96af28460365c305e92ca2720fe6b015713c578f
|
[
"MIT"
] | 2
|
2020-09-07T11:33:41.000Z
|
2020-09-08T14:47:40.000Z
|
IMFlask/config.py
|
iml1111/IMFlask
|
96af28460365c305e92ca2720fe6b015713c578f
|
[
"MIT"
] | 1
|
2020-09-07T11:29:00.000Z
|
2022-03-31T10:01:06.000Z
|
IMFlask/config.py
|
iml1111/IMFlask
|
96af28460365c305e92ca2720fe6b015713c578f
|
[
"MIT"
] | 2
|
2020-10-06T18:25:46.000Z
|
2021-09-09T16:00:07.000Z
|
'''
Flask Application Config
'''
import os
from logging.config import dictConfig
BASEDIR = os.path.abspath(os.path.dirname(__file__))
class Config:
'''Common Config'''
JWT_SECRET_KEY = os.environ.get('FLASK_JWT_SECRET_KEY')
# test only
TEST_ACCESS_TOKEN = os.environ.get('FLASK_TEST_ACCESS_TOKEN')
ADMIN_ID = os.environ.get('FLASK_ADMIN_ID', "iml")
ADMIN_PW = os.environ.get('FLASK_ADMIN_PW', "iml")
# DB_PROXY: basic, mysql, mongodb, redis, all
DB_PROXY = os.environ.get('FLASK_DB_PROXY')
if DB_PROXY in ['mysql', 'all']:
MYSQL_URI = os.environ.get('FLASK_MYSQL_URI')
if DB_PROXY in ['mongodb', 'all']:
MONGO_URI = os.environ.get('FLASK_MONGO_URI')
MONGO_DB_NAME = os.environ.get('FLASK_MONGO_DB_NAME')
    if DB_PROXY in ['redis', 'all']:
REDIS_HOST = os.environ.get('FLASK_REDIS_HOST')
REDIS_PORT = os.environ.get('FLASK_REDIS_PORT')
REDIS_PW = os.environ.get('FLASK_REDIS_PW')
ALLOWED_EXTENSION = {'txt', 'docs', 'md', 'hwp', 'ppt', 'pptx'}
SLOW_API_TIME = 0.5
@staticmethod
def init_app(app):
        '''Global init_app function'''
class TestingConfig(Config):
    '''Test-only Config'''
DEBUG = True
TESTING = True
class DevelopmentConfig(Config):
    '''Config for the development environment'''
DEBUG = True
TESTING = False
class ProductionConfig(Config):
    '''Config for the production environment'''
DEBUG = False
TESTING = False
@staticmethod
def init_app(app):
        '''Register and configure the logger'''
dictConfig({
'version': 1,
'formatters': {
'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}
},
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': './server_error.log',
'maxBytes': 1024 * 1024 * 5,
'backupCount': 5,
'formatter': 'default',
},
},
'root': {
'level': 'INFO',
'handlers': ['file']
}
})
config = {
'development':DevelopmentConfig,
'production':ProductionConfig,
'testing':TestingConfig,
'default':DevelopmentConfig,
}
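# A minimal sketch of how the `config` mapping above is usually consumed by an
# application factory. The create_app function and the FLASK_CONFIG environment
# variable are assumptions for illustration; in a real project this normally
# lives in the package's __init__.py, not in config.py.
def create_app(config_name=None):
    from flask import Flask
    config_name = config_name or os.environ.get('FLASK_CONFIG', 'default')
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    return app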
| 26.2
| 87
| 0.54665
| 2,115
| 0.876866
| 0
| 0
| 867
| 0.359453
| 0
| 0
| 855
| 0.354478
|
97ff714eac7c0cc920b3005424b8958af7aec6ce
| 1,066
|
py
|
Python
|
cnn/conv_average_pooling.py
|
nforesperance/Tensorflow-Keras
|
12fa74e01c7081b2f5ef899ee9123498ef541483
|
[
"MIT"
] | 1
|
2021-01-07T11:05:07.000Z
|
2021-01-07T11:05:07.000Z
|
cnn/conv_average_pooling.py
|
nforesperance/Tensorflow-Keras
|
12fa74e01c7081b2f5ef899ee9123498ef541483
|
[
"MIT"
] | null | null | null |
cnn/conv_average_pooling.py
|
nforesperance/Tensorflow-Keras
|
12fa74e01c7081b2f5ef899ee9123498ef541483
|
[
"MIT"
] | null | null | null |
# example of average pooling
from numpy import asarray
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import AveragePooling2D
# define input data
data = [[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0]]
data = asarray(data)
data = data.reshape(1, 8, 8, 1)
# create model
model = Sequential()
model.add(Conv2D(1, (3,3), activation='relu', input_shape=(8, 8, 1)))
model.add(AveragePooling2D())
# summarize model
model.summary()
# define a vertical line detector
detector = [[[[0]],[[1]],[[0]]],
[[[0]],[[1]],[[0]]],
[[[0]],[[1]],[[0]]]]
weights = [asarray(detector), asarray([0.0])]
# store the weights in the model
model.set_weights(weights)
# apply filter to input data
yhat = model.predict(data)
# enumerate rows
for r in range(yhat.shape[1]):
# print each column in the row
print([yhat[0,r,c,0] for c in range(yhat.shape[2])])
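# shape check (a small sketch): the 8x8 input through a 3x3 'valid' convolution
# gives a 6x6 feature map, and the default 2x2 average pooling halves that to 3x3
print(yhat.shape)  # expected: (1, 3, 3, 1)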
| 30.457143
| 69
| 0.594747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.209193
|
3f000581137f7e8d12b07f946dab58d61d19c246
| 13,127
|
py
|
Python
|
acquisitions/models.py
|
18F/acqstackdb
|
7d939e7deb1cb8749f16fe6b6bc092f5db5c4469
|
[
"CC0-1.0"
] | 2
|
2016-06-03T16:33:34.000Z
|
2016-07-22T12:10:31.000Z
|
acquisitions/models.py
|
18F/acqstackdb
|
7d939e7deb1cb8749f16fe6b6bc092f5db5c4469
|
[
"CC0-1.0"
] | 26
|
2016-06-02T11:21:15.000Z
|
2016-07-18T14:10:03.000Z
|
acquisitions/models.py
|
18F/acqstackdb
|
7d939e7deb1cb8749f16fe6b6bc092f5db5c4469
|
[
"CC0-1.0"
] | 2
|
2017-07-14T08:33:32.000Z
|
2021-02-15T10:16:18.000Z
|
from django.db import models
from django.core.validators import RegexValidator, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from smart_selects.db_fields import ChainedForeignKey, ChainedManyToManyField
from ordered_model.models import OrderedModel
# Create your models here.
class Agency(models.Model):
name = models.CharField(max_length=100, blank=False)
abbreviation = models.CharField(max_length=10, null=True, blank=True)
department = models.CharField(max_length=100, null=True, blank=True)
omb_agency_code = models.IntegerField(null=True, blank=True)
omb_bureau_code = models.IntegerField(null=True, blank=True)
treasury_agency_code = models.IntegerField(null=True, blank=True)
cgac_agency_code = models.IntegerField(null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Agencies"
ordering = ('name',)
class Subagency(models.Model):
name = models.CharField(max_length=100, blank=False)
abbreviation = models.CharField(max_length=10, null=True, blank=True)
agency = models.ForeignKey(Agency)
def __str__(self):
return "%s - %s" % (self.agency, self.name)
class Meta:
ordering = ('name',)
verbose_name_plural = "Subagencies"
class ContractingOffice(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
verbose_name = "Contracting Office"
verbose_name_plural = "Contracting Offices"
class ContractingOfficer(models.Model):
name = models.CharField(max_length=100)
contracting_office = models.ForeignKey(ContractingOffice)
def __str__(self):
return "%s - %s" % (self.name, self.contracting_office)
class Meta:
ordering = ('name',)
verbose_name = "Contracting Officer"
verbose_name_plural = "Contracting Officers"
class COR(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
verbose_name = "Contracting Officer Representative"
verbose_name_plural = "Contracting Officer Representatives"
# Is the acquisition internal or external?
class Track(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "%s" % (self.name)
class Stage(OrderedModel):
name = models.CharField(max_length=50)
wip_limit = models.IntegerField(default=0, verbose_name="WIP Limit")
def __str__(self):
return "%s" % (self.name)
class Meta(OrderedModel.Meta):
pass
class Actor(models.Model):
name = models.CharField(max_length=200, blank=False)
def __str__(self):
return "%s" % (self.name)
class Step(models.Model):
actor = models.ForeignKey(
Actor,
blank=False
)
track = models.ManyToManyField(
Track,
blank=False,
through="StepTrackThroughModel"
)
stage = models.ForeignKey(
Stage,
blank=False
)
def __str__(self):
return "%s - %s" % (self.stage, self.actor,)
class Meta:
ordering = ('steptrackthroughmodel__order',)
class StepTrackThroughModel(OrderedModel):
track = models.ForeignKey(Track)
step = models.ForeignKey(Step)
wip_limit = models.IntegerField(default=0, verbose_name="WIP Limit")
order_with_respect_to = 'track'
class Meta(OrderedModel.Meta):
unique_together = ('track', 'step')
ordering = ('track', 'order')
class Vendor(models.Model):
name = models.CharField(max_length=200, blank=False)
email = models.EmailField(blank=False)
duns = models.CharField(max_length=9, blank=False, validators=[
        RegexValidator(regex=r'^\d{9}$', message="DUNS number must be 9 digits")
])
def __str__(self):
return self.name
class Role(models.Model):
description = models.CharField(max_length=100, choices=(
('P', 'Product Lead'),
('A', 'Acquisition Lead'),
('T', 'Technical Lead')
), null=True, blank=True)
teammate = models.ForeignKey(User, blank=True, null=True)
def __str__(self):
return "%s - %s" % (self.get_description_display(), self.teammate)
class Acquisition(models.Model):
SET_ASIDE_CHOICES = (
("AbilityOne", "AbilityOne"),
("HUBZone Small Business", "HUBZone Small Business"),
("Multiple Small Business Categories",
"Multiple Small Business Categories"),
("Other Than Small", "Other Than Small"),
("Service Disabled Veteran-owned Small Business",
"Service Disabled Veteran-owned Small Business"),
("Small Business", "Small Business"),
("Small Disadvantaged Business (includes Section 8a)",
"Small Disadvantaged Business (includes Section 8a)"),
("To Be Determined-BPA", "To Be Determined-BPA"),
("To Be Determined-IDIQ", "To Be Determined-IDIQ"),
("Veteran-Owned Small Business", "Veteran-Owned Small Business"),
("Woman-Owned Small Business", "Woman-Owned Small Business"),
)
CONTRACT_TYPE_CHOICES = (
("Cost No Fee", "Cost No Fee"),
("Cost Plus Award Fee", "Cost Plus Award Fee"),
("Cost Plus Fixed Fee", "Cost Plus Fixed Fee"),
("Cost Plus Incentive Fee", "Cost Plus Incentive Fee"),
("Cost Sharing", "Cost Sharing"),
("Fixed Price Award Fee", "Fixed Price Award Fee"),
("Fixed Price Incentive", "Fixed Price Incentive"),
("Fixed Price Labor Hours", "Fixed Price Labor Hours"),
("Fixed Price Level of Effort", "Fixed Price Level of Effort"),
("Fixed Price Time and Materials", "Fixed Price Time and Materials"),
("Fixed Price with Economic Price Adjustment",
"Fixed Price with Economic Price Adjustment"),
("Fixed Price", "Fixed Price"),
("Interagency Agreement", "Interagency Agreement"),
("Labor Hours and Time and Materials",
"Labor Hours and Time and Materials"),
("Labor Hours", "Labor Hours"),
("Order Dependent", "Order Dependent"),
("Time and Materials", "Time and Materials"),
)
COMPETITION_STRATEGY_CHOICES = (
("A/E Procedures", "A/E Procedures"),
("Competed under SAP", "Competed under SAP"),
("Competitive Delivery Order Fair Opportunity Provided",
"Competitive Delivery Order Fair Opportunity Provided"),
("Competitive Schedule Buy", "Competitive Schedule Buy"),
("Fair Opportunity", "Fair Opportunity"),
("Follow On to Competed Action (FAR 6.302-1)",
"Follow On to Competed Action (FAR 6.302-1)"),
("Follow On to Competed Action", "Follow On to Competed Action"),
("Full and Open after exclusion of sources (competitive small business \
set-asides, competitive 8a)",
"Full and Open after exclusion of sources (competitive small \
business set-asides, competitive 8a)"),
("Full and Open Competition Unrestricted",
"Full and Open Competition Unrestricted"),
("Full and Open Competition", "Full and Open Competition"),
("Limited Sources FSS Order", "Limited Sources FSS Order"),
("Limited Sources", "Limited Sources"),
("Non-Competitive Delivery Order", "Non-Competitive Delivery Order"),
("Not Available for Competition (e.g., 8a sole source, HUBZone & \
SDVOSB sole source, Ability One, all > SAT)",
"Not Available for Competition (e.g., 8a sole source, HUBZone & \
SDVOSB sole source, Ability One, all > SAT)"),
("Not Competed (e.g., sole source, urgency, etc., all > SAT)",
"Not Competed (e.g., sole source, urgency, etc., all > SAT)"),
("Not Competed under SAP (e.g., Urgent, Sole source, Logical \
Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)",
"Not Competed under SAP (e.g., Urgent, Sole source, Logical \
Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)"),
("Partial Small Business Set-Aside",
"Partial Small Business Set-Aside"),
("Set-Aside", "Set-Aside"),
("Sole Source", "Sole Source"),
)
PROCUREMENT_METHOD_CHOICES = (
("Ability One", "Ability One"),
("Basic Ordering Agreement", "Basic Ordering Agreement"),
("Blanket Purchase Agreement-BPA", "Blanket Purchase Agreement-BPA"),
("BPA Call", "BPA Call"),
("Call Order under GSA Schedules BPA",
"Call Order under GSA Schedules BPA"),
("Commercial Item Contract", "Commercial Item Contract"),
("Contract modification", "Contract modification"),
("Contract", "Contract"),
("Definitive Contract other than IDV",
"Definitive Contract other than IDV"),
("Definitive Contract", "Definitive Contract"),
("Government-wide Agency Contract-GWAC",
"Government-wide Agency Contract-GWAC"),
("GSA Schedule Contract", "GSA Schedule Contract"),
("GSA Schedule", "GSA Schedule"),
("GSA Schedules Program BPA", "GSA Schedules Program BPA"),
("Indefinite Delivery Indefinite Quantity-IDIQ",
"Indefinite Delivery Indefinite Quantity-IDIQ"),
("Indefinite Delivery Vehicle (IDV)",
"Indefinite Delivery Vehicle (IDV)"),
("Indefinite Delivery Vehicle Base Contract",
"Indefinite Delivery Vehicle Base Contract"),
("Multi-Agency Contract", "Multi-Agency Contract"),
("Negotiated", "Negotiated"),
("Order under GSA Federal Supply Schedules Program",
"Order under GSA Federal Supply Schedules Program"),
("Order under GSA Schedules Program BPA",
"Order under GSA Schedules Program BPA"),
("Order under GSA Schedules Program",
"Order under GSA Schedules Program"),
("Order under IDV", "Order under IDV"),
("Purchase Order", "Purchase Order"),
("Sealed Bid", "Sealed Bid"),
)
subagency = models.ForeignKey(Subagency)
task = models.CharField(max_length=100, blank=False)
description = models.TextField(max_length=500, null=True, blank=True)
track = models.ForeignKey(
Track,
blank=False,
related_name="%(class)s_track"
)
step = ChainedForeignKey(
Step,
chained_field="track",
chained_model_field="track",
blank=False
)
dollars = models.DecimalField(decimal_places=2, max_digits=14, null=True,
blank=True)
period_of_performance = models.DateField(null=True, blank=True)
product_owner = models.CharField(max_length=50, null=True, blank=True)
roles = models.ManyToManyField(Role, blank=True)
contracting_officer = models.ForeignKey(ContractingOfficer, null=True,
blank=True)
contracting_officer_representative = models.ForeignKey(COR, null=True,
blank=True)
contracting_office = models.ForeignKey(ContractingOffice, null=True,
blank=True)
vendor = models.ForeignKey(Vendor, null=True, blank=True)
rfq_id = models.IntegerField(null=True, blank=True, verbose_name="RFQ ID")
naics = models.IntegerField(
null=True,
blank=True,
verbose_name="NAICS Code"
)
set_aside_status = models.CharField(max_length=100, null=True, blank=True,
choices=SET_ASIDE_CHOICES)
amount_of_competition = models.IntegerField(null=True, blank=True)
contract_type = models.CharField(max_length=100, null=True, blank=True,
choices=CONTRACT_TYPE_CHOICES)
competition_strategy = models.CharField(
max_length=100,
null=True,
blank=True,
choices=COMPETITION_STRATEGY_CHOICES)
procurement_method = models.CharField(
max_length=100,
null=True,
blank=True,
choices=PROCUREMENT_METHOD_CHOICES)
award_date = models.DateField(null=True, blank=True)
delivery_date = models.DateField(null=True, blank=True)
def clean(self):
print(self.step.track.all())
print(self.track)
if self.track not in self.step.track.all():
raise ValidationError(_('Tracks are not equal.'))
def __str__(self):
return "%s (%s)" % (self.task, self.subagency)
class Evaluator(models.Model):
name = models.CharField(max_length=100)
acquisition = models.ManyToManyField(Acquisition)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class Release(models.Model):
acquisition = models.ForeignKey(Acquisition)
def __str__(self):
        return str(self.id)
class Meta:
ordering = ('id',)
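# A minimal sketch (hypothetical objects, not fixtures from this project) of the
# rule enforced in Acquisition.clean(): full_clean() raises ValidationError when
# the acquisition's track is not one of the tracks attached to its chosen step.
#
#   acq = Acquisition(subagency=some_subagency, task="Example task",
#                     track=internal_track, step=some_step)
#   acq.full_clean()  # ValidationError unless internal_track is in some_step.track.all()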
| 37.505714
| 80
| 0.63198
| 12,692
| 0.966862
| 0
| 0
| 0
| 0
| 0
| 0
| 5,010
| 0.381656
|
3f0006363bb84a90ae81c6bd90ba3b9c73aecdc7
| 714
|
py
|
Python
|
app/kobo/forms.py
|
wri/django_kobo
|
505d52fc0d49d875af068e58ad959b95d1464dd5
|
[
"MIT"
] | 1
|
2018-12-20T07:59:55.000Z
|
2018-12-20T07:59:55.000Z
|
app/kobo/forms.py
|
wri/django_kobo
|
505d52fc0d49d875af068e58ad959b95d1464dd5
|
[
"MIT"
] | 9
|
2018-11-06T01:51:28.000Z
|
2018-12-21T22:19:42.000Z
|
app/kobo/forms.py
|
wri/django_kobo
|
505d52fc0d49d875af068e58ad959b95d1464dd5
|
[
"MIT"
] | 2
|
2018-11-21T15:13:32.000Z
|
2020-02-19T08:39:37.000Z
|
from django import forms
from .models import Connection, KoboUser, KoboData
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.db.models import Q
class ConnectionForm(forms.ModelForm):
class Meta:
model = Connection
exclude = []
widgets = {
'auth_pass': forms.PasswordInput(),
}
class KoboUserForm(forms.ModelForm):
class Meta:
model = KoboUser
exclude = []
surveys = forms.ModelMultipleChoiceField(queryset=KoboData.objects.filter(Q(tags__contains=['bns']) | Q(tags__contains=['nrgt'])), widget=FilteredSelectMultiple(
'Surveys', is_stacked=False), label='')
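# A minimal sketch of how a form like KoboUserForm is typically attached to the
# admin (registration normally lives in admin.py, not in this module):
#
#   from django.contrib import admin
#   class KoboUserAdmin(admin.ModelAdmin):
#       form = KoboUserForm
#   admin.site.register(KoboUser, KoboUserAdmin)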
| 31.043478
| 165
| 0.644258
| 537
| 0.752101
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.046218
|
3f01198a019097c1976dc940001aed540d4f3634
| 713
|
py
|
Python
|
old/dea/aws/__init__.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
old/dea/aws/__init__.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
old/dea/aws/__init__.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
from odc.aws import (
ec2_metadata,
ec2_current_region,
botocore_default_region,
auto_find_region,
make_s3_client,
s3_url_parse,
s3_fmt_range,
s3_ls,
s3_ls_dir,
s3_find,
get_boto_session,
get_creds_with_retry,
s3_fetch,
)
from odc.aws._find import (
s3_file_info,
norm_predicate,
parse_query,
)
__all__ = (
"ec2_metadata",
"ec2_current_region",
"botocore_default_region",
"auto_find_region",
"make_s3_client",
"s3_url_parse",
"s3_fmt_range",
"s3_ls",
"s3_ls_dir",
"s3_find",
"get_boto_session",
"get_creds_with_retry",
"s3_fetch",
"s3_file_info",
"norm_predicate",
"parse_query",
)
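# A small usage sketch of the re-exported helpers (the URL is illustrative and
# the (bucket, key) return shape is an assumption based on the helper's name):
if __name__ == "__main__":
    bucket, key = s3_url_parse("s3://example-bucket/path/to/object.yaml")
    print(bucket, key)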
| 16.97619
| 30
| 0.647966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.338008
|
3f0241d966136442d63f54ae450fa5bbf000c236
| 883
|
py
|
Python
|
systems/stage.py
|
will-nickson/starter_system
|
bce669250fc58c3966c71e84020e078871a79e4f
|
[
"MIT"
] | null | null | null |
systems/stage.py
|
will-nickson/starter_system
|
bce669250fc58c3966c71e84020e078871a79e4f
|
[
"MIT"
] | null | null | null |
systems/stage.py
|
will-nickson/starter_system
|
bce669250fc58c3966c71e84020e078871a79e4f
|
[
"MIT"
] | null | null | null |
from log.logger import logger
class SystemStage(object):
"""
Default stage object: creates a SystemStage for doing something
"""
@property
def name(self):
return "Need to replace name when inheriting"
def __repr__(self):
return "SystemStage '%s' Try %s.methods()" % (
self.name,
self.name,
)
    def methods(self):
        # get_methods is not imported in this module; it is assumed to be a
        # reflection helper provided elsewhere in the project
        return get_methods(self)
    def system_init(self, system: "System"):
        # method called once we have a system ("System" is a forward reference
        # to the parent system class, which is not imported here)
        self._parent = system
        # and a log
        log = system.log.setup(stage=self.name)
        self._log = log
    @property
    def log(self) -> logger:
        # fall back to a screen logger when system_init() has not run yet;
        # logtoscreen is assumed to be provided elsewhere in the project
        log = getattr(self, "_log", logtoscreen(""))
        return log
    @property
    def parent(self) -> "System":
        parent = getattr(self, "_parent", None)
        return parent
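# A minimal sketch of how a concrete stage would build on this base class
# (the stage name below is illustrative, not part of this file):
class ExampleStage(SystemStage):
    @property
    def name(self):
        return "example_stage"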
| 22.641026
| 71
| 0.571914
| 850
| 0.962627
| 0
| 0
| 304
| 0.344281
| 0
| 0
| 221
| 0.250283
|
3f02d35a7926f58cae17ffac0f474623fde43a2e
| 37,840
|
py
|
Python
|
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import hops
class lsp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/rsvp/igp-sync/link/lsp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_instance_id','__path_name','__cspf_enabled','__rro_enabled','__frr_enabled','__nbr_down_enabled','__link_count','__nbr_down_inprogress','__cspf_hop_count','__rro_hop_count','__hops',)
_yang_name = 'lsp'
_rest_name = 'lsp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__cspf_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__hops = YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__nbr_down_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__rro_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__cspf_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__nbr_down_inprogress = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__lsp_instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__rro_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__frr_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__link_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'rsvp', u'igp-sync', u'link', u'lsp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'rsvp', u'igp-sync', u'link', u'lsp']
def _get_lsp_name(self):
"""
Getter method for lsp_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_name (string)
YANG Description: LSP name
"""
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
"""
Setter method for lsp_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_name() directly.
YANG Description: LSP name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_lsp_instance_id(self):
"""
Getter method for lsp_instance_id, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_instance_id (uint32)
YANG Description: Instance id of the lsp instance
"""
return self.__lsp_instance_id
def _set_lsp_instance_id(self, v, load=False):
"""
Setter method for lsp_instance_id, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_instance_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_instance_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_instance_id() directly.
YANG Description: Instance id of the lsp instance
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_instance_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_instance_id = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_instance_id(self):
self.__lsp_instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_path_name(self):
"""
Getter method for path_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/path_name (string)
YANG Description: LSP Path name
"""
return self.__path_name
def _set_path_name(self, v, load=False):
"""
Setter method for path_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/path_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_path_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_path_name() directly.
YANG Description: LSP Path name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """path_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__path_name = t
if hasattr(self, '_set'):
self._set()
def _unset_path_name(self):
self.__path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_cspf_enabled(self):
"""
Getter method for cspf_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_enabled (boolean)
YANG Description: CSPF enabled for LSP
"""
return self.__cspf_enabled
def _set_cspf_enabled(self, v, load=False):
"""
Setter method for cspf_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_cspf_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cspf_enabled() directly.
YANG Description: CSPF enabled for LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cspf_enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__cspf_enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_cspf_enabled(self):
self.__cspf_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_rro_enabled(self):
"""
Getter method for rro_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_enabled (boolean)
YANG Description: RRO enabled for LSP
"""
return self.__rro_enabled
def _set_rro_enabled(self, v, load=False):
"""
Setter method for rro_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_rro_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rro_enabled() directly.
YANG Description: RRO enabled for LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rro_enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__rro_enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_rro_enabled(self):
self.__rro_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_frr_enabled(self):
"""
Getter method for frr_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/frr_enabled (boolean)
YANG Description: FRR enabled for LSP
"""
return self.__frr_enabled
def _set_frr_enabled(self, v, load=False):
"""
Setter method for frr_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/frr_enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_frr_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_frr_enabled() directly.
YANG Description: FRR enabled for LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """frr_enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__frr_enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_frr_enabled(self):
self.__frr_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_nbr_down_enabled(self):
"""
Getter method for nbr_down_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_enabled (boolean)
YANG Description: LSP Neighbour down is enabled
"""
return self.__nbr_down_enabled
def _set_nbr_down_enabled(self, v, load=False):
"""
Setter method for nbr_down_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_nbr_down_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nbr_down_enabled() directly.
YANG Description: LSP Neighbour down is enabled
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nbr_down_enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__nbr_down_enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_nbr_down_enabled(self):
self.__nbr_down_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_link_count(self):
"""
Getter method for link_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/link_count (uint32)
YANG Description: Total links used by the LSP
"""
return self.__link_count
def _set_link_count(self, v, load=False):
"""
Setter method for link_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/link_count (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_count() directly.
YANG Description: Total links used by the LSP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_count must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__link_count = t
if hasattr(self, '_set'):
self._set()
def _unset_link_count(self):
self.__link_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_nbr_down_inprogress(self):
"""
Getter method for nbr_down_inprogress, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_inprogress (boolean)
YANG Description: Neighbor down processing is in progress
"""
return self.__nbr_down_inprogress
def _set_nbr_down_inprogress(self, v, load=False):
"""
Setter method for nbr_down_inprogress, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_inprogress (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_nbr_down_inprogress is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nbr_down_inprogress() directly.
YANG Description: Neighbor down processing is in progress
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nbr_down_inprogress must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__nbr_down_inprogress = t
if hasattr(self, '_set'):
self._set()
def _unset_nbr_down_inprogress(self):
self.__nbr_down_inprogress = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_cspf_hop_count(self):
"""
Getter method for cspf_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_hop_count (uint32)
YANG Description: CSPF hop count
"""
return self.__cspf_hop_count
def _set_cspf_hop_count(self, v, load=False):
"""
Setter method for cspf_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_hop_count (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_cspf_hop_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cspf_hop_count() directly.
YANG Description: CSPF hop count
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cspf_hop_count must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__cspf_hop_count = t
if hasattr(self, '_set'):
self._set()
def _unset_cspf_hop_count(self):
self.__cspf_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_rro_hop_count(self):
"""
Getter method for rro_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_hop_count (uint32)
YANG Description: RRO hop count
"""
return self.__rro_hop_count
def _set_rro_hop_count(self, v, load=False):
"""
Setter method for rro_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_hop_count (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_rro_hop_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rro_hop_count() directly.
YANG Description: RRO hop count
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rro_hop_count must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__rro_hop_count = t
if hasattr(self, '_set'):
self._set()
def _unset_rro_hop_count(self):
self.__rro_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_hops(self):
"""
Getter method for hops, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/hops (list)
YANG Description: MPLS Rsvp IGP Synchronization Hop information
"""
return self.__hops
def _set_hops(self, v, load=False):
"""
Setter method for hops, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/hops (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_hops is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hops() directly.
YANG Description: MPLS Rsvp IGP Synchronization Hop information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hops must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__hops = t
if hasattr(self, '_set'):
self._set()
def _unset_hops(self):
self.__hops = YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
lsp_name = __builtin__.property(_get_lsp_name)
lsp_instance_id = __builtin__.property(_get_lsp_instance_id)
path_name = __builtin__.property(_get_path_name)
cspf_enabled = __builtin__.property(_get_cspf_enabled)
rro_enabled = __builtin__.property(_get_rro_enabled)
frr_enabled = __builtin__.property(_get_frr_enabled)
nbr_down_enabled = __builtin__.property(_get_nbr_down_enabled)
link_count = __builtin__.property(_get_link_count)
nbr_down_inprogress = __builtin__.property(_get_nbr_down_inprogress)
cspf_hop_count = __builtin__.property(_get_cspf_hop_count)
rro_hop_count = __builtin__.property(_get_rro_hop_count)
hops = __builtin__.property(_get_hops)
_pyangbind_elements = {'lsp_name': lsp_name, 'lsp_instance_id': lsp_instance_id, 'path_name': path_name, 'cspf_enabled': cspf_enabled, 'rro_enabled': rro_enabled, 'frr_enabled': frr_enabled, 'nbr_down_enabled': nbr_down_enabled, 'link_count': link_count, 'nbr_down_inprogress': nbr_down_inprogress, 'cspf_hop_count': cspf_hop_count, 'rro_hop_count': rro_hop_count, 'hops': hops, }
| 66.737213
| 754
| 0.742072
| 37,434
| 0.989271
| 0
| 0
| 0
| 0
| 0
| 0
| 19,551
| 0.516675
|
3f042a0420967f88675a79d4f9cf3ecb5cca91b8
| 1,947
|
py
|
Python
|
vega/trainer/callbacks/horovod.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/trainer/callbacks/horovod.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/trainer/callbacks/horovod.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parallel callback."""
import logging
import vega
from vega.common import ClassFactory, ClassType
from .callback import Callback
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class Horovod(Callback):
"""Callback that saves the evaluated Performance."""
def __init__(self):
"""Initialize ModelCheckpoint callback."""
super(Horovod, self).__init__()
self.priority = 260
def before_train(self, logs=None):
"""Be called before the training process."""
if not self.trainer.horovod:
return
if vega.is_torch_backend():
self._init_torch()
def _init_torch(self):
import torch
import horovod.torch as hvd
hvd.broadcast_parameters(self.trainer.model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.trainer.optimizer, root_rank=0)
self.trainer._average_metrics = self._average_metrics
def _average_metrics(self, metrics_results):
import torch
import horovod.torch as hvd
for key, value in metrics_results.items():
tensor = torch.tensor(value)
avg_tensor = hvd.allreduce(tensor, name=key)
metrics_results[key] = avg_tensor.item()
return metrics_results
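# A rough single-worker sketch of the averaging idea used in _average_metrics
# (requires horovod; with one process the allreduce mean is just the value itself):
#
#   import torch
#   import horovod.torch as hvd
#   hvd.init()
#   averaged = hvd.allreduce(torch.tensor(0.75), name="accuracy").item()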
| 33.568966
| 78
| 0.698511
| 1,082
| 0.555727
| 0
| 0
| 1,125
| 0.577812
| 0
| 0
| 795
| 0.40832
|
3f0440a332e725d1be2b9f4d8bf41ca99082b5e6
| 5,580
|
py
|
Python
|
parse_doc.py
|
nprapps/idp-georgia
|
316eba6195b7f410567a7e11eb4811ff7cba54cc
|
[
"Unlicense"
] | 1
|
2017-04-15T01:48:27.000Z
|
2017-04-15T01:48:27.000Z
|
parse_doc.py
|
nprapps/idp-georgia
|
316eba6195b7f410567a7e11eb4811ff7cba54cc
|
[
"Unlicense"
] | 153
|
2017-04-14T18:06:26.000Z
|
2017-06-02T13:08:09.000Z
|
parse_doc.py
|
nprapps/idp-georgia
|
316eba6195b7f410567a7e11eb4811ff7cba54cc
|
[
"Unlicense"
] | 1
|
2021-02-18T11:15:52.000Z
|
2021-02-18T11:15:52.000Z
|
# _*_ coding:utf-8 _*_
import logging
import re
import app_config
from bs4 import BeautifulSoup
from shortcode import process_shortcode
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
end_doc_regex = re.compile(ur'^\s*[Ee][Nn][Dd]\s*$',
re.UNICODE)
new_section_marker_regex = re.compile(ur'^\s*\+{50,}\s*$',
re.UNICODE)
section_end_marker_regex = re.compile(ur'^\s*-{50,}\s*$',
re.UNICODE)
frontmatter_marker_regex = re.compile(ur'^\s*-{3}\s*$',
re.UNICODE)
extract_metadata_regex = re.compile(ur'^(.*?):(.*)$',
re.UNICODE)
shortcode_regex = re.compile(ur'^\s*\[%\s*.*\s*%\]\s*$', re.UNICODE)
def is_section_marker(tag):
"""
Checks for the beginning of a new section
"""
text = tag.get_text()
m = new_section_marker_regex.match(text)
if m:
return True
else:
return False
def is_section_end_marker(tag):
"""
Checks for the beginning of a new section
"""
text = tag.get_text()
m = section_end_marker_regex.match(text)
if m:
return True
else:
return False
def process_headline(contents):
logger.debug('--process_headline start--')
headline = None
for tag in contents:
if tag.name == "h2":
headline = tag.get_text()
else:
logger.warning('unexpected tag found: Ignore %s' % tag.get_text())
if not headline:
logger.error('Did not find headline on post. Contents: %s' % contents)
return headline
def process_metadata(contents):
logger.debug('--process_metadata start--')
metadata = {}
for tag in contents:
text = tag.get_text()
m = extract_metadata_regex.match(text)
if m:
key = m.group(1).strip().lower()
value = m.group(2).strip().lower()
metadata[key] = value
else:
logger.error('Could not parse metadata. Text: %s' % text)
logger.debug("metadata: %s" % metadata)
return metadata
def process_section_contents(contents):
"""
Process episode copy content
In particular parse and generate HTML from shortcodes
"""
logger.debug('--process_post_contents start--')
parsed = []
for tag in contents:
text = tag.get_text()
m = shortcode_regex.match(text)
if m:
parsed.append(process_shortcode(tag))
else:
parsed.append(unicode(tag))
episode_contents = ''.join(parsed)
return episode_contents
def parse_raw_sections(raw_sections):
"""
parse raw episodes into an array of section objects
"""
# Divide each episode into its subparts
# - Headline
# - FrontMatter
# - Contents
sections = []
for raw_section in raw_sections:
section = {}
marker_counter = 0
section_raw_headline = []
section_raw_metadata = []
section_raw_contents = []
for tag in raw_section:
text = tag.get_text()
m = frontmatter_marker_regex.match(text)
if m:
marker_counter += 1
else:
if (marker_counter == 0):
section_raw_headline.append(tag)
elif (marker_counter == 1):
section_raw_metadata.append(tag)
else:
section_raw_contents.append(tag)
section[u'headline'] = process_headline(section_raw_headline)
metadata = process_metadata(section_raw_metadata)
for k, v in metadata.iteritems():
section[k] = v
section[u'contents'] = process_section_contents(section_raw_contents)
sections.append(section)
return sections
def split_sections(doc):
"""
split the raw document into an array of raw sections
"""
logger.debug('--split_sections start--')
raw_sections = []
raw_episode_contents = []
ignore_orphan_text = True
body = doc.soup.body
for child in body.children:
if is_section_marker(child):
# Detected first post stop ignoring orphan text
if ignore_orphan_text:
ignore_orphan_text = False
else:
if ignore_orphan_text:
continue
elif is_section_end_marker(child):
ignore_orphan_text = True
raw_sections.append(raw_episode_contents)
raw_episode_contents = []
else:
raw_episode_contents.append(child)
return raw_sections
def find_section_id(sections, id):
"""
Find the section with a given id
"""
for idx, section in enumerate(sections):
try:
if section['id'] == id:
return idx
except KeyError:
continue
return None
def process_extracted_contents(inline_intro):
"""
Remove html markup
"""
return inline_intro['contents']
def parse(doc):
"""
parse google doc files and extract markup
"""
try:
parsed_document = {}
logger.info('-------------start------------')
raw_sections = split_sections(doc)
sections = parse_raw_sections(raw_sections)
logger.info('Number of sections: %s' % len(sections))
parsed_document['sections'] = sections
finally:
logger.info('-------------end------------')
return parsed_document
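# Hedged usage sketch (hypothetical data, not part of the original module): find_section_id
# looks up a parsed section by the 'id' value taken from its frontmatter metadata.
if __name__ == '__main__':
    sample_sections = [
        {u'headline': u'Intro', u'id': u'intro', u'contents': u'<p>...</p>'},
        {u'headline': u'Chapter one', u'id': u'ch1', u'contents': u'<p>...</p>'},
    ]
    print find_section_id(sample_sections, u'ch1')      # 1
    print find_section_id(sample_sections, u'missing')  # None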
| 28.040201
| 78
| 0.58405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,137
| 0.203763
|
3f04bc07d2d8f73a71534912779c419ef2aa5148
| 2,162
|
py
|
Python
|
01_irc_bot/bot.py
|
pymug/ARJ_SpoonfeedingSockets_APR2021
|
ba741d4fbde11f8ab4ddda704340ab5892c19478
|
[
"MIT"
] | null | null | null |
01_irc_bot/bot.py
|
pymug/ARJ_SpoonfeedingSockets_APR2021
|
ba741d4fbde11f8ab4ddda704340ab5892c19478
|
[
"MIT"
] | null | null | null |
01_irc_bot/bot.py
|
pymug/ARJ_SpoonfeedingSockets_APR2021
|
ba741d4fbde11f8ab4ddda704340ab5892c19478
|
[
"MIT"
] | null | null | null |
"""
Abdur-Rahmaan Janhangeer
Skeleton of https://github.com/pyhoneybot/honeybot/
"""
import time
import os
import socket
directory = "irc"
if not os.path.exists(directory):
os.makedirs(directory)
target = open(os.path.join(directory, "log.txt"), "w")
def message_checker(msgLine):
sendvar = ""
global mute
mute = False
completeLine = str(msgLine[1:]).replace("'b", "").split(":", 1)
info = completeLine[0].split()
message = (completeLine[1].split("\\r")[0]).replace("'b", "")
sender = info[0][2:].split("!", 1)[0]
refinedmsg = str(message.lower())
refinedmsgl = len(refinedmsg)
print("Complete Line-->" + str(completeLine))
print("Info-->" + str(info))
print("Message-->" + str(message))
print("Sender-->" + str(sender) + "\n")
def ping_checker(pingLine):
if pingLine.find(bytes("PING", "utf8")) != -1:
pingLine = pingLine.rstrip().split()
if pingLine[0] == bytes("PING", "utf8"):
irc.send(bytes("PONG ", "utf8") + pingLine[1] + bytes("\r\n", "utf8"))
BOT_IRC_SERVER = "chat.freenode.net"
BOT_IRC_CHANNEL = "##bottestingmu"
# BOT_IRC_CHANNEL = "#python"
BOT_IRC_PORT = 6667
BOT_NICKNAME = "appinventormuBot"
# BOT_PASSWORD = ''
irc = socket.socket()
irc.connect((BOT_IRC_SERVER, BOT_IRC_PORT))
irc.recv(4096)
irc.send(bytes("NICK " + BOT_NICKNAME + "\r\n", "utf8"))
ping_checker(irc.recv(4096))
irc.send(
bytes(
"USER appinventormuBot appinventormuBot appinventormuBot : appinventormuBot IRC\r\n",
"utf8",
)
)
ping_checker(irc.recv(4096))
# irc.send(bytes('msg NickServ identify ' + BOT_PASSWORD + " \r\n" ,'utf8') )
# ping_checker(irc.recv(4096))
# irc.send(bytes('NICKSERV identify ' + BOT_NICKNAME+' '+BOT_PASSWORD+ '\r\n','utf8' ) )
# ping_checker(irc.recv(4096))
time.sleep(3)
irc.send(bytes("JOIN " + BOT_IRC_CHANNEL + "\r\n", "utf8"))
while 1:
pass
line = irc.recv(4096)
print(line)
ping_checker(line)
if (
line.find(bytes("PRIVMSG", "utf8")) != -1
or line.find(bytes("NOTICE", "utf8")) != -1
):
message_checker(line)
target.write(str(line))
target.flush()
| 25.738095
| 93
| 0.623497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 717
| 0.331637
|
3f05790f911b335d2d94be5f242d22af72e43329
| 5,494
|
py
|
Python
|
xenia_python_client_library/models/attachments_list.py
|
DutchAnalytics/xenia-python-client-library
|
60dc3e21094086124b552ff5bed5895fee826b57
|
[
"Apache-2.0"
] | null | null | null |
xenia_python_client_library/models/attachments_list.py
|
DutchAnalytics/xenia-python-client-library
|
60dc3e21094086124b552ff5bed5895fee826b57
|
[
"Apache-2.0"
] | null | null | null |
xenia_python_client_library/models/attachments_list.py
|
DutchAnalytics/xenia-python-client-library
|
60dc3e21094086124b552ff5bed5895fee826b57
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Xenia Python Client Library
Python Client Library to interact with the Xenia API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from xenia_python_client_library.configuration import Configuration
class AttachmentsList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'source_name': 'str',
'destination_name': 'str',
'mapping': 'list[AttachmentFieldsList]'
}
attribute_map = {
'id': 'id',
'source_name': 'source_name',
'destination_name': 'destination_name',
'mapping': 'mapping'
}
def __init__(self, id=None, source_name=None, destination_name=None, mapping=None, local_vars_configuration=None): # noqa: E501
"""AttachmentsList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._source_name = None
self._destination_name = None
self._mapping = None
self.discriminator = None
if id is not None:
self.id = id
if source_name is not None:
self.source_name = source_name
if destination_name is not None:
self.destination_name = destination_name
if mapping is not None:
self.mapping = mapping
@property
def id(self):
"""Gets the id of this AttachmentsList. # noqa: E501
:return: The id of this AttachmentsList. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this AttachmentsList.
:param id: The id of this AttachmentsList. # noqa: E501
:type: str
"""
self._id = id
@property
def source_name(self):
"""Gets the source_name of this AttachmentsList. # noqa: E501
:return: The source_name of this AttachmentsList. # noqa: E501
:rtype: str
"""
return self._source_name
@source_name.setter
def source_name(self, source_name):
"""Sets the source_name of this AttachmentsList.
:param source_name: The source_name of this AttachmentsList. # noqa: E501
:type: str
"""
self._source_name = source_name
@property
def destination_name(self):
"""Gets the destination_name of this AttachmentsList. # noqa: E501
:return: The destination_name of this AttachmentsList. # noqa: E501
:rtype: str
"""
return self._destination_name
@destination_name.setter
def destination_name(self, destination_name):
"""Sets the destination_name of this AttachmentsList.
:param destination_name: The destination_name of this AttachmentsList. # noqa: E501
:type: str
"""
self._destination_name = destination_name
@property
def mapping(self):
"""Gets the mapping of this AttachmentsList. # noqa: E501
:return: The mapping of this AttachmentsList. # noqa: E501
:rtype: list[AttachmentFieldsList]
"""
return self._mapping
@mapping.setter
def mapping(self, mapping):
"""Sets the mapping of this AttachmentsList.
:param mapping: The mapping of this AttachmentsList. # noqa: E501
:type: list[AttachmentFieldsList]
"""
self._mapping = mapping
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AttachmentsList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AttachmentsList):
return True
return self.to_dict() != other.to_dict()
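# Hedged usage sketch (hypothetical field values, not part of the generated module): the model
# can be constructed directly and serialised with to_dict(); `mapping` is left unset to keep the
# sketch self-contained.
if __name__ == "__main__":
    attachment = AttachmentsList(id="att-1",
                                 source_name="deployment-a",
                                 destination_name="deployment-b")
    print(attachment.to_dict())
    print(attachment == AttachmentsList(id="att-1",
                                        source_name="deployment-a",
                                        destination_name="deployment-b"))  # True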
| 27.60804
| 132
| 0.59028
| 5,144
| 0.936294
| 0
| 0
| 2,026
| 0.368766
| 0
| 0
| 2,472
| 0.449945
|
3f05ec3f00a5d7d90f5ef0232521b059bc84d999
| 672
|
py
|
Python
|
src/AuShadha/registry/icd10/aushadha.py
|
GosthMan/AuShadha
|
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
|
[
"PostgreSQL"
] | 46
|
2015-03-04T14:19:47.000Z
|
2021-12-09T02:58:46.000Z
|
src/AuShadha/registry/icd10/aushadha.py
|
aytida23/AuShadha
|
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
|
[
"PostgreSQL"
] | 2
|
2015-06-05T10:29:04.000Z
|
2015-12-06T16:54:10.000Z
|
src/AuShadha/registry/icd10/aushadha.py
|
aytida23/AuShadha
|
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
|
[
"PostgreSQL"
] | 24
|
2015-03-23T01:38:11.000Z
|
2022-01-24T16:23:42.000Z
|
################################################################################
# Create a Registration with the UI for a Role.
# Each module's aushadha.py is screened for this
#
# Each Class is registered for a Role in UI
# These can be used to generate Role based UI elements later.
#
# As of now string-based role assignment is done.
# This can be later extended to class based role
################################################################################
from .models import Chapter, Section,Diagnosis
from AuShadha.apps.ui.ui import ui as UI
UI.register('RegistryApp',Chapter )
UI.register('DiseaseCodes',Chapter)
UI.register('ReferenceApp',Chapter)
| 37.333333
| 80
| 0.577381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 504
| 0.75
|
3f075c7ec34c5ad02a052b425ce2675ad65347ca
| 973
|
py
|
Python
|
Etap 2/Logia03/Zad1.py
|
aszokalski/Logia
|
5e29745b01623df8a2f162f143656a76056af407
|
[
"MIT"
] | null | null | null |
Etap 2/Logia03/Zad1.py
|
aszokalski/Logia
|
5e29745b01623df8a2f162f143656a76056af407
|
[
"MIT"
] | null | null | null |
Etap 2/Logia03/Zad1.py
|
aszokalski/Logia
|
5e29745b01623df8a2f162f143656a76056af407
|
[
"MIT"
] | null | null | null |
from turtle import *
def rysuj(s):
a = 720 / len(s)
up = "bdfhklt"
down = "gjpqy"
numb = "0123456789"
samogloski = "aeiouy"
pu(); bk(360); pd()
for elem in s:
if elem in numb:
prost(a, "green")
elif elem in up:
prost(a, "yellow")
elif elem in down:
col = "yellow"
if elem in samogloski:
col = "red"
pu(); rt(90); fd(a); lt(90); pd()
prost(a, col)
pu(); lt(90); fd(a); rt(90); pd()
else:
col = "yellow"
if elem in samogloski:
col = "red"
kwad(a, col)
def prost(a, col):
fillcolor(col)
begin_fill()
for i in range(2):
fd(a)
lt(90)
fd(2 * a)
lt(90)
fd(a)
end_fill()
def kwad(a, col):
fillcolor(col)
begin_fill()
for i in range(4):
fd(a)
lt(90)
fd(a)
end_fill()
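# Hedged usage sketch (hypothetical word, not part of the original solution): draw a sample word
# scaled to the 720-unit drawing width and keep the window open until it is closed manually.
if __name__ == "__main__":
    speed(0)
    rysuj("python")
    done()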
| 20.702128
| 45
| 0.42446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.079137
|
3f07c6d2135990949504b1e72bbaec00f43feafb
| 616
|
py
|
Python
|
server/src/models/movie.py
|
Rubilmax/netflux
|
9e79063b81e3dc78055fc683c230de511827f030
|
[
"MIT"
] | 2
|
2019-06-17T08:28:03.000Z
|
2019-06-17T08:28:32.000Z
|
server/src/models/movie.py
|
Rubilmax/netflux
|
9e79063b81e3dc78055fc683c230de511827f030
|
[
"MIT"
] | 3
|
2020-09-05T00:54:20.000Z
|
2021-05-07T15:34:58.000Z
|
server/src/models/movie.py
|
Rubilmax/netflux
|
9e79063b81e3dc78055fc683c230de511827f030
|
[
"MIT"
] | null | null | null |
"""
Define the Movie model
"""
from . import db
from .abc import BaseModel, MetaBaseModel
class Movie(db.Model, BaseModel, metaclass=MetaBaseModel):
""" The Movie model """
__tablename__ = "movies"
movie_id = db.Column(db.String(300), primary_key=True)
title = db.Column(db.String(300))
author = db.Column(db.String(300))
release_year = db.Column(db.Integer)
def __init__(self, movie_id, title, author, release_year):
""" Create a new movie """
self.movie_id = movie_id
self.title = title
self.author = author
self.release_year = release_year
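# Hedged usage sketch (hypothetical values, not part of the original model): building an instance
# only populates the column attributes; persisting it would additionally need an application
# context and a session (db.session.add(...) followed by db.session.commit()).
def _example_movie():
    return Movie("tt0133093", "The Matrix", "The Wachowskis", 1999)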
| 25.666667
| 62
| 0.657468
| 523
| 0.849026
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.141234
|
3f07dc93b37cf1bf8c17deb226c77fdb8cc21bba
| 17,963
|
py
|
Python
|
wmt-shared-task/segment-level/segment_level_prism.py
|
chryssa-zrv/UA_COMET
|
527e7c86bd0a0d8ff90efda58e820108a5666b92
|
[
"Apache-2.0"
] | null | null | null |
wmt-shared-task/segment-level/segment_level_prism.py
|
chryssa-zrv/UA_COMET
|
527e7c86bd0a0d8ff90efda58e820108a5666b92
|
[
"Apache-2.0"
] | null | null | null |
wmt-shared-task/segment-level/segment_level_prism.py
|
chryssa-zrv/UA_COMET
|
527e7c86bd0a0d8ff90efda58e820108a5666b92
|
[
"Apache-2.0"
] | null | null | null |
f"""
Shell script tho reproduce results for BERTScores in data from WMT18/19 Metrics Shared task.
"""
import argparse
import hashlib
import logging
import os
import sys
from typing import Any, Dict, Iterator, List
import numpy as np
import pandas as pd
import sentencepiece as spm
import torch
from tqdm import tqdm
from fairseq import utils
from fairseq import checkpoint_utils
from fairseq.data import LanguagePairDataset
#!/usr/bin/env python3
logger = logging.getLogger('prism')
logger.setLevel(logging.INFO)
MODELS = {
'8412b2044da4b9b2c0a8ce87b305d0d1': {
'name': 'm39v1',
'path': 'todo',
'date': '2020-04-30',
'description': 'model released with arXiv paper April 2020',
'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he',
'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru',
'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'],
}
}
def hash_model(model_dir):
md5 = hashlib.md5()
block_size = 2 ** 20
for fname in ('checkpoint.pt', 'spm.model', 'dict.src.txt', 'dict.tgt.txt'):
with open(os.path.join(model_dir, fname), "rb") as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
md5.digest()
return md5.hexdigest()
"""
Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py
with softmax temperature control added
"""
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(self, tgt_dict, softmax_batch=None, temperature=1.0):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.softmax_batch = softmax_batch or sys.maxsize
self.temperature = temperature
assert self.softmax_batch > 0
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
# divide the logits by temperature prior to softmax
# for example, see https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_generator.py:
# decoder_out[0][:, -1:, :].div_(temperature)
bd[0].div_(self.temperature)
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i],
sample['target'][i], self.pad, self.eos)
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
class Prism:
def __init__(self, model_dir, lang, temperature):
'''
model_dir should contain:
1) checkpoint.pt: the fairseq model
2) spm.model: the sentencepiece model
3) dict.src.txt: the fairseq source dictionary
4) dict.tgt.txt: the fairseq target dictionary (likely a copy of the source)
        lang: ISO 639-1 Code (e.g. "en"). Must be a language compatible with the model.
'''
self.sp = spm.SentencePieceProcessor()
self.sp.Load(model_dir + '/spm.model')
self.lang = lang
self.temperature = temperature
# this prints things and I can't figure out how to disable it
sys.stdout = open(os.devnull, 'w')
self.models, self.args, self.task = checkpoint_utils.load_model_ensemble_and_task(
[model_dir + '/checkpoint.pt', ],
arg_overrides=dict(data=model_dir + '/'),
)
sys.stdout = sys.__stdout__
self.use_cuda = torch.cuda.is_available()
self.generator = SequenceScorer(self.task.target_dictionary, temperature=temperature)
for model in self.models:
if self.use_cuda:
model.cuda()
model.make_generation_fast_(
beamable_mm_beam_size=None,
need_attn=False,
)
# if model.args.fp16:
# model.half()
# hash model
self.model_hash = hash_model(model_dir)
if self.model_hash in MODELS:
model_langs = MODELS[self.model_hash]['langs']
if lang not in model_langs:
model_name = MODELS[self.model_hash]['name']
logger.warning(f'Language "{lang}" is unsupported for model "{model_name}"')
logger.warning(f'Supported languages for {model_name}: {", ".join(model_langs)}')
else:
logger.warning('unrecognized model, so cannot check language')
def identifier(self):
if self.model_hash in MODELS:
model_name = MODELS[self.model_hash]['name']
else:
logger.warning('unrecognized model, using hash to identify')
model_name = self.model_hash
return dict(version='0.1', model=model_name, seg_scores='avg_log_prob',
sys_scores='avg_log_prob', log_base=2, temperature=self.temperature)
def _binarize(self, sentence: str) -> torch.LongTensor:
return self.task.source_dictionary.encode_line(sentence, add_if_not_exist=False).long()
def _encode(self, sent, prepend=True):
sent = ' '.join(self.sp.EncodeAsPieces(sent))
if prepend:
sent = f'<{self.lang}> ' + sent
return self._binarize(sent)
def _build_batches(self,
source_tokens: List[List[int]],
target_tokens: List[List[int]],
skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]:
source_lengths = torch.LongTensor([t.numel() for t in source_tokens])
target_lengths = torch.LongTensor([t.numel() for t in target_tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=LanguagePairDataset(source_tokens, source_lengths, self.task.source_dictionary,
tgt=target_tokens, tgt_sizes=target_lengths,
tgt_dict=self.task.target_dictionary),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=(2000, 2000), # ???
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
def _score_forward(self, tok_sents_in, tok_sents_out):
assert len(tok_sents_in) == len(tok_sents_out)
tok_level_scores = [None, ] * len(tok_sents_in) # for debug
results = [None, ] * len(tok_sents_in)
for batch in self._build_batches(tok_sents_in, tok_sents_out, skip_invalid_size_inputs=False):
if self.use_cuda: # must be a better way
batch['id'] = batch['id'].cuda()
batch['net_input']['src_tokens'] = batch['net_input']['src_tokens'].cuda()
batch['net_input']['src_lengths'] = batch['net_input']['src_lengths'].cuda()
batch['net_input']['prev_output_tokens'] = batch['net_input']['prev_output_tokens'].cuda()
batch['target'] = batch['target'].cuda()
translations = self.task.inference_step(self.generator, self.models, batch)
ids = batch['id'].cpu().numpy()
tok_scores = [x[0]['positional_scores'].cpu().numpy() for x in translations]
# [1:] to skip language tag log prob
sent_scores = [np.mean(x[1:]) for x in tok_scores]
for _id, sent_score, _tok_score in zip(ids, sent_scores, tok_scores):
results[_id] = sent_score
tok_level_scores[_id] = _tok_score
if logger.level == logging.DEBUG:
for ii, (sent_in, scores_out, sent_out) in enumerate(zip(tok_sents_in, tok_level_scores, tok_sents_out)):
sent_in_str = ' '.join([self.task.source_dictionary[x] for x in sent_in])
logger.debug(f'Input[{ii}] = ' + sent_in_str)
sent_out_tok = [self.task.source_dictionary[x] for x in sent_out]
logger.debug(f'Output[{ii}] = ' + \
f' '.join([f'{a}[{b:.02f}]' for a, b in zip(sent_out_tok, scores_out)]))
if None in results:
raise Exception('Missing one or more sentence scores')
return np.array(results)
def score(self, cand, ref=None, src=None, segment_scores=False):
if not (ref is None) ^ (src is None):
raise Exception('Must provide exactly one of "ref" or "src"')
tokenized_cand = [self._encode(sentence, prepend=False) for sentence in cand]
tokenized_cand_prep = [self._encode(sentence, prepend=True) for sentence in cand]
if src is not None:
# Prism-src: score candidate given on source
if len(cand) != len(src):
raise Exception(f'Length of cand ({len(cand)}) does not match length of src ({len(src)})')
tokenized_src = [self._encode(sentence, prepend=False) for sentence in src]
scores = self._score_forward(tokenized_src, tokenized_cand_prep)
else:
# Prism-ref: average candidate given reference and reference given candidate
if len(cand) != len(ref):
raise Exception(f'Length of cand ({len(cand)}) does not match length of ref ({len(ref)})')
tokenized_ref = [self._encode(sentence, prepend=False) for sentence in ref]
tokenized_ref_prep = [self._encode(sentence, prepend=True) for sentence in ref]
forward_scores = self._score_forward(tok_sents_in=tokenized_ref, tok_sents_out=tokenized_cand_prep)
reverse_scores = self._score_forward(tok_sents_in=tokenized_cand, tok_sents_out=tokenized_ref_prep)
scores = 0.5 * forward_scores + 0.5 * reverse_scores
if not segment_scores:
scores = np.mean(scores)
return scores
def compute_kendall(
hyp1_scores: list, hyp2_scores: list, dataframe: pd.DataFrame
) -> (int, list):
""" Computes the official WMT19 shared task Kendall correlation score. """
    assert len(hyp1_scores) == len(hyp2_scores) == len(dataframe)
    conc, disc = 0, 0
    for i, row in tqdm(dataframe.iterrows(), total=len(dataframe), desc="Kendall eval..."):
if hyp1_scores[i] > hyp2_scores[i]:
conc += 1
else:
disc += 1
return (conc - disc) / (conc + disc)
def run_prism(mt: list, ref: list, language=False, temperature=1.0) -> list:
prism = Prism(model_dir="m39v1", lang=language, temperature=temperature)
scores = prism.score(cand=mt, ref=ref, segment_scores=True)
return list(scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Evaluates BERTScores against relative preferences."
)
parser.add_argument(
"--test_path",
default="wmt-metrics/wmt19/de-en/relative-ranks.csv",
help="Path to the test dataframe with relative preferences.",
type=str,
)
parser.add_argument(
"--language", default="en", help="Target language of the testset.", type=str,
)
parser.add_argument(
'--temperature',
type=float,
default=1.0,
help='Softmax temperature: values >1.0 produce more uniform samples and values <1.0 produce sharper samples')
parser.add_argument(
"--run_wmt18",
default=False,
help="Runs entire WMT18 evaluation.",
action="store_true",
)
parser.add_argument(
"--run_wmt19",
default=False,
help="Runs entire WMT19 evaluation.",
action="store_true",
)
args = parser.parse_args()
if args.run_wmt18:
lps = [
"en-cs",
"en-de",
"en-et",
"en-fi",
"en-ru",
"en-tr",
"en-zh",
"cs-en",
"de-en",
"et-en",
"fi-en",
"ru-en",
"tr-en",
"zh-en",
]
kendall_scores = {}
for lp in lps:
data = pd.read_csv(f"wmt-metrics/wmt18/{lp}/relative-ranks.csv")
hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), language=lp.split('-')[1], temperature=args.temperature)
hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), language=lp.split('-')[1], temperature=args.temperature)
#hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), list(data.src), language=lp.split('-')[1])
#hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), list(data.src), language=lp.split('-')[1])
kendall = compute_kendall(hyp1_scores, hyp2_scores, data)
print("Results for {}: {}".format(lp, kendall))
kendall_scores[lp] = kendall
print(kendall_scores)
elif args.run_wmt19:
lps = [
"en-cs",
"en-de",
"en-fi",
"en-gu",
"en-kk",
"en-lt",
"en-ru",
"en-zh",
"de-en",
"fi-en",
"gu-en",
"kk-en",
"lt-en",
"ru-en",
"zh-en",
"de-cs",
"de-fr",
"fr-de",
]
kendall_scores = {}
for lp in lps:
data = pd.read_csv(f"wmt-metrics/wmt19/{lp}/relative-ranks.csv")
hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), language=lp.split('-')[1], temperature=args.temperature)
hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), language=lp.split('-')[1], temperature=args.temperature)
kendall = compute_kendall(hyp1_scores, hyp2_scores, data)
print("Results for {}: {}".format(lp, kendall))
kendall_scores[lp] = kendall
print(kendall_scores)
else:
data = pd.read_csv(args.test_path)
kendall_scores = {}
        hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), language=args.language, temperature=args.temperature)
        hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), language=args.language, temperature=args.temperature)
        kendall = compute_kendall(hyp1_scores, hyp2_scores, data)
        print("Results for {}: {}".format(args.test_path, kendall))
        kendall_scores[args.test_path] = kendall
print(kendall_scores)
| 40.006682
| 137
| 0.571341
| 11,857
| 0.660079
| 4,235
| 0.235762
| 4,256
| 0.236931
| 0
| 0
| 3,890
| 0.216556
|
3f090c825452547dfa25b58d3c0bf2f6280faf90
| 826
|
py
|
Python
|
source_code/3-2-download.py
|
VickyMin1994/easy-scraping-tutorial
|
75b7ffc79da397afa95342022c29cd72520f155f
|
[
"MIT"
] | 708
|
2017-12-29T05:32:34.000Z
|
2022-03-25T14:29:05.000Z
|
source_code/3-2-download.py
|
VickyMin1994/easy-scraping-tutorial
|
75b7ffc79da397afa95342022c29cd72520f155f
|
[
"MIT"
] | 6
|
2018-01-06T07:58:31.000Z
|
2020-10-26T15:57:46.000Z
|
source_code/3-2-download.py
|
VickyMin1994/easy-scraping-tutorial
|
75b7ffc79da397afa95342022c29cd72520f155f
|
[
"MIT"
] | 609
|
2017-12-29T10:04:20.000Z
|
2022-03-23T18:32:37.000Z
|
import os
os.makedirs('./img/', exist_ok=True)
IMAGE_URL = "https://mofanpy.com/static/img/description/learning_step_flowchart.png"
def urllib_download():
from urllib.request import urlretrieve
urlretrieve(IMAGE_URL, './img/image1.png') # whole document
def request_download():
import requests
r = requests.get(IMAGE_URL)
with open('./img/image2.png', 'wb') as f:
f.write(r.content) # whole document
def chunk_download():
import requests
r = requests.get(IMAGE_URL, stream=True) # stream loading
with open('./img/image3.png', 'wb') as f:
for chunk in r.iter_content(chunk_size=32):
f.write(chunk)
urllib_download()
print('download image1')
request_download()
print('download image2')
chunk_download()
print('download image3')
| 23.6
| 84
| 0.670702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.291768
|
3f09b543086a1b61bb8cf4a38db61dcd67d88667
| 5,787
|
py
|
Python
|
flare_classifier/cnn.py
|
Wingham1/hessidf
|
18e63e25f9989565f1f361458f7ff8e53f4579e9
|
[
"Unlicense"
] | null | null | null |
flare_classifier/cnn.py
|
Wingham1/hessidf
|
18e63e25f9989565f1f361458f7ff8e53f4579e9
|
[
"Unlicense"
] | 14
|
2020-01-28T23:15:48.000Z
|
2022-03-12T00:12:36.000Z
|
flare_classifier/cnn.py
|
Wingham1/hessidf
|
18e63e25f9989565f1f361458f7ff8e53f4579e9
|
[
"Unlicense"
] | null | null | null |
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout
import tensorflow.keras as keras
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
def data_prep(path, img_rows, img_cols, color):
"""
A function to preprocess the input data for a CNN.
The images are resized, normalised to have pixel values between 0-1, converted into greyscale if required and put into a numpy array.
Each class label is turned into a one hot pixel array and added to an ordered numpy array such that the order for the labels is the same as the images.
The data is shuffled to make sure each batch is representative of the overall data during training which will reduce overfitting to each batch.
    This function requires that the images for each class are in a separate directory.
param:
- path, a string of the path to the directory containing the images
- img_rows, an integer for the number of rows the resized image should have
- img_cols, an integer for the number of columns the resized image should have
- color, a boolean that is set to true if the image should be in RGB colour space or false for greyscale
return:
- images, a numpy array of images with pixel values normalised to be between 0 and 1.
numpy array dimensions are [number of images, number of rows, number of columns, number of chanels]
- labels, a numpy array of labels associated with each image (labels are a one hot pixel numpy array [1, 0, 0, ...] or [0, 1, 0, ...], etc)
"""
images = []
labels = []
for image_class in os.listdir(path):
print('image_class =', image_class)
path_to_class_directory = os.path.join(path, image_class)
for img_name in os.listdir(path_to_class_directory):
true_path = os.path.join(path_to_class_directory, img_name)
if color:
images.append(cv2.imread(true_path, 1)/255.0)
else:
images.append(cv2.imread(true_path, 0)/255.0) # greyscale
labels.append(os.listdir(path).index(image_class))
data = list(zip(images, labels))
np.random.shuffle(data)
images, labels = zip(*data)
    images = [cv2.resize(img, (img_rows, img_cols), interpolation=cv2.INTER_AREA) for img in images]  # resize images to all be the same size
if color:
images = np.array(images).reshape(len(images), img_rows, img_cols, 3)
else:
images = np.array(images).reshape(len(images), img_rows, img_cols, 1)
labels = keras.utils.to_categorical(labels, num_classes=len(os.listdir(path)))
return images, labels
def build_CNN(img_rows, img_cols, color=False):
model = Sequential()
if color:
model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(img_rows, img_cols, 3)))
else:
model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu', input_shape=(img_rows, img_cols, 1)))
model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu'))
model.add(Flatten())
#model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
return model
def decode_labels(coded, class_names):
"""
    A function to get the name of the class by decoding a one hot pixel array.
Uses a list comprehension and boolean indexing.
The list comprehension returns the index of the variable with the highest value in each one hot pixel array.
That list is then used for boolean indexing with a numpy array to get a list of class_names for each label in coded.
Param:
- coded, a numpy array of coded labels
- class_names, a list of the class_names in the same order they were coded (alphabetical)
Return:
- numpy array of class names for each label in coded
"""
return np.array(class_names)[[np.argmax(example) for example in coded]]
def calc_accuracy(pred, real):
"""
A function to calculate the accuracy of a CNN when given a list of predicted classes and a list of the real classes
Param:
- pred, a numpy array of predicted classes
- real, a numpy array of the real classes
Return:
- Accuracy as a decimal
"""
return sum(pred==real) / len(pred)
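def _decode_and_score_example():
    """Hedged usage sketch (hypothetical one-hot arrays, not part of the original script):
    demonstrates decode_labels and calc_accuracy on a tiny in-memory example."""
    class_names = ['B', 'C']
    preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
    real = np.array([[1, 0], [0, 1], [0, 1]])
    decoded_preds = decode_labels(preds, class_names)  # ['B', 'C', 'B']
    decoded_real = decode_labels(real, class_names)    # ['B', 'C', 'C']
    return calc_accuracy(decoded_preds, decoded_real)  # 2 correct out of 3 -> ~0.667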
if __name__ == '__main__':
path = 'data'
img_rows = 150
img_cols = 150
is_color = True
model_filename = 'flare_cnn'
print('\nloading training data\n')
num_classes = len(os.listdir(path))
x, y = data_prep(path, img_rows, img_cols, color=is_color)
x_train, x_test, y_train, y_test = train_test_split(x, y)
print('\nbuilding model\n')
cnn = build_CNN(img_rows, img_cols, color=is_color)
print('\ntraining model\n')
cnn.fit(x_train, y_train, batch_size=50, epochs=1, validation_split=0.2)
print('\nsaving model\n')
if is_color:
model_filename = model_filename + '_RGB' + '.h5'
else:
model_filename = model_filename + '_grey' + '.h5'
cnn.save(model_filename)
print('\nsaved model to file {}\n'.format(model_filename))
print('\nloading model\n')
loaded_cnn = keras.models.load_model(model_filename)
print('\ngenerating predictions\n')
predictions = loaded_cnn.predict(x_test)
dec_preds = decode_labels(predictions, os.listdir(path))
dec_ytest = decode_labels(y_test, os.listdir(path))
    # F1 score would probably be a better metric due to the skew of training examples (num B > num C)
print('\naccuracy =', calc_accuracy(dec_preds, dec_ytest))
| 44.515385
| 155
| 0.683428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,720
| 0.470019
|
3f0acb5cf9be9113370cabc267dfa5dafd6e50f5
| 895
|
py
|
Python
|
survol/sources_types/oracle/library/__init__.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/oracle/library/__init__.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/oracle/library/__init__.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Oracle library
"""
import lib_common
from lib_properties import pc
def Graphic_colorbg():
return "#CC99FF"
def EntityOntology():
return ( ["Db", "Schema", "Library"], )
# Ambiguity with tables, oracle or normal users.
def MakeUri(dbName,schemaName,libraryName):
return lib_common.gUriGen.UriMakeFromDict("oracle/library", { "Db" : dbName, "Schema" : schemaName, "Library" : libraryName } )
def AddInfo(grph,node,entity_ids_arr):
# TODO: SPECIAL. Imported here to avoid circular inclusions, see oracle/package_body/__init__.py
from sources_types.oracle import schema as oracle_schema
argDb = entity_ids_arr[0]
argSchema = entity_ids_arr[1]
node_oraschema = oracle_schema.MakeUri( argDb, argSchema )
grph.add( ( node_oraschema, pc.property_oracle_library, node ) )
def EntityName(entity_ids_arr):
return entity_ids_arr[0] + "." + entity_ids_arr[1] + "." + entity_ids_arr[2]
| 28.870968
| 128
| 0.750838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.267039
|
3f0adc8f234944eb3b185c95906a510034084c0d
| 4,104
|
py
|
Python
|
src/train.py
|
rnagumo/dgm_vae
|
ea9e1a39f0018c9ed55f13f0b88f4afc4657d7e4
|
[
"MIT"
] | 5
|
2020-05-27T02:28:32.000Z
|
2021-03-27T08:07:50.000Z
|
src/train.py
|
rnagumo/dgmvae
|
ea9e1a39f0018c9ed55f13f0b88f4afc4657d7e4
|
[
"MIT"
] | null | null | null |
src/train.py
|
rnagumo/dgmvae
|
ea9e1a39f0018c9ed55f13f0b88f4afc4657d7e4
|
[
"MIT"
] | null | null | null |
"""Training method"""
import argparse
import json
import os
import pathlib
from typing import Union
import numpy as np
import torch
from torch.backends import cudnn
import pytorch_lightning as pl
import dgmvae.models as dvm
from experiment import VAEUpdater
def main():
# -------------------------------------------------------------------------
# 1. Settings
# -------------------------------------------------------------------------
# Kwargs
args = init_args()
# Configs
    config_path = os.getenv("CONFIG_PATH", "./src/config_ch1.json")
    with pathlib.Path(config_path).open() as f:
config = json.load(f)
# Path
root = pathlib.Path(os.getenv("DATA_ROOT", "./data/mnist/"))
save_path = pathlib.Path(os.getenv("SAVE_PATH", "./logs/"),
os.getenv("EVALUATION_NAME", "dev"))
model_path = save_path / "representation"
dataset = os.getenv("DATASET_NAME", "mnist")
# Cuda setting
use_cuda = torch.cuda.is_available() and args.cuda != "null"
gpus = args.cuda if use_cuda else None
# Random seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
cudnn.deterministic = True
cudnn.benchmark = False
# -------------------------------------------------------------------------
# 2. Training
# -------------------------------------------------------------------------
# VAE model
model_dict = {
"beta": dvm.BetaVAE,
"factor": dvm.FactorVAE,
"dipi": dvm.DIPVAE,
"dipii": dvm.DIPVAE,
"joint": dvm.JointVAE,
"tcvae": dvm.TCVAE,
"aae": dvm.AAE,
"avb": dvm.AVB,
}
model = model_dict[args.model](**config[f"{args.model}_params"])
# Updater
updater = VAEUpdater(model, args, dataset, root, args.batch_size)
# Trainer
params = {
"default_save_path": save_path,
"gpus": gpus,
"early_stop_callback": None,
"max_steps": args.steps,
"log_save_interval": args.log_save_interval,
}
trainer = pl.Trainer(**params)
# Run
trainer.fit(updater)
# Export model
model_path.mkdir()
ch_num = config[f"{args.model}_params"]["channel_num"]
export_model(updater.model, str(model_path / "pytorch_model.pt"),
input_shape=(1, ch_num, 64, 64))
def export_model(model: Union[torch.nn.Module, torch.jit.ScriptModule],
path: Union[str, pathlib.Path],
input_shape: tuple = (1, 3, 64, 64),
use_script_module: bool = True
) -> Union[str, pathlib.Path]:
"""Exports model.
Args:
model (torch.nn.Module or torch.jit.ScriptModule): Saved model.
path (str or pathlib.Path): Path to file.
input_shape (tuple, optional): Tuple of input data shape.
use_script_module (bool, optional): Boolean flag for using script
module.
Returns:
path (str or pathlib.Path): Path to saved file.
"""
model = model.cpu().eval()
if isinstance(model, torch.jit.ScriptModule):
assert use_script_module, \
"Provided model is a ScriptModule, set use_script_module to True."
if use_script_module:
if not isinstance(model, torch.jit.ScriptModule):
assert input_shape is not None
traced_model = torch.jit.trace(model, torch.zeros(*input_shape))
else:
traced_model = model
torch.jit.save(traced_model, path)
else:
torch.save(model, path) # saves model as a nn.Module
return path
def init_args():
parser = argparse.ArgumentParser(description="VAE training")
parser.add_argument("--model", type=str, default="beta")
parser.add_argument("--cuda", type=str, default="0")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--steps", type=int, default=100)
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--log-save-interval", type=int, default=100)
return parser.parse_args()
if __name__ == "__main__":
main()
| 29.52518
| 79
| 0.576754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,378
| 0.33577
|
3f0db4e9c999e9ae4b627b4d2fef5914dc26a29e
| 17,193
|
py
|
Python
|
kea/axi_lite_registers/_registers.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3
|
2020-02-28T13:03:59.000Z
|
2020-09-20T06:33:04.000Z
|
kea/axi_lite_registers/_registers.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
kea/axi_lite_registers/_registers.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3
|
2018-12-17T16:33:08.000Z
|
2020-01-21T14:10:25.000Z
|
from myhdl import Signal, intbv, block, always_comb, ConcatSignal
import myhdl
from collections import OrderedDict
import keyword
def _is_valid_name(ident: str) -> bool:
'''Determine if ident is a valid register or bitfield name.
'''
if not isinstance(ident, str):
raise TypeError("expected str, but got {!r}".format(type(ident)))
if not ident.isidentifier():
return False
if keyword.iskeyword(ident):
return False
return True
@block
def assign_bitfield_from_register(reg, bitfield, offset):
if isinstance(bitfield.val, bool):
@always_comb
def assignment():
bitfield.next = reg[offset]
else:
start = offset
stop = offset + len(bitfield)
@always_comb
def assignment():
bitfield.next = reg[stop:start]
return assignment
class Bitfields:
def __eq__(self, other):
if not ((self._bitfields_config == other._bitfields_config) and
(self._initial_values == other._initial_values) and
(self._register_width == other._register_width) and
(self._reg_type == other._reg_type)):
return False
else:
# The values also need to be the same
for bf_name in self._bitfields_config:
if getattr(self, bf_name) != getattr(other, bf_name):
return False
if self.register != other.register:
return False
return True
def __init__(
self, register_width, register_type, bitfields_config,
initial_values=None):
'''
Creates a MyHDL interface representing a series of bitfields.
`register_width` is the width of the register that the bitfields sit
on top of.
`register_type` is one of `axi_read_write`, `axi_read_only` or
`axi_write_only`.
`initial_values` is an optional lookup for each bitfield when the
        register type is `axi_read_write`. If a bitfield
        has an initial value set, then, assuming the register_type is
        `axi_read_write`, the bitfield will be set to the initial value. If
        the register type is not `axi_read_write`, then a ValueError will be
raised if this argument is not `None`.
`bitfields_config` is a dictionary that provides the configuration
for each bitfield on a register. The keys are the names of the
bitfields and each key should point to a configuration dict.
        Each configuration should have the `type` key, which should have data
which is one of:
- `uint`
- `bool`
- `const-uint`
- `const-bool`
In addition, it should also have keys which depend on the type, as
follows:
- `uint`:
- `length` giving the length in bits of the uint
- `offset` giving the offset of the bitfield.
- `bool`:
- `offset` giving the offset of the boolean value.
- `const-uint`:
- `length` giving the length in bits of the uint
- `offset` giving the offset of the bitfield.
- `const-value` giving the value of the constant.
- `const-bool`:
            - `offset` giving the offset of the boolean value.
- `const-value` giving the value of the constant.
Extra keys are ignored.
Other constraints are enforced and will cause an error:
- All bitfields must fit within the register width.
- A `const-uint` and `const-bool` can only be set on a read-only
register.
- Overlapping bitfields are invalid.
- No bitfield can be called 'register'. This is reserved for the
full register representation.
- Only read-write registers can have an initial value.
An example bitfield entry might look something like:
{'foo':{'type': 'uint',
'length': 6,
'offset': 0},
'bar': {'type': 'bool',
'offset': 6},
'baz': {'type': 'const-uint',
'length': 5,
'offset': 7,
'const-value': 15}}
'''
if len(bitfields_config) == 0:
raise ValueError('bitfields_config cannot be empty')
if register_type not in (
'axi_read_write', 'axi_read_only', 'axi_write_only'):
raise ValueError(
'The register type must be one of `axi_read_write`, '
'`axi_read_only` or `axi_write_only`')
if initial_values != None and register_type != 'axi_read_write':
raise ValueError(
'`initial_values` must be `None` if the register type '
'is not `axi_read_write`')
if initial_values is None:
initial_values = {}
# We always create a register attribute
register_initial_val = 0
for bitfield in bitfields_config:
offset = bitfields_config[bitfield]['offset']
try:
init_val = initial_values[bitfield]
except KeyError:
init_val = 0
register_initial_val += init_val << offset
self._reg_type = register_type
self._register_width = register_width
self._bitfields_config = bitfields_config
self._initial_values = initial_values
bitfield_masks = {}
bitfield_starts = {}
bitfield_stops = {}
self._constant_vals = {}
for bitfield in bitfields_config:
if not _is_valid_name(bitfield):
raise ValueError(
'Bitfield names must be valid python identifiers: '
'{}'.format(bitfield))
if bitfield[0] == '_':
raise ValueError(
'Bitfield names cannot begin with an underscore: '
'{}'.format(bitfield))
if bitfield == 'register':
raise ValueError('Bitfields cannot be named `register`.')
if bitfields_config[bitfield]['type'] == 'uint':
length = bitfields_config[bitfield]['length']
offset = bitfields_config[bitfield]['offset']
bf_signal = Signal(intbv(0)[length:])
mask = (2**length - 1) << offset
bitfield_starts[offset] = bitfield
bitfield_stops[bitfield] = offset + length
elif bitfields_config[bitfield]['type'] == 'bool':
offset = bitfields_config[bitfield]['offset']
bf_signal = Signal(False)
mask = 1 << offset
bitfield_starts[offset] = bitfield
bitfield_stops[bitfield] = offset + 1
elif bitfields_config[bitfield]['type'] == 'const-uint':
if register_type != 'axi_read_only':
raise ValueError(
'The bitfield `{}` is of type `const-uint` which '
'requires the register is read-only, but the register '
'has been configured to be `{}`'.format(
bitfield, register_type))
length = bitfields_config[bitfield]['length']
offset = bitfields_config[bitfield]['offset']
const_val = int(bitfields_config[bitfield]['const-value'])
if (const_val >= 2**length or const_val < 0):
raise ValueError(
'The bitfield const value, {}, is invalid for '
'bitfield {}'.format(const_val, bitfield))
bf_signal = intbv(const_val)[length:]
self._constant_vals[bitfield] = const_val
# We also set the initial value for constants
register_initial_val += const_val << offset
mask = (2**length - 1) << offset
bitfield_starts[offset] = bitfield
bitfield_stops[bitfield] = offset + length
elif bitfields_config[bitfield]['type'] == 'const-bool':
if register_type != 'axi_read_only':
raise ValueError(
'The bitfield `{}` is of type `const-bool` which '
'requires the register is read-only, but the register '
'has been configured to be `{}`'.format(
bitfield, register_type))
offset = bitfields_config[bitfield]['offset']
const_val = bitfields_config[bitfield]['const-value']
if not isinstance(const_val, bool):
raise ValueError(
'The bitfield const value, {}, is invalid for '
'bitfield {}'.format(const_val, bitfield))
bf_signal = const_val
self._constant_vals[bitfield] = const_val
# We also set the initial value for constants
register_initial_val += const_val << offset
mask = 1 << offset
bitfield_starts[offset] = bitfield
bitfield_stops[bitfield] = offset + 1
else:
raise ValueError('A bitfield type must be one of `uint`, '
'`bool`, `const-uint` or `const-bool`: '
'{}'.format(bitfield))
if mask >= 2**register_width:
raise ValueError(
'The bitfield `{}` is out of range for a register of '
'width {}'.format(bitfield, register_width))
# Check the bitfield doesn't overlap with any others
for other_bf in bitfield_masks:
if (bitfield_masks[other_bf] & mask) != 0:
raise ValueError(
'Bitfield `{}` overlaps with bitfield `{}`'.format(
bitfield, other_bf))
bitfield_masks[bitfield] = mask
setattr(self, bitfield, bf_signal)
# We now need to construct the packed version of the bitfields,
# including padding.
rev_concat_list = []
bitfield_starts_list = list(bitfield_starts.keys())
bitfield_starts_list.sort()
if bitfield_starts_list[0] != 0:
padding = intbv(0)[bitfield_starts_list[0]:]
rev_concat_list.append(padding)
for i, start in enumerate(bitfield_starts_list):
bitfield = bitfield_starts[start]
rev_concat_list.append(getattr(self, bitfield))
try:
next_start = bitfield_starts_list[i + 1]
# The higher up checks make sure padding_len should never be
# negative.
padding_len = next_start - bitfield_stops[bitfield]
if padding_len > 0:
padding = intbv(0)[padding_len:]
rev_concat_list.append(padding)
except IndexError:
if bitfield_stops[bitfield] < register_width:
padding = intbv(0)[
register_width - bitfield_stops[bitfield]:]
rev_concat_list.append(padding)
self.register = Signal(intbv(register_initial_val)[register_width:])
self._concat_list = rev_concat_list[::-1]
self._bitfield_starts = bitfield_starts
self._bitfield_masks = bitfield_masks
@block
def bitfield_connector(self):
if self._reg_type in ('axi_read_write', 'axi_write_only'):
instances = []
for bitfield_start in self._bitfield_starts:
bitfield = getattr(self, self._bitfield_starts[bitfield_start])
instances.append(
assign_bitfield_from_register(
self.register, bitfield, bitfield_start))
return instances
        elif self._reg_type == 'axi_read_only':
if len(self._concat_list) == 1:
# This is a hack to allow a concat signal to work in
# all cases. An alternative would be to special case single
# signals, but that doesn't work well with constants, which
# themselves would require a special case, and some hackery to
# have the constant read (and requiring initial_values to be
# turned on).
keep = Signal(True)
keep.driven = True
reg_signal = ConcatSignal(keep, self._concat_list[0])
else:
reg_signal = ConcatSignal(*self._concat_list)
@always_comb
def assign_register():
self.register.next = reg_signal[self._register_width:]
return assign_register
class RegisterSet(object):
pass
class Registers(object):
''' A general purpose register definition.
'''
@property
def register_types(self):
return self._register_types
def __eq__(self, other):
return (self._bitfields == other._bitfields and
self._register_types == other._register_types and
self._register_width == other._register_width)
def __init__(
self, register_list, register_types=None, register_width=32,
initial_values=None, bitfields=None):
'''
Constructs a MyHDL interface that encapsulates each register name
given in `register_list`. The order of the registers in the list is
kept.
If `register_types` is set, it should be a dictionary like object
that provides data of the form `axi_read_write`, `axi_read_only` or
`axi_write_only` for the register name given by its key. If a register
name is missing from `register_types`, then the register type defaults
to `axi_read_write`. If `register_types` is `None`, then all the
registers are `axi_read_write`.
`register_width` gives the width in bits of each register that is
created, defaulting to 32.
`initial_values` is an optional dictionary that sets the initial
value of a read-write register. A `ValueError` will be raised if an
initial value is set for a non read-write register. The default is
for the initial values to be zero. If a register has bitfields set
(see below), then the dictionary entry should itself be a dictionary
to the initial values for each bitfield.
`bitfields` is an optional dictionary argument in which each register
that is included in the dictionary is populated as a Bitfield interface
rather than a signal. Each data in bitfields is passed directly as the
bitfields_config argument to the initialisation of a `Bitfield` class.
See the documentation for that class to see what form the data should
be.
'''
for name in register_list:
if not _is_valid_name(name):
raise ValueError('Invalid register name: {}'.format(name))
if register_types is None:
# Create a register types dictionary so that the system can handle
# an empty register types argument.
register_types = {}
self._register_width = register_width
# Create an ordered dictionary
self._register_types = OrderedDict()
for each in register_types:
if each not in register_list:
# Check that the register types have a corresponding register
# in the register list. If not error.
raise ValueError(
'Invalid register in register_types: %s' % each)
if initial_values is None:
initial_values = {}
if bitfields is None:
bitfields = {}
for initial_val_key in initial_values:
if (register_types.get(initial_val_key, 'axi_read_write') !=
'axi_read_write'):
raise ValueError(
'Only read-write registers can take initial values: %s' %
initial_val_key + ': ' +
str(register_types[initial_val_key]))
for name in register_list:
register_type = register_types.get(name, 'axi_read_write')
if name in bitfields:
initial_vals = initial_values.get(name, None)
setattr(
self, name,
Bitfields(register_width, register_type, bitfields[name],
initial_values=initial_vals))
else:
# Create the registers
setattr(self, name, Signal(
intbv(initial_values.get(name, 0))[register_width:]))
# Populate the ordered dictionary with the appropriate
# register types, defaulting to 'axi_read_write'
self._register_types[name] = (
register_types.get(name, 'axi_read_write'))
self._bitfields = bitfields
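# Hedged usage sketch (hypothetical register map, not part of the original module): one plain
# read-write register plus a read-only register reusing the bitfield layout from the Bitfields
# docstring example above.
if __name__ == '__main__':
    example_bitfields = {
        'status': {'foo': {'type': 'uint', 'length': 6, 'offset': 0},
                   'bar': {'type': 'bool', 'offset': 6},
                   'baz': {'type': 'const-uint', 'length': 5, 'offset': 7,
                           'const-value': 15}}}
    regs = Registers(['control', 'status'],
                     register_types={'status': 'axi_read_only'},
                     bitfields=example_bitfields)
    print(len(regs.control))               # 32-bit read-write register signal
    print(int(regs.status.baz))            # 15, the constant bitfield value
    print(hex(int(regs.status.register)))  # 0x780: the constant folded into the register value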
| 36.89485
| 79
| 0.571163
| 16,324
| 0.949456
| 0
| 0
| 1,803
| 0.104868
| 0
| 0
| 6,839
| 0.397778
|
3f0e2d51a2df3a348d377cd1a32d06c17973e189
| 1,429
|
py
|
Python
|
tools/clear_from_n.py
|
ubercomrade/MultiDeNA
|
128f2963cf0a49f94c85744c5eaaf5c41f0e161c
|
[
"MIT"
] | null | null | null |
tools/clear_from_n.py
|
ubercomrade/MultiDeNA
|
128f2963cf0a49f94c85744c5eaaf5c41f0e161c
|
[
"MIT"
] | null | null | null |
tools/clear_from_n.py
|
ubercomrade/MultiDeNA
|
128f2963cf0a49f94c85744c5eaaf5c41f0e161c
|
[
"MIT"
] | null | null | null |
import random
def read_fasta(path_in, path_out):
fasta = list()
append = fasta.append
fasta_in = open(path_in, 'r')
fasta_out = open(path_out, 'w')
for index, line in enumerate(fasta_in):
if not line.startswith('>'):
line = line.strip().upper()
line = clear_n(line)
if line != '':
fasta_out.write('>{0}\n'.format(int(index / 2)))
fasta_out.write(line + '\n')
fasta_in.close()
fasta_out.close()
pass
def longest(ss):
if len(ss[0]) > len(ss[1]):
return(ss[0])
else:
return(ss[1])
def clear_n(string):
while 1:
position = string.find('N')
if position == -1:
break
elif position == len(string) - 1:
string = string[:position]  # drop only the trailing 'N'
break
elif string[position + 1] != 'N':
string = string[:position] + random.choice('ACGT') + string[position + 1:]
else:
for index, n in enumerate(string[position:],position):
if n != 'N':
string = longest([string[:position], string[index:]])
break
elif index == len(string) - 1:
string = string[:position]
break
return(string)
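# Illustrative behaviour of clear_n (comment added for clarity; the example
# sequences are hypothetical):
#   - an isolated 'N' is replaced by a random base, e.g. 'ACNGT' -> 'AC?GT'
#     where '?' is one of A/C/G/T;
#   - a run of 'N's splits the sequence and only the longer side is kept,
#     e.g. 'ACGTNNNACG' -> 'ACGT';
#   - a trailing 'N' is simply dropped.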
def clear_from_n(fasta_in, fasta_out):
read_fasta(fasta_in, fasta_out)
return(0)
| 28.019608
| 86
| 0.501749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.026592
|
3f109ba5a82b80a619a2cca61182b7519ce6df9d
| 2,217
|
py
|
Python
|
tensorbank/tf/slices.py
|
pshved/tensorbank
|
6a1497b58cfac5e7218ec42c04dd62e17b7bb88c
|
[
"MIT"
] | 1
|
2020-07-07T09:00:28.000Z
|
2020-07-07T09:00:28.000Z
|
tensorbank/tf/slices.py
|
pshved/tensorbank
|
6a1497b58cfac5e7218ec42c04dd62e17b7bb88c
|
[
"MIT"
] | null | null | null |
tensorbank/tf/slices.py
|
pshved/tensorbank
|
6a1497b58cfac5e7218ec42c04dd62e17b7bb88c
|
[
"MIT"
] | null | null | null |
"""Advanced Tensor slicing
==========================
Utilities for advanced tensor slicing and batching operations.
Reference
---------
"""
import tensorflow as tf
def slice_within_stride(x, stride, si=0, ei=None, keepdims=True):
"""Select ``x[..., (i * stride + si):(i * stride + ei)]`` for each i.
The returned tensor has its last dimension scaled to ``(ei-si)/stride``
of its original size.
As a natural special case, ``tb.slice_within_stride(x, N, keepdims=False)``
is equivalent to splitting the last dimension into groups of ``N``, as in
``tf.reshape(x, (*x.shape[:-1], -1, N))``.
Example:
When predicting anchor positions in SSD, ``num_classes +
num_offsets`` are predicted for each anchor. To get only the
class confidence, this would be used::
logits = model(input)
class_logits = tb.slice_within_stride(
logits,
num_classes + num_offsets,
0,
num_classes)
loss = softmax_cross_entropy_with_logits(
class_preds, class_logits)
Args:
x (tf.Tensor): value to modify
stride (int): stride for the last dimension
si (int): starting index within stride. Negative indices are
supported. Defaults to 0.
ei (int): end index (1 element after the last) within stride.
Negative indices are supported. Defaults to ``None``, which
means "until the last element".
keepdims (bool): if False, adds another dimension that
iterates over each stride. This dimension will be of size
``ei-si``. Defaults to True.
Returns:
tf.Tensor: modified ``x`` with the last dimension sliced.
"""
step1 = tf.reshape(x, (-1, stride))
step2 = step1[..., si:ei]
new_shape = list(x.shape)
new_shape[-1] = -1
if not keepdims:
if ei is None:
ei = stride
# Calculate the size of the slice. This is O(stride) which is
# small.
last_dim_len = len(list(range(stride)[si:ei]))
new_shape.append(last_dim_len)
print("NS: {}".format(new_shape))
step3 = tf.reshape(step2, new_shape)
return step3
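# Worked example (comment added for clarity; the tensor is hypothetical).
# With stride=4, si=0, ei=2 each group of 4 values in the last dimension is
# reduced to its first 2 values:
#
#     x = tf.reshape(tf.range(8), (1, 8))            # [[0, 1, 2, 3, 4, 5, 6, 7]]
#     slice_within_stride(x, stride=4, si=0, ei=2)   # [[0, 1, 4, 5]]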
| 31.225352
| 74
| 0.592242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,721
| 0.776274
|
3f1168ed05032f188730bcd06823c66a0ec28d77
| 5,168
|
py
|
Python
|
testfixtures/tests/test_roundcomparison.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
testfixtures/tests/test_roundcomparison.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
testfixtures/tests/test_roundcomparison.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright (c) 2014 Simplistix Ltd
# See license.txt for license details.
from decimal import Decimal
from testfixtures import RoundComparison as R, compare, ShouldRaise
from unittest import TestCase
from ..compat import PY2, PY3
class Tests(TestCase):
def test_equal_yes_rhs(self):
self.assertTrue(0.123457 == R(0.123456, 5))
def test_equal_yes_lhs(self):
self.assertTrue(R(0.123456, 5) == 0.123457)
def test_equal_no_rhs(self):
self.assertFalse(0.123453 == R(0.123456, 5))
def test_equal_no_lhs(self):
self.assertFalse(R(0.123456, 5) == 0.123453)
def test_not_equal_yes_rhs(self):
self.assertFalse(0.123457 != R(0.123456, 5))
def test_not_equal_yes_lhs(self):
self.assertFalse(R(0.123456, 5) != 0.123457)
def test_not_equal_no_rhs(self):
self.assertTrue(0.123453 != R(0.123456, 5))
def test_not_equal_no_lhs(self):
self.assertTrue(R(0.123456, 5) != 0.123453)
def test_equal_in_sequence_rhs(self):
self.assertEqual((1, 2, 0.123457),
(1, 2, R(0.123456, 5)))
def test_equal_in_sequence_lhs(self):
self.assertEqual((1, 2, R(0.123456, 5)),
(1, 2, 0.123457))
def test_not_equal_in_sequence_rhs(self):
self.assertNotEqual((1, 2, 0.1236),
(1, 2, R(0.123456, 5)))
def test_not_equal_in_sequence_lhs(self):
self.assertNotEqual((1, 2, R(0.123456, 5)),
(1, 2, 0.1236))
def test_not_numeric_rhs(self):
with ShouldRaise(TypeError):
'abc' == R(0.123456, 5)
def test_not_numeric_lhs(self):
with ShouldRaise(TypeError):
R(0.123456, 5) == 'abc'
def test_repr(self):
compare('<R:0.12346 to 5 digits>',
repr(R(0.123456, 5)))
def test_str(self):
compare('<R:0.12346 to 5 digits>',
str(R(0.123456, 5)))
def test_str_negative(self):
if PY3:
expected = '<R:123500 to -2 digits>'
else:
expected = '<R:123500.0 to -2 digits>'
compare(expected, repr(R(123456, -2)))
TYPE_ERROR_DECIMAL = TypeError(
"Cannot compare <R:0.12346 to 5 digits> with <class 'decimal.Decimal'>"
)
def test_equal_yes_decimal_to_float_rhs(self):
with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
self.assertTrue(Decimal("0.123457") == R(0.123456, 5))
def test_equal_yes_decimal_to_float_lhs(self):
with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
self.assertTrue(R(0.123456, 5) == Decimal("0.123457"))
def test_equal_no_decimal_to_float_rhs(self):
with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
self.assertFalse(Decimal("0.123453") == R(0.123456, 5))
def test_equal_no_decimal_to_float_lhs(self):
with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
self.assertFalse(R(0.123456, 5) == Decimal("0.123453"))
TYPE_ERROR_FLOAT = TypeError(
"Cannot compare <R:0.12346 to 5 digits> with <class 'float'>"
)
def test_equal_yes_float_to_decimal_rhs(self):
with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
self.assertTrue(0.123457 == R(Decimal("0.123456"), 5))
def test_equal_yes_float_to_decimal_lhs(self):
with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
self.assertTrue(R(Decimal("0.123456"), 5) == 0.123457)
def test_equal_no_float_to_decimal_rhs(self):
with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
self.assertFalse(0.123453 == R(Decimal("0.123456"), 5))
def test_equal_no_float_to_decimal_lhs(self):
with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
self.assertFalse(R(Decimal("0.123456"), 5) == 0.123453)
def test_integer_float(self):
with ShouldRaise(TypeError, unless=PY2):
1 == R(1.000001, 5)
def test_float_integer(self):
with ShouldRaise(TypeError, unless=PY2):
R(1.000001, 5) == 1
def test_equal_yes_integer_other_rhs(self):
self.assertTrue(10 == R(11, -1))
def test_equal_yes_integer_lhs(self):
self.assertTrue(R(11, -1) == 10)
def test_equal_no_integer_rhs(self):
self.assertFalse(10 == R(16, -1))
def test_equal_no_integer_lhs(self):
self.assertFalse(R(16, -1) == 10)
def test_equal_integer_zero_precision(self):
self.assertTrue(1 == R(1, 0))
def test_equal_yes_negative_precision(self):
self.assertTrue(149.123 == R(101.123, -2))
def test_equal_no_negative_precision(self):
self.assertFalse(149.123 == R(150.001, -2))
def test_decimal_yes_rhs(self):
self.assertTrue(Decimal('0.123457') == R(Decimal('0.123456'), 5))
def test_decimal_yes_lhs(self):
self.assertTrue(R(Decimal('0.123456'), 5) == Decimal('0.123457'))
def test_decimal_no_rhs(self):
self.assertFalse(Decimal('0.123453') == R(Decimal('0.123456'), 5))
def test_decimal_no_lhs(self):
self.assertFalse(R(Decimal('0.123456'), 5) == Decimal('0.123453'))
| 33.128205
| 79
| 0.629257
| 4,932
| 0.954334
| 0
| 0
| 0
| 0
| 0
| 0
| 477
| 0.092299
|
3f11b3d9455edd6883b563bf0cbd4035db741ccc
| 23,628
|
py
|
Python
|
config/usb_device_cdc.py
|
newbs/usb
|
5aeafc26849673a357a6110713524387f2f5f84d
|
[
"0BSD"
] | null | null | null |
config/usb_device_cdc.py
|
newbs/usb
|
5aeafc26849673a357a6110713524387f2f5f84d
|
[
"0BSD"
] | null | null | null |
config/usb_device_cdc.py
|
newbs/usb
|
5aeafc26849673a357a6110713524387f2f5f84d
|
[
"0BSD"
] | null | null | null |
"""*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
currentQSizeRead = 1
currentQSizeWrite = 1
currentQSizeSerialStateNotification = 1
cdcInterfacesNumber = 2
cdcDescriptorSize = 58
cdcEndpointsPic32 = 2
cdcEndpointsSAM = 3
indexFunction = None
configValue = None
startInterfaceNumber = None
numberOfInterfaces = None
useIad = None
epNumberInterrupt = None
epNumberBulkOut = None
epNumberBulkIn = None
cdcEndpointNumber = None
def handleMessage(messageID, args):
global useIad
if (messageID == "UPDATE_CDC_IAD_ENABLE"):
useIad.setValue(args["iadEnable"])
return args
def onAttachmentConnected(source, target):
global cdcInterfacesNumber
global cdcDescriptorSize
global configValue
global startInterfaceNumber
global numberOfInterfaces
global useIad
global epNumberInterrupt
global epNumberBulkOut
global epNumberBulkIn
global cdcEndpointsPic32
global cdcEndpointsSAM
global currentQSizeRead
global currentQSizeWrite
global currentQSizeSerialStateNotification
print ("CDC Function Driver: Attached")
remoteComponent = target["component"]
remoteComponentId = remoteComponent.getID()
if (remoteComponentId == "usb_device"):
dependencyID = source["id"]
ownerComponent = source["component"]
# Read number of functions from USB Device Layer
nFunctions = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_FUNCTIONS_NUMBER")
if nFunctions != None:
#Log.writeDebugMessage ("USB Device CDC Function Driver: Attachment connected")
# Update Number of Functions in USB Device, Increment the value by One.
args = {"nFunction":nFunctions + 1}
res = Database.sendMessage("usb_device", "UPDATE_FUNCTIONS_NUMBER", args)
# If we have CDC function driver plus any function driver (no matter what Class), we enable IAD.
if nFunctions > 0:
args = {"nFunction":True}
res = Database.sendMessage("usb_device", "UPDATE_IAD_ENABLE", args)
iadEnableSymbol = ownerComponent.getSymbolByID("CONFIG_USB_DEVICE_FUNCTION_USE_IAD")
iadEnableSymbol.clearValue()
iadEnableSymbol.setValue(True, 1)
isIadEnabled = Database.getSymbolValue("usb_device_cdc_0", "CONFIG_USB_DEVICE_FUNCTION_USE_IAD")
if isIadEnabled == False:
args = {"iadEnable":True}
res = Database.sendMessage("usb_device_cdc_0", "UPDATE_CDC_IAD_ENABLE", args)
nCDCInstances = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_INSTANCES")
if nCDCInstances == 2:
configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
if configDescriptorSize != None:
args = {"nFunction": configDescriptorSize + 8}
res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)
configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
if configDescriptorSize != None:
iadEnableSymbol = ownerComponent.getSymbolByID("CONFIG_USB_DEVICE_FUNCTION_USE_IAD")
if iadEnableSymbol.getValue() == True:
descriptorSize = cdcDescriptorSize + 8
else:
descriptorSize = cdcDescriptorSize
args = {"nFunction": configDescriptorSize + descriptorSize}
res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)
nInterfaces = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_INTERFACES_NUMBER")
if nInterfaces != None:
args = {"nFunction": nInterfaces + cdcInterfacesNumber}
res = Database.sendMessage("usb_device", "UPDATE_INTERFACES_NUMBER", args)
startInterfaceNumber.setValue(nInterfaces, 1)
nEndpoints = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_ENDPOINTS_NUMBER")
if nEndpoints != None:
epNumberInterrupt.setValue(nEndpoints + 1, 1)
epNumberBulkOut.setValue(nEndpoints + 2, 1)
if any(x in Variables.get("__PROCESSOR") for x in ["PIC32MZ", "PIC32MX", "PIC32MK", "SAMD21", "SAMDA1","SAMD51", "SAME51", "SAME53", "SAME54", "SAML21", "SAML22", "SAMD11"]):
epNumberBulkIn.setValue(nEndpoints + 2, 1)
args = {"nFunction": nEndpoints + cdcEndpointsPic32}
res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)
else:
epNumberBulkIn.setValue(nEndpoints + 3, 1)
args = {"nFunction": nEndpoints + cdcEndpointsSAM}
res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)
def onAttachmentDisconnected(source, target):
print ("CDC Function Driver: Detached")
global cdcInterfacesNumber
global cdcDescriptorSize
global configValue
global startInterfaceNumber
global numberOfInterfaces
global useIad
global epNumberInterrupt
global epNumberBulkOut
global epNumberBulkIn
global cdcEndpointsPic32
global cdcEndpointsSAM
global cdcInstancesCount
global currentQSizeRead
global currentQSizeWrite
global currentQSizeSerialStateNotification
dependencyID = source["id"]
ownerComponent = source["component"]
remoteComponent = target["component"]
remoteComponentId = remoteComponent.getID()
if (remoteComponentId == "usb_device"):
nFunctions = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_FUNCTIONS_NUMBER")
if nFunctions != None:
nFunctions = nFunctions - 1
args = {"nFunction":nFunctions}
res = Database.sendMessage("usb_device", "UPDATE_FUNCTIONS_NUMBER", args)
endpointNumber = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_ENDPOINTS_NUMBER")
if endpointNumber != None:
if any(x in Variables.get("__PROCESSOR") for x in ["PIC32MZ"]):
args = {"nFunction":endpointNumber - cdcEndpointsPic32 }
res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)
else:
args = {"nFunction":endpointNumber - cdcEndpointsSAM }
res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)
interfaceNumber = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_INTERFACES_NUMBER")
if interfaceNumber != None:
args = {"nFunction": interfaceNumber - 2}
res = Database.sendMessage("usb_device", "UPDATE_INTERFACES_NUMBER", args)
nCDCInstances = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_INSTANCES")
if nCDCInstances != None:
nCDCInstances = nCDCInstances - 1
args = {"cdcInstanceCount": nCDCInstances}
res = Database.sendMessage("usb_device_cdc", "UPDATE_CDC_INSTANCES", args)
if nCDCInstances == 1 and nFunctions != None and nFunctions == 1:
args = {"iadEnable":False}
res = Database.sendMessage("usb_device_cdc_0", "UPDATE_CDC_IAD_ENABLE", args)
args = {"nFunction":False}
res = Database.sendMessage("usb_device", "UPDATE_IAD_ENABLE", args)
configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
if configDescriptorSize != None:
args = {"nFunction": configDescriptorSize - 8}
res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)
configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
if configDescriptorSize != None:
if useIad.getValue() == True:
descriptorSize = cdcDescriptorSize + 8
else:
descriptorSize = cdcDescriptorSize
args = {"nFunction": configDescriptorSize - descriptorSize}
res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)
def destroyComponent(component):
print ("CDC Function Driver: Destroyed")
# This function is called when user modifies the CDC Queue Size.
def usbDeviceCdcBufferQueueSize(usbSymbolSource, event):
global currentQSizeRead
global currentQSizeWrite
global currentQSizeSerialStateNotification
queueDepthCombined = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_QUEUE_DEPTH_COMBINED")
if (event["id"] == "CONFIG_USB_DEVICE_FUNCTION_READ_Q_SIZE"):
queueDepthCombined = queueDepthCombined - currentQSizeRead + event["value"]
currentQSizeRead = event["value"]
if (event["id"] == "CONFIG_USB_DEVICE_FUNCTION_WRITE_Q_SIZE"):
queueDepthCombined = queueDepthCombined - currentQSizeWrite + event["value"]
currentQSizeWrite = event["value"]
if (event["id"] == "CONFIG_USB_DEVICE_FUNCTION_SERIAL_NOTIFIACATION_Q_SIZE"):
queueDepthCombined = queueDepthCombined - currentQSizeSerialStateNotification + event["value"]
currentQSizeSerialStateNotification = event["value"]
# We have updated queueDepthCombined variable with current combined queue length.
# Now send a message to USB_DEVICE_CDC_COMMON.PY to modify the Combined queue length.
args = {"cdcQueueDepth": queueDepthCombined}
res = Database.sendMessage("usb_device_cdc", "UPDATE_CDC_QUEUE_DEPTH_COMBINED", args)
def instantiateComponent(usbDeviceCdcComponent, index):
global cdcDescriptorSize
global cdcInterfacesNumber
global cdcDescriptorSize
global configValue
global startInterfaceNumber
global numberOfInterfaces
global useIad
global currentQSizeRead
global currentQSizeWrite
global currentQSizeSerialStateNotification
global epNumberInterrupt
global epNumberBulkOut
global epNumberBulkIn
res = Database.activateComponents(["usb_device"])
if any(x in Variables.get("__PROCESSOR") for x in ["PIC32MZ"]):
MaxEpNumber = 7
BulkInDefaultEpNumber = 2
elif any(x in Variables.get("__PROCESSOR") for x in ["PIC32MX", "PIC32MK"]):
MaxEpNumber = 15
BulkInDefaultEpNumber = 2
elif any(x in Variables.get("__PROCESSOR") for x in ["SAMD21", "SAMDA1", "SAMD51", "SAME51", "SAME53", "SAME54", "SAML21", "SAML22", "SAMD11"]):
MaxEpNumber = 7
BulkInDefaultEpNumber = 2
elif any(x in Variables.get("__PROCESSOR") for x in ["SAMA5D2", "SAM9X60"]):
MaxEpNumber = 15
BulkInDefaultEpNumber = 3
elif any(x in Variables.get("__PROCESSOR") for x in ["SAME70", "SAMS70", "SAMV70", "SAMV71"]):
MaxEpNumber = 9
BulkInDefaultEpNumber = 3
elif any(x in Variables.get("__PROCESSOR") for x in ["SAMG55"]):
MaxEpNumber = 5
BulkInDefaultEpNumber = 3
# Index of this function
indexFunction = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_INDEX", None)
indexFunction.setVisible(False)
indexFunction.setMin(0)
indexFunction.setMax(16)
indexFunction.setDefaultValue(index)
# Config name: Configuration number
configValue = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_CONFIG_VALUE", None)
configValue.setLabel("Configuration Value")
configValue.setVisible(False)
configValue.setMin(1)
configValue.setMax(16)
configValue.setDefaultValue(1)
configValue.setReadOnly(True)
# Adding Start Interface number
startInterfaceNumber = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_INTERFACE_NUMBER", None)
startInterfaceNumber.setLabel("Start Interface Number")
helpText = '''Indicates the Interface Number of the first interface in
the Communication Device Interface Group. This is provided here for
indication purposes only and is automatically updated based on the
function driver selection.'''
startInterfaceNumber.setDescription(helpText)
startInterfaceNumber.setVisible(True)
startInterfaceNumber.setMin(0)
startInterfaceNumber.setDefaultValue(0)
startInterfaceNumber.setReadOnly(True)
# Adding Number of Interfaces
numberOfInterfaces = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_NUMBER_OF_INTERFACES", None)
numberOfInterfaces.setLabel("Number of Interfaces")
helpText = '''Indicates the number of interfaces in the Communication Device
Interface Group. This is provided here for indication purposes
only.'''
numberOfInterfaces.setDescription(helpText)
numberOfInterfaces.setVisible(True)
numberOfInterfaces.setMin(1)
numberOfInterfaces.setMax(16)
numberOfInterfaces.setDefaultValue(2)
numberOfInterfaces.setReadOnly(True)
# Use IAD
useIad = usbDeviceCdcComponent.createBooleanSymbol("CONFIG_USB_DEVICE_FUNCTION_USE_IAD", None)
useIad.setLabel("Use Interface Association Descriptor")
helpText = '''Enable this option to generate an Interface Association
Descriptor (IAD). This option should be enabled when multiple CDC
interfaces are included in the Device. Enabling the option will update
the Class, Subclass fields in the Device Descriptor to indicate that
the device uses an IAD.'''
useIad.setDescription(helpText)
useIad.setVisible(True)
useIad.setDefaultValue(False)
useIad.setUseSingleDynamicValue(True)
# CDC Function driver Read Queue Size
queueSizeRead = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_READ_Q_SIZE", None)
queueSizeRead.setLabel("CDC Read Queue Size")
helpText = '''Configure the size of the Read Queue. This configures the
maximum number of Read Requests that can be queued before the Function
Driver returns a queue full response. Using a queue increases memory
consumption but also increases throughput. The driver will queue
requests if the transfer request is currently being processed.'''
queueSizeRead.setDescription(helpText)
queueSizeRead.setVisible(True)
queueSizeRead.setMin(1)
queueSizeRead.setMax(32767)
queueSizeRead.setDefaultValue(1)
currentQSizeRead = queueSizeRead.getValue()
# CDC Function driver Write Queue Size
queueSizeWrite = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_WRITE_Q_SIZE", None)
helpText = '''Configure the size of the Write Queue. This configures
the maximum number of Write Requests that can be queued before the
Function Driver returns a queue full response. Using a queue increases
memory consumption but also increases throughput. The driver will queue
requests if the transfer request is currently being processed.'''
queueSizeWrite.setDescription(helpText)
queueSizeWrite.setLabel("CDC Write Queue Size")
queueSizeWrite.setVisible(True)
queueSizeWrite.setMin(1)
queueSizeWrite.setMax(32767)
queueSizeWrite.setDefaultValue(1)
currentQSizeWrite = queueSizeWrite.getValue()
# CDC Function driver Serial state notification Queue Size
queueSizeSerialStateNotification = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_SERIAL_NOTIFIACATION_Q_SIZE", None)
queueSizeSerialStateNotification.setLabel("CDC Serial Notification Queue Size")
helpText = '''Configure the size of the Serial State Notification
Queue. This configures the maximum number of Serial State Notification
Requests that can be queued before the Function Driver returns a queue
full response. Using a queue increases memory consumption but also
increases throughput. The driver will queue requests if the transfer
request is currently being processed.'''
queueSizeSerialStateNotification.setDescription(helpText)
queueSizeSerialStateNotification.setVisible(True)
queueSizeSerialStateNotification.setMin(1)
queueSizeSerialStateNotification.setMax(32767)
queueSizeSerialStateNotification.setDefaultValue(1)
currentQSizeSerialStateNotification = queueSizeSerialStateNotification.getValue()
# CDC Function driver Notification Endpoint Number
epNumberInterrupt = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_INT_ENDPOINT_NUMBER", None)
helpText = '''Specify the endpoint number of the Interrupt IN Endpoint to
be used for this instance of the CDC Interface. Refer to Device
Datasheet for details on available endpoints and limitations.'''
epNumberInterrupt.setDescription(helpText)
epNumberInterrupt.setLabel("Interrupt Endpoint Number")
epNumberInterrupt.setVisible(True)
epNumberInterrupt.setMin(1)
epNumberInterrupt.setDefaultValue(1)
epNumberInterrupt.setMax(MaxEpNumber)
# CDC Function driver Data OUT Endpoint Number
epNumberBulkOut = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_BULK_OUT_ENDPOINT_NUMBER", None)
helpText = '''Specify the endpoint number of the Bulk OUT Endpoint to
be used for this instance of the CDC Interface. Refer to Device
Datasheet for details on available endpoints and limitations.'''
epNumberBulkOut.setDescription(helpText)
epNumberBulkOut.setLabel("Bulk OUT Endpoint Number")
epNumberBulkOut.setVisible(True)
epNumberBulkOut.setMin(1)
epNumberBulkOut.setDefaultValue(2)
epNumberBulkOut.setMax(MaxEpNumber)
# CDC Function driver Data IN Endpoint Number
epNumberBulkIn = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_BULK_IN_ENDPOINT_NUMBER", None)
helpText = '''Specify the endpoint number of the Bulk IN Endpoint to
be used for this instance of the CDC Interface. Refer to Device
Datasheet for details on available endpoints and limitations.'''
epNumberBulkIn.setDescription(helpText)
epNumberBulkIn.setLabel("Bulk IN Endpoint Number")
epNumberBulkIn.setVisible(True)
epNumberBulkIn.setMin(1)
epNumberBulkIn.setMax(MaxEpNumber)
epNumberBulkIn.setDefaultValue(BulkInDefaultEpNumber)
usbDeviceCdcBufPool = usbDeviceCdcComponent.createBooleanSymbol("CONFIG_USB_DEVICE_CDC_BUFFER_POOL", None)
usbDeviceCdcBufPool.setLabel("**** Buffer Pool Update ****")
usbDeviceCdcBufPool.setDependencies(usbDeviceCdcBufferQueueSize, ["CONFIG_USB_DEVICE_FUNCTION_READ_Q_SIZE", "CONFIG_USB_DEVICE_FUNCTION_WRITE_Q_SIZE", "CONFIG_USB_DEVICE_FUNCTION_SERIAL_NOTIFIACATION_Q_SIZE"])
usbDeviceCdcBufPool.setVisible(False)
############################################################################
#### Dependency ####
############################################################################
# USB DEVICE CDC Common Dependency
Log.writeDebugMessage ("Dependency Started")
numInstances = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_INSTANCES")
if (numInstances == None):
numInstances = 0
args = {"cdcInstanceCount": index+1}
res = Database.sendMessage("usb_device_cdc", "UPDATE_CDC_INSTANCES", args)
#############################################################
# Function Init Entry for CDC
#############################################################
usbDeviceCdcFunInitFile = usbDeviceCdcComponent.createFileSymbol(None, None)
usbDeviceCdcFunInitFile.setType("STRING")
usbDeviceCdcFunInitFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_INIT_ENTRY")
usbDeviceCdcFunInitFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_init.ftl")
usbDeviceCdcFunInitFile.setMarkup(True)
#############################################################
# Function Registration table for CDC
#############################################################
usbDeviceCdcFunRegTableFile = usbDeviceCdcComponent.createFileSymbol(None, None)
usbDeviceCdcFunRegTableFile.setType("STRING")
usbDeviceCdcFunRegTableFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_ENTRY")
usbDeviceCdcFunRegTableFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function.ftl")
usbDeviceCdcFunRegTableFile.setMarkup(True)
#############################################################
# HS Descriptors for CDC Function
#############################################################
usbDeviceCdcDescriptorHsFile = usbDeviceCdcComponent.createFileSymbol(None, None)
usbDeviceCdcDescriptorHsFile.setType("STRING")
usbDeviceCdcDescriptorHsFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_DESCRIPTOR_HS_ENTRY")
usbDeviceCdcDescriptorHsFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_descrptr_hs.ftl")
usbDeviceCdcDescriptorHsFile.setMarkup(True)
#############################################################
# FS Descriptors for CDC Function
#############################################################
usbDeviceCdcDescriptorFsFile = usbDeviceCdcComponent.createFileSymbol(None, None)
usbDeviceCdcDescriptorFsFile.setType("STRING")
usbDeviceCdcDescriptorFsFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_DESCRIPTOR_FS_ENTRY")
usbDeviceCdcDescriptorFsFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_descrptr_fs.ftl")
usbDeviceCdcDescriptorFsFile.setMarkup(True)
#############################################################
# Class code Entry for CDC Function
#############################################################
usbDeviceCdcDescriptorClassCodeFile = usbDeviceCdcComponent.createFileSymbol(None, None)
usbDeviceCdcDescriptorClassCodeFile.setType("STRING")
usbDeviceCdcDescriptorClassCodeFile.setOutputName("usb_device.LIST_USB_DEVICE_DESCRIPTOR_CLASS_CODE_ENTRY")
usbDeviceCdcDescriptorClassCodeFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_class_codes.ftl")
usbDeviceCdcDescriptorClassCodeFile.setMarkup(True)
################################################
# USB CDC Function driver Files
################################################
usbDeviceCdcHeaderFile = usbDeviceCdcComponent.createFileSymbol(None, None)
addFileName('usb_device_cdc.h', usbDeviceCdcComponent, usbDeviceCdcHeaderFile, "middleware/", "/usb/", True, None)
usbCdcHeaderFile = usbDeviceCdcComponent.createFileSymbol(None, None)
addFileName('usb_cdc.h', usbDeviceCdcComponent, usbCdcHeaderFile, "middleware/", "/usb/", True, None)
usbDeviceCdcSourceFile = usbDeviceCdcComponent.createFileSymbol(None, None)
addFileName('usb_device_cdc.c', usbDeviceCdcComponent, usbDeviceCdcSourceFile, "middleware/src/", "/usb/src", True, None)
usbDeviceCdcAcmSourceFile = usbDeviceCdcComponent.createFileSymbol(None, None)
addFileName('usb_device_cdc_acm.c', usbDeviceCdcComponent, usbDeviceCdcAcmSourceFile, "middleware/src/", "/usb/src", True, None)
usbDeviceCdcLocalHeaderFile = usbDeviceCdcComponent.createFileSymbol(None, None)
addFileName('usb_device_cdc_local.h', usbDeviceCdcComponent, usbDeviceCdcLocalHeaderFile, "middleware/src/", "/usb/src", True, None)
# all files go into src/
def addFileName(fileName, component, symbol, srcPath, destPath, enabled, callback):
configName1 = Variables.get("__CONFIGURATION_NAME")
#filename = component.createFileSymbol(None, None)
symbol.setProjectPath("config/" + configName1 + destPath)
symbol.setSourcePath(srcPath + fileName)
symbol.setOutputName(fileName)
symbol.setDestPath(destPath)
if fileName[-2:] == '.h':
symbol.setType("HEADER")
else:
symbol.setType("SOURCE")
symbol.setEnabled(enabled)
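# Illustrative effect of one of the calls above (comment added for clarity):
# addFileName('usb_device_cdc.h', usbDeviceCdcComponent, usbDeviceCdcHeaderFile,
#             "middleware/", "/usb/", True, None)
# registers a HEADER file whose source is "middleware/usb_device_cdc.h" and
# whose project path is "config/<configuration name>/usb/", enabled by default.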
| 46.60355
| 210
| 0.752878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,391
| 0.439775
|
3f147cdd3b7dfdb59f469f69eb27289609a80ec7
| 169
|
py
|
Python
|
quant_test/__init__.py
|
rgkimball/quant_test
|
efa74de02f6a65c2d61029d6e8a1c0b5ac34b0c2
|
[
"Apache-2.0"
] | null | null | null |
quant_test/__init__.py
|
rgkimball/quant_test
|
efa74de02f6a65c2d61029d6e8a1c0b5ac34b0c2
|
[
"Apache-2.0"
] | 1
|
2021-02-02T23:10:35.000Z
|
2021-02-02T23:10:35.000Z
|
quant_test/__init__.py
|
rgkimball/quant_test
|
efa74de02f6a65c2d61029d6e8a1c0b5ac34b0c2
|
[
"Apache-2.0"
] | null | null | null |
"""
quant_test
~~~~~~
The quant_test package - a Python package template project that is intended
to be used as a cookie-cutter for developing new Python packages.
"""
| 21.125
| 75
| 0.745562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 168
| 0.994083
|
3f14a246aafc9d9fb1bbbb14593c493646a1817d
| 5,189
|
py
|
Python
|
django_sql_dashboard/extensions/ExtendedParameter.py
|
ipamo/django-sql-dashboard
|
c976bb59db70df200bdc44f1598aab31a25d3930
|
[
"Apache-2.0"
] | null | null | null |
django_sql_dashboard/extensions/ExtendedParameter.py
|
ipamo/django-sql-dashboard
|
c976bb59db70df200bdc44f1598aab31a25d3930
|
[
"Apache-2.0"
] | null | null | null |
django_sql_dashboard/extensions/ExtendedParameter.py
|
ipamo/django-sql-dashboard
|
c976bb59db70df200bdc44f1598aab31a25d3930
|
[
"Apache-2.0"
] | null | null | null |
import re
from django.utils.html import escape
from django.utils.safestring import mark_safe
from ..utils import Parameter
class ExtendedParameter(Parameter):
extract_re = re.compile(r"\%\(([\w\-]+)(?:\:([\w\-]+))?\)(s|(?:0?\.(\d+))?d|b)")
extract_name_re = lambda name: re.compile(rf"\%\({name}(?:\:[\w\-]+)?\)(?:s|(?:0?\.(\d+))?d|b)")
number_re = re.compile(r"^\d+(?:\.\d+)?")
def __init__(self, name, default_value, typecode, decimals):
if decimals:
typecode = "d"
self.typecode = typecode
# Adapt default value depending on the type
if default_value == "":
if self.typecode == "b":
default_value = "false"
if self.typecode == "d":
default_value = "0"
self.decimals = int(decimals) if len(decimals) >= 1 else 0
super().__init__(name, default_value)
def ensure_consistency(self, previous):
super().ensure_consistency(previous)
if self.typecode != previous.typecode:
raise ValueError("Invalid typecode specification '%s' for parameter '%s': previously registered with typecode '%s'" % (self.typecode, self.name, previous.typecode))
if self.decimals != 0 and self.decimals != previous.decimals:
raise ValueError("Invalid decimals specification '%d' for parameter '%s': previously registered with %d decimals" % (self.decimals, self.name, previous.decimals))
def get_sanitized(self, value, for_default=False):
value = super().get_sanitized(value, for_default=for_default)
if value is None:
return None
if self.typecode == "s":
# String parameter: no need to check sanity because we use psycopg2 parameter-passing feature
return value
# Need to check sanity
if self.typecode == "b":
value = value.lower()
if not value in ["true", "false"]:
raise ValueError("Invalid %svalue for bool parameter '%s': '%s'" % ("default " if for_default else "", self.name, value))
return value
elif self.typecode == "d":
if not ExtendedParameter.number_re.match(value):
raise ValueError("Invalid %svalue for number parameter '%s': '%s'" % ("default " if for_default else "", self.name, value))
return value
else:
raise ValueError("Unsupported typecode '%s' for parameter '%s'" % (self.typecode, self.name))
@property
def step(self):
""" Determine "step" attribute for number inputs """
return pow(10, -1*self.decimals)
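# Worked example (comment added for clarity): decimals == 2 gives
# step == 10 ** -2 == 0.01, the granularity passed to the HTML number input.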
def form_control(self):
label = f"""<label for="qp_{escape(self.name)}">{escape(self.name)}</label>"""
if self.typecode == 'd':
control = f"""<input type="number" step="{str(self.step)}" id="qp_{escape(self.name)}" name="{escape(self.name)}" value="{escape(self.value) if self.value is not None else ""}">"""
elif self.typecode == 'b':
if self.default_value:
control = f"""<input type="hidden" name="{escape(self.name)}" value="false">
<input type="checkbox" id="qp_{escape(self.name)}" name="{escape(self.name)}" value="true" {"checked" if self.value == "true" else ""}>"""
else:
control = f"""<div>
<input type="radio" id="qp_{escape(self.name)}_null" name="{escape(self.name)}" value="" {"checked" if not self.value else ""}>
<label for="qp_{escape(self.name)}_null">null</label>
<input type="radio" id="qp_{escape(self.name)}_true" name="{escape(self.name)}" value="true" {"checked" if self.value == "true" else ""}>
<label for="qp_{escape(self.name)}_true">true</label>
<input type="radio" id="qp_{escape(self.name)}_false" name="{escape(self.name)}" value="false" {"checked" if self.value == "false" else ""}>
<label for="qp_{escape(self.name)}_false">false</label>
</div>"""
else:
control = f"""<input type="text" id="qp_{escape(self.name)}" name="{escape(self.name)}" value="{escape(self.value) if self.value is not None else ""}">"""
return mark_safe(label + '\n' + control)
@classmethod
def execute(cls, cursor, sql: str, parameters: list=[]):
string_values = {}
for parameter in parameters:
if parameter.typecode == 's':
# For strings, we will use psycopg2 name parameter passing
string_values[parameter.name] = parameter.value
# If a default value has been specified, this needs to be removed from the SQL
if parameter.default_value != "":
sql = ExtendedParameter.extract_name_re(parameter.name).sub(f"%({parameter.name})s", sql)
else:
# For non-strings, we cannot use psycopg2 name parameter passing (not supported)
value = parameter.value
sql = ExtendedParameter.extract_name_re(parameter.name).sub(value if value is not None else "null", sql)
cursor.execute(sql, string_values)
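# Illustrative example (comment added for clarity; the query and parameter
# names are hypothetical). For the dashboard SQL
#     select * from item where price > %(min_price:10)d and label = %(label)s
# extract_re matches ('min_price', '10', 'd') and ('label', None, 's'). In
# execute(), the string parameter keeps its %(label)s placeholder and is
# passed to psycopg2 through `string_values`, while the numeric parameter is
# sanitized and substituted directly into the SQL text.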
| 50.378641
| 192
| 0.586626
| 5,063
| 0.975718
| 0
| 0
| 1,024
| 0.197341
| 0
| 0
| 2,331
| 0.44922
|
3f159df489050cc9cb8053b59296d74b1792277e
| 3,644
|
py
|
Python
|
jiotc/models/bilstm_model.py
|
JHP4911/JioTC
|
be82159bdb0f2f10b1ac85966659626b5e8a7304
|
[
"MIT"
] | 4
|
2020-06-17T03:32:23.000Z
|
2021-07-02T06:46:26.000Z
|
jiotc/models/bilstm_model.py
|
dongrixinyu/JioTC
|
be82159bdb0f2f10b1ac85966659626b5e8a7304
|
[
"MIT"
] | null | null | null |
jiotc/models/bilstm_model.py
|
dongrixinyu/JioTC
|
be82159bdb0f2f10b1ac85966659626b5e8a7304
|
[
"MIT"
] | null | null | null |
# -*- coding=utf-8 -*-
# author: dongrixinyu
# contact: dongrixinyu.89@163.com
# blog: https://github.com/dongrixinyu/
# file: bare_embedding.py
# time: 2020-06-12 11:27
import os
import pdb
import logging
from typing import Union, Optional, Dict, Any, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.dropout2d in forward()
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from jiotc.embeddings.base_embedding import BaseEmbedding
from .base_model import BaseModel
# Bidirectional LSTM neural network (many-to-one)
class BiLSTMModel(BaseModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'hidden_size': 128,
'num_layers': 1,
'dropout': 0.2,  # has no effect when num_layers == 1
'bidirectional': True
},
'layer_dense': {
'activation': 'softmax'
}
}
def __init__(self, embed_model: Optional[BaseEmbedding] = None,
device: Optional[str] = None,  # e.g. 'cuda' or 'cpu'
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
'''
self.device
self.embedding_layer
self.embedding
self.embedding_size
self.num_classes
These attributes are already set by the base class and can be used directly.
'''
super(BiLSTMModel, self).__init__(embed_model, device=device)
self.hidden_size = hyper_parameters['layer_bi_lstm']['hidden_size']
self.num_layers = hyper_parameters['layer_bi_lstm']['num_layers']
self.dropout = hyper_parameters['layer_bi_lstm']['dropout']
self.lstm = nn.LSTM(
self.embedding_size, self.hidden_size, self.num_layers,
batch_first=True, bidirectional=True)
self.fc = nn.Linear(self.hidden_size * 2,
self.num_classes) # 2 for bidirection
def forward(self, samples):
masks = samples.gt(0)
embeds = self.embedding_layer(samples) #.to(self.device)
# Sort samples by sequence length (descending) before packing
seq_length = masks.sum(1)
sorted_seq_length, perm_idx = seq_length.sort(descending=True)
embeds = embeds[perm_idx, :]  # reorder embeddings to match sorted lengths
pack_sequence = pack_padded_sequence(
embeds, lengths=sorted_seq_length, batch_first=True)
# Set initial states, involved with batch_size
'''
h0 = torch.autograd.Variable(torch.randn(
self.num_layers * 2, embeds.shape[0],
self.hidden_size)).to(self.device) # 2 for bidirection
c0 = torch.autograd.Variable(torch.randn(
self.num_layers * 2, embeds.shape[0],
self.hidden_size)).to(self.device)
#'''
# Forward propagate LSTM
packed_output, _ = self.lstm(pack_sequence) #, (h0, c0))
# out: tensor of shape (batch_size, seq_length, hidden_size * 2)
lstm_out, _ = pad_packed_sequence(packed_output, batch_first=True)
_, unperm_idx = perm_idx.sort()
lstm_out = lstm_out[unperm_idx, :]
# dropout_layer
lstm_out = lstm_out.permute(1, 0, 2) # [batch_size, seq_len, hidden_size * 2] => [seq_len, batch_size, hidden_size * 2]
# disabled when not training
lstm_out = F.dropout2d(lstm_out, p=self.dropout, training=self.training)
lstm_out = lstm_out.permute(1, 0, 2) # [seq_len, batch_size, hidden_size * 2] => [batch_size, seq_len, hidden_size * 2]
lstm_out_sum = torch.mean(lstm_out, dim=1)
output = self.fc(lstm_out_sum)
return output
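# Minimal usage sketch (comment added for clarity; `my_embedding` and the
# input batch are hypothetical and must come from jiotc's embedding and data
# pipeline):
#
#     params = BiLSTMModel.get_default_hyper_parameters()
#     model = BiLSTMModel(embed_model=my_embedding, device='cpu',
#                         hyper_parameters=params)
#     logits = model(padded_token_id_batch)   # -> [batch_size, num_classes]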
| 33.431193
| 128
| 0.602634
| 3,178
| 0.858919
| 0
| 0
| 412
| 0.111351
| 0
| 0
| 1,380
| 0.372973
|
3f15b4889cdf171226bf2916a6b9994712b58560
| 56,576
|
py
|
Python
|
tests/learning/test_prediction_error_delta_function.py
|
mihaic/psyneulink
|
3d2fc3117c82bccc92fc585add330b0f9b35c830
|
[
"Apache-2.0"
] | null | null | null |
tests/learning/test_prediction_error_delta_function.py
|
mihaic/psyneulink
|
3d2fc3117c82bccc92fc585add330b0f9b35c830
|
[
"Apache-2.0"
] | null | null | null |
tests/learning/test_prediction_error_delta_function.py
|
mihaic/psyneulink
|
3d2fc3117c82bccc92fc585add330b0f9b35c830
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from psyneulink import PredictionErrorDeltaFunction
np.set_printoptions(suppress=True)
def test_prediction_error_delta_first_run():
learning_rate = 0.3
stimulus_onset = 41
sample = np.zeros(60)
sample[stimulus_onset:] = 1
reward_onset = 54
target = np.zeros(60)
target[reward_onset] = 1
delta_function = PredictionErrorDeltaFunction()
delta_vals = np.zeros((60, 60))
weights = np.zeros(60)
for t in range(60):
print("Timestep {}".format(t))
new_sample = sample * weights
# print("sample = {}".format(new_sample))
delta_vals[t] = delta_function.function(variable=[new_sample, target])
print("delta: {}".format(delta_vals[t]))
for i in range(59):
weights[i] = weights[i] + learning_rate * sample[i] * \
delta_vals[t][i + 1]
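# (Comment added for clarity.) The update above applies a temporal-difference
# style rule, w_i <- w_i + learning_rate * x_t(i) * delta(i + 1): a stimulus
# element active at step i is credited with the prediction error observed at
# the following step.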
validation_array = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3,
0.7, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09,
0.42000000000000004, 0.49, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.027, 0.189,
0.44100000000000006, 0.34299999999999997, 0.0,
0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0081, 0.0756,
0.2646, 0.4116, 0.24009999999999998, 0.0, 0.0,
0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.00243, 0.02835, 0.1323,
0.3087, 0.3601500000000001,
0.16806999999999994, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0007289999999999999,
0.010206, 0.05953499999999999, 0.18522,
0.32413500000000006, 0.30252599999999996,
0.117649, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.00021869999999999998, 0.0035721,
0.025004699999999998, 0.09724049999999998,
0.2268945, 0.31765230000000005,
0.24706289999999997, 0.08235429999999999, 0.0,
0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 6.560999999999999e-05,
0.0012247199999999999, 0.01000188, 0.04667544,
0.1361367, 0.25412184, 0.29647548,
0.19765032000000005, 0.05764800999999997, 0.0,
0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.9682999999999998e-05, 0.000413343,
0.003857868, 0.021003947999999998,
0.073513818, 0.171532242, 0.26682793199999993,
0.2668279320000001, 0.15564962699999996,
0.040353607000000014, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
5.904899999999999e-06, 0.000137781,
0.0014467005, 0.009001692,
0.036756909000000004, 0.1029193452,
0.200120949, 0.26682793199999993,
0.2334744405000001, 0.12106082099999993,
0.028247524900000043, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.7714699999999997e-06,
4.5467729999999994e-05, 0.00053045685,
0.0037131979500000002, 0.0173282571,
0.05660563986, 0.13207982633999998,
0.2201330439, 0.25682188454999993,
0.19975035465000013, 0.09321683216999987,
0.019773267430000074, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
5.314409999999999e-07, 1.4880347999999997e-05,
0.00019096446599999996, 0.00148527918,
0.0077977156950000005, 0.029111471928000003,
0.07924789580399999, 0.15849579160799998,
0.23113969609499996, 0.23970042558000004,
0.16779029790600009, 0.07118376274799987,
0.013841287201000085, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
1.5943229999999994e-07, 4.8361131e-06,
6.770558339999998e-05, 0.0005792588802,
0.0033790101345, 0.014191842564900003,
0.044152399090799994, 0.1030222645452,
0.18028896295409996, 0.23370791494049992,
0.21812738727780012, 0.1388083373586,
0.05398102008389993, 0.009688901040700082,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.61026623e-06,
2.3696954189999994e-05, 0.00022117157244,
0.00141918425649, 0.006622859863620001,
0.023180009522670002, 0.06181335872711999,
0.12620227406787, 0.19631464855001995,
0.22903375664168996, 0.19433167230204007,
0.11336014217619006, 0.040693384370939945,
0.006782230728490046, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
8.719352486999997e-06, 8.2939339665e-05,
0.000580575377655, 0.002980286938629,
0.011590004761335003, 0.034770014284004995,
0.08113003332934499, 0.14723598641251498,
0.20613038097752096, 0.218623131339795,
0.1700402132642851, 0.09156011483461501,
0.03052003827820493, 0.0047475615099430435,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
3.3601154386499996e-05,
0.00023223015106199995, 0.0013004888459472001,
0.0055632022854408, 0.018544007618136,
0.048678019997607, 0.100961819254296,
0.16490430478201676, 0.20987820608620322,
0.20404825591714193, 0.1464961837353841,
0.07324809186769199, 0.02278829524772641,
0.0033232930569601082, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.00010327019970509998, 0.0005527077595275599,
0.0025793028777952804, 0.00945744388524936,
0.0275842113319773, 0.0643631597746137,
0.12014456491261222, 0.17839647517327273,
0.2081292210354848, 0.18678263426261454,
0.12452175617507655, 0.05811015288170229,
0.016948794590496474, 0.002326305139872087,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.00026908252756336796, 0.0011606862950078762,
0.004642745180031503, 0.014895474119267742,
0.038617895864768215, 0.08109758131601326,
0.13762013799081035, 0.18731629893193635,
0.2017252450036237, 0.1681043708363532,
0.10459827518706422, 0.045761745394340525,
0.012562047755309225, 0.0016284135979104386,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0006172884160657308, 0.002205303960514964,
0.007718563861802375, 0.022012200642917885,
0.05136180150014173, 0.09805434831845238,
0.15252898627314818, 0.1916389827534426,
0.1916389827534425, 0.1490525421415665,
0.08694731624924712, 0.03580183610263121,
0.009281957508089578, 0.0011398895185372737,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.00127887960422022,
0.0038592819309011877, 0.012006654896137028,
0.030817080900085034, 0.06536956554563493,
0.11439673970486111, 0.1642619852172365,
0.1916389827534426, 0.1788630505698796,
0.13042097437387068, 0.07160367220526243,
0.027845872524268733, 0.006839337111223864,
0.0007979226629760694, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0024366641834905763, 0.006303493820471939,
0.01764978269732143, 0.04118282629375,
0.08007771779340278, 0.12935631335857373,
0.17247508447809834, 0.1878062030983737,
0.16433042771107698, 0.11277578372328811,
0.058476332300964384, 0.021543911900355206,
0.005026912776749604, 0.0005585458640832153,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0043277123296321576, 0.009707380483526788,
0.024709695776250002, 0.05285129374364584,
0.09486129646295406, 0.1422919446944311,
0.17707442006418095, 0.18076347048218466,
0.1488640345147404, 0.09648594829659096,
0.047396606180781564, 0.01658881216327357,
0.0036864027029497315, 0.0003909821048582174,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.007239926474690194,
0.014208075071343751, 0.03315217516646875,
0.06545429455943831, 0.10909049093239716,
0.15272668730535605, 0.17818113518958212,
0.17119363969195134, 0.13315060864929562,
0.08175914566184805, 0.03815426797552923,
0.012718089325176374, 0.0026977765235223217,
0.00027368747340072996, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.011502348996093318,
0.01989130509988125, 0.04284281098435962,
0.07854515347132598, 0.1221813498442848,
0.16036302167062388, 0.17608488654029292,
0.15978073037915452, 0.11773316975306136,
0.0686776823559524, 0.030523414380423386,
0.009711995484680158, 0.0019705498084858775,
0.00019158123138052208, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.017469740526057695,
0.02677675686522476, 0.053553513730449524,
0.09163601238321362, 0.13363585139218653,
0.1650795811315246, 0.17119363969195145,
0.14716646219132656, 0.10301652353392865,
0.05723140196329368, 0.02427998871170045,
0.0073895617818218184, 0.0014368592353543042,
0.00013410686196635435, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.02550276758562512,
0.03480978392479219, 0.06497826332627876,
0.10423596408590546, 0.14306897031398796,
0.16691379869965267, 0.16398548644176403,
0.13392148059410713, 0.08928098706273813,
0.0473459779878157, 0.01921286063273686,
0.005603751017881575, 0.0010460335233379858,
9.387480337641474e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.035945702763062776,
0.04386032774523817, 0.07675557355416678,
0.1158858659543302, 0.1502224188296874,
0.16603530502228608, 0.15496628468746698,
0.12052933253469633, 0.07670048434026144,
0.03890604278129206, 0.015130127748280264,
0.004236435769518487, 0.0007603859073495034,
6.571236236352362e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.04910380108663422,
0.05372890148791675, 0.0884946612742158,
0.12618683181693738, 0.15496628468746698,
0.16271459892184037, 0.1446351990416358,
0.10738067807636587, 0.06536215187257055,
0.0317732682713886, 0.011862020154651653,
0.0031936208108678255, 0.0005519838438536873,
4.5998653654510946e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.06522247153300925,
0.06415862942380646, 0.09980231243703228,
0.13482066767809628, 0.157290778957779,
0.157290778957779, 0.13345884275205488,
0.09477512021522716, 0.05528548679221601,
0.025799893836367493, 0.009261500351516516,
0.002401129720763562, 0.000400188286794001,
3.219905755813546e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.08447006036015119,
0.07485173432777421, 0.1103078190093515,
0.14156170106200106, 0.157290778957779,
0.15014119809606175, 0.12185372599100663,
0.08292823018832374, 0.04643980890546151,
0.02083837579091219, 0.007203389162290574,
0.0018008472905727269, 0.0002897915180232191,
2.2539340290728127e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.10692558065848345,
0.0854885597322474, 0.11968398362514637,
0.1462804244307344, 0.15514590469926381,
0.14165495646454518, 0.11017607725020184,
0.07198170380346502, 0.038759378971096714,
0.016747879802325727, 0.005582626600775242,
0.0013475305588078745, 0.00020961586470347182,
1.5777538203476382e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.13257214857815766,
0.0957471869001171, 0.12766291586682277,
0.14894006851129327, 0.15109862022884823,
0.13221129270024212, 0.09871776521618081,
0.062015006353754565, 0.032155929220465396,
0.013398303841860582, 0.0043120977881849765,
0.0010061561505766425, 0.00015146436675339547,
1.1044276742477876e-05, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.16129630464819278,
0.10532190559012883, 0.13404606166016392,
0.14958763402655972, 0.1454324219702664,
0.12216323445502375, 0.08770693755745296,
0.05305728321376779, 0.02652864160688395,
0.0106724420257579, 0.003320315296902465,
0.0007497486154296462, 0.0001093383397501313,
7.730993719756718e-06, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.19289287632523142,
0.11393915241113936, 0.13870853337008265,
0.1483410704096717, 0.13845166571569367,
0.11182634538575253, 0.07731204125434732,
0.045098690731702695, 0.021771781732546125,
0.008466804007101203, 0.0025491452924606417,
0.0005576255327258695, 7.885613594094121e-05,
5.411695603863009e-06, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.22707462204857323,
0.12136996669882236, 0.14159829448195937,
0.14537424900147833, 0.1304640696167113,
0.10147205414633098, 0.06764803609755388,
0.038100618031955746, 0.017780288414912637,
0.00669150639270899, 0.0019516893645402655,
0.0004139947136904132, 5.682280383978444e-05,
3.7881869227041065e-06, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.26348561205821996,
0.12743846503376344, 0.14273108083781505,
0.14090119518604827, 0.12176646497559718,
0.09132484873169788, 0.058783810677874415,
0.03200451914684277, 0.014453653808251588,
0.005269561284258373, 0.001490380969285332,
0.00030684314073514685, 4.091241876469365e-05,
2.6517308459039768e-06, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.301717151568349,
0.1320262497749789, 0.14218211514228507,
0.13516077612291288, 0.11263398010242742,
0.08156253731555085, 0.05075002321856492,
0.026739259545265348, 0.011698426051053645,
0.004135807189766472, 0.001135319620720332,
0.00022706392414395538,
2.9434212389101155e-05, 1.856211592099477e-06,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.3413250265008427,
0.13507300938517075, 0.14007571343647335,
0.12840273731676732, 0.10331254726636441,
0.07231878308645512, 0.04354679411657503,
0.02222700949700185, 0.009429640392667471,
0.0032356609190525853, 0.0008628429117474301,
0.000167775010617488, 2.116081215008947e-05,
1.2993481144363273e-06, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.3818469293163939,
0.1365738206005615, 0.13657382060056156,
0.12087568030164642, 0.09401441801239163,
0.06368718639549109, 0.03715085873070312,
0.018387798765701513, 0.00757144655058295,
0.0025238155168610943, 0.0006543225414084031,
0.00012379075107726845,
1.5202372939393527e-05, 9.09543680149838e-07,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.4228190754965624,
0.13657382060056145, 0.13186437851088706,
0.11281730161487002, 0.08491624852732138,
0.05572628809605473, 0.031521940741202625,
0.01514289310116601, 0.006057157240466293,
0.0019629676242253202, 0.0004951630043090738,
9.121423763591707e-05, 1.0914524161576011e-05,
6.366805761492955e-07, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.4637912216767308,
0.13516098797365916, 0.12615025544208192,
0.10444698568860544, 0.07615926039794141,
0.04846498388959908, 0.026608226449191696,
0.012417172342956029, 0.0048289003555940235,
0.0015226262382503908, 0.0003739783743071934,
6.712432359357035e-05, 7.831171085936894e-06,
4.4567640333781355e-07, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.5043395180688286,
0.13245776821418598, 0.11963927451603895,
0.09596066810140624, 0.06785097744543866,
0.04190795665747693, 0.02235091021732094,
0.010140690746747505, 0.003837018120390834,
0.0011780318790675093, 0.00028192215909306206,
4.933637784132472e-05, 5.6155226810794545e-06,
3.119734823808784e-07, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.5440768485330844,
0.12861222010474194, 0.11253569259164908,
0.087527760904616, 0.06006807120905011,
0.03604084272543018, 0.0186878443761489,
0.008249588958840537, 0.0030393222479937476,
0.0009091989630751751, 0.00021214642471756306,
3.6220121293228935e-05, 4.024457921469882e-06,
2.183814377110238e-07, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.582660514564507,
0.12378926185081407, 0.1050333130855392,
0.07928985399594612, 0.05285990266396423,
0.030834943220645727, 0.015556367750956368,
0.006686508945586533, 0.0024002852625182314,
0.0007000832015678915, 0.00015936853369025172,
2.6561422281634606e-05,
2.8826349763866332e-06,
1.5286700638661443e-07, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.6197972931197512,
0.11816247722123163, 0.09731027535866121,
0.0713608685963516, 0.04625241483096865,
0.02625137057973892, 0.012895410109345473,
0.005400641840666021, 0.0018902246442331627,
0.0005378688012045441, 0.00011952640026768879,
1.9457786090026907e-05,
2.0637045854421388e-06,
1.0700690444842564e-07, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.6552460362861207,
0.1119068166624605, 0.08952545332996831,
0.0638283324667368, 0.04025210155559966,
0.02224458243862093, 0.010646979628741615,
0.004347516681736163, 0.0014845178913245327,
0.000412366080923543, 8.950581601441243e-05,
1.4239561638595966e-05,
1.4766952811662293e-06, 7.490483311389795e-08,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.6888180812848588,
0.10519240766271287, 0.0818163170709989,
0.05675546319339564, 0.03484984582050599,
0.018765301595657147, 0.008757140744639957,
0.003488617044612674, 0.0011628723482043357,
0.0003155080014507483, 6.69259397017008e-05,
1.0410701731355942e-05,
1.0561581467172232e-06,
5.2433383190830796e-08, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.7203758035836726,
0.0981795804851987, 0.07429806090771796,
0.05018377798152873, 0.030024482553051346,
0.01576285334035199, 0.007176583634631806,
0.0027908936356900726, 0.0009086630441783594,
0.00024093338292596744, 4.997136831064175e-05,
7.604338655986531e-06, 7.550407176148966e-07,
3.670336823358156e-08, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.7498296777292321,
0.09101512461195449, 0.06706377602986124,
0.04413598935298546, 0.025745993789241584,
0.013186972428635979, 0.005860876634949275,
0.002226224458236503, 0.0007083441458026751,
0.00018364477854138084, 3.726125941427849e-05,
5.549549274452836e-06, 5.395395127338887e-07,
2.569235779681378e-08, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.7771342151128184,
0.08382972003732658, 0.06018544002679849,
0.03861899068386232, 0.02197828738105989,
0.010989143690529946, 0.004770480981935443,
0.0017708603645063548, 0.0005509343356242535,
0.00013972972280329454,
2.7747746372375204e-05, 4.046546345892743e-06,
3.853853662860729e-07, 1.7984650435565186e-08,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.8022831311240164,
0.07673643603416813, 0.05371550522391766,
0.033626779693021636, 0.018681544273900896,
0.009123544877951528, 0.0038705947967068166,
0.0014048825558417022, 0.00042757295177797694,
0.00010613512987400764,
2.0637386364374954e-05,
2.9481980520218443e-06,
2.7516515155312504e-07, 1.258925530489563e-08,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.8253040619342669,
0.06983015679109295, 0.04768888756464884,
0.029143209067285403, 0.015814144455116086,
0.0075476598535781925, 0.00313088112444726,
0.0011116896746226068, 0.00033114160520675284,
8.048580682107342e-05, 1.5330629870691226e-05,
2.146288181847922e-06, 1.9639238268975845e-07,
8.812478746733632e-09, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.8462531089715948,
0.06318777602315973, 0.04212518401543974,
0.025144489683634585, 0.013334199074654718,
0.006222626234838935, 0.002525123689499864,
0.0008775252537979172, 0.0002559448656910268,
6.0939253735958765e-05,
1.1375327364060439e-05,
1.5613194420671661e-06, 1.401184115401577e-07,
6.168735078304621e-09, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.8652094417785428,
0.0568689984208437, 0.037030975715898196,
0.02160140250094056, 0.011200727222710039,
0.005113375471237247, 0.002030844158789291,
0.0006910511373657835, 0.000197443182104462,
4.607007582446698e-05, 8.431124987495764e-06,
1.1349591328979614e-06, 9.993350857939731e-08,
4.3181145326087744e-09, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.8822701413047959,
0.05091759160936005, 0.03240210375141095,
0.018481199917471325, 0.009374521697268268,
0.004188616077502871, 0.0016289062523622277,
0.0005429687507872982, 0.0001520312502205634,
3.4778390573309004e-05, 6.242275231160832e-06,
8.244514455579832e-07, 7.124889034315629e-08,
3.022680217235063e-09, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.8975454187876039,
0.04536294525197537, 0.028225832601229017,
0.015749196451410374, 0.007818750011338693,
0.0034207031299606783, 0.0013031250018897822,
0.00042568750061722227,
0.00011685539232642039, 2.621755597065345e-05,
4.616928095502182e-06, 5.984906790157396e-07,
5.078102727207323e-08, 2.115876140962314e-09,
0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.9111543023631965,
0.04022181145675141, 0.024482841756283458,
0.013370062519388881, 0.006499335946925311,
0.0027854296915393872, 0.0010398937515080364,
0.0003330378681299928, 8.96640414196348e-05,
1.9737367608074763e-05, 3.411396870545147e-06,
4.341777835037419e-07, 3.6181481921637726e-08,
1.4811133430825407e-09, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.9232208458002219,
0.035500120546611, 0.02114900798521513,
0.011308844547649799, 0.005385164070309534,
0.002261768909530004, 0.0008278369864945789,
0.0002600257201169631, 6.868603927612238e-05,
1.4839576386815878e-05,
2.5182311443883165e-06,
3.1477889306241735e-07, 2.577137137027563e-08,
1.0367793290555483e-09, 0.0, 0.0, 0.0, 0.0,
0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.9338708819642052,
0.031194786778192207, 0.018196958953945574,
0.009531740404447708, 0.0044481455220757304,
0.0018315893326192878, 0.0006574936065812942,
0.0002026238158647775, 5.253210040934153e-05,
1.1143172814032098e-05,
1.8571954689683423e-06, 2.280766365769793e-07,
1.8350993724602915e-08, 7.257455747478048e-10,
0.0, 0.0, 0.0, 0.0, 0.0],
])
for i in range(len(delta_vals)):
deltas = delta_vals[i]
validation_deltas = validation_array[i]
np.testing.assert_allclose(deltas, validation_deltas, atol=1e-08,
err_msg="mismatch on timestep {}".format(i))
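# A minimal sketch (hypothetical values, not part of the validation data above) of
# what np.testing.assert_allclose checks: each element must satisfy
# |actual - desired| <= atol + rtol * |desired| (rtol defaults to 1e-07), otherwise
# an AssertionError that includes err_msg is raised.
# >>> import numpy as np
# >>> np.testing.assert_allclose([0.5, 0.25], [0.5, 0.25 + 5e-9], atol=1e-08)  # passes
# >>> np.testing.assert_allclose([0.5], [0.6], atol=1e-08)  # raises AssertionError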
| 72.255428
| 80
| 0.352128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.001591
|
3f18a598378fac5606353de6db627c25234fa321
| 22,823
|
py
|
Python
|
jts/backend/jobapps/views.py
|
goupaz/babylon
|
4e638d02705469061e563fec349676d8faa9f648
|
[
"MIT"
] | 1
|
2019-08-08T09:03:17.000Z
|
2019-08-08T09:03:17.000Z
|
backend/jobapps/views.py
|
goupaz/website
|
ce1bc8b6c52ee0815a7b98842ec3bde0c20e0add
|
[
"Apache-2.0"
] | 2
|
2020-10-09T19:16:09.000Z
|
2020-10-10T20:40:41.000Z
|
jts/backend/jobapps/views.py
|
goupaz/babylon-hackathon
|
4e638d02705469061e563fec349676d8faa9f648
|
[
"MIT"
] | 1
|
2019-07-21T01:42:21.000Z
|
2019-07-21T01:42:21.000Z
|
from datetime import datetime as dt
from django.utils import timezone
import uuid
from django.contrib.auth import get_user_model
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from company.utils import get_or_create_company
from position.utils import get_or_insert_position
from utils import utils
from utils.error_codes import ResponseCodes
from utils.generic_json_creator import create_response
from .models import JobApplication, Contact, ApplicationStatus, StatusHistory
from .models import JobApplicationNote, JobApplicationFile
from .models import Source
from alumni.serializers import AlumniSerializer
from .serializers import ApplicationStatusSerializer
from .serializers import JobApplicationNoteSerializer, JobApplicationFileSerializer
from .serializers import JobApplicationSerializer, ContactSerializer
from .serializers import SourceSerializer
from .serializers import StatusHistorySerializer
User = get_user_model()
@csrf_exempt
@api_view(["GET", "POST", "PUT", "PATCH", "DELETE"])
def job_applications(request):
body = request.data
if 'recaptcha_token' in body and utils.verify_recaptcha(None, body['recaptcha_token'],
'add_job') == ResponseCodes.verify_recaptcha_failed:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.verify_recaptcha_failed),
safe=False)
if request.method == "GET":
timestamp = request.GET.get('timestamp')
if timestamp is not None:
timestamp = int(timestamp) / 1000
if timestamp is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters))
profile = request.user
time = dt.fromtimestamp(int(timestamp))
user_job_apps = JobApplication.objects.filter(created__gte=time)
job_application_list = JobApplicationSerializer(instance=user_job_apps, many=True, context={
'user': request.user}).data
response = {'data': job_application_list, 'synching': profile.synching}
return JsonResponse(create_response(data=response), safe=False)
status_id = request.GET.get('status_id')
if status_id is not None:
user_job_apps = JobApplication.objects.filter(
application_status__id=status_id, user__id=request.user.id, is_deleted=False).order_by('-apply_date')
else:
user_job_apps = JobApplication.objects.filter(
user_id=request.user.id, is_deleted=False).order_by('-apply_date')
job_applications_list = JobApplicationSerializer(instance=user_job_apps, many=True, context={
'user': request.user}).data
return JsonResponse(create_response(data=job_applications_list), safe=False)
elif request.method == "POST":
job_title = body['job_title']
company = body['company']
application_date = body['application_date']
status = int(body['status_id'])
source = body['source']
jt = get_or_insert_position(job_title)
jc = get_or_create_company(company)
if Source.objects.filter(value__iexact=source).count() == 0:
source = Source.objects.create(value=source)
else:
source = Source.objects.get(value__iexact=source)
job_application = JobApplication(position=jt, company_object=jc, apply_date=application_date,
msg_id='', app_source=source, user=request.user)
job_application.application_status = ApplicationStatus.objects.get(pk=status)
job_application.save()
return JsonResponse(
create_response(
data=JobApplicationSerializer(instance=job_application, many=False, context={'user': request.user}).data),
safe=False)
elif request.method == "PUT":
status_id = body.get('status_id')
rejected = body.get('rejected')
job_application_ids = []
if 'jobapp_ids' in body:
job_application_ids = body['jobapp_ids']
if 'jobapp_id' in body:
job_application_ids.append(body['jobapp_id'])
if len(job_application_ids) == 0:
return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False)
elif rejected is None and status_id is None:
return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False)
else:
user_job_apps = JobApplication.objects.filter(pk__in=job_application_ids)
if user_job_apps.count() == 0:
return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False)
else:
for user_job_app in user_job_apps:
if user_job_app.user == request.user:
if status_id is None:
user_job_app.is_rejected = rejected
else:
new_status = ApplicationStatus.objects.filter(pk=status_id)
if new_status.count() == 0:
return JsonResponse(
create_response(data=None, success=False,
error_code=ResponseCodes.invalid_parameters),
safe=False)
else:
if rejected is None:
user_job_app.application_status = new_status[0]
else:
user_job_app.application_status = new_status[0]
user_job_app.is_rejected = rejected
status_history = StatusHistory(
job_post=user_job_app, application_status=new_status[0])
status_history.save()
if rejected is not None:
user_job_app.rejected_date = timezone.now()
user_job_app.updated_date = timezone.now()
user_job_app.save()
return JsonResponse(create_response(data=None), safe=False)
elif request.method == "PATCH":
job_app_id = body.get('jobapp_id')
if job_app_id is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
user_job_app = JobApplication.objects.get(pk=job_app_id)
if user_job_app.user != request.user:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
if user_job_app.msg_id is not None and user_job_app.msg_id != '':
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
job_title = body.get('job_title')
company = body.get('company')
application_date = body.get('application_date')
source = body.get('source')
if application_date is not None:
user_job_app.apply_date = application_date
if job_title is not None:
user_job_app.position = get_or_insert_position(job_title)
if company is not None:
user_job_app.company_object = get_or_create_company(company)
if source is not None:
if Source.objects.filter(value__iexact=source).count() == 0:
source = Source.objects.create(value=source)
else:
source = Source.objects.get(value__iexact=source)
user_job_app.app_source = source
user_job_app.updated_date = timezone.now()
user_job_app.save()
return JsonResponse(create_response(
data=JobApplicationSerializer(instance=user_job_app, many=False, context={'user': request.user}).data),
safe=False)
elif request.method == "DELETE":
job_application_ids = []
if 'jobapp_ids' in body:
job_application_ids = body['jobapp_ids']
if 'jobapp_id' in body:
job_application_ids.append(body['jobapp_id'])
if len(job_application_ids) == 0 or JobApplication.objects.filter(pk__in=job_application_ids).count() == 0:
return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False)
else:
user_job_apps = JobApplication.objects.filter(pk__in=job_application_ids)
for user_job_app in user_job_apps:
if user_job_app.user == request.user:
user_job_app.deleted_date = timezone.now()
user_job_app.is_deleted = True
user_job_app.save()
return JsonResponse(create_response(data=None), safe=False)
@csrf_exempt
@api_view(["GET"])
def statuses(request):
statuses_list = ApplicationStatus.objects.all()
statuses_list = ApplicationStatusSerializer(instance=statuses_list, many=True).data
return JsonResponse(create_response(data=statuses_list), safe=False)
@csrf_exempt
@api_view(["GET"])
def sources(request):
source_list = SourceSerializer(instance=Source.objects.all(), many=True).data
return JsonResponse(create_response(data=source_list), safe=False)
@csrf_exempt
@api_view(["GET"])
def status_history(request, job_app_pk):
if job_app_pk is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False)
else:
statuses_list = StatusHistory.objects.filter(job_post__pk=job_app_pk)
statuses_list = StatusHistorySerializer(instance=statuses_list, many=True).data
return JsonResponse(create_response(data=statuses_list), safe=False)
@csrf_exempt
@api_view(["GET", "POST", "PUT", "DELETE"])
def contacts(request, job_app_pk):
body = request.data
if request.method == "GET":
data = {}
if job_app_pk is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False)
else:
contacts_list = Contact.objects.filter(job_post__pk=job_app_pk)
contacts_list = ContactSerializer(instance=contacts_list, many=True).data
data['contacts'] = contacts_list
user_profile = request.user
if not user_profile.user_type.alumni_listing_enabled:
alumni = []
else:
jobapp = JobApplication.objects.get(pk=job_app_pk)
alumni_list = User.objects.filter(college=user_profile.college, company=jobapp.company_object,
user_type__name__iexact='Alumni', is_demo=False)
alumni = AlumniSerializer(
instance=alumni_list, many=True, context={'user': request.user}).data
data['alumni'] = alumni
return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success), safe=False)
elif request.method == "POST":
first_name = body.get('first_name')
last_name = body.get('last_name')
if job_app_pk is None or first_name is None or last_name is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
user_job_app = JobApplication.objects.get(pk=job_app_pk)
if user_job_app.user == request.user:
phone_number = body.get('phone_number')
linkedin_url = body.get('linkedin_url')
description = body.get('description')
email = body.get('email')
job_title = body.get('job_title')
jt = None
jc = None
if job_title is not None:
jt = get_or_insert_position(job_title)
company = body.get('company')
if company is not None:
jc = get_or_create_company(company)
contact = Contact(
job_post=user_job_app, first_name=first_name, last_name=last_name, phone_number=phone_number,
linkedin_url=linkedin_url,
description=description, email=email,
position=jt, company=jc)
contact.save()
data = ContactSerializer(
instance=contact, many=False).data
return JsonResponse(create_response(data=data), safe=False)
else:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
elif request.method == "PUT":
contact_id = body.get('contact_id')
if contact_id is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
contact = Contact.objects.get(pk=contact_id)
if contact.job_post.user == request.user:
first_name = body.get('first_name')
if first_name is not None:
contact.first_name = first_name
last_name = body.get('last_name')
if last_name is not None:
contact.last_name = last_name
email = body.get('email')
if email is not None:
contact.email = email
phone_number = body.get('phone_number')
if phone_number is not None:
contact.phone_number = phone_number
linkedin_url = body.get('linkedin_url')
if linkedin_url is not None:
contact.linkedin_url = linkedin_url
description = body.get('description')
if description is not None:
contact.description = description
job_title = body.get('job_title')
if job_title is not None:
contact.position = get_or_insert_position(job_title)
company = body.get('company')
if company is not None:
contact.company = get_or_create_company(company)
contact.update_date = timezone.now()
contact.save()
data = ContactSerializer(
instance=contact, many=False).data
return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success),
safe=False)
else:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
elif request.method == "DELETE":
contact_id = body.get('contact_id')
if contact_id is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
user_job_app_contact = Contact.objects.filter(
pk=contact_id)
if user_job_app_contact.count() == 0:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
user_job_app_contact = user_job_app_contact[0]
if user_job_app_contact.job_post.user == request.user:
user_job_app_contact.delete()
return JsonResponse(create_response(data=None, success=True, error_code=ResponseCodes.success), safe=False)
else:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
@csrf_exempt
@api_view(["GET", "POST", "PUT", "DELETE"])
def notes(request, job_app_pk):
body = request.data
if 'recaptcha_token' in body and utils.verify_recaptcha(None, body['recaptcha_token'],
'jobapp_note') == ResponseCodes.verify_recaptcha_failed:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.verify_recaptcha_failed),
safe=False)
if request.method == "GET":
if job_app_pk is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
else:
notes_list = JobApplicationNote.objects.filter(
job_post__pk=job_app_pk).order_by('-update_date', '-created_date')
notes_list = JobApplicationNoteSerializer(
instance=notes_list, many=True).data
return JsonResponse(create_response(data=notes_list, success=True, error_code=ResponseCodes.success),
safe=False)
elif request.method == "POST":
description = body['description']
if job_app_pk is None or description is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
else:
user_job_app = JobApplication.objects.get(pk=job_app_pk)
if user_job_app.user == request.user:
note = JobApplicationNote(
job_post=user_job_app, description=description)
note.save()
data = JobApplicationNoteSerializer(
instance=note, many=False).data
return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success),
safe=False)
else:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False)
elif request.method == "PUT":
jobapp_note_id = body['jobapp_note_id']
description = body['description']
if jobapp_note_id is None:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False)
else:
note = JobApplicationNote.objects.get(pk=jobapp_note_id)
if note.job_post.user == request.user:
note.description = description
note.update_date = timezone.now()
note.save()
data = JobApplicationNoteSerializer(
instance=note, many=False).data
return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success),
safe=False)
else:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
elif request.method == "DELETE":
jobapp_note_id = body['jobapp_note_id']
if jobapp_note_id is None:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False)
else:
user_job_app_note = JobApplicationNote.objects.get(
pk=jobapp_note_id)
if user_job_app_note.job_post.user == request.user:
user_job_app_note.delete()
return JsonResponse(create_response(data=None, success=True, error_code=ResponseCodes.success), safe=False)
else:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
@csrf_exempt
@api_view(["GET", "POST", "DELETE"])
def files(request, job_app_pk):
body = request.data
if request.method == "GET":
if job_app_pk is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
else:
files_list = JobApplicationFile.objects.filter(
job_post__pk=job_app_pk).order_by('-update_date', '-created_date')
files_list = JobApplicationFileSerializer(
instance=files_list, many=True).data
return JsonResponse(create_response(data=files_list, success=True, error_code=ResponseCodes.success),
safe=False)
elif request.method == "POST":
file = body['file']
if job_app_pk is None or file is None:
return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters),
safe=False)
else:
ext = file.name.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
name = file.name.replace(('.' + ext), '')
filename = name + '_' + filename
user_job_app = JobApplication.objects.get(pk=job_app_pk)
if user_job_app.user == request.user:
jobapp_file = JobApplicationFile(
job_post=user_job_app, name=name)
jobapp_file.save()
jobapp_file.file.save(filename, file, save=True)
data = JobApplicationFileSerializer(
instance=jobapp_file, many=False).data
return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success),
safe=False)
else:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False)
elif request.method == "DELETE":
jobapp_file_id = body['jobapp_file_id']
if jobapp_file_id is None:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False)
else:
user_job_app_file = JobApplicationFile.objects.get(
pk=jobapp_file_id)
if user_job_app_file.job_post.user == request.user:
user_job_app_file.delete()
return JsonResponse(create_response(data=None, success=True, error_code=ResponseCodes.success), safe=False)
else:
return JsonResponse(
create_response(data=None, success=False, error_code=ResponseCodes.record_not_found),
safe=False)
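# A minimal sketch (hypothetical values) of the JSON body expected by the POST branch
# of job_applications() above; the field names come from the code, the values are
# illustrative only, and recaptcha_token is optional:
# {
#     "job_title": "Backend Engineer",
#     "company": "Acme Corp",
#     "application_date": "2019-07-01",
#     "status_id": "1",
#     "source": "LinkedIn",
#     "recaptcha_token": "<token>"
# }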
| 50.605322
| 131
| 0.623056
| 0
| 0
| 0
| 0
| 21,781
| 0.954344
| 0
| 0
| 1,014
| 0.044429
|
3f18ab10027c8065766c8a8c8fb7ac830007c2ab
| 127
|
py
|
Python
|
reassign.py
|
Ca2Patton/PythonStuff
|
9d13f340296bcea41dfca87a4b36e445821703de
|
[
"Apache-2.0"
] | null | null | null |
reassign.py
|
Ca2Patton/PythonStuff
|
9d13f340296bcea41dfca87a4b36e445821703de
|
[
"Apache-2.0"
] | null | null | null |
reassign.py
|
Ca2Patton/PythonStuff
|
9d13f340296bcea41dfca87a4b36e445821703de
|
[
"Apache-2.0"
] | null | null | null |
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# Demonstrates Python 2 scoping: the x assigned inside reassign() is a new local
# variable, so the module-level x (printed before and after the call) stays 5.
x=5
print x
def reassign(b):
x=6
print x
reassign(x)
print x
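# A minimal sketch (shown only as comments; assumes Python 3 print syntax rather than
# this file's Python 2 shebang) of how the function would have to be written to
# actually rebind the module-level x:
# def reassign_global():
#     global x        # declare that x refers to the module-level name
#     x = 6
# reassign_global()
# print(x)            # now prints 6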
| 12.7
| 62
| 0.740157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.488189
|
3f1926f6984e1a663e867e004da2e2a9429fe1d9
| 6,632
|
py
|
Python
|
python_modules/dagster/dagster/core/meta/config_types.py
|
Ramshackle-Jamathon/dagster
|
959037ab8d8fb7ed49fbc2daff9fa566f71766f2
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/meta/config_types.py
|
Ramshackle-Jamathon/dagster
|
959037ab8d8fb7ed49fbc2daff9fa566f71766f2
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/meta/config_types.py
|
Ramshackle-Jamathon/dagster
|
959037ab8d8fb7ed49fbc2daff9fa566f71766f2
|
[
"Apache-2.0"
] | null | null | null |
from collections import namedtuple
from dagster import check
from dagster.config.config_type import ConfigType, ConfigTypeKind
from dagster.config.field import Field
from dagster.core.serdes import whitelist_for_serdes
@whitelist_for_serdes
class NonGenericTypeRefMeta(namedtuple('_NonGenericTypeRefMeta', 'key')):
def __new__(cls, key):
return super(NonGenericTypeRefMeta, cls).__new__(cls, check.str_param(key, 'key'))
@whitelist_for_serdes
class ConfigTypeMeta(
namedtuple(
'_ConfigTypeMeta',
'kind key given_name description '
'type_param_refs ' # only valid for closed generics (Set, Tuple, List, Optional)
'enum_values ' # only valid for enums
'fields', # only valid for dicts and selectors
)
):
def __new__(
cls, kind, key, given_name, type_param_refs, enum_values, fields, description,
):
return super(ConfigTypeMeta, cls).__new__(
cls,
kind=check.inst_param(kind, 'kind', ConfigTypeKind),
key=check.str_param(key, 'key'),
given_name=check.opt_str_param(given_name, 'given_name'),
type_param_refs=None
if type_param_refs is None
else check.list_param(type_param_refs, 'type_param_refs', of_type=TypeRef),
enum_values=None
if enum_values is None
else check.list_param(enum_values, 'enum_values', of_type=ConfigEnumValueMeta),
fields=None
if fields is None
else check.list_param(fields, 'field', of_type=ConfigFieldMeta),
description=check.opt_str_param(description, 'description'),
)
@property
def inner_type_refs(self):
'''
This recurses through the type references with non-generic types as leaves.
'''
def _doit():
next_level_refs = _get_next_level_refs(self)
if next_level_refs:
for next_level in next_level_refs:
for inner_ref in _recurse_through_generics(next_level):
yield inner_ref
# there might be duplicate keys (esp for scalars)
refs_by_key = {}
for ref in _doit():
if ref.key not in refs_by_key:
refs_by_key[ref.key] = ref
return list(refs_by_key.values())
# This function is used by the recursive descent
# through all the inner types. It does *not*
# recursively descend through the type parameters
# of generic types; it only gets the next level of
# types: either the direct type parameters of a
# generic type, or the type refs of all the fields
# if it is a type with fields.
def _get_next_level_refs(ref):
# if a generic type, get type params
# if a type with fields, get refs of the fields
if ConfigTypeKind.is_closed_generic(ref.kind):
return ref.type_param_refs
elif (
ConfigTypeKind.has_fields(ref.kind) and ref.fields
): # still check fields because permissive
return [field_meta.type_ref for field_meta in ref.fields]
def _recurse_through_generics(ref):
yield ref
if isinstance(ref, ConfigTypeMeta) and ConfigTypeKind.is_closed_generic(ref.kind):
for type_param_ref in ref.type_param_refs:
for inner_ref in _recurse_through_generics(type_param_ref):
yield inner_ref
# A type reference in these serializable data structures is one of two things:
# 1) A closed generic type (e.g. List[Int] or Optional[Set[str]])
# 2) A reference to a non-generic type, such as Dict, Selector, or a Scalar.
# Upon deserialization, and when hydrated back to the graphql query, it will
# be the responsibility of that module to maintain a dictionary of the
# non-generic types and then do lookups into the dictionary in order
# to explode the entire type hierarchy requested by the client.
TypeRef = (ConfigTypeMeta, NonGenericTypeRefMeta)
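# A minimal usage sketch (hypothetical ref, not taken from this repository): because
# TypeRef is a tuple of classes, it can be passed directly to isinstance-style checks
# such as check.list_param(..., of_type=TypeRef) above.
# >>> ref = NonGenericTypeRefMeta(key='Int')
# >>> isinstance(ref, TypeRef)
# True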
@whitelist_for_serdes
class ConfigEnumValueMeta(namedtuple('_ConfigEnumValueMeta', 'value description')):
def __new__(cls, value, description):
return super(ConfigEnumValueMeta, cls).__new__(
cls,
value=check.str_param(value, 'value'),
description=check.opt_str_param(description, 'description'),
)
@whitelist_for_serdes
class ConfigFieldMeta(
namedtuple(
'_ConfigFieldMeta',
'name type_ref is_required default_provided default_value_as_str description',
)
):
def __new__(
cls, name, type_ref, is_required, default_provided, default_value_as_str, description
):
return super(ConfigFieldMeta, cls).__new__(
cls,
name=check.opt_str_param(name, 'name'),
type_ref=check.inst_param(type_ref, 'type_ref', TypeRef),
is_required=check.bool_param(is_required, 'is_required'),
default_provided=check.bool_param(default_provided, 'default_provided'),
default_value_as_str=check.opt_str_param(default_value_as_str, 'default_value_as_str'),
description=check.opt_str_param(description, 'description'),
)
def meta_from_field(name, field):
check.str_param(name, 'name')
check.inst_param(field, 'field', Field)
return ConfigFieldMeta(
name=name,
type_ref=type_ref_of(field.config_type),
is_required=field.is_required,
default_provided=field.default_provided,
default_value_as_str=field.default_value_as_str if field.default_provided else None,
description=field.description,
)
def type_ref_of(config_type):
check.inst_param(config_type, 'config_type', ConfigType)
if ConfigTypeKind.is_closed_generic(config_type.kind):
return meta_from_config_type(config_type)
else:
return NonGenericTypeRefMeta(key=config_type.key)
def type_refs_of(type_list):
return list(map(type_ref_of, type_list)) if type_list is not None else None
def meta_from_config_type(config_type):
check.inst_param(config_type, 'config_type', ConfigType)
return ConfigTypeMeta(
key=config_type.key,
given_name=config_type.given_name,
kind=config_type.kind,
description=config_type.description,
type_param_refs=type_refs_of(config_type.type_params),
enum_values=[
ConfigEnumValueMeta(ev.config_value, ev.description) for ev in config_type.enum_values
]
if config_type.kind == ConfigTypeKind.ENUM
else None,
fields=[meta_from_field(name, field) for name, field in config_type.fields.items()]
if ConfigTypeKind.has_fields(config_type.kind)
else None,
)
| 37.258427
| 99
| 0.692853
| 3,204
| 0.483112
| 945
| 0.142491
| 3,292
| 0.496381
| 0
| 0
| 1,684
| 0.25392
|
3f197e7a784ea8a0684cc88fb9aeb9e0486240f7
| 624
|
py
|
Python
|
database/migrations/2017_06_14_205530_create_users_table.py
|
emirbek/cope
|
be72b71e8045d1fe16d7ac6c680fc9f274af6c50
|
[
"MIT"
] | 2
|
2017-06-21T09:26:51.000Z
|
2020-10-15T19:45:20.000Z
|
database/migrations/2017_06_14_205530_create_users_table.py
|
emirbek/cope
|
be72b71e8045d1fe16d7ac6c680fc9f274af6c50
|
[
"MIT"
] | 11
|
2017-06-18T21:16:58.000Z
|
2021-06-12T18:34:20.000Z
|
database/migrations/2017_06_14_205530_create_users_table.py
|
emirbek/cope
|
be72b71e8045d1fe16d7ac6c680fc9f274af6c50
|
[
"MIT"
] | 2
|
2017-10-27T06:53:57.000Z
|
2021-09-26T10:26:31.000Z
|
from orator.migrations import Migration
class CreateUsersTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create('users') as table:
table.integer('id')
table.string('name')
table.string('gender', 1)
table.tiny_integer('status').default(0)
table.integer('chat_id').unique()
table.string('lang', 2).default('ru')
table.timestamps()
table.primary('id')
def down(self):
"""
Revert the migrations.
"""
self.schema.drop('users')
| 24
| 51
| 0.525641
| 581
| 0.93109
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.24359
|
3f1a06109933032a2467ac3c5a49cf17e45b67a0
| 387
|
py
|
Python
|
make_json.py
|
jfalcou/infra
|
97e05039a3f4f3d69b7c50233aed5e5d60a59605
|
[
"BSD-2-Clause"
] | 135
|
2017-01-12T04:39:08.000Z
|
2020-05-08T17:08:52.000Z
|
make_json.py
|
jfalcou/infra
|
97e05039a3f4f3d69b7c50233aed5e5d60a59605
|
[
"BSD-2-Clause"
] | 229
|
2017-01-23T12:45:44.000Z
|
2020-05-13T17:36:57.000Z
|
make_json.py
|
jfalcou/infra
|
97e05039a3f4f3d69b7c50233aed5e5d60a59605
|
[
"BSD-2-Clause"
] | 106
|
2017-04-18T14:42:34.000Z
|
2020-05-07T14:24:34.000Z
|
from configparser import ConfigParser
import os
import json
obj = {}
config = ConfigParser()
config.read(os.path.join(os.getenv("HOME"), ".aws", "credentials"))
obj["MY_ACCESS_KEY"] = config.get("default", "aws_access_key_id", fallback="")
obj["MY_SECRET_KEY"] = config.get("default", "aws_secret_access_key", fallback="")
with open("config.json", "w") as out:
json.dump(obj, out)
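# A minimal sketch of the resulting config.json (the values depend entirely on the
# local ~/.aws/credentials file; the keys are the ones written above):
# {"MY_ACCESS_KEY": "AKIA...", "MY_SECRET_KEY": "..."}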
| 29.769231
| 82
| 0.710594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 135
| 0.348837
|
3f1cf61b4a31d4bea3fa0897656382d3014a7dec
| 1,051
|
py
|
Python
|
ztest-type1.py
|
tochiji/ztest-type1
|
ca141d13a74708846cba414f2051200d162302a0
|
[
"MIT"
] | null | null | null |
ztest-type1.py
|
tochiji/ztest-type1
|
ca141d13a74708846cba414f2051200d162302a0
|
[
"MIT"
] | null | null | null |
ztest-type1.py
|
tochiji/ztest-type1
|
ca141d13a74708846cba414f2051200d162302a0
|
[
"MIT"
] | null | null | null |
#########################################################
# Test of the difference between two population proportions / Type 1
#########################################################
import sys
import math
def error_usage():
sys.stderr.write("usage: " + sys.argv[0] + "\n")
sys.stderr.write("\tこのプログラムは、4つの引数が必要です。\n\n")
sys.stderr.write(
"\t1.属性1のn数 2.属性1における比率p 3.属性2のn数 4.属性2における比率p\n")
sys.stderr.write("\t例: 200 0.6 100 0.48\n\n")
sys.stderr.write("\tただし、それぞれn数は30以上かつ比率pは[0<=p<=1]を満たすこと\n")
sys.exit(1)
# Are there exactly four arguments?
if len(sys.argv[1:]) != 4:
error_usage()
n1,p1,n2,p2 = map(float, sys.argv[1:])
p = ((n1*p1) + (n2*p2))/(n1+n2)
# Are the sample sizes (n) at least 30?
if (n1 < 30) or (n2 < 30):
error_usage()
# Are both proportions between 0 and 1?
if not (0 <= p1 <= 1) or not (0 <= p2 <= 1):
error_usage()
T = math.fabs(p1 - p2) / math.sqrt((p * (1-p)) * ((1/n1) + (1/n2)))
if T >= 2.58:
print("1%有意 (検定統計量:" + str(T) + ")")
elif T >= 1.96:
print("5%有意 (検定統計量:" + str(T) + ")")
elif T >= 1.65:
print("10%有意 (検定統計量:" + str(T) + ")")
else:
print("有意差なし (検定統計量:" + str(T) + ")")
| 24.44186
| 67
| 0.488107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 681
| 0.510112
|
3f1d2166206051864985cc1f8d2162c4a056737f
| 13,796
|
py
|
Python
|
flask_demo/main.py
|
yzj2019/database_learning
|
a9260799f96010674bb4077180ee45a51481e832
|
[
"MIT"
] | null | null | null |
flask_demo/main.py
|
yzj2019/database_learning
|
a9260799f96010674bb4077180ee45a51481e832
|
[
"MIT"
] | null | null | null |
flask_demo/main.py
|
yzj2019/database_learning
|
a9260799f96010674bb4077180ee45a51481e832
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import functools
from flask import Flask, session
from flask import redirect
from flask import request, make_response
from flask import render_template
from flask import url_for
from flask_bootstrap import Bootstrap
# Database handling
from db import *
# json
import json
# Create the Flask app
app = Flask(__name__, instance_relative_config=True)
bootstrap=Bootstrap(app)
app.secret_key = 'lab3'
# Bind requested page URLs to view functions on the app
@app.route("/", methods=("GET", "POST"))
@app.route("/login", methods=("GET", "POST"))
def login():
"""Log in a registered user by adding the user id to the session."""
if request.method == "POST":
        # POST request submitted from the login page
username = request.form["username"]
password = request.form["password"]
ipaddr = request.form["ipaddr"]
database = request.form["database"]
db = MyDefSQL(username, password, ipaddr, database)
err = db.login()
if err != '0':
return render_template("login_fail.html", err=err)
else:
#print(err)
session['username'] = username
session['password'] = password
session['ipaddr'] = ipaddr
session['database'] = database
return redirect(url_for('home'))
else :
        # GET request for the login page
return render_template("login.html")
# Home page
@app.route("/home", methods=(["GET", "POST"]))
def home():
return render_template("home.html")
# Return results for requests to the host/table page
@app.route("/table", methods=(["GET", "POST"]))
def table():
    # For simplicity the database is reconnected on every request; another context could be used to cache the connection
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.showtablecnt()
if request.method == "POST":
if 'clear' in request.form:
return render_template("table.html", rows = '', dbname=session['database'])
elif 'search' in request.form:
return render_template("table.html", rows = tabs, dbname=session['database'])
else:
return render_template("table.html", rows = tabs, dbname=session['database'])
# Customer management page
@app.route("/customer", methods=(["GET", "POST"]))
def customer():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.showcustomer()
if tabs==None:
tabs=""
if request.method == "POST":
if 'search' in request.form:
            # POST request submitted by the search form
searchinfo = {}
# print(len(request.form[u"客户身份证号"]))
for key,value in request.form.items():
                # Note: key and value are still unicode here; they are handled uniformly in db.py
if len(value) != 0 and key!='search':
                    # First-level filtering so that parts of the form may be left blank
searchinfo[key] = value
tabs = db.customer_search(searchinfo)
return render_template("customer.html", rows = tabs, dbname=session['database'])
        # Other delete/update/query requests arrive as POSTs submitted via Ajax
datas = json.loads(request.get_data(as_text=True))
function = datas["function"]
datas = datas["inputdata"]
# print(function)
# print(datas[0][u"客户身份证号"])
if function == "delete":
res = {'info':'删除成功!', 'errs':[]}
for data in datas:
err = db.customer_del(data)
if err != '0':
res['errs'].append([data[u"客户身份证号"],err])
if len(res['errs']) != 0:
res['info'] = "删除失败!"
return json.dumps(res)
elif function == "insert":
res = {'info':'插入成功!', 'errs':[]}
for data in datas:
err = db.customer_insert(data)
if err != '0':
res['errs'].append([data[u"客户身份证号"],err])
if len(res['errs']) != 0:
res['info'] = "插入失败!"
return json.dumps(res)
elif function == "update":
res = {'info':'修改成功!', 'errs':[]}
for data in datas:
err = db.customer_update(data)
if err != '0':
res['errs'].append([data[u"客户身份证号"],err])
if len(res['errs']) != 0:
res['info'] = "修改失败!"
return json.dumps(res)
else:
return render_template("customer.html", rows = tabs, dbname=session['database'])
# Account management pages
# Savings accounts
@app.route("/account/saving", methods=(["GET", "POST"]))
def saving():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.showaccount(True)
if tabs==None:
tabs=""
if request.method == "POST":
if 'search' in request.form:
            # POST request submitted by the search form
searchinfo = {}
for key,value in request.form.items():
                # Note: key and value are still unicode here; they are handled uniformly in db.py
if len(value) != 0 and key!='search':
                    # First-level filtering so that parts of the form may be left blank
searchinfo[key] = value
tabs = db.account_search(searchinfo, True)
return render_template("account_saving.html", rows = tabs, dbname=session['database'])
        # Other delete/update/query requests arrive as POSTs submitted via Ajax
datas = json.loads(request.get_data(as_text=True))
function = datas["function"]
datas = datas["inputdata"]
# print(function)
if function == "delete":
res = {'info':'删除成功!', 'errs':[]}
for data in datas:
err = db.account_del(data, True)
if err != '0':
res['errs'].append([data[u"账户.账户号"],err])
if len(res['errs']) != 0:
res['info'] = "删除失败!"
return json.dumps(res)
elif function == "insert":
res = {'info':'插入成功!', 'errs':[]}
for data in datas:
err = db.account_insert(data, True)
if err != '0':
res['errs'].append([data[u"账户.账户号"],err])
if len(res['errs']) != 0:
res['info'] = "插入失败!"
return json.dumps(res)
elif function == "update":
res = {'info':'修改成功!', 'errs':[]}
for data in datas:
err = db.account_update(data, True)
if err != '0':
res['errs'].append([data[u"账户.账户号"],err])
if len(res['errs']) != 0:
res['info'] = "修改失败!"
return json.dumps(res)
else:
return render_template("account_saving.html", rows = tabs, dbname=session['database'])
# Checking accounts
@app.route("/account/checking", methods=(["GET", "POST"]))
def checking():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.showaccount(False)
if tabs==None:
tabs=""
if request.method == "POST":
if 'search' in request.form:
            # POST request submitted by the search form
searchinfo = {}
for key,value in request.form.items():
                # Note: key and value are still unicode here; they are handled uniformly in db.py
if len(value) != 0 and key!='search':
                    # First-level filtering so that parts of the form may be left blank
searchinfo[key] = value
tabs = db.account_search(searchinfo, False)
return render_template("account_checking.html", rows = tabs, dbname=session['database'])
        # Other delete/update/query requests arrive as POSTs submitted via Ajax
datas = json.loads(request.get_data(as_text=True))
function = datas["function"]
datas = datas["inputdata"]
# print(function)
if function == "delete":
res = {'info':'删除成功!', 'errs':[]}
for data in datas:
err = db.account_del(data, False)
if err != '0':
res['errs'].append([data[u"账户.账户号"],err])
if len(res['errs']) != 0:
res['info'] = "删除失败!"
return json.dumps(res)
elif function == "insert":
res = {'info':'插入成功!', 'errs':[]}
for data in datas:
err = db.account_insert(data, False)
if err != '0':
res['errs'].append([data[u"账户.账户号"],err])
if len(res['errs']) != 0:
res['info'] = "插入失败!"
return json.dumps(res)
elif function == "update":
res = {'info':'修改成功!', 'errs':[]}
for data in datas:
err = db.account_update(data, False)
if err != '0':
res['errs'].append([data[u"账户.账户号"],err])
if len(res['errs']) != 0:
res['info'] = "修改失败!"
return json.dumps(res)
else:
return render_template("account_checking.html", rows = tabs, dbname=session['database'])
# Loan management page
@app.route("/loan", methods=(["GET", "POST"]))
def loan():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.showloan()
if tabs==None:
tabs=""
if request.method == "POST":
if 'search' in request.form:
            # POST request submitted by the search form
searchinfo = {}
for key,value in request.form.items():
                # Note: key and value are still unicode here; they are handled uniformly in db.py
if len(value) != 0 and key!='search':
                    # First-level filtering so that parts of the form may be left blank
searchinfo[key] = value
tabs = db.loan_search(searchinfo)
return render_template("loan.html", rows = tabs, dbname=session['database'])
        # Other delete/update/query requests arrive as POSTs submitted via Ajax
datas = json.loads(request.get_data(as_text=True))
function = datas["function"]
datas = datas["inputdata"]
# print(function)
if function == "delete":
res = {'info':'删除成功!', 'errs':[]}
for data in datas:
err = db.loan_del(data)
if err != '0':
res['errs'].append([data[u"贷款号"],err])
if len(res['errs']) != 0:
res['info'] = "删除失败!"
return json.dumps(res)
elif function == "insert":
res = {'info':'插入成功!', 'errs':[]}
for data in datas:
err = db.loan_insert(data)
if err != '0':
res['errs'].append([data[u"贷款号"],err])
if len(res['errs']) != 0:
res['info'] = "插入失败!"
return json.dumps(res)
elif function == "release":
res = {'info':'贷款发放成功!', 'errs':[]}
for data in datas:
err = db.loan_release(data)
if err != '0':
res['errs'].append([data[u"贷款号"],err])
if len(res['errs']) != 0:
res['info'] = "贷款发放失败!"
return json.dumps(res)
else:
return render_template("loan.html", rows = tabs, dbname=session['database'])
# Business statistics
# By month
@app.route("/statistic/month")
def month():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.statistic_month()
return render_template("statistic.html", how = u'月份', rows = tabs, dbname=session['database'])
# By quarter
@app.route("/statistic/quarter")
def quarter():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.statistic_quarter()
return render_template("statistic.html", how = u'季度', rows = tabs, dbname=session['database'])
# By year
@app.route("/statistic/year")
def year():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.statistic_year()
return render_template("statistic.html", how = u'年份', rows = tabs, dbname=session['database'])
# Test page for a new html template
@app.route("/test")
def test():
if 'username' in session:
db = MyDefSQL(session['username'], session['password'],
session['ipaddr'], session['database'])
err = db.login()
else:
return redirect(url_for('login'))
tabs = db.showtablecnt()
return render_template("test.html", rows = tabs)
# Test URL returning a plain html page
@app.route("/hello")
def hello():
return "hello world!"
# Handler for requests to pages that do not exist (404)
@app.errorhandler(404)
def not_found(e):
return render_template("404.html")
if __name__ == "__main__":
app.run(host = "0.0.0.0", debug=True)
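# A minimal usage sketch: started directly (python main.py), the app listens on
# 0.0.0.0 using Flask's default port (5000 unless overridden) with the debug
# reloader enabled, and the root URL serves the login form rendered from login.html.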
| 34.318408
| 101
| 0.508771
| 0
| 0
| 0
| 0
| 14,054
| 0.938811
| 0
| 0
| 4,392
| 0.293387
|
3f1e42b52ec11496ab90f620e8e049e8cb9d426e
| 1,462
|
py
|
Python
|
tests/test_env.py
|
dmitrvk/mymusichere-app
|
02a6d5f60a72197e08c98da59b0ef7e7168dcf4b
|
[
"MIT"
] | null | null | null |
tests/test_env.py
|
dmitrvk/mymusichere-app
|
02a6d5f60a72197e08c98da59b0ef7e7168dcf4b
|
[
"MIT"
] | 14
|
2020-06-06T19:08:03.000Z
|
2020-12-03T12:07:04.000Z
|
tests/test_env.py
|
dmitrvk/mymusichere-app
|
02a6d5f60a72197e08c98da59b0ef7e7168dcf4b
|
[
"MIT"
] | null | null | null |
# Licensed under the MIT License
from mymusichere import env
class TestEnv:
def test_get_config_from_env(self, monkeypatch):
monkeypatch.setenv('CONFIG', 'value')
assert env.get_str_config('config') == 'value'
def test_get_secret_from_env(self, monkeypatch):
monkeypatch.setenv('SECRET', 'value')
assert env.get_secret('secret') == 'value'
def test_get_config_from_file(self, fs):
fs.create_file('/config', contents='value')
assert env.get_str_config('config') == 'value'
def test_get_secret_from_file(self, fs):
fs.create_file('/run/secrets/secret', contents='value')
assert env.get_secret('secret') == 'value'
def test_config_default(self):
assert env.get_str_config('config', 'default') == 'default'
def test_secret_default(self):
assert env.get_secret('secret', 'default') == 'default'
def test_bool_config(self, monkeypatch):
monkeypatch.setenv('CONFIG_TRUE', '1')
monkeypatch.setenv('CONFIG_FALSE', '0')
assert env.get_bool_config('config_true') is True
assert env.get_bool_config('config_false') is False
assert env.get_bool_config('config_default', default=True) is True
assert env.get_bool_config('config_default', default=False) is False
def test_str_config(self, monkeypatch):
monkeypatch.setenv('CONFIG', 'config')
assert env.get_str_config('config') == 'config'
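# A minimal sketch (an assumption only; the real mymusichere.env module is not shown
# in this file) of behaviour consistent with the tests above -- environment variable
# first, then a file fallback, then the default:
# def get_secret(name, default=None):
#     value = os.environ.get(name.upper())
#     if value is None and os.path.exists('/run/secrets/' + name):
#         with open('/run/secrets/' + name) as f:
#             value = f.read()
#     return value if value is not None else default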
| 35.658537
| 76
| 0.678523
| 1,397
| 0.95554
| 0
| 0
| 0
| 0
| 0
| 0
| 342
| 0.233926
|
3f1f9aba8ecf3aa6254017a10062ec1345e2b069
| 2,943
|
py
|
Python
|
tests/factorys.py
|
2h4dl/pymilvus
|
6af6d4922242ae48d90ed5a1afb891d9e4d1540e
|
[
"Apache-2.0"
] | null | null | null |
tests/factorys.py
|
2h4dl/pymilvus
|
6af6d4922242ae48d90ed5a1afb891d9e4d1540e
|
[
"Apache-2.0"
] | null | null | null |
tests/factorys.py
|
2h4dl/pymilvus
|
6af6d4922242ae48d90ed5a1afb891d9e4d1540e
|
[
"Apache-2.0"
] | null | null | null |
# STL imports
import random
import logging
import string
import time
import datetime
import random
import struct
import sys
from functools import wraps
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
logging.getLogger('faker').setLevel(logging.ERROR)
sys.path.append('.')
# grpc
from milvus.grpc_gen import milvus_pb2
def gen_vectors(num, dim):
return [[random.random() for _ in range(dim)] for _ in range(num)]
def gen_single_vector(dim):
return [[random.random() for _ in range(dim)]]
def gen_vector(nb, d, seed=np.random.RandomState(1234)):
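    # Note: the default RandomState is created once at import time, so repeated
    # calls without an explicit seed continue the same deterministic stream.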
xb = seed.rand(nb, d).astype("float32")
return xb.tolist()
def gen_unique_str(str=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return prefix if str is None else str + "_" + prefix
def get_current_day():
return time.strftime('%Y-%m-%d', time.localtime())
def get_last_day(day):
tmp = datetime.datetime.now() - datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def get_next_day(day):
tmp = datetime.datetime.now() + datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def gen_long_str(num):
    # Build and return a random string of length num drawn from the characters of "tomorrow".
    long_str = ''
    for _ in range(num):
        long_str += random.choice('tomorrow')
    return long_str
def gen_one_binary(topk):
ids = [random.randrange(10000000, 99999999) for _ in range(topk)]
distances = [random.random() for _ in range(topk)]
return milvus_pb2.TopKQueryResult(struct.pack(str(topk) + 'l', *ids), struct.pack(str(topk) + 'd', *distances))
def gen_nq_binaries(nq, topk):
return [gen_one_binary(topk) for _ in range(nq)]
def fake_query_bin_result(nq, topk):
return gen_nq_binaries(nq, topk)
class FakerProvider(BaseProvider):
def collection_name(self):
return 'collection_names' + str(random.randint(1000, 9999))
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def collection_name_factory():
return fake.collection_name()
def records_factory(dimension, nq):
return [[random.random() for _ in range(dimension)] for _ in range(nq)]
def binary_records_factory(dimension, nq):
def binary_record(bsize):
s_m = "abcdefghijklmnopqrstuvwxyz"
s_list = [s_m[random.randint(0, 25)] for _ in range(bsize)]
s = "".join(s_list)
return bytes(s, encoding="ASCII")
bs = dimension // 8
return [binary_record(bs) for _ in range(nq)]
def integer_factory(nq):
return [random.randint(0, 128) for _ in range(nq)]
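# Decorator that prints the wall-clock time taken by the wrapped function.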
def time_it(func):
@wraps(func)
def inner(*args, **kwrgs):
pref = time.perf_counter()
result = func(*args, **kwrgs)
delt = time.perf_counter() - pref
print(f"[{func.__name__}][{delt:.4}s]")
return result
return inner
| 23.357143
| 115
| 0.675841
| 269
| 0.091403
| 0
| 0
| 228
| 0.077472
| 0
| 0
| 205
| 0.069657
|
3f201da335b43cb8e7b8ff1ba5bda41dec4c38c6
| 524
|
py
|
Python
|
HACKERRANK_Regrex&Parsing/Matrix_Script.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
HACKERRANK_Regrex&Parsing/Matrix_Script.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
HACKERRANK_Regrex&Parsing/Matrix_Script.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
import math
import os
import random
import re
import sys
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
matrix = []
if (n>0 and m>0 and n<100 and m< 100):
for _ in range(n):
matrix_item = input()
matrix.append(matrix_item)
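# Read the script column by column, then collapse any run of the listed symbols
# or spaces that sits between two alphanumeric characters into a single space.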
string = ""
for cols in range(m):
    for rows in range(n):
        string += matrix[rows][cols]
output = re.sub(r"\b[!@#$%& ]+\b", " ", string)
print(output)
| 21.833333
| 50
| 0.59542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.041985
|
3f203a9e4f2175047e23a90b2ce6f785f3b752e7
| 4,495
|
py
|
Python
|
smartsheet/models/filter.py
|
Funtimes-Smarts/Python-import-Smart
|
ffb99887d03e31d10da553c9ee8c7be1238816fc
|
[
"Apache-2.0"
] | null | null | null |
smartsheet/models/filter.py
|
Funtimes-Smarts/Python-import-Smart
|
ffb99887d03e31d10da553c9ee8c7be1238816fc
|
[
"Apache-2.0"
] | null | null | null |
smartsheet/models/filter.py
|
Funtimes-Smarts/Python-import-Smart
|
ffb99887d03e31d10da553c9ee8c7be1238816fc
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2016 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .criteria import Criteria
from ..types import TypedList
from ..util import prep
from datetime import datetime
import json
import logging
import six
class Filter(object):
"""Smartsheet Filter data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the Filter model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._pre_request_filter = None
self.allowed_values = {
'_type': [
'LIST',
'CUSTOM']}
self._criteria = TypedList(Criteria)
self._exclude_selected = None
self.__type = None
self._values = TypedList(str)
if props:
# account for alternate variable names from raw API response
if 'criteria' in props:
self.criteria = props['criteria']
if 'excludeSelected' in props:
self.exclude_selected = props['excludeSelected']
if 'exclude_selected' in props:
self.exclude_selected = props[
'exclude_selected']
if 'type' in props:
self._type = props['type']
if '_type' in props:
self._type = props['_type']
if 'values' in props:
self.values = props['values']
self.__initialized = True
def __getattr__(self, key):
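        # 'type' is stored internally as '_type' to avoid shadowing the built-in,
        # so attribute access by the public name is redirected here.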
if key == 'type':
return self._type
else:
raise AttributeError(key)
@property
def criteria(self):
return self._criteria
@criteria.setter
def criteria(self, value):
if isinstance(value, list):
self._criteria.purge()
self._criteria.extend([
(Criteria(x, self._base)
if not isinstance(x, Criteria) else x) for x in value
])
elif isinstance(value, TypedList):
self._criteria.purge()
self._criteria = value.to_list()
elif isinstance(value, Criteria):
self._criteria.purge()
self._criteria.append(value)
@property
def exclude_selected(self):
return self._exclude_selected
@exclude_selected.setter
def exclude_selected(self, value):
if isinstance(value, bool):
self._exclude_selected = value
@property
def _type(self):
return self.__type
@_type.setter
def _type(self, value):
if isinstance(value, six.string_types):
if value not in self.allowed_values['_type']:
raise ValueError(
("`{0}` is an invalid value for Filter`_type`,"
" must be one of {1}").format(
value, self.allowed_values['_type']))
self.__type = value
@property
def values(self):
return self._values
@values.setter
def values(self, value):
if isinstance(value, list):
self._values.purge()
self._values.extend([
(str(x)
if not isinstance(x, str) else x) for x in value
])
elif isinstance(value, TypedList):
self._values.purge()
self._values = value.to_list()
elif isinstance(value, str):
self._values.purge()
self._values.append(value)
def to_dict(self, op_id=None, method=None):
obj = {
'criteria': prep(self._criteria),
'excludeSelected': prep(self._exclude_selected),
'type': prep(self.__type),
'values': prep(self._values)}
return obj
def to_json(self):
return json.dumps(self.to_dict(), indent=2)
def __str__(self):
return json.dumps(self.to_dict())
| 30.787671
| 75
| 0.588654
| 3,629
| 0.807341
| 0
| 0
| 1,797
| 0.399778
| 0
| 0
| 1,064
| 0.236707
|
3f20478583f74a50977bf5b718f080efb96af674
| 5,524
|
py
|
Python
|
utils/train.py
|
danilonumeroso/MEG
|
86f2a664e22082b0ff5d01c8e0ad9618b64e9065
|
[
"Apache-2.0"
] | 6
|
2020-10-26T13:53:01.000Z
|
2021-03-12T14:26:43.000Z
|
utils/train.py
|
danilonumeroso/Explainer
|
e133c150738f09998d0350e58dece4824ee58a76
|
[
"Apache-2.0"
] | null | null | null |
utils/train.py
|
danilonumeroso/Explainer
|
e133c150738f09998d0350e58dece4824ee58a76
|
[
"Apache-2.0"
] | 1
|
2021-03-13T01:08:12.000Z
|
2021-03-13T01:08:12.000Z
|
import torch
import torch.nn.functional as F
import os.path as osp
import json
from torch_geometric.utils import precision, recall
from torch_geometric.utils import f1_score, accuracy
from torch.utils.tensorboard import SummaryWriter
def train_epoch_classifier(model, train_loader, len_train, optimizer, device):
model.train()
loss_all = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
output, _ = model(data.x, data.edge_index, batch=data.batch)
loss = F.nll_loss(F.log_softmax(output, dim=-1), data.y)
loss.backward()
loss_all += data.num_graphs * loss.item()
optimizer.step()
return loss_all / len_train
def test_classifier(model, loader, device):
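    # Evaluate on the whole loader and return accuracy, macro-averaged precision,
    # recall and F1, plus the accumulated NLL loss.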
model.eval()
y = torch.tensor([]).long().to(device)
yp = torch.tensor([]).long().to(device)
loss_all = 0
for data in loader:
data = data.to(device)
pred, _ = model(data.x, data.edge_index, batch=data.batch)
loss = F.nll_loss(F.log_softmax(pred, dim=-1), data.y)
pred = pred.max(dim=1)[1]
y = torch.cat([y, data.y])
yp = torch.cat([yp, pred])
loss_all += data.num_graphs * loss.item()
return (
accuracy(y, yp),
precision(y, yp, model.num_output).mean().item(),
recall(y, yp, model.num_output).mean().item(),
f1_score(y, yp, model.num_output).mean().item(),
loss_all
)
def train_cycle_classifier(task, train_loader, val_loader, test_loader, len_train, len_val, len_test,
model, optimizer, device, base_path, epochs):
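    # Train for the requested number of epochs, log train/val metrics to TensorBoard,
    # and checkpoint the model plus a JSON summary whenever validation accuracy improves.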
best_acc = (0, 0)
writer = SummaryWriter(base_path + '/plots')
for epoch in range(epochs):
loss = train_epoch_classifier(model, train_loader, len_train, optimizer, device)
writer.add_scalar('Loss/train', loss, epoch)
train_acc, train_prec, train_rec, train_f1, _ = test_classifier(model, train_loader, device)
val_acc, val_prec, val_rec, val_f1, l = test_classifier(model, val_loader, device)
writer.add_scalar('Accuracy/train', train_acc, epoch)
writer.add_scalar('Accuracy/val', val_acc, epoch)
writer.add_scalar('Loss/val', l / len_val, epoch)
print(f'Epoch: {epoch}, Loss: {loss:.5f}')
print(f'Train -> Acc: {train_acc:.5f} Rec: {train_rec:.5f} \
Prec: {train_prec:.5f} F1: {train_f1:.5f}')
print(f'Val -> Acc: {val_acc:.5f} Rec: {val_rec:.5f} \
Prec: {val_prec:.5f} F1: {val_f1:.5f}')
if best_acc[1] < val_acc:
best_acc = train_acc, val_acc
torch.save(
model.state_dict(),
osp.join(base_path + '/ckpt/',
model.__class__.__name__ + ".pth")
)
print("New best model saved!")
with open(base_path + '/best_result.json', 'w') as outfile:
json.dump({'train_acc': train_acc,
'val_acc': val_acc,
'train_rec': train_rec,
'val_rec': val_rec,
'train_f1': train_f1,
'val_f1': val_f1,
'train_prec': train_prec,
'val_prec': val_prec}, outfile)
def train_epoch_regressor(model, train_loader, len_train, optimizer, device):
model.train()
loss_all = 0
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
output, _ = model(data.x.float(), data.edge_index, batch=data.batch)
loss = F.mse_loss(output, data.y)
loss.backward()
loss_all += data.num_graphs * loss.item()
optimizer.step()
return loss_all / len_train
def test_regressor(model, loader, len_loader, device):
model.eval()
loss_all = 0
for data in loader:
data = data.to(device)
pred, _ = model(data.x.float(), data.edge_index, batch=data.batch)
loss = F.mse_loss(pred, data.y).detach()
loss_all += data.num_graphs * loss.item()
return loss_all / len_loader
def train_cycle_regressor(task, train_loader, val_loader, test_loader,
len_train, len_val, len_test, model,
optimizer, device, base_path, epochs):
best_acc = (0, 0)
writer = SummaryWriter(base_path + '/plots')
best_error = (+10000, +10000)
for epoch in range(epochs):
loss = train_epoch_regressor(model, train_loader, len_train, optimizer, device)
writer.add_scalar('Loss/train', loss, epoch)
train_error = test_regressor(model, train_loader, len_train, device)
val_error = test_regressor(model, val_loader, len_val, device)
writer.add_scalar('MSE/train', train_error, epoch)
        writer.add_scalar('MSE/val', val_error, epoch)
print(f'Epoch: {epoch}, Loss: {loss:.5f}')
print(f'Training Error: {train_error:.5f}')
print(f'Val Error: {val_error:.5f}')
if best_error[1] > val_error:
best_error = train_error, val_error
torch.save(
model.state_dict(),
osp.join(base_path + '/ckpt/',
model.__class__.__name__ + ".pth")
)
print("New best model saved!")
with open(base_path + '/best_result.json', 'w') as outfile:
json.dump({'train_error': train_error,
'val_error': val_error}, outfile)
| 33.889571
| 101
| 0.589609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 664
| 0.120203
|
3f221d9155ff841349fd18bfe1fa0dbaac313b9d
| 260
|
py
|
Python
|
Algorithms/LCP/29/math1.py
|
M-Quadra/LeetCode-problems
|
0cc100aa1e50b02df289f04fe2e0b97239eb9895
|
[
"MIT"
] | null | null | null |
Algorithms/LCP/29/math1.py
|
M-Quadra/LeetCode-problems
|
0cc100aa1e50b02df289f04fe2e0b97239eb9895
|
[
"MIT"
] | null | null | null |
Algorithms/LCP/29/math1.py
|
M-Quadra/LeetCode-problems
|
0cc100aa1e50b02df289f04fe2e0b97239eb9895
|
[
"MIT"
] | null | null | null |
class Solution:
def orchestraLayout(self, num: int, xPos: int, yPos: int) -> int:
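        # LCP 29: the num x num grid is filled along a spiral with the values 1..9
        # repeating; compute the number of spiral steps to (xPos, yPos) in closed
        # form and map it back to 1..9 instead of simulating the walk.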
a, b = (min(xPos, num-1-yPos), 1) if yPos >= xPos else (min(yPos, num-1-xPos), -1)
return (4*num*a - 4*a*a - 2*a + b*(xPos+yPos) + (b>>1&1)*4*(num-a-1))%9 + 1
| 65
| 90
| 0.546154
| 260
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3f2291365a0ddde1dace00a736bbde9087e031ac
| 9,716
|
py
|
Python
|
python-sdk/nuimages/scripts/render_images.py
|
bjajoh/nuscenes-devkit
|
5bc5627801c3867de395a500a1905c24171cec7d
|
[
"Apache-2.0"
] | 1,284
|
2018-09-12T14:08:06.000Z
|
2022-03-31T08:28:20.000Z
|
python-sdk/nuimages/scripts/render_images.py
|
bjajoh/nuscenes-devkit
|
5bc5627801c3867de395a500a1905c24171cec7d
|
[
"Apache-2.0"
] | 518
|
2018-10-20T08:34:15.000Z
|
2022-03-31T08:16:08.000Z
|
python-sdk/nuimages/scripts/render_images.py
|
bjajoh/nuscenes-devkit
|
5bc5627801c3867de395a500a1905c24171cec7d
|
[
"Apache-2.0"
] | 487
|
2018-09-13T20:03:21.000Z
|
2022-03-31T04:41:17.000Z
|
# nuScenes dev-kit.
# Code written by Holger Caesar, 2020.
import argparse
import gc
import os
import random
from typing import List
from collections import defaultdict
import cv2
import tqdm
from nuimages.nuimages import NuImages
def render_images(nuim: NuImages,
mode: str = 'all',
cam_name: str = None,
log_name: str = None,
sample_limit: int = 50,
filter_categories: List[str] = None,
out_type: str = 'image',
out_dir: str = '~/Downloads/nuImages',
cleanup: bool = True) -> None:
"""
Render a random selection of images and save them to disk.
Note: The images rendered here are keyframes only.
:param nuim: NuImages instance.
:param mode: What to render:
"image" for the image without annotations,
"annotated" for the image with annotations,
"trajectory" for a rendering of the trajectory of the vehice,
"all" to render all of the above separately.
    :param cam_name: Only render images from a particular camera, e.g. "CAM_BACK".
:param log_name: Only render images from a particular log, e.g. "n013-2018-09-04-13-30-50+0800".
:param sample_limit: Maximum number of samples (images) to render. Note that the mini split only includes 50 images.
:param filter_categories: Specify a list of object_ann category names. Every sample that is rendered must
contain annotations of any of those categories.
:param out_type: The output type as one of the following:
'image': Renders a single image for the image keyframe of each sample.
'video': Renders a video for all images/pcls in the clip associated with each sample.
:param out_dir: Folder to render the images to.
:param cleanup: Whether to delete images after rendering the video. Not relevant for out_type == 'image'.
"""
# Check and convert inputs.
    assert out_type in ['image', 'video'], 'Error: Unknown out_type %s!' % out_type
all_modes = ['image', 'annotated', 'trajectory']
assert mode in all_modes + ['all'], 'Error: Unknown mode %s!' % mode
assert not (out_type == 'video' and mode == 'trajectory'), 'Error: Cannot render "trajectory" for videos!'
if mode == 'all':
if out_type == 'image':
modes = all_modes
elif out_type == 'video':
modes = [m for m in all_modes if m not in ['annotated', 'trajectory']]
else:
raise Exception('Error" Unknown mode %s!' % mode)
else:
modes = [mode]
if filter_categories is not None:
category_names = [c['name'] for c in nuim.category]
for category_name in filter_categories:
assert category_name in category_names, 'Error: Invalid object_ann category %s!' % category_name
# Create output folder.
out_dir = os.path.expanduser(out_dir)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# Filter by camera.
sample_tokens = [s['token'] for s in nuim.sample]
if cam_name is not None:
sample_tokens_cam = []
for sample_token in sample_tokens:
sample = nuim.get('sample', sample_token)
key_camera_token = sample['key_camera_token']
sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token)
if sensor['channel'] == cam_name:
sample_tokens_cam.append(sample_token)
sample_tokens = sample_tokens_cam
# Filter by log.
if log_name is not None:
sample_tokens_cleaned = []
for sample_token in sample_tokens:
sample = nuim.get('sample', sample_token)
log = nuim.get('log', sample['log_token'])
if log['logfile'] == log_name:
sample_tokens_cleaned.append(sample_token)
sample_tokens = sample_tokens_cleaned
# Filter samples by category.
if filter_categories is not None:
# Get categories in each sample.
sd_to_object_cat_names = defaultdict(lambda: set())
for object_ann in nuim.object_ann:
category = nuim.get('category', object_ann['category_token'])
sd_to_object_cat_names[object_ann['sample_data_token']].add(category['name'])
# Filter samples.
sample_tokens_cleaned = []
for sample_token in sample_tokens:
sample = nuim.get('sample', sample_token)
key_camera_token = sample['key_camera_token']
category_names = sd_to_object_cat_names[key_camera_token]
if any([c in category_names for c in filter_categories]):
sample_tokens_cleaned.append(sample_token)
sample_tokens = sample_tokens_cleaned
# Get a random selection of samples.
random.shuffle(sample_tokens)
# Limit number of samples.
sample_tokens = sample_tokens[:sample_limit]
print('Rendering %s for mode %s to folder %s...' % (out_type, mode, out_dir))
for sample_token in tqdm.tqdm(sample_tokens):
sample = nuim.get('sample', sample_token)
log = nuim.get('log', sample['log_token'])
log_name = log['logfile']
key_camera_token = sample['key_camera_token']
sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token)
sample_cam_name = sensor['channel']
sd_tokens = nuim.get_sample_content(sample_token)
# We cannot render a video if there are missing camera sample_datas.
if len(sd_tokens) < 13 and out_type == 'video':
print('Warning: Skipping video for sample token %s, as not all 13 frames exist!' % sample_token)
continue
for mode in modes:
out_path_prefix = os.path.join(out_dir, '%s_%s_%s_%s' % (log_name, sample_token, sample_cam_name, mode))
if out_type == 'image':
write_image(nuim, key_camera_token, mode, '%s.jpg' % out_path_prefix)
elif out_type == 'video':
write_video(nuim, sd_tokens, mode, out_path_prefix, cleanup=cleanup)
def write_video(nuim: NuImages,
sd_tokens: List[str],
mode: str,
out_path_prefix: str,
cleanup: bool = True) -> None:
"""
Render a video by combining all the images of type mode for each sample_data.
:param nuim: NuImages instance.
:param sd_tokens: All sample_data tokens in chronological order.
:param mode: The mode - see render_images().
:param out_path_prefix: The file prefix used for the images and video.
:param cleanup: Whether to delete images after rendering the video.
"""
# Loop through each frame to create the video.
out_paths = []
for i, sd_token in enumerate(sd_tokens):
out_path = '%s_%d.jpg' % (out_path_prefix, i)
out_paths.append(out_path)
write_image(nuim, sd_token, mode, out_path)
# Create video.
first_im = cv2.imread(out_paths[0])
freq = 2 # Display frequency (Hz).
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path = '%s.avi' % out_path_prefix
out = cv2.VideoWriter(video_path, fourcc, freq, first_im.shape[1::-1])
# Load each image and add to the video.
for out_path in out_paths:
im = cv2.imread(out_path)
out.write(im)
# Delete temporary image if requested.
if cleanup:
os.remove(out_path)
# Finalize video.
out.release()
def write_image(nuim: NuImages, sd_token: str, mode: str, out_path: str) -> None:
"""
Render a single image of type mode for the given sample_data.
:param nuim: NuImages instance.
:param sd_token: The sample_data token.
:param mode: The mode - see render_images().
:param out_path: The file to write the image to.
"""
if mode == 'annotated':
nuim.render_image(sd_token, annotation_type='all', out_path=out_path)
elif mode == 'image':
nuim.render_image(sd_token, annotation_type='none', out_path=out_path)
elif mode == 'trajectory':
sample_data = nuim.get('sample_data', sd_token)
nuim.render_trajectory(sample_data['sample_token'], out_path=out_path)
else:
raise Exception('Error: Unknown mode %s!' % mode)
# Trigger garbage collection to avoid memory overflow from the render functions.
gc.collect()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Render a random selection of images and save them to disk.')
parser.add_argument('--seed', type=int, default=42) # Set to 0 to disable.
parser.add_argument('--version', type=str, default='v1.0-mini')
parser.add_argument('--dataroot', type=str, default='/data/sets/nuimages')
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--mode', type=str, default='all')
parser.add_argument('--cam_name', type=str, default=None)
parser.add_argument('--log_name', type=str, default=None)
parser.add_argument('--sample_limit', type=int, default=50)
parser.add_argument('--filter_categories', action='append')
parser.add_argument('--out_type', type=str, default='image')
parser.add_argument('--out_dir', type=str, default='~/Downloads/nuImages')
args = parser.parse_args()
# Set random seed for reproducible image selection.
if args.seed != 0:
random.seed(args.seed)
# Initialize NuImages class.
nuim_ = NuImages(version=args.version, dataroot=args.dataroot, verbose=bool(args.verbose), lazy=False)
# Render images.
render_images(nuim_, mode=args.mode, cam_name=args.cam_name, log_name=args.log_name, sample_limit=args.sample_limit,
filter_categories=args.filter_categories, out_type=args.out_type, out_dir=args.out_dir)
| 42.614035
| 120
| 0.656546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,789
| 0.389975
|
3f23c6741b5a4eb8f1708037600b9e1ee26ac16e
| 10,868
|
py
|
Python
|
version_info.py
|
sairam4123/GodotReleaseScriptPython
|
2fd2644b0301f20b89b6772a0c93cec6d012f080
|
[
"MIT"
] | null | null | null |
version_info.py
|
sairam4123/GodotReleaseScriptPython
|
2fd2644b0301f20b89b6772a0c93cec6d012f080
|
[
"MIT"
] | null | null | null |
version_info.py
|
sairam4123/GodotReleaseScriptPython
|
2fd2644b0301f20b89b6772a0c93cec6d012f080
|
[
"MIT"
] | null | null | null |
import re
from configparser import ConfigParser
from constants import PROJECT_FOLDER, RELEASE_LEVEL_DICT
from release_type import ReleaseLevel, ReleaseType, value_from_key
class VersionInfo:
def __init__(
self,
major: int = 0,
minor: int = 0,
bugfix: int = 0,
hotfix: int = 0,
release_level: ReleaseLevel = ReleaseLevel.public,
serial: int = None,
release_type: ReleaseType = None,
short_version: bool = False,
):
self.major = 0 if major is None else int(major)
self.minor = 0 if minor is None else int(minor)
self.bugfix = 0 if bugfix is None else int(bugfix)
self.hotfix = 0 if hotfix is None else int(hotfix)
self.short_version = short_version
self.release_type = release_type
if self.release_type is None:
if self.hotfix == 0:
if self.bugfix == 0:
if self.minor == 0:
if self.major != 0:
self.release_type = ReleaseType.major
else:
self.release_type = ReleaseType.minor
else:
self.release_type = ReleaseType.bugfix
else:
self.release_type = ReleaseType.hotfix
self.serial = (serial and int(serial)) or 0
self.release_level = value_from_key(RELEASE_LEVEL_DICT, release_level) or release_level or ReleaseLevel.public
def __str__(self):
version: str = f'v{self.major}.{self.minor}.{self.bugfix}'
if self.release_type == ReleaseType.hotfix:
version = f'{version}.{self.hotfix}'
elif self.release_level != ReleaseLevel.public:
version = f'{version}{RELEASE_LEVEL_DICT[self.release_level]}{self.serial}'
return version
def increment(self, release_level: ReleaseLevel, release_type: ReleaseType = None):
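        # Bump the component selected by release_type, zero out the lower-order
        # components, and advance or reset the pre-release serial according to
        # the requested release level.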
sequel: bool = False
if release_type == self.release_type and self.release_level == ReleaseLevel.public:
sequel = True
if self.release_type != release_type or sequel:
if release_type == ReleaseType.hotfix:
self.hotfix += 1
else:
self.hotfix = 0
if release_type == ReleaseType.bugfix:
self.bugfix += 1
else:
self.bugfix = 0
if release_type == ReleaseType.minor:
self.minor += 1
else:
self.minor = 0
if release_type == ReleaseType.major:
self.major += 1
self.serial = None
self.release_type = release_type
if release_level != ReleaseLevel.public:
self.increase_serial(release_level)
elif release_level == ReleaseLevel.public:
self.serial = 0
self.release_level = release_level
self.release_type = release_type
def increase_serial(self, release_level: ReleaseLevel):
if self.serial is not None and self.release_level != release_level:
self.serial = 0
else:
if self.serial is not None:
self.serial += 1
else:
self.serial = 0
self.release_level = release_level
def convert_to_godot_format(self):
return repr(str(self).lstrip("v")).replace("'", '"')
@classmethod
def start_version(cls):
return cls(0, 1, 0)
@classmethod
def load_version(cls, version: str):
pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?")
match: re.Match = pattern.match(version.replace('"', ''))
if match:
return cls(*match.groups())
else:
return cls.start_version()
@classmethod
def check_version(cls, version: str):
pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?")
match: re.Match = pattern.match(version.replace('"', ''))
return bool(match)
def set_version(new_version: VersionInfo) -> None:
config = ConfigParser()
with open(list(PROJECT_FOLDER.glob("export_presets.cfg"))[0], 'r') as exports_config:
config.read_file(exports_config)
for section_name, section in config.items():
for key, value in section.items():
if key.endswith('version'):
config.set(section_name, key, new_version.convert_to_godot_format())
config_file = open(list(PROJECT_FOLDER.glob("export_presets.cfg"))[0], "w")
config.write(config_file)
config_file.close()
with open(list(PROJECT_FOLDER.glob("version.txt"))[0], 'w') as version_file:
version_file.write(str(new_version))
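# Read the current version, preferring version.txt and falling back to
# export_presets.cfg when the file is missing or malformed.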
def get_version() -> VersionInfo:
try:
version_file = open(list(PROJECT_FOLDER.glob("version.txt"))[0], 'r')
except IndexError:
        version_file = open(PROJECT_FOLDER/"version.txt", "w+")
        return VersionInfo.start_version()
else:
        version_text = version_file.read()
        if not VersionInfo.check_version(version_text):
print("Falling back to export presets")
config = ConfigParser()
with open(list(PROJECT_FOLDER.glob("export_presets.cfg"))[0], 'r') as exports_config:
config.read_file(exports_config)
version: VersionInfo = VersionInfo.start_version()
for section_name, section in config.items():
for key, value in section.items():
if key.endswith('version'):
version = VersionInfo.load_version(value)
return version
else:
            return VersionInfo.load_version(version_text)
if __name__ == '__main__': # Test Script
index = 0
version_info = VersionInfo(1, 0, 0, 0, ReleaseLevel.public, None, ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.bugfix)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.bugfix)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.hotfix)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.major)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.hotfix)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.public, release_type=ReleaseType.hotfix)
print(index, version_info)
index += 1
version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor)
print(index, version_info)
index += 1
_version = version_info.convert_to_godot_format()
print(_version)
_pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?")
_match: re.Match = _pattern.match(_version.replace('"', ''))
print(index, VersionInfo(*_match.groups()))
| 39.234657
| 118
| 0.656054
| 3,929
| 0.36152
| 0
| 0
| 641
| 0.05898
| 0
| 0
| 514
| 0.047295
|
3f2514948f103576dc7043e1528909e26cdfc7f7
| 2,302
|
py
|
Python
|
test/test_create_json_items_from_embark_xml.py
|
ndlib/mellon-search
|
30f7eb267e35d77ee6d126789866d44d825c3e0c
|
[
"Apache-2.0"
] | null | null | null |
test/test_create_json_items_from_embark_xml.py
|
ndlib/mellon-search
|
30f7eb267e35d77ee6d126789866d44d825c3e0c
|
[
"Apache-2.0"
] | null | null | null |
test/test_create_json_items_from_embark_xml.py
|
ndlib/mellon-search
|
30f7eb267e35d77ee6d126789866d44d825c3e0c
|
[
"Apache-2.0"
] | null | null | null |
# test_create_json_items_from_embark_xml.py 2/18/19 sm
""" test create_json_items_from_embark_xml.py """
import sys
import json
import unittest
import csv
from xml.etree.ElementTree import ElementTree, tostring
# add parent directory to path
import os
import inspect
CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)
import create_json_items_from_embark_xml
class Test(unittest.TestCase):
""" Class for test fixtures """
def test_write_json_output(self):
""" test writing json output """
json_data = '{"sample" : "test"}'
create_json_items_from_embark_xml.write_json_output('.', 'test_write_json_output.json', json_data)
with open('./test_write_json_output.json', 'r') as input_source:
data = json.load(input_source)
input_source.close()
self.assertTrue(json_data == data)
def test_everything(self):
""" run test on whole process, verifying expected results """
create_json_items_from_embark_xml.create_json_items_from_embark_xml('./objects 01_18_19.xml', 'temp/pnx',
csv_output_root_directory='temp')
# verify one csv
with open('temp/1976.057/main.csv', 'r') as read_actual:
reader = csv.reader(read_actual)
actual_csv = list(reader)
with open('./expected_results/test_everything.csv', 'r') as read_expected:
reader = csv.reader(read_expected)
expected_csv = list(reader)
self.assertTrue(actual_csv == expected_csv)
# verify one pnx
actual_results_file_name = 'temp/pnx/1976.057.xml'
expected_results_file_name = 'expected_results/test_everything.xml'
actual_results = ElementTree(file=actual_results_file_name)
expected_results = ElementTree(file=expected_results_file_name)
# print(ElementTree.tostring(xml_tree.getroot()))
self.assertTrue(tostring(actual_results.getroot()) == tostring(expected_results.getroot()))
def suite():
""" define test suite """
return unittest.TestLoader().loadTestsFromTestCase(Test)
if __name__ == '__main__':
suite()
unittest.main()
| 36.539683
| 113
| 0.682884
| 1,664
| 0.72285
| 0
| 0
| 0
| 0
| 0
| 0
| 631
| 0.274109
|
3f272482b04c8aa1d417b0e37326c6eff1cef597
| 3,000
|
py
|
Python
|
plot/laikago/plot_task.py
|
MaxxWilson/ASE389Project
|
13c3c72887e27fbed2eef63c1e27b4a185036a39
|
[
"MIT"
] | 17
|
2021-05-31T10:55:48.000Z
|
2022-03-30T10:09:37.000Z
|
plot/laikago/plot_task.py
|
MaxxWilson/ASE389Project
|
13c3c72887e27fbed2eef63c1e27b4a185036a39
|
[
"MIT"
] | 2
|
2021-10-01T22:11:43.000Z
|
2021-12-06T02:34:33.000Z
|
plot/laikago/plot_task.py
|
MaxxWilson/ASE389Project
|
13c3c72887e27fbed2eef63c1e27b4a185036a39
|
[
"MIT"
] | 3
|
2021-08-24T00:53:18.000Z
|
2022-03-31T17:29:07.000Z
|
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pickle
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj
tasks = [
'com_pos', 'com_vel', 'chassis_quat', 'chassis_ang_vel', 'toeFL_pos',
'toeFL_vel', 'toeFR_pos', 'toeFR_vel', 'toeRR_pos', 'toeRR_vel',
'toeRL_pos', 'toeRL_vel'
]
weights = [
'w_com', 'w_chassis_ori', 'w_toeFL', 'w_toeFR', 'w_toeRR', 'w_toeRL'
]
rf_z = ['rf_z_max_toeFL', 'rf_z_max_toeFR', 'rf_z_max_toeRR', 'rf_z_max_toeRL']
time = []
phase = []
rf_cmd = []
des, act = dict(), dict()
for topic in tasks:
des[topic] = []
act[topic] = []
w = dict()
for topic in weights:
w[topic] = []
rf_z_max = dict()
for topic in rf_z:
rf_z_max[topic] = []
with open('data/pnc.pkl', 'rb') as file:
while True:
try:
d = pickle.load(file)
time.append(d['time'])
phase.append(d['phase'])
for topic in tasks:
des[topic].append(d[topic + '_des'])
act[topic].append(d[topic])
for topic in weights:
w[topic].append(d[topic])
for topic in rf_z:
rf_z_max[topic].append(d[topic])
rf_cmd.append(d['rf_cmd'])
except EOFError:
break
for k, v in des.items():
des[k] = np.stack(v, axis=0)
for k, v in act.items():
act[k] = np.stack(v, axis=0)
rf_cmd = np.stack(rf_cmd, axis=0)
phase = np.stack(phase, axis=0)
## =============================================================================
## Plot Task
## =============================================================================
plot_task(time, des['com_pos'], act['com_pos'], des['com_vel'], act['com_vel'],
phase, 'com lin')
plot_task(time, des['chassis_quat'], act['chassis_quat'],
des['chassis_ang_vel'], act['chassis_ang_vel'], phase, 'pelvis ori')
plot_task(time, des['toeFL_pos'], act['toeFL_pos'], des['toeFL_vel'],
act['toeFL_vel'], phase, 'left foot lin')
plot_task(time, des['toeFR_pos'], act['toeFR_pos'], des['toeFR_vel'],
act['toeFR_vel'], phase, 'left foot ori')
plot_task(time, des['toeRR_pos'], act['toeRR_pos'], des['toeRR_vel'],
act['toeRR_vel'], phase, 'right foot lin')
plot_task(time, des['toeRL_pos'], act['toeRL_pos'], des['toeRL_vel'],
act['toeRL_vel'], phase, 'right foot ori')
## =============================================================================
## Plot WBC Solutions
## =============================================================================
plot_rf_quad(time, rf_cmd, phase)
## =============================================================================
## Plot Weights and Max Reaction Force Z
## =============================================================================
plot_weights(time, w, phase)
plot_rf_z_max(time, rf_z_max, phase)
plt.show()
| 29.411765
| 94
| 0.515333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,221
| 0.407
|
3f272ad913a6368c2dd0e9360ea0f0c8243524c5
| 3,504
|
py
|
Python
|
h/views/api/users.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
h/views/api/users.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
h/views/api/users.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
from pyramid.httpexceptions import HTTPConflict
from h.auth.util import client_authority
from h.presenters import TrustedUserJSONPresenter
from h.schemas import ValidationError
from h.schemas.api.user import CreateUserAPISchema, UpdateUserAPISchema
from h.services.user_unique import DuplicateUserError
from h.views.api.config import api_config
from h.views.api.exceptions import PayloadError
@api_config(
versions=["v1", "v2"],
route_name="api.user_read",
request_method="GET",
link_name="user.read",
description="Fetch a user",
permission="read",
)
def read(context, _request):
"""
Fetch a user.
This API endpoint allows authorized clients (those able to provide a valid
Client ID and Client Secret) to read users in their authority.
"""
return TrustedUserJSONPresenter(context.user).asdict()
@api_config(
versions=["v1", "v2"],
route_name="api.users",
request_method="POST",
link_name="user.create",
description="Create a new user",
permission="create",
)
def create(request):
"""
Create a user.
This API endpoint allows authorised clients (those able to provide a valid
Client ID and Client Secret) to create users in their authority. These
users are created pre-activated, and are unable to log in to the web
service directly.
Note: the authority-enforcement logic herein is, by necessity, strange.
The API accepts an ``authority`` parameter but the only valid value for
the param is the client's verified authority. If the param does not
match the client's authority, ``ValidationError`` is raised.
:raises ValidationError: if ``authority`` param does not match client
authority
:raises HTTPConflict: if user already exists
"""
client_authority_ = client_authority(request)
schema = CreateUserAPISchema()
appstruct = schema.validate(_json_payload(request))
# Enforce authority match
if appstruct["authority"] != client_authority_:
raise ValidationError(
"authority '{auth_param}' does not match client authority".format(
auth_param=appstruct["authority"]
)
)
user_unique_service = request.find_service(name="user_unique")
try:
user_unique_service.ensure_unique(appstruct, authority=client_authority_)
except DuplicateUserError as err:
raise HTTPConflict(str(err)) from err
user_signup_service = request.find_service(name="user_signup")
user = user_signup_service.signup(require_activation=False, **appstruct)
presenter = TrustedUserJSONPresenter(user)
return presenter.asdict()
@api_config(
versions=["v1", "v2"],
route_name="api.user",
request_method="PATCH",
link_name="user.update",
description="Update a user",
permission="update",
)
def update(user, request):
"""
Update a user.
This API endpoint allows authorised clients (those able to provide a valid
Client ID and Client Secret) to update users in their authority.
"""
schema = UpdateUserAPISchema()
appstruct = schema.validate(_json_payload(request))
user_update_service = request.find_service(name="user_update")
user = user_update_service.update(user, **appstruct)
presenter = TrustedUserJSONPresenter(user)
return presenter.asdict()
def _json_payload(request):
try:
return request.json_body
except ValueError as err:
raise PayloadError() from err
| 31.567568
| 81
| 0.710616
| 0
| 0
| 0
| 0
| 2,961
| 0.845034
| 0
| 0
| 1,420
| 0.405251
|
3f2894b54d3e8597c52938f696795d8309755127
| 239
|
py
|
Python
|
controllers/social_auth/kivyauth/__init__.py
|
richierh/SalesKivyMD
|
f445adc701946ff38865b4a1a00a03529142613e
|
[
"MIT"
] | 126
|
2020-06-12T15:02:19.000Z
|
2022-03-31T10:13:29.000Z
|
controllers/social_auth/kivyauth/__init__.py
|
richierh/SalesKivyMD
|
f445adc701946ff38865b4a1a00a03529142613e
|
[
"MIT"
] | 13
|
2020-07-01T01:03:26.000Z
|
2022-02-21T02:21:24.000Z
|
controllers/social_auth/kivyauth/__init__.py
|
richierh/SalesKivyMD
|
f445adc701946ff38865b4a1a00a03529142613e
|
[
"MIT"
] | 22
|
2020-06-12T22:24:27.000Z
|
2022-03-10T13:24:33.000Z
|
from kivy.logger import Logger
from kivy.utils import platform
__version__ = "2.3.2"
_log_message = "KivyAuth:" + f" {__version__}" + f' (installed at "{__file__}")'
__all__ = ("login_providers", "auto_login")
Logger.info(_log_message)
| 23.9
| 80
| 0.723849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 0.39749
|
3f28d1e2f76100adc00945a0759d254a0a1638b4
| 20
|
py
|
Python
|
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10
|
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78
|
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1
|
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
from .user import *
| 20
| 20
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|