Dataset schema (one record per source file):

| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |

One record per row follows; each record lists its metadata, then the file content, then the per-file statistics.
---
hexsha: c678e771e656c40554793300ea72d58a952d58cd | size: 301 | ext: py | lang: Python
max_stars:  path=slack_invite/slack_invite/forms.py | repo=sanchagrins/umuc-cs-slack | head=648e709905b153ad17a3df8bd826a784edd5c11b | licenses=["MIT"] | count=null | events=null → null
max_issues: path=slack_invite/slack_invite/forms.py | repo=sanchagrins/umuc-cs-slack | head=648e709905b153ad17a3df8bd826a784edd5c11b | licenses=["MIT"] | count=13 | events=2017-08-18T01:19:49.000Z → 2017-11-16T02:24:07.000Z
max_forks:  path=slack_invite/slack_invite/forms.py | repo=umuc-cs/umuc-cs-slack | head=648e709905b153ad17a3df8bd826a784edd5c11b | licenses=["MIT"] | count=null | events=null → null
content:
from django import forms
class InviteForm(forms.Form):
email_addr = forms.EmailField(widget=forms.EmailInput(attrs={'class':'form-control','placeholder':'Email address','id':'email'}), label='')
check_terms = forms.BooleanField(label="I agree to the <a href=''>Terms and Code of Conduct</a>")
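
# A minimal standalone check (illustrative, not part of the original file):
# settings.configure() is needed because validating a form touches Django's
# settings and translation machinery. The email address below is invented.
if __name__ == "__main__":
    import django
    from django.conf import settings

    settings.configure(USE_I18N=False)
    django.setup()

    form = InviteForm(data={"email_addr": "student@example.edu", "check_terms": "on"})
    print(form.is_valid())  # True: valid address and the terms box ticked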
avg_line_length: 50.166667 | max_line_length: 143 | alphanum_fraction: 0.724252 | classes: 274/0.910299 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 118/0.392027 (count/score)
---
hexsha: c67973e1c48ecff18bf6a4fc82b259940ef31d3c | size: 4,561 | ext: py | lang: Python
max_stars:  path=tools/fastq_pair_names/fastq_pair_names.py | repo=Neato-Nick/pico_galaxy | head=79666612a9ca2d335622bc282a4768bb43d91419 | licenses=["MIT"] | count=18 | events=2015-06-09T13:57:09.000Z → 2022-01-14T21:05:54.000Z
max_issues: path=tools/fastq_pair_names/fastq_pair_names.py | repo=Neato-Nick/pico_galaxy | head=79666612a9ca2d335622bc282a4768bb43d91419 | licenses=["MIT"] | count=34 | events=2015-04-02T19:26:08.000Z → 2021-06-17T18:59:24.000Z
max_forks:  path=tools/fastq_pair_names/fastq_pair_names.py | repo=Neato-Nick/pico_galaxy | head=79666612a9ca2d335622bc282a4768bb43d91419 | licenses=["MIT"] | count=24 | events=2015-02-25T13:40:19.000Z → 2021-09-08T20:40:40.000Z
content:
#!/usr/bin/env python
"""Extract paired read names from FASTQ file(s).
The input file should be a valid FASTQ file(s), the output is two tabular
files - the paired read names (without suffixes), and unpaired read names
(including any unrecognised pair names).
Note that the FASTQ variant is unimportant (Sanger, Solexa, Illumina, or even
Color Space should all work equally well).
This script is copyright 2014-2017 by Peter Cock, The James Hutton Institute
(formerly SCRI), Scotland, UK. All rights reserved.
See accompanying text file for licence details (MIT license).
"""
from __future__ import print_function
import os
import re
import sys
if "-v" in sys.argv or "--version" in sys.argv:
print("0.0.5")
sys.exit(0)
from galaxy_utils.sequence.fastq import fastqReader
msg = """Expects at least 3 arguments:
- Pair names tabular output filename
- Non-pair names tabular output filename
- Input FASTQ input filename(s)
"""
if len(sys.argv) < 4:
sys.exit(msg)
output_pairs = sys.argv[1]
output_nonpairs = sys.argv[2]
input_fastq_filenames = sys.argv[3:]
# Cope with three widely used suffix naming conventions,
# Illumina: /1 or /2
# Forward/reverse: .f or .r
# Sanger, e.g. .p1k and .q1k
# See http://staden.sourceforge.net/manual/pregap4_unix_50.html
re_f = re.compile(r"(/1|\.f|\.[sfp]\d\w*)$")
re_r = re.compile(r"(/2|\.r|\.[rq]\d\w*)$")
# assert re_f.match("demo/1")
assert re_f.search("demo.f")
assert re_f.search("demo.s1")
assert re_f.search("demo.f1k")
assert re_f.search("demo.p1")
assert re_f.search("demo.p1k")
assert re_f.search("demo.p1lk")
assert re_r.search("demo/2")
assert re_r.search("demo.r")
assert re_r.search("demo.q1")
assert re_r.search("demo.q1lk")
assert not re_r.search("demo/1")
assert not re_r.search("demo.f")
assert not re_r.search("demo.p")
assert not re_f.search("demo/2")
assert not re_f.search("demo.r")
assert not re_f.search("demo.q")
re_illumina_f = re.compile(r"^@[a-zA-Z0-9_:-]+ 1:.*$")
re_illumina_r = re.compile(r"^@[a-zA-Z0-9_:-]+ 2:.*$")
assert re_illumina_f.match("@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 1:N:0:TGNCCA")
assert re_illumina_r.match("@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 2:N:0:TGNCCA")
assert not re_illumina_f.match(
"@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 2:N:0:TGNCCA"
)
assert not re_illumina_r.match(
"@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 1:N:0:TGNCCA"
)
count = 0
pairs = set() # Will this scale OK?
forward = 0
reverse = 0
neither = 0
out_pairs = open(output_pairs, "w")
out_nonpairs = open(output_nonpairs, "w")
for input_fastq in input_fastq_filenames:
if not os.path.isfile(input_fastq):
sys.exit("Missing input FASTQ file %r" % input_fastq)
in_handle = open(input_fastq)
# Don't care about the FASTQ type really...
for record in fastqReader(in_handle, "sanger"):
count += 1
name = record.identifier.split(None, 1)[0]
assert name[0] == "@", record.identifier # Quirk of the Galaxy parser
name = name[1:]
is_forward = False
suffix = re_f.search(name)
if suffix:
# ============
# Forward read
# ============
template = name[: suffix.start()]
is_forward = True
elif re_illumina_f.match(record.identifier):
template = name # No suffix
is_forward = True
if is_forward:
forward += 1
if template not in pairs:
pairs.add(template)
out_pairs.write(template + "\n")
else:
is_reverse = False
suffix = re_r.search(name)
if suffix:
# ============
# Reverse read
# ============
template = name[: suffix.start()]
is_reverse = True
elif re_illumina_r.match(record.identifier):
template = name # No suffix
is_reverse = True
if is_reverse:
reverse += 1
if template not in pairs:
pairs.add(template)
out_pairs.write(template + "\n")
else:
# ===========================
# Neither forward nor reverse
# ===========================
out_nonpairs.write(name + "\n")
neither += 1
in_handle.close()
out_pairs.close()
out_nonpairs.close()
print(
"%i reads (%i forward, %i reverse, %i neither), %i pairs"
% (count, forward, reverse, neither, len(pairs))
)
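
# A quick standalone illustration (read names invented) of the suffix handling
# above: both members of a pair reduce to the same template name once the
# forward or reverse suffix is stripped.
#   >>> re_f.search("frag0001/1").start()   # strips '/1' -> template 'frag0001'
#   >>> re_r.search("frag0001/2").start()   # strips '/2' -> template 'frag0001'
# Typical invocation (file names are made up):
#   python fastq_pair_names.py pairs.tabular nonpairs.tabular R1.fastq R2.fastq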
avg_line_length: 31.027211 | max_line_length: 86 | alphanum_fraction: 0.611489 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 1,847/0.404955 (count/score)
---
hexsha: c6798f3695e83af119f05e4fdd4f14111d00889d | size: 2,903 | ext: py | lang: Python
max_stars:  path=code/05_speech_to_text/main_05_b_wake_word.py | repo=padmalcom/AISpeechAssistant | head=b7501a23a8f513acb5043f3c7bb06df129bdc2cc | licenses=["Apache-2.0"] | count=1 | events=2021-09-08T09:21:16.000Z → 2021-09-08T09:21:16.000Z
max_issues: path=code/05_speech_to_text/main_05_b_wake_word.py | repo=padmalcom/AISpeechAssistant | head=b7501a23a8f513acb5043f3c7bb06df129bdc2cc | licenses=["Apache-2.0"] | count=null | events=null → null
max_forks:  path=code/05_speech_to_text/main_05_b_wake_word.py | repo=padmalcom/AISpeechAssistant | head=b7501a23a8f513acb5043f3c7bb06df129bdc2cc | licenses=["Apache-2.0"] | count=2 | events=2022-02-06T09:54:40.000Z → 2022-03-01T07:52:51.000Z
content:
from loguru import logger
import yaml
import time
import pyaudio
import struct
import os
import sys
from vosk import Model, SpkModel, KaldiRecognizer
import json
import text2numde
from TTS import Voice
import multiprocessing
CONFIG_FILE = "config.yml"
SAMPLE_RATE = 16000
FRAME_LENGTH = 512
class VoiceAssistant():
def __init__(self):
logger.info("Initialisiere VoiceAssistant...")
logger.debug("Lese Konfiguration...")
global CONFIG_FILE
with open(CONFIG_FILE, "r", encoding='utf8') as ymlfile:
self.cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
if self.cfg:
logger.debug("Konfiguration gelesen.")
else:
logger.debug("Konfiguration konnte nicht gelesen werden.")
sys.exit(1)
language = self.cfg['assistant']['language']
if not language:
language = "de"
logger.info("Verwende Sprache {}", language)
logger.debug("Initialisiere Audioeingabe...")
self.pa = pyaudio.PyAudio()
self.audio_stream = self.pa.open(
rate=SAMPLE_RATE,
channels=1,
format=pyaudio.paInt16,
input=True,
frames_per_buffer=FRAME_LENGTH,
input_device_index=0)
logger.debug("Audiostream geöffnet.")
logger.info("Initialisiere Sprachausgabe...")
self.tts = Voice()
voices = self.tts.get_voice_keys_by_language(language)
if len(voices) > 0:
logger.info('Stimme {} gesetzt.', voices[0])
self.tts.set_voice(voices[0])
else:
logger.warning("Es wurden keine Stimmen gefunden.")
self.tts.say("Initialisierung abgeschlossen")
logger.debug("Sprachausgabe initialisiert")
# Initialize speech recognition
logger.info("Initialisiere Spracherkennung...")
stt_model = Model('./vosk-model-de-0.6')
speaker_model = SpkModel('./vosk-model-spk-0.4')
self.rec = KaldiRecognizer(stt_model, speaker_model, 16000)
logger.info("Initialisierung der Spracherkennung abgeschlossen.")
def run(self):
logger.info("VoiceAssistant Instanz wurde gestartet.")
try:
while True:
pcm = self.audio_stream.read(FRAME_LENGTH)
if self.rec.AcceptWaveform(pcm):
recResult = json.loads(self.rec.Result())
# Pull the recognised text out of the JSON object
sentence = recResult['text']
logger.debug('Ich habe verstanden "{}"', sentence)
if sentence.lower().startswith("kevin"):
sentence = sentence[5:] # Cut the wake word "Kevin" off the start of the sentence
sentence = sentence.strip() # Strip whitespace from the start and end of the sentence
logger.info("Prozessiere Befehl {}.", sentence)
except KeyboardInterrupt:
logger.debug("Per Keyboard beendet")
finally:
logger.debug('Beginne Aufräumarbeiten...')
if self.audio_stream is not None:
self.audio_stream.close()
if self.pa is not None:
self.pa.terminate()
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
va = VoiceAssistant()
logger.info("Anwendung wurde gestartet")
va.run()
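
# A standalone sketch (sentences invented, not part of the original file) of
# the wake-word handling in run(): a sentence is only processed when it starts
# with "kevin", and the wake word is sliced off before the command is handled.
def _strip_wake_word(sentence, wake_word="kevin"):
    if sentence.lower().startswith(wake_word):
        return sentence[len(wake_word):].strip()
    return None

# _strip_wake_word("kevin wie wird das wetter")  -> "wie wird das wetter"
# _strip_wake_word("hallo welt")                 -> None (no wake word)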
avg_line_length: 27.130841 | max_line_length: 86 | alphanum_fraction: 0.709955 | classes: 2,457/0.845783 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 884/0.304303 (count/score)
---
hexsha: c67b22952d56027ba353d1b1a79d790cab33d849 | size: 4,781 | ext: py | lang: Python
max_stars:  path=PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(09) Dictionaries (2).py | repo=peternewman22/Python_Courses | head=07a798b6f264fc6069eb1205c9d429f00fb54bc5 | licenses=["MIT"] | count=null | events=null → null
max_issues: path=PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(09) Dictionaries (2).py | repo=peternewman22/Python_Courses | head=07a798b6f264fc6069eb1205c9d429f00fb54bc5 | licenses=["MIT"] | count=null | events=null → null
max_forks:  path=PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/Python Scripts/(09) Dictionaries (2).py | repo=peternewman22/Python_Courses | head=07a798b6f264fc6069eb1205c9d429f00fb54bc5 | licenses=["MIT"] | count=null | events=null → null
content:
# coding: utf-8
# # Dictionaries (2)
# In the last lesson we saw how to create dictionaries and how to access the different items in a dictionary by their key. We also saw how to add to and update the items in a dictionary using assignment, or the <code>dict.update()</code> method, and how to delete items using the <code>del</code> keyword.
#
# In this lesson we're going to continue looking at dictionaries. We're going to find out how to get a dictionary's length, access the keys and values from the dictionary, as well as how to access items inside nested dictionaries.
#
# This knowledge will come in really handy when we start building charts with Plotly, as we'll be passing the instructions for our chart to Plotly using nested dictionaries.
#
# ### Getting a dictionary's length
#
# You can get the number of items in a dictionary by using the <code>len()</code> function in the same way that we could get the length of a string or a list. This counts the number of key/value pairs in the dictionary:
# In[1]:
testScores1 = {'Alice' : 100, 'Bob' : 75, 'Ian' : 25, 'Susan' : 60}
len(testScores1)
# ### Getting a list of keys from a dictionary
#
# We can get a list of keys in a dictionary by using the <code>dict.keys()</code> method:
# In[2]:
print(testScores1.keys())
# This returns an object that looks a bit like a list, but doesn't behave like one. You can't index or slice it, for example:
# In[3]:
var1 = testScores1.keys()
var1[0]
# You can however turn it into a list by using the <code>list()</code> function, which behaves like the <code>str()</code> or <code>int()</code> functions we learnt about previously. Converting the <code>dict_keys</code> object into a list lets us index and slice it:
# In[4]:
var2 = list(testScores1.keys())
var2[0]
# ### Getting a list of values in a dictionary
#
# In the same way that we got the dictionary's keys using <code>dict.keys()</code>, we can also get the dictionary's values using the <code>dict.values()</code> function:
# In[5]:
print(testScores1.values())
# Once again, we can't index or slice this object without turning it into a list:
# In[6]:
var3 = testScores1.values()
var3[0]
# In[7]:
var4 = list(testScores1.values())
var4[0]
# ### Getting the keys and values from a dictionary
#
# As well as getting the keys and values from a dictionary separately, we can also get them together using the <code>dict.items()</code> method. This is especially useful for looping through the items in a dictionary, and we'll look at this in more depth in the loops lesson.
#
# <code>dict.items()</code> returns a collection of tuples. A tuple is very similar to a list, in that you can index the items in it, however you cannot change their values.
# In[8]:
print(testScores1.items())
# Once again, we need to convert this <code>dict_items</code> object into a list to be able to index and slice it:
# In[9]:
var5 = list(testScores1.items())
var5[0]
# We can then select the items individually inside the tuple as if we were selecting items in a nested list:
# In[10]:
var5[0][0]
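# A small aside (not an original lesson cell): tuples index like lists but
# reject assignment, which is exactly the "cannot change their values" rule.
pair = ('Alice', 100)
print(pair[0])
try:
    pair[1] = 99
except TypeError as err:
    print(err)  # 'tuple' object does not support item assignment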
# ### Nested Dictionaries
#
# Just as we can create nested lists, we can also create nested dictionaries. Here I've created a dictionary which holds the test scores for the students. I've initially populated it with the scores for the first test:
# In[11]:
studentGrades = {'test1' : testScores1}
print(studentGrades)
# Now I can add the scores for the second test:
# In[12]:
testScores2 = {'Ian': 32, 'Susan': 71, 'Bob': 63, 'Alice': 99}
studentGrades.update({'test2' : testScores2})
print(studentGrades)
# We can access the scores for the first test:
# In[13]:
print(studentGrades['test1'])
# We can access the scores for a particular student for the first test in the same way. First of all, we access the <code>'test1'</code> dictionary, then within that we access the value we want by passing the corresponding key. Here I'm getting Ian's score for test 1:
# In[14]:
print(studentGrades['test1']['Ian'])
# ### What have we learnt this lesson?
# In this lesson we've seen how to get the length of a dictionary - the number of items in it. We've also seen how to get a list of the keys and values in the dictionary by using the <code>dict.keys()</code> and <code>dict.values()</code> functions in conjunction with the <code>list()</code> function, as well as how to get a list of tuples of the keys and values using <code>dict.items()</code>. We also learnt that a tuple is very similar to a list, but that you cannot change the items inside.
#
# Finally, we learnt how to create nested dictionaries and access the items inside them.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
avg_line_length: 32.972414 | max_line_length: 497 | alphanum_fraction: 0.72328 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 4,113/0.86028 (count/score)
---
hexsha: c67c053ab03a0bdd40b9436db19002f5b98e01a7 | size: 7,851 | ext: py | lang: Python
max_stars:  path=ros/src/waypoint_updater/waypoint_updater.py | repo=rezarajan/sdc-capstone | head=a1612fc8acd544e892d8d98142ffc161edf9989a | licenses=["MIT"] | count=null | events=null → null
max_issues: path=ros/src/waypoint_updater/waypoint_updater.py | repo=rezarajan/sdc-capstone | head=a1612fc8acd544e892d8d98142ffc161edf9989a | licenses=["MIT"] | count=null | events=null → null
max_forks:  path=ros/src/waypoint_updater/waypoint_updater.py | repo=rezarajan/sdc-capstone | head=a1612fc8acd544e892d8d98142ffc161edf9989a | licenses=["MIT"] | count=null | events=null → null
content:
#!/usr/bin/env python
import rospy
import numpy as np
from scipy.spatial import KDTree
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# rospy.Subscriber('/vehicle/obstacle', PoseStamped, self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None # To store vehicle pose
self.base_waypoints = None # To store base waypoints
self.waypoints_2d = None # 2D array of waypoint coordinates
self.waypoints_tree = None # KD tree of waypoints for faster coordinate lookup
self.stop_line_wp = None # Waypoint index of the closest stop line
self.decel_limit = rospy.get_param('/dbw_node/decel_limit', -5)
self.max_velocity = rospy.get_param('/waypoint_loader/velocity', 40)*0.277778 #km/h -> m/s
self.max_braking_dist = -(self.max_velocity**2)/(2*self.decel_limit)
self.loop()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose is not None and self.base_waypoints is not None:
# Publish the next waypoints ahead of the vehicle
closest_idx = self.get_next_waypoint_idx()
self.publish_waypoints(closest_idx)
rate.sleep()
def get_next_waypoint_idx(self):
"""Finds the next immediate waypoint to the vehicle.
Returns:
closest_idx: the next immediate waypoint to the vehicle
"""
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoints_tree.query([x,y], 1)[1]
# Find the next waypoint using some linear algebra
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx-1]
# Vectorizing components
closest_vec = np.array(closest_coord)
prev_vec = np.array(prev_coord)
pos_vec = np.array([x,y])
val = np.dot(closest_vec-prev_vec, pos_vec-closest_vec)
if(val > 0):
# Vehicle is ahead of the closest_idx, select next waypoint
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self, closest_idx):
"""Wrapper function which publishes the next waypoints given the index for the waypoint
which is immediately ahead of the vehicle.
Arguments:
closest_idx {integer}: index of waypoint immediately ahead of the vehicle
"""
if(self.base_waypoints is not None):
hedr = self.base_waypoints.header
farthest_idx = closest_idx + LOOKAHEAD_WPS
wpts = self.base_waypoints.waypoints[closest_idx:farthest_idx]
check_idx = farthest_idx
if(farthest_idx >= len(self.base_waypoints.waypoints)):
# Create cyclical list
check_idx = farthest_idx%len(self.base_waypoints.waypoints)
wpts += self.base_waypoints.waypoints[:check_idx]
# Check that light is red and stop line waypoint is within the lookahead range
if(self.stop_line_wp is not None and self.stop_line_wp != -1 \
and check_idx-LOOKAHEAD_WPS <= self.stop_line_wp <= check_idx):
# Decelerate on red light
wpts = self.decelerate(wpts, closest_idx)
final_waypoints = Lane()
final_waypoints.header = hedr
final_waypoints.waypoints = wpts
self.final_waypoints_pub.publish(final_waypoints)
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
if self.base_waypoints is None:
self.base_waypoints = waypoints
# Modify all base waypoint velocities so the vehicle moves in a loop (does not stop at last waypoint)
# see rubric requirements "Successful Navigation"
for i in range(len(self.base_waypoints.waypoints)):
self.base_waypoints.waypoints[i] = self.set_waypoint_velocity(self.base_waypoints.waypoints[i], self.max_velocity)
if self.waypoints_2d is None:
# Create a KD Tree for faster coordinate lookup
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoints_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
# If -1, then proceed, else stop (this node is updated from tl_detector)
self.stop_line_wp = msg.data # int from Int32 message type
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoint, velocity):
waypoint.twist.twist.linear.x = velocity
return waypoint
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
def decelerate(self, waypoints, closest_idx):
"""
Arguments:
waypoints: list of relevant waypoints ahead of vehicle
closest_index: waypoint index closest to vehicle
Returns:
Modified list of waypoints with stopping constraints
"""
new_waypoints = []
stop_idx = max(self.stop_line_wp - closest_idx - 3, 0) # -3 to ensure the front of the vehicle stops at the line
for i, wp in enumerate(waypoints):
p = Waypoint() # Create a new instance of the waypoint
p.pose = wp.pose # equivalent to the initial waypoint
dist = self.distance(waypoints, i, stop_idx)
vel = math.sqrt(-2*self.decel_limit*dist) #v^2 = u^2 + 2as, where v = 0 (stopped)
vel = max(min(vel, self.get_waypoint_velocity(wp)), 0) # constrain wp velocity
if(i == stop_idx):
vel = 0 # ensure last waypoint is at zero velocity
p = self.set_waypoint_velocity(p, vel) # assign waypoint velocity
new_waypoints.append(p)
return new_waypoints
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
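
# A self-contained sketch (coordinates invented) of the heading test inside
# get_next_waypoint_idx(): a positive dot product between (closest - prev) and
# (car - closest) means the car is already past the closest waypoint, so the
# index advances by one.
#   closest, prev, car = np.array([5., 0.]), np.array([4., 0.]), np.array([5.5, 0.1])
#   np.dot(closest - prev, car - closest) > 0   # True -> take the next waypoint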
avg_line_length: 41.760638 | max_line_length: 136 | alphanum_fraction: 0.659279 | classes: 6,749/0.859636 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 2,930/0.373201 (count/score)
---
hexsha: c67c49682485de62598f7200a4f39c46aba3d865 | size: 1,353 | ext: py | lang: Python
max_stars:  path=apps/hosts/migrations/0001_initial.py | repo=yhgnice/toolsvb | head=35f9d27ee2439d134cab160a7cf930ea13a31d26 | licenses=["Apache-2.0"] | count=null | events=null → null
max_issues: path=apps/hosts/migrations/0001_initial.py | repo=yhgnice/toolsvb | head=35f9d27ee2439d134cab160a7cf930ea13a31d26 | licenses=["Apache-2.0"] | count=null | events=null → null
max_forks:  path=apps/hosts/migrations/0001_initial.py | repo=yhgnice/toolsvb | head=35f9d27ee2439d134cab160a7cf930ea13a31d26 | licenses=["Apache-2.0"] | count=null | events=null → null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-09 10:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='HostList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('f_host_id', models.IntegerField(default=0, verbose_name='ServerID')),
('f_host_domain', models.CharField(max_length=50, verbose_name='\u57df\u540d')),
('f_host_lan', models.CharField(max_length=50, verbose_name='IP\u5730\u5740')),
('f_host_wan', models.CharField(max_length=50, verbose_name='wan_IP\u5730\u5740')),
('f_host_port', models.IntegerField(default='22', verbose_name='ssh')),
('f_host_pass', models.CharField(default='', max_length=20, verbose_name='\u5bc6\u7801')),
('host_context', models.TextField(default='', max_length=100, verbose_name='\u63cf\u8ff0\u4fe1\u606f')),
],
options={
'verbose_name': '\u4e3b\u673a\u4fe1\u606f',
'verbose_name_plural': '\u4e3b\u673a\u4fe1\u606f',
},
),
]
avg_line_length: 39.794118 | max_line_length: 120 | alphanum_fraction: 0.602365 | classes: 1,196/0.883962 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 378/0.279379 (count/score)
---
hexsha: c67cc3624a702cafd7e7246abe8b88132e111d61 | size: 53 | ext: py | lang: Python
max_stars:  path=modules/__init__.py | repo=richardHaw/nagare | head=4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c | licenses=["MIT"] | count=null | events=null → null
max_issues: path=modules/__init__.py | repo=richardHaw/nagare | head=4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c | licenses=["MIT"] | count=null | events=null → null
max_forks:  path=modules/__init__.py | repo=richardHaw/nagare | head=4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c | licenses=["MIT"] | count=null | events=null → null
content:
# this file is needed for python2, delete for python3
avg_line_length: 53 | max_line_length: 53 | alphanum_fraction: 0.792453 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 53/1 (count/score)
---
hexsha: c67cf2280718f5ed4b61e267d9cdf0637f3ec6f1 | size: 3,603 | ext: py | lang: Python
max_stars:  path=src/originexample/pipelines/handle_ggo_received.py | repo=project-origin/example-backend | head=13d9b528533dcaada8b0f0b93bbe2ef6a25c38ae | licenses=["MIT"] | count=null | events=null → null
max_issues: path=src/originexample/pipelines/handle_ggo_received.py | repo=project-origin/example-backend | head=13d9b528533dcaada8b0f0b93bbe2ef6a25c38ae | licenses=["MIT"] | count=1 | events=2021-02-10T02:22:51.000Z → 2021-02-10T02:22:51.000Z
max_forks:  path=src/originexample/pipelines/handle_ggo_received.py | repo=project-origin/example-backend | head=13d9b528533dcaada8b0f0b93bbe2ef6a25c38ae | licenses=["MIT"] | count=null | events=null → null
content:
"""
TODO write this
"""
import marshmallow_dataclass as md
from sqlalchemy import orm
from originexample import logger
from originexample.db import inject_session
from originexample.tasks import celery_app, lock
from originexample.auth import User, UserQuery
from originexample.consuming import (
GgoConsumerController,
ggo_is_available,
)
from originexample.services.account import (
Ggo,
AccountService,
AccountServiceError,
)
# Settings
RETRY_DELAY = 10
MAX_RETRIES = (24 * 60 * 60) / RETRY_DELAY
LOCK_TIMEOUT = 60 * 2
# Services / controllers
controller = GgoConsumerController()
account_service = AccountService()
# JSON schemas
ggo_schema = md.class_schema(Ggo)()
def start_handle_ggo_received_pipeline(ggo, user):
"""
:param Ggo ggo:
:param User user:
"""
handle_ggo_received \
.s(
subject=user.sub,
ggo_json=ggo_schema.dump(ggo),
address=ggo.address,
) \
.apply_async()
@celery_app.task(
bind=True,
name='handle_ggo_received.handle_ggo_received',
default_retry_delay=RETRY_DELAY,
max_retries=MAX_RETRIES,
)
@logger.wrap_task(
title='Handling GGO received',
pipeline='handle_ggo_received',
task='handle_ggo_received',
)
@inject_session
def handle_ggo_received(task, subject, address, ggo_json, session):
"""
:param celery.Task task:
:param str subject:
:param str address:
:param JSON ggo_json:
:param Session session:
"""
__log_extra = {
'subject': subject,
'address': address,
'ggo': str(ggo_json),
'pipeline': 'handle_ggo_received',
'task': 'handle_ggo_received',
}
ggo = ggo_schema.load(ggo_json)
# Get User from database
try:
user = UserQuery(session) \
.is_active() \
.has_sub(subject) \
.one()
except orm.exc.NoResultFound:
raise
except Exception as e:
logger.exception('Failed to load User from database, retrying...', extra=__log_extra)
raise task.retry(exc=e)
# Affected subjects TODO
# affected_subjects = controller.get_affected_subjects(user, ggo, session)
# lock_keys = [get_lock_key(sub, ggo.begin) for sub in affected_subjects]
lock_key = ggo.begin.strftime('%Y-%m-%d-%H-%M')
# This lock is in place to avoid timing issues when executing multiple
# tasks for the same account at the same time, which can cause
# the transferred or retired amount to exceed the allowed amount
with lock(lock_key, timeout=LOCK_TIMEOUT) as acquired:
if not acquired:
logger.info('Could not acquire lock(s), retrying...', extra=__log_extra)
raise task.retry()
try:
if not ggo_is_available(user.access_token, ggo):
logger.info('GGO is unavailable, skipping...', extra=__log_extra)
return
# Consume GGO
controller.consume_ggo(user, ggo, session)
except AccountServiceError as e:
if e.status_code == 400:
raise
else:
logger.exception('Failed to consume GGO, retrying...', extra=__log_extra)
raise task.retry(exc=e)
except Exception as e:
logger.exception('Failed to consume GGO, retrying...', extra=__log_extra)
raise task.retry(exc=e)
# def get_lock_key(subject, begin):
# """
# :param str subject:
# :param datetime.datetime begin:
# :rtype: str
# """
# return '%s-%s' % (subject, begin.strftime('%Y-%m-%d-%H-%M'))
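
# A tiny sketch (timestamp invented) of the lock key actually used above:
# every GGO sharing the same `begin` minute contends for the same lock.
#   >>> import datetime
#   >>> datetime.datetime(2021, 2, 10, 2, 0).strftime('%Y-%m-%d-%H-%M')
#   '2021-02-10-02-00'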
avg_line_length: 27.503817 | max_line_length: 93 | alphanum_fraction: 0.64141 | classes: 0/0 | generators: 0/0 | decorators: 2,408/0.668332 | async_functions: 0/0 | documentation: 1,264/0.350819 (count/score)
---
hexsha: c680e030b9ebdb230ac43be3ba2a8e742227cf78 | size: 113 | ext: py | lang: Python
max_stars:  path=tests/test_spliceai_wrapper.py | repo=RipollJ/spliceai-wrapper | head=29adcc7c93fa45bec953e9df078151653f419a73 | licenses=["MIT"] | count=2 | events=2020-05-27T12:55:22.000Z → 2020-05-30T12:59:49.000Z
max_issues: path=tests/test_spliceai_wrapper.py | repo=RipollJ/spliceai-wrapper | head=29adcc7c93fa45bec953e9df078151653f419a73 | licenses=["MIT"] | count=3 | events=2019-10-08T03:38:55.000Z → 2021-11-15T17:51:09.000Z
max_forks:  path=tests/test_spliceai_wrapper.py | repo=RipollJ/spliceai-wrapper | head=29adcc7c93fa45bec953e9df078151653f419a73 | licenses=["MIT"] | count=2 | events=2020-05-19T12:48:57.000Z → 2020-05-20T05:44:21.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `spliceai_wrapper` package."""
import pytest # noqa
avg_line_length: 18.833333 | max_line_length: 43 | alphanum_fraction: 0.637168 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 93/0.823009 (count/score)
---
hexsha: c68186a925cc94d28ae976073b01ff32e11600e6 | size: 313 | ext: py | lang: Python
max_stars:  path=src/pythonboilerplate/app.py | repo=favgeris/pythonBoilerplate | head=c367758ae01137f3b499ca0d4f8ebb414ae2c4d2 | licenses=["MIT"] | count=null | events=null → null
max_issues: path=src/pythonboilerplate/app.py | repo=favgeris/pythonBoilerplate | head=c367758ae01137f3b499ca0d4f8ebb414ae2c4d2 | licenses=["MIT"] | count=null | events=null → null
max_forks:  path=src/pythonboilerplate/app.py | repo=favgeris/pythonBoilerplate | head=c367758ae01137f3b499ca0d4f8ebb414ae2c4d2 | licenses=["MIT"] | count=null | events=null → null
content:
import argparse
import logging
import os
import pathlib
from typing import Union
logger = logging.getLogger(__name__)
def run(somearg) -> int:
"""Run app"""
try:
print(f'Some exciting argument: {somearg}')
except RuntimeError as ex:
logger.error(ex)
return 1
return 0
avg_line_length: 15.65 | max_line_length: 51 | alphanum_fraction: 0.658147 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 49/0.15655 (count/score)
---
hexsha: c681980ffe6cfcb7e6776781d202be19d967c86b | size: 4,656 | ext: py | lang: Python
max_stars:  path=hard-gists/81635e6cbc933b7e8862/snippet.py | repo=jjhenkel/dockerizeme | head=eaa4fe5366f6b9adf74399eab01c712cacaeb279 | licenses=["Apache-2.0"] | count=21 | events=2019-07-08T08:26:45.000Z → 2022-01-24T23:53:25.000Z
max_issues: path=hard-gists/81635e6cbc933b7e8862/snippet.py | repo=jjhenkel/dockerizeme | head=eaa4fe5366f6b9adf74399eab01c712cacaeb279 | licenses=["Apache-2.0"] | count=5 | events=2019-06-15T14:47:47.000Z → 2022-02-26T05:02:56.000Z
max_forks:  path=hard-gists/81635e6cbc933b7e8862/snippet.py | repo=jjhenkel/dockerizeme | head=eaa4fe5366f6b9adf74399eab01c712cacaeb279 | licenses=["Apache-2.0"] | count=17 | events=2019-05-16T03:50:34.000Z → 2021-01-14T14:35:12.000Z
content:
import os
import sys
import time
import atomac
import subprocess
if len(sys.argv) < 2:
print "Usage: bouncer.py <path_to_logic_project> (<path_to_logic_project>)"
sys.exit(1)
bundleId = 'com.apple.logic10'
for project in sys.argv[1:]:
projectName = project.split('/')[-1].replace('.logicx', '')
filename = projectName + ".wav"
print "Opening %s..." % project
# Open a project file
subprocess.call(['open', project])
print "Activating Logic Pro X..."
logic = atomac.getAppRefByBundleId(bundleId)
logic.activate()
print "Waiting for project '%s' to open..." % projectName
while len(filter(lambda x: projectName in x.AXTitle, logic.windows())) == 0:
time.sleep(0.1)
# Wait for the window to load
time.sleep(1)
print "Triggering bounce operation..."
logic.activate()
logic.sendGlobalKeyWithModifiers('b', [atomac.AXKeyCodeConstants.COMMAND])
print "Waiting for bounce window..."
bounce_window = None
while not bounce_window:
bounce_window = filter(lambda x: ('Output 1-2' in x.AXTitle) or
('Bounce' in x.AXTitle),
logic.windows())
time.sleep(0.1)
bounce_window = bounce_window[0]
print "Selecting output formats..."
qualityScrollArea = bounce_window.findFirst(AXRole='AXScrollArea')
qualityTable = qualityScrollArea.findFirst(AXRole='AXTable')
for row in qualityTable.findAll(AXRole='AXRow'):
rowName = row.findFirst(AXRole='AXTextField').AXValue
checkbox = row.findFirst(AXRole='AXCheckBox')
if rowName == 'PCM':
if checkbox.AXValue == 0:
print "Selected %s output format." % rowName
checkbox.Press()
else:
print "%s output format selected." % rowName
elif checkbox.AXValue == 1:
print "Deselected %s output format." % rowName
checkbox.Press()
print "Pressing Bounce button..."
bounce_button = bounce_window.findFirst(AXRole="AXButton",
AXTitle="Bounce")
if not bounce_button:
bounce_button = bounce_window.findFirst(
AXRole="AXButton",
AXTitle="OK"
)
bounce_button.Press()
bounce_window = None
# bounce_window is now gone and we have a modal dialog about saving
print "Waiting for save window..."
save_window = None
while not save_window:
save_window = filter(lambda x: ('Output 1-2' in x.AXTitle) or
('Bounce' in x.AXTitle),
logic.windows())
time.sleep(0.1)
save_window = save_window[0]
print "Entering filename..."
filenameBox = save_window.findFirst(AXRole="AXTextField")
filenameBox.AXValue = filename
print "Pressing 'Bounce' on the save window..."
bounce_button = save_window.findFirst(AXRole="AXButton", AXTitle="Bounce")
bounce_button.Press()
# Check to see if we got a "this file already exists" dialog
if len(save_window.sheets()) > 0:
print "Allowing overwriting of existing file..."
overwrite_sheet = save_window.sheets()[0]
overwrite_sheet.findFirst(AXRole="AXButton",
AXTitle=u"Replace").Press()
print "Bouncing '%s'..." % projectName
# All UI calls will block now, because Logic blocks the UI thread while bouncing
while len(logic.windows()) > 1:
time.sleep(0.1)
print "Waiting for Logic to regain its senses..."
time.sleep(2)
# Done - should be saved now.
# Close the window with command-option-W
logic.activate()
time.sleep(1)
print "Closing project '%s'..." % projectName
logic.sendGlobalKeyWithModifiers('w', [
atomac.AXKeyCodeConstants.COMMAND, atomac.AXKeyCodeConstants.OPTION
])
print "Waiting for the 'do you want to save changes' window..."
save_window = None
attempts = 0
while not save_window and attempts < 20:
save_window = filter(lambda x: '' == x.AXTitle, logic.windows())
time.sleep(0.1)
attempts += 1
if save_window:
print "Saying 'No, I don't want to save changes'..."
save_window = save_window[0]
logic.activate()
# Click the "Don't Save" button
filter(lambda x: 'Don' in x.AXTitle, save_window.findAll(AXRole="AXButton"))[0].Press()
print "Waiting for all Logic windows to close..."
while len(logic.windows()) > 0:
time.sleep(0.5)
print "Terminating Logic."
atomac.terminateAppByBundleId(bundleId)
avg_line_length: 30.631579 | max_line_length: 95 | alphanum_fraction: 0.618986 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 1,346/0.289089 (count/score)
---
hexsha: c6832c12d1f11f0fd4b7b74f990fd950eb68d5c6 | size: 2,506 | ext: py | lang: Python
max_stars:  path=functions/formatString.py | repo=Steve-Xyh/AutoAoxiang | head=a8f1abed0f17b967456b1fa539c0aae79dac1d01 | licenses=["WTFPL"] | count=7 | events=2020-02-17T08:12:14.000Z → 2021-12-29T09:41:35.000Z
max_issues: path=functions/formatString.py | repo=Steve-Xyh/AutoAoxiang | head=a8f1abed0f17b967456b1fa539c0aae79dac1d01 | licenses=["WTFPL"] | count=null | events=null → null
max_forks:  path=functions/formatString.py | repo=Steve-Xyh/AutoAoxiang | head=a8f1abed0f17b967456b1fa539c0aae79dac1d01 | licenses=["WTFPL"] | count=1 | events=2020-07-24T07:16:14.000Z → 2020-07-24T07:16:14.000Z
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import colorama
colorama.init(autoreset=True)
logData = {
'所在位置': 'Location',
'是否经停湖北': '否',
'接触湖北籍人员': '否',
'接触确诊疑似': '否',
'今日体温': '37.2度以下',
'有无疑似或异常': '无',
'是否隔离': '否',
}
def log_line(dic: dict, color=True):
'''
Single-line log with Chinese field labels
#### Parameters::
dic: log dict(e.g. {name: value})
'''
time_info = setColor(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
color='greenFore') if color else datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
res = '[' + time_info + '] '
for key in dic:
flg = dic[key] is not None
res += str(key).ljust(12, chr(12288))
val_info = setColor(dic[key], color='yellowFore') if color else dic[key]
res += val_info if flg else ''.ljust(20, chr(12288)) + '\n'
return res
def log_cn(dic: dict):
"""
中文多行log
:param dic: log dict(e.g. {name: value})
"""
formLen = 40
res = '-' * formLen + '\n'
res += '[' + setColor(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'), color='greenFore') + ']\n'
for key in dic:
flg = dic[key] is not None
res += str(key).ljust(12, chr(12288))
res += (setColor(dic[key], color='yellowFore')
if flg else '').ljust(20, chr(12288)) + '\n'
res += '-' * formLen
return res
def log_en(dic):
"""
英文log
:param dic: log dict(e.g. {name: value})
"""
formLen = 40
res = '-' * formLen + '\n'
res += '[' + setColor(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'), color='greenFore') + ']\n'
for key in dic:
flg = dic[key] is not None
res += str(key).ljust(20)
res += (setColor(dic[key], color='yellowFore')
if flg else '').ljust(20) + '\n'
res += '-' * formLen
return res
def setColor(string, color):
'''Set the terminal color of a string'''
convertColor = {
'redFore': colorama.Fore.RED + colorama.Back.RESET,
'redBack': colorama.Fore.WHITE + colorama.Back.RED,
'greenFore': colorama.Fore.GREEN + colorama.Back.RESET,
'greenBack': colorama.Fore.BLACK + colorama.Back.GREEN,
'yellowFore': colorama.Fore.YELLOW + colorama.Back.RESET,
}
return colorama.Style.BRIGHT + convertColor[color] + string + colorama.Style.RESET_ALL
if __name__ == "__main__":
a = 'This is red.'
b = setColor(a, 'redFore')
print(b)
print(log_cn(logData))
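# Illustrative extra calls (not in the original script): the single-line and
# English variants render the same dict with different layouts.
print(log_line(logData))
print(log_en(logData))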
avg_line_length: 25.571429 | max_line_length: 111 | alphanum_fraction: 0.541899 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 774/0.294745 (count/score)
---
hexsha: c6843679e999329dca1a8986c704607c2cb84a96 | size: 433 | ext: py | lang: Python
max_stars:  path=2 - Automation tools with IP hiding techniques/checkValidJson.py | repo=Phong940253/facebook-data-extraction | head=fa64680dcff900db4d852af06ff792ccf4d5be33 | licenses=["MIT"] | count=null | events=null → null
max_issues: path=2 - Automation tools with IP hiding techniques/checkValidJson.py | repo=Phong940253/facebook-data-extraction | head=fa64680dcff900db4d852af06ff792ccf4d5be33 | licenses=["MIT"] | count=null | events=null → null
max_forks:  path=2 - Automation tools with IP hiding techniques/checkValidJson.py | repo=Phong940253/facebook-data-extraction | head=fa64680dcff900db4d852af06ff792ccf4d5be33 | licenses=["MIT"] | count=null | events=null → null
content:
import json
import glob
groupPost = glob.glob("rawData/*/*/*.json")
pagePost = glob.glob("rawData/*/*.json")
groupPagePost = groupPost + pagePost
def is_json(myjson):
try:
json.load(myjson)
except ValueError as e:
return False
return True
for postFile in groupPagePost:
with open(postFile, "r", encoding="utf-8") as f:
valid = is_json(f)
if not valid:
print(postFile)
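
# A quick inline check of is_json() (data invented, no files needed):
# json.JSONDecodeError subclasses ValueError, so both paths are covered.
#   >>> import io
#   >>> is_json(io.StringIO('{"ok": true}'))   # True
#   >>> is_json(io.StringIO('not json'))       # False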
avg_line_length: 19.681818 | max_line_length: 52 | alphanum_fraction: 0.628176 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 48/0.110855 (count/score)
---
hexsha: c684d80b73e9aa13f3fe72698614ec87757d94ac | size: 805 | ext: py | lang: Python
max_stars:  path=Observed_Game_Parra/consent/models.py | repo=danielfParra/Lying_Online_Parra2022 | head=10e8ef6248f684f63e9dea1314ef57f197e48773 | licenses=["CC0-1.0"] | count=null | events=null → null
max_issues: path=Observed_Game_Parra/consent/models.py | repo=danielfParra/Lying_Online_Parra2022 | head=10e8ef6248f684f63e9dea1314ef57f197e48773 | licenses=["CC0-1.0"] | count=null | events=null → null
max_forks:  path=Observed_Game_Parra/consent/models.py | repo=danielfParra/Lying_Online_Parra2022 | head=10e8ef6248f684f63e9dea1314ef57f197e48773 | licenses=["CC0-1.0"] | count=null | events=null → null
content:
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
author = 'Daniel Parra'
doc = """
Consent
"""
class Constants(BaseConstants):
name_in_url = 'consent'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
consent = models.IntegerField(label = '''I have read the data protection
information and I consent to participation in the experiment and the stated
processing of data:''',
choices = [[0, 'No'], [1, 'Yes']])
consent2 = models.IntegerField(label = '''Can you be in front of the screen for the
next 15 minutes?''',
choices = [[0, 'No'], [1, 'Yes']])
avg_line_length: 18.72093 | max_line_length: 88 | alphanum_fraction: 0.649689 | classes: 584/0.725466 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 266/0.330435 (count/score)
---
hexsha: c685b0ee814971262f2ee615d82d87aa09cf8cef | size: 448 | ext: py | lang: Python
max_stars:  path=tests/compute/utils.py | repo=joshcarty/dgl | head=4464b9734c1061bd84325a54883c5046031def37 | licenses=["Apache-2.0"] | count=4 | events=2018-12-25T14:59:08.000Z → 2021-07-02T12:36:40.000Z
max_issues: path=tests/compute/utils.py | repo=xyanAI/dgl | head=36daf66f6216bad4d30651311bcb87aa45dd33d5 | licenses=["Apache-2.0"] | count=6 | events=2018-12-13T15:22:08.000Z → 2021-04-22T02:40:27.000Z
max_forks:  path=tests/compute/utils.py | repo=xyanAI/dgl | head=36daf66f6216bad4d30651311bcb87aa45dd33d5 | licenses=["Apache-2.0"] | count=4 | events=2020-12-26T10:39:36.000Z → 2020-12-26T12:38:52.000Z
content:
import pytest
import backend as F
if F._default_context_str == 'cpu':
parametrize_dtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
else:
# only test int32 on GPU because many graph operators are not supported for int64.
parametrize_dtype = pytest.mark.parametrize("idtype", [F.int32])
def check_fail(fn, *args, **kwargs):
try:
fn(*args, **kwargs)
return False
except:
return True
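
# A hedged usage sketch (test body invented): applying the marker makes pytest
# run the decorated test once per dtype in the parametrized list.
@parametrize_dtype
def test_idtype_example(idtype):
    assert idtype in (F.int32, F.int64)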
avg_line_length: 28 | max_line_length: 86 | alphanum_fraction: 0.676339 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 103/0.229911 (count/score)
---
hexsha: c685c691386312383505490cedb37b0394b57c94 | size: 1,069 | ext: py | lang: Python
max_stars:  path=apps/controllerx/cx_helper.py | repo=xaviml/z2m_ikea_controller | head=e612af5a913e8b4784dcaa23ea5319115427d083 | licenses=["MIT"] | count=19 | events=2019-11-21T19:51:40.000Z → 2020-01-14T09:24:33.000Z
max_issues: path=apps/controllerx/cx_helper.py | repo=xaviml/z2m_ikea_controller | head=e612af5a913e8b4784dcaa23ea5319115427d083 | licenses=["MIT"] | count=11 | events=2019-11-20T16:43:35.000Z → 2020-01-17T16:23:06.000Z
max_forks:  path=apps/controllerx/cx_helper.py | repo=xaviml/z2m_ikea_controller | head=e612af5a913e8b4784dcaa23ea5319115427d083 | licenses=["MIT"] | count=5 | events=2019-12-20T21:31:07.000Z → 2020-01-06T18:49:52.000Z
content:
import importlib
import os
import pkgutil
from typing import Any, List, Type
def _import_modules(file_dir: str, package: str) -> None:
pkg_dir = os.path.dirname(file_dir)
for (_, name, ispkg) in pkgutil.iter_modules([pkg_dir]):
if ispkg:
_import_modules(pkg_dir + "/" + name + "/__init__.py", package + "." + name)
else:
importlib.import_module("." + name, package)
def _all_subclasses(cls: Type[Any]) -> List[Type[Any]]:
return list(
set(type.__subclasses__(cls)).union(
[s for c in type.__subclasses__(cls) for s in _all_subclasses(c)]
)
)
def get_classes(file_: str, package_: str, class_: Type[Any]) -> List[Type[Any]]:
_import_modules(file_, package_)
subclasses = _all_subclasses(class_)
subclasses = [cls_ for cls_ in subclasses if f"{package_}." in cls_.__module__]
return subclasses
def get_instances(file_: str, package_: str, class_: Type[Any]) -> List[Any]:
classes = get_classes(file_, package_, class_)
return [cls_() for cls_ in classes]
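
# A runnable sketch (class names invented) of the discovery that get_classes()
# builds on: _all_subclasses() walks the whole inheritance tree.
if __name__ == "__main__":
    class _Base: pass
    class _Child(_Base): pass
    class _GrandChild(_Child): pass

    print(sorted(c.__name__ for c in _all_subclasses(_Base)))
    # ['_Child', '_GrandChild'] (a set is used internally, so order varies)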
avg_line_length: 31.441176 | max_line_length: 88 | alphanum_fraction: 0.658559 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 37/0.034612 (count/score)
---
hexsha: c686c1dded95c4fb11f50e8f958330e48395c1cb | size: 304 | ext: py | lang: Python
max_stars:  path=34.PySimpleGUI.py | repo=sarincr/GUI-With-Tkinter-using-Python | head=3b57fc4aeed9e4a3018fc940bafdb4160ec853fc | licenses=["MIT"] | count=null | events=null → null
max_issues: path=34.PySimpleGUI.py | repo=sarincr/GUI-With-Tkinter-using-Python | head=3b57fc4aeed9e4a3018fc940bafdb4160ec853fc | licenses=["MIT"] | count=null | events=null → null
max_forks:  path=34.PySimpleGUI.py | repo=sarincr/GUI-With-Tkinter-using-Python | head=3b57fc4aeed9e4a3018fc940bafdb4160ec853fc | licenses=["MIT"] | count=null | events=null → null
content:
import PySimpleGUI as PySG
lay = [ [PySG.Text("What's your name?")],
[PySG.Input()],
[PySG.Button('Ok')] ]
wd = PySG.Window('Python Simple GUI', lay)
event, values = wd.read()
print('Hello', values[0])
wd.close()
avg_line_length: 21.714286 | max_line_length: 48 | alphanum_fraction: 0.457237 | classes: 0/0 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 49/0.161184 (count/score)
---
hexsha: c687f6be8dfca9ca6885acd96b6da1567a4b50ba | size: 270 | ext: py | lang: Python
max_stars:  path=slbo/policies/uniform_policy.py | repo=LinZichuan/AdMRL | head=50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | licenses=["MIT"] | count=27 | events=2020-06-17T11:40:17.000Z → 2021-11-16T07:39:33.000Z
max_issues: path=slbo/policies/uniform_policy.py | repo=LinZichuan/AdMRL | head=50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | licenses=["MIT"] | count=3 | events=2020-06-19T07:01:48.000Z → 2020-06-19T07:14:57.000Z
max_forks:  path=slbo/policies/uniform_policy.py | repo=LinZichuan/AdMRL | head=50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | licenses=["MIT"] | count=5 | events=2020-11-19T01:11:24.000Z → 2021-12-24T09:03:56.000Z
content:
import numpy as np
from . import BasePolicy
class UniformPolicy(BasePolicy):
def __init__(self, dim_action):
self.dim_action = dim_action
def get_actions(self, states):
return np.random.uniform(-1., 1., states.shape[:-1] + (self.dim_action,))
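
# A quick shape check (numbers invented): a batch of 4 states with an
# 11-dimensional observation yields a (4, 2) array of actions in [-1, 1).
#   policy = UniformPolicy(dim_action=2)
#   policy.get_actions(np.zeros((4, 11))).shape   # (4, 2)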
avg_line_length: 24.545455 | max_line_length: 81 | alphanum_fraction: 0.685185 | classes: 223/0.825926 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 0/0 (count/score)
---
hexsha: c688fe0af58ac798c7af0c9f68af25aff660071c | size: 5,304 | ext: py | lang: Python
max_stars:  path=models/ScrabbleGAN.py | repo=iambhuvi/ScrabbleGAN | head=30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b | licenses=["MIT"] | count=9 | events=2021-02-02T06:31:32.000Z → 2021-11-03T11:19:58.000Z
max_issues: path=models/ScrabbleGAN.py | repo=iambhuvi/ScrabbleGAN | head=30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b | licenses=["MIT"] | count=1 | events=2021-12-01T12:13:14.000Z → 2021-12-01T12:13:14.000Z
max_forks:  path=models/ScrabbleGAN.py | repo=iambhuvi/ScrabbleGAN | head=30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b | licenses=["MIT"] | count=6 | events=2021-02-02T06:31:49.000Z → 2022-01-21T14:33:43.000Z
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from models.model_utils import BigGAN as BGAN
from utils.data_utils import *
import pandas as pd
class Recognizer(nn.Module):
def __init__(self, cfg):
super(Recognizer, self).__init__()
input_size = 1
conv_is = [1] + cfg.r_fs
self.convs = nn.Sequential(
nn.Sequential(
nn.Conv2d(conv_is[0], cfg.r_fs[0], kernel_size=cfg.r_ks[0], padding=cfg.r_pads[0]),
nn.ReLU(True),
nn.MaxPool2d(2)
),
nn.Sequential(
nn.Conv2d(conv_is[1], cfg.r_fs[1], kernel_size=cfg.r_ks[1], padding=cfg.r_pads[1]),
nn.ReLU(True),
nn.MaxPool2d(2)
),
nn.Sequential(
nn.Conv2d(conv_is[2], cfg.r_fs[2], kernel_size=cfg.r_ks[2], padding=cfg.r_pads[2]),
nn.BatchNorm2d(cfg.r_fs[2]),
nn.ReLU(True)
),
nn.Sequential(
nn.Conv2d(conv_is[3], cfg.r_fs[3], kernel_size=cfg.r_ks[3], padding=cfg.r_pads[3]),
nn.ReLU(True),
nn.MaxPool2d((2, 2), (2, 1), (0, 1))
),
nn.Sequential(
nn.Conv2d(conv_is[4], cfg.r_fs[4], kernel_size=cfg.r_ks[4], padding=cfg.r_pads[4]),
nn.BatchNorm2d(cfg.r_fs[4]),
nn.ReLU(True)
),
nn.Sequential(
nn.Conv2d(conv_is[5], cfg.r_fs[5], kernel_size=cfg.r_ks[5], padding=cfg.r_pads[5]),
nn.ReLU(True),
nn.MaxPool2d((2, 2), (2, 1), (0, 1))
),
nn.Sequential(
nn.Conv2d(conv_is[6], cfg.r_fs[6], kernel_size=cfg.r_ks[6], padding=cfg.r_pads[6]),
nn.BatchNorm2d(cfg.r_fs[6]),
nn.ReLU(True)
)
)
self.output = nn.Linear(512, cfg.num_chars)
self.prob = nn.LogSoftmax(dim=2)
def forward(self, x):
out = self.convs(x)
out = out.squeeze(2) # [b, c, w]
out = out.permute(0, 2, 1) # [b, w, c]
# Predict for len(num_chars) classes at each timestep
out = self.output(out)
out = self.prob(out)
return out
class ScrabbleGAN(nn.Module):
def __init__(self, cfg, char_map):
super().__init__()
self.z_dist = torch.distributions.Normal(loc=0, scale=1.)
self.z_dim = cfg.z_dim
# Get word list from lexicon to be used to generate fake images
if cfg.dataset == 'IAM':
self.fake_words = pd.read_csv(cfg.lexicon_file, sep='\t', names=['words'])
# keep only words shorter than 20 characters
self.fake_words = self.fake_words.loc[self.fake_words.words.str.len() < 20]
self.fake_words = self.fake_words.words.to_list()
else:
exception_chars = ['ï', 'ü', '.', '_', 'ö', ',', 'ã', 'ñ']
self.fake_words = pd.read_csv(cfg.lexicon_file, sep='\t')['lemme']
self.fake_words = [word.split()[-1] for word in self.fake_words
if (pd.notnull(word) and all(char not in word for char in exception_chars))]
fake_words_clean = []
for word in self.fake_words:
word_set = set(word)
if len(word_set.intersection(char_map.keys())) == len(word_set):
fake_words_clean.append(word)
self.fake_words = fake_words_clean
self.fake_y_dist = torch.distributions.Categorical(
torch.tensor([1. / len(self.fake_words)] * len(self.fake_words)))
self.batch_size = cfg.batch_size
self.num_chars = cfg.num_chars
self.word_map = WordMap(char_map)
self.batch_size = cfg.batch_size
self.num_chars = cfg.num_chars
self.config = cfg
self.R = Recognizer(cfg)
self.G = BGAN.Generator(resolution=cfg.resolution, G_shared=cfg.g_shared,
bn_linear=cfg.bn_linear, n_classes=cfg.num_chars, hier=True)
self.D = BGAN.Discriminator(resolution=cfg.resolution, bn_linear=cfg.bn_linear, n_classes=cfg.num_chars)
def forward_fake(self, z=None, fake_y=None, b_size=None):
b_size = self.batch_size if b_size is None else b_size
# If z is not provided, sample it
if z is None:
self.z = self.z_dist.sample([b_size, self.z_dim]).to(self.config.device)
else:
self.z = z.repeat(b_size, 1).to(self.config.device)
# If fake words are not provided, sample it
if fake_y is None:
# Sample lexicon indices, get words, and encode them using char_map
sample_lex_idx = self.fake_y_dist.sample([b_size])
fake_y = [self.fake_words[i] for i in sample_lex_idx]
fake_y, fake_y_lens = self.word_map.encode(fake_y)
self.fake_y_lens = fake_y_lens.to(self.config.device)
# Convert y into one-hot
self.fake_y = fake_y.to(self.config.device)
self.fake_y_one_hot = F.one_hot(fake_y, self.num_chars).to(self.config.device)
self.fake_img = self.G(self.z, self.fake_y_one_hot)
def create_model(config, char_map):
model = ScrabbleGAN(config, char_map)
model.to(config.device)
return model
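
# A standalone sketch (tiny vocabulary invented) of the label encoding used in
# forward_fake(): integer-encoded characters become one-hot rows before they
# condition the generator.
#   fake_y = torch.tensor([[1, 3, 0]])        # one word, three encoded chars
#   F.one_hot(fake_y, num_classes=5).shape    # torch.Size([1, 3, 5])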
avg_line_length: 37.617021 | max_line_length: 112 | alphanum_fraction: 0.572587 | classes: 4,989/0.939725 | generators: 0/0 | decorators: 0/0 | async_functions: 0/0 | documentation: 390/0.07346 (count/score)
---
hexsha: c689b60ebca7bfda5e5401b93bdc1651fc7b24be | size: 2,745 | ext: py | lang: Python
max_stars:  path=jobbing/controllers/providers_controller.py | repo=davidall-amdocs/jobbing | head=b13311da07606366dfbe2eb737483a5820038557 | licenses=["Apache-2.0"] | count=null | events=null → null
max_issues: path=jobbing/controllers/providers_controller.py | repo=davidall-amdocs/jobbing | head=b13311da07606366dfbe2eb737483a5820038557 | licenses=["Apache-2.0"] | count=1 | events=2021-06-10T03:34:07.000Z → 2021-06-10T03:34:07.000Z
max_forks:  path=jobbing/controllers/providers_controller.py | repo=davidall-amdocs/jobbing | head=b13311da07606366dfbe2eb737483a5820038557 | licenses=["Apache-2.0"] | count=1 | events=2022-02-14T15:51:01.000Z → 2022-02-14T15:51:01.000Z
content:
from flask import abort
from jobbing.models.user_profile import UserProfile # noqa: E501
from jobbing.models.service import Service # noqa: E501
from jobbing.DBModels import Profile as DBProfile
from jobbing.DBModels import Service as DBService
from jobbing.login import token_required
@token_required
def get_provider_by_id(provider_id): # noqa: E501
"""get_provider_by_id
Shows a provider identified by id # noqa: E501
:param provider_id: id of the service provider
:type provider_id: int
:rtype: UserProfile
"""
profile = DBProfile.query.filter(DBProfile.provider_id == provider_id).first()
if profile is None:
abort(404)
return UserProfile(userprofile_id=profile.userprofile_id,
first_name=profile.first_name,
second_name=profile.second_name,
first_surname=profile.first_surname,
second_surname=profile.second_surname,
birthdate=profile.birthdate,
curp=profile.curp,
mobile_number=profile.mobile_number,
home_number=profile.home_number,
office_number=profile.office_number,
facebook_profile=profile.facebook_profile,
linkedin_profile=profile.linkedin_profile,
twitter_profile=profile.twitter_profile,
id_image=profile.id_image,
status=profile.status,
created=profile.created,
updated=profile.updated,
credentials_id=profile.credentials_id,
org_id=profile.org_id,
address=profile.address)
@token_required
def get_services_by_provider_id(provider_id): # noqa: E501
"""get_services_by_provider_id
Show all Services that offers a provider # noqa: E501
:param provider_id: provider id
:type provider_id: int
:rtype: List[Service]
"""
services = DBService.query.filter(DBService.user_id == provider_id)
results = [
Service(
id = serv.service_id,
category_id = serv.category_id,
description = serv.description,
years_of_experience = serv.years_of_experience,
price_of_service = serv.price_of_service,
work_zone = serv.work_zone,
services_provided = serv.services_provided,
five_stars = serv.five_stars,
four_starts = serv.four_starts,
three_starts = serv.three_starts,
two_starts = serv.two_starts,
one_start = serv.one_start,
read_only = serv.read_only,
status_id = serv.status_id,
user_id = serv.user_id
) for serv in services]
return results
avg_line_length: 34.746835 | max_line_length: 82 | alphanum_fraction: 0.647723 | classes: 0/0 | generators: 0/0 | decorators: 2,450/0.892532 | async_functions: 0/0 | documentation: 427/0.155556 (count/score)
---
hexsha: c68a6d8f407663035b0e8aaa5e7a9d1c6021d7ca | size: 11,082 | ext: py | lang: Python
max_stars:  path=app/tests/evaluation_tests/test_views.py | repo=njmhendrix/grand-challenge.org | head=9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | licenses=["Apache-2.0"] | count=null | events=null → null
max_issues: path=app/tests/evaluation_tests/test_views.py | repo=njmhendrix/grand-challenge.org | head=9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | licenses=["Apache-2.0"] | count=null | events=null → null
max_forks:  path=app/tests/evaluation_tests/test_views.py | repo=njmhendrix/grand-challenge.org | head=9bc36f5e26561a78bd405e8ea5e4c0f86c95f011 | licenses=["Apache-2.0"] | count=null | events=null → null
content:
from collections import namedtuple
from datetime import timedelta
import factory
import pytest
from django.db.models import signals
from django.utils import timezone
from tests.factories import (
EvaluationFactory,
MethodFactory,
SubmissionFactory,
)
from tests.utils import (
get_view_for_user,
validate_admin_only_view,
validate_admin_or_participant_view,
validate_open_view,
)
def submission_and_evaluation(*, challenge, creator):
"""Creates a submission and an evaluation for that submission."""
s = SubmissionFactory(challenge=challenge, creator=creator)
e = EvaluationFactory(submission=s)
return s, e
def submissions_and_evaluations(two_challenge_sets):
"""
Create (e)valuations and (s)ubmissions for each (p)articipant and
(c)hallenge.
"""
SubmissionsAndEvaluations = namedtuple(
"SubmissionsAndEvaluations",
[
"p_s1",
"p_s2",
"p1_s1",
"p12_s1_c1",
"p12_s1_c2",
"e_p_s1",
"e_p_s2",
"e_p1_s1",
"e_p12_s1_c1",
"e_p12_s1_c2",
],
)
# participant 0, submission 1, challenge 1, etc
p_s1, e_p_s1 = submission_and_evaluation(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant,
)
p_s2, e_p_s2 = submission_and_evaluation(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant,
)
p1_s1, e_p1_s1 = submission_and_evaluation(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant1,
)
# participant12, submission 1 to each challenge
p12_s1_c1, e_p12_s1_c1 = submission_and_evaluation(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.participant12,
)
p12_s1_c2, e_p12_s1_c2 = submission_and_evaluation(
challenge=two_challenge_sets.challenge_set_2.challenge,
creator=two_challenge_sets.participant12,
)
return SubmissionsAndEvaluations(
p_s1,
p_s2,
p1_s1,
p12_s1_c1,
p12_s1_c2,
e_p_s1,
e_p_s2,
e_p1_s1,
e_p12_s1_c1,
e_p12_s1_c2,
)
@pytest.mark.django_db
def test_method_list(client, two_challenge_sets):
validate_admin_only_view(
viewname="evaluation:method-list",
two_challenge_set=two_challenge_sets,
client=client,
)
@pytest.mark.django_db
def test_method_create(client, two_challenge_sets):
validate_admin_only_view(
viewname="evaluation:method-create",
two_challenge_set=two_challenge_sets,
client=client,
)
@pytest.mark.django_db
def test_method_detail(client, two_challenge_sets):
method = MethodFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.admin,
)
validate_admin_only_view(
viewname="evaluation:method-detail",
two_challenge_set=two_challenge_sets,
reverse_kwargs={"pk": method.pk},
client=client,
)
@pytest.mark.django_db
@factory.django.mute_signals(signals.post_save)
def test_submission_list(client, two_challenge_sets):
validate_admin_or_participant_view(
viewname="evaluation:submission-list",
two_challenge_set=two_challenge_sets,
client=client,
)
p_s1, p_s2, p1_s1, p12_s1_c1, p12_s1_c2, *_ = submissions_and_evaluations(
two_challenge_sets
)
# Participants should only be able to see their own submissions
response = get_view_for_user(
viewname="evaluation:submission-list",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.challenge_set_1.participant,
)
assert str(p_s1.pk) in response.rendered_content
assert str(p_s2.pk) in response.rendered_content
assert str(p1_s1.pk) not in response.rendered_content
assert str(p12_s1_c1.pk) not in response.rendered_content
assert str(p12_s1_c2.pk) not in response.rendered_content
# Admins should be able to see all submissions
response = get_view_for_user(
viewname="evaluation:submission-list",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.challenge_set_1.admin,
)
assert str(p_s1.pk) in response.rendered_content
assert str(p_s2.pk) in response.rendered_content
assert str(p1_s1.pk) in response.rendered_content
assert str(p12_s1_c1.pk) in response.rendered_content
assert str(p12_s1_c2.pk) not in response.rendered_content
# Only submissions relevant to this challenge should be listed
response = get_view_for_user(
viewname="evaluation:submission-list",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.participant12,
)
assert str(p12_s1_c1.pk) in response.rendered_content
assert str(p12_s1_c2.pk) not in response.rendered_content
assert str(p_s1.pk) not in response.rendered_content
assert str(p_s2.pk) not in response.rendered_content
assert str(p1_s1.pk) not in response.rendered_content
@pytest.mark.django_db
def test_submission_create(client, two_challenge_sets):
validate_admin_or_participant_view(
viewname="evaluation:submission-create",
two_challenge_set=two_challenge_sets,
client=client,
)
response = get_view_for_user(
viewname="evaluation:submission-create",
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.challenge_set_1.participant,
client=client,
)
assert response.status_code == 200
assert "Creator" not in response.rendered_content
@pytest.mark.django_db
def test_legacy_submission_create(client, two_challenge_sets):
validate_admin_only_view(
viewname="evaluation:submission-create-legacy",
two_challenge_set=two_challenge_sets,
client=client,
)
response = get_view_for_user(
viewname="evaluation:submission-create-legacy",
challenge=two_challenge_sets.challenge_set_1.challenge,
user=two_challenge_sets.admin12,
client=client,
)
assert response.status_code == 200
assert "Creator" in response.rendered_content
@pytest.mark.django_db
def test_submission_time_limit(client, two_challenge_sets):
SubmissionFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant,
)
def get_submission_view():
return get_view_for_user(
viewname="evaluation:submission-create",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.challenge_set_1.participant,
)
assert "make 9 more" in get_submission_view().rendered_content
s = SubmissionFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant,
)
s.created = timezone.now() - timedelta(hours=23)
s.save()
assert "make 8 more" in get_submission_view().rendered_content
s = SubmissionFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant,
)
s.created = timezone.now() - timedelta(hours=25)
s.save()
assert "make 8 more" in get_submission_view().rendered_content
@pytest.mark.django_db
def test_submission_detail(client, two_challenge_sets):
submission = SubmissionFactory(
challenge=two_challenge_sets.challenge_set_1.challenge,
creator=two_challenge_sets.challenge_set_1.participant,
)
validate_admin_only_view(
viewname="evaluation:submission-detail",
two_challenge_set=two_challenge_sets,
reverse_kwargs={"pk": submission.pk},
client=client,
)
@pytest.mark.django_db
@factory.django.mute_signals(signals.post_save)
def test_evaluation_list(client, two_challenge_sets):
validate_admin_or_participant_view(
viewname="evaluation:list",
two_challenge_set=two_challenge_sets,
client=client,
)
(
*_,
e_p_s1,
e_p_s2,
e_p1_s1,
e_p12_s1_c1,
e_p12_s1_c2,
) = submissions_and_evaluations(two_challenge_sets)
# Participants should only be able to see their own evaluations
response = get_view_for_user(
viewname="evaluation:list",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.challenge_set_1.participant,
)
assert str(e_p_s1.pk) in response.rendered_content
assert str(e_p_s2.pk) in response.rendered_content
assert str(e_p1_s1.pk) not in response.rendered_content
assert str(e_p12_s1_c1.pk) not in response.rendered_content
assert str(e_p12_s1_c2.pk) not in response.rendered_content
# Admins should be able to see all evaluations
response = get_view_for_user(
viewname="evaluation:list",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.challenge_set_1.admin,
)
assert str(e_p_s1.pk) in response.rendered_content
assert str(e_p_s2.pk) in response.rendered_content
assert str(e_p1_s1.pk) in response.rendered_content
assert str(e_p12_s1_c1.pk) in response.rendered_content
assert str(e_p12_s1_c2.pk) not in response.rendered_content
# Only evaluations relevant to this challenge should be listed
response = get_view_for_user(
viewname="evaluation:list",
challenge=two_challenge_sets.challenge_set_1.challenge,
client=client,
user=two_challenge_sets.participant12,
)
assert str(e_p12_s1_c1.pk) in response.rendered_content
assert str(e_p12_s1_c2.pk) not in response.rendered_content
assert str(e_p_s1.pk) not in response.rendered_content
assert str(e_p_s2.pk) not in response.rendered_content
assert str(e_p1_s1.pk) not in response.rendered_content
@pytest.mark.django_db
def test_leaderboard(client, eval_challenge_set):
validate_open_view(
viewname="evaluation:leaderboard",
challenge_set=eval_challenge_set.challenge_set,
client=client,
)
# TODO: test that private results cannot be seen
@pytest.mark.django_db
def test_evaluation_detail(client, eval_challenge_set):
submission = SubmissionFactory(
challenge=eval_challenge_set.challenge_set.challenge,
creator=eval_challenge_set.challenge_set.participant,
)
e = EvaluationFactory(submission=submission)
validate_open_view(
viewname="evaluation:detail",
challenge_set=eval_challenge_set.challenge_set,
reverse_kwargs={"pk": e.pk},
client=client,
)
| 33.889908
| 78
| 0.721891
| 0
| 0
| 0
| 0
| 8,631
| 0.778831
| 0
| 0
| 1,328
| 0.119834
|
c68b2764c5975cf7d7cb7f41a4acfa6e9c0a27aa
| 2,496
|
py
|
Python
|
Misc/convm.py
|
Dengjianping/AlgorithmsPractice
|
612f40b4fca4c1cf2b0cd9ca4df63e217b7affbf
|
[
"MIT"
] | null | null | null |
Misc/convm.py
|
Dengjianping/AlgorithmsPractice
|
612f40b4fca4c1cf2b0cd9ca4df63e217b7affbf
|
[
"MIT"
] | null | null | null |
Misc/convm.py
|
Dengjianping/AlgorithmsPractice
|
612f40b4fca4c1cf2b0cd9ca4df63e217b7affbf
|
[
"MIT"
] | null | null | null |
# 2-D and 1-D discrete convolution in pure Python, supporting the 'full',
# 'same' and 'valid' modes familiar from numpy/scipy.
a=[[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5]]
b=[[1,2,3,4],[3,4,5,5]]
def convMatrix(a, b, mode='full'):
if mode == 'full':
row=len(a)+len(b) - 1
col=len(a[0])+len(b[0]) - 1
c= [[0 for i in range(col)] for i in range(row)]
for i in range(len(a)):
for j in range(len(a[0])):
for m in range(len(b)):
for n in range(len(b[0])):
c[i+m][j+n] += a[i][j] * b[m][n]
return c
if mode == 'same':
row=len(a)
col=len(a[0])
c= [[0 for i in range(col)] for i in range(row)]
for i in range(len(a)):
for j in range(len(a[0])):
for m in range(len(b)):
for n in range(len(b[0])):
                        # floor division keeps the indices integral under Python 3
                        if (0 <= i+m-len(b)//2 < row and 0 <= j+n-len(b[0])//2 < col):
                            c[i+m-len(b)//2][j+n-len(b[0])//2] += a[i][j] * b[m][n]
return c
if mode == 'valid':
row=len(a)-len(b) + 1
col=len(a[0])-len(b[0]) + 1
c= [[0 for i in range(col)] for i in range(row)]
for i in range(len(a)):
for j in range(len(a[0])):
for m in range(len(b)):
for n in range(len(b[0])):
r = i-len(b)+m+1
co = j+n-len(b[0])+1
if (0 <= r < row and 0 <= co < col):
c[r][co] += a[i][j] * b[m][n]
return c
def convArray(a, b, mode='full'):
if mode == 'full':
c=[0 for i in range(len(a) + len(b) - 1)]
for i, value in enumerate(a):
for j, key in enumerate(b):
c[i+j] += a[i] * b[j]
return c
if mode == 'same':
c=[0 for i in range(len(a))]
for i, value in enumerate(a):
for j, key in enumerate(b):
            if (i+j-len(b)//2>=0 and i+j-len(b)//2<len(a)):
                c[i+j-len(b)//2] += a[i] * b[j]
return c
if mode == 'valid':
c=[0 for i in range(len(a) - len(b)+1)]
for i, value in enumerate(a):
for j, key in enumerate(b):
if (i+j-len(b)+1>=0 and i+j-len(b)+1 < len(c)):
c[i+j-len(b)+1] += a[i] * b[j]
return c
print(convArray(a[0], b[0], 'full'))
print(convArray(a[0], b[0], 'same'))
print(convArray(a[0], b[0], 'valid'))
print(convMatrix(a, b, 'full'))
print(convMatrix(a, b, 'same'))
print(convMatrix(a, b, 'valid'))
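# A minimal cross-check, assuming numpy is available (it is not imported by
# this script itself): numpy.convolve implements the same 'full' and 'valid'
# semantics for the 1-D case, so convArray should agree with it element-wise.
# Note that the 'same' mode above centres with an offset of len(b)//2 rather
# than numpy's (len(b)-1)//2, so the two differ by one position for
# even-length kernels such as b[0].
import numpy as np
for _mode in ('full', 'valid'):
    assert convArray(a[0], b[0], _mode) == np.convolve(a[0], b[0], mode=_mode).tolist()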
| 35.15493
| 84
| 0.405849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.035256
|
c68b2c92d58355bdae49241aa6bb6793ce19665b
| 1,170
|
py
|
Python
|
com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/ports/Atomic3SIn0AIn2Out.py
|
systemincloud/sic-examples
|
b82d5d672f515b1deb5ddb35c5a93c003e03c030
|
[
"Apache-2.0"
] | null | null | null |
com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/ports/Atomic3SIn0AIn2Out.py
|
systemincloud/sic-examples
|
b82d5d672f515b1deb5ddb35c5a93c003e03c030
|
[
"Apache-2.0"
] | 15
|
2015-01-08T20:28:19.000Z
|
2016-07-20T07:19:15.000Z
|
com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/ports/Atomic3SIn0AIn2Out.py
|
systemincloud/sic-examples
|
b82d5d672f515b1deb5ddb35c5a93c003e03c030
|
[
"Apache-2.0"
] | null | null | null |
from sicpythontask.PythonTaskInfo import PythonTaskInfo
from sicpythontask.PythonTask import PythonTask
from sicpythontask.InputPort import InputPort
from sicpythontask.OutputPort import OutputPort
from sicpythontask.data.Int32 import Int32
from sicpythontask.data.Float32 import Float32
from sicpythontask.data.Float64 import Float64
from sicpythontask.data.Text import Text
@PythonTaskInfo
class Atomic3SIn0AIn2Out(PythonTask):
def __init_ports__(self):
self.in1 = InputPort(name="in1", data_type=Float64)
self.in2 = InputPort(name="in2", data_type=Float32)
self.in3 = InputPort(name="in3", data_type=Int32)
self.out1 = OutputPort(name="out1", data_type=Int32)
self.out2 = OutputPort(name="out2", data_type=Text)
def execute(self, grp):
in1Text = self.in1.get_data(Float64)
in2Text = self.in2.get_data(Float32)
in3Text = self.in3.get_data(Int32)
in1Double = in1Text.value
in2Float = in2Text.value
sum_ = in1Double + in2Float
self.out2.put_data(Text(str(in1Double) + " + " + str(in2Float) + " = " + str(sum_)))
        self.out1.put_data(Int32(in3Text.value))
| 39
| 92
| 0.718803
| 775
| 0.662393
| 0
| 0
| 791
| 0.676068
| 0
| 0
| 37
| 0.031624
|
c68c3919e177e8d1de7b30c2a650b62b74c47975
| 6,811
|
py
|
Python
|
bin/extract_bcs.py
|
dmaticzka/bctools
|
e4733b1f59a151f8158a8173a3cde48a5d119bc2
|
[
"Apache-2.0"
] | null | null | null |
bin/extract_bcs.py
|
dmaticzka/bctools
|
e4733b1f59a151f8158a8173a3cde48a5d119bc2
|
[
"Apache-2.0"
] | 3
|
2016-04-24T14:26:17.000Z
|
2017-04-28T15:17:20.000Z
|
bin/extract_bcs.py
|
dmaticzka/bctools
|
e4733b1f59a151f8158a8173a3cde48a5d119bc2
|
[
"Apache-2.0"
] | 2
|
2016-05-06T03:57:25.000Z
|
2018-11-06T10:57:32.000Z
|
#!/usr/bin/env python
import argparse
import logging
import re
from sys import stdout
from Bio.SeqIO.QualityIO import FastqGeneralIterator
# avoid ugly python IOError when stdout output is piped into another program
# and then truncated (such as piping to head)
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
tool_description = """
Extract barcodes from a FASTQ file according to a user-specified pattern. Starting from the 5'-end, positions marked by X will be moved into a separate FASTQ file. Positions marked by N will be kept.
By default output is written to stdout.
Example usage:
- remove barcode nucleotides at positions 1-3 and 6-7 from FASTQ; write modified
  FASTQ entries to output.fastq and barcode nucleotides to barcodes.fastq:
fastq_extract_barcodes.py barcoded_input.fastq XXXNNXX --out output.fastq --bcs barcodes.fastq
"""
# parse command line arguments
parser = argparse.ArgumentParser(description=tool_description,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"infile",
help="Path to fastq file.")
parser.add_argument(
"pattern",
help="Pattern of barcode nucleotides starting at 5'-end. X positions will be moved to the header, N positions will be kept.")
# optional arguments
parser.add_argument(
"-o", "--outfile",
help="Write results to this file.")
parser.add_argument(
"-b", "--bcs",
dest="out_bc_fasta",
help="Write barcodes to this file in FASTQ format.")
parser.add_argument(
"--fasta-barcodes",
dest="save_bcs_as_fa",
action="store_true",
help="Save extracted barcodes in FASTA format.")
parser.add_argument(
"-a", "--add-bc-to-fastq",
dest="add_to_head",
help="Append extracted barcodes to the FASTQ headers.",
action="store_true")
parser.add_argument(
"-v", "--verbose",
help="Be verbose.",
action="store_true")
parser.add_argument(
"-d", "--debug",
help="Print lots of debugging information",
action="store_true")
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(filename)s - %(levelname)s - %(message)s")
elif args.verbose:
logging.basicConfig(level=logging.INFO, format="%(filename)s - %(levelname)s - %(message)s")
else:
logging.basicConfig(format="%(filename)s - %(levelname)s - %(message)s")
logging.info("Parsed arguments:")
logging.info(" infile: '{}'".format(args.infile))
logging.info(" pattern: '{}'".format(args.pattern))
if args.outfile:
logging.info(" outfile: enabled writing to file")
logging.info(" outfile: '{}'".format(args.outfile))
if args.out_bc_fasta:
logging.info(" bcs: enabled writing barcodes to fastq file")
logging.info(" bcs: {}".format(args.out_bc_fasta))
if args.save_bcs_as_fa:
logging.info(" fasta-barcodes: write barcodes in fasta format instead of fastq")
logging.info("")
# check if supplied pattern is valid
valid_pattern = re.compile("^[XN]+$")
pattern_match = valid_pattern.match(args.pattern)
if pattern_match is None:
raise ValueError("Error: supplied pattern '{}' is not valid.".format(args.pattern))
# check if at least one barcode position is included in the pattern
has_bcpos_pattern = re.compile("X")
pattern_match = has_bcpos_pattern.search(args.pattern)
if pattern_match is None:
raise ValueError("Error: supplied pattern '{}' does not contain a barcode position 'X'.".format(args.pattern))
logging.info("Barcode pattern analysis:")
# get X positions of pattern string
barcode_nt_pattern = re.compile("X+")
barcode_positions = []
for m in re.finditer(barcode_nt_pattern, args.pattern):
logging.info(' found barcode positions in pattern: %02d-%02d: %s' % (m.start(), m.end(), m.group(0)))
barcode_positions.append((m.start(), m.end()))
logging.info(" barcode positions: {}".format(barcode_positions))
# get last position of a barcode nt in the pattern
# reads must be long enough for all
min_readlen = barcode_positions[-1][-1]
logging.info(" last position of a barcode nt in pattern: {}".format(min_readlen))
logging.info("")
# get coordinates of nucleotides to keep
# the tail after the last barcode nt is handled separately
seq_positions = []
last_seq_start = 0
for bcstart, bcstop in barcode_positions:
seq_positions.append((last_seq_start, bcstart))
last_seq_start = bcstop
logging.info(" sequence positions: {}".format(seq_positions))
logging.info(" start of sequence tail: {}".format(last_seq_start))
samout = (open(args.outfile, "w") if args.outfile is not None else stdout)
if args.out_bc_fasta is not None:
faout = open(args.out_bc_fasta, "w")
for header, seq, qual in FastqGeneralIterator(open(args.infile)):
# skip reads that are too short to extract the full requested barcode
if len(seq) < min_readlen:
logging.warning("skipping read '{}', is too short to extract the full requested barcode".format(header))
logging.debug("seq: {}".format(seq))
logging.debug("len(seq): {}".format(len(seq)))
continue
# extract barcode nucleotides
barcode_list = []
barcode_qual_list = []
for bcstart, bcstop in barcode_positions:
barcode_list.append(seq[bcstart:bcstop])
barcode_qual_list.append(qual[bcstart:bcstop])
barcode = "".join(barcode_list)
barcode_quals = "".join(barcode_qual_list)
logging.debug("extracted barcode: {}".format(barcode))
# create new sequence and quality string without barcode nucleotides
new_seq_list = []
new_qual_list = []
for seqstart, seqstop in seq_positions:
new_seq_list.append(seq[seqstart:seqstop])
new_qual_list.append(qual[seqstart:seqstop])
new_seq_list.append(seq[last_seq_start:])
new_qual_list.append(qual[last_seq_start:])
new_seq = "".join(new_seq_list)
new_qual = "".join(new_qual_list)
# check if at least one nucleotide is left. having none would break fastq
if len(new_seq) == 0:
logging.warning("skipping read '{}', no sequence remains after barcode extraction".format(header))
logging.debug("seq: {}".format(seq))
logging.debug("len(seq): {}".format(len(seq)))
continue
# write barcode nucleotides into header
if args.add_to_head:
annotated_header = " ".join([header, barcode])
else:
annotated_header = header
samout.write("@%s\n%s\n+\n%s\n" % (annotated_header, new_seq, new_qual))
# write barcode to fasta if requested
if args.out_bc_fasta is not None:
if args.save_bcs_as_fa:
faout.write(">{}\n{}\n".format(header, barcode))
else:
faout.write("@{}\n{}\n+\n{}\n".format(header, barcode, barcode_quals))
# close files (only the ones we opened ourselves; stdout stays open)
if args.outfile is not None:
    samout.close()
if args.out_bc_fasta is not None:
faout.close()
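# A small worked example of the slicing logic above, using a hypothetical
# read (not taken from any real data set).  With pattern XXXNNXX the barcode
# positions are [(0, 3), (5, 7)], the kept positions are [(0, 0), (3, 5)],
# and everything from position 7 onwards is the tail:
demo_seq = "ACGTTGCAAAA"
assert demo_seq[0:3] + demo_seq[5:7] == "ACGGC"   # extracted barcode
assert demo_seq[3:5] + demo_seq[7:] == "TTAAAA"   # sequence kept in the output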
| 39.143678
| 198
| 0.707238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,924
| 0.429306
|
c68e39b0e1053cfb768407c21209e2d2583bacc2
| 1,226
|
py
|
Python
|
main.py
|
pranavbaburaj/sh
|
dc0da9e10e7935310ae40d350c1897fcd65bce8f
|
[
"MIT"
] | 4
|
2021-01-30T12:25:21.000Z
|
2022-03-13T07:23:19.000Z
|
main.py
|
pranavbaburaj/sh
|
dc0da9e10e7935310ae40d350c1897fcd65bce8f
|
[
"MIT"
] | 3
|
2021-02-26T13:11:17.000Z
|
2021-06-04T17:26:05.000Z
|
main.py
|
pranavbaburaj/sh
|
dc0da9e10e7935310ae40d350c1897fcd65bce8f
|
[
"MIT"
] | 1
|
2021-02-08T10:18:29.000Z
|
2021-02-08T10:18:29.000Z
|
import pyfiglet as figlet
import click as click
from project import Project, ApplicationRunner
# The application package manager
from package import PackageManager
# print out the application name
def print_app_name(app_name):
figlet_object = figlet.Figlet(font='slant')
return figlet_object.renderText(str(app_name))
# call the project class
# and create a new project
def create_new_project(project_name):
print(print_app_name(project_name))
new_project = Project(project_name)
# call the run class
# and run the specified project
def run_project(project_name):
run = ApplicationRunner(project_name)
# call the package manager
# and install packages
def get_package(package):
package_manager = PackageManager(package)
@click.command()
@click.argument('command', type=str)
@click.argument('name', type=str)
def index(command, name):
if command == "new":
create_new_project(name)
elif command == "run":
run_project(name)
elif command == "install" or command == "i" or command == "get":
get_package(name)
else:
print(f"{command}:command not found")
if __name__ == "__main__":
index()
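# Example invocations (assuming this file is saved as main.py and the
# project/package modules are importable):
#
#   python main.py new myproject       # scaffold a new project
#   python main.py run myproject       # run an existing project
#   python main.py install requests    # fetch a package ('i' and 'get' also work)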
| 24.52
| 69
| 0.693312
| 0
| 0
| 0
| 0
| 386
| 0.314845
| 0
| 0
| 317
| 0.258564
|
c68e5add13f063fbcdf222fe47050eb0edc40e67
| 1,970
|
py
|
Python
|
src/container/cni/cni/test/kube_cni/test_kube_params.py
|
Dmitry-Eremeev/contrail-controller
|
1238bcff697981662225ec5a15bc4d3d2237ae93
|
[
"Apache-2.0"
] | null | null | null |
src/container/cni/cni/test/kube_cni/test_kube_params.py
|
Dmitry-Eremeev/contrail-controller
|
1238bcff697981662225ec5a15bc4d3d2237ae93
|
[
"Apache-2.0"
] | 2
|
2018-12-04T02:20:52.000Z
|
2018-12-22T06:16:30.000Z
|
src/container/cni/cni/test/kube_cni/test_kube_params.py
|
Dmitry-Eremeev/contrail-controller
|
1238bcff697981662225ec5a15bc4d3d2237ae93
|
[
"Apache-2.0"
] | 1
|
2018-12-04T02:07:47.000Z
|
2018-12-04T02:07:47.000Z
|
import sys
import mock
import unittest
import os
import types
from mock import patch, Mock
docker = Mock()
docker.client = Mock()
sys.modules['docker'] = docker
from cni.kube_cni import kube_params
class DockerClientMock(object):
def __init__(self):
pass
def inspect_container(self, id):
return {
'Config': {
'Labels': {
'io.kubernetes.pod.uid': "id" + id
}
}
}
class K8SParamsTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch('logging.getLogger', new=Mock())
def test_init(self):
os.environ['CNI_ARGS'] = "IgnoreUnknown=1;"\
"K8S_POD_NAMESPACE=default-ns;"\
"K8S_POD_NAME=hello-world-1-81nl8;"\
"K8S_POD_INFRA_CONTAINER_ID=abcdef;;TEST;"
mock_cni = Mock()
mock_cni.container_id = "123"
mock_cni.container_uuid = None
mock_cni.update = Mock()
docker.client.APIClient = Mock(return_value=DockerClientMock())
p = kube_params.K8SParams(mock_cni)
self.assertEquals("id123", p.pod_uuid)
self.assertEquals("hello-world-1-81nl8", p.pod_name)
mock_cni.update.assert_called_once_with("id123", "hello-world-1-81nl8")
docker.client.APIClient = Mock(return_value=None)
with self.assertRaises(kube_params.Error) as err:
kube_params.K8SParams(mock_cni)
        self.assertEqual(kube_params.K8S_PARAMS_ERR_GET_UUID, err.exception.code)
docker.client.APIClient = Mock(return_value=DockerClientMock())
os.environ['CNI_ARGS'] = "IgnoreUnknown=1;"\
"K8S_POD_NAMESPACE=default-ns;"\
"K8S_POD_INFRA_CONTAINER_ID=id123"
with self.assertRaises(kube_params.Error) as err:
kube_params.K8SParams(mock_cni)
        self.assertEqual(kube_params.K8S_ARGS_MISSING_POD_NAME, err.exception.code)
| 30.78125
| 84
| 0.63198
| 1,765
| 0.895939
| 0
| 0
| 1,376
| 0.698477
| 0
| 0
| 360
| 0.182741
|
c68e8c8615c536021a7d96bf97849a89f15fbe86
| 672
|
py
|
Python
|
build/lib/ripda/__init__.py
|
isakruas/ripda
|
a85e04be6f2d019a294a284e16b55b533cd32c33
|
[
"MIT"
] | 3
|
2021-06-05T13:05:49.000Z
|
2021-06-08T12:01:16.000Z
|
build/lib/ripda/__init__.py
|
isakruas/ripda
|
a85e04be6f2d019a294a284e16b55b533cd32c33
|
[
"MIT"
] | null | null | null |
build/lib/ripda/__init__.py
|
isakruas/ripda
|
a85e04be6f2d019a294a284e16b55b533cd32c33
|
[
"MIT"
] | 1
|
2021-06-05T13:06:15.000Z
|
2021-06-05T13:06:15.000Z
|
import os
from pathlib import Path
from .settings import default
import logging
try:
if os.path.isdir(str(Path.home()) + '/ripda/'):
if not os.path.isdir(str(Path.home()) + '/ripda/blocks/'):
            os.mkdir(str(Path.home()) + '/ripda/blocks/')
        if not os.path.isfile(str(Path.home()) + '/ripda/config.ini'):
            default()
else:
os.mkdir(str(Path.home()) + '/ripda/')
os.mkdir(str(Path.home()) + '/ripda/blocks/')
default()
except Exception as e:
logging.exception(e)
__all__ = [
'block',
'blockchain',
'miner',
'node',
'transaction',
'wallet'
]
| 21.677419
| 70
| 0.549107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.205357
|
c6904f6da38987f613861eec004342d5edfec9c2
| 1,339
|
py
|
Python
|
src/21.py
|
peter-hunt/project-euler-solution
|
ce5be80043e892e3a95604bd5ebec9dc88c7c037
|
[
"MIT"
] | null | null | null |
src/21.py
|
peter-hunt/project-euler-solution
|
ce5be80043e892e3a95604bd5ebec9dc88c7c037
|
[
"MIT"
] | null | null | null |
src/21.py
|
peter-hunt/project-euler-solution
|
ce5be80043e892e3a95604bd5ebec9dc88c7c037
|
[
"MIT"
] | null | null | null |
"""
Amicable numbers
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and
142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
from math import floor, sqrt
limit = 10_000
def initial_func(limit):
def sum_divisors(n):
result = 1
        for i in range(2, floor(sqrt(n)) + 1):  # include floor(sqrt(n)) itself
if n % i == 0:
if i == n // i:
result += i
else:
result += i + n // i
return result
amicables = {*()}
result = 0
for i in range(2, limit):
if i in amicables:
continue
other = sum_divisors(i)
if other == i:
continue
if sum_divisors(other) == i:
amicables.add(i)
result += i
if other < limit:
amicables.add(other)
result += other
return result
def improved_func(limit):
    # Sieve the proper-divisor sums d[n] for all n < limit at once (a sketch
    # of the intended improvement; assumes both members of each amicable pair
    # lie below limit, which holds for this problem).
    d = [0] * limit
    for i in range(1, limit // 2 + 1):
        for j in range(2 * i, limit, i):
            d[j] += i
    return sum(n for n in range(2, limit)
               if d[n] != n and d[n] < limit and d[d[n]] == n)
# 31626
print(initial_func(limit))
print(improved_func(limit))  # also 31626
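# Quick sanity check from the problem statement: d(220) = 284 and d(284) = 220,
# so 220 and 284 form an amicable pair (re-derived inline here, since
# sum_divisors is local to initial_func above).
assert sum(i for i in range(1, 220) if 220 % i == 0) == 284
assert sum(i for i in range(1, 284) if 284 % i == 0) == 220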
| 21.596774
| 79
| 0.551158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 531
| 0.395973
|
c691c92322330bef3cb93860c43c284649dcb60d
| 120
|
py
|
Python
|
cronicl/tracers/__init__.py
|
joocer/cronicl
|
5ab215554939699683752cb7b8549756edff9ea5
|
[
"Apache-2.0"
] | null | null | null |
cronicl/tracers/__init__.py
|
joocer/cronicl
|
5ab215554939699683752cb7b8549756edff9ea5
|
[
"Apache-2.0"
] | 73
|
2020-10-05T21:00:48.000Z
|
2020-11-16T23:29:41.000Z
|
cronicl/tracers/__init__.py
|
joocer/cronicl
|
5ab215554939699683752cb7b8549756edff9ea5
|
[
"Apache-2.0"
] | null | null | null |
from .file_tracer import FileTracer
from .null_tracer import NullTracer
from .base_tracer import BaseTracer, get_tracer
| 30
| 47
| 0.858333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c6929965a631981dae06929029921384cdc65b4d
| 2,156
|
py
|
Python
|
test.py
|
adrianlazar-personal/py-jwt-validator
|
1d586129a1279f90b4b326aa29f40b9302004e43
|
[
"MIT"
] | 6
|
2020-05-28T20:22:23.000Z
|
2021-09-21T06:26:52.000Z
|
test.py
|
adrianlazar-personal/py-jwt-validator
|
1d586129a1279f90b4b326aa29f40b9302004e43
|
[
"MIT"
] | 4
|
2020-11-09T23:12:38.000Z
|
2021-03-03T16:39:59.000Z
|
test.py
|
adrianlazar-personal/py-jwt-validator
|
1d586129a1279f90b4b326aa29f40b9302004e43
|
[
"MIT"
] | 1
|
2020-12-07T15:00:35.000Z
|
2020-12-07T15:00:35.000Z
|
from py_jwt_validator import PyJwtValidator, PyJwtException
jwt = 'eyJraWQiOiIyMjIiLCJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdF9oYXNoIjoic2pvdjVKajlXLXdkblBZUDd3djZ0USIsInN1YiI6Imh0dHBzOi8vbG9naW4uc2FsZXNmb3JjZS5jb20vaWQvMDBEMXQwMDAwMDBEVUo2RUFPLzAwNTF0MDAwMDAwRHlhUEFBUyIsInpvbmVpbmZvIjoiRXVyb3BlL0R1YmxpbiIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJhZGRyZXNzIjp7ImNvdW50cnkiOiJSTyJ9LCJwcm9maWxlIjoiaHR0cHM6Ly9hZHJpYW4tcHJvZC1kZXYtZWQubXkuc2FsZXNmb3JjZS5jb20vMDA1MXQwMDAwMDBEeWFQQUFTIiwiaXNzIjoiaHR0cHM6Ly9hZHJpYW4tcHJvZC1kZXYtZWQubXkuc2FsZXNmb3JjZS5jb20iLCJwaG9uZV9udW1iZXJfdmVyaWZpZWQiOnRydWUsInByZWZlcnJlZF91c2VybmFtZSI6ImFkcmlhbnNhbGVzZm9yY2VAbWFpbGluYXRvci5jb20iLCJnaXZlbl9uYW1lIjoiQWRyaWFuIiwibG9jYWxlIjoiZW5fSUVfRVVSTyIsIm5vbmNlIjoibm9uY2UiLCJwaWN0dXJlIjoiaHR0cHM6Ly9hZHJpYW4tcHJvZC1kZXYtZWQtLWMuZXUxNi5jb250ZW50LmZvcmNlLmNvbS9wcm9maWxlcGhvdG8vMDA1L0YiLCJhdWQiOiIzTVZHOWZUTG1KNjBwSjVKRF9GLndaTE1TZXJsRm03VmxCUWZPNWJhNHRSbDVrLmFPenhiTUVEN3g1ZTF6M2pwUmU2M1ZQOTNCbEp4eU5QUG9oWkcyLGh0dHA6Ly9sb2NhbGhvc3Q6NTUwMCIsInVwZGF0ZWRfYXQiOiIyMDE5LTA1LTAzVDE1OjQ4OjUyWiIsIm5pY2tuYW1lIjoiYWRyaWFuc2FsZXNmb3JjZSIsIm5hbWUiOiJBZHJpYW4gTGF6YXIiLCJwaG9uZV9udW1iZXIiOiIrNDAgMDcyNTUxMTg4NCIsImV4cCI6MTU4MDQwMTMxNiwiaWF0IjoxNTgwNDAxMTk2LCJmYW1pbHlfbmFtZSI6IkxhemFyIiwiZW1haWwiOiJhZHJpYW5zYWxlc2ZvcmNlQG1haWxpbmF0b3IuY29tIn0.QrEyD4qt1ZzT1-1ncdCqYxpGNsne8E22jwnHCvn3ygId1ZcA3305Mso2WfNASyMAyFWFcyc_sQmc67RZKFuMk0pdflkCwLl6JJdL9IKZo8qjcUmWdalAdpxU61F-NyUSa7IE6eh5y-Dm_qtrhxMXrqen9ugwf1MIiBm2VwgdaQFymEa8jKojfljOivHnEafX0D91NFLAFZPebPnMQp9YE-UR0n49lGT4x68avkqGXaRRVtxBCP_r5swOvqW9OL2Sa3kvSwUlp62Edf2Rxke6REnaWpYZs3rbGlQAzIsVAbansZBXv0dGJU8z2EFOmi7bKThjscqP-VmtASl1TJVrgWVBoRE9EyT10AUpGEuAAfTjGEtNOAq_u0UcFZc9quphy4cSJ2y66-KNwvD73y0Vl9KoeyJPc6Mrnu7yCVXTgsateaUwVn3dx2Cw0Jf3azUO-G5RfnQTpdE7huwofXUyh_WmaYVQ997lcXiVdhndZmSVDPEB9t05-qHCC5hafmnQqMpBvV-eI-OKhMVxwhdjzZnwmrALj-2Z9ApqfKsHxTy27RtIfNKPTijOAW8L6YwI909J__F7_tcPHOtEmusmg-CvU5qPUeq8D3pPC_IdzZBD-3GmavzzVeEjN1ucuo6aIIcvmsjQzeR4r_ZvWWdjx0gOHiGEraO2uETGiA3zesk'
try:
print(PyJwtValidator(jwt, auto_verify=False, check_expiry=False).verify(True))
except PyJwtException as e:
print(f"Exception caught. Error: {e}")
| 269.5
| 1,920
| 0.967532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,945
| 0.902134
|
c6934a03c692a0936dbdefc83d05a9252b05f1c4
| 6,795
|
py
|
Python
|
files/area.py
|
joaovpassos/USP-Programs
|
09ddb8aed238df1f1a2e80afdc202ac4538daf41
|
[
"MIT"
] | 2
|
2021-05-26T19:14:16.000Z
|
2021-05-27T21:14:24.000Z
|
files/area.py
|
joaovpassos/USP-Programs
|
09ddb8aed238df1f1a2e80afdc202ac4538daf41
|
[
"MIT"
] | null | null | null |
files/area.py
|
joaovpassos/USP-Programs
|
09ddb8aed238df1f1a2e80afdc202ac4538daf41
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------
# Constants you may use in this exercise
# In scientific notation 1.0e-6 is the same as 0.000001 (10 to the power -6)
EPSILON = 1.0e-6
#------------------------------------------------------------------
# The import below lets the program use every function in the math module,
# such as math.exp and math.sin.
import math
#------------------------------------------------------------------
def main():
    '''() -> None
    Modify this function by writing other tests.
    '''
    # choose whichever function you like and assign it to f_x
    f_x = math.cos
    # f_x = math.sin
    # f_x = math.exp # etc., to integrate other functions.
    # f_x = identidade # identidade() defined further below
    # f_x = circunferencia # circunferencia() defined further below
    # f_x = exp # exp() defined further below
    print("Start of tests.")
    # tests of f_x
    nome = f_x.__name__ # name of the f_x in use
    print(f"The f_x function used in the tests is {nome}()")
    print(f"Value of f_x(0.0)= {f_x( 0.0 )}")
    print(f"Value of f_x(0.5)= {f_x( 0.5 )}")
    print(f"Value of f_x(1.0)= {f_x( 1.0 )}")
    # tests of the area_por_retangulos function
    print()
    print("Area by rectangles:")
    a, b = 0, 1 # interval [a,b]
    k = 1 # number of rectangles
    n = 3 # number of iterations
    i = 0
    while i < n:
        print(f"test {i+1}: for {k} rectangles on the interval [{a}, {b}]:")
        print(f"  approximate area = {area_por_retangulos(f_x, a, b, k):g}")
        k *= 10
        i += 1
    # tests of the area_aproximada function
    print()
    print("Approximate area:")
    a, b = 0, 1 # interval
    k, area = area_aproximada(f_x, a, b) # number of rectangles and approximation
    print(f"test 1: for eps = {EPSILON:g} and interval [{a}, {b}]:")
    print(f"  with {k} rectangle the area is approximately = {area:g}")
    eps = 1e-6 # acceptable relative error
    i = 1
    n = 4
    while i < n:
        eps *= 10 # increase the acceptable relative error
        k, area = area_aproximada(f_x, a, b, eps)
        print(f"test {i+1}: for eps = {eps:g} and interval [{a}, {b}]:")
        print(f"  with {k} rectangles the area is approximately = {area:g}")
        i += 1
    print("End of tests.")
#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x)=x
def identidade( x ):
''' (float) -> float
    RECEIVES a value x.
    RETURNS the received value.
    EXAMPLES:
In [6]: identidade(3.14)
Out[6]: 3.14
In [7]: identidade(1)
Out[7]: 1
In [8]: identidade(-3)
Out[8]: -3
'''
return x
#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x)=sqrt(1 - x*x)
def circunferencia( x ):
''' (float) -> float
    RECEIVES a value x.
    RETURNS a value y >= 0 such that (x,y) is a point on the circle of radius 1 centred at (0,0).
    PRE-CONDITION: the function assumes x is a value such that -1 <= x <= 1.
    EXAMPLES:
In [9]: circunferencia(-1)
Out[9]: 0.0
In [10]: circunferencia(0)
Out[10]: 1.0
In [11]: circunferencia(1)
Out[11]: 0.0
'''
y = math.sqrt( 1 - x*x )
return y
#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = e^x
def exp( x ):
''' (float) -> float
    RECEIVES a value x.
    RETURNS (an approximation of) exp(x).
    EXAMPLES:
In [12]: exp(1)
Out[12]: 2.718281828459045
In [13]: exp(0)
Out[13]: 1.0
In [14]: exp(-1)
Out[14]: 0.36787944117144233
'''
y = math.exp( x )
return y # return math.exp( x )
#------------------------------------------------------------------
#
def erro_rel(y, x):
''' (float, float) -> float
    RECEIVES two numbers x and y.
    RETURNS the relative error between them.
    EXAMPLES:
In [1]: erro_rel(0, 0)
Out [1]: 0.0
In [2]: erro_rel(0.01, 0)
Out [2]: 1.0
In [3]: erro_rel(1.01, 1.0)
Out [3]: 0.01
'''
if x == 0 and y == 0:
return 0.0
elif x == 0:
return 1.0
erro = (y-x)/x
if erro < 0:
return -erro
return erro
#------------------------------------------------------------------
def area_por_retangulos(f, a, b, k):
'''(function, float, float, int) -> float
    RECEIVES a function f, two numbers a and b, and an integer k.
    RETURNS an approximation of the area under the function f on the
    interval [a,b] using k rectangles.
    PRE-CONDITION: the function assumes f is continuous on the interval [a,b]
    and that f(x) >= 0 for every x with a <= x <= b.
    EXAMPLES:
    In [15]: area_por_retangulos(identidade, 0, 1, 1)
    Out[15]: 0.5
    In [16]: area_por_retangulos(circunferencia, -1, 0, 1)
Out[16]: 0.8660254037844386
'''
    # write your solution below
    # remove or modify the line below as you wish
base = (b-a)/k
i = 0
x_meio = ((b-a)/(2*k)) + a
soma = 0
while i < k:
area = f(x_meio)*base
x_meio += base
i += 1
soma += area
return soma
#------------------------------------------------------------------
def area_aproximada(f, a, b, eps=EPSILON):
'''(function, float, float, float) -> int, float
    RECEIVES a function f and numbers a, b, eps.
    RETURNS an integer k and an approximation of the area under the function f
    on the interval [a,b] using k rectangles.
    The value of k must be the __smallest power__ of 2 such that the relative
    error of the returned approximation is smaller than eps.
    Thus the possible values of k are 1, 2, 4, 8, 16, 32, 64, ...
    PRE-CONDITION: the function assumes f is continuous on the interval [a,b]
    and that f(x) >= 0 for every x with a <= x <= b.
    EXAMPLES:
In [22]: area_aproximada(identidade, 1, 2)
Out[22]: (2, 1.5)
In [23]: area_aproximada(exp, 1, 2, 16)
Out[23]: (2, 4.6224728167337865)
'''
    # write the body of the function
    # remove or modify the line below as you wish
k = 1
sub = eps + 1
while sub >= eps:
sub = erro_rel(area_por_retangulos(f,a,b,k*2),area_por_retangulos(f,a,b,k))
k *= 2
    return k, area_por_retangulos(f,a,b,k) # to return an int and a float,
                                           # just separate them with a comma
#######################################################
###                       END                       ###
#######################################################
#
# DO NOT MODIFY THE LINES BELOW
#
# This if statement runs the main() function only when this is
# the module from which execution was started.
if __name__ == '__main__':
main()
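# A quick convergence check (a sketch, doctest-style, not executed as part of
# the script): the midpoint-rectangle rule should approach the exact integral
# of sin over [0, pi], which is 2.
#
#   >>> abs(area_por_retangulos(math.sin, 0, math.pi, 1000) - 2) < 1e-5
#   True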
| 31.901408
| 98
| 0.512288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,325
| 0.769286
|
c693df3548964a87b3411b88e56a453a7a597f59
| 4,421
|
py
|
Python
|
gribmagic/unity/download/engine.py
|
earthobservations/gribmagic
|
59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e
|
[
"MIT"
] | 9
|
2020-12-18T13:26:45.000Z
|
2022-03-03T16:46:33.000Z
|
gribmagic/unity/download/engine.py
|
earthobservations/gribmagic
|
59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e
|
[
"MIT"
] | 12
|
2020-12-19T18:32:51.000Z
|
2021-10-30T17:48:35.000Z
|
gribmagic/unity/download/engine.py
|
earthobservations/gribmagic
|
59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e
|
[
"MIT"
] | 2
|
2020-12-19T08:02:03.000Z
|
2021-10-30T16:01:02.000Z
|
"""
Handle download of NWP data from remote servers.
"""
import logging
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, List, Optional
import requests
from gribmagic.unity.configuration.constants import (
KEY_COMPRESSION,
KEY_LOCAL_FILE_PATHS,
KEY_REMOTE_FILE_PATHS,
)
from gribmagic.unity.configuration.model import WeatherModelSettings
from gribmagic.unity.download.decoder import (
decode_bunzip,
decode_identity,
decode_tarfile,
)
from gribmagic.unity.enumerations import WeatherModel
from gribmagic.unity.model import DownloadItem
session = requests.Session()
logger = logging.getLogger(__name__)
DEFAULT_NUMBER_OF_PARALLEL_PROCESSES = 4
def run_download(
weather_model: WeatherModel,
model_file_lists: Dict[str, List[str]],
parallel_download: bool = False,
n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES,
) -> None:
"""
Download weather forecasts data.
"""
model = WeatherModelSettings(weather_model)
if model.info[KEY_COMPRESSION] == "tar":
return __download_tar_file(
weather_model,
model_file_lists[KEY_REMOTE_FILE_PATHS][0],
model_file_lists[KEY_LOCAL_FILE_PATHS],
)
if parallel_download:
download_specifications = [
DownloadItem(model=weather_model, local_file=local_file_path, remote_url=remote_file)
for remote_file, local_file_path in zip(
model_file_lists[KEY_REMOTE_FILE_PATHS],
model_file_lists[KEY_LOCAL_FILE_PATHS],
)
]
return __download_parallel(download_specifications, n_processes)
else:
results = []
for remote_file, local_file_path in zip(
model_file_lists[KEY_REMOTE_FILE_PATHS],
model_file_lists[KEY_LOCAL_FILE_PATHS],
):
item = DownloadItem(
model=weather_model, local_file=local_file_path, remote_url=remote_file
)
results.append(__download(item))
return results
def __download(item: DownloadItem) -> Optional[Path]:
    """
    Base download function managing a single file download.
    Args:
        item: DownloadItem carrying the weather model, the local target
            file path and the remote source URL.
    Returns:
        Path of the stored file, or None if the download failed.
    """
model = WeatherModelSettings(item.model)
# Compute source URL and target file.
url = item.remote_url
target_file = Path(item.local_file)
if target_file.exists():
logger.info(f"Skipping existing file {target_file}")
return target_file
logger.info(f"Downloading {url} to {target_file}")
try:
response = session.get(url, stream=True)
response.raise_for_status()
except Exception as ex:
logger.warning(f"Failed accessing resource {url}: {ex}")
return
if not target_file.parent.is_dir():
target_file.parent.mkdir(exist_ok=True)
if model.info[KEY_COMPRESSION] == "bz2":
decode_bunzip(response.raw, target_file)
else:
decode_identity(response.raw, target_file)
return target_file
def __download_parallel(
download_specifications: List[DownloadItem],
n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES,
) -> None:
"""
    Run the downloads in parallel.
    Args:
        download_specifications: list of DownloadItem objects
        n_processes: number of parallel processes used for the download
    Returns:
        Iterator over the per-item download results
"""
with ThreadPoolExecutor(max_workers=n_processes) as executor:
results = executor.map(__download, download_specifications)
executor.shutdown(wait=True)
return results
def __download_tar_file(
weather_model: WeatherModel, url: str, local_file_list: List[Path]
) -> None:
"""
    Downloads a weather forecast package shipped as a single tar archive.
    Args:
        weather_model: WeatherModel to download data for
        url: remote URL of the tar archive
        local_file_list: local target paths for the archive members
    Returns:
        Result of decoding the tar stream, or None if the download failed.
"""
model = WeatherModelSettings(weather_model)
try:
response = session.get(url, stream=True)
response.raise_for_status()
except Exception as ex:
logger.warning(f"Failed accessing resource {url}: {ex}")
return
return decode_tarfile(response.raw, local_file_list)
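# A minimal usage sketch (hypothetical model, URL and paths, not taken from
# the gribmagic configuration):
#
#   file_lists = {
#       KEY_REMOTE_FILE_PATHS: ["https://example.org/model/t2m.grib2.bz2"],
#       KEY_LOCAL_FILE_PATHS: ["/tmp/t2m.grib2"],
#   }
#   run_download(some_weather_model, file_lists, parallel_download=True)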
| 27.459627
| 97
| 0.680615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,037
| 0.234562
|
c696cbe9a74a6a3f3db61104f5e94acb0ded96e3
| 2,195
|
py
|
Python
|
tests/main.py
|
Antojitos/guacamole
|
50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152
|
[
"MIT"
] | 3
|
2015-10-30T13:09:13.000Z
|
2021-02-17T19:12:37.000Z
|
tests/main.py
|
amessinger/guacamole
|
50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152
|
[
"MIT"
] | 5
|
2015-10-30T12:53:05.000Z
|
2015-12-14T15:20:04.000Z
|
tests/main.py
|
Antojitos/guacamole
|
50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152
|
[
"MIT"
] | 1
|
2015-10-28T08:44:48.000Z
|
2015-10-28T08:44:48.000Z
|
import sys
import os
import shutil
import filecmp
import json
import unittest
# Path hack. http://stackoverflow.com/questions/6323860/sibling-package-imports
sys.path.insert(0, os.path.abspath('../guacamole'))
import guacamole
class GuacamoleTestCase(unittest.TestCase):
def setUp(self):
guacamole.app.config['TESTING'] = True
self.app = guacamole.app.test_client()
self.original_file_name = 'image.jpg'
self.original_file_path = os.path.join('tests/fixtures', self.original_file_name)
        self.original_file = open(self.original_file_path, 'rb')  # binary mode for the image upload
self.original_file_tags = 'Mexican, food,fiesta'
if not os.path.exists('files'):
os.makedirs('files')
def tearDown(self):
        shutil.rmtree('files')
def test_post_file(self):
"""Testing file upload"""
response = self.app.post('/files/',
buffered=True,
content_type='multipart/form-data',
data={
'file': (self.original_file, self.original_file_name)
})
uploaded_file_meta = json.loads(response.data)
uploaded_file_path = os.path.join('files', uploaded_file_meta['uri'])
assert '200' in response.status
assert os.path.isfile(uploaded_file_path)
assert filecmp.cmp(self.original_file_path, uploaded_file_path)
def test_post_file_with_tags(self):
"""Testing file upload with tags"""
response = self.app.post('/files/',
buffered=True,
content_type='multipart/form-data',
data={
'file': (self.original_file, self.original_file_name),
'tags': self.original_file_tags
})
uploaded_file_meta = json.loads(response.data)
uploaded_file_path = os.path.join('files', uploaded_file_meta['uri'])
assert '200' in response.status
assert '["mexican", "food", "fiesta"]' in response.data
assert os.path.isfile(uploaded_file_path)
assert filecmp.cmp(self.original_file_path, uploaded_file_path)
if __name__ == '__main__':
unittest.main()
| 32.279412
| 89
| 0.626424
| 1,918
| 0.873804
| 0
| 0
| 0
| 0
| 0
| 0
| 388
| 0.176765
|
c696f39c84a65ece0fb68103ccf754b71fcc536c
| 1,249
|
py
|
Python
|
scripts/devicereload/reload_cisco_device.py
|
chrisbalmer/netauto-helper-scripts
|
1855085f899fa1cfbf86d6515330e0a2b002ec6a
|
[
"MIT"
] | null | null | null |
scripts/devicereload/reload_cisco_device.py
|
chrisbalmer/netauto-helper-scripts
|
1855085f899fa1cfbf86d6515330e0a2b002ec6a
|
[
"MIT"
] | null | null | null |
scripts/devicereload/reload_cisco_device.py
|
chrisbalmer/netauto-helper-scripts
|
1855085f899fa1cfbf86d6515330e0a2b002ec6a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys
import yaml
import paramiko
import base64
import time
import keychain
import re
import console
def connect_to_device(ssh):
print "\n\nConnecting to device..."
keys = ssh.get_host_keys()
keys.add(hostname,'ssh-rsa',public_key)
password = keychain.get_password(hostname,
username)
ssh.connect(hostname,username=username,password=password)
shell = ssh.invoke_shell()
print "Connected to " + hostname + "."
shell.send("term len 0\n")
return shell
def send_command(shell, command):
shell.send(command + "\n")
time.sleep(1)
output = shell.recv(10000)
return output
def logout(shell):
shell.send('logout\n')
    print('\nDisconnected from device\n')
console.clear()
# Load options
with open('devices.yaml', 'r') as file:
    device_list = yaml.safe_load(file)  # safe_load avoids arbitrary object construction
hostname = device_list['device1']['host']
public_key_string = device_list['device1']['public_key']
username = device_list['device1']['username']
public_key = paramiko.RSAKey(data=base64.b64decode(public_key_string))
# Prep the SSH connection
ssh = paramiko.SSHClient()
shell = connect_to_device(ssh)
print(send_command(shell, 'reload\n'))
logout(shell)
print('\n\nComplete!')
console.hud_alert('Complete!',duration=2)
| 22.303571
| 70
| 0.72458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.220977
|
c697934e43005813bbf25f5936b378004c77b6ac
| 324
|
py
|
Python
|
settings.py
|
musahibrahimali/flasket-api
|
d212cb84817dee90e9a53015b2811468a4db75ff
|
[
"MIT"
] | 7
|
2018-02-23T17:41:04.000Z
|
2022-03-09T12:20:56.000Z
|
settings.py
|
musahibrahimali/flasket-api
|
d212cb84817dee90e9a53015b2811468a4db75ff
|
[
"MIT"
] | null | null | null |
settings.py
|
musahibrahimali/flasket-api
|
d212cb84817dee90e9a53015b2811468a4db75ff
|
[
"MIT"
] | 1
|
2021-06-02T17:23:45.000Z
|
2021-06-02T17:23:45.000Z
|
# Flask settings
FLASK_DEBUG = True # Do not use debug mode in production
# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# Flask-Restplus settings
SWAGGER_UI_DOC_EXPANSION = 'list'
RESTPLUS_VALIDATE = True
RESTPLUS_MASK_SWAGGER = False
ERROR_404_HELP = False
| 23.142857
| 57
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.388889
|
c69cba0e213110d86560b0464617fbc29e061f5e
| 1,409
|
py
|
Python
|
parkings/management/commands/create_init_user.py
|
PICTEC/pgs
|
c5e8fd78d411937ce60e733316d4d425410153bc
|
[
"MIT"
] | 1
|
2021-03-26T05:49:08.000Z
|
2021-03-26T05:49:08.000Z
|
parkings/management/commands/create_init_user.py
|
PICTEC/PGS
|
813721b3bdbaf173d68cb81b3dc0886e542b9a4e
|
[
"MIT"
] | null | null | null |
parkings/management/commands/create_init_user.py
|
PICTEC/PGS
|
813721b3bdbaf173d68cb81b3dc0886e542b9a4e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Create a superuser and an associated monitor
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
from parkings.models import Monitor
from parkings.models import EnforcementDomain
MODELS = ['operator', 'parking area', 'parking check', 'parking terminal', 'parking', 'region', 'payment zone']
PERMISSIONS = ['view']
class Command(BaseCommand):
help = 'Create superuser and monitor'
def add_arguments(self, parser):
parser.add_argument('superuser_name', type=str)
parser.add_argument('superuser_email', type=str)
parser.add_argument('superuser_password', type=str)
def handle(self, superuser_name, superuser_email, superuser_password, *args, **options):
try:
user = User.objects.create_superuser(superuser_name, superuser_email, superuser_password)
print("Created superuser " + superuser_name)
except Exception as e:
print("Failed in creating superuser " + superuser_name)
print(e)
return
        Monitor.objects.update_or_create(
name=superuser_name,
user=user,
domain=EnforcementDomain.get_default_domain(),
)
print("Created monitor for superuser " + superuser_name)
| 35.225
| 111
| 0.698368
| 929
| 0.659333
| 0
| 0
| 0
| 0
| 0
| 0
| 326
| 0.23137
|
c69d613e92541912c5d1aa1169340677fbcf4a96
| 5,437
|
py
|
Python
|
mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py
|
mlpiper/mlpiper
|
0fd2b6773f970c831038db47bf4920ada21a5f51
|
[
"Apache-2.0"
] | 7
|
2019-04-08T02:31:55.000Z
|
2021-11-15T14:40:49.000Z
|
mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py
|
mlpiper/mlpiper
|
0fd2b6773f970c831038db47bf4920ada21a5f51
|
[
"Apache-2.0"
] | 31
|
2019-02-22T22:23:26.000Z
|
2021-08-02T17:17:06.000Z
|
mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py
|
mlpiper/mlpiper
|
0fd2b6773f970c831038db47bf4920ada21a5f51
|
[
"Apache-2.0"
] | 8
|
2019-03-15T23:46:08.000Z
|
2020-02-06T09:16:02.000Z
|
import numpy as np
from parallelm.mlops.mlops_exception import MLOpsStatisticsException
from parallelm.mlops.stats.graph import Graph
from parallelm.mlops.stats.multi_line_graph import MultiLineGraph
from parallelm.mlops.stats.single_value import SingleValue
from parallelm.mlops.stats.table import Table
from parallelm.mlops.stats_category import StatCategory
class MLStatObjectCreator(object):
@staticmethod
def get_single_value_stat_object(name, single_value):
"""
Create Single Value stat object from numerical value
:param name: Name of stat
:param single_value: single numeric value
:return: MLOps Single Value object, time series category
"""
if isinstance(single_value, (int, float)):
category = StatCategory.TIME_SERIES
single_value = \
SingleValue() \
.name(name) \
.value(single_value) \
.mode(category)
return single_value, category
else:
raise MLOpsStatisticsException \
("For outputting {}, {} should be of type numeric but got {}."
.format(name, single_value, type(single_value)))
@staticmethod
def get_table_value_stat_object(name, list_2d, match_header_pattern=None):
"""
        Create a Table stat object from a list of lists: the first inner list is the header row,
        and the first element of each remaining list is that row's title.
:param name: Name of stat
:param list_2d: 2d representation of table to output
:param match_header_pattern: If not none, then header of table should match the pattern provided
:return: MLOps Table Value object, general stat category
"""
category = StatCategory.GENERAL
try:
header = list(map(lambda x: str(x).strip(), list_2d[0]))
if match_header_pattern is not None:
assert header == match_header_pattern, \
"headers {} is not matching expected headers pattern {}" \
.format(header, match_header_pattern)
len_of_header = len(header)
table_object = Table().name(name).cols(header)
for index in range(1, len(list_2d)):
assert len(list_2d[index]) - 1 == len_of_header, \
"length of row value does not match with headers length"
row_title = str(list_2d[index][0]).strip()
row_value = list(map(lambda x: str(x).strip(), list_2d[index][1:]))
table_object.add_row(row_title, row_value)
return table_object, category
except Exception as e:
raise MLOpsStatisticsException \
("error happened while outputting table object from list_2d: {}. error: {}".format(list_2d, e))
@staticmethod
def get_graph_value_stat_object(name, x_data, y_data, x_title, y_title, legend):
"""
Create graph object from given data.
:param name: Name of stat
:param x_data: X axis data. It has to be numeric list.
:param y_data: Y axis data. It has to be numeric list.
:param x_title: X axis title
:param y_title: Y axis title
:param legend: Legend of Y axis
:return: MLOps Graph Value object, general stat category
"""
category = StatCategory.GENERAL
if legend is None:
legend = "{} vs {}".format(y_title, x_title)
try:
graph_object = Graph() \
.name(name) \
.set_x_series(list(x_data)) \
.add_y_series(label=legend, data=list(y_data))
graph_object.x_title(x_title)
graph_object.y_title(y_title)
return graph_object, category
except Exception as e:
raise MLOpsStatisticsException \
("error happened while outputting graph object. error: {}".format(e))
@staticmethod
def get_multiline_stat_object(name, list_value, labels=None):
"""
        Create a multiline object from a list of values. If no labels are given,
        the legend entries default to the value indices, i.e. 0, 1, ...
:param name: Name of stat
:param list_value: list of values to embed in multiline value.
:return: MLOps Multiline Value object, timeseries stat category
"""
if isinstance(list_value, list) or isinstance(list_value, np.ndarray):
category = StatCategory.TIME_SERIES
# if labels are not provided then it will be 0, 1, .. length of list - 1
if labels is None:
labels = range(len(list_value))
labels = list(map(lambda x: str(x).strip(), labels))
if (len(labels) == len(list_value)):
multiline_object = MultiLineGraph() \
.name(name) \
.labels(labels)
multiline_object.data(list(list_value))
return multiline_object, category
else:
raise MLOpsStatisticsException(
"size of labels associated with list of values to get does not match. {}!={}"
.format(len(labels), len(list_value)))
else:
raise MLOpsStatisticsException(
"list_value has to be of type list or nd array but got {}".format(type(list_value)))
| 41.823077
| 161
| 0.609343
| 5,072
| 0.932867
| 0
| 0
| 5,015
| 0.922384
| 0
| 0
| 1,916
| 0.3524
|
c69e524f9b42fcb4896f83fcc4785aff222562d4
| 303
|
py
|
Python
|
terrascript/data/davidji99/split.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/davidji99/split.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/davidji99/split.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/davidji99/split.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:27:33 UTC)
import terrascript
class split_traffic_type(terrascript.Data):
pass
class split_workspace(terrascript.Data):
pass
__all__ = [
"split_traffic_type",
"split_workspace",
]
| 16.833333
| 73
| 0.745875
| 101
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.485149
|
c69f9f22e1976429e68cc587ef9c41a2baa5fb93
| 85
|
py
|
Python
|
nginx/apps.py
|
rockychen-dpaw/it-assets
|
92ec23c6a413c5c45bb3d96981d6af68535d225c
|
[
"Apache-2.0"
] | 4
|
2018-11-16T13:49:49.000Z
|
2021-08-19T05:16:50.000Z
|
nginx/apps.py
|
rockychen-dpaw/it-assets
|
92ec23c6a413c5c45bb3d96981d6af68535d225c
|
[
"Apache-2.0"
] | 10
|
2018-07-06T09:34:56.000Z
|
2022-01-28T06:09:05.000Z
|
nginx/apps.py
|
rockychen-dpaw/it-assets
|
92ec23c6a413c5c45bb3d96981d6af68535d225c
|
[
"Apache-2.0"
] | 9
|
2018-05-05T23:29:10.000Z
|
2020-06-26T02:29:17.000Z
|
from django.apps import AppConfig
class NginxConfig(AppConfig):
name = 'nginx'
| 14.166667
| 33
| 0.741176
| 48
| 0.564706
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.082353
|
c69faa343bb44ce7636a902fb8fc9cfe5f9f2c0d
| 3,511
|
py
|
Python
|
tools/telemetry/telemetry/unittest/output_formatter.py
|
aranajhonny/chromium
|
caf5bcb822f79b8997720e589334266551a50a13
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2019-01-16T03:57:39.000Z
|
2019-01-16T03:57:39.000Z
|
tools/telemetry/telemetry/unittest/output_formatter.py
|
aranajhonny/chromium
|
caf5bcb822f79b8997720e589334266551a50a13
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-02-10T21:00:08.000Z
|
2018-03-20T05:09:50.000Z
|
tools/telemetry/telemetry/unittest/output_formatter.py
|
aranajhonny/chromium
|
caf5bcb822f79b8997720e589334266551a50a13
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.unittest import options_for_unittests
class OutputFormatter(object):
def __init__(self, output_stream):
self._output_stream = output_stream
def StartTest(self, test):
pass
def StartTestSuite(self, suite):
pass
def StartTestRun(self):
pass
def StopTest(self, test):
pass
def StopTestSuite(self, suite):
pass
def StopTestRun(self, result):
pass
def Error(self, test, err):
pass
def Failure(self, test, err):
pass
def Success(self, test):
pass
def Skip(self, test, reason):
pass
class TestSuite(unittest.TestSuite):
"""TestSuite that can delegate start and stop calls to a TestResult object."""
def run(self, result): # pylint: disable=W0221
if hasattr(result, 'startTestSuite'):
result.startTestSuite(self)
result = super(TestSuite, self).run(result)
if hasattr(result, 'stopTestSuite'):
result.stopTestSuite(self)
return result
class TestRunner(object):
def run(self, test, output_formatters, repeat_count, args):
util.AddDirToPythonPath(util.GetUnittestDataDir())
result = TestResult(output_formatters)
result.startTestRun()
try:
options_for_unittests.Push(args)
      for _ in range(repeat_count):
test(result)
finally:
options_for_unittests.Pop()
result.stopTestRun()
return result
class TestResult(unittest.TestResult):
def __init__(self, output_formatters):
super(TestResult, self).__init__()
self.successes = []
self._output_formatters = output_formatters
@property
def failures_and_errors(self):
return self.failures + self.errors
def startTest(self, test):
super(TestResult, self).startTest(test)
for output_formatter in self._output_formatters:
output_formatter.StartTest(test)
def startTestSuite(self, suite):
for output_formatter in self._output_formatters:
output_formatter.StartTestSuite(suite)
def startTestRun(self):
super(TestResult, self).startTestRun()
for output_formatter in self._output_formatters:
output_formatter.StartTestRun()
def stopTest(self, test):
super(TestResult, self).stopTest(test)
for output_formatter in self._output_formatters:
output_formatter.StopTest(test)
def stopTestSuite(self, suite):
for output_formatter in self._output_formatters:
output_formatter.StopTestSuite(suite)
def stopTestRun(self):
super(TestResult, self).stopTestRun()
for output_formatter in self._output_formatters:
output_formatter.StopTestRun(self)
def addError(self, test, err):
super(TestResult, self).addError(test, err)
for output_formatter in self._output_formatters:
output_formatter.Error(test, err)
def addFailure(self, test, err):
super(TestResult, self).addFailure(test, err)
for output_formatter in self._output_formatters:
output_formatter.Failure(test, err)
def addSuccess(self, test):
super(TestResult, self).addSuccess(test)
self.successes.append(test)
for output_formatter in self._output_formatters:
output_formatter.Success(test)
def addSkip(self, test, reason):
super(TestResult, self).addSkip(test, reason)
for output_formatter in self._output_formatters:
output_formatter.Skip(test, reason)
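# A minimal sketch of a custom formatter (hypothetical, not part of the
# original module): every hook on OutputFormatter is a no-op, so a subclass
# only overrides the events it cares about.
class DotFormatter(OutputFormatter):
  def Success(self, test):
    self._output_stream.write('.')
  def Failure(self, test, err):
    self._output_stream.write('F')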
| 26.801527
| 80
| 0.727428
| 3,234
| 0.921105
| 0
| 0
| 81
| 0.02307
| 0
| 0
| 291
| 0.082882
|
c69fe4b03acf538832512321d83a32c7f8cc326f
| 480
|
py
|
Python
|
awsflow/lambdas/demo.py
|
algorithmia-algorithms/awsflow
|
927698c27e57377dbe8094c71d5b0c36548b0937
|
[
"MIT"
] | 12
|
2019-04-06T14:59:29.000Z
|
2020-04-14T21:02:23.000Z
|
awsflow/lambdas/demo.py
|
vaquarkhan/awsflow
|
59f9001972aec2bac60a97d174b97f96689360ce
|
[
"MIT"
] | null | null | null |
awsflow/lambdas/demo.py
|
vaquarkhan/awsflow
|
59f9001972aec2bac60a97d174b97f96689360ce
|
[
"MIT"
] | 3
|
2019-07-30T17:11:14.000Z
|
2020-02-17T20:39:25.000Z
|
from awsflow.tools.emr import logging
from awsflow.version import __version__
def hello_world(event, context):
"""
    Test function; logs and echoes its event and context.
    :param event: AWS lambda function event
    :param context: AWS lambda function context
    :return: dict with the formatted parameters and the awsflow version
"""
message = 'event={} context={}'.format(event, context)
logging.info('Hello World! Message is {}'.format(message))
return {
'parameters': message,
'awsflow-version': __version__
}
| 25.263158
| 62
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.475
|
c6a0b2e6f13cc83e001ace2dc43eeb51890ba31f
| 1,074
|
py
|
Python
|
weather/tools.py
|
yulinliu101/DeepTP
|
bc4f9adad6dda6c32e58026dda7863e0cb2a6072
|
[
"MIT"
] | 46
|
2018-09-23T02:08:02.000Z
|
2022-03-19T15:56:15.000Z
|
weather/tools.py
|
yulinliu101/DeepTP
|
bc4f9adad6dda6c32e58026dda7863e0cb2a6072
|
[
"MIT"
] | 6
|
2018-12-02T09:04:56.000Z
|
2021-09-30T12:14:53.000Z
|
weather/tools.py
|
yulinliu101/DeepTP
|
bc4f9adad6dda6c32e58026dda7863e0cb2a6072
|
[
"MIT"
] | 27
|
2018-11-19T18:17:07.000Z
|
2021-08-28T17:07:11.000Z
|
'''
    Module's author: Jarry Gabriel
    Date: June, July 2016
    Some algorithms were written by: Malivai Luce, Helene Piquet
    This module handles various tools
'''
from pyproj import Proj, Geod
import numpy as np
# Projections
wgs84 = Proj("+init=EPSG:4326")
epsg3857 = Proj("+init=EPSG:3857")
g = Geod(ellps='WGS84')
# Returns pressure (hPa) from altitude (ft)
def press(alt):
z = alt/3.28084
return 1013.25*(1-(0.0065*z)/288.15)**5.255
# Returns the closest pressure level from lvls for a given altitude (alt)
def proxilvl(alt, lvls):
p = press(alt)
levels = np.array(sorted(lvls.keys()))
return levels[np.abs(levels - p).argmin()]
# def proxy(val, lvl1, lvl2):
# if (abs(val - lvl1) < abs(val - lvl2)):
# return lvl1
# else:
# return lvl2
# p = press(alt)
# levels = sorted(lvls.keys())
# if p < levels[0]:
# return levels[0]
# else:
# for i, el in enumerate(levels[1:]):
# if p < el:
# return proxy(p, levels[i-1], el)
# return levels[-1]
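# --- Hedged usage sketch, not part of the original module. The level keys
# --- below are hypothetical pressure levels (hPa), e.g. as read from a GRIB file.
if __name__ == '__main__':
    lvls = {1000: None, 850: None, 700: None, 500: None, 300: None, 200: None}
    alt_ft = 35000
    print('pressure at %d ft: %.1f hPa' % (alt_ft, press(alt_ft)))
    print('closest level: %s hPa' % proxilvl(alt_ft, lvls))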
| 25.571429
| 58
| 0.57635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 714
| 0.664804
|
c6a0ccd39f3cb516016d54f1a50913914e43bf5d
| 1,315
|
py
|
Python
|
src/database/report.py
|
moevm/nosql1h19-report-stats
|
ab1dc80858df2d8b44489dc7ca900371b1fcc80f
|
[
"MIT"
] | null | null | null |
src/database/report.py
|
moevm/nosql1h19-report-stats
|
ab1dc80858df2d8b44489dc7ca900371b1fcc80f
|
[
"MIT"
] | null | null | null |
src/database/report.py
|
moevm/nosql1h19-report-stats
|
ab1dc80858df2d8b44489dc7ca900371b1fcc80f
|
[
"MIT"
] | null | null | null |
from docx import Document
class Report:
def __init__(self, docx_text, meta, text_processor):
self.document = Document(docx_text)
self.date = self.document.core_properties.modified
self.title = meta['title']
self.author = meta['author']
self.group = int(meta['group'])
self.department = meta['department']
self.course = int(meta['course'])
self.faculty = meta['faculty']
raw_text = ' '.join([par.text for par in self.document.paragraphs])
processed_text = text_processor.process(raw_text)
self.text = processed_text['text']
        self.text.pop('clean_text', None)  # Do not store the cleaned text
self.words = processed_text['words']
        self.words.pop('words', None)  # Do not store the full word list
self.symbols = processed_text['symbols']
def serialize_db(self):
serialized_document = {
'title': self.title,
'date': self.date,
'author': self.author,
'group': self.group,
'department': self.department,
'course': self.course,
'faculty': self.faculty,
'text': self.text,
'words': self.words,
'symbols': self.symbols
}
return serialized_document
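# --- Hedged usage sketch, not part of the original module. It assumes the
# --- python-docx package, a .docx file at the hypothetical path below, and a
# --- text processor whose process() returns the dict shape consumed above.
if __name__ == '__main__':
    class _StubTextProcessor:
        def process(self, raw_text):
            words = raw_text.split()
            return {
                'text': {'clean_text': raw_text, 'length': len(raw_text)},
                'words': {'words': words, 'unique': len(set(words))},
                'symbols': {'total': len(raw_text)},
            }
    meta = {'title': 'Demo', 'author': 'A. Author', 'group': '1101',
            'department': 'CS', 'course': '1', 'faculty': 'FCTI'}
    report = Report('report.docx', meta, _StubTextProcessor())
    print(report.serialize_db())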
| 32.073171
| 75
| 0.579468
| 1,325
| 0.979305
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.192166
|
c6a371ecbe5a163fba368a97852b226ecc2b76c6
| 19,724
|
py
|
Python
|
transmission/PFM_v24.py
|
zarppy/MUREIL_2014
|
25ba16554ce8f614b9337e0fffce75da3fa259a4
|
[
"MIT"
] | null | null | null |
transmission/PFM_v24.py
|
zarppy/MUREIL_2014
|
25ba16554ce8f614b9337e0fffce75da3fa259a4
|
[
"MIT"
] | null | null | null |
transmission/PFM_v24.py
|
zarppy/MUREIL_2014
|
25ba16554ce8f614b9337e0fffce75da3fa259a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import math
class PowerFlow():
"""The power flow class, which can serve as a transmission model for
an energy system model. In the current version it can return the amount
of failed transmission. It further will have the ability to be updated
via a function, in order to introduce changeability.
"""
def __init__(self):
"""Initiates a class member of the power flow class.
"""
self.b_inverse_matrix = np.matrix(1)
self.a_d_matrix = np.matrix(1)
self.no_edges = 0
self.total_unresolved_flow = 0
self.flow_series = []
self.line_dictionary = {}
self.node_dictonary = {}
        # Maybe expendable; currently only used by the update method
self.y_bus = []
self.a_matrix = []
self.capacity_matrix = []
self.no_nodes = 0
def calculate_flow(self, supply):
"""Calculates the power flow for the current supply set, which is
provided by the txmultigenerator. The method
create_transmission_network needs to be run before calculating the
flow. No output is returned, but the total_unresolved_flow is changed.
Inputs:
supply: a timeseries of supply vectors
Output:
none
"""
# Loop through full timeperiod
t=0
while t < len(supply):
supply_vector = np.matrix(np.array(supply[t])[1:])
# Calculate the nodal phase angles
phase_angle_vector = self.b_inverse_matrix * supply_vector.T
# Calculate the line flows
flow_vector = self.a_d_matrix * phase_angle_vector
# Save flow in timeseries for later evaluation
self.flow_series.append(flow_vector)
t += 1
def analyse_network(self):
"""Analysis of the network. Returns a maximum flows that were assigned
to the lines and a capacity that would be sufficient to transport 90%
of the flows. These values can be later used to see where capacity
was exceded to recaculate the dispatch and eventually make network
updates.
Input:
None, uses self.flow_series as basis of calculation
Output:
line_maxLoad_in: maximum flow in timeseries in defined direction
on line
line_maxLoad_ag: maximum flow in timeseries against defined
direction on line
line_load90_in: 90% percentile flow in timeseries in defined
direction on line
line_load90_ag: 90% percentile flow in timeseries against defined
direction on line
"""
        # Divide flow_array into one with the positive values and one with neg.
        flow_array_pos = np.clip(np.array(self.flow_series), 0, np.Infinity)
        flow_array_neg = -1*(np.clip(np.array(self.flow_series), -np.Infinity, 0))
        # Calculate max load that occurred on the transmission line in the timeseries
        line_maxLoad_in = flow_array_pos.max(axis=0)
        # Against-direction maximum is taken from the negative-clipped flows
        line_maxLoad_ag = flow_array_neg.max(axis=0)
        # Calculate capacity that would be sufficient for 90% of the loads
        # on that line for the loads of that timeseries
        line_load90_in = np.percentile(flow_array_pos, 90, axis=0)
        line_load90_ag = np.percentile(flow_array_neg, 90, axis=0)
return line_maxLoad_in, line_maxLoad_ag, line_load90_in, line_load90_ag
def create_transmission_network(self, y_bus, a_matrix, capacity_matrix):
"""Prepares the transmission network for the flow calculation. Sets
up the matrixes needed for the flow calculation, namely b_inverse_matrix
and the a_d_matrix. Further creates a line_dictionary with information
about origin node, destination node, capacity and admittance value for
each line.
N: number of nodes
M: number of lines
Input:
y_bus: (NxN) nodal attmittance matrix with
y-bus(i,j) = -Y(i,j) for non-diagonal values and
y-bus(i,i) = Y(i,i) + sum(Y(i,j): for j:(1,N) & j != i)
In this simple DC power flow model the resistance is
neglected, therefore the admittance y = -j * b with b
being the suspectance.
a_matrix: (MxN) node-arc incidence matrix, with
a(m,n) = 1 if arc m has its starting point in node n
a(m,n) = -1 if arc m has its end point in node n#
a(m,n) = 0 otherwise
capacity_matrix: (NxN) matrix of the line capacities
capacity(i,j) = tranfer capacity between node i and node j
(note: capacity(i,j) can be different from capacity(j,i))
Output:
none, but saves mentioned results in self. variables
"""
self.no_edges = len(a_matrix)
self.no_nodes = len(a_matrix[1])
self.y_bus = y_bus
self.a_matrix = a_matrix
self.capacity_matrix = capacity_matrix
# Calculate b_inverse_matrix
# first calculate b_prime_matrix, which is the negative of the y-bus,
# but the diagonal elements are replaced by the sum of the b-values
# in the row of the respective element.
# shape: (N-1) x (N-1)
b_prime_matrix = -1 * y_bus[1:,1:]
for i, row in enumerate(b_prime_matrix):
# replace diagonal elements with sum of all other elements of its row
b_prime_matrix[i][i] = sum(y_bus[i+1]) - y_bus[i+1][i+1]
self.b_inverse_matrix = np.linalg.inv(b_prime_matrix)
        # Calculate D-matrix and capacity_vector and create line_dictionary
d_matrix = np.zeros((self.no_edges,self.no_edges))
i=0
while i < self.no_edges:
row = list(a_matrix[i])
orig_id = row.index(1)
dest_id = row.index(-1)
d_matrix[i][i] = y_bus[orig_id][dest_id]
self.line_dictionary[i] = {'origin': orig_id, 'destination': dest_id,
'capacity_in':capacity_matrix[orig_id][dest_id],
'capacity_ag':capacity_matrix[dest_id][orig_id],
'Y':y_bus[orig_id][dest_id] }
i=i+1
# Calculate a_d_matrix
# := transfer admittance matrix
# (M x N-1)
# with a_d(line i, node j) := -b(i) if j is end node of line
# b(i) if j is start node of line
self.a_d_matrix = np.matrix(d_matrix) * np.matrix(a_matrix)[:,1:]
def update_transmission_network(self, origin_id, dest_id, cap_incr_in,
cap_incr_ag, new_y):
"""Updates the capacity and y-bus of the transmission network
according to the input values and returns a cost value.
        ### PRELIMINARY VERSION ###
to do:
-better cost calculation, based on different types of updates,
maybe just 2 or 3 different options with a fixed capacity increase
-...
Inputs:
origin_id: id of starting node
dest_id: id of end node
cap_incr_in: capacity update in direction of line
cap_incr_ag: capacity update against direction of line
new_y: new admittance value for y_bus
Output:
cost: investment cost for capacity increase
"""
cost = 0
new_capacity_matrix = self.capacity_matrix
new_y_bus = self.y_bus
new_a_matrix = self.a_matrix
# Check if nodes existed before
if origin_id < self.no_nodes and dest_id < self.no_nodes:
# Calculate distance for cost calculation with Haversine Formula
lat1, lat2, lon1, lon2 = map(math.radians,
[self.node_dictonary[dest_id]['y_loc'],
self.node_dictonary[origin_id]['y_loc'],
self.node_dictonary[origin_id]['x_loc'],
self.node_dictonary[dest_id]['x_loc']])
dlon = abs(lon1 - lon2)
dlat = abs(lat1 - lat2)
a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2)\
* (math.sin(dlon/2))**2
c = 2 * math.atan2( math.sqrt(a), math.sqrt(1-a) )
distance = 6373 * c
# Check further if connection existed before
if self.capacity_matrix[origin_id][dest_id] != 0 or \
self.capacity_matrix[dest_id][origin_id] != 0:
# Simple case: increase capacity and update Y
new_capacity_matrix[origin_id][dest_id] += cap_incr_in
new_capacity_matrix[dest_id][origin_id] += cap_incr_ag
new_y_bus[origin_id][dest_id] = new_y
new_y_bus[dest_id][origin_id] = new_y
cost = 1.4 * distance
else:
# New line, but existing nodes
new_capacity_matrix[origin_id][dest_id] += cap_incr_in
new_capacity_matrix[dest_id][origin_id] += cap_incr_ag
new_y_bus[origin_id][dest_id] = new_y
new_y_bus[dest_id][origin_id] = new_y
cost = 1.4 *distance
# Update a_matrix
a_row = [0]*self.no_nodes
a_row[origin_id] = 1
a_row[dest_id] = -1
new_a_matrix.append(a_row)
# Calculate costs
cost = max(cap_incr_in, cap_incr_ag) * 1.5
else:
# New nodes must be added.
# supply vector length must be adjusted
cost = 1
self.create_transmission_network(new_y_bus, new_a_matrix, new_capacity_matrix)
return cost
def draw_network(self, flow_vector, supply, filename):
"""Creates a plot of the network with the flows using Networkx.
"""
g = nx.DiGraph()
label1 = {} # node label
label_node2 = {}
label2 = {} # line label
pos1 = {}
line_attributes = {}
# Preparing the nodes
for node in self.node_dictonary:
g.add_node(node)
pos1[node] = (self.node_dictonary[node]["x_loc"], \
self.node_dictonary[node]["y_loc"])
label1[node] = self.node_dictonary[node]["name"][:3]
node += 1
# Adjusting position to improve readability
# if test as easy way to only adjust node positions if NEM network
# is used, otherwise leave as they are
if self.node_dictonary[0]['name'] == "MELBOURNE":
pos1[1] = (pos1[1][0],pos1[1][1]-1) #LATROBE
pos1[2] = (pos1[2][0]-0.1,pos1[2][1]+0.4) #CVIC
pos1[5] = (pos1[5][0]-1.3,pos1[5][1]-1) #GEELONG
pos1[6] = (pos1[6][0]-0.9,pos1[6][1]-0.4) #SWVIC
pos1[8] = (pos1[8][0]+0.7,pos1[8][1]) #SYDNEY
pos1[10] = (pos1[10][0]-1,pos1[10][1]+0.3) #DARPOINT
pos1[11] = (pos1[11][0],pos1[11][1]+1) #WAGGA
pos1[12] = (pos1[12][0]+0.8,pos1[12][1]) #CANBERRA
pos1[13] = (pos1[13][0]-0.8,pos1[13][1]+0.2) #MTPIPER
pos1[14] = (pos1[14][0]-0.7,pos1[14][1]+1.5) #BAYSWATER
pos1[15] = (pos1[15][0],pos1[15][1]+1.5) #ARMIDALE
pos1[16] = (pos1[16][0]+0.7,pos1[16][1]+1.3) #ERARING
pos1[17] = (pos1[17][0]+0.6,pos1[17][1]+0.9) #BRISBANE
pos1[18] = (pos1[18][0]-0.5,pos1[18][1]+0.3) #TARONG
pos1[19] = (pos1[19][0]-0.8,pos1[19][1]) #ROMA
for node in self.node_dictonary:
if supply[0][node] != 0:
label_node2[node] = round(supply[0][node],1)
        # Preparing the lines
for line in self.line_dictionary:
origin = self.line_dictionary[line]["origin"]
dest = self.line_dictionary[line]["destination"]
g.add_edge(origin,dest)
            line_tuple = (origin, dest)
            line_attributes[line_tuple] = {}
            # Attributes
            # ---width
            if self.line_dictionary[line]['capacity_in'] > 10000:
                line_attributes[line_tuple]['width'] = 20
            elif self.line_dictionary[line]['capacity_in'] > 6000:
                line_attributes[line_tuple]['width'] = 15
            elif self.line_dictionary[line]['capacity_in'] > 2000:
                line_attributes[line_tuple]['width'] = 11
            elif self.line_dictionary[line]['capacity_in'] > 500:
                line_attributes[line_tuple]['width'] = 8
            else:
                line_attributes[line_tuple]['width'] = 4
            # ---color&style
            if abs(flow_vector.item(line)) > 0.01:
                if abs(flow_vector.item(line))/self.line_dictionary[line]['capacity_in'] > 1.0:
                    line_attributes[line_tuple]['color'] = 'red'
                    line_attributes[line_tuple]['style'] = 'solid'
                elif abs(flow_vector.item(line))/self.line_dictionary[line]['capacity_in'] > 0.8:
                    line_attributes[line_tuple]['color'] = 'orange'
                    line_attributes[line_tuple]['style'] = 'solid'
                else:
                    line_attributes[line_tuple]['color'] = 'green'
                    line_attributes[line_tuple]['style'] = 'solid'
            else:
                line_attributes[line_tuple]['color'] = 'black'
                line_attributes[line_tuple]['style'] = 'dotted'
#label with arrows for direction...
if pos1[origin][0] < pos1[dest][0]:
if flow_vector.item(line) > 0.001:
label2[(origin,dest)] = \
str(abs(round(flow_vector.item(line),1))) + " >>" +\
"\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
", " + str(self.line_dictionary[line]['Y'])+ ")"
elif flow_vector.item(line) < -0.001:
label2[(origin,dest)] = "<< " + \
str(abs(round(flow_vector.item(line),1))) +\
"\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_ag']) + \
", " + str(self.line_dictionary[line]['Y'])+ ")"
else:
label2[(origin,dest)] = str(abs(round(flow_vector.item(line),1))) +\
"\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
", " + str(self.line_dictionary[line]['Y'])+ ")"
else:
if flow_vector.item(line) > 0.001:
label2[(origin,dest)] = "<< " + \
str(abs(round(flow_vector.item(line),1))) +\
"\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
", " + str(self.line_dictionary[line]['Y'])+ ")"
elif flow_vector.item(line) < -0.001:
label2[(origin,dest)] = \
str(abs(round(flow_vector.item(line),1))) + " >>" +\
"\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_ag']) + \
", " + str(self.line_dictionary[line]['Y'])+ ")"
else:
label2[(origin,dest)] = str(abs(round(flow_vector.item(line),1))) +\
"\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
", " + str(self.line_dictionary[line]['Y'])+ ")"
#draw graph
plt.figure(1,figsize=(20,25))
nx.draw_networkx_nodes(g, pos = pos1,
with_labels = False,
node_color=(0,0,0.4),
node_size = 1000)
nx.draw_networkx_labels(g, pos=pos1,
labels = label1,
font_size = 9,
font_color='white',
font_weight = 'bold')
# Supply values as box next to node
for node in label_node2:
if label_node2[node]>0:
plt.text(pos1[node][0]-0.5, pos1[node][1]+0.3,
str(label_node2[node]),
size=10, weight='bold', stretch='condensed',
color='black', bbox=dict(facecolor='lightblue')
)
else:
plt.text(pos1[node][0]-0.4, pos1[node][1]+0.3,
str(label_node2[node]),
size=10, weight='bold', stretch='condensed',
color='black', bbox=dict(facecolor='orange')
)
for edge in g.edges():
nx.draw_networkx_edges(g, edgelist=[edge],
pos=pos1,
arrows = False,
width = line_attributes[edge]['width'],
edge_color = line_attributes[edge]['color'],
style = line_attributes[edge]['style'])
nx.draw_networkx_edge_labels(g, pos = pos1,
edge_labels = label2,
edge_text_pos = 0.5,
font_size=6,
font_weight = 'bold')
plt.savefig(filename + ".pdf")
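# --- Hedged usage sketch, not part of the original module: a hypothetical
# --- three-node, three-line DC network. Off-diagonal y_bus entries hold the
# --- line susceptances; node 0 serves as the slack/reference node.
if __name__ == '__main__':
    y_bus = np.array([[0., 1., 1.],
                      [1., 0., 1.],
                      [1., 1., 0.]])
    a_matrix = [[1, -1, 0],   # line 0: node 0 -> node 1
                [0, 1, -1],   # line 1: node 1 -> node 2
                [1, 0, -1]]   # line 2: node 0 -> node 2
    capacity_matrix = 100. * np.ones((3, 3))
    pf = PowerFlow()
    pf.create_transmission_network(y_bus, a_matrix, capacity_matrix)
    # One timestep: node 0 injects 1.0, nodes 1 and 2 each consume 0.5.
    pf.calculate_flow([[1.0, -0.5, -0.5]])
    print(pf.flow_series[0])
    print(pf.analyse_network())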
| 45.657407
| 97
| 0.523575
| 18,506
| 0.938248
| 0
| 0
| 0
| 0
| 0
| 0
| 7,545
| 0.382529
|
c6a5691106c675b51a0898624e8d7f4af7a6316d
| 11,893
|
py
|
Python
|
ecl/tests/unit/compute/v2/test_server.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | 5
|
2017-04-07T06:23:04.000Z
|
2019-11-19T00:52:34.000Z
|
ecl/tests/unit/compute/v2/test_server.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | 16
|
2018-09-12T11:14:40.000Z
|
2021-04-19T09:02:44.000Z
|
ecl/tests/unit/compute/v2/test_server.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | 14
|
2017-05-11T14:26:26.000Z
|
2021-07-14T14:00:06.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from ecl.compute.v2 import server
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'accessIPv4': '1',
'accessIPv6': '2',
'addresses': {'region': '3'},
'created': '2015-03-09T12:14:57.233772',
'flavorRef': '5',
'flavor': {'id': 'FLAVOR_ID', 'links': {}},
'hostId': '6',
'id': IDENTIFIER,
'imageRef': '8',
'image': {'id': 'IMAGE_ID', 'links': {}},
'links': '9',
'metadata': {'key': '10'},
'name': '11',
'progress': 12,
'tenant_id': '13',
'status': '14',
'updated': '2015-03-09T12:15:57.233772',
'user_id': '16',
'key_name': '17',
'OS-DCF:diskConfig': '18',
'OS-EXT-AZ:availability_zone': '19',
'OS-EXT-STS:power_state': '20',
'OS-EXT-STS:task_state': '21',
'OS-EXT-STS:vm_state': '22',
'os-extended-volumes:volumes_attached': '23',
'OS-SRV-USG:launched_at': '2015-03-09T12:15:57.233772',
'OS-SRV-USG:terminated_at': '2015-03-09T12:15:57.233772',
'security_groups': '26',
'adminPass': '27',
'personality': '28',
'block_device_mapping_v2': {'key': '29'},
'os:scheduler_hints': {'key': '30'},
'user_data': '31'
}
class TestServer(testtools.TestCase):
def setUp(self):
super(TestServer, self).setUp()
self.resp = mock.Mock()
self.resp.body = None
self.resp.json = mock.Mock(return_value=self.resp.body)
self.sess = mock.Mock()
self.sess.post = mock.Mock(return_value=self.resp)
def test_basic(self):
sot = server.Server()
self.assertEqual('server', sot.resource_key)
self.assertEqual('servers', sot.resources_key)
self.assertEqual('/servers', sot.base_path)
self.assertEqual('compute', sot.service.service_type)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_get)
self.assertTrue(sot.allow_update)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
self.assertDictEqual({"image": "image",
"flavor": "flavor",
"name": "name",
"status": "status",
"host": "host",
"changes_since": "changes-since"},
sot._query_mapping._mapping)
def test_make_it(self):
sot = server.Server(**EXAMPLE)
self.assertEqual(EXAMPLE['accessIPv4'], sot.access_ipv4)
self.assertEqual(EXAMPLE['accessIPv6'], sot.access_ipv6)
self.assertEqual(EXAMPLE['addresses'], sot.addresses)
self.assertEqual(EXAMPLE['created'], sot.created_at)
self.assertEqual(EXAMPLE['flavorRef'], sot.flavor_id)
self.assertEqual(EXAMPLE['flavor'], sot.flavor)
self.assertEqual(EXAMPLE['hostId'], sot.host_id)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['imageRef'], sot.image_id)
self.assertEqual(EXAMPLE['image'], sot.image)
self.assertEqual(EXAMPLE['links'], sot.links)
self.assertEqual(EXAMPLE['metadata'], sot.metadata)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['progress'], sot.progress)
self.assertEqual(EXAMPLE['tenant_id'], sot.project_id)
self.assertEqual(EXAMPLE['status'], sot.status)
self.assertEqual(EXAMPLE['updated'], sot.updated_at)
self.assertEqual(EXAMPLE['user_id'], sot.user_id)
self.assertEqual(EXAMPLE['key_name'], sot.key_name)
self.assertEqual(EXAMPLE['OS-DCF:diskConfig'], sot.disk_config)
self.assertEqual(EXAMPLE['OS-EXT-AZ:availability_zone'],
sot.availability_zone)
self.assertEqual(EXAMPLE['OS-EXT-STS:power_state'], sot.power_state)
self.assertEqual(EXAMPLE['OS-EXT-STS:task_state'], sot.task_state)
self.assertEqual(EXAMPLE['OS-EXT-STS:vm_state'], sot.vm_state)
self.assertEqual(EXAMPLE['os-extended-volumes:volumes_attached'],
sot.attached_volumes)
self.assertEqual(EXAMPLE['OS-SRV-USG:launched_at'], sot.launched_at)
self.assertEqual(EXAMPLE['OS-SRV-USG:terminated_at'],
sot.terminated_at)
self.assertEqual(EXAMPLE['security_groups'], sot.security_groups)
self.assertEqual(EXAMPLE['adminPass'], sot.admin_pass)
self.assertEqual(EXAMPLE['adminPass'], sot.adminPass)
self.assertEqual(EXAMPLE['personality'], sot.personality)
self.assertEqual(EXAMPLE['block_device_mapping_v2'],
sot.block_device_mapping_v2)
self.assertEqual(EXAMPLE['os:scheduler_hints'], sot.scheduler_hints)
self.assertEqual(EXAMPLE['user_data'], sot.user_data)
def test_detail(self):
sot = server.ServerDetail()
self.assertEqual('server', sot.resource_key)
self.assertEqual('servers', sot.resources_key)
self.assertEqual('/servers/detail', sot.base_path)
self.assertEqual('compute', sot.service.service_type)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_get)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
    def test_change_password(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.change_password(self.sess, 'a'))
url = 'servers/IDENTIFIER/action'
body = {"changePassword": {"adminPass": "a"}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_reboot(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.reboot(self.sess, 'HARD'))
url = 'servers/IDENTIFIER/action'
body = {"reboot": {"type": "HARD"}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_force_delete(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.force_delete(self.sess))
url = 'servers/IDENTIFIER/action'
body = {'forceDelete': None}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_rebuild(self):
sot = server.Server(**EXAMPLE)
# Let the translate pass through, that portion is tested elsewhere
sot._translate_response = lambda arg: arg
result = sot.rebuild(self.sess, name='noo', admin_password='seekr3t',
image='http://image/1', access_ipv4="12.34.56.78",
access_ipv6="fe80::100",
metadata={"meta var": "meta val"},
personality=[{"path": "/etc/motd",
"contents": "foo"}])
self.assertIsInstance(result, server.Server)
url = 'servers/IDENTIFIER/action'
body = {
"rebuild": {
"name": "noo",
"imageRef": "http://image/1",
"adminPass": "seekr3t",
"accessIPv4": "12.34.56.78",
"accessIPv6": "fe80::100",
"metadata": {"meta var": "meta val"},
"personality": [{"path": "/etc/motd", "contents": "foo"}],
"preserve_ephemeral": False
}
}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_rebuild_minimal(self):
sot = server.Server(**EXAMPLE)
# Let the translate pass through, that portion is tested elsewhere
sot._translate_response = lambda arg: arg
result = sot.rebuild(self.sess, name='nootoo',
admin_password='seekr3two',
image='http://image/2')
self.assertIsInstance(result, server.Server)
url = 'servers/IDENTIFIER/action'
body = {
"rebuild": {
"name": "nootoo",
"imageRef": "http://image/2",
"adminPass": "seekr3two",
"preserve_ephemeral": False
}
}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_resize(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.resize(self.sess, '2'))
url = 'servers/IDENTIFIER/action'
body = {"resize": {"flavorRef": "2",
"OS-DCF:diskConfig": "AUTO"}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_confirm_resize(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.confirm_resize(self.sess))
url = 'servers/IDENTIFIER/action'
body = {"confirmResize": None}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_revert_resize(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.revert_resize(self.sess))
url = 'servers/IDENTIFIER/action'
body = {"revertResize": None}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_create_image(self):
sot = server.Server(**EXAMPLE)
name = 'noo'
metadata = {'nu': 'image', 'created': 'today'}
self.assertIsNotNone(sot.create_image(self.sess, name, metadata))
url = 'servers/IDENTIFIER/action'
body = {"createImage": {'name': name, 'metadata': metadata}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_create_image_minimal(self):
sot = server.Server(**EXAMPLE)
name = 'noo'
self.assertIsNone(self.resp.body, sot.create_image(self.sess, name))
url = 'servers/IDENTIFIER/action'
body = {"createImage": {'name': name}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)
def test_add_security_group(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.add_security_group(self.sess, "group"))
url = 'servers/IDENTIFIER/action'
body = {"addSecurityGroup": {"name": "group"}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
def test_remove_security_group(self):
sot = server.Server(**EXAMPLE)
self.assertIsNone(sot.remove_security_group(self.sess, "group"))
url = 'servers/IDENTIFIER/action'
body = {"removeSecurityGroup": {"name": "group"}}
headers = {'Accept': ''}
self.sess.post.assert_called_with(
url, endpoint_filter=sot.service, json=body, headers=headers)
| 38.739414
| 79
| 0.600858
| 10,184
| 0.856302
| 0
| 0
| 0
| 0
| 0
| 0
| 3,285
| 0.276213
|
c6a5791901b1fc6361134fdaba0ad7eda0768c85
| 1,577
|
py
|
Python
|
packages/diana/diana/connect/utils/orth_fiq.py
|
derekmerck/diana-star
|
78aa7badb27677a1f5c83d744852f659e2541567
|
[
"MIT"
] | null | null | null |
packages/diana/diana/connect/utils/orth_fiq.py
|
derekmerck/diana-star
|
78aa7badb27677a1f5c83d744852f659e2541567
|
[
"MIT"
] | null | null | null |
packages/diana/diana/connect/utils/orth_fiq.py
|
derekmerck/diana-star
|
78aa7badb27677a1f5c83d744852f659e2541567
|
[
"MIT"
] | null | null | null |
# import logging
# from pprint import pformat
from diana.utils.dicom import DicomLevel
def find_item_query(item):
"""
Have some information about the dixel, want to find the STUID, SERUID, INSTUID
Returns a _list_ of dictionaries with matches, retrieves any if "retrieve" flag
"""
q = {}
keys = {}
# All levels have these
keys[DicomLevel.STUDIES] = ['PatientID',
'PatientName',
'PatientBirthDate',
'PatientSex',
'StudyInstanceUID',
'StudyDate',
'StudyTime',
'AccessionNumber']
# Series level has these
keys[DicomLevel.SERIES] = keys[DicomLevel.STUDIES] + \
['SeriesInstanceUID',
'SeriesDescription',
'ProtocolName',
'SeriesNumber',
'NumberOfSeriesRelatedInstances',
'Modality']
# For instance level, use the minimum
keys[DicomLevel.INSTANCES] = ['SOPInstanceUID', 'SeriesInstanceUID']
def add_key(q, key, dixel):
q[key] = dixel.meta.get(key, '')
return q
for k in keys[item.level]:
q = add_key(q, k, item)
if item.level == DicomLevel.STUDIES and item.meta.get('Modality'):
q['ModalitiesInStudy'] = item.meta.get('Modality')
# logging.debug(pformat(q))
query = {'Level': str(item.level),
'Query': q}
return query
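# --- Hedged usage sketch, not part of the original module: _FakeDixel is a
# --- hypothetical stand-in for a diana Dixel, exposing `level` and `meta`.
if __name__ == '__main__':
    class _FakeDixel:
        level = DicomLevel.STUDIES
        meta = {'PatientID': '12345',
                'AccessionNumber': 'A100',
                'Modality': 'CT'}
    print(find_item_query(_FakeDixel()))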
| 30.326923
| 83
| 0.521877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 642
| 0.407102
|
c6a59cf0b7d0aebf7e2f62e142e7553ec2e18c60
| 32,332
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/io/ascii/tests/test_ascii.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/io/ascii/tests/test_ascii.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/io/ascii/tests/test_ascii.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import os
import unittest
import numpy as np
from obspy import Trace, UTCDateTime, read
from obspy.io.ascii.core import (_determine_dtype, _is_slist, _is_tspair,
_read_slist, _read_tspair, _write_slist,
_write_tspair)
from obspy.core.util import NamedTemporaryFile
class ASCIITestCase(unittest.TestCase):
"""
"""
def setUp(self):
# Directory where the test files are located
self.path = os.path.dirname(__file__)
def test_is_slist_file(self):
"""
Testing SLIST file format.
"""
testfile = os.path.join(self.path, 'data', 'slist.ascii')
self.assertEqual(_is_slist(testfile), True)
testfile = os.path.join(self.path, 'data', 'slist_2_traces.ascii')
self.assertEqual(_is_slist(testfile), True)
testfile = os.path.join(self.path, 'data', 'tspair.ascii')
self.assertEqual(_is_slist(testfile), False)
# not existing file should fail
testfile = os.path.join(self.path, 'data', 'xyz')
self.assertEqual(_is_slist(testfile), False)
def test_read_slist_file_single_trace(self):
"""
Read SLIST file test via obspy.core.ascii._read_slist.
"""
testfile = os.path.join(self.path, 'data', 'slist.ascii')
# read
stream = _read_slist(testfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 635)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[0].data[0:4], data)
# check last 4 samples
data = [761, 755, 748, 746]
np.testing.assert_array_almost_equal(stream[0].data[-4:], data)
def test_read_slist_file_multiple_traces(self):
"""
Read SLIST file test via obspy.core.ascii._read_slist.
"""
testfile = os.path.join(self.path, 'data', 'slist_2_traces.ascii')
# read
stream = _read_slist(testfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 635)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[0].data[0:4], data)
# check last 4 samples
data = [761, 755, 748, 746]
np.testing.assert_array_almost_equal(stream[0].data[-4:], data)
# second trace
self.assertEqual(stream[1].stats.network, 'XX')
self.assertEqual(stream[1].stats.station, 'TEST')
self.assertEqual(stream[1].stats.location, '')
self.assertEqual(stream[1].stats.channel, 'BHE')
self.assertEqual(stream[1].stats.sampling_rate, 40.0)
self.assertEqual(stream[1].stats.npts, 630)
self.assertEqual(stream[1].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[1].stats.calib, 1.0e-00)
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[1].data[0:4], data)
# check last 4 samples
data = [781, 785, 778, 772]
np.testing.assert_array_almost_equal(stream[1].data[-4:], data)
def test_read_slist_file_head_only(self):
"""
Read SLIST file test via obspy.core.ascii._read_slist.
"""
testfile = os.path.join(self.path, 'data', 'slist.ascii')
# read
stream = _read_slist(testfile, headonly=True)
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 635)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(len(stream[0].data), 0)
def test_read_slist_file_encoding(self):
"""
Read SLIST file test via obspy.core.ascii._read_slist.
"""
# float32
testfile = os.path.join(self.path, 'data', 'slist_float.ascii')
stream = _read_slist(testfile)
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 12)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
np.testing.assert_array_almost_equal(stream[0].data, data, decimal=2)
# unknown encoding
testfile = os.path.join(self.path, 'data', 'slist_unknown.ascii')
self.assertRaises(NotImplementedError, _read_slist, testfile)
def test_is_tspair_file(self):
"""
Testing TSPAIR file format.
"""
testfile = os.path.join(self.path, 'data', 'tspair.ascii')
self.assertEqual(_is_tspair(testfile), True)
testfile = os.path.join(self.path, 'data', 'tspair_2_traces.ascii')
self.assertEqual(_is_tspair(testfile), True)
testfile = os.path.join(self.path, 'data', 'slist.ascii')
self.assertEqual(_is_tspair(testfile), False)
# not existing file should fail
testfile = os.path.join(self.path, 'data', 'xyz')
self.assertEqual(_is_tspair(testfile), False)
def test_read_tspair_file_single_trace(self):
"""
Read TSPAIR file test via obspy.core.ascii._read_tspair.
"""
testfile = os.path.join(self.path, 'data', 'tspair.ascii')
# read
stream = _read_tspair(testfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 635)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[0].data[0:4], data)
# check last 4 samples
data = [761, 755, 748, 746]
np.testing.assert_array_almost_equal(stream[0].data[-4:], data)
def test_read_tspair_file_multiple_traces(self):
"""
Read TSPAIR file test via obspy.core.ascii._read_tspair.
"""
testfile = os.path.join(self.path, 'data', 'tspair_2_traces.ascii')
# read
stream = _read_tspair(testfile)
stream.verify()
# sort traces to ensure comparable results
stream.sort()
self.assertEqual(stream[1].stats.network, 'XX')
self.assertEqual(stream[1].stats.station, 'TEST')
self.assertEqual(stream[1].stats.location, '')
self.assertEqual(stream[1].stats.channel, 'BHZ')
self.assertEqual(stream[1].stats.sampling_rate, 40.0)
self.assertEqual(stream[1].stats.npts, 635)
self.assertEqual(stream[1].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[1].stats.calib, 1.0e-00)
self.assertEqual(stream[1].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[1].data[0:4], data)
# check last 4 samples
data = [761, 755, 748, 746]
np.testing.assert_array_almost_equal(stream[1].data[-4:], data)
# second trace
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHE')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 630)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[0].data[0:4], data)
# check last 4 samples
data = [781, 785, 778, 772]
np.testing.assert_array_almost_equal(stream[0].data[-4:], data)
def test_read_tspair_head_only(self):
"""
Read TSPAIR file test via obspy.core.ascii._read_tspair.
"""
testfile = os.path.join(self.path, 'data', 'tspair.ascii')
# read
stream = _read_tspair(testfile, headonly=True)
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 635)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
self.assertEqual(len(stream[0].data), 0)
def test_read_tspair_file_encoding(self):
"""
Read TSPAIR file test via obspy.core.ascii._read_tspair.
"""
# float32
testfile = os.path.join(self.path, 'data', 'tspair_float.ascii')
stream = _read_tspair(testfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 12)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
np.testing.assert_array_almost_equal(stream[0].data, data, decimal=2)
# unknown encoding
testfile = os.path.join(self.path, 'data', 'tspair_unknown.ascii')
self.assertRaises(NotImplementedError, _read_tspair, testfile)
def test_write_tspair(self):
"""
Write TSPAIR file test via obspy.core.ascii._write_tspair.
"""
# float32
testfile = os.path.join(self.path, 'data', 'tspair_float.ascii')
stream_orig = _read_tspair(testfile)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_tspair(stream_orig, tmpfile)
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertEqual(
lines[0].strip(),
'TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, ' +
'2008-01-15T00:00:00.025000, TSPAIR, FLOAT, Counts')
self.assertEqual(
lines[1].strip(),
'2008-01-15T00:00:00.025000 +1.8500999450e+02')
# read again
stream = _read_tspair(tmpfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 12)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
np.testing.assert_array_almost_equal(stream[0].data, data,
decimal=2)
# compare raw header
with open(testfile, 'rt') as f:
lines_orig = f.readlines()
with open(tmpfile, 'rt') as f:
lines_new = f.readlines()
self.assertEqual(lines_orig[0], lines_new[0])
def test_write_tspair_custom_fmt(self):
"""
Write TSPAIR file test via obspy.core.ascii._write_tspair.
"""
# float
testfile_orig = os.path.join(self.path, 'data', 'tspair_float.ascii')
testfile = os.path.join(self.path, 'data',
'tspair_float_custom_fmt.ascii')
stream_orig = _read_tspair(testfile_orig)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_tspair(stream_orig, tmpfile, custom_fmt='%3.14f')
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertEqual(
lines[0].strip(),
'TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, ' +
'2008-01-15T00:00:00.025000, TSPAIR, FLOAT, Counts')
self.assertEqual(
lines[1].strip(),
'2008-01-15T00:00:00.025000 185.00999450000000')
# read again
stream = _read_tspair(tmpfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 12)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
np.testing.assert_array_almost_equal(stream[0].data, data,
decimal=2)
# compare raw header
with open(testfile, 'rt') as f:
lines_orig = f.readlines()
with open(tmpfile, 'rt') as f:
lines_new = f.readlines()
self.assertEqual(lines_orig[0], lines_new[0])
def test_write_tspair_custom_fmt_custom(self):
"""
Write TSPAIR file test via obspy.core.ascii._write_tspair.
"""
# float
testfile_orig = os.path.join(self.path, 'data', 'tspair_float.ascii')
stream_orig = _read_tspair(testfile_orig)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_tspair(stream_orig, tmpfile, custom_fmt='%+r')
self.assertRaises(NotImplementedError, _read_tspair, tmpfile)
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertEqual(
lines[0].strip(),
'TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, ' +
'2008-01-15T00:00:00.025000, TSPAIR, CUSTOM, Counts')
self.assertEqual(
lines[1].strip(),
'2008-01-15T00:00:00.025000 185.0099945')
def test_write_tspair_file_multiple_traces(self):
"""
Write TSPAIR file test via obspy.core.ascii._write_tspair.
"""
testfile = os.path.join(self.path, 'data', 'tspair_2_traces.ascii')
stream_orig = _read_tspair(testfile)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_tspair(stream_orig, tmpfile)
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertTrue(lines[0].startswith('TIMESERIES'))
self.assertIn('TSPAIR', lines[0])
self.assertEqual(lines[1], '2008-01-15T00:00:00.025000 185\n')
# test issue #321 (problems in time stamping)
self.assertEqual(lines[-1], '2008-01-15T00:00:15.750000 772\n')
# read again
stream = _read_tspair(tmpfile)
stream.verify()
# sort traces to ensure comparable results
stream.sort()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHE')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 630)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[0].data[0:4], data)
# check last 4 samples
data = [781, 785, 778, 772]
np.testing.assert_array_almost_equal(stream[0].data[-4:], data)
# second trace
self.assertEqual(stream[1].stats.network, 'XX')
self.assertEqual(stream[1].stats.station, 'TEST')
self.assertEqual(stream[1].stats.location, '')
self.assertEqual(stream[1].stats.channel, 'BHZ')
self.assertEqual(stream[1].stats.sampling_rate, 40.0)
self.assertEqual(stream[1].stats.npts, 635)
self.assertEqual(stream[1].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[1].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[1].data[0:4], data)
# check last 4 samples
data = [761, 755, 748, 746]
np.testing.assert_array_almost_equal(stream[1].data[-4:], data)
def test_write_slist(self):
"""
Write SLIST file test via obspy.core.ascii._write_tspair.
"""
# float32
testfile = os.path.join(self.path, 'data', 'slist_float.ascii')
stream_orig = _read_slist(testfile)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_slist(stream_orig, tmpfile)
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertEqual(
lines[0].strip(),
'TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, ' +
'2008-01-15T00:00:00.025000, SLIST, FLOAT, Counts')
self.assertEqual(
lines[1].strip(),
'+1.8500999450e+02\t+1.8102000430e+02\t+1.8502999880e+02\t' +
'+1.8903999330e+02\t+1.9405000310e+02\t+2.0505999760e+02')
# read again
stream = _read_slist(tmpfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 12)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
np.testing.assert_array_almost_equal(stream[0].data, data,
decimal=2)
# compare raw header
with open(testfile, 'rt') as f:
lines_orig = f.readlines()
with open(tmpfile, 'rt') as f:
lines_new = f.readlines()
self.assertEqual(lines_orig[0], lines_new[0])
def test_write_slist_custom_fmt_float(self):
"""
Write SLIST file test via obspy.core.ascii._write_tspair.
"""
# float
testfile_orig = os.path.join(self.path, 'data', 'slist_float.ascii')
testfile = os.path.join(self.path, 'data',
'slist_float_custom_fmt.ascii')
stream_orig = _read_slist(testfile_orig)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_slist(stream_orig, tmpfile, custom_fmt='%3.14f')
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertEqual(
lines[0].strip(),
'TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, ' +
'2008-01-15T00:00:00.025000, SLIST, FLOAT, Counts')
self.assertEqual(
lines[1].strip(),
'185.00999450000000\t181.02000430000001\t' +
'185.02999879999999\t189.03999329999999\t' +
'194.05000310000000\t205.05999760000000')
# read again
stream = _read_slist(tmpfile)
stream.verify()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHZ')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 12)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
np.testing.assert_array_almost_equal(stream[0].data, data,
decimal=2)
# compare raw header
with open(testfile, 'rt') as f:
lines_orig = f.readlines()
with open(tmpfile, 'rt') as f:
lines_new = f.readlines()
self.assertEqual(lines_orig[0], lines_new[0])
def test_write_slist_custom_fmt_custom(self):
"""
Write SLIST file test via obspy.core.ascii._write_tspair.
"""
# float
testfile_orig = os.path.join(self.path, 'data', 'slist_float.ascii')
stream_orig = _read_slist(testfile_orig)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_slist(stream_orig, tmpfile, custom_fmt='%+r')
self.assertRaises(NotImplementedError, _read_slist, tmpfile)
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertEqual(
lines[0].strip(),
'TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, ' +
'2008-01-15T00:00:00.025000, SLIST, CUSTOM, Counts')
self.assertEqual(
lines[1].strip(),
'185.0099945\t181.02000430000001\t' +
'185.02999879999999\t189.03999329999999\t' +
'194.0500031\t205.0599976')
def test_write_slist_file_multiple_traces(self):
"""
Write SLIST file test via obspy.core.ascii._write_tspair.
"""
testfile = os.path.join(self.path, 'data', 'slist_2_traces.ascii')
stream_orig = _read_slist(testfile)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write
_write_slist(stream_orig, tmpfile)
# look at the raw data
with open(tmpfile, 'rt') as f:
lines = f.readlines()
self.assertTrue(lines[0].startswith('TIMESERIES'))
self.assertIn('SLIST', lines[0])
self.assertEqual(lines[1].strip(), '185\t181\t185\t189\t194\t205')
# read again
stream = _read_slist(tmpfile)
stream.verify()
# sort traces to ensure comparable results
stream.sort()
self.assertEqual(stream[0].stats.network, 'XX')
self.assertEqual(stream[0].stats.station, 'TEST')
self.assertEqual(stream[0].stats.location, '')
self.assertEqual(stream[0].stats.channel, 'BHE')
self.assertEqual(stream[0].stats.sampling_rate, 40.0)
self.assertEqual(stream[0].stats.npts, 630)
self.assertEqual(stream[0].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[0].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[0].data[0:4], data)
# check last 4 samples
data = [781, 785, 778, 772]
np.testing.assert_array_almost_equal(stream[0].data[-4:], data)
# second trace
self.assertEqual(stream[1].stats.network, 'XX')
self.assertEqual(stream[1].stats.station, 'TEST')
self.assertEqual(stream[1].stats.location, '')
self.assertEqual(stream[1].stats.channel, 'BHZ')
self.assertEqual(stream[1].stats.sampling_rate, 40.0)
self.assertEqual(stream[1].stats.npts, 635)
self.assertEqual(stream[1].stats.starttime,
UTCDateTime("2008-01-15T00:00:00.025000"))
self.assertEqual(stream[1].stats.calib, 1.0e-00)
self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
# check first 4 samples
data = [185, 181, 185, 189]
np.testing.assert_array_almost_equal(stream[1].data[0:4], data)
# check last 4 samples
data = [761, 755, 748, 746]
np.testing.assert_array_almost_equal(stream[1].data[-4:], data)
def test_write_small_trace(self):
"""
Tests writing Traces containing 0, 1 or 2 samples only.
"""
for format in ['SLIST', 'TSPAIR']:
for num in range(0, 4):
tr = Trace(data=np.arange(num))
with NamedTemporaryFile() as tf:
tempfile = tf.name
tr.write(tempfile, format=format)
# test results
st = read(tempfile, format=format)
self.assertEqual(len(st), 1)
self.assertEqual(len(st[0]), num)
def test_float_sampling_rates_write_and_read(self):
"""
        Tests writing and reading Traces with floating-point sampling rates,
        including rates below 1 Hz.
"""
tr = Trace(np.arange(10))
check_sampling_rates = (0.000000001, 1.000000001, 100.000000001,
99.999999999, 1.5, 1.666666, 10000.0001)
for format in ['SLIST', 'TSPAIR']:
for sps in check_sampling_rates:
tr.stats.sampling_rate = sps
with NamedTemporaryFile() as tf:
tempfile = tf.name
tr.write(tempfile, format=format)
# test results
got = read(tempfile, format=format)[0]
self.assertEqual(tr.stats.sampling_rate,
got.stats.sampling_rate)
def test_determine_dtype(self):
"""
Tests _determine_dtype for properly returned types
"""
float_formats = ['%+10.10e', '%+.10e', '%.3e',
'%+10.10E', '%+.10E', '%.3E',
'%+10.10f', '%+.10f', '%.3f',
'%+10.10F', '%+.10F', '%.3F',
'%+10.10g', '%+.10g', '%.3g',
'%+10.10G', '%+.10G', '%.3G']
int_formats = ['%+10.10i', '%+.10i', '%.3i',
'%+10.10I', '%+.10I', '%.3I',
'%+10.10d', '%+.10d', '%.3d',
'%+10.10D', '%+.10D', '%.3D']
custom_formats = ['%+10.10s', '%+.10s', '%.3s',
'%+10.10x', '%+.10x', '%.3x',
'%+10.10k', '%+.10k', '%.3k',
'%+10.10z', '%+.10z', '%.3z',
'%+10.10w', '%+.10w', '%.3w',
'%+10.10q', '%+.10q', '%.3q']
for format in float_formats:
self.assertEqual('FLOAT', _determine_dtype(format))
for format in int_formats:
self.assertEqual('INTEGER', _determine_dtype(format))
for format in custom_formats:
self.assertEqual('CUSTOM', _determine_dtype(format))
self.assertRaises(ValueError, _determine_dtype, '')
def test_regression_against_mseed2ascii(self):
"""
Regression test against issue #2165.
"""
mseed_file = os.path.join(self.path, "data", "miniseed_record.mseed")
mseed2ascii_file = os.path.join(
self.path, "data", "mseed2ascii_miniseed_record.txt")
with NamedTemporaryFile() as tf:
# Write as TSPAIR
read(mseed_file).write(tf.name, format="TSPAIR")
# Check all lines aside from the first as they differ.
with open(tf.name, "rt") as fh:
actual_lines = fh.readlines()[1:]
with open(mseed2ascii_file, "rt") as fh:
expected_lines = fh.readlines()[1:]
for actual, expected in zip(actual_lines, expected_lines):
self.assertEqual(actual.strip(), expected.strip())
def suite():
return unittest.makeSuite(ASCIITestCase, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 45.346424
| 79
| 0.578034
| 31,670
| 0.979525
| 0
| 0
| 0
| 0
| 0
| 0
| 6,566
| 0.203081
|
c6ac1de06c12088cfd7b5e0c3570e7c36efacf0e
| 68
|
py
|
Python
|
libMap/__init__.py
|
ChrisSJard/PythonGUI-WinApplication1
|
54f658e7d345a63d09bff683a635d01d57856e6e
|
[
"Apache-2.0"
] | null | null | null |
libMap/__init__.py
|
ChrisSJard/PythonGUI-WinApplication1
|
54f658e7d345a63d09bff683a635d01d57856e6e
|
[
"Apache-2.0"
] | null | null | null |
libMap/__init__.py
|
ChrisSJard/PythonGUI-WinApplication1
|
54f658e7d345a63d09bff683a635d01d57856e6e
|
[
"Apache-2.0"
] | null | null | null |
'''
MAP v-SCREEN gargle test - Shimadzu 8020 H
Date: 30/11/2020
'''
| 17
| 43
| 0.661765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 1
|
c6ac4f68c9c3e35b48eadd2793c372b95b8f9ebd
| 1,793
|
py
|
Python
|
zinnia/converters.py
|
emencia/django-blog-xinnia
|
ab19b55477ce7003b6f0712f8bd12af3501c4829
|
[
"BSD-3-Clause"
] | null | null | null |
zinnia/converters.py
|
emencia/django-blog-xinnia
|
ab19b55477ce7003b6f0712f8bd12af3501c4829
|
[
"BSD-3-Clause"
] | null | null | null |
zinnia/converters.py
|
emencia/django-blog-xinnia
|
ab19b55477ce7003b6f0712f8bd12af3501c4829
|
[
"BSD-3-Clause"
] | 1
|
2021-06-17T14:02:21.000Z
|
2021-06-17T14:02:21.000Z
|
"""URL converters for the Zinnia project"""
class FourDigitYearConverter:
"""
Pattern converter for a Year on four digits exactly
"""
regex = '[0-9]{4}'
def to_python(self, value):
return int(value)
def to_url(self, value):
# Enforce integer since some code may try to pass a number as a string
return '%04d' % int(value)
class TwoDigitMonthConverter:
"""
    Pattern converter for a Month on two digits exactly
"""
regex = '[0-9]{2}'
def to_python(self, value):
return int(value)
def to_url(self, value):
# Enforce integer since some code may try to pass a number as a string
return '%02d' % int(value)
class TwoDigitDayConverter(TwoDigitMonthConverter):
"""
    Pattern converter for a Day on two digits exactly.
    Just an explicit class which inherits from 'TwoDigitMonthConverter'.
"""
pass
class UsernamePathConverter:
"""
Pattern converter for Author username string
"""
regex = r'[a-zA-Z0-9_.+-@]+'
def to_python(self, value):
return value
def to_url(self, value):
return value
class PathPathConverter:
"""
Pattern converter for path string (such as ``foo/bar``)
"""
regex = r'[-\/\w]+'
def to_python(self, value):
return value
def to_url(self, value):
return value
class TagPathConverter:
"""
Pattern converter for tag string
"""
regex = '[^/]+'
def to_python(self, value):
return value
def to_url(self, value):
return value
class TokenPathConverter:
"""
Pattern converter for token string
"""
regex = r'[\dA-Z]+'
def to_python(self, value):
return value
def to_url(self, value):
return value
| 20.146067
| 78
| 0.611824
| 1,730
| 0.964863
| 0
| 0
| 0
| 0
| 0
| 0
| 768
| 0.428332
|
c6acd732e85ef3e6872505baf917d917ef7c0ec1
| 8,045
|
py
|
Python
|
nisse/routes/slack/command_handlers/report_command_handler.py
|
nexocodecom/nisse.io
|
58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb
|
[
"MIT"
] | null | null | null |
nisse/routes/slack/command_handlers/report_command_handler.py
|
nexocodecom/nisse.io
|
58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb
|
[
"MIT"
] | 42
|
2018-07-20T14:15:48.000Z
|
2019-09-26T05:44:21.000Z
|
nisse/routes/slack/command_handlers/report_command_handler.py
|
nexocodecom/nisse.io
|
58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb
|
[
"MIT"
] | null | null | null |
import logging
import os
import uuid
from typing import List
from flask import current_app
from flask.config import Config
from flask_injector import inject
from slackclient import SlackClient
from werkzeug.utils import secure_filename
from nisse.models.DTO import PrintParametersDto
from nisse.models.slack.common import ActionType
from nisse.models.slack.common import LabelSelectOption
from nisse.models.slack.dialog import Element, Dialog
from nisse.models.slack.message import Attachment, Message, Action, TextSelectOption
from nisse.models.slack.payload import ReportGenerateFormPayload
from nisse.routes.slack.command_handlers.slack_command_handler import SlackCommandHandler
from nisse.services.project_service import ProjectService
from nisse.services.reminder_service import ReminderService
from nisse.services.report_service import ReportService
from nisse.services.user_service import UserService
from nisse.services.xlsx_document_service import XlsxDocumentService
from nisse.utils import string_helper
from nisse.utils.date_helper import TimeRanges
from nisse.utils.date_helper import get_start_end_date
from nisse.utils.validation_helper import list_find
class ReportCommandHandler(SlackCommandHandler):
@inject
def __init__(self, config: Config, logger: logging.Logger, user_service: UserService,
slack_client: SlackClient, project_service: ProjectService,
reminder_service: ReminderService, report_service: ReportService, sheet_generator: XlsxDocumentService):
super().__init__(config, logger, user_service, slack_client, project_service, reminder_service)
self.report_service = report_service
self.sheet_generator = sheet_generator
def handle(self, payload: ReportGenerateFormPayload):
if payload.submission:
date_to = payload.submission.day_to
date_from = payload.submission.day_from
selected_user_id = None
if hasattr(payload.submission, 'user'):
selected_user_id = payload.submission.user
project_id = payload.submission.project
print_param = PrintParametersDto()
print_param.date_to = date_to
print_param.date_from = date_from
print_param.project_id = project_id
# todo cache projects globally e.g. Flask-Cache
projects = self.project_service.get_projects()
selected_project = list_find(lambda p: str(p.project_id) == print_param.project_id, projects)
user = self.get_user_by_slack_user_id(payload.user.id)
selected_user = None
if user.role.role != 'admin':
print_param.user_id = user.user_id
# if admin select proper user
elif selected_user_id is not None:
print_param.user_id = selected_user_id
selected_user = self.user_service.get_user_by_id(selected_user_id)
# generate report
path_for_report = os.path.join(current_app.instance_path, current_app.config["REPORT_PATH"],
secure_filename(str(uuid.uuid4())) + ".xlsx")
load_data = self.report_service.load_report_data(print_param)
self.sheet_generator.save_report(path_for_report, print_param.date_from, print_param.date_to, load_data)
im_channel = self.slack_client.api_call("im.open", user=payload.user.id)
if not im_channel["ok"]:
self.logger.error("Can't open im channel for: " + str(selected_user_id) + '. ' + im_channel["error"])
selected_project_name = "all projects"
if selected_project is not None:
selected_project_name = selected_project.name
resp = self.slack_client.api_call(
"files.upload",
channels=im_channel['channel']['id'],
file=open(path_for_report, 'rb'),
title=string_helper.generate_xlsx_title(selected_user, selected_project_name, print_param.date_from,
print_param.date_to),
filetype="xlsx",
filename=string_helper.generate_xlsx_file_name(selected_user, selected_project_name,
print_param.date_from,
print_param.date_to)
)
try:
os.remove(path_for_report)
except OSError as err:
self.logger.error("Cannot delete report file {0}".format(err))
if not resp["ok"]:
self.logger.error("Can't send report: " + resp.get("error"))
else:
self.show_dialog({'trigger_id': payload.trigger_id}, None, next(iter(payload.actions.values())))
def create_dialog(self, command_body, argument, action) -> Dialog:
selected_period = None
if action and len(action.selected_options):
selected_period = next(iter(action.selected_options), None).value
start_end = get_start_end_date(selected_period)
# todo cache it globally e.g. Flask-Cache
projects = self.project_service.get_projects()
project_options_list: List[LabelSelectOption] = [LabelSelectOption(label=p.name, value=p.project_id) for p in
projects]
# admin see users list
user = self.get_user_by_slack_user_id(action.name)
elements: Element = [
Element(label="Date from", type="text", name='day_from', placeholder="Specify date", value=start_end[0]),
Element(label="Date to", type="text", name='day_to', placeholder="Specify date", value=start_end[1]),
Element(label="Project", type="select", name='project', optional='true', placeholder="Select a project",
options=project_options_list)
]
dialog: Dialog = Dialog(title="Generate report", submit_label="Generate",
callback_id=string_helper.get_full_class_name(ReportGenerateFormPayload), elements=elements)
if action.name:
prompted_user = self.get_user_by_slack_user_id(action.name)
if user.role.role == 'admin':
users = self.user_service.get_users()
user_options_list = [LabelSelectOption(label=string_helper.get_user_name(p), value=p.user_id) for p in
users]
dialog.elements.append(
Element(label="User", value=(prompted_user.user_id if prompted_user else None),
optional='true', type="select", name='user', placeholder="Select user",
options=user_options_list))
return dialog
def report_pre_dialog(self, command_body, arguments, action):
message_text = "I'm going to generate report..."
inner_user_id = None
if len(arguments):
user = arguments[0]
inner_user_id = self.extract_slack_user_id(user)
self.get_user_by_slack_user_id(inner_user_id)
actions = [
Action(
name=inner_user_id if inner_user_id is not None else command_body['user_id'],
text="Select time range...",
type=ActionType.SELECT.value,
options=[TextSelectOption(text=tr.value, value=tr.value) for tr in TimeRanges]
)
]
attachments = [
Attachment(
text="Generate report for",
fallback="Select time range to report",
color="#3AA3E3",
attachment_type="default",
callback_id=string_helper.get_full_class_name(ReportGenerateFormPayload),
actions=actions
)
]
return Message(
text=message_text,
response_type="ephemeral",
mrkdwn=True,
attachments=attachments
).dump()
| 43.252688
| 121
| 0.640895
| 6,870
| 0.853947
| 0
| 0
| 492
| 0.061156
| 0
| 0
| 712
| 0.088502
|
c6acd7e0d4951d5c3034a6f821df7b9a82c0e2f9
| 369
|
py
|
Python
|
days/day01/part1.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
days/day01/part1.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
days/day01/part1.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
from helpers import inputs
def solution(day):
depths = inputs.read_to_list(f"inputs/{day}.txt")
part1_total = 0
for index, depth in enumerate(depths):
if index - 1 >= 0:
diff = int(depth) - int(depths[index - 1])
if diff > 0:
part1_total += 1
return f"Day 01 Part 1 Total Depth Increase: {part1_total}"
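# An equivalent, more compact formulation of the same count, shown as a
# hypothetical alternative (it assumes the same inputs helper is available):
def solution_pairwise(day):
    depths = [int(d) for d in inputs.read_to_list(f"inputs/{day}.txt")]
    # zip pairs each depth with its successor; count the strict increases
    total = sum(1 for prev, cur in zip(depths, depths[1:]) if cur > prev)
    return f"Day 01 Part 1 Total Depth Increase: {total}"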
| 28.384615
| 63
| 0.588076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.192412
|
c6ad38af41dda6b3a428b74b7d6a179478b67cda
| 583
|
py
|
Python
|
packages/syft/src/syft/core/node/common/node_table/setup.py
|
callezenwaka/PySyft
|
2545c302441cfe727ec095c4f9aa136bff02be32
|
[
"Apache-1.1"
] | 2
|
2022-02-18T03:48:27.000Z
|
2022-03-05T06:13:57.000Z
|
packages/syft/src/syft/core/node/common/node_table/setup.py
|
callezenwaka/PySyft
|
2545c302441cfe727ec095c4f9aa136bff02be32
|
[
"Apache-1.1"
] | 3
|
2021-11-17T15:34:03.000Z
|
2021-12-08T14:39:10.000Z
|
packages/syft/src/syft/core/node/common/node_table/setup.py
|
callezenwaka/PySyft
|
2545c302441cfe727ec095c4f9aa136bff02be32
|
[
"Apache-1.1"
] | 1
|
2021-08-19T12:23:01.000Z
|
2021-08-19T12:23:01.000Z
|
# third party
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
# relative
from . import Base
class SetupConfig(Base):
__tablename__ = "setup"
id = Column(Integer(), primary_key=True, autoincrement=True)
domain_name = Column(String(255), default="")
node_id = Column(String(32), default="")
def __str__(self) -> str:
return f"<Domain Name: {self.domain_name}>"
def create_setup(id: int, domain_name: str, node_id: str) -> SetupConfig:
return SetupConfig(id=id, domain_name=domain_name, node_id=node_id)
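# Minimal usage sketch for the model above. Assumptions: Base is a standard
# SQLAlchemy declarative base and an in-memory SQLite engine suffices here.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def _demo_setup_config():
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)           # creates the "setup" table
    session = sessionmaker(bind=engine)()
    session.add(create_setup(id=1, domain_name="demo-domain", node_id="abc123"))
    session.commit()
    return session.query(SetupConfig).first()  # str() -> <Domain Name: demo-domain>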
| 25.347826
| 73
| 0.716981
| 296
| 0.507719
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.120069
|
c6afea6ff7fcfa0cc419b40bc7e78312c3c4768e
| 1,983
|
py
|
Python
|
third_party/blink/renderer/bindings/scripts/web_idl/make_copy_test.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
third_party/blink/renderer/bindings/scripts/web_idl/make_copy_test.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
third_party/blink/renderer/bindings/scripts/web_idl/make_copy_test.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from .composition_parts import Component
from .composition_parts import Identifier
from .make_copy import make_copy
class MakeCopyTest(unittest.TestCase):
def test_primitives(self):
self.assertEqual(None, make_copy(None))
self.assertEqual(True, make_copy(True))
self.assertEqual(False, make_copy(False))
self.assertEqual(42, make_copy(42))
self.assertEqual(3.14, make_copy(3.14))
self.assertEqual('abc', make_copy('abc'))
def test_primitives_subclasses(self):
# Identifier and Component are subclasses of str. Copies of them
# shouldn't change their type.
original = (Identifier('x'), Component('x'))
copy = make_copy(original)
self.assertEqual(original[0], copy[0])
self.assertEqual(original[1], copy[1])
self.assertIsInstance(copy[0], Identifier)
self.assertIsInstance(copy[1], Component)
def test_object_identity(self):
# A diamond structure must be preserved when making a copy.
# /--> B --\
# A --> D
# \--> C --/
# A1->B1, A1->C1, B1->D1, C1->D1 will be copied as;
# A2->B2, A2->C2, B2->D2, C2->D2 where X2 is a copy of X1.
class Obj(object):
pass
class Ref(object):
def __init__(self, value=None):
self.value = value
obj = Obj()
ref1 = Ref(obj)
ref2 = Ref(obj)
self.assertNotEqual(ref1, ref2)
self.assertIs(ref1.value, ref2.value)
copy = make_copy((ref1, ref2))
self.assertIsInstance(copy, tuple)
self.assertIsInstance(copy[0], Ref)
self.assertIsInstance(copy[1], Ref)
self.assertIsNot(copy[0], copy[1])
self.assertIs(copy[0].value, copy[1].value)
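# A sketch of how a copy helper can satisfy test_object_identity above: a
# memo dict keyed by id() ensures every original object is copied exactly
# once, so shared references stay shared in the copy (the same idea as
# copy.deepcopy's memo). This is an illustration, not the real make_copy;
# it assumes plain attribute-based objects without __slots__.
def make_copy_sketch(obj, memo=None):
    memo = {} if memo is None else memo
    if obj is None or isinstance(obj, (bool, int, float)):
        return obj                             # immutable, safe to share
    if id(obj) in memo:
        return memo[id(obj)]                   # preserves the diamond
    if isinstance(obj, str):
        copied = type(obj)(obj)                # keeps Identifier/Component type
    elif isinstance(obj, tuple):
        copied = tuple(make_copy_sketch(v, memo) for v in obj)
    elif isinstance(obj, list):
        copied = [make_copy_sketch(v, memo) for v in obj]
    else:
        copied = object.__new__(type(obj))
        memo[id(obj)] = copied                 # register before recursing
        for name, value in vars(obj).items():
            setattr(copied, name, make_copy_sketch(value, memo))
        return copied
    memo[id(obj)] = copied
    return copied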
| 34.189655
| 73
| 0.616742
| 1,684
| 0.849218
| 0
| 0
| 0
| 0
| 0
| 0
| 494
| 0.249117
|
c6b2844ec8d83bfcefb1163893e3fea8102bf2bc
| 1,554
|
py
|
Python
|
04 - Classes-inheritance-oops/39-classes-numeric-normal-magic-methods.py
|
python-demo-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | 2
|
2019-08-23T06:05:55.000Z
|
2019-08-26T03:56:07.000Z
|
04 - Classes-inheritance-oops/39-classes-numeric-normal-magic-methods.py
|
python-lang-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | null | null | null |
04 - Classes-inheritance-oops/39-classes-numeric-normal-magic-methods.py
|
python-lang-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | 4
|
2020-10-01T07:16:07.000Z
|
2021-07-17T07:55:08.000Z
|
# HEAD
# Classes - Magic Methods - Normal Numeric Magic Methods
# DESCRIPTION
# Describes the magic methods of classes
# add, sub, mul, floordiv, div, truediv, mod,
# divmod, pow, lshift, rshift, and, or, xor
# RESOURCES
#
# https://rszalski.github.io/magicmethods/
# Normal arithmetic operators
# Now, we cover the typical binary operators (and a function or two): +, -, * and the like. These are, for the most part, pretty self-explanatory.
# __add__(self, other)
# Implements addition.
# __sub__(self, other)
# Implements subtraction.
# __mul__(self, other)
# Implements multiplication.
# __floordiv__(self, other)
# Implements integer division using the // operator.
# __div__(self, other)
# Implements division using the / operator.
# __truediv__(self, other)
# Implements true division. Note that this only works when from __future__ import division is in effect.
# __mod__(self, other)
# Implements modulo using the % operator.
# __divmod__(self, other)
# Implements behavior for long division using the divmod() built-in function.
# __pow__(self, other)
# Implements behavior for exponents using the ** operator.
# __lshift__(self, other)
# Implements left bitwise shift using the << operator.
# __rshift__(self, other)
# Implements right bitwise shift using the >> operator.
# __and__(self, other)
# Implements bitwise and using the & operator.
# __or__(self, other)
# Implements bitwise or using the | operator.
# __xor__(self, other)
# Implements bitwise xor using the ^ operator.
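# A short, hypothetical illustration of the operators documented above
# (the Vector class is invented for this example, not part of the notes):
class Vector:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __add__(self, other):              # enables v1 + v2
        return Vector(self.x + other.x, self.y + other.y)
    def __sub__(self, other):              # enables v1 - v2
        return Vector(self.x - other.x, self.y - other.y)
    def __mul__(self, scalar):             # enables v * 3
        return Vector(self.x * scalar, self.y * scalar)
    def __repr__(self):
        return f"Vector({self.x}, {self.y})"
# Vector(1, 2) + Vector(3, 4)  ->  Vector(4, 6)
# Vector(1, 2) * 3             ->  Vector(3, 6)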
| 34.533333
| 146
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,510
| 0.971686
|
c6b2c3233e24382da55f5267e87bb737b994481e
| 11,384
|
py
|
Python
|
abcdeep/argsutils.py
|
Conchylicultor/AbcDeep
|
6fcfc03a1a516ccd760201bb004098e6f6fe0e7e
|
[
"Apache-2.0"
] | 1
|
2017-09-10T14:13:39.000Z
|
2017-09-10T14:13:39.000Z
|
abcdeep/argsutils.py
|
Conchylicultor/AbcDeep
|
6fcfc03a1a516ccd760201bb004098e6f6fe0e7e
|
[
"Apache-2.0"
] | null | null | null |
abcdeep/argsutils.py
|
Conchylicultor/AbcDeep
|
6fcfc03a1a516ccd760201bb004098e6f6fe0e7e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Extention of argparse to automatically save/restore command line arguments
The other classes can easily add new arguments to the default ones
"""
import sys
import ast # For literal eval
import collections
import argparse
import abcdeep.otherutils as otherutils
from abcdeep.otherutils import cprint, TermMsg
# TODO: Create a new class allowing subprograms to be added (with .add_subparsers).
# It should allow defining a default subparser to use if no subprogram is given.
# This can be done using the dest argument of add_subparsers and testing it after
# the parse_args call (problem: the default help won't be printed)
class ArgGroup(otherutils.OrderedAttr):
GLOBAL = 'Global' # Meta arguments: not restored by the program
DATASET = 'Dataset' # Network input options
PREPROCESSING = 'Preprocessing' # Dataset Creation: done once for all models
SIMULATOR = 'Simulator' # RL environement options (recording,...)
NETWORK = 'Network' # Network architecture options (layers, hidden params,...)
TRAINING = 'Training' # (learning rate, batch size,...)
class ArgParser:
FCT_FLAG = '_arg_flag'
def __init__(self):
self.parser = argparse.ArgumentParser() # TODO: Allow to use add_subparsers
self.args = None
        # Correspondence structures
self._groups = {} # Contains the argparse group object
self._group2key = collections.OrderedDict(
[(k, []) for k in ArgGroup._attr_values]
)
self._key2group = {} # Useless ?
self._key2action = {}
self._overwritten = {} # Argument modified by overwrite()
self._given = set() # Argument passed by the command line
@staticmethod
def regiser_args(group):
""" Decorator which flag the function as adding new arguments to the parser
The decorated function has to have the following signature: Fct(parser)
Args:
group (str): The group on which add the arguments will be added. If
the group is not in ArgumentGroup, it will be created
"""
def decorator(f):
setattr(f, ArgParser.FCT_FLAG, group)
return f
return decorator
def register_cls(self, cls):
""" Parse the given class and add the registered arguments to the parser.
The class must have flagged some of its methods with `regiser_args`
Args:
cls: The class to parse
"""
        # TODO: A common bug is when a class has declared two functions with
        # the same name and regiser_args. The second function erases the first
        # one, thus one of the functions is never registered to the parser.
        # Is there a way to detect duplicate names within a class?
for fct in otherutils.gen_attr(cls):
if hasattr(fct, ArgParser.FCT_FLAG):
group_name = getattr(fct, ArgParser.FCT_FLAG)
# Create the group if not exist
if group_name not in self._groups:
self._groups[group_name] = self.parser.add_argument_group(group_name)
self._group2key.setdefault(group_name, [])
# Add arguments to the group
fct(self._groups[group_name])
def overwrite(self, **kwargs):
""" Allow to replace the defaults values of the arguments.
Must be called before `parse_args`.
`parse_args` will raise AttributeError if the argument given here
don't exist
"""
self._overwritten.update(kwargs)
def parse_args(self, argv=None):
""" This is where the command line is actually parsed
Args:
argv (List[str]): Customs arguments to parse
Return:
argparse.Namespace
"""
#self.parser.add_argument_group('Global')
        # When parsing the args, the namespace keys are also saved
for group_name, group in self._groups.items():
for action in group._group_actions: # HACK: Get the action list
if action.dest == 'help': # HACK: filter args added by argparse
continue
self._group2key[group_name].append(action.dest)
self._key2group[action.dest] = group_name
self._key2action[action.dest] = action
# The defaults arguments can be overwritten by .overwrite()
if action.dest in self._overwritten:
action.default = self._overwritten[action.dest]
del self._overwritten[action.dest] # Default overwritten
if len(self._overwritten):
            raise AttributeError('Unknown argparse overwritten key: {}'.format(
list(self._overwritten.keys())[0])
)
# Parse the given args
self.args = self.parser.parse_args(argv)
# Save the given arguments (TODO: Is there a better way to extract the args ?)
if argv is None:
argv = sys.argv
def filter_arg(a): # TODO: Support for other prefix_char
return a.startswith('--')
for a in filter(filter_arg, argv):
self._given.add(a.lstrip('-'))
# TODO: Also save which args have been modified from default values (check after ?)
return self.args
def save_args(self, config):
"""
        All arguments have to implement __str__. The program just tries a
naive conversion.
Args:
config (obj): configparser object
"""
values = vars(self.args)
for group_name, actions in self._group2key.items():
if not actions: # Skip positional and optional arguments
continue
config[group_name] = {}
for a in actions:
config[group_name][a] = str(values[a])
def restore_args(self, config):
"""
If a value cannot be fetched (ex: a new argument has been added), the
default value will be used instead (None if not set).
The arguments passed with the command line are not restored but kept
Args:
config (obj): configparser object
"""
for group_name in config.sections():
if group_name == ArgGroup.GLOBAL: # Meta arguments are not restored
continue
for key in config[group_name]:
action = self._key2action.get(key, None)
if not action: # Additionnal key is present (don't exist in argparse)
cprint(
                        'Warning: Could not restore param <{}/{}>. Ignoring...'.format(group_name, key),
color=TermMsg.WARNING
)
continue
if key in self._given: # The command lines arguments overwrite the given ones
continue
                # TODO: This code is critical so should be more carefully
                # tested (it should also assert that the inferred type corresponds
                # to the expected one)
                # TODO: The paths are saved in their absolute form (models are not
                # portable from one PC to another, or break if directory names change)
value = config[group_name][key]
if action.type is not str or value == 'None': # Use isinstance instead ?
value = ast.literal_eval(value)
setattr(self.args, key, value)
# If a key does not exist, the attribute won't be modified and
# the default argparse value will be used (TODO: Should detect
# and print warning if this was the case ?)
def print_args(self):
""" Print the registered arguments with their values.
Need to be called after `parse_args`
"""
values = vars(self.args)
cprint('############### Parameters ###############', color=TermMsg.H1)
for group_title, actions in self._group2key.items():
if not actions: # Skip positional and optional arguments
continue
cprint('[{}]'.format(group_title), color=TermMsg.H2)
for a in actions:
color = TermMsg.STRONG if a in self._given else None
cprint('{}: {}'.format(a, values[a]), color=color)
print()
class ArgDefault: # TODO: Delete and dispatch among all modules
""" Define the usuals arguments for machine learning research programs
"""
@staticmethod
@ArgParser.regiser_args(ArgGroup.GLOBAL)
def global_args(parser):
parser.add_argument('--test', nargs='*', default=None, help='test mode')
parser.add_argument('--model_tag', type=str, default=None, help='tag to differentiate which model to store/load')
parser.add_argument('--dir_models', type=str, default=None, help='root folder in which the models are saved and loaded')
parser.add_argument('--reset', action='store_true', help='use this if you want to ignore the previous model present on the model directory (Warning: the model will be destroyed with all the folder content)')
@staticmethod
@ArgParser.regiser_args(ArgGroup.DATASET)
def dataset_args(parser):
parser.add_argument('--dataset', type=str, default=None, help='Dataset to use')
parser.add_argument('--dataset_tag', type=str, default=None, help='tag to differentiate which dataset to store/load (dataset can have different hyperparameters too)')
parser.add_argument('--dir_data', type=str, default=None, help='folder which contains the dataset')
@staticmethod
@ArgParser.regiser_args(ArgGroup.TRAINING)
def training_args(parser):
parser.add_argument('--num_epochs', type=int, default=0, help='maximum number of epochs to run (0 for infinity)')
        parser.add_argument('--keep_every', type=float, default=0.3, help='if this option is set, a saved model will be kept every x hours (can be a fraction) (Warning: make sure you have enough free disk space or increase save_every)')
parser.add_argument('--save_every', type=int, default=1000, help='nb of mini-batch step before creating a model checkpoint')
parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') # TODO: Replace by more flexible module
        parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--visualize_every', type=int, default=0, help='nb of mini-batch step before generating some outputs (allow to visualize the training)')
        parser.add_argument('--testing_curve', type=int, default=10, help='Also record the testing curve every x iterations (given by this parameter)')
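# Hypothetical end-to-end usage sketch of the classes above. The Model class
# and its --hidden_size argument are invented for illustration; everything
# else follows the documented API (regiser_args / register_cls / parse_args).
class Model:
    @staticmethod
    @ArgParser.regiser_args(ArgGroup.NETWORK)
    def network_args(parser):
        parser.add_argument('--hidden_size', type=int, default=128, help='number of hidden units')
parser = ArgParser()
parser.register_cls(ArgDefault)  # default Global/Dataset/Training arguments
parser.register_cls(Model)       # picks up the flagged network_args
args = parser.parse_args(['--hidden_size', '256'])
parser.print_args()              # --hidden_size is highlighted as given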
| 45.903226
| 236
| 0.634048
| 10,073
| 0.884838
| 0
| 0
| 2,723
| 0.239195
| 0
| 0
| 6,016
| 0.528461
|
c6b65a93ba4bfc063204ecefff708893dd868984
| 265
|
py
|
Python
|
listas/gabarito/lista4CT/exe-1.py
|
yujinishioka/computacional-thinking-python
|
38abfc00d94c45cc5a7d4303e57cb8f0cab4272a
|
[
"MIT"
] | 1
|
2022-03-08T21:54:49.000Z
|
2022-03-08T21:54:49.000Z
|
listas/gabarito/lista4CT/exe-1.py
|
yujinishioka/computacional-thinking-python
|
38abfc00d94c45cc5a7d4303e57cb8f0cab4272a
|
[
"MIT"
] | null | null | null |
listas/gabarito/lista4CT/exe-1.py
|
yujinishioka/computacional-thinking-python
|
38abfc00d94c45cc5a7d4303e57cb8f0cab4272a
|
[
"MIT"
] | null | null | null |
soma = 0
print("0 to stop")
numero = int(input("Enter a number: "))
while numero != 0:
    if numero % 2 == 0:
        soma += numero
    print("0 to stop")
    numero = int(input("Enter a number: "))
print("The total is", soma)
| 18.928571
| 42
| 0.562264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.278195
|
c6b714242593972ca83e47dd6c36c7d8b16188e4
| 41,780
|
py
|
Python
|
venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/schedule_rule_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/schedule_rule_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/schedule_rule_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/power_scheduler/v1/schedule_rule.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from spaceone.api.core.v1 import query_pb2 as spaceone_dot_api_dot_core_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/power_scheduler/v1/schedule_rule.proto',
package='spaceone.api.power_scheduler.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n3spaceone/api/power_scheduler/v1/schedule_rule.proto\x12\x1fspaceone.api.power_scheduler.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto\x1a spaceone/api/core/v1/query.proto\"\xd1\x02\n\x19\x43reateScheduleRuleRequest\x12\x13\n\x0bschedule_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x41\n\x05state\x18\x03 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12<\n\trule_type\x18\x04 \x01(\x0e\x32).spaceone.api.power_scheduler.v1.RuleType\x12\x33\n\x04rule\x18\x05 \x03(\x0b\x32%.spaceone.api.power_scheduler.v1.Rule\x12\x10\n\x08priority\x18\x06 \x01(\x05\x12%\n\x04tags\x18\x0b \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07user_id\x18\x17 \x01(\t\x12\x11\n\tdomain_id\x18\x16 \x01(\t\"\xf5\x01\n\x19UpdateScheduleRuleRequest\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x41\n\x05state\x18\x03 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12\x33\n\x04rule\x18\x04 \x03(\x0b\x32%.spaceone.api.power_scheduler.v1.Rule\x12%\n\x04tags\x18\x0b \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x16 \x01(\t\"B\n\x13ScheduleRuleRequest\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"S\n\x16GetScheduleRuleRequest\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\x12\x0c\n\x04only\x18\x03 \x03(\t\"\xf8\x01\n\x11ScheduleRuleQuery\x12*\n\x05query\x18\x01 \x01(\x0b\x32\x1b.spaceone.api.core.v1.Query\x12\x18\n\x10schedule_rule_id\x18\x02 \x01(\t\x12\x13\n\x0bschedule_id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08priority\x18\x05 \x01(\x05\x12\x41\n\x05state\x18\x06 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x11\n\tdomain_id\x18\t \x01(\t\">\n\x04Rule\x12\r\n\x03\x64\x61y\x18\x01 \x01(\tH\x00\x12\x0e\n\x04\x64\x61te\x18\x02 \x01(\tH\x00\x12\r\n\x05times\x18\x03 \x03(\x05\x42\x08\n\x06\x66ormat\"\x85\x03\n\x08RuleInfo\x12\x18\n\x10schedule_rule_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x41\n\x05state\x18\x03 \x01(\x0e\x32\x32.spaceone.api.power_scheduler.v1.ScheduleRuleState\x12<\n\trule_type\x18\x04 \x01(\x0e\x32).spaceone.api.power_scheduler.v1.RuleType\x12\x33\n\x04rule\x18\x05 \x03(\x0b\x32%.spaceone.api.power_scheduler.v1.Rule\x12\x10\n\x08priority\x18\x06 \x01(\x05\x12\x13\n\x0bschedule_id\x18\x07 \x01(\t\x12%\n\x04tags\x18\x0c \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\nproject_id\x18\x15 \x01(\t\x12\x11\n\tdomain_id\x18\x16 \x01(\t\x12\x12\n\ncreated_by\x18\x17 \x01(\t\x12\x12\n\ncreated_at\x18\x1f \x01(\t\"\\\n\tRulesInfo\x12:\n\x07results\x18\x01 \x03(\x0b\x32).spaceone.api.power_scheduler.v1.RuleInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\"`\n\x15ScheduleRuleStatQuery\x12\x34\n\x05query\x18\x01 \x01(\x0b\x32%.spaceone.api.core.v1.StatisticsQuery\x12\x11\n\tdomain_id\x18\x02 \x01(\t*B\n\x11ScheduleRuleState\x12\x13\n\x0fRULE_STATE_NONE\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\x0b\n\x07STOPPED\x10\x02*7\n\x08RuleType\x12\x12\n\x0eRULE_TYPE_NONE\x10\x00\x12\x0b\n\x07ROUTINE\x10\x01\x12\n\n\x06TICKET\x10\x02\x32\xea\x07\n\x0cScheduleRule\x12\x9b\x01\n\x06\x63reate\x12:.spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest\x1a).spaceone.api.power_scheduler.v1.RuleInfo\"*\x82\xd3\xe4\x93\x02$\"\"/power-scheduler/v1/schedule-rules\x12\xad\x01\n\x06update\x12:.spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest\x1a).spaceone.api.power_scheduler.v1.RuleInfo\"<\x82\xd3\xe4\x93\x02\x36\x1a\x34/power-scheduler/v1/schedule-rule/{schedule_rule_id}\x12\x94\x01\n\x06\x64\x65lete\x12\x34.spaceone.api.power_scheduler.v1.ScheduleRuleRequest\x1a\x16.google.protobuf.Empty\"<\x82\xd3\xe4\x93\x02\x36*4/power-scheduler/v1/schedule-rule/{schedule_rule_id}\x12\xa7\x01\n\x03get\x12\x37.spaceone.api.power_scheduler.v1.GetScheduleRuleRequest\x1a).spaceone.api.power_scheduler.v1.RuleInfo\"<\x82\xd3\xe4\x93\x02\x36\x12\x34/power-scheduler/v1/schedule-rule/{schedule_rule_id}\x12\xbf\x01\n\x04list\x12\x32.spaceone.api.power_scheduler.v1.ScheduleRuleQuery\x1a*.spaceone.api.power_scheduler.v1.RulesInfo\"W\x82\xd3\xe4\x93\x02Q\x12\"/power-scheduler/v1/schedule-rulesZ+\")/power-scheduler/v1/schedule-rules/search\x12\x88\x01\n\x04stat\x12\x36.spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery\x1a\x17.google.protobuf.Struct\"/\x82\xd3\xe4\x93\x02)\"\'/power-scheduler/v1/schedule-rules/statb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,spaceone_dot_api_dot_core_dot_v1_dot_query__pb2.DESCRIPTOR,])
_SCHEDULERULESTATE = _descriptor.EnumDescriptor(
name='ScheduleRuleState',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleState',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='RULE_STATE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STOPPED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1851,
serialized_end=1917,
)
_sym_db.RegisterEnumDescriptor(_SCHEDULERULESTATE)
ScheduleRuleState = enum_type_wrapper.EnumTypeWrapper(_SCHEDULERULESTATE)
_RULETYPE = _descriptor.EnumDescriptor(
name='RuleType',
full_name='spaceone.api.power_scheduler.v1.RuleType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='RULE_TYPE_NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ROUTINE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TICKET', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1919,
serialized_end=1974,
)
_sym_db.RegisterEnumDescriptor(_RULETYPE)
RuleType = enum_type_wrapper.EnumTypeWrapper(_RULETYPE)
RULE_STATE_NONE = 0
RUNNING = 1
STOPPED = 2
RULE_TYPE_NONE = 0
ROUTINE = 1
TICKET = 2
_CREATESCHEDULERULEREQUEST = _descriptor.Descriptor(
name='CreateScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_id', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.schedule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_type', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.rule_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.rule', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='priority', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.priority', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.tags', index=6,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_id', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.user_id', index=7,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest.domain_id', index=8,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=212,
serialized_end=549,
)
_UPDATESCHEDULERULEREQUEST = _descriptor.Descriptor(
name='UpdateScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.rule', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.tags', index=4,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest.domain_id', index=5,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=797,
)
_SCHEDULERULEREQUEST = _descriptor.Descriptor(
name='ScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleRequest.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=799,
serialized_end=865,
)
_GETSCHEDULERULEREQUEST = _descriptor.Descriptor(
name='GetScheduleRuleRequest',
full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='only', full_name='spaceone.api.power_scheduler.v1.GetScheduleRuleRequest.only', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=867,
serialized_end=950,
)
_SCHEDULERULEQUERY = _descriptor.Descriptor(
name='ScheduleRuleQuery',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.schedule_rule_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schedule_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.schedule_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='priority', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.priority', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.state', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.project_id', index=6,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleQuery.domain_id', index=7,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=953,
serialized_end=1201,
)
_RULE = _descriptor.Descriptor(
name='Rule',
full_name='spaceone.api.power_scheduler.v1.Rule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='day', full_name='spaceone.api.power_scheduler.v1.Rule.day', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='date', full_name='spaceone.api.power_scheduler.v1.Rule.date', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='times', full_name='spaceone.api.power_scheduler.v1.Rule.times', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='format', full_name='spaceone.api.power_scheduler.v1.Rule.format',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1203,
serialized_end=1265,
)
_RULEINFO = _descriptor.Descriptor(
name='RuleInfo',
full_name='spaceone.api.power_scheduler.v1.RuleInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='schedule_rule_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.schedule_rule_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.RuleInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.power_scheduler.v1.RuleInfo.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule_type', full_name='spaceone.api.power_scheduler.v1.RuleInfo.rule_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rule', full_name='spaceone.api.power_scheduler.v1.RuleInfo.rule', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='priority', full_name='spaceone.api.power_scheduler.v1.RuleInfo.priority', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schedule_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.schedule_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.RuleInfo.tags', index=7,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.project_id', index=8,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.RuleInfo.domain_id', index=9,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_by', full_name='spaceone.api.power_scheduler.v1.RuleInfo.created_by', index=10,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='spaceone.api.power_scheduler.v1.RuleInfo.created_at', index=11,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1268,
serialized_end=1657,
)
_RULESINFO = _descriptor.Descriptor(
name='RulesInfo',
full_name='spaceone.api.power_scheduler.v1.RulesInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='spaceone.api.power_scheduler.v1.RulesInfo.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='spaceone.api.power_scheduler.v1.RulesInfo.total_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1659,
serialized_end=1751,
)
_SCHEDULERULESTATQUERY = _descriptor.Descriptor(
name='ScheduleRuleStatQuery',
full_name='spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1753,
serialized_end=1849,
)
_CREATESCHEDULERULEREQUEST.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_CREATESCHEDULERULEREQUEST.fields_by_name['rule_type'].enum_type = _RULETYPE
_CREATESCHEDULERULEREQUEST.fields_by_name['rule'].message_type = _RULE
_CREATESCHEDULERULEREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATESCHEDULERULEREQUEST.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_UPDATESCHEDULERULEREQUEST.fields_by_name['rule'].message_type = _RULE
_UPDATESCHEDULERULEREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_SCHEDULERULEQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._QUERY
_SCHEDULERULEQUERY.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_RULE.oneofs_by_name['format'].fields.append(
_RULE.fields_by_name['day'])
_RULE.fields_by_name['day'].containing_oneof = _RULE.oneofs_by_name['format']
_RULE.oneofs_by_name['format'].fields.append(
_RULE.fields_by_name['date'])
_RULE.fields_by_name['date'].containing_oneof = _RULE.oneofs_by_name['format']
_RULEINFO.fields_by_name['state'].enum_type = _SCHEDULERULESTATE
_RULEINFO.fields_by_name['rule_type'].enum_type = _RULETYPE
_RULEINFO.fields_by_name['rule'].message_type = _RULE
_RULEINFO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_RULESINFO.fields_by_name['results'].message_type = _RULEINFO
_SCHEDULERULESTATQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._STATISTICSQUERY
DESCRIPTOR.message_types_by_name['CreateScheduleRuleRequest'] = _CREATESCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['UpdateScheduleRuleRequest'] = _UPDATESCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['ScheduleRuleRequest'] = _SCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['GetScheduleRuleRequest'] = _GETSCHEDULERULEREQUEST
DESCRIPTOR.message_types_by_name['ScheduleRuleQuery'] = _SCHEDULERULEQUERY
DESCRIPTOR.message_types_by_name['Rule'] = _RULE
DESCRIPTOR.message_types_by_name['RuleInfo'] = _RULEINFO
DESCRIPTOR.message_types_by_name['RulesInfo'] = _RULESINFO
DESCRIPTOR.message_types_by_name['ScheduleRuleStatQuery'] = _SCHEDULERULESTATQUERY
DESCRIPTOR.enum_types_by_name['ScheduleRuleState'] = _SCHEDULERULESTATE
DESCRIPTOR.enum_types_by_name['RuleType'] = _RULETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('CreateScheduleRuleRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATESCHEDULERULEREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.CreateScheduleRuleRequest)
})
_sym_db.RegisterMessage(CreateScheduleRuleRequest)
UpdateScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('UpdateScheduleRuleRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATESCHEDULERULEREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.UpdateScheduleRuleRequest)
})
_sym_db.RegisterMessage(UpdateScheduleRuleRequest)
ScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('ScheduleRuleRequest', (_message.Message,), {
'DESCRIPTOR' : _SCHEDULERULEREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ScheduleRuleRequest)
})
_sym_db.RegisterMessage(ScheduleRuleRequest)
GetScheduleRuleRequest = _reflection.GeneratedProtocolMessageType('GetScheduleRuleRequest', (_message.Message,), {
'DESCRIPTOR' : _GETSCHEDULERULEREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.GetScheduleRuleRequest)
})
_sym_db.RegisterMessage(GetScheduleRuleRequest)
ScheduleRuleQuery = _reflection.GeneratedProtocolMessageType('ScheduleRuleQuery', (_message.Message,), {
'DESCRIPTOR' : _SCHEDULERULEQUERY,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ScheduleRuleQuery)
})
_sym_db.RegisterMessage(ScheduleRuleQuery)
Rule = _reflection.GeneratedProtocolMessageType('Rule', (_message.Message,), {
'DESCRIPTOR' : _RULE,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.Rule)
})
_sym_db.RegisterMessage(Rule)
RuleInfo = _reflection.GeneratedProtocolMessageType('RuleInfo', (_message.Message,), {
'DESCRIPTOR' : _RULEINFO,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.RuleInfo)
})
_sym_db.RegisterMessage(RuleInfo)
RulesInfo = _reflection.GeneratedProtocolMessageType('RulesInfo', (_message.Message,), {
'DESCRIPTOR' : _RULESINFO,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.RulesInfo)
})
_sym_db.RegisterMessage(RulesInfo)
ScheduleRuleStatQuery = _reflection.GeneratedProtocolMessageType('ScheduleRuleStatQuery', (_message.Message,), {
'DESCRIPTOR' : _SCHEDULERULESTATQUERY,
'__module__' : 'spaceone.api.power_scheduler.v1.schedule_rule_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ScheduleRuleStatQuery)
})
_sym_db.RegisterMessage(ScheduleRuleStatQuery)
_SCHEDULERULE = _descriptor.ServiceDescriptor(
name='ScheduleRule',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1977,
serialized_end=2979,
methods=[
_descriptor.MethodDescriptor(
name='create',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule.create',
index=0,
containing_service=None,
input_type=_CREATESCHEDULERULEREQUEST,
output_type=_RULEINFO,
serialized_options=b'\202\323\344\223\002$\"\"/power-scheduler/v1/schedule-rules',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule.update',
index=1,
containing_service=None,
input_type=_UPDATESCHEDULERULEREQUEST,
output_type=_RULEINFO,
serialized_options=b'\202\323\344\223\0026\0324/power-scheduler/v1/schedule-rule/{schedule_rule_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='delete',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule.delete',
index=2,
containing_service=None,
input_type=_SCHEDULERULEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0026*4/power-scheduler/v1/schedule-rule/{schedule_rule_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='get',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule.get',
index=3,
containing_service=None,
input_type=_GETSCHEDULERULEREQUEST,
output_type=_RULEINFO,
serialized_options=b'\202\323\344\223\0026\0224/power-scheduler/v1/schedule-rule/{schedule_rule_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='list',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule.list',
index=4,
containing_service=None,
input_type=_SCHEDULERULEQUERY,
output_type=_RULESINFO,
serialized_options=b'\202\323\344\223\002Q\022\"/power-scheduler/v1/schedule-rulesZ+\")/power-scheduler/v1/schedule-rules/search',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='stat',
full_name='spaceone.api.power_scheduler.v1.ScheduleRule.stat',
index=5,
containing_service=None,
input_type=_SCHEDULERULESTATQUERY,
output_type=google_dot_protobuf_dot_struct__pb2._STRUCT,
serialized_options=b'\202\323\344\223\002)\"\'/power-scheduler/v1/schedule-rules/stat',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SCHEDULERULE)
DESCRIPTOR.services_by_name['ScheduleRule'] = _SCHEDULERULE
# @@protoc_insertion_point(module_scope)
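# Hedged usage sketch (added for illustration; protoc does not emit this):
# the generated classes above are ordinary protobuf messages. The field name
# `domain_id` is taken from the descriptors in this file; the sample value is
# an assumption.
if __name__ == '__main__':
    query = ScheduleRuleStatQuery(domain_id='domain-1234')
    payload = query.SerializeToString()
    assert ScheduleRuleStatQuery.FromString(payload).domain_id == 'domain-1234'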
| 50.035928
| 4,517
| 0.769196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12,214
| 0.292341
|
c6b71b4c3e09976fe7726eb682b74cdf5af82966
| 990
|
py
|
Python
|
django_boost/admin/sites.py
|
toshiki-tosshi/django-boost
|
2431b743af2d976571d491ae232a5cb03c760b7e
|
[
"MIT"
] | null | null | null |
django_boost/admin/sites.py
|
toshiki-tosshi/django-boost
|
2431b743af2d976571d491ae232a5cb03c760b7e
|
[
"MIT"
] | null | null | null |
django_boost/admin/sites.py
|
toshiki-tosshi/django-boost
|
2431b743af2d976571d491ae232a5cb03c760b7e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.db.models import Model
__all__ = ["register_all"]
def register_all(models, admin_class=admin.ModelAdmin):
"""
Easily register Models to Django admin site.
::
from yourapp import models
from django_boost.admin.sites import register_all
register_all(models)
Register all models defined in `models.py` in Django admin site.
Custom admin classes are also available.
::
from your_app import models
from your_app import admin
from django_boost.admin.sites import register_all
register_all(models, admin_class=admin.CustomAdmin)
"""
for attr in dir(models):
attr = getattr(models, attr, None)
if isinstance(attr, type):
if issubclass(attr, Model) and not attr._meta.abstract:
try:
admin.site.register(attr, admin_class)
except admin.sites.AlreadyRegistered:
pass
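# Hedged usage sketch (illustration only): register_all() silently skips
# models that are already registered, so it is safe to call after manual
# registrations. The app and model names below are assumptions.
#
#   from myapp import models
#   admin.site.register(models.Article)   # manual registration first
#   register_all(models)                  # Article is skipped, the rest are added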
| 26.052632
| 70
| 0.651515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 511
| 0.516162
|
c6b79f701bcc0df19eeeaf217d68d4ce14a63d1a
| 251
|
py
|
Python
|
bot.py
|
White-ZacK/HLavalink
|
917a2a5abf3df2b2fbdff93709b9eb9e47c033aa
|
[
"MIT"
] | null | null | null |
bot.py
|
White-ZacK/HLavalink
|
917a2a5abf3df2b2fbdff93709b9eb9e47c033aa
|
[
"MIT"
] | null | null | null |
bot.py
|
White-ZacK/HLavalink
|
917a2a5abf3df2b2fbdff93709b9eb9e47c033aa
|
[
"MIT"
] | null | null | null |
import discord
import os
from discord.ext import commands
bot = commands.Bot(command_prefix=">")
TOKEN = os.environ.get('TOKEN')
@bot.event
async def on_ready():
print(f'{bot.user} has logged in.')
bot.load_extension('cogs.WVL')
bot.run(TOKEN)
| 17.928571
| 38
| 0.7251
| 0
| 0
| 0
| 0
| 103
| 0.410359
| 92
| 0.366534
| 48
| 0.191235
|
c6b7b42e398b5ad8a87b392745a1b79c63f44e1e
| 1,612
|
py
|
Python
|
sdk/ml/azure-ai-ml/tests/batch_services/unittests/test_batch_deployment_schema.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/ml/azure-ai-ml/tests/batch_services/unittests/test_batch_deployment_schema.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/ml/azure-ai-ml/tests/batch_services/unittests/test_batch_deployment_schema.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
import yaml
from azure.ai.ml._schema._deployment.batch.batch_deployment import BatchDeploymentSchema
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, BatchDeploymentOutputAction
from azure.ai.ml.entities._util import load_from_dict
from azure.ai.ml.entities import BatchDeployment
def load_batch_deployment_entity_from_yaml(path: str, context={}) -> BatchDeployment:
"""batch deployment yaml -> batch deployment entity"""
with open(path, "r") as f:
cfg = yaml.safe_load(f)
context.update({BASE_PATH_CONTEXT_KEY: Path(path).parent})
deployment = load_from_dict(BatchDeploymentSchema, cfg, context)
return deployment
@pytest.mark.unittest
class TestBatchDeploymentSchema:
def test_serialize_batch_deployment(self) -> None:
test_path = "./tests/test_configs/deployments/batch/batch_deployment_1.yaml"
batch_deployment_entity = load_batch_deployment_entity_from_yaml(test_path)
assert batch_deployment_entity
assert batch_deployment_entity.environment == "AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1"
assert batch_deployment_entity.compute == "cpu-cluster"
assert batch_deployment_entity.output_action == BatchDeploymentOutputAction.APPEND_ROW
assert batch_deployment_entity.output_file_name == "append_row.txt"
assert batch_deployment_entity.error_threshold == 10
assert batch_deployment_entity.mini_batch_size == 5
assert batch_deployment_entity.max_concurrency_per_instance == 5
assert batch_deployment_entity.resources.instance_count == 2
| 46.057143
| 99
| 0.782258
| 894
| 0.554591
| 0
| 0
| 916
| 0.568238
| 0
| 0
| 195
| 0.120968
|
c6b8d428a66aa6d3e2e0df39f78679dd2657686d
| 105
|
py
|
Python
|
Programs/functions/returnFunction.py
|
LuciKritZ/python
|
ed5500f5aad3cb15354ca5ebf71748029fc6ae77
|
[
"MIT"
] | null | null | null |
Programs/functions/returnFunction.py
|
LuciKritZ/python
|
ed5500f5aad3cb15354ca5ebf71748029fc6ae77
|
[
"MIT"
] | null | null | null |
Programs/functions/returnFunction.py
|
LuciKritZ/python
|
ed5500f5aad3cb15354ca5ebf71748029fc6ae77
|
[
"MIT"
] | null | null | null |
def display():
def message():
return "Hello"
return message
fun = display()
print(fun())
| 15
| 22
| 0.590476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.066667
|
c6badd66c9c53436c0cfcf31174d258e7727a76d
| 795
|
py
|
Python
|
test.py
|
Roulbac/GanSeg
|
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
|
[
"MIT"
] | 20
|
2019-04-13T07:07:49.000Z
|
2022-02-23T03:10:40.000Z
|
test.py
|
Roulbac/GanSeg
|
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
|
[
"MIT"
] | null | null | null |
test.py
|
Roulbac/GanSeg
|
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
|
[
"MIT"
] | 4
|
2019-04-13T13:50:39.000Z
|
2020-11-08T03:50:54.000Z
|
from options.test_parser import TestParser
from models import create_model, get_model_parsing_modifier
from datasets import create_dataset, get_dataset_parsing_modifier
parser = TestParser()
model_name = parser.get_model_name()
dataset_name = parser.get_dataset_name()
print('Model name: {}'.format(model_name))
print('Dataset name: {}'.format(dataset_name))
model_parser_modifier = get_model_parsing_modifier(model_name)
model_parser_modifier(parser, is_train=False)
dataset_parser_modifier = get_dataset_parsing_modifier(dataset_name)
dataset_parser_modifier(parser, is_train=False)
opts, _ = parser.parse_options()
opts_str = parser.make_opts_string(opts, verbose=True)
model = create_model(opts)
dataset = create_dataset(opts)
if opts.eval:
model.set_eval()
model.test(dataset)
| 27.413793
| 68
| 0.820126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.042767
|
c6bbabf1d22c4d30fab6e968dbe23f93d2189af5
| 67
|
py
|
Python
|
codes/course1/demo3_3.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
codes/course1/demo3_3.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
codes/course1/demo3_3.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
for i in range(11):
v = 2 ** i
print("2^%s = %s" % (i, v))
| 16.75
| 31
| 0.38806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.164179
|
c6bbf866443aff7a6fcd220b4ae5ee2ac61f6a5c
| 353
|
py
|
Python
|
2018-12-31.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
2018-12-31.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
2018-12-31.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
# Maximize the browser window and take a screenshot
from selenium import webdriver
from os import path
driver = webdriver.Chrome()
d = path.dirname('__file__')
index = path.join(d,'index.png')
driver.get("https://www.baidu.com/")
# Maximize the window
driver.maximize_window()
# Take a screenshot
driver.save_screenshot(index)
# Navigate back
driver.back()
# Navigate forward
driver.forward()
# Refresh the page
driver.refresh()
driver.quit()
| 12.172414
| 36
| 0.716714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.355932
|
c6bcdd4e1b6e9560584746d256ad5769eed1114e
| 4,016
|
py
|
Python
|
flask_webapi/exceptions.py
|
viniciuschiele/flask-webapi
|
4901c0b78fc61b8db18c211c5858b84901d0f4ab
|
[
"MIT"
] | null | null | null |
flask_webapi/exceptions.py
|
viniciuschiele/flask-webapi
|
4901c0b78fc61b8db18c211c5858b84901d0f4ab
|
[
"MIT"
] | null | null | null |
flask_webapi/exceptions.py
|
viniciuschiele/flask-webapi
|
4901c0b78fc61b8db18c211c5858b84901d0f4ab
|
[
"MIT"
] | null | null | null |
"""
Handles exceptions raised by Flask WebAPI.
"""
from . import status
class APIException(Exception):
"""
Base class for Flask WebAPI exceptions.
Subclasses should provide `.status_code` and `.default_message` properties.
:param str message: The actual message.
:param kwargs: The extra attributes.
"""
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_message = 'A server error occurred.'
def __init__(self, message=None, **kwargs):
if message is not None:
self.message = str(message)
else:
self.message = str(self.default_message)
self.kwargs = kwargs
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return str(self.message)
def denormalize(self, message_key_name='message', field_key_name='field'):
"""
Turns all `APIException` instances into `dict` and
returns a unique level of errors.
:param message_key_name: The key name used for the message item.
:param field_key_name: The key name used for the field item.
:return: A list of errors.
"""
errors = []
self._denormalize(errors, self, message_key_name=message_key_name, field_key_name=field_key_name)
return errors
def _denormalize(self, errors, message, field=None, message_key_name='message', field_key_name='field'):
kwargs = None
if isinstance(message, APIException):
kwargs = message.kwargs
message = message.message
if isinstance(message, dict):
for f, messages in message.items():
f = field + '.' + f if field else f
self._denormalize(errors, messages, f, message_key_name, field_key_name)
elif isinstance(message, list):
for message in message:
self._denormalize(errors, message, field, message_key_name, field_key_name)
else:
data = {message_key_name: message}
if kwargs:
data.update(kwargs)
if field:
data.update({field_key_name: field})
errors.append(data)
return errors
class ValidationError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, message, **kwargs):
        # if `message` is a dict, the key is
        # the name of the field and the value is
        # the actual message.
if isinstance(message, dict):
result = {}
for field, messages in message.items():
if not isinstance(messages, ValidationError):
messages = ValidationError(messages)
if isinstance(messages.message, str):
result[field] = [messages]
else:
result[field] = messages.message
self.message = result
self.kwargs = {}
elif isinstance(message, list):
result = []
for msg in message:
if not isinstance(msg, ValidationError):
if isinstance(msg, dict):
msg = ValidationError(**msg)
else:
msg = ValidationError(msg)
result.append(msg)
if len(result) == 1:
self.message = result[0].message
self.kwargs = result[0].kwargs
else:
self.message = result
self.kwargs = {}
else:
self.message = str(message)
self.kwargs = kwargs
class UnsupportedMediaType(Exception):
default_message = 'Unsupported media type "{mimetype}" in request.'
def __init__(self, mimetype, message=None):
if message is None:
message = self.default_message.format(mimetype=mimetype)
self.message = message
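# Hedged usage sketch (illustration only; shown as a comment because this
# module uses relative imports and cannot run as a standalone script):
#
#   err = ValidationError({'user': {'email': 'Invalid email.'},
#                          'age': 'Must be positive.'})
#   err.denormalize()
#   # -> [{'message': 'Invalid email.', 'field': 'user.email'},
#   #     {'message': 'Must be positive.', 'field': 'age'}]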
| 30.656489
| 108
| 0.586404
| 3,934
| 0.979582
| 0
| 0
| 0
| 0
| 0
| 0
| 765
| 0.190488
|
c6bec2b7b19f2adc7fd34bc6ce05b27edb1743ba
| 5,133
|
py
|
Python
|
plugins/module_utils/fortiwebcloud/request.py
|
fortinet/fortiwebcloud-ansible
|
4a6a2b139b88d6428494ca87d570a0a09988b15d
|
[
"MIT"
] | 5
|
2021-01-09T23:09:22.000Z
|
2022-01-22T12:34:25.000Z
|
plugins/module_utils/fortiwebcloud/request.py
|
fortinet/fortiwebcloud-ansible
|
4a6a2b139b88d6428494ca87d570a0a09988b15d
|
[
"MIT"
] | 2
|
2021-01-19T03:46:53.000Z
|
2021-06-28T15:19:24.000Z
|
plugins/module_utils/fortiwebcloud/request.py
|
fortinet/fortiwebcloud-ansible
|
4a6a2b139b88d6428494ca87d570a0a09988b15d
|
[
"MIT"
] | 2
|
2021-09-17T11:13:31.000Z
|
2021-11-30T10:53:49.000Z
|
#!/usr/bin/python
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2020 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import json
import time
import threading
import urllib.parse
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.basic import to_text
from ansible.module_utils.six.moves import urllib
from ansible_collections.fortinet.fortiwebcloud.plugins.module_utils.fortiwebcloud.settings import (API_VER, DOMAIN)
# Global FWB REST connection session
class RequestBase(object):
def __init__(self, method='GET', path="", query='', data={}, files=None, handler=None, timeout=60, **kargs):
self.method = method
self.data = data
self.files = files
self.timeout = timeout
        if isinstance(query, str):
self.query = query
else:
self.query = urllib.parse.urlencode(query)
self.api_ver = API_VER
self.domain = DOMAIN
self.path = path
self.url = self._set_url()
self.headers = dict()
self.set_headers('Content-Type', 'application/json')
self.set_headers('Accept', 'text/plain')
self.handler = handler
@staticmethod
def _format_path(path):
return '/'.join([seg for seg in path.split('/') if len(seg)])
def _set_url(self):
ulist = []
ulist.append(self.api_ver)
ulist.append(self.path)
url = "/".join(ulist)
if self.query:
query_str = self.query if self.query.startswith('?') else '?' + self.query
url = url + query_str
return "/" + url
def set_headers(self, key, value):
self.headers[key] = value
    def validate(self):
        """
        Validate the REST API setup.
        """
        if self.method not in ('GET', 'POST', 'PUT', 'DELETE'):
            raise Exception("REST API method %s not supported." % self.method)
def get(self, data={}):
status, res = self.handler.send_req(self.url, headers=self.headers, method="GET")
return res
def delete(self, data={}):
status, res = self.handler.send_req(self.url, headers=self.headers, method="DELETE")
return res
def put(self, data={}, files=None):
status, res = self.handler.send_req(
self.url, headers=self.headers,
data=json.dumps(data), files=files, method="PUT")
return res
def post(self, data={}):
_, res = self.handler.send_req(
self.url, headers=self.headers,
data=json.dumps(data), method="POST")
return res
    def send(self, data=None, files=None):
        """
        Send the REST API request and wait for its response.
        """
self.validate()
try:
ts = time.time()
method_val = getattr(self, self.method.lower(), self.get)
d = data or self.data
print(f"send data {d}")
f = files or self.files
print(f"send files {f}")
if f:
response = method_val(data=d, files=f)
else:
response = method_val(data=d)
try:
response = json.loads(response)
except Exception as e:
raise Exception(f"Get response json content failed for {e}.")
duration = time.time() - ts
print(f"URL:{self.url}, method:{self.method} finished, duration:{duration}.")
return response
except Exception as e:
raise Exception("Failed to connect to %s: %s." % (self.url, e))
| 36.664286
| 116
| 0.643678
| 3,064
| 0.596922
| 0
| 0
| 111
| 0.021625
| 0
| 0
| 2,150
| 0.418858
|
c6bf070a0e1401995e4a06960552d64f43d04d96
| 497
|
py
|
Python
|
tests/test_account.py
|
thangduong/lendingclub2
|
b16552807b69b81804369fd1a9058fa8f89ce1ef
|
[
"MIT"
] | null | null | null |
tests/test_account.py
|
thangduong/lendingclub2
|
b16552807b69b81804369fd1a9058fa8f89ce1ef
|
[
"MIT"
] | null | null | null |
tests/test_account.py
|
thangduong/lendingclub2
|
b16552807b69b81804369fd1a9058fa8f89ce1ef
|
[
"MIT"
] | null | null | null |
# Filename: test_account.py
"""
Test the lendingclub2.account module
"""
# PyTest
import pytest
# lendingclub2
from lendingclub2.account import InvestorAccount
from lendingclub2.error import LCError
class TestInvestorAccount:
def test_properties(self):
try:
investor = InvestorAccount()
except LCError:
pytest.skip("skip because cannot find account ID")
assert investor.available_balance >= 0.0
assert investor.total_balance >= 0.0
| 20.708333
| 62
| 0.702213
| 293
| 0.589537
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.259557
|
c6c065b4b597b12187960a9bbab5ef9b81fb5b2a
| 3,318
|
py
|
Python
|
src/webserver.py
|
sadjadeb/qrCode_ticket
|
36e5762e6fcb77315385922bb4568f2e0b67888c
|
[
"MIT"
] | 10
|
2021-12-25T16:58:45.000Z
|
2022-03-21T02:25:10.000Z
|
src/webserver.py
|
sadjadeb/qrCode_ticket
|
36e5762e6fcb77315385922bb4568f2e0b67888c
|
[
"MIT"
] | 2
|
2021-12-31T10:48:57.000Z
|
2022-01-01T12:05:02.000Z
|
src/webserver.py
|
sadjadeb/qrCode_ticket
|
36e5762e6fcb77315385922bb4568f2e0b67888c
|
[
"MIT"
] | null | null | null |
from config import *
from excel_handler import get_users_from_excel
from fastapi import FastAPI, HTTPException, status, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from typing import Optional
import pathlib
import uvicorn
app = FastAPI()
templates_path = pathlib.Path(__file__).parent.resolve().parent.resolve() / 'templates'
templates = Jinja2Templates(directory=templates_path)
app.mount("/static", StaticFiles(directory=templates_path / 'static'), name="static")
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
users = get_users_from_excel(OUTPUT_FILE_PATH)
users_entrance = {}
def has_permission(password: str):
if password is None:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Password required")
if password != ADMIN_PASSWORD:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect password")
else:
return True
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
return templates.TemplateResponse("index.html", {"request": request,
"page_title": PAGE_TITLE})
@app.get("/ticket/{ticket_id}", response_class=HTMLResponse)
async def ticket(request: Request, ticket_id: int):
return templates.TemplateResponse("ticket.html", {"request": request,
"ticket_id": ticket_id,
"base_url": DOMAIN_NAME,
"page_title": PAGE_TITLE,
"event_name": EVENT_NAME})
@app.get("/api/ticket/all")
async def read_all_items(password: Optional[str] = None):
if has_permission(password):
return users_entrance, users
@app.get("/api/ticket/{ticket_id}")
async def read_items(ticket_id: int):
for user in users:
if user['ticket_id'] == ticket_id:
return user
raise HTTPException(status_code=404, detail="User Not Found")
@app.get("/reception", response_class=HTMLResponse)
async def reception_page(request: Request):
return templates.TemplateResponse("reception.html", {"request": request,
"base_url": DOMAIN_NAME,
"page_title": PAGE_TITLE})
@app.get("/api/reception/{ticket_id}")
async def verify_ticket(ticket_id: int, password: Optional[str] = None):
if has_permission(password):
for user in users:
if user['ticket_id'] == ticket_id:
if ticket_id in users_entrance:
users_entrance[ticket_id] += 1
else:
users_entrance[ticket_id] = 1
return {
'first_name': user['first_name'],
'last_name': user['last_name'],
'entrance_count': users_entrance[ticket_id]
                }
        raise HTTPException(status_code=404, detail="User Not Found")
def run_server():
uvicorn.run(app, host="127.0.0.1", port=8000)
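# Hedged usage sketch (illustration only): once run_server() is up, the
# reception endpoint can be exercised with the `requests` package. The
# ticket id below is an assumption.
#
#   import requests
#   r = requests.get('http://127.0.0.1:8000/api/reception/1234',
#                    params={'password': ADMIN_PASSWORD})
#   print(r.json())  # {'first_name': ..., 'last_name': ..., 'entrance_count': 1}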
| 34.926316
| 98
| 0.61965
| 0
| 0
| 0
| 0
| 2,058
| 0.620253
| 1,799
| 0.542194
| 448
| 0.135021
|
c6c0d208582ba1aed39ac80bef8ef9d7e28b0eae
| 337
|
py
|
Python
|
main/test/test.py
|
gitter-badger/grow-controller-Rpi
|
0107251af85a4dc23b61b8be66fe49d597fd776b
|
[
"Unlicense"
] | 3
|
2017-03-21T22:35:01.000Z
|
2021-08-19T03:16:39.000Z
|
main/test/test.py
|
gitter-badger/grow-controller-Rpi
|
0107251af85a4dc23b61b8be66fe49d597fd776b
|
[
"Unlicense"
] | null | null | null |
main/test/test.py
|
gitter-badger/grow-controller-Rpi
|
0107251af85a4dc23b61b8be66fe49d597fd776b
|
[
"Unlicense"
] | null | null | null |
###PiPlate buttons
print('sudo crontab /home/pi/grow-controller-Rpi/main/ref/crontab.cron')
'''
while True:
time.sleep(0.5) # without this time.sleep, 23% cpu usage. with 3%
if lcd.is_pressed(LCD.UP):
GPIO.output(pin1, GPIO.LOW) # on
if lcd.is_pressed(LCD.DOWN):
GPIO.output(pin1, GPIO.HIGH) # off
'''
| 22.466667
| 72
| 0.64095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 323
| 0.958457
|
c6c2c18414f129c6c748df8b453b0adccb5dbf36
| 2,010
|
py
|
Python
|
generate.py
|
xphip/feather
|
18a9c88bdb545e4f33a35e0e771b07d8c5c8c56e
|
[
"MIT"
] | null | null | null |
generate.py
|
xphip/feather
|
18a9c88bdb545e4f33a35e0e771b07d8c5c8c56e
|
[
"MIT"
] | null | null | null |
generate.py
|
xphip/feather
|
18a9c88bdb545e4f33a35e0e771b07d8c5c8c56e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
import os
import subprocess
import sys
def CSSMaker():
output = 'css/icons_list.css'
css = ''
for icon in os.listdir('./icons'):
[name, ext] = icon.split('.')
        css += 'i[data-feather="{}"]:after {{ background-image: url(../icons/{}); }}\n'.format(name, icon)
f = open(output, 'w+')
f.write(css)
f.close()
    print('Output: ' + output)
def CSSUriMaker():
output = 'css/icons_list_emb.css'
css = ''
for icon in os.listdir('./icons'):
[name, ext] = icon.split('.')
i = SVGMinify(icon, './icons', './icons-min')
i = 'data:image/svg+xml;utf8,' + i
        css += """i[data-feather="{}"]:after {{ background-image: url('{}'); }}\n""".format(name, i)
f = open(output, 'w+')
f.write(css)
f.close()
def SVGMinify(icon, icons_dir, icons_min_dir):
# icons_dir = './icons'
# icons_min_dir = './icons-min'
[name, ext] = icon.split('.')
icon_min = "{}/{}.min.{}".format(icons_min_dir, name, ext)
i = "{}/{}.{}".format(icons_dir, name, ext)
if os.path.exists(icon_min) == 0:
subprocess.call(['svgo', '-q', i, '-o', icon_min])
f = open(icon_min)
i = f.read()
f.close()
return i
# # OLD
# def SVGMinify():
# icons_dir = './icons'
# icons_min_dir = './icons-min'
# total_icons = 0
# for icon in os.listdir(icons_dir):
# [name, ext] = icon.split('.')
# icon_min = "{}/{}.min.{}".format(icons_min_dir, name, ext)
# i = "{}/{}.{}".format(icons_dir, name, ext)
# if os.path.exists(icon_min) == 0:
# subprocess.call(['svgo', '-q', i, '-o', icon_min])
# total_icons += 1
# if total_icons > 0:
# print 'Total icons minified: ' + str(total_icons)
def Usage():
    print("""
[USAGE]
    ./generate.py [OPTIONS]
[OPTIONS]
    css     Generates the CSS file from the icons in ./icons.
    cssuri  Generates the CSS file from the icons in ./icons, embedded as data URIs in the stylesheet.
""")
if __name__ == "__main__":
if len(sys.argv) > 1:
if sys.argv[1] == "css":
CSSMaker()
            print('Done!')
elif sys.argv[1] == "cssuri":
CSSUriMaker()
            print('Done!')
else:
Usage()
| 21.612903
| 101
| 0.593035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,079
| 0.536816
|
c6c3cf7f18578ef4fee0cf3ceb347dcb151e1993
| 3,827
|
py
|
Python
|
Lib/corpuscrawler/crawl_pl.py
|
cash/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 95
|
2019-06-13T23:34:21.000Z
|
2022-03-12T05:22:49.000Z
|
Lib/corpuscrawler/crawl_pl.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 31
|
2019-06-02T18:56:53.000Z
|
2021-08-10T20:16:02.000Z
|
Lib/corpuscrawler/crawl_pl.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 35
|
2019-06-18T08:26:24.000Z
|
2022-01-11T13:59:40.000Z
|
# coding: utf-8
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
from corpuscrawler.util import (
crawl_deutsche_welle, crawl_udhr, extract, cleantext, clean_paragraphs, urlpath
)
def crawl(crawler):
out = crawler.get_output(language='pl')
crawl_udhr(crawler, out, filename='udhr_pol.txt')
crawl_deutsche_welle(crawler, out, prefix='/pl/')
crawl_pl_usembassy_gov(crawler, out)
def _pl_usembassy_gov_path(url):
if not urlpath(url).startswith('/pl/'):
return False
else:
if urlpath(url) == '/pl/':
return False
elif urlpath(url).startswith('/pl/category/'):
return False
elif urlpath(url).startswith('/pl/tag/'):
return False
else:
return True
def crawl_pl_usembassy_gov(crawler, out):
sitemap = crawler.fetch_sitemap('https://pl.usembassy.gov/sitemap_index.xml')
trans_regex = re.compile(
r'<h3>Tłumaczenie</h3><div class="translations_sidebar"><ul><li><a href ?="([^"]*)"'
)
pubdate_regex = re.compile(
r'<meta property="article:published_time" content="([^"]*)"'
)
links = set()
for key in sorted(sitemap.keys()):
if _pl_usembassy_gov_path(key):
links.add(key)
for link in sorted(links):
result = crawler.fetch(link)
if result.status != 200:
continue
html = result.content.decode('utf-8')
title = extract('<title>', '</title>', html)
title = title if title else ''
title = title.split(' | ')[0] if ' | ' in title else title
pubdate_match = pubdate_regex.search(html)
pubdate = pubdate_match.group(1) if pubdate_match else None
trans_match = trans_regex.search(html)
trans = trans_match.group(1) if trans_match else None
if pubdate is None: pubdate = result.headers.get('Last-Modified')
if pubdate is None: pubdate = sitemap[link]
exstart = '<div class="entry-content">'
exstart2 = '<div class="mo-page-content">'
exend = '<!-- AddThis Advanced Settings above via filter on the_content -->'
exstart = exstart2 if exstart2 in html else exstart
content = extract(exstart, exend, html)
cleanparas = clean_paragraphs(content) if content else None
# Don't repeat the title if it's the only text content
cleantitle = cleantext(title)
if cleanparas:
if len(cleanparas) == 1 and cleanparas[0] == cleantitle:
paras = [cleantitle]
else:
paras = [cleantitle] + cleanparas
else:
paras = [cleantitle]
# There are quite a few media pages whose only text is the filename
# this, conveniently, is typically also the post's name
if len(paras) == 1 and paras[0].lower() in urlpath(link).lower():
continue
if paras:
out.write('# Location: %s\n' % link)
out.write('# Genre: Diplomatic\n')
if trans:
out.write('# Translation: %s\n' % trans)
if pubdate:
out.write('# Publication-Date: %s\n' % pubdate)
out.write('\n'.join(paras) + '\n')
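# Hedged illustration (not part of the crawler): _pl_usembassy_gov_path()
# keeps only Polish-language content pages, e.g.:
#
#   _pl_usembassy_gov_path('https://pl.usembassy.gov/pl/news-page/')  # True
#   _pl_usembassy_gov_path('https://pl.usembassy.gov/pl/tag/visa/')   # False
#   _pl_usembassy_gov_path('https://pl.usembassy.gov/pl/')            # False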
| 40.284211
| 92
| 0.629736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,303
| 0.340298
|
c6c507d5077fa7072a210afdf6ced8586dc0a30d
| 2,775
|
py
|
Python
|
typewise_alert.py
|
clean-code-craft-tcq-1/add-variety-python-AkshayUHegde
|
924beb7195d960d3fe06460da9df1a42c5d5693f
|
[
"MIT"
] | null | null | null |
typewise_alert.py
|
clean-code-craft-tcq-1/add-variety-python-AkshayUHegde
|
924beb7195d960d3fe06460da9df1a42c5d5693f
|
[
"MIT"
] | null | null | null |
typewise_alert.py
|
clean-code-craft-tcq-1/add-variety-python-AkshayUHegde
|
924beb7195d960d3fe06460da9df1a42c5d5693f
|
[
"MIT"
] | null | null | null |
class TypewiseAlert:
def __init__(self, limits_for_types=None, alert_target_funcs=None):
self.default_limits_for_cooling_types = {
"PASSIVE_COOLING": (0, 35),
"MED_ACTIVE_COOLING": (0, 40),
"HI_ACTIVE_COOLING": (0, 45),
}
self.default_alert_funcs = {
'TO_CONTROLLER': self.send_controller_message,
'TO_EMAIL': self.send_email
}
        self.alert_mail_details = {
            "TOO_LOW": {
                "recipient": "low_temperature_breach_expert@bosch.com",
                "email_message": "The temperature has dropped beyond lower breach limits. "
                                 "Please take corrective action immediately."
            },
            "TOO_HIGH": {
                "recipient": "high_temperature_breach_expert@bosch.com",
                "email_message": "The temperature has risen beyond upper breach limits. "
                                 "Please take corrective action immediately."
            },
            "NORMAL": {
                "recipient": "monitoring_team@bosch.com",
                "email_message": "The temperature is OK."
            },
        }
self.default_controller_header = 0xfeed
        self.limits_for_types = (limits_for_types if limits_for_types is not None
                                 else self.default_limits_for_cooling_types)
        self.alert_target_funcs = (alert_target_funcs if alert_target_funcs is not None
                                   else self.default_alert_funcs)
def send_controller_message(self, breach_type):
print(f'{self.default_controller_header}, {breach_type}')
return f"CONTROLLER_MESSAGE,{breach_type}"
def send_email(self, breach_type):
recipients = self.alert_mail_details[breach_type]['recipient']
email_message = self.alert_mail_details[breach_type]['email_message']
email_content = f"To,\n{recipients}\n \t{email_message}"
print(email_content)
return f"EMAIL,{breach_type}"
def infer_breach(self, value, lower_limit, upper_limit):
if value < lower_limit:
return 'TOO_LOW'
if value > upper_limit:
return 'TOO_HIGH'
return 'NORMAL'
def classify_temperature_breach(self, cooling_type, temperature_in_c):
lower_limit, upper_limit = self.limits_for_types[cooling_type]
return self.infer_breach(temperature_in_c, lower_limit, upper_limit)
def check_and_alert(self, alert_target, battery_characteristic, temperature_in_c):
breach_type = \
self.classify_temperature_breach(battery_characteristic['coolingType'], temperature_in_c)
return self.alert_target_funcs[alert_target](breach_type)
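# Hedged usage sketch: exercising the default limits and alert targets.
if __name__ == '__main__':
    alerter = TypewiseAlert()
    battery = {'coolingType': 'PASSIVE_COOLING'}
    # 50 C breaches the (0, 35) passive-cooling band -> TOO_HIGH via email
    print(alerter.check_and_alert('TO_EMAIL', battery, 50))
    # 20 C is within the band -> NORMAL via controller message
    print(alerter.check_and_alert('TO_CONTROLLER', battery, 20))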
| 45.491803
| 101
| 0.627027
| 2,773
| 0.999279
| 0
| 0
| 0
| 0
| 0
| 0
| 737
| 0.265586
|
c6c6489454579f788af2d644a9acb3a3264844fe
| 1,245
|
py
|
Python
|
froide/publicbody/search_indexes.py
|
rufuspollock/froide
|
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
|
[
"MIT"
] | null | null | null |
froide/publicbody/search_indexes.py
|
rufuspollock/froide
|
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
|
[
"MIT"
] | null | null | null |
froide/publicbody/search_indexes.py
|
rufuspollock/froide
|
8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from haystack import indexes
from celery_haystack.indexes import CelerySearchIndex
from .models import PublicBody
PUBLIC_BODY_BOOSTS = settings.FROIDE_CONFIG.get("public_body_boosts", {})
class PublicBodyIndex(CelerySearchIndex, indexes.Indexable):
text = indexes.EdgeNgramField(document=True, use_template=True)
name = indexes.CharField(model_attr='name', boost=1.5)
jurisdiction = indexes.CharField(model_attr='jurisdiction__name', default='')
topic_auto = indexes.EdgeNgramField(model_attr='topic_name')
topic_slug = indexes.CharField(model_attr='topic__slug')
name_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField(model_attr='get_absolute_url')
def get_model(self):
return PublicBody
def index_queryset(self, **kwargs):
"""Used when the entire index for model is updated."""
return self.get_model().objects.get_for_search_index()
def prepare(self, obj):
data = super(PublicBodyIndex, self).prepare(obj)
if obj.classification in PUBLIC_BODY_BOOSTS:
data['boost'] = PUBLIC_BODY_BOOSTS[obj.classification]
print "Boosting %s at %f" % (obj, data['boost'])
return data
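# Hedged illustration (an assumption, not from the froide source): the boost
# mapping consumed above would live in Django settings, e.g.:
#
#   FROIDE_CONFIG = {
#       "public_body_boosts": {"Ministry": 1.9, "Municipality": 0.8},
#   }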
| 37.727273
| 81
| 0.727711
| 1,018
| 0.817671
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.147791
|
c6c9aa4e57c89e6f69fa55d265d499cc88ae995f
| 1,519
|
py
|
Python
|
4_factory/factory_method/dependent_pizza_store.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
4_factory/factory_method/dependent_pizza_store.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
4_factory/factory_method/dependent_pizza_store.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
from chicago_style_clam_pizza import ChicagoStyleClamPizza
from chicago_style_cheese_pizza import ChicagoStyleCheesePizza
from chicago_style_pepperoni_pizza import ChicagoStylePepperoniPizza
from chicago_style_veggie_pizza import ChicagoStyleVeggiePizza
from ny_style_clam_pizza import NYStyleClamPizza
from ny_style_cheese_pizza import NYStyleCheesePizza
from ny_style_pepperoni_pizza import NYStylePepperoniPizza
from ny_style_veggie_pizza import NYStyleVeggiePizza
class DependentPizzaStore:
pizza = None
def create_pizza(self, style: str, t: str):
if style == 'NY':
if t == 'cheese':
self.pizza = NYStyleCheesePizza()
elif t == 'pepperoni':
self.pizza = NYStylePepperoniPizza()
elif t == 'clam':
self.pizza = NYStyleClamPizza()
elif t == 'veggie':
self.pizza = NYStyleVeggiePizza()
elif style == 'Chicago':
if t == 'cheese':
self.pizza = ChicagoStyleCheesePizza()
elif t == 'pepperoni':
self.pizza = ChicagoStylePepperoniPizza()
elif t == 'clam':
self.pizza = ChicagoStyleClamPizza()
elif t == 'veggie':
self.pizza = ChicagoStyleVeggiePizza()
        else:
            print('Error: invalid style of pizza')
            return None
        if self.pizza is None:
            print('Error: invalid type of pizza')
            return None
        self.pizza.prepare()
self.pizza.bake()
self.pizza.cut()
self.pizza.box()
return self.pizza
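# Hedged usage sketch (illustration only; assumes the pizza modules imported
# above are importable):
if __name__ == '__main__':
    store = DependentPizzaStore()
    pizza = store.create_pizza('NY', 'cheese')
    if pizza is not None:
        print('Ordered a', type(pizza).__name__)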
| 35.325581
| 68
| 0.631336
| 1,048
| 0.689928
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.071758
|
c6cac8b3c9901ec09333ce8b40056a0c6f21d27c
| 459
|
py
|
Python
|
tests/performance/cte-arm/tests/rf_mnist.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 36
|
2018-10-22T19:21:14.000Z
|
2022-03-22T12:10:01.000Z
|
tests/performance/cte-arm/tests/rf_mnist.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 329
|
2018-11-22T18:04:57.000Z
|
2022-03-18T01:26:55.000Z
|
tests/performance/cte-arm/tests/rf_mnist.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 21
|
2019-01-10T11:46:39.000Z
|
2022-03-17T12:59:45.000Z
|
import performance
import dislib as ds
from dislib.classification import RandomForestClassifier
def main():
x_mn, y_mn = ds.load_svmlight_file(
"/fefs/scratch/bsc19/bsc19029/PERFORMANCE/datasets/train.scaled",
block_size=(5000, 780), n_features=780, store_sparse=False)
rf = RandomForestClassifier(n_estimators=100, distr_depth=2)
performance.measure("RF", "mnist", rf.fit, x_mn, y_mn)
if __name__ == "__main__":
main()
| 24.157895
| 73
| 0.723312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.185185
|
c6cc6e291c2d423fccb4a28cc69ba02ced719b37
| 186
|
py
|
Python
|
Python_Files/murach/solutions/ch14/movies/objects.py
|
Interloper2448/BCGPortfolio
|
c4c160a835c64c8d099d44c0995197f806ccc824
|
[
"MIT"
] | null | null | null |
Python_Files/murach/solutions/ch14/movies/objects.py
|
Interloper2448/BCGPortfolio
|
c4c160a835c64c8d099d44c0995197f806ccc824
|
[
"MIT"
] | null | null | null |
Python_Files/murach/solutions/ch14/movies/objects.py
|
Interloper2448/BCGPortfolio
|
c4c160a835c64c8d099d44c0995197f806ccc824
|
[
"MIT"
] | null | null | null |
class Movie:
def __init__(self, name="", year=1901):
self.name = name
self.year = year
def getStr(self):
return self.name + " (" + str(self.year) + ")"
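# Hedged usage sketch:
if __name__ == '__main__':
    movie = Movie("Casablanca", 1942)
    print(movie.getStr())  # Casablanca (1942)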
| 23.25
| 55
| 0.532258
| 185
| 0.994624
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.048387
|
c6d0b39109db93442e531726d432358337458672
| 2,275
|
py
|
Python
|
pwn/shellcode/misc/exit.py
|
Haabb/pwnfork
|
c2530ea2fd2f9d4e65df234afeb8f7def93afe49
|
[
"MIT"
] | 1
|
2016-08-29T03:38:42.000Z
|
2016-08-29T03:38:42.000Z
|
pwn/shellcode/misc/exit.py
|
Haabb/pwnfork
|
c2530ea2fd2f9d4e65df234afeb8f7def93afe49
|
[
"MIT"
] | null | null | null |
pwn/shellcode/misc/exit.py
|
Haabb/pwnfork
|
c2530ea2fd2f9d4e65df234afeb8f7def93afe49
|
[
"MIT"
] | null | null | null |
from pwn.internal.shellcode_helper import *
from ..misc.pushstr import pushstr
@shellcode_reqs(arch=['i386', 'amd64'], os=['linux', 'freebsd'])
def exit(returncode = None, arch = None, os = None):
"""Exits. Default return code, None, means "I don't care"."""
returncode = arg_fixup(returncode)
if arch == 'i386':
if os in ['linux', 'freebsd']:
return _exit_i386(returncode, os)
elif arch == 'amd64':
if os in ['linux', 'freebsd']:
return _exit_amd64(returncode, os)
bug("OS/arch combination (%s, %s) is not supported for exit" % (os, arch))
def _exit_amd64(returncode, os):
out = ["push SYS_exit",
"pop rax"]
    if returncode is not None:
if os == 'linux':
if returncode == 0:
out += ['xor ebx, ebx']
elif isinstance(returncode, int):
out += [pushstr(p32(returncode), null = False, raw = True),
'pop rbx']
else:
out += ['mov ebx, %s' % str(returncode)]
elif os == 'freebsd':
if returncode == 0:
out += ['cdq', 'push rdx']
elif isinstance(returncode, int):
out += [pushstr(p32(returncode), null = False, raw = True)]
else:
out += ['push %s' % str(returncode)]
out += ['push rax']
out += ['syscall']
return '\n'.join(' ' + s for s in out)
def _exit_i386(returncode, os):
    if returncode is None:
return """
push SYS_exit
pop eax
int 0x80
"""
if os == 'linux':
return """
""" + pwn.shellcode.mov('ebx', returncode, raw = True) + """
push SYS_exit
pop eax
int 0x80"""
elif os == 'freebsd':
if str(returncode) == "0":
return """
push SYS_exit
pop eax
cdq
push edx
push edx
int 0x80"""
else:
return """
push %s
push SYS_exit
pop eax
push eax
int 0x80""" % str(returncode)
else:
bug('OS was neither linux nor freebsd')
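# Hedged usage sketch (illustration only; assumes the legacy pwn 1.x
# toolchain this module belongs to is importable):
#
#   print(exit(0, arch='i386', os='linux'))  # assembly for an i386 Linux exit(0)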
| 29.545455
| 78
| 0.465934
| 0
| 0
| 0
| 0
| 524
| 0.23033
| 0
| 0
| 845
| 0.371429
|
c6d1c365ef9f848b0908e928e06218bc28eb4a5c
| 1,037
|
py
|
Python
|
backend/bundle/tests/seeker_tests/samples/indentation.py
|
fossabot/Graphery
|
61f23b2ad4ad0fa5dff643047597f9bb6cae35a2
|
[
"MIT"
] | 5
|
2020-08-26T00:15:01.000Z
|
2021-01-11T17:24:51.000Z
|
backend/bundle/tests/seeker_tests/samples/indentation.py
|
fossabot/Graphery
|
61f23b2ad4ad0fa5dff643047597f9bb6cae35a2
|
[
"MIT"
] | 69
|
2020-08-02T23:45:44.000Z
|
2021-04-17T03:04:32.000Z
|
backend/bundle/tests/seeker_tests/samples/indentation.py
|
fossabot/Graphery
|
61f23b2ad4ad0fa5dff643047597f9bb6cae35a2
|
[
"MIT"
] | 4
|
2020-09-10T05:40:49.000Z
|
2020-12-20T11:44:16.000Z
|
from bundle import seeker
@seeker.tracer(depth=2, only_watch=False)
def main():
f2()
def f2():
f3()
def f3():
f4()
@seeker.tracer(depth=2, only_watch=False)
def f4():
f5()
def f5():
pass
expected_output = '''
Source path:... Whatever
call 5 def main():
line 6 f2()
call 9 def f2():
line 10 f3()
Source path:... Whatever
call 18 def f4():
line 19 f5()
call 22 def f5():
line 23 pass
return 23 pass
Return value:.. None
return 19 f5()
Return value:.. None
Elapsed time: 00:00:00.000134
return 10 f3()
Return value:.. None
return 6 f2()
Return value:.. None
Elapsed time: 00:00:00.000885
'''
| 21.604167
| 52
| 0.387657
| 0
| 0
| 0
| 0
| 122
| 0.117647
| 0
| 0
| 799
| 0.770492
|
c6d2c43f2fbd2525762b6b965846526e85874c64
| 1,451
|
py
|
Python
|
qiskit_experiments/framework/matplotlib.py
|
QuantumHardware/qiskit-experiments
|
c09cf35bb922419354955abe8d536a97a9ea286b
|
[
"Apache-2.0"
] | 72
|
2021-02-24T19:28:51.000Z
|
2022-03-27T02:56:59.000Z
|
qiskit_experiments/framework/matplotlib.py
|
dongcc/qiskit-experiments
|
894dcf41ac69ace9e6a0a3c4800d4b6994ac3b5a
|
[
"Apache-2.0"
] | 509
|
2021-03-04T13:46:00.000Z
|
2022-03-31T18:09:16.000Z
|
qiskit_experiments/framework/matplotlib.py
|
dongcc/qiskit-experiments
|
894dcf41ac69ace9e6a0a3c4800d4b6994ac3b5a
|
[
"Apache-2.0"
] | 70
|
2021-02-24T19:21:39.000Z
|
2022-03-05T04:00:12.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Matplotlib helper functions
"""
from matplotlib.figure import Figure
from matplotlib.backends.backend_svg import FigureCanvasSVG
default_figure_canvas = FigureCanvasSVG # pylint: disable=invalid-name
"""Matplotlib canvas to use when rendering a figure. This needs to be a
canvas for a `non-interactive backend
<https://matplotlib.org/stable/tutorials/introductory/usage.html#the-builtin-backends>`_.
The default is `FigureCanvasSVG`."""
def get_non_gui_ax():
"""Return a matplotlib axes that can be used in a child thread.
Analysis/plotting is done in a separate thread (so it doesn't block the
main thread), but matplotlib doesn't support GUI mode in a child thread.
This function creates a separate Figure and attaches a non-GUI
SVG canvas to it.
Returns:
matplotlib.axes.Axes: A matplotlib axes that can be used in a child thread.
"""
figure = Figure()
_ = default_figure_canvas(figure)
return figure.subplots()
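# Hedged usage sketch (illustration only): the returned axes is backed by the
# non-interactive SVG canvas, so it can be drawn on and saved from a worker
# thread. The filename is an assumption.
if __name__ == '__main__':
    ax = get_non_gui_ax()
    ax.plot([0, 1, 2], [0, 1, 4])
    ax.figure.savefig('example_plot.svg')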
| 36.275
| 89
| 0.751206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,179
| 0.812543
|
c6d4b9bc3a7c3d3b66374d69e6147ebd024b69ea
| 14,117
|
py
|
Python
|
effect_tools.py
|
rsjones94/hurricane_analysis
|
b619526dcf40ea83e9ae3ba92f3a1d28fce25776
|
[
"MIT"
] | null | null | null |
effect_tools.py
|
rsjones94/hurricane_analysis
|
b619526dcf40ea83e9ae3ba92f3a1d28fce25776
|
[
"MIT"
] | null | null | null |
effect_tools.py
|
rsjones94/hurricane_analysis
|
b619526dcf40ea83e9ae3ba92f3a1d28fce25776
|
[
"MIT"
] | null | null | null |
import os
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from read import clean_read
from detrend import *
def get_effect(data, param, mean, stddev, start_index, lag=3, effect_type=1,
returning_gap=0, dropthrough=(0, 0), forcing=(None, None),
max_effect=365, max_dropout=5):
"""
For a given parameter, finds the time it takes for the time series to return to normalcy
after a peturbation
Args:
data: A DataFrame of gauge data
param: the column in data to use
mean: the mean value of the pre-effect window
stddev: the standard deviation of the pre-effect window
start_index: the index of the storm peturbation
lag: the number of days allowed for an effect to begin. Minimum is 1
effect_type: the INITIAL expected effect of the peturbation. 1 indicates a positive effect, -1
indicates a negative effect
returning_gap: number of days where an increasing effect is allowed to reverse trend
before it is considered to be on its reverse trend
dropthrough: A list or tuple indicating the number of dropthroughs allowed and the number of days
the time series is allotted to drop through before being considered terminated.
A dropthrough is when a parameter is outside the normal range for that parameter and quickly
becomes outside the normal range but with opposite valence, e.g., it is above the normal range and
quickly goes to being below the normal range.
forcing: a tuple of 1) the number of days a returning trend can be reversed before it is forced to
return by calculating the best fit line for the last n returning days and
calculating the date of intersection. This allows an effect window to be
estimated even when additional storms/forcing effects follow the initial
peturbation. Default is None, which will never force a completion.
2) the number of points to include in the forcing slope fit line
max_effect: the maximum number of days an effect can continue before being terminated
max_dropout: number of continuous days of no signal before mandatory termination
Returns:
A list with two parts. The first is a list of the start and end indices of the effect
(or None, if there was no effect). The second is list, (days_above, days_below, days_between,
termination_type, forcing_start, forcing_slope). termination_type can be "natural", "forced",
None or 'dropout'
If not forced, forcing_start and forcing_slope will be None.
"""
returner = [[None, None], [0, 0, 0, 'natural', None, None]]
force_completion = forcing[0] # number of days to regress before completion is forced
force_history = forcing[1]
dropthrough = [dropthrough[0], dropthrough[1]]
comp_dict = {1: greater, -1: lesser}
exes = np.array(data.index)
orig = np.array(data[param])
whys = np.array(pd.Series(orig).interpolate(limit_direction='both'))
low = mean - stddev
high = mean + stddev
normalcy = (low, high)
if effect_type == 1:
comp_ind = 1
comp_val = normalcy[comp_ind] # high
elif effect_type == -1:
comp_ind = 0
comp_val = normalcy[comp_ind] # low
else:
raise Exception('effect_type must be 1 or -1')
effect_begun = False
i = start_index - 1
while lag > 0:
lag -= 1
i += 1
val = whys[i]
if comp_dict[effect_type](val, comp_val):
effect_begun = True
returner[0][0] = i
break
if not effect_begun:
returner[1][3] = None
return returner
# print(f'Effect begins at {i} {whys[i]}')
i -= 1
is_returning = False
has_real_val = False
nan_count = 0
ret_gap_count = 0
while True:
i += 1
# print(f'Checking {i} {whys[i]}')
        # terminate once the effect has run longer than max_effect days
        if i > returner[0][0] + max_effect:
            returner[1][3] = 'max_effect'
            break
if np.isnan(orig[i]):
nan_count += 1
# print(f'NANNER: {nan_count}')
if nan_count > max_dropout:
returner[1][3] = 'dropout'
# print('dropping out')
i -= nan_count - 1
break
else:
has_real_val = True
nan_count = 0
last_val = whys[i - 1]
val = whys[i]
towards_pre = comp_dict[effect_type](last_val, val)
# print(f'Towards pre: {towards_pre}')
if towards_pre and not is_returning: # checking to see if the data has started going back to pre-peturbation
ret_gap_count += 1
# print(f'Retgap: {ret_gap_count} at {i}')
if ret_gap_count > returning_gap or comp_dict[effect_type](comp_val, val):
# print(f'returning at {i}')
is_returning = True
ret_gap_count = 0
elif not is_returning:
ret_gap_count = 0
# print(f'past pre-pet')
if is_returning:
if comp_dict[effect_type](comp_val, val): # check to see if we've returned to normalcy
# print(f'we normal at {i}')
if dropthrough[0] == 0: # if no dropthroughs left then we're done
# print('no dropthroughs left')
break
else:
if within(val, normalcy): # if we're within normalcy, check to see if we'll drop through in time
# print('need to check dropthrough')
does_drop_through, ind = drops_through(whys, i, normalcy, dropthrough[1])
# print(f'Drops thru? {does_drop_through}')
if does_drop_through: # if it does drop through, go on
days_to_drop = ind - i
returner[1][2] += days_to_drop - 1
i = ind - 1
else: # if it doesn't, then we're done
# print('did not drop thru')
break
dropthrough[0] -= 1
effect_type = -effect_type
comp_ind ^= 1 # bit flip from 0 to 1 and vice versa
comp_val = normalcy[comp_ind]
is_returning = False
elif force_completion and comp_dict[effect_type](val, last_val):
# print('moving away?')
# check to see if the data is moving away from pre-pet again
# assuming force_completion is numeric
# print('Force completion active')
# print(f'Func {comp_dict[effect_type]}, vals {val,last_val}. Ind {i}')
# print('ddtr:')
dn = days_to_return(whys, i - 1, func=comp_dict[-effect_type], max_nan=max_dropout)
# print(f'{dn}')
# print(dn)
if dn <= force_completion: # if we return in time
if last_val > high:
returner[1][0] += (dn - 2)
if last_val < low:
returner[1][1] += (dn - 2)
i += (dn - 2)
else: # force completion
# print(f'Forcing completion')
try:
ind, days_to_force, slope = forced_return(exes, whys, i - 1, normalcy, history=force_history)
# print(f'Completion forced at {ind} from {i-1}. Takes {days_to_force} days. Slope: {slope}')
returner[1][3] = 'forced'
returner[1][4] = i - 1
returner[1][5] = slope
to_add = days_to_force - 1
if last_val > high:
returner[1][0] += to_add
if last_val < low:
returner[1][1] += to_add
i = ind
except ValueError:
returner[1][3] = 'forcing error'
i -= 1
break
# print('eob')
if val > high:
returner[1][0] += 1
elif val < low:
returner[1][1] += 1
else:
returner[1][2] += 1
returner[0][1] = i
if not has_real_val:
returner = [[None, None], [0, 0, 0, 'dropout', None, None]]
if returner[0][0] == returner[0][1]: # happens sometimes when there is a dropout but an effect is registered due to
# interpolation at the storm start
returner = [[None, None], [0, 0, 0, 'natural', None, None]]
return returner
def greater(a, b):
return a > b
def lesser(a, b):
return a < b
def within(a, b):
return b[1] > a > b[0]
def forced_return(exes, whys, i, window, history=3):
"""
Gives the index of a forced return and the slope of the return
Args:
exes: x vals
whys: y vals
i: index of the return begin
window: the min and max of the return window
history: number of points to include in the best fit
Returns:
tuple (index_of_return, days_to_return, slope)
"""
# print('\nFORCING:')
    while True:
        if history < 2:  # need at least two points for a usable linear fit
            raise ValueError('Forced return impossible')
        x = exes[(i - history + 1):(i + 1)]
        y = whys[(i - history + 1):(i + 1)]
        m, b = np.polyfit(x, y, 1)
        # print(f'{m}')
        if whys[i] > window[1] and m >= 0:
            history -= 1
        elif whys[i] < window[0] and m <= 0:
            history -= 1
        elif np.isclose(m, 0):
            history -= 1
        else:
            break
def lin_func(index, y=whys[i], anchor=i, slope=m):
r = y + (index - anchor) * slope
return r
# print('lin_func defined')
if whys[i] > window[1]:
func = lesser
comp = window[1]
# print('func def')
elif whys[i] < window[0]:
func = greater
comp = window[0]
# print('func def')
else:
        raise Exception('Whoa, something weird happened in forced_return()')
val = whys[i]
n = 0
while not func(val, comp):
i += 1
n += 1
val = lin_func(index=i)
# print(val)
# print('finished')
return i, n, m
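# Hedged sketch of forced_return (made-up numbers): if the last three points
# are 9.0, 8.0, 7.0 with unit x-spacing and window=(4, 6), the fitted slope is
# -1, so the linear extrapolation y = 7 - n first satisfies y < 6 at n = 2 and
# the call returns (i + 2, 2, -1.0).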
def days_to_return(exes, i, func, max_nan=0):
"""
Returns the number of days for a series to return to above/below the indexed value
Args:
        exes: the series of values to scan
i: index to start at
func: a function, either lesser or greater as defined in this module
max_nan: maximum allowable consecutive nans
Returns:
num of days to return
"""
if func is lesser:
# print('looking for when vals drop below comp')
pass
elif func is greater:
# print('looking for when vals rise above comp')
pass
initial = exes[i]
nas = 0
n = 0
try:
while nas <= max_nan:
i += 1
n += 1
val = exes[i]
# print(f'Compare {val} to initial ({initial})')
if np.isnan(val):
nas += 1
elif func(val, initial):
break
except IndexError:
pass
return n
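# Hedged sketch of days_to_return (made-up series): for exes = [5, 7, 8, 6, 4],
# i = 0 and func=lesser, the first value below exes[0] = 5 is the 4 at index 4,
# so the function returns n = 4 (up to max_nan consecutive NaNs are skipped).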
def drops_through(exes, i, window, allowed):
"""
Checks if exes drops through the window fast enough from index i
Args:
        exes: the series of values to scan
i: the index being checked
window: the min and max of the window
allowed: number of days allowed to pass through the window
Returns:
bool
"""
val = exes[i]
while within(val, window):
i -= 1
val = exes[i]
if val > window[1]:
func = lesser
comp = window[0]
# print('First val out of window is above. Checking to see when val goes below window')
elif val < window[0]:
func = greater
comp = window[1]
# print('First val out of window is below. Checking to see when val goes above window')
else:
        raise Exception('Whoa, something weird happened in drops_through()')
count = 0
while count < allowed:
i += 1
count += 1
val = exes[i]
# print(val,comp)
if func(val, comp):
return True, i
return False, -1
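# Hedged sketch of drops_through (made-up series): with window=(4, 6) and values
# ..., 7, 5, 3, ... where index i sits on the 5 (inside the window), the backward
# scan lands on the 7 (above the window), so the series must fall below
# window[0] = 4 within `allowed` steps; with allowed >= 2 the 3 arrives in time
# and the call returns (True, index_of_the_3).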
###############
'''
choice_param = 'Discharge Detrend'
choice_gauge = '02218565'
# 04249000
# 015765185
# 0209303205
results_folder = r'E:\hurricane\results'
data_folder = r'E:\hurricane\station_data\modified'
data = clean_read(os.path.join(data_folder,choice_gauge+'.csv'))
result_df = pd.read_csv(os.path.join(results_folder,choice_param+'.csv'), dtype={'Gauge':str})
for index,line in result_df.iterrows():
if np.isnan(line['Pre-effect Window']):
continue
gauge = line['Gauge']
start = line['Storm Index']
mean = line['Pre-effect Mean']
stddev = line['Pre-effect Stddev']
if gauge == choice_gauge:
break
low = mean - stddev
high = mean + stddev
(es, ee), stats = get_effect(data, choice_param, mean, stddev, start, lag=3, effect_type=1,
returning_gap=1, dropthrough=[1,2], forcing=(3,4), max_effect=365, max_dropout=5)
plt.figure()
plt.plot(data.index,data[choice_param])
plt.axvline(start, color='red')
plt.axhline(high, color='orange')
plt.axhline(low, color='orange')
if stats[3] is not None:
plt.axvline(es, color='green', linestyle='dashed')
plt.axvline(ee, color='blue')
if stats[3] == 'forced':
x1 = stats[4]
x2 = ee
y1 = data[choice_param][stats[4]]
y2 = y1 + (x2-x1)*stats[5]
fx = [x1,x2]
fy = [y1,y2]
plt.plot(fx,fy,color='black', linestyle='dashed')
plt.xlim(start-28,start+28)
plt.title(f'Above: {stats[0]}, Below: {stats[1]}, Between: {stats[2]} \n'
f'Termination Type: {stats[3]}')
plt.show()
'''
| 32.602771
| 120
| 0.551463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,062
| 0.500248
|
c6d681ac44ef1494d6073c997560935007da32f3
| 131
|
py
|
Python
|
lightning_transformers/task/nlp/multiple_choice/datasets/swag/__init__.py
|
maksym-taranukhin/lightning-transformers
|
aa7202657973b5b65c3c36eb745621043859ebc4
|
[
"Apache-2.0"
] | 451
|
2021-04-21T15:53:59.000Z
|
2022-03-29T10:39:45.000Z
|
lightning_transformers/task/nlp/multiple_choice/datasets/swag/__init__.py
|
mathemusician/lightning-transformers
|
b2ef06113433e6a178ce4d3c9df7ede8064e247f
|
[
"Apache-2.0"
] | 92
|
2021-04-21T18:42:58.000Z
|
2022-03-30T05:29:54.000Z
|
lightning_transformers/task/nlp/multiple_choice/datasets/swag/__init__.py
|
mathemusician/lightning-transformers
|
b2ef06113433e6a178ce4d3c9df7ede8064e247f
|
[
"Apache-2.0"
] | 51
|
2021-04-22T05:35:28.000Z
|
2022-03-17T13:08:12.000Z
|
from lightning_transformers.task.nlp.multiple_choice.datasets.swag.data import ( # noqa: F401
SwagMultipleChoiceDataModule,
)
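# Hedged usage note: this re-export only shortens the import path, e.g.
# `from lightning_transformers.task.nlp.multiple_choice.datasets.swag import
# SwagMultipleChoiceDataModule`; constructor arguments are version-dependent
# and deliberately not shown here.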
| 32.75
| 94
| 0.80916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.091603
|
c6d6b79b9b74cb519b433548531f1d028f0803ab
| 871
|
py
|
Python
|
warningshot.py
|
DeadpoolPancakes/nerf-sentry
|
0f9cccd78e66f4020f1960871fd35c328a697086
|
[
"MIT"
] | null | null | null |
warningshot.py
|
DeadpoolPancakes/nerf-sentry
|
0f9cccd78e66f4020f1960871fd35c328a697086
|
[
"MIT"
] | null | null | null |
warningshot.py
|
DeadpoolPancakes/nerf-sentry
|
0f9cccd78e66f4020f1960871fd35c328a697086
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
Motor1Enable = 5
Motor1B = 24
Motor1A = 27
Motor2Enable = 17
Motor2B = 6
Motor2A = 22
#single shot script used as a warning shot
# Set up defined GPIO pins
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1Enable,GPIO.OUT)
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2Enable,GPIO.OUT)
# Turn the firing motor on
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor2Enable,GPIO.HIGH)
# warm it up for half a second
sleep(0.5)
#turn on firing mechanism
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1Enable,GPIO.HIGH)
# Stop the motor
sleep(0.5)
GPIO.output(Motor2Enable,GPIO.LOW)
GPIO.output(Motor1Enable,GPIO.LOW)
# Always end this script by cleaning the GPIO
GPIO.cleanup()
| 21.243902
| 45
| 0.771527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.24225
|
c6d814a8e68b9da379529a21009897f7697124d2
| 1,979
|
py
|
Python
|
ampadb_index/parse_md.py
|
ampafdv/ampadb
|
25c804a5cb21afcbe4e222a3b48cca27ff2d9e19
|
[
"MIT"
] | null | null | null |
ampadb_index/parse_md.py
|
ampafdv/ampadb
|
25c804a5cb21afcbe4e222a3b48cca27ff2d9e19
|
[
"MIT"
] | 28
|
2016-10-21T16:04:56.000Z
|
2018-11-10T20:55:40.000Z
|
ampadb_index/parse_md.py
|
ampafdv/ampadb
|
25c804a5cb21afcbe4e222a3b48cca27ff2d9e19
|
[
"MIT"
] | 2
|
2016-10-22T19:24:45.000Z
|
2017-02-11T10:49:02.000Z
|
import html
import markdown
import bleach
import lxml.html
from lxml.html import builder as E
TAGS = [
'p', 'img', 'em', 'strong', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ol', 'ul',
'li', 'br', 'hr', 'a', 'img', 'blockquote', 'b', 'i', 'u', 's', 'pre',
'code', 'table', 'thead', 'tr', 'th', 'tbody', 'td'
]
ATTRS = {
'ol': ['start'],
'a': ['href', 'title', 'rel'],
'img': ['src', 'title', 'alt'],
'th': ['align'],
'td': ['align']
}
STYLES = []
def clean(raw_html):
return bleach.clean(raw_html, tags=TAGS, attributes=ATTRS, styles=STYLES)
def parse_md(md_text, wrap='div', html_class='markdown'):
raw_html = markdown.markdown(
md_text,
output_format='html5',
enable_attributes=False,
lazy_ol=False,
encoding='utf-8',
extensions=['markdown.extensions.extra'])
clean_html = clean(raw_html)
    # Wrap the output in the requested container tag
if wrap == 'div':
if html_class:
tree = E.DIV(E.CLASS(html_class))
else:
tree = E.DIV()
elif wrap == 'blockquote':
if html_class:
tree = E.BLOCKQUOTE(E.CLASS(html_class))
else:
tree = E.BLOCKQUOTE()
elif wrap == 'raw':
return clean_html
else:
        raise ValueError('`wrap` must be "div" or "blockquote", not '
                         '{}'.format(wrap))
bin_html = clean_html.encode('utf-8', 'xmlcharrefreplace')
try:
for elem in lxml.html.fragments_fromstring(
bin_html, parser=lxml.html.HTMLParser(encoding='utf-8')):
tree.append(elem)
except TypeError:
        # Must be unescaped because E.P escapes the HTML again
tree.append(E.P(html.unescape(clean_html)))
for table in tree.iter('table'):
        table.classes |= {'table'}  # Add the "table" class
return lxml.html.tostring(
tree, encoding='utf-8', method='html',
pretty_print=True).decode('utf-8')
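if __name__ == '__main__':
    # Hedged usage sketch (made-up input): render a small Markdown snippet and
    # print the sanitized, wrapped HTML.
    sample = '# Title\n\nHello **world**!'
    print(parse_md(sample, wrap='div', html_class='markdown'))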
| 29.984848
| 79
| 0.560889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.269192
|
c6d82f38c4cf48f9145d5b9fdd0bf2b8b2b2ea04
| 552
|
py
|
Python
|
removeModule.py
|
ahmedwab/MMM-ModularHandlet.py
|
8bdc59730507333d280f2120849c5881dac7b1ad
|
[
"MIT"
] | 1
|
2022-01-16T20:21:15.000Z
|
2022-01-16T20:21:15.000Z
|
removeModule.py
|
ahmedwab/MMM-ModuleHandler
|
8bdc59730507333d280f2120849c5881dac7b1ad
|
[
"MIT"
] | null | null | null |
removeModule.py
|
ahmedwab/MMM-ModuleHandler
|
8bdc59730507333d280f2120849c5881dac7b1ad
|
[
"MIT"
] | null | null | null |
import subprocess
import os
def removeModule(filename):
try:
path = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
path = path +"/modules"
shellCommand = "cd " + path + " && rm -r -f " + filename
subprocess.call(shellCommand, shell=True)
shellCommand = "node changeConfig.js remove "+filename
subprocess.call(shellCommand, shell=True)
print (filename + " Removed")
print ("---------------------------")
    except Exception:
print ("Error removing module")
| 27.6
| 76
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.230072
|
c6d82fc284eef62f6b254b22655051352ba00a72
| 532
|
py
|
Python
|
src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | 1
|
2021-05-18T16:10:49.000Z
|
2021-05-18T16:10:49.000Z
|
src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
from ...abstractObjects.hybridShapes.line import LinePtPt
def AddNewLinePtPt(self, geometrical_set, start, end):
part = geometrical_set.parentsDict['Part']
reference1 = part._createReferenceFromObject(start)
reference2 = part._createReferenceFromObject(end)
cat_constructor = self.cat_constructor.AddNewLinePtPt(reference1, reference2)
geometrical_set.cat_constructor.AppendHybridShape(cat_constructor)
line = LinePtPt(geometrical_set.parentsDict, cat_constructor, start, end)
return line
| 40.923077
| 82
| 0.781955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.011278
|
c6d92303c364567cf9a1dd4b401fd0429cd92a45
| 195
|
py
|
Python
|
Latest/venv/Lib/site-packages/pyface/resource/__init__.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | 1
|
2022-01-09T20:04:31.000Z
|
2022-01-09T20:04:31.000Z
|
Latest/venv/Lib/site-packages/pyface/resource/__init__.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | 1
|
2022-02-15T12:01:57.000Z
|
2022-03-24T19:48:47.000Z
|
Latest/venv/Lib/site-packages/pyface/resource/__init__.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
""" Support for managing resources such as images and sounds.
Part of the TraitsGUI project of the Enthought Tool Suite.
"""
| 32.5
| 62
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.984615
|
c6d9810ee3519ae415fa0512f84807c328a50106
| 1,223
|
py
|
Python
|
Lab Activity 6.py
|
Jeralph-Red/OOP-58001
|
4e38f9a0a58098a121a61e640a53e9568bf529b0
|
[
"Apache-2.0"
] | null | null | null |
Lab Activity 6.py
|
Jeralph-Red/OOP-58001
|
4e38f9a0a58098a121a61e640a53e9568bf529b0
|
[
"Apache-2.0"
] | null | null | null |
Lab Activity 6.py
|
Jeralph-Red/OOP-58001
|
4e38f9a0a58098a121a61e640a53e9568bf529b0
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
class SemGrade:
def __init__(self, win):
self.lbl1=Label(win, text='Prelim:')
self.lbl2=Label(win, text='Midterm:')
self.lbl3=Label(win, text='Final:')
self.lbl4=Label(win, text='Semestral Grade:')
self.t1=Entry(bd=3)
self.t2=Entry(bd=3)
self.t3=Entry(bd=3)
self.t4=Entry(bd=3)
self.btn1 = Button(win, text='Add')
self.b1 = Button(win, text='Compute for Semestral Grade', command=self.compute)
self.b1.place(x=100, y=150)
self.lbl1.place(x=70, y=50)
self.t1.place(x=180, y=50)
self.lbl2.place(x=70, y=80)
self.t2.place(x=180, y=80)
self.lbl3.place(x=70, y=110)
self.t3.place(x=180, y=110)
self.lbl4.place(x=70,y=190)
self.t4.place(x=180,y=190)
def compute(self):
self.t4.delete(0, 'end')
num1=int(self.t1.get())
num2=int(self.t2.get())
num3=int(self.t3.get())
result=(num1+num2+num3)/3
self.t4.insert(END, str(result))
window=Tk()
mywin=SemGrade(window)
window.title('Semestral Grade Calculator')
window.geometry("400x300+10+10")
window.mainloop()
| 31.358974
| 88
| 0.567457
| 1,064
| 0.869992
| 0
| 0
| 0
| 0
| 0
| 0
| 127
| 0.103843
|
c6da86aae41063146c3bc7bd5c1f243c9c0368e2
| 1,853
|
py
|
Python
|
parse_wfd.py
|
ajsimon1/Cazar
|
6831dbdb63764ad2159eaad45fe2b6cfc7edd553
|
[
"MIT"
] | null | null | null |
parse_wfd.py
|
ajsimon1/Cazar
|
6831dbdb63764ad2159eaad45fe2b6cfc7edd553
|
[
"MIT"
] | null | null | null |
parse_wfd.py
|
ajsimon1/Cazar
|
6831dbdb63764ad2159eaad45fe2b6cfc7edd553
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from xml.etree import ElementTree as et
cwd = os.getcwd()
filepath = 'C:\\Users\\asimon\\Desktop\\Practice-' \
'Training\\p21_template_out3.xml'
def parse_wfd_xml(filepath):
tree = et.parse(filepath)
root = tree.getroot()
data, page = root.findall('.//LineDataInput/LDILayout/Nodes/Node/Node')
data_dict = {}
page_dict = {}
for i in data.findall('./Node/Node/Content'):
data_dict[i.find('Name').text] = i.find('Guid').text
df_data = pd.DataFrame.from_dict(data_dict,
orient='index',
columns=['guid'])
for i in page.findall('./Node/Node/Node/Content'):
try:
            page_dict[i.find('DataVariable').text] = [i.find('Name').text,
                                                      i.find('Size').get('X'),
                                                      i.find('Size').get('Y'),
                                                      i.find('Offset').get('X'),
                                                      i.find('Offset').get('Y')]
except AttributeError:
pass
    df_page = pd.DataFrame.from_dict(page_dict,
                                     orient='index',
                                     columns=['name',
                                              'size_x',
                                              'size_y',
                                              'offset_x',
                                              'offset_y'])
# df_combined = df_data.join(df_page, on='guid')
# possible drop NaNs?
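    # The join matches df_data['guid'] against df_page's index (the
    # DataVariable GUIDs), so each data-variable row gains the layout columns
    # name, size_x, size_y, offset_x, offset_y from its page node.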
return df_data.join(df_page, on='guid')
if __name__ == '__main__':
df = parse_wfd_xml(filepath)
writer = pd.ExcelWriter('wfd_output.xlsx')
df.to_excel(writer, 'Sheet1')
writer.save()
| 39.425532
| 75
| 0.447922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 407
| 0.219644
|
c6db094a77c778676e8dbdbc124941b532991717
| 1,598
|
py
|
Python
|
setup.py
|
yonglehou/memsql-loader
|
5e7bb5787991aa990889c4e709f63a3529544268
|
[
"MIT"
] | 1
|
2021-05-10T03:37:26.000Z
|
2021-05-10T03:37:26.000Z
|
setup.py
|
yonglehou/memsql-loader
|
5e7bb5787991aa990889c4e709f63a3529544268
|
[
"MIT"
] | null | null | null |
setup.py
|
yonglehou/memsql-loader
|
5e7bb5787991aa990889c4e709f63a3529544268
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
# get version
from memsql_loader import __version__
setup(
name='memsql-loader',
version=__version__,
author='MemSQL',
author_email='support@memsql.com',
url='https://github.com/memsql/memsql-loader',
download_url='https://github.com/memsql/memsql-loader/releases/latest',
license='LICENSE.txt',
description='MemSQL Loader helps you run complex ETL workflows against MemSQL',
long_description=open('README.md').read(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
platforms=[ "Linux", "Mac OS X" ],
entry_points={
'console_scripts': [
'memsql-loader = memsql_loader.main:main'
]
},
packages=[
'memsql_loader',
'memsql_loader.api',
'memsql_loader.cli',
'memsql_loader.db',
'memsql_loader.execution',
'memsql_loader.loader_db',
'memsql_loader.util',
'memsql_loader.util.apsw_sql_step_queue',
'memsql_loader.vendor',
'memsql_loader.vendor.glob2',
],
zip_safe=False,
install_requires=[
'memsql==2.14.4',
'wraptor==0.6.0',
'clark==0.1.0',
'voluptuous==0.8.5',
'boto==2.28.0',
'pycurl==7.19.3.1',
'prettytable==0.7.2',
'pywebhdfs==0.2.4',
'requests==2.5.1',
],
tests_require=[
'docker-py==0.3.1',
'pytest==2.5.2',
'pytest-xdist==1.10',
'pexpect==3.3',
'requests==2.2.1',
],
)
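# Hedged usage note (standard setuptools flow, not specific to this project):
#     pip install .             # installs the `memsql-loader` console script
#     python setup.py sdist     # builds a source distribution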
| 27.084746
| 83
| 0.574468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 886
| 0.554443
|
c6dbe3048a8498d4b259596610f445fd78aa7173
| 17,022
|
py
|
Python
|
p20191120_wada.py
|
tmseegoslo/wada
|
1f0163ccc0e0815ae7586291712f8920b00cf7ba
|
[
"Apache-2.0"
] | null | null | null |
p20191120_wada.py
|
tmseegoslo/wada
|
1f0163ccc0e0815ae7586291712f8920b00cf7ba
|
[
"Apache-2.0"
] | null | null | null |
p20191120_wada.py
|
tmseegoslo/wada
|
1f0163ccc0e0815ae7586291712f8920b00cf7ba
|
[
"Apache-2.0"
] | null | null | null |
#MNE tutorial
#Import modules
import os
import numpy as np
import mne
import re
import complexity_entropy as ce
#Import specific modules for filtering
from numpy.fft import fft, fftfreq
from scipy import signal
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import matplotlib.pyplot as plt
### PUT ALL PARAMETERS HERE ###
### ### ### ### ### ### ### ###
### PUT FUNCTIONS HERE OR BETTER, IN SEPARATE FILE ###
### ### ### ### ### ### ### ### ### ### ### ### ### ###
#Path(s) to data #UPDATE TO READ ALL SUBFOLDERS IN A FOLDER
data_folder = r'Y:\Data\Wada Data Swiss\Visit_JFS_BJE\Originals'
data_raw_file = os.path.join(data_folder,
'wadatest_14_06_19.edf')
### LOOP OVER ALL SUBJECTS FOR PREPROCESSING ###
### consider putting pre-processing ###
#Read data
raw = mne.io.read_raw_edf(data_raw_file, misc=['ECG EKG-REF'],
stim_channel='Event EVENT-REF', preload=True)
#Convenience function to trim channel names
def ch_rename(oldname):
return re.findall(r"\s.+-", oldname)[0][1:-1]
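# Illustrative example: for 'EEG Fp1-REF' the regex captures ' Fp1-' and the
# [1:-1] slice trims the leading space and trailing dash, leaving 'Fp1'.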
#Trim channel names
raw.rename_channels(ch_rename)
#Print overall and detailed info about raw dataset
print(raw)
print(raw.info)
#Read montage
montage = mne.channels.make_standard_montage('standard_postfixed')
#Set montage
raw.set_montage(montage)
#Plot sensor locations
#raw.plot_sensors(show_names=True)
#Temporarily add dummy annotation to spare user from adding new label
raw.annotations.append(onset=raw.times[0]-1.0, duration=0.0, description='Slow EEG')
#Plot raw EEG traces. Mark onset of slow EEG
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Mark onset of slow EEG')
#Crop data around the newly inserted marker
seg_length = 300 #seconds
times_slow = [a['onset'] for a in raw.annotations if 'Slow' in a['description']]
tmin = times_slow[1]-seg_length
tmax = times_slow[1]+seg_length
raw = raw.crop(tmin=tmin,tmax=tmax)
#Temporarily add dummy annotation to spare user from adding new label
raw.annotations.append(onset=raw.times[0]-1.0, duration=0.0, description='BAD_segments')
#Plot raw EEG traces. Reject obviously bad channels and mark bad segments
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Reject obviously bad channels and bad segments')
# Making and inserting events for epoching data
epoch_length = 10.0 # sec
overlap = 9.0 # sec
event_id = 1
t_min = 0.0
events = mne.make_fixed_length_events(raw, id=event_id, start=t_min,
stop=None, duration=epoch_length,
first_samp=True, overlap=overlap)
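# With epoch_length = 10 s and overlap = 9 s, consecutive events are spaced
# duration - overlap = 1 s apart, giving heavily overlapping 10 s epochs.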
raw.add_events(events, stim_channel='EVENT', replace=False)
# Check that events are in the right place
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Check position of events', events=events)
# Read epochs
rawepochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=t_min,
tmax=epoch_length, baseline=(None, None), picks='eeg',
preload=True, reject=None, proj=False)
#Plot epoched data
rawepochs.plot(n_epochs=10, n_channels=22, scalings=dict(eeg=1e-4, misc=1e-3, stim=100))
#Plot power spectrum
rawepochs.plot_psd(fmax=180)
#Filter the data from 1-100 Hz using the default options
#NOTE: Usually you should apply high-pass and low-pass filter separately, but
#this is done 'behind the scenes' in this case
epochs = rawepochs.copy().filter(1, 80, picks='eeg', filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto',
method='fir', phase='zero', fir_window='hamming',
fir_design='firwin')
#Plot power spectra
epochs.plot_psd(fmax=180)
#Plot epoched EEG traces. Reject obviously bad channels and mark bad segments
epochs.plot(n_epochs=10, n_channels=22, scalings=dict(eeg=3e-4, misc=1e-3, stim=100),
title='Reject obviously bad channels and bad segments')
#Set up and fit the ICA
ica = mne.preprocessing.ICA(method = 'infomax', fit_params=dict(extended=True),
random_state=0, max_iter=1000)
ica.fit(epochs, picks='eeg')
#Quick look at components
ica.plot_components(inst=epochs, plot_std=True,
picks='eeg',
psd_args=dict(fmax=85))
#Plot time course of ICs
ica.plot_sources(epochs)
# =============================================================================
# #Check components one by one and mark bad ones
# n_comps = ica.get_components().shape[1]
# is_brain = [True for i in range(0,n_comps)]
# print('Press a keyboard key for brain, and a mouse button for non-brain')
# for i in range(0,n_comps) :
# ica.plot_properties(prep, picks=i, psd_args=dict(fmin=0, fmax=110))
# is_brain[i] = plt.waitforbuttonpress()
# plt.close()
# idx_bad = [i for i, x in enumerate(is_brain) if not(x)]
# ica.exclude = idx_bad
# =============================================================================
ica.apply(epochs)
#Plot cleaned data
epochs.plot(scalings=dict(eeg=3e-4, misc=1e-3, stim=1),n_epochs=5)
#Compare power spectra
epochs.plot_psd(fmax=90)
#Set bipolar (double banana) reference
anodes = ['Fp2', 'F8', 'T4', 'T6', 'Fp1', 'F7', 'T3', 'T5',
'Fp2', 'F4', 'C4', 'P4', 'Fp1', 'F3', 'C3', 'P3',
'Fz', 'Cz',
'T6', 'T5',
'T4', 'T3']
cathodes = ['F8', 'T4', 'T6', 'O2', 'F7', 'T3', 'T5', 'O1',
'F4', 'C4', 'P4', 'O2', 'F3', 'C3', 'P3', 'O1',
'Cz', 'Pz',
'A2', 'A1',
'T2', 'T1']
#Read montage
montage = mne.channels.make_standard_montage('standard_postfixed')
#Set montage
epochs.set_montage(montage)
prep_bi = mne.set_bipolar_reference(epochs, anodes, cathodes,
                                    drop_refs=False)
#Print info for bipolar (double banana) reference raw data
print(prep_bi)
print(prep_bi.info['ch_names'])
#WARNING: Plotting of sensor locations does not work, set locations first
#Plot sensor locations for bipolar (double banana) reference raw data
#raw_bi.plot_sensors(show_names=True)
# =============================================================================
# order=np.array([0, 2, 4, 6, 21, 8, 22, 23, 10, 12,
# 14, 15,
# 1, 3, 5, 7, 18, 9, 19, 20, 11, 13,
# 16, 17])
# =============================================================================
ch_names = ['T3-T1', 'T5-A1', 'Fp1-F7', 'F7-T3', 'T3-T5', 'T5-O1', 'Fp1-F3',
'F3-C3', 'C3-P3', 'P3-O1', 'Fz-Cz', 'Cz-Pz', 'Fp2-F4', 'F4-C4',
'C4-P4', 'P4-O2', 'Fp2-F8', 'F8-T4', 'T4-T6', 'T6-O2', 'T4-T2',
'T6-A2', 'EKG', 'EVENT']
# =============================================================================
# ch_names = ['T1-A1','F7-A1','T3-A1','T5-A1','Fp1-A1','F3-A1','C3-A1','P3-A1','O1-A1',
# 'Fz-Cz','Pz-Cz',
# 'O2-A2','P4-A2','C4-A2','F4-A2','Fp2-A2','T6-A2','T4-A2','F8-A2','T2-A2',
# 'EKG','EVENT']
# =============================================================================
prep_bi.reorder_channels(ch_names)
#Plot re-referenced data (bipolar double banana reference)
prep_bi.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False)
#Compare power spectra
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
ax.set_xlim(0, 110)
ax.set_ylim(-70, 50)
#raw.plot_psd(fmax=110, ax=ax)
prep_bi.plot_psd(fmax=110, ax=ax)
prep_short = prep_bi.copy()
# =============================================================================
# # Filter again
# prep_short = prep_short.filter(1, 80, picks='eeg', filter_length='auto',
# l_trans_bandwidth='auto', h_trans_bandwidth='auto',
# method='fir', phase='zero', fir_window='hamming',
# fir_design='firwin')
# #Compare power spectra
# fig = plt.figure()
# ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
# ax.set_xlim(0, 100)
# ax.set_ylim(-70, 50)
# prep_short.plot_psd(fmax=100, ax=ax)
# =============================================================================
#prep_short = prep_short.crop(tmin=3840,tmax=4740)
#Plot cropped data
prep_short.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False)
#Get start of infusion.
#WARNING: Hard coded index + not equal to start of slowing of EEG
#time_ipsi_slow = prep_short.annotations[0]['onset']-prep_short._first_time
time_ipsi_slow = prep_short.annotations[1]['onset']-prep_short._first_time #!!! Horrible hack! Manually inserted annotation
epoch_length = 16
time_first_event = time_ipsi_slow - epoch_length*(time_ipsi_slow//epoch_length)
events = mne.make_fixed_length_events(prep_short, id=1, start=time_first_event,
stop=None, duration=epoch_length,
first_samp=True, overlap=0.0)
prep_short.add_events(events, stim_channel='EVENT', replace=False)
#Plot data with added events
prep_short.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False, events=events)
# Read epochs
epochs = mne.Epochs(prep_short, events=events, event_id=1, tmin=0.0,
tmax=epoch_length, baseline=(None, None), picks='eeg',
preload=True, reject=None, proj=False)
#Plot epoched data
epochs.plot(n_epochs=3, n_channels=22, scalings=dict(eeg=1e-4, misc=1e-3, stim=100))
#Get the 3D matrix of epoched EEG-data
data = epochs.get_data(picks='eeg')
idx_left = [2,3,4,5,6,7,8,9] #[3,4,7,8] #[2,3,4,5,7,8]
idx_right = [12,13,14,15,16,17,18,19] #[13,14,17,18] #[13,14,16,17,18,19]
idx_all = idx_left+idx_right #[3,4,7,8,13,14,17,18]
#Calculate Lempel-Ziv complexity
LZC = np.zeros(data.shape[0])
LZCcontra = np.zeros(data.shape[0])
LZCipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
LZC[i] = ce.LZc(np.transpose(data[i,idx_all,:]))
LZCcontra[i] = ce.LZc(np.transpose(data[i,idx_left,:]))
LZCipsi[i] = ce.LZc(np.transpose(data[i,idx_right,:]))
#Plot LZC vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
#plt.plot(range(1,data.shape[0]+1), LZC/LZC[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), LZCcontra/LZCcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), LZCipsi/LZCipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), LZC/LZC[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), LZCcontra/LZCcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), LZCipsi/LZCipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidate',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('LZC/LZC_baseline')
plt.legend(('tLZCcontra', 'tLZCipsi'))
plt.title('Lempel-Ziv complexity - 16s epochs - 8 bipolar channels - 1-30 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#Calculate amplitude coalition entropy
ACE = np.zeros(data.shape[0])
ACEcontra = np.zeros(data.shape[0])
ACEipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
ACE[i] = ce.ACE(data[i,idx_all,:])
ACEcontra[i] = ce.ACE(data[i,idx_left,:])
ACEipsi[i] = ce.ACE(data[i,idx_right,:])
#Plot ACE vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#plt.plot(range(1,data.shape[0]+1), ACE/ACE[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), ACEcontra/ACEcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), ACEipsi/ACEipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), ACE/ACE[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), ACEcontra/ACEcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), ACEipsi/ACEipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidate',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('ACE/ACE_baseline')
plt.legend(('ACEcontra', 'ACEipsi'))
plt.title('Amplitude coalition entropy - 16s epochs - 8 bipolar channels - 1-35 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#Calculate synchrony coalition entropy
SCE = np.zeros(data.shape[0])
SCEcontra = np.zeros(data.shape[0])
SCEipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
SCE[i] = ce.SCE(data[i,idx_all,:])
SCEcontra[i] = ce.SCE(data[i,idx_left,:])
SCEipsi[i] = ce.SCE(data[i,idx_right,:])
#Plot SCE vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
#plt.plot(range(1,data.shape[0]+1), SCE/SCE[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), SCEcontra/SCEcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), SCEipsi/SCEipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), SCE/SCE[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), SCEcontra/SCEcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), SCEipsi/SCEipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidate',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('SCE/SCE_baseline')
plt.legend(('SCEcontra', 'SCEipsi'))
plt.title('Synchrony coalition entropy - 16s epochs - 8 bipolar channels - 1-35 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
## POSSIBLY USEFUL ##
# =============================================================================
# #Resample if needed (Warning: looking at PSD there seems to be some passband-ripples?)
# prep = raw.copy().resample(64)
#
# #Compare power spectra
# raw.plot_psd(fmax=32)
# prep.plot_psd(fmax=32)
#
# #Compare EEG traces
# raw.plot(start=0, duration=15, n_channels=26,
# scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
# remove_dc=True)
# prep.plot(start=0, duration=15, n_channels=26,
# scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
# remove_dc=True)
# =============================================================================
# =============================================================================
# #Construct and visualize FIR filter (recommended over IIR for most applications)
# sfreq = 1000.
# f_p = 40.
# flim = (1.0, sfreq / 2.0) # limits for plotting
# nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
# freq = [0, f_p, f_p, nyq]
# gain = [1, 1, 0, 0]
#
# third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1.0 / 3.]
# ax = plt.subplots(1, figsize=third_height)[1]
# plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
# =============================================================================
## GRAVEYARD ##
# =============================================================================
# stim_data = np.zeros((1, len(prep_short.times)))
# info = mne.create_info(['STI'], raw.info['sfreq'], ['stim'])
# stim_raw = mne.io.RawArray(stim_data, info)
# raw.add_channels([stim_raw], force_update_info=True)
#
# =============================================================================
# =============================================================================
# #Set bipolar (double banana) reference
# anodes = ['Fp2', 'F8', 'T4', 'T6', 'Fp1', 'F7', 'T3', 'T5',
# 'Fp2', 'F4', 'C4', 'P4', 'Fp1', 'F3', 'C3', 'P3',
# 'Fz', 'Cz',
# 'T6', 'T5',
# 'T4', 'T3']
# cathodes = ['F8', 'T4', 'T6', 'O2', 'F7', 'T3', 'T5', 'O1',
# 'F4', 'C4', 'P4', 'O2', 'F3', 'C3', 'P3', 'O1',
# 'Cz', 'Pz',
# 'A2', 'A1',
# 'T2', 'T1']
# raw_bi = mne.set_bipolar_reference(raw, anodes, cathodes)
# #Print info for bipolar (double banana) reference raw data
# print(raw_bi)
# print(raw_bi.info)
# #WARNING: Plotting of sensor locations does not work, set locations first
# #Plot sensor locations for bipolar (double banana) reference raw data
# #raw_bi.plot_sensors(show_names=True)
# =============================================================================
| 39.311778
| 124
| 0.584714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,312
| 0.547057
|
c6dc529d66bad976f5633ed5b6e53c5c1922f83f
| 1,790
|
py
|
Python
|
classifiers.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | 1
|
2021-01-12T19:12:06.000Z
|
2021-01-12T19:12:06.000Z
|
classifiers.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | null | null | null |
classifiers.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sp
class MahalanobisClassifier():
def __init__(self, samples, labels):
self.clusters={}
for lbl in np.unique(labels):
self.clusters[lbl] = samples.loc[labels == lbl, :]
def mahalanobis(self, x, data, cov=None):
"""Compute the Mahalanobis Distance between each row of x and the data
x : vector or matrix of data with, say, p columns.
data : ndarray of the distribution from which Mahalanobis distance of each observation of x is to be computed.
cov : covariance matrix (p x p) of the distribution. If None, will be computed from data.
"""
x_minus_mu = x - np.mean(data)
        if cov is None:
cov = np.cov(data.values.T)
inv_covmat = sp.linalg.inv(cov)
left_term = np.dot(x_minus_mu, inv_covmat)
mahal = np.dot(left_term, x_minus_mu.T)
return mahal.diagonal()
def predict_probability(self, unlabeled_samples):
dists = np.array([])
def dist2prob(D):
row_sums = D.sum(axis=1)
D_norm = (D / row_sums[:, np.newaxis])
S = 1 - D_norm
row_sums = S.sum(axis=1)
S_norm = (S / row_sums[:, np.newaxis])
return S_norm
#Distance of each sample from all clusters
for lbl in self.clusters:
tmp_dists=self.mahalanobis(unlabeled_samples, self.clusters[lbl])
if len(dists)!=0:
dists = np.column_stack((dists, tmp_dists))
else:
dists = tmp_dists
return dist2prob(dists)
def predict_class(self, unlabeled_sample, ind2label):
return np.array([ind2label[np.argmax(row)] for row in self.predict_probability(unlabeled_sample)])
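if __name__ == "__main__":
    # Hedged usage sketch with a tiny synthetic dataset (all values made up):
    # two well-separated clusters, then classify two new points.
    import pandas as pd

    samples = pd.DataFrame({"x": [0.0, 0.2, 0.1, 5.0, 5.2, 4.9],
                            "y": [0.1, 0.0, 0.2, 5.1, 5.0, 4.8]})
    labels = pd.Series([0, 0, 0, 1, 1, 1])
    clf = MahalanobisClassifier(samples, labels)
    new_points = pd.DataFrame({"x": [0.1, 5.1], "y": [0.1, 5.0]})
    print(clf.predict_class(new_points, ind2label={0: "A", 1: "B"}))  # -> ['A' 'B']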
| 37.291667
| 118
| 0.6
| 1,750
| 0.977654
| 0
| 0
| 0
| 0
| 0
| 0
| 406
| 0.226816
|
c6dcf725bd23764de094f21a2a52e9e26e955427
| 1,982
|
py
|
Python
|
augmentation/postprocessor.py
|
abamaxa/docvision_generator
|
8017f29c7d908cb80ddcd59e345a222271fa74de
|
[
"MIT"
] | 2
|
2020-02-06T17:30:41.000Z
|
2020-08-04T10:35:46.000Z
|
augmentation/postprocessor.py
|
abamaxa/docvision_generator
|
8017f29c7d908cb80ddcd59e345a222271fa74de
|
[
"MIT"
] | null | null | null |
augmentation/postprocessor.py
|
abamaxa/docvision_generator
|
8017f29c7d908cb80ddcd59e345a222271fa74de
|
[
"MIT"
] | null | null | null |
import os
import json

import cv2
import numpy as np
import PIL.Image
def convert_image_to_numpy(image) :
(im_width, im_height) = image.size
    image_np = np.frombuffer(image.tobytes(), dtype='uint8')
array_shape = (im_height, im_width, int(image_np.shape[0] / (im_height * im_width)))
return image_np.reshape(array_shape).astype(np.uint8)
def convert_numpy_to_image(image_np) :
image = PIL.Image.fromarray(image_np)
return image
def postprocess(image, erode_by) :
kernel = np.ones((erode_by, erode_by), np.uint8)
if isinstance(image, PIL.Image.Image) :
image = convert_image_to_numpy(image)
image = cv2.erode(image, kernel)
return convert_numpy_to_image(image)
else :
return cv2.erode(image, kernel)
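# Hedged usage sketch (file name is illustrative): erode a PIL image with a
# 3x3 kernel; the numpy round-trip is handled inside postprocess().
#
#     thin_img = postprocess(PIL.Image.open("page.png"), erode_by=3)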
def save_file(image, original_file, prefix, json_data) :
new_file = prefix + "E-" + original_file
cv2.imwrite(new_file, image)
json_filename = new_file[:-3] + "json"
json_data["filename"] = new_file
with open(json_filename, "w") as json_file :
json.dump(json_data, json_file, indent=4)
def erode_all(save_as_hsv) :
kernel7 = np.ones((7,7),np.uint8)
kernel5 = np.ones((5,5),np.uint8)
kernel3 = np.ones((3,3),np.uint8)
for file in os.listdir('.') :
        if file.lower()[-3:] not in ("png", "jpg"):
continue
print(file)
json_filename = file[:-3] + "json"
with open(json_filename, "r") as json_file :
json_data = json.load(json_file)
image = cv2.imread(file)
if save_as_hsv :
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
image3 = cv2.erode(image, kernel3)
save_file(image3, file, "3", json_data)
image5 = cv2.erode(image, kernel5)
save_file(image5, file, "5", json_data)
#image7 = cv2.erode(image, kernel7)
#save_file("7E-" + file, image7)
if __name__ == '__main__' :
erode_all(True)
| 29.58209
| 88
| 0.639758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 139
| 0.070131
|
c6dd53889ed24a20c2e564119c4f2587d1b0b030
| 12,891
|
py
|
Python
|
Functions/learning_models.py
|
goyalpike/RK4_SinDy
|
7a53b03611f28915244a86f11de6552e513d0dbb
|
[
"MIT"
] | null | null | null |
Functions/learning_models.py
|
goyalpike/RK4_SinDy
|
7a53b03611f28915244a86f11de6552e513d0dbb
|
[
"MIT"
] | null | null | null |
Functions/learning_models.py
|
goyalpike/RK4_SinDy
|
7a53b03611f28915244a86f11de6552e513d0dbb
|
[
"MIT"
] | null | null | null |
""" Training of a network """
import torch
import sys
import torch_optimizer as optim_all
import numpy as np
from .modules import rk4th_onestep_SparseId, rk4th_onestep_SparseId_parameter
def learning_sparse_model(dictionary, Coeffs, dataloaders, Params,lr_reduction = 10, quite = False):
'''
Parameters
----------
    dictionary : A function
        A symbolic dictionary containing the candidate functions that may describe the dynamics.
    Coeffs : float
        Coefficients that pick the correct features from the dictionary.
    dataloaders : dataset
        Dataloaders containing the data, following the PyTorch framework.
    Params : dataclass
        Contains additional auxiliary parameters.
    lr_reduction : float, optional
        The learning rate is reduced by lr_reduction after each iteration. The default is 10.
    quite : bool, optional
        Decides whether to print the coeffs after each iteration. The default is False.
    Returns
    -------
    Coeffs : float
        Non-zero coefficients pick features from the dictionary and
        also determine the right coefficients in front of the features.
    loss_track : float
        Tracking of the loss after each epoch and iteration.
'''
# Define optimizer
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = Params.lr,weight_decay=Params.weightdecay)
# Define loss function
criteria = torch.nn.MSELoss()
    # pre-allocate memory for loss tracking
loss_track = np.zeros((Params.num_iter,Params.num_epochs))
#########################
###### Training #########
#########################
for p in range(Params.num_iter):
for g in range(Params.num_epochs):
Coeffs.train()
for y in dataloaders['train']:
opt_func.zero_grad()
loss_new = torch.autograd.Variable(torch.tensor([0.],requires_grad=True))
weights = 2**(-0.5*torch.linspace(0,0,1))
for i in range(y[0].shape[0]):
yi = y[0][i]
timesteps_i = torch.tensor(np.diff(y[1][i],axis=0)).float()
y_total = yi
##################################
# One forward step predictions
##################################
y_pred = rk4th_onestep_SparseId(y_total[:-1],dictionary,Coeffs,timestep = timesteps_i)
loss_new += criteria(y_pred,y_total[1:])
##################################
# One backward step predictions
##################################
y_pred_back = rk4th_onestep_SparseId(y_total[1:],dictionary, Coeffs,timestep = -timesteps_i)
loss_new += weights[0]*criteria(y_pred_back, y_total[:-1])
loss_new /= y[0].shape[0]
loss_track[p,g] += loss_new.item()
loss_new.backward()
opt_func.step()
sys.stdout.write("\r [Iter %d/%d] [Epoch %d/%d] [Training loss: %.2e] [Learning rate: %.2e]"
% (p+1,Params.num_iter,g+1,Params.num_epochs,loss_track[p,g],opt_func.param_groups[0]['lr']))
# Removing the coefficients smaller than tol and set gradients w.r.t. them to zero
# so that they will not be updated in the iterations
Ws = Coeffs.linear.weight.detach().clone()
Mask_Ws = (Ws.abs() > Params.tol_coeffs).type(torch.float)
Coeffs.linear.weight = torch.nn.Parameter(Ws * Mask_Ws)
if not quite:
print('\n')
print(Ws)
print('\nError in coeffs due to truncation: {}'.format((Ws - Coeffs.linear.weight).abs().max()))
print('Printing coeffs after {} iter after truncation'.format(p+1))
print(Coeffs.linear.weight)
print('\n'+'='*50)
Coeffs.linear.weight.register_hook(lambda grad: grad.mul_(Mask_Ws))
new_lr = opt_func.param_groups[0]['lr']/lr_reduction
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = new_lr,weight_decay=Params.weightdecay)
return Coeffs, loss_track
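# Hedged usage sketch for learning_sparse_model (all names are assumptions: a
# feature dictionary such as a polynomial library, a torch.nn.Linear-based
# Coeffs module, dataloaders yielding (trajectory, timepoints) batches, and a
# Params object exposing lr, weightdecay, num_iter, num_epochs, tol_coeffs):
#
#     Coeffs, loss_track = learning_sparse_model(poly_dictionary, Coeffs,
#                                                dataloaders, Params,
#                                                lr_reduction=10, quite=True)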
def learning_sparse_model_parameter(dictionary, Coeffs, dataloaders, Params,lr_reduction = 10, quite = False):
'''
    Here, we tailor sparse learning to parametric cases. The script is tested for a single parameter.
    Parameters
    ----------
    dictionary : A function
        A symbolic dictionary containing the candidate functions that may describe the dynamics.
    Coeffs : float
        Coefficients that pick the correct features from the dictionary.
    dataloaders : dataset
        Dataloaders containing the data, following the PyTorch framework.
    Params : dataclass
        Contains additional auxiliary parameters.
    lr_reduction : float, optional
        The learning rate is reduced by lr_reduction after each iteration. The default is 10.
    quite : bool, optional
        Decides whether to print the coeffs after each iteration. The default is False.
    Returns
    -------
    Coeffs : float
        Non-zero coefficients pick features from the dictionary and
        also determine the right coefficients in front of the features.
    loss_track : float
        Tracking of the loss after each epoch and iteration.
'''
# Define optimizer
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = Params.lr,weight_decay=Params.weightdecay)
    # Define loss function
criteria = torch.nn.MSELoss()
    # pre-allocate memory for loss tracking
loss_track = np.zeros((Params.num_iter,Params.num_epochs))
#########################
###### Training #########
#########################
for p in range(Params.num_iter):
for g in range(Params.num_epochs):
Coeffs.train()
for y in dataloaders['train']:
opt_func.zero_grad()
loss_new = torch.autograd.Variable(torch.tensor([0.],requires_grad=True))
weights = 2**(-0.5*torch.linspace(0,0,1))
for i in range(y[0].shape[0]):
yi = y[0][i]
mui = y[2][i]
timesteps_i = torch.tensor(np.diff(y[1][i],axis=0)).float()
##########################
# One forward step predictions
y_pred = rk4th_onestep_SparseId_parameter(yi[:-1],mui[:-1],dictionary,Coeffs,timestep = timesteps_i)
loss_new += criteria(y_pred,yi[1:])
# One backward step predictions
y_pred_back = rk4th_onestep_SparseId_parameter(yi[1:],mui[:-1],dictionary, Coeffs,timestep = -timesteps_i)
loss_new += weights[0]*criteria(y_pred_back, yi[:-1])
loss_new /= y[0].shape[0]
loss_track[p,g] += loss_new.item()
loss_new.backward()
opt_func.step()
sys.stdout.write("\r [Iter %d/%d] [Epoch %d/%d] [Training loss: %.2e] [Learning rate: %.2e]"
% (p+1,Params.num_iter,g+1,Params.num_epochs,loss_track[p,g],opt_func.param_groups[0]['lr']))
# Removing the coefficients smaller than tol and set gradients w.r.t. them to zero
# so that they will not be updated in the iterations
Ws = Coeffs.linear.weight.detach().clone()
Mask_Ws = (Ws.abs() > Params.tol_coeffs).type(torch.float)
Coeffs.linear.weight = torch.nn.Parameter(Ws * Mask_Ws)
if not quite:
print('\n')
print(Ws)
print('\nError in coeffs due to truncation: {}'.format((Ws - Coeffs.linear.weight).abs().max()))
print('Printing coeffs after {} iter after truncation'.format(p+1))
print(Coeffs.linear.weight)
print('\n'+'='*50)
Coeffs.linear.weight.register_hook(lambda grad: grad.mul_(Mask_Ws))
new_lr = opt_func.param_groups[0]['lr']/lr_reduction
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = new_lr,weight_decay=Params.weightdecay)
return Coeffs, loss_track
def learning_sparse_model_rational(dictionary, Coeffs_rational, dataloaders, Params,lr_reduction = 10, quite = False):
'''
    Here, we tailor sparse learning to rational (numerator/denominator) models.
    Parameters
    ----------
    dictionary : A function
        A symbolic dictionary containing the candidate functions that may describe the dynamics.
    Coeffs : float
        Coefficients that pick the correct features from the dictionary.
    dataloaders : dataset
        Dataloaders containing the data, following the PyTorch framework.
    Params : dataclass
        Contains additional auxiliary parameters.
    lr_reduction : float, optional
        The learning rate is reduced by lr_reduction after each iteration. The default is 10.
    quite : bool, optional
        Decides whether to print the coeffs after each iteration. The default is False.
    Returns
    -------
    Coeffs : float
        Non-zero coefficients pick features from the dictionary and
        also determine the right coefficients in front of the features.
    loss_track : float
        Tracking of the loss after each epoch and iteration.
'''
# Define optimizer
opt_func = optim_all.RAdam(Coeffs_rational.parameters(), lr = Params.lr,weight_decay=Params.weightdecay)
# Define loss function
criteria = torch.nn.MSELoss()
    # pre-allocate memory for loss tracking
loss_track = np.zeros((Params.num_iter,Params.num_epochs))
#########################
###### Training #########
#########################
for p in range(Params.num_iter):
for g in range(Params.num_epochs):
Coeffs_rational.train()
for y in dataloaders['train']:
opt_func.zero_grad()
loss_new = torch.autograd.Variable(torch.tensor([0.],requires_grad=True))
weights = 2**(-0.5*torch.linspace(0,0,1))
for i in range(y[0].shape[0]):
yi = y[0][i]
timesteps_i = torch.tensor(np.diff(y[1][i],axis=0)).float()
y_total = yi
##########################
# One forward step predictions
y_pred = rk4th_onestep_SparseId(y_total[:-1],dictionary,Coeffs_rational,timestep = timesteps_i)
loss_new += criteria(y_pred,y_total[1:])
# One backward step predictions
y_pred_back = rk4th_onestep_SparseId(y_total[1:],dictionary, Coeffs_rational,timestep = -timesteps_i)
loss_new += weights[0]*criteria(y_pred_back, y_total[:-1])
loss_new /= y[0].shape[0]
loss_track[p,g] += loss_new.item()
loss_new.backward()
opt_func.step()
sys.stdout.write("\r [Forced zero terms %d/%d] [Epoch %d/%d] [Training loss: %.2e] [Learning rate: %.2e]"
% (p,Params.num_iter,g+1,Params.num_epochs,loss_track[p,g],opt_func.param_groups[0]['lr']))
torch.save(Coeffs_rational,Params.save_model_path+'MM_model_coefficients_iter_{}.pkl'.format(p))
# Removing the coefficients smaller than tol and set gradients w.r.t. them to zero
# so that they will not be updated in the iterations
Ws_Num = Coeffs_rational.numerator.weight.detach().clone()
Ws_Den = Coeffs_rational.denominator.weight.detach().clone()
if len(Ws_Den[Ws_Den!=0]) == 0:
Adp_tol = torch.min(Ws_Num[Ws_Num!=0].abs().min()) + 1e-5
else:
Adp_tol = torch.min(Ws_Num[Ws_Num!=0].abs().min(), Ws_Den[Ws_Den!=0].abs().min()) + 1e-5
Mask_Ws_Num = (Ws_Num.abs() > Adp_tol).type(torch.float)
Mask_Ws_Den = (Ws_Den.abs() > Adp_tol).type(torch.float)
Coeffs_rational.numerator.weight = torch.nn.Parameter(Ws_Num * Mask_Ws_Num)
Coeffs_rational.denominator.weight = torch.nn.Parameter(Ws_Den * Mask_Ws_Den)
Coeffs_rational.numerator.weight.register_hook(lambda grad: grad.mul_(Mask_Ws_Num))
Coeffs_rational.denominator.weight.register_hook(lambda grad: grad.mul_(Mask_Ws_Den))
new_lr = opt_func.param_groups[0]['lr']/lr_reduction
opt_func = optim_all.RAdam(Coeffs_rational.parameters(), lr = new_lr,weight_decay=Params.weightdecay)
return Coeffs_rational, loss_track
| 44.298969
| 135
| 0.581258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,794
| 0.371887
|
c6e2e070ba03aa1892f65c8ab57f90a175c0ba2f
| 31
|
py
|
Python
|
flask/deploy.py
|
dcu-sharepoint/Browser-id
|
4baeb18cb6bef26dad5a1a6fcf815ac1024203da
|
[
"MIT"
] | 1
|
2018-05-14T20:00:21.000Z
|
2018-05-14T20:00:21.000Z
|
flask/deploy.py
|
zakybstrd21215/cross_browser
|
4baeb18cb6bef26dad5a1a6fcf815ac1024203da
|
[
"MIT"
] | null | null | null |
flask/deploy.py
|
zakybstrd21215/cross_browser
|
4baeb18cb6bef26dad5a1a6fcf815ac1024203da
|
[
"MIT"
] | null | null | null |
import os
os.system("cp ./* ~/server/uniquemachine/")
| 15.5
| 30
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c6e3e9d0abc03b1874ad93609b620dcead66d6e3
| 4,874
|
py
|
Python
|
repairfiles.py
|
MrForg3t/sourcecodetrm
|
de9ce6eb1714d28998ef1f4a2ebc05cd7bf7d78f
|
[
"MIT"
] | null | null | null |
repairfiles.py
|
MrForg3t/sourcecodetrm
|
de9ce6eb1714d28998ef1f4a2ebc05cd7bf7d78f
|
[
"MIT"
] | null | null | null |
repairfiles.py
|
MrForg3t/sourcecodetrm
|
de9ce6eb1714d28998ef1f4a2ebc05cd7bf7d78f
|
[
"MIT"
] | null | null | null |
from urllib import request
from os import path, system
from platform import system as osInfo
from time import sleep
def repairFileMain():
print("\n")
repairAppData()
sleep(.2)
print("\n")
repairEssential()
sleep(.2)
print("\n")
def repairAppData():
    try:
        # The command used to create an empty file differs per platform.
        touch_commands = {
            "Windows": "type nul > data/uuidData.json",
            "Darwin": "touch data/uuidData.json",
            "Linux": "touch data/uuidData.json",
        }
        current_os = osInfo()
        if current_os not in touch_commands:
            return
        if not path.exists("data"):
            system("mkdir data")
        else:
            print("No need to repair data directory.")
        if not path.exists("data/appData.json"):
            request.urlretrieve("https://raw.githubusercontent.com/MrForg3t/tfxdwn/main/data/appData.json", "data/appData.json")
            sleep(0.1)
            print("data/appData.json successfully repaired.")
        else:
            print("No need to repair data/appData.json.")
        sleep(.2)
        if not path.exists("data/uuidData.json"):
            system(touch_commands[current_os])
            print("data/uuidData.json successfully repaired.")
        else:
            print("No need to repair data/uuidData.json.")
    except Exception as error:
        print(f"Error: {error}")
def repairEssential():
try:
if osInfo() == "Windows":
if not path.exists("main.exe"):
response = request.urlretrieve("https://github.com/MrForg3t/tfxdwn/blob/main/main.exe?raw=true", "main.exe")
print("main.exe successfully repaired")
sleep(.1)
else:
print("No needed to repair main.exe.")
if not path.exists("launcher.exe"):
response = request.urlretrieve("https://github.com/MrForg3t/tfxdwn/blob/main/launcher.exe?raw=true", "launcher.exe")
print("launcher.exe successfully repaired")
sleep(.1)
else:
print("No needed to repair launcher.exe.")
if not path.exists("uuid_gen.exe"):
response = request.urlretrieve("https://github.com/MrForg3t/tfxdwn/blob/main/uuid_gen.exe?raw=true", "uuid_gen.exe")
print("uuidgen.exe successfully repaired")
sleep(.1)
else:
print("No needed to repair uuid_gen.exe.")
if not path.exists("checkfileint.exe"):
response = request.urlretrieve("https://github.com/MrForg3t/tfxdwn/blob/main/checkfileint.exe?raw=true", "checkfileint.exe")
print("checkfileint.exe successfully repaired")
sleep(.1)
else:
print("No needed to repair checkfileint.exe.")
if osInfo() == "Darwin":
print("Not supported for now on that platform.")
if osInfo() == "Linux":
print("Not supported for now on that platform.")
except Exception as error:
print(f"Error: {error}")
if __name__ == '__main__':
    repairFileMain()
    sleep(7)
| 33.156463
| 144
| 0.533032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,995
| 0.409315
|
c6e4a42a16095039958ecdd10b4a917bcf6aef59
| 581
|
py
|
Python
|
resources/samd21flash.py
|
dotchetter/W.O.O.B.S
|
6055020f21c462940e9477192c831d8ad0b2669e
|
[
"MIT"
] | null | null | null |
resources/samd21flash.py
|
dotchetter/W.O.O.B.S
|
6055020f21c462940e9477192c831d8ad0b2669e
|
[
"MIT"
] | 13
|
2020-11-10T12:29:46.000Z
|
2020-11-20T00:04:02.000Z
|
resources/samd21flash.py
|
dotchetter/W.O.O.B.S
|
6055020f21c462940e9477192c831d8ad0b2669e
|
[
"MIT"
] | null | null | null |
import os
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-port")
parser.add_argument("-programmer")
parser.add_argument("-binary")
args = parser.parse_args()
port_norm = args.port
port_bootloader = f"{port_norm[0:3]}{int(port_norm[-1])+1}"
print("Issuing command to bootloader with 1200 baud")
os.system(f'cmd /k "mode {port_bootloader}:1200,n,8,1,p"')
print("Complete.\nFlashing device.")
os.system(f'cmd /k "{args.programmer}" --port={port_norm} -i -e -w -v -b {args.binary} -R')
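# Hedged usage sketch (port, programmer and binary values are examples only):
#     python samd21flash.py -port COM3 -programmer bossac.exe -binary firmware.bin
# Opening the port at 1200 baud first taps the SAMD21 bootloader, which is why
# the flashing port is computed as the next COM number.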
| 32.277778
| 95
| 0.666093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.48537
|
c6e54cd48762f141a1090fb8f2221a27cae5656e
| 136
|
py
|
Python
|
introduction/model_answer/python/09_tenka1_programmer_contest_1998.py
|
AAAR-Salmon/procon
|
d65865e7c7d98f7194f93610b4f06df8fff3332c
|
[
"MIT"
] | null | null | null |
introduction/model_answer/python/09_tenka1_programmer_contest_1998.py
|
AAAR-Salmon/procon
|
d65865e7c7d98f7194f93610b4f06df8fff3332c
|
[
"MIT"
] | null | null | null |
introduction/model_answer/python/09_tenka1_programmer_contest_1998.py
|
AAAR-Salmon/procon
|
d65865e7c7d98f7194f93610b4f06df8fff3332c
|
[
"MIT"
] | null | null | null |
# An empty fixed-length array can be created quickly by pre-filling it with None
a = [None] * 20
a[0] = a[1] = 100
a[2] = 200
for i in range(3,20):
a[i] = a[i-1] + a[i-2] + a[i-3]
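# Sanity check of the recurrence (first few terms): a[3] = 200+100+100 = 400,
# a[4] = 400+200+100 = 700, a[5] = 700+400+200 = 1300.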
print(a[19])
| 17
| 32
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.423913
|
c6e60e06fca1a3189ef7b894a20c3b5c14557fda
| 41,045
|
py
|
Python
|
test/ontic_type_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 2
|
2017-11-06T12:01:20.000Z
|
2021-03-01T23:52:41.000Z
|
test/ontic_type_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 1
|
2016-12-02T04:04:03.000Z
|
2016-12-02T04:04:03.000Z
|
test/ontic_type_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 2
|
2015-06-26T22:24:57.000Z
|
2016-12-01T02:15:36.000Z
|
"""Test the basic functionality of the base and core data types."""
from datetime import date, time, datetime
from typing import NoReturn
from ontic import OnticType
from ontic import property
from ontic import type as o_type
from ontic.meta import Meta
from ontic.property import OnticProperty
from ontic.schema import Schema
from ontic.validation_exception import ValidationException
from test.utils import BaseTestCase
class OnticTypeTest(BaseTestCase):
"""OnticType test cases."""
def test_object_type_instantiation(self) -> NoReturn:
"""OnticType instantiation to confirm dict behavior"""
schema = {'prop': {'type': 'int'}}
my_type = o_type.create_ontic_type('MyType', schema)
expected_dict = {'prop': 3}
my_object = my_type()
my_object.prop = 3
self.assertDictEqual(expected_dict, my_object)
def test_dynamic_access(self) -> NoReturn:
"""OnticType property access as a Dict and an Attribute."""
some_type = o_type.OnticType()
self.assert_dynamic_accessing(some_type)
def test_ontic_type_perfect(self) -> NoReturn:
"""Test the OnticType.perfect method."""
schema_def = Schema({
'prop_1': {'type': 'int'},
'prop_2': {'type': 'int', 'default': 20},
'prop_3': {'type': 'int', 'default': 30},
'prop_4': {'type': 'int', 'default': 40},
})
my_type = o_type.create_ontic_type('PerfectOntic', schema_def)
ontic_object = my_type()
ontic_object.prop_1 = 1
ontic_object.prop_3 = None
ontic_object.prop_4 = 400
ontic_object.extra_prop = 'Extra'
expected_dict = {
'prop_1': 1,
'prop_2': 20,
'prop_3': 30,
'prop_4': 400
}
ontic_object.perfect()
self.assertDictEqual(expected_dict, ontic_object)
def test_ontic_type_validate(self) -> NoReturn:
"""Test the OnticType.validate method."""
schema = {
'some_property': {'required': True},
'other_property': {'required': False}
}
# Create the o_type
my_type = o_type.create_ontic_type('RequireCheck', schema)
self.assertIsNotNone(o_type)
# Create object of o_type
ontic_object = my_type()
# Validate an empty object, which should cause ValueError
self.assertRaisesRegex(
ValidationException,
'The value for "some_property" is required.',
ontic_object.validate)
# Validate with data
ontic_object.some_property = 'Something'
ontic_object.other_property = 'Other'
o_type.validate_object(ontic_object)
def test_object_type_validate_value(self) -> NoReturn:
"""Test ObjectType.validate_value method."""
# Test that scalar property is valid.
single_property_schema = {
'prop1': {'type': 'str'}
}
my_type = o_type.create_ontic_type(
'GoodValidateValue', single_property_schema)
ontic_object = my_type({'prop1': 'Hot Dog'})
self.assertEqual([], ontic_object.validate_value('prop1'))
class CreateOnticTypeTestCase(BaseTestCase):
"""Test the dynamic creation of Ontic types."""
def test_create_ontic_type_arg_errors(self) -> NoReturn:
"""Assert the create ontic o_type arg errors."""
self.assertRaisesRegex(
ValueError, 'The string "name" argument is required.',
o_type.create_ontic_type, name=None, schema=dict())
self.assertRaisesRegex(
ValueError, 'The schema dictionary is required.',
o_type.create_ontic_type, name='SomeName', schema=None)
self.assertRaisesRegex(
ValueError, 'The schema must be a dict.',
o_type.create_ontic_type, name='SomeName', schema=list())
def test_create_ontic_type(self) -> NoReturn:
"""The most simple and basic dynamic Ontic."""
# Test creation from raw dictionary.
my_type = o_type.create_ontic_type('Simple', dict())
self.assertIsNotNone(my_type)
ontic_object = my_type()
self.assert_dynamic_accessing(ontic_object)
self.assertIsInstance(ontic_object, my_type)
# Test creation using a Schema object.
my_type = o_type.create_ontic_type('AnotherSimple', Schema())
self.assertIsNotNone(my_type)
ontic_object = my_type()
self.assert_dynamic_accessing(ontic_object)
self.assertIsInstance(ontic_object, my_type)
class PerfectObjectTestCase(BaseTestCase):
"""Test ontic_type.perfect_object method."""
def test_bad_perfect_usage(self) -> NoReturn:
"""Ensure handling of bad arguments to the perfect_object method."""
self.assertRaisesRegex(
ValueError,
r'"the_object" must be provided.',
o_type.perfect_object, None)
self.assertRaisesRegex(
ValueError,
r'"the_object" must be OnticType type.',
o_type.perfect_object, {})
def test_valid_perfect_usage(self) -> NoReturn:
"""Ensure that the perfect behavior is correct."""
schema_def = Schema({
'prop_1': {'type': 'int'},
'prop_2': {'type': 'int', 'default': 20},
'prop_3': {'type': 'int', 'default': 30},
'prop_4': {'type': 'int', 'default': 40},
})
my_type = o_type.create_ontic_type('PerfectOntic', schema_def)
ontic_object = my_type()
ontic_object.prop_1 = 1
ontic_object.prop_3 = None
ontic_object.prop_4 = 400
ontic_object.extra_prop = 'Extra'
expected_dict = {
'prop_1': 1,
'prop_2': 20,
'prop_3': 30,
'prop_4': 400
}
o_type.perfect_object(ontic_object)
self.assertDictEqual(expected_dict, ontic_object)
def test_perfect_collection_types(self) -> NoReturn:
"""Ensure that collection defaults are handled correctly."""
schema_def = Schema({
'dict_prop': {
'type': 'dict',
'default': {'a': 1, 'b': 2, 'c': 3}
},
'list_prop': {
'type': 'list',
'default': [1, 2, 3]
},
'set_prop': {
'type': 'set',
'default': {1, 2, 3}
}
})
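# Perfecting a fresh object should install an equal, independent copy of each collection default.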
my_type = o_type.create_ontic_type('PerfectCollection', schema_def)
ontic_object = my_type()
o_type.perfect_object(ontic_object)
# Test that the collection values are equal
self.assertDictEqual(schema_def.dict_prop.default,
ontic_object.dict_prop)
self.assertListEqual(schema_def.list_prop.default,
ontic_object.list_prop)
self.assertSetEqual(schema_def.set_prop.default,
ontic_object.set_prop)
# Ensure that the collections are not the same objects
self.assertIsNot(schema_def.dict_prop.default,
ontic_object.dict_prop)
self.assertIsNot(schema_def.list_prop.default,
ontic_object.list_prop)
self.assertIsNot(schema_def.set_prop.default,
ontic_object.set_prop)
def test_perfect_bad_collection_type(self) -> NoReturn:
"""Test for the handling of a bad collection member type."""
def test_perfect_collection_default_copy(self) -> NoReturn:
"""Ensure that collection default settings are handled correctly."""
# Configure default collection.
default_dict = {'key': 'value'}
default_list = ['item']
inner_tuple = (1, 2)
outer_tuple = (inner_tuple, 3, 4)
default_set = {'entity', outer_tuple}
# Configure default collections to test deep copy behavior.
ontic_object = o_type.OnticType()
ontic_object.dict = default_dict
default_deep_dict = {'name': default_dict}
default_deep_list = [default_dict]
default_deep_set = {(inner_tuple, outer_tuple)}
schema_def = Schema({
'dict_no_default': {
'type': 'dict',
},
'list_no_default': {
'type': 'list',
},
'set_no_default': {
'type': 'set',
},
'dict_with_default': {
'type': 'dict',
'default': default_dict,
},
'list_with_default': {
'type': 'list',
'default': default_list,
},
'set_with_default': {
'type': 'set',
'default': default_set,
},
'dict_deep_default': {
'type': 'dict',
'default': default_deep_dict,
},
'list_deep_default': {
'type': 'list',
'default': default_deep_list,
},
'set_deep_default': {
'type': 'set',
'default': default_deep_set,
},
})
# Execute test subject.
my_type = o_type.create_ontic_type('CollectionDefaults', schema_def)
my_object = my_type()
o_type.perfect_object(my_object)
o_type.validate_object(my_object)
# Assert the no default state.
self.assertIsNone(my_object.dict_no_default)
self.assertIsNone(my_object.list_no_default)
self.assertIsNone(my_object.set_no_default)
# Assert equality and copy of defaults.
self.assertDictEqual(default_dict, my_object.dict_with_default)
self.assertIsNot(default_dict, my_object.dict_with_default)
self.assertListEqual(default_list, my_object.list_with_default)
self.assertIsNot(default_list, my_object.list_with_default)
self.assertSetEqual(default_set, my_object.set_with_default)
self.assertIsNot(default_set, my_object.set_with_default)
# Assert equality and copy of deep defaults.
self.assertDictEqual(default_dict, my_object.dict_deep_default['name'])
self.assertIsNot(default_deep_dict['name'],
my_object.dict_deep_default['name'])
self.assertDictEqual(default_dict, my_object.list_deep_default[0])
self.assertIsNot(default_deep_list[0], my_object.list_deep_default[0])
self.assertSetEqual(default_deep_set, my_object.set_deep_default)
self.assertIsNot(default_deep_set, my_object.set_deep_default)
def test_perfect_schema_bad_member_type(self) -> NoReturn:
"""Test perfect for a bad member type."""
invalid_property_schema = OnticProperty(name='invalid_property')
invalid_property_schema.o_type = list
invalid_property_schema.member_type = 'UNKNOWN'
self.maxDiff = None
self.assertRaisesRegex(
ValidationException,
r"""The value "UNKNOWN" for "member_type" not in enumeration \[<class 'bool'>, <class 'complex'>, """
r"""<class 'datetime.date'>, <class 'datetime.datetime'>, <class 'datetime.time'>, <class 'dict'>, """
r"""<class 'float'>, <class 'int'>, <class 'list'>, <class 'set'>, <class 'str'>, <class 'tuple'>, None\].""",
property.validate_property, invalid_property_schema)
value_errors = property.validate_property(
invalid_property_schema,
raise_validation_exception=False)
self.assertEqual(1, len(value_errors))
self.assertEqual(
"""The value "UNKNOWN" for "member_type" not in enumeration [<class 'bool'>, <class 'complex'>, """
"""<class 'datetime.date'>, <class 'datetime.datetime'>, <class 'datetime.time'>, <class 'dict'>, """
"""<class 'float'>, <class 'int'>, <class 'list'>, <class 'set'>, <class 'str'>, <class 'tuple'>, None].""",
value_errors[0])
class ValidateObjectTestCase(BaseTestCase):
"""Test ontic_types.validate_object method basics."""
def test_bad_validate_object(self) -> NoReturn:
"""ValueError testing of validate_object."""
self.assertRaisesRegex(
ValueError,
'Validation can only support validation of objects derived from '
'ontic.ontic_type.OnticType.',
o_type.validate_object, None)
self.assertRaisesRegex(
ValueError,
'Validation can only support validation of objects derived from '
'ontic.ontic_type.OnticType.',
o_type.validate_object, 'Not a OnticType')
def test_validation_exception_handling(self) -> NoReturn:
"""Ensure that validate_object handles error reporting."""
schema_instance = Schema(some_attr={'type': 'int'})
my_type = o_type.create_ontic_type('ValidateCheck',
schema_instance)
ontic_object = my_type()
ontic_object.some_attr = 'WRONG'
self.assertRaisesRegex(
ValidationException,
r"""The value for "some_attr" is """
r"""not of type "<class 'int'>": WRONG""",
o_type.validate_object, ontic_object)
expected_errors = [
r"""The value for "some_attr" is not """
r"""of type "<class 'int'>": WRONG"""]
try:
o_type.validate_object(ontic_object)
self.fail('ValidationException should have been thrown.')
except ValidationException as ve:
self.assertListEqual(expected_errors, ve.validation_errors)
errors = o_type.validate_object(ontic_object,
raise_validation_exception=False)
self.assertListEqual(expected_errors, errors)
def test_type_setting(self) -> NoReturn:
"""Validate 'type' schema setting."""
schema = {
'bool_property': {'type': 'bool'},
'dict_property': {'type': 'dict'},
'float_property': {'type': 'float'},
'int_property': {'type': 'int'},
'list_property': {'type': 'list'},
'ontic_property': {'type': Meta},
'set_property': {'type': 'set'},
'str_property': {'type': 'str'},
'date_property': {'type': 'date'},
'time_property': {'type': 'time'},
'datetime_property': {'type': 'datetime'},
}
# Create the type
my_type = o_type.create_ontic_type('TypeCheck', schema)
self.assertIsNotNone(my_type)
# Create an object of the type
ontic_object = my_type()
# Validate an empty object.
o_type.validate_object(ontic_object)
# Validate with known good data.
ontic_object.bool_property = True
ontic_object.dict_property = {'some_key': 'some_value'}
ontic_object.ontic_property = Meta({'key': 'val'})
ontic_object.float_property = 3.4
ontic_object.int_property = 5
ontic_object.list_property = [5, 6, 7]
ontic_object.set_property = {'dog', 'cat', 'mouse'}
ontic_object.str_property = 'some_string'
ontic_object.date_property = date(2000, 1, 1)
ontic_object.time_property = time(12, 30, 30)
ontic_object.datetime_property = datetime(2001, 1, 1, 12, 30, 30)
o_type.validate_object(ontic_object)
# Validate with known bad data.
ontic_object.bool_property = 'Dog'
self.assertRaisesRegex(
ValidationException,
r"""The value for "bool_property" is not """
r"""of type "<class 'bool'>": Dog""",
o_type.validate_object, ontic_object)
ontic_object.bool_property = True
# Validate a string assigned to a list-typed property
ontic_object.list_property = 'some_string'
self.assertRaisesRegex(
ValidationException,
r"""The value for "list_property" is not """
r"""of type "<class 'list'>": some_string""",
o_type.validate_object, ontic_object)
def test_type_bad_setting(self) -> NoReturn:
"""ValueError for bad 'type' setting."""
schema = {
'some_property': {'type': 'Unknown'}
}
self.assertRaisesRegex(
ValueError,
r"""Illegal type declaration: Unknown""",
o_type.create_ontic_type, 'Dummy', schema)
def test_required_setting(self) -> NoReturn:
"""Validate 'required' schema setting."""
schema = {
'some_property': {'required': True},
'other_property': {'required': False}
}
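# 'required' makes a property mandatory: validation fails until a value is set.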
# Create the type
my_type = o_type.create_ontic_type('RequireCheck', schema)
self.assertIsNotNone(my_type)
# Create an object of the type
ontic_object = my_type()
# Validate an empty object, which should raise a ValidationException
self.assertRaisesRegex(
ValidationException,
'The value for "some_property" is required.',
o_type.validate_object, ontic_object)
# Validate with data
ontic_object.some_property = 'Something'
ontic_object.other_property = 'Other'
o_type.validate_object(ontic_object)
def test_enum_setting(self) -> NoReturn:
"""Validate 'enum' schema setting."""
# Scalar testing
# ###############
schema = {
'enum_property': {'enum': {'some_value', 99}}
}
# Create the type
my_type = o_type.create_ontic_type('EnumCheck', schema)
self.assertIsNotNone(my_type)
# Create an object of the type
ontic_object = my_type()
# Validate an empty object
o_type.validate_object(ontic_object)
# Validate a good setting
ontic_object.enum_property = 99
o_type.validate_object(ontic_object)
# Validate a bad setting
ontic_object.enum_property = 'bad, bad, bad'
self.assertRaisesRegex(
ValidationException,
r"""The value "bad, bad, bad" for "enum_property" not in """
r"""enumeration (\['some_value', 99\]|\[99, 'some_value'\])\.""",
o_type.validate_object, ontic_object)
def test_collection_enum_setting(self) -> NoReturn:
"""Validate 'enum' schema setting on collections."""
schema = {
'enum_property': {'type': 'list', 'enum': {'dog', 'cat'}}
}
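# For collections, each member is checked against the enum individually.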
# Create the type
my_type = o_type.create_ontic_type('EnumListCheck', schema)
self.assertIsNotNone(my_type)
# Create an object of the type
ontic_object = my_type()
# Validate an empty object, since 'required' is not set.
o_type.validate_object(ontic_object)
# Validate a good setting
ontic_object.enum_property = ['dog']
o_type.validate_object(ontic_object)
# Validate a bad setting
ontic_object.enum_property = ['fish']
self.assertRaisesRegex(
ValidationException,
r'''The value "fish" for "enum_property" not in'''
r''' enumeration \['cat', 'dog'\].''',
o_type.validate_object, ontic_object)
def test_min_setting(self) -> NoReturn:
"""Validate 'min' schema setting."""
schema = {
'str_min_property': {'type': 'str', 'min': 5},
'int_min_property': {'type': 'int', 'min': 10},
'float_min_property': {'type': 'float', 'min': 20},
'list_min_property': {'type': 'list', 'min': 1},
'set_min_property': {'type': 'set', 'min': 1},
'dict_min_property': {'type': 'dict', 'min': 1},
'date_min_property': {'type': 'date', 'min': date(2000, 1, 1)},
'time_min_property': {'type': 'time', 'min': time(12, 30, 30)},
'datetime_min_property': {
'type': 'datetime', 'min': datetime(2000, 1, 1, 12, 30, 30)}
}
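# 'min' bounds length for strings, size for collections, and value for numerics, dates, and times.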
my_type = o_type.create_ontic_type('MinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields
o_type.validate_object(ontic_object)
# Good test
ontic_object.str_min_property = '8 letters'
ontic_object.int_min_property = 20
ontic_object.float_min_property = 30.0
ontic_object.list_min_property = ['one item']
ontic_object.set_min_property = {'one item'}
ontic_object.dict_min_property = {'some_key': 'one item'}
ontic_object.date_min_property = date(2001, 1, 1)
ontic_object.time_min_property = time(13, 30, 30)
ontic_object.datetime_min_property = datetime(2001, 1, 1)
o_type.validate_object(ontic_object)
# Str failure
ontic_object.str_min_property = '1'
self.assertRaisesRegex(
ValidationException,
'The value of "1" for "str_min_property" '
'fails min of 5.',
o_type.validate_object, ontic_object)
ontic_object.str_min_property = '8 letters'
# Int failure
ontic_object.int_min_property = 5
self.assertRaisesRegex(
ValidationException,
'The value of "5" for "int_min_property" '
'fails min of 10.',
o_type.validate_object, ontic_object)
ontic_object.int_min_property = 20
# Float failure
ontic_object.float_min_property = 15.0
self.assertRaisesRegex(
ValidationException,
'The value of "15.0" for "float_min_property" '
'fails min of 20.',
o_type.validate_object, ontic_object)
ontic_object.float_min_property = 30.0
# List failure
ontic_object.list_min_property = list()
self.assertRaisesRegex(
ValidationException,
r"""The value of "\[\]" for "list_min_property" """
r"""fails min of 1.""",
o_type.validate_object, ontic_object)
ontic_object.list_min_property = ['one item']
# Set failure
ontic_object.set_min_property = set()
self.assertRaisesRegex(
ValidationException,
r"""set\(\)" for "set_min_property" fails min of 1.""",
o_type.validate_object, ontic_object)
ontic_object.set_min_property = {'one item'}
# Dict failure
ontic_object.dict_min_property = dict()
self.assertRaisesRegex(
ValidationException,
'The value of "{}" for "dict_min_property" '
'fails min of 1.',
o_type.validate_object, ontic_object)
ontic_object.dict_min_property = {'some_key': 'one_item'}
# Date failure
ontic_object.date_min_property = date(1999, 1, 1)
self.assertRaisesRegex(
ValidationException,
'date_min_property" fails min of 2000-01-01.',
o_type.validate_object, ontic_object)
ontic_object.date_min_property = date(2001, 1, 1)
# Time failure
ontic_object.time_min_property = time(11, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "11:30:30" for "time_min_property" '
'fails min of 12:30:30.',
o_type.validate_object, ontic_object)
ontic_object.time_min_property = time(13, 30, 30)
# Datetime failure
ontic_object.datetime_min_property = datetime(1999, 1, 1, 11, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "1999-01-01 11:30:30" for "datetime_min_property" '
'fails min of 2000-01-01 12:30:30.',
o_type.validate_object, ontic_object)
def test_max_setting(self) -> NoReturn:
"""Validate 'max' schema setting."""
schema = {
'str_max_property': {'type': 'str', 'max': 5},
'int_max_property': {'type': 'int', 'max': 10},
'float_max_property': {'type': 'float', 'max': 20},
'list_max_property': {'type': 'list', 'max': 1},
'set_max_property': {'type': 'set', 'max': 1},
'dict_max_property': {'type': 'dict', 'max': 1},
'date_max_property': {'type': 'date', 'max': date(2000, 1, 1)},
'time_max_property': {'type': 'time', 'max': time(12, 30, 30)},
'datetime_max_property': {
'type': 'datetime', 'max': datetime(2000, 1, 1, 12, 30, 30)}
}
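# 'max' mirrors 'min': it caps length for strings, size for collections, and value for numerics, dates, and times.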
my_type = o_type.create_ontic_type('MaxCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields
o_type.validate_object(ontic_object)
# Good test
ontic_object.str_max_property = 'small'
ontic_object.int_max_property = 5
ontic_object.float_max_property = 10.0
ontic_object.list_max_property = ['one item']
ontic_object.set_max_property = {'one item'}
ontic_object.dict_max_property = {'some_key': 'one item'}
ontic_object.date_max_property = date(1999, 1, 1)
ontic_object.time_max_property = time(11, 30, 30)
ontic_object.datetime_max_property = datetime(1999, 1, 1)
o_type.validate_object(ontic_object)
# Str failure
ontic_object.str_max_property = '8 letters'
self.assertRaisesRegex(
ValidationException,
'The value of "8 letters" for '
'"str_max_property" fails max of 5.',
o_type.validate_object, ontic_object)
ontic_object.str_max_property = 'small'
# Int failure
ontic_object.int_max_property = 20
self.assertRaisesRegex(
ValidationException,
'The value of "20" for "int_max_property" '
'fails max of 10.',
o_type.validate_object, ontic_object)
ontic_object.int_max_property = 5
# Float failure
ontic_object.float_max_property = 30.0
self.assertRaisesRegex(
ValidationException,
'The value of "30.0" for "float_max_property" fails max of 20.',
o_type.validate_object, ontic_object)
ontic_object.float_max_property = 15.0
# List failure
ontic_object.list_max_property = ['one item', 'two item']
self.assertRaisesRegex(
ValidationException,
r"""The value of "\['(one|two) item', '(one|two) item'\]" """
r"""for "list_max_property" fails max of 1.""",
o_type.validate_object, ontic_object)
ontic_object.list_max_property = ['one item']
# Set failure
ontic_object.set_max_property = {'one item', 'two item'}
expected_error = r"""The value of "{'(one|two) item', '(two|one) item'}" for "set_max_property" fails max of 1."""
self.assertRaisesRegex(
ValidationException,
expected_error,
o_type.validate_object, ontic_object)
ontic_object.set_max_property = {'one item'}
# Dict failure
ontic_object.dict_max_property = {'some_key': 'one_item',
'another_key': 'two_item'}
self.assertRaisesRegex(
ValidationException,
r"""The value of """
r"""("{'some_key': 'one_item', 'another_key': 'two_item'}"|"""
r""""{'another_key': 'two_item', 'some_key': 'one_item'}")"""
r""" for "dict_max_property" fails max of 1.""",
o_type.validate_object, ontic_object)
ontic_object.dict_max_property = {'some_key': 'one_item'}
# Date failure
ontic_object.date_max_property = date(2001, 1, 1)
self.assertRaisesRegex(
ValidationException,
'The value of "2001-01-01" for '
'"date_max_property" fails max of 2000-01-01.',
o_type.validate_object, ontic_object)
ontic_object.date_max_property = date(1999, 1, 1)
# Time failure
ontic_object.time_max_property = time(13, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "13:30:30" for "time_max_property" '
'fails max of 12:30:30.',
o_type.validate_object, ontic_object)
ontic_object.time_max_property = time(11, 30, 30)
# Datetime failure
ontic_object.datetime_max_property = datetime(2001, 1, 1, 11, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "2001-01-01 11:30:30" for "datetime_max_property" '
'fails max of 2000-01-01 12:30:30.',
o_type.validate_object, ontic_object)
def test_regex_setting(self) -> NoReturn:
"""Validate 'regex' schema setting."""
schema = {
'b_only_property': {'type': 'str', 'regex': '^b+'}
}
my_type = o_type.create_ontic_type('RegexCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields
o_type.validate_object(ontic_object)
# Good test
ontic_object.b_only_property = ''
o_type.validate_object(ontic_object)
ontic_object.b_only_property = 'b'
o_type.validate_object(ontic_object)
# Bad test
ontic_object.b_only_property = 'a'
self.assertRaisesRegex(
ValidationException,
r'Value \"a\" for b_only_property does not '
r'meet regex: \^b\+',
o_type.validate_object, ontic_object)
def test_member_type_setting(self) -> NoReturn:
"""Validate 'member_type' setting."""
schema = {
'list_property': {'type': 'list', 'member_type': 'str'}
}
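# 'member_type' constrains the type of every element inside the collection.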
my_type = o_type.create_ontic_type('ItemTypeCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append('some_item')
o_type.validate_object(ontic_object)
# Bad test
ontic_object.list_property.append(99)
self.assertRaisesRegex(
ValidationException,
r'''The value "99" for "list_property" is not of type '''
r'''"<class 'str'>".''',
o_type.validate_object, ontic_object)
def test_collection_regex_setting(self) -> NoReturn:
"""Validate string collection with 'regex' setting."""
schema = {
'set_property': {'type': set, 'member_type': str, 'regex': 'b+'}
}
my_type = o_type.create_ontic_type(
'CollectionRegexCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good test
ontic_object.set_property = set()
o_type.validate_object(ontic_object)
ontic_object.set_property.add('bbbbb')
o_type.validate_object(ontic_object)
# Bad test
ontic_object.set_property.add('xxxxxx')
self.assertRaisesRegex(
ValidationException,
r'''Value "xxxxxx" for "set_property" does not meet regex: b+''',
o_type.validate_object, ontic_object)
def test_member_min_setting(self) -> NoReturn:
"""Validate 'member_min' setting."""
# Test the item min setting for string items.
schema = {
'list_property': {'type': 'list', 'member_type': 'str',
'member_min': 4}
}
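# 'member_min' bounds each member: by length for string members, by value for numeric members.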
my_type = o_type.create_ontic_type('StrItemMinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append('four')
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append('one')
self.assertRaisesRegex(
ValidationException,
r'''The value of "one" for "list_property" '''
r'''fails min length of 4.''',
o_type.validate_object, ontic_object)
# Test the item min setting for numeric items.
schema = {
'list_property': {'type': 'list', 'member_type': 'int',
'member_min': 4}
}
my_type = o_type.create_ontic_type('IntItemMinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append(4)
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append(1)
self.assertRaisesRegex(
ValidationException,
r'''The value of "1" for "list_property" fails min size of 4.''',
o_type.validate_object, ontic_object)
def test_member_max_setting(self) -> NoReturn:
"""Validate 'member_max' setting."""
# Test the item max setting for string items.
schema = {
'list_property': {
'type': 'list', 'member_type': 'str', 'member_max': 4}
}
my_type = o_type.create_ontic_type('StrItemMaxCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append('four')
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append('seven')
self.assertRaisesRegex(
ValidationException,
r'''The value of "seven" for "list_property" '''
r'''fails max length of 4.''',
o_type.validate_object, ontic_object)
# Test the item min setting for numeric items.
schema = {
'list_property': {
'type': 'list', 'member_type': 'int', 'member_max': 4}
}
my_type = o_type.create_ontic_type('IntItemMaxCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append(4)
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append(7)
self.assertRaisesRegex(
ValidationException,
r'''The value of "7" for "list_property" fails max size of 4.''',
o_type.validate_object, ontic_object)
class ValidateValueTestCase(BaseTestCase):
"""Test ontic_types.validate_value method."""
def test_bad_validate_value(self) -> NoReturn:
"""ValueError testing of validate_value."""
self.assertRaisesRegex(
ValueError,
'"ontic_object" is required, cannot be None.',
o_type.validate_value, 'some_value', None)
self.assertRaisesRegex(
ValueError,
'"ontic_object" must be OnticType or child type of OnticType',
o_type.validate_value, 'some_value', "can't be string")
my_type = o_type.create_ontic_type(
'BadValidateValue',
{
'prop1': {'type': 'int'}
})
ontic_object = my_type()
ontic_object.prop1 = 1
self.assertRaisesRegex(
ValueError,
'"property_name" is required, cannot be None.',
o_type.validate_value, None, ontic_object)
self.assertRaisesRegex(
ValueError,
r'"property_name" is not a valid string.',
o_type.validate_value, '', ontic_object)
self.assertRaisesRegex(
ValueError,
'"property_name" is not a valid string.',
o_type.validate_value, 5, ontic_object)
self.assertRaisesRegex(
ValueError,
'"illegal property name" is not a recognized property.',
o_type.validate_value, 'illegal property name', ontic_object)
def test_validate_value_exception_handling(self) -> NoReturn:
"""Ensure validation exception handling by the validate_value method."""
schema_instance = Schema(some_attr={'type': 'int'})
my_type = o_type.create_ontic_type('ValidateCheck',
schema_instance)
ontic_object = my_type()
ontic_object.some_attr = 'WRONG'
self.assertRaisesRegex(
ValidationException,
r"""The value for "some_attr" is not of type "<class 'int'>":"""
r""" WRONG""",
ontic_object.validate_value, 'some_attr')
with self.assertRaises(ValidationException) as ve:
ontic_object.validate_value('some_attr')
expected_errors = [
r"""The value for "some_attr" is not """
r"""of type "<class 'int'>": WRONG"""
]
self.assertListEqual(expected_errors, ve.exception.validation_errors)
errors = o_type.validate_value('some_attr', ontic_object,
raise_validation_exception=False)
self.assertListEqual(expected_errors, errors)
def test_validate_value_value_arg(self) -> NoReturn:
"""Valid value argument testing of validate_value."""
# Test that scalar property is valid.
single_property_schema = {
'prop1': {'type': 'str'}
}
my_type = o_type.create_ontic_type(
'GoodValidateValue', single_property_schema)
ontic_object = my_type({'prop1': 'Hot Dog'})
o_type.validate_value('prop1', ontic_object)
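# Fixture types: a child Ontic type embedded as a property of a parent Ontic type, plus a variant requiring a default child.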
class ChildOnticType(OnticType):
ONTIC_SCHEMA = Schema([
OnticProperty(name='int_prop',
type=int),
OnticProperty(name='str_prop',
type=str,
required=True,
default='A Value')
])
class ParentOnticType(OnticType):
ONTIC_SCHEMA = Schema([
OnticProperty(name='child_prop', type=ChildOnticType)
])
DEFAULT_CHILD_PROP = ChildOnticType(int_prop=99, str_prop='The Value')
class RequiredOnticChildType(OnticType):
ONTIC_SCHEMA = Schema([
OnticProperty(
name='child_prop',
type=ChildOnticType,
required=True,
default=DEFAULT_CHILD_PROP),
])
class SettingOnticTypeTestCase(BaseTestCase):
"""Test using an OnticType as an OnticProperty.type setting."""
def test_ontic_type_perfect(self) -> NoReturn:
"""Test that Ontic child properties are perfected along with the parent."""
parent = ParentOnticType()
parent.child_prop = ChildOnticType()
self.assertNotIn('int_prop', parent.child_prop)
self.assertNotIn('str_prop', parent.child_prop)
parent.perfect()
self.assertIsNone(parent.child_prop.int_prop)
self.assertEqual('A Value', parent.child_prop.str_prop)
res = parent.validate()
self.assertListEqual([], res)
def test_ontic_type_success(self) -> NoReturn:
"""Test validation of an OnticType property."""
parent = ParentOnticType()
parent.child_prop = ChildOnticType(str_prop='Some Value')
parent.child_prop.int_prop = 1
res = parent.validate(raise_validation_exception=True)
self.assertListEqual(res, [])
def test_non_ontic_type_failure(self) -> NoReturn:
"""Test validation of an incorrect OnticType property."""
parent = ParentOnticType()
parent.child_prop = ChildOnticType()
parent.child_prop.int_prop = '1'
self.assertRaisesRegex(
ValidationException,
r"""The child property child_prop, has errors:: """
r"""The value for "int_prop" is not of type "<class 'int'>": 1"""
r""" || The value for "str_prop" is required.""",
parent.validate,
raise_validation_exception=True)
def test_ontic_type_default_setting(self) -> NoReturn:
"""Ensure that an OnticType property default is copied upon perfect."""
parent = RequiredOnticChildType()
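# The schema-level default child must be installed as a deep copy, never shared between instances.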
self.assertNotIn('child_prop', parent)
parent.perfect()
self.assertIn('child_prop', parent)
self.assertIsNot(DEFAULT_CHILD_PROP, parent.child_prop)
self.assertEqual(99, parent.child_prop.int_prop)
self.assertEqual('The Value', parent.child_prop.str_prop)
self.assertEqual([], parent.validate())
| 37.111212
| 122
| 0.599903
| 40,522
| 0.987258
| 0
| 0
| 0
| 0
| 0
| 0
| 11,968
| 0.291582
|
c6e6312f6be52c69218d6689cca0b968307e1db4
| 46,788
|
py
|
Python
|
resources.py
|
jajberni/BreederMap
|
8a14d906a6af63dc2c27d77e43968c2e2794fa06
|
[
"MIT"
] | null | null | null |
resources.py
|
jajberni/BreederMap
|
8a14d906a6af63dc2c27d77e43968c2e2794fa06
|
[
"MIT"
] | null | null | null |
resources.py
|
jajberni/BreederMap
|
8a14d906a6af63dc2c27d77e43968c2e2794fa06
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.7)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x2a\xae\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\xd0\x00\x00\x01\xa8\x08\x06\x00\x00\x01\x81\x2b\x84\x3a\
\x00\x00\x2a\x75\x49\x44\x41\x54\x78\x9c\xed\xdd\x07\x7c\x13\xf5\
\xff\xc7\xf1\xb4\x4d\x37\x50\xa0\xd0\xb2\x0a\xa5\x14\x90\x51\x96\
\x80\xb2\x37\x82\x8a\x0c\x59\x8a\x80\x32\x14\x65\x08\x0a\x88\xac\
\x5a\xa6\x0c\x01\x41\x90\x21\x28\x7b\xca\x10\x91\x29\x53\x56\xd9\
\x32\x0a\x88\x2d\x65\xb7\x40\x19\x1d\x74\xd1\xfe\xb9\xf8\x4f\x7f\
\x2d\x4d\xd7\x35\xbd\xfb\xf6\xf2\x7a\x3e\x1e\x47\x92\xcb\xfa\x84\
\x6f\xde\xfd\x24\x97\x1b\xfa\xc4\xc4\x44\x9d\xd1\x8b\xf3\xd6\x3a\
\x8d\xb1\xb2\xb2\x4a\x90\x4e\xf5\xd2\x3f\xed\xc7\x74\x4f\x4c\xff\
\xe6\xb9\x9f\x5e\xed\x02\x94\x62\x59\x2f\x74\xd3\xf8\xe5\x56\x6a\
\x17\x92\xd3\x2c\x6b\x44\x2d\x41\xaa\x17\x7a\x21\x28\xa0\x71\x76\
\x1e\x30\x38\xf4\x56\xe5\x52\x6e\x25\x2e\x64\xe7\x31\x24\xb7\xee\
\xdf\x79\xa5\x44\xe1\x62\x97\xb3\xf3\x18\xde\xc5\xbd\x4e\x38\xd8\
\xd9\x47\x4a\xe7\x69\x2f\x5a\xa3\x4f\x48\x48\xb0\x49\x3e\xc3\x77\
\x68\x53\xd9\x0f\xe6\x37\x7d\xaf\xd9\x1f\x67\x58\xff\x06\x3a\x27\
\x47\xdb\x6c\x3f\x8e\xe5\x8c\xa8\xda\x05\x28\x85\x17\xaa\x35\xbc\
\x50\xad\xe1\x85\x6a\x8d\xde\xda\xda\xfa\x79\xf2\x19\xfb\x8f\x04\
\x99\xe5\x81\xcd\xf5\x38\x87\xfd\x83\x75\xb6\xb6\x36\x19\xdf\x30\
\x03\xa9\x46\xf4\x80\x99\x0a\x34\xd7\xe3\x1c\x39\x71\xc3\x2c\x8f\
\x93\xf4\xc5\x5b\xcb\x1f\xec\xa5\xd7\xa7\x4f\x7e\x41\xcd\x62\x72\
\x9a\xe5\xfc\x31\x52\xbb\x00\xa5\x24\xbd\x50\x8b\xc8\xa8\x96\x5f\
\xa4\x44\x7a\x7d\x96\xf5\xd6\xa5\xbd\x68\x88\x65\xbd\x75\x2d\x01\
\xed\x45\x4b\x2c\xab\xbd\xb0\x00\x5b\x63\x78\xa1\x5a\xc3\x0b\xd5\
\x1a\x5e\xa8\xd6\x58\xce\x0b\x7d\x79\x01\x76\xf2\x26\x9b\x1d\xe6\
\x7a\x9c\x69\x73\x0f\x99\xe5\x71\x2c\x67\x44\xa5\x7f\x8c\x5f\xba\
\xfb\xcd\xf8\xf2\xdf\x90\x47\xa1\x5e\xea\x96\x64\x3e\xae\xf9\x0a\
\xdc\xfe\x69\xd8\xec\x12\xd2\xf9\x14\x23\x3a\xff\x8b\xef\xca\xa8\
\x53\x52\xce\xb3\xac\xb7\xae\x25\x30\xbc\xd0\x6f\x7e\x99\xb2\xfb\
\xdc\xbf\x17\x9a\xab\x5d\x4c\x4e\x32\xbc\x50\xad\xbf\x48\x89\xe5\
\xbc\x75\x8d\x1b\x0f\x68\x71\xc3\x01\x89\x45\x6d\x3c\x60\x58\x0a\
\xf8\x2c\x26\x3a\xaf\xda\x85\xe4\xb4\x1f\x36\x2d\x5a\xac\x77\xb4\
\x77\x08\x57\xbb\x90\x9c\x36\xa0\x7d\xdf\xde\x6c\x3c\xa0\x35\x96\
\xf9\x42\xb3\xbb\xe1\x80\x68\x2a\x97\xae\xb0\xdf\x78\xde\x72\xda\
\x8b\xd6\x5f\xa4\xc4\xe4\x8f\x4c\x72\x7f\x33\x79\x79\xd1\x89\x68\
\x8f\x63\x99\x7f\x8c\xb4\x8c\x17\xaa\x35\xbc\x50\xad\xe1\x85\x6a\
\x4d\xaa\x17\x2a\xda\xc6\x03\xe6\x7a\x1c\xe1\x37\x1e\x30\xd7\xe3\
\xe8\xa5\x0f\xbc\x21\x61\xa1\x5e\xfd\x66\x7e\xf9\xaf\x59\x1e\x51\
\x30\xc3\xbb\x0e\xea\x58\xa7\x52\xad\x5f\x0d\x23\xea\x5e\xd0\x2d\
\x50\xeb\x4b\x19\x2c\xf7\x8f\x91\x56\x59\xce\x17\x6f\xb5\x8b\x50\
\x42\x42\x62\x82\x8d\x45\xbc\xd0\x9b\xa1\xb7\x2b\x5a\xc4\x0b\x2d\
\xe5\xee\x71\x9e\x05\xd8\x5a\x63\x79\x2f\x54\xab\x2d\xc6\x18\x4b\
\xcd\x2f\xd7\x35\xbe\x3e\xcb\x7b\xeb\x1a\x89\xb6\xe0\x99\x05\xd8\
\x59\xc4\x0b\xd5\x1a\x5e\xa8\xd6\xf0\x42\xb5\xc6\x72\x5f\xa8\x68\
\x1b\x0f\x98\xeb\x71\xf4\x5a\xdf\x62\x5f\xb2\xf4\xeb\x79\x85\x93\
\xbe\x78\x5f\xbd\x79\xed\xf5\xaf\x16\xfa\x1d\xcd\xd6\x23\x4a\xff\
\x5d\x02\x7d\x85\x1f\xdb\x63\x58\xeb\xea\x65\xab\xec\x90\xce\x27\
\xbd\x75\xcb\x79\x78\x1f\xd3\xf2\x92\x06\xcb\xfd\x63\xa4\x55\x96\
\xb3\x00\x5b\xab\xab\x98\x27\xf7\xd3\xb6\xe5\xb3\xf5\xc6\x75\xce\
\xb5\xac\xcf\x5b\xdd\x07\xe9\x93\x1f\x79\x40\xeb\x6f\x61\x2d\x4b\
\x5a\x08\x68\x9c\xc1\x60\xe6\x6e\xd2\xf8\xf9\xf6\x1c\xfe\x86\xc5\
\x74\x51\x4b\x50\xcd\xdb\x67\x57\x8a\xfd\x70\x4d\x5f\xfb\xc3\xda\
\xc3\x17\x8e\x77\x56\xb3\x28\x64\x9d\xb3\x83\xd3\xe3\x15\xa3\x16\
\x14\x90\xce\xa7\x48\xe8\xd0\x2e\x03\xba\x48\x93\x3a\x65\xc1\x1c\
\xf8\x93\xab\x31\x0c\xa8\xc6\xa4\x18\xd0\x39\x1b\x17\xfe\xbc\xf7\
\xcc\xa1\x0f\x55\xaa\x45\x58\x82\x2d\xec\x4b\x45\x5a\xaf\x68\xd6\
\x80\x49\x55\xa4\xf3\x16\xb1\x08\x25\xbb\x44\x1e\x4c\x49\x70\xc8\
\x4d\x9f\x4c\xaf\xa3\xd1\xb3\x4b\xf5\x9c\xaf\xe8\x25\x4b\xd7\x9e\
\x49\xf3\x3a\xd1\xea\xe9\xda\xae\x8a\xce\xde\x3e\xfb\xc7\x09\xc8\
\x8a\xf4\xea\x49\xb5\x23\xcb\x97\x79\x7a\x14\x30\x7b\x41\xd9\x21\
\x5a\x3d\x1e\xc5\x5d\x64\xef\x68\x33\x27\xf0\xa1\x48\x63\x18\x50\
\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\
\x35\x86\x01\xd5\x98\x54\x3b\xbc\x7e\x99\xb9\xd6\x19\x35\x17\xd1\
\xea\x31\xd7\x0e\xb9\xcd\xc5\x22\x8e\xa8\x65\x29\x2c\xea\xc8\x61\
\x96\x82\x1e\xaa\x31\x0c\xa8\xc6\x30\xa0\x1a\xc3\x7a\xb9\x1a\x92\
\xf4\xa1\x28\x3c\x2a\xc2\x55\xed\x62\x90\x7d\xb3\x36\xcc\x5f\x6e\
\x18\xd0\xbc\x4e\x79\x1e\xaa\x5d\x0c\xb2\x6f\x70\xc7\x7e\xdd\xf9\
\xda\xa2\x31\x7c\x28\xd2\x18\x06\x54\x63\x18\x50\x8d\xe1\x6b\x8b\
\x86\x58\xcc\x11\xe6\x2d\x85\xe1\x20\x27\x19\xad\x97\x9b\x9d\x03\
\xc5\xcb\x95\xde\x4f\x64\xa2\xd5\x33\xac\x7f\x03\xc5\xd7\xcb\x4d\
\xaf\x1e\x7a\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\xaa\x31\x0c\
\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\xaa\x31\x0c\xa8\xc6\xb0\
\xa2\x75\x36\x09\xbb\xa2\xb5\x74\x2a\xcc\x32\x5d\xd1\x77\x3b\x22\
\xa0\x54\x3b\x6f\x4c\x3e\x13\xb9\x17\x3d\x54\x63\x18\x50\x8d\x49\
\x31\xa0\xd7\x6e\x07\xd5\x1c\x36\x7f\xec\x09\xb5\x8a\x81\x3c\xdd\
\x5b\x76\x19\xd1\xa1\xc1\xdb\x53\xa4\xf3\xac\xb1\xa0\x01\xcb\x77\
\xad\xfd\x56\x9a\x2c\xe6\xa8\xa9\x96\x42\xda\x4f\x63\x8a\xf5\x72\
\x49\x69\xee\xf5\xfd\xc0\xc9\x3e\x25\xdd\x4a\x5c\x48\x71\x10\x01\
\x23\x4b\x38\x54\x84\x16\x98\x3a\xd2\x45\x52\x42\xc7\x2c\x99\xb4\
\xef\x42\x50\x40\x63\x45\x2b\x82\xd9\xa4\x58\xb0\x10\x70\xe3\x6a\
\x3d\x06\x33\x77\xf3\x5b\x3a\x75\x67\xd2\x51\x21\x46\x2e\x1a\xff\
\x97\xda\x05\x21\x7b\xce\x5e\x3b\xdf\x52\x3a\xe5\x53\xae\xc6\x30\
\xa0\x1a\xc3\x6e\x6d\x34\x22\xd5\xaf\x2d\x0c\x6a\xee\x95\xfc\x57\
\x32\x7e\x3e\xd3\x18\x7a\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\
\xaa\x31\x0c\xa8\xc6\xa4\x39\xa0\x22\x2c\xdb\x8d\x8c\x89\x72\x49\
\x4c\x48\xb4\xce\xe3\xe8\xfc\x48\xed\x5a\x24\xb1\x71\x71\x0e\x4f\
\xa3\x9e\x16\x2e\xe4\xe2\x7a\x53\xed\x5a\x2a\x97\xae\xb0\xdf\xd4\
\xfc\x14\x03\xca\xf7\xd0\xdc\x69\x83\xdf\x2f\xb6\x36\xd6\x36\xf1\
\xd2\x79\xc3\x80\x8e\x5b\x3a\x75\xc7\x99\x6b\xe7\xdf\x50\xb7\x2c\
\xc8\xd5\xd1\xf7\xc3\x38\x27\x07\xa7\x27\x2b\x47\x2d\xc8\xaf\xbf\
\xf3\xe0\x5e\x39\x06\x33\xf7\x8b\x8a\x8e\x72\x59\xbd\x77\xa3\x9f\
\xbe\xff\xf7\xc3\xae\xa8\x5d\x0c\xcc\x63\xdd\xbe\x4d\x63\xd3\xfd\
\x94\x5b\xbd\x72\x31\x5d\x95\x4a\xee\x4a\xd5\x63\x90\xde\xa1\x14\
\x25\x4a\x1f\x6e\x72\xc3\xd6\x0b\xba\xc8\xa8\xb8\x34\xaf\x17\xed\
\xf0\x97\xe9\x0e\x68\x7e\x17\x07\xe1\x0e\xef\xa8\x74\x3d\x36\x36\
\xe9\xaf\x5e\x25\xda\xff\x0f\xdf\x43\x35\x86\x01\xd5\x18\x06\x54\
\x63\x18\x50\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\
\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\
\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\x35\x26\xdd\
\x01\xbd\x7e\x23\x4c\x97\x60\x62\x2f\x29\x6a\xda\x7f\x24\x48\xd1\
\xe7\x8b\x89\x49\x77\xef\xb3\x8a\xd7\x93\x91\x74\x07\x34\xe8\xe6\
\x63\xc3\x24\x92\x03\x82\xfd\x07\x8a\x56\x8f\x9e\x0d\x7d\xb5\x63\
\x5c\xaf\xaf\x9b\x1a\x12\x3a\xb1\xcf\xe8\x86\xa3\x7e\x9a\x70\x50\
\xed\x82\x20\x5f\xcb\x9a\x4d\x16\xfa\x94\xae\xb8\xcf\x30\xa0\x15\
\x4b\x95\x3f\x44\x52\x73\x2f\x36\xc9\xd7\x30\xbe\xb6\x68\x0c\x03\
\xaa\x31\x0c\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\xaa\x31\x6c\
\x92\xaf\x01\x93\xfa\x8c\x69\x50\xa1\x54\x39\xc3\x2e\x72\x39\x2a\
\x84\x06\x8c\xfc\x69\xfc\xa1\x39\x83\xa6\x54\x2c\x51\xb8\x58\x80\
\x61\x40\x19\xcc\xdc\x6f\xe0\xec\xaf\x2e\x71\x98\x0f\x0d\x62\x40\
\x35\xc6\x30\xa0\xeb\xbf\xf9\xc5\xae\xd3\x37\x1f\xc6\xaa\x5d\x0c\
\xe4\xeb\xdc\xa4\xdd\x38\xe9\xd4\x30\xa0\x7a\x1b\x9b\xb8\x4f\xda\
\x7c\xf8\xd9\x82\xad\xbf\xcc\x53\xb7\x2c\xc8\x21\x7d\xc2\x7d\xaf\
\xe9\xbb\xbe\xd2\xf9\xa4\x3f\xb9\xad\x6a\x37\xfb\x51\x9a\xd4\x2b\
\x0b\xe6\x40\x0f\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\x35\x86\
\x01\xd5\x18\x06\x54\x63\x52\x0c\xe8\xea\x3f\x7f\x1d\xb7\x6e\xff\
\xe6\x31\x6a\x15\x83\xac\xab\x5d\xa1\xc6\x96\xaf\xdf\x1f\xd2\xce\
\x78\x99\x5f\x5b\x72\x39\xff\x80\xd3\x6d\xa5\xb1\x9b\x3d\xf0\xdb\
\x4a\x1e\x6e\xc5\x2f\xf1\x6b\x8b\x46\x0c\x9a\x33\xe2\xe2\x3a\xdf\
\x25\x0e\x7a\x06\x53\x3b\x3a\xfb\xf5\x8a\xe6\x43\x51\x86\xa4\xf7\
\x7b\xee\x59\xff\x3c\xdd\x01\x6d\x52\xcf\x4b\xd7\xb0\x8e\xa7\x42\
\xa5\xfc\xc7\x6f\xfa\xde\x74\xaf\xf7\x1d\xda\x54\xa1\x4a\xfe\x33\
\x73\xc1\x61\xdd\xd3\xf0\x98\x34\xaf\x57\xba\x1e\x49\x7a\xff\x47\
\x24\x54\x63\x18\x50\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\
\x18\x50\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\
\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\x35\
\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x49\x77\x40\xf7\x1d\x0e\
\x34\x4c\x22\xc9\x68\x15\x15\xa5\x89\x56\x8f\xbe\x6b\xd3\x0e\xbe\
\x6b\xf6\x6e\xf4\x53\xbb\x10\x64\x5f\x1e\x47\xe7\x30\x7d\x97\x26\
\xed\xc7\x31\xa0\xe9\xc9\x3d\x6b\xfd\x2d\x1f\x39\xdf\xd5\xf0\x27\
\x57\xda\x1d\xca\xc1\x73\x47\xba\xcd\xdc\xf0\xe3\x0a\xb5\x8b\x12\
\x8f\xf8\x83\xd9\xfc\xd5\x46\x8b\xfb\xb7\xeb\xd3\x47\x3a\x9f\xd4\
\x43\x1b\x56\xad\xbb\x52\x9a\xa4\xf3\x73\x37\xff\xf4\xd3\x9e\x53\
\x07\x7a\xab\x55\xa0\x81\x71\xf5\x6f\xf1\xff\x3f\x55\x51\xb7\x52\
\xed\x0d\xc3\xba\x0e\xec\xf4\xf2\x7c\x93\x1f\x8a\xa4\xd1\x36\x8e\
\x38\x72\x17\xbe\xb6\x68\x0c\x03\xaa\x31\x0c\xa8\xc6\x30\xa0\x1a\
\xc3\x80\x6a\x0c\x3b\x40\xd6\x00\x93\x07\xe2\x61\x30\x73\x2f\x69\
\xec\x8c\x83\xca\x26\xf9\x1a\x61\x1c\x54\x7a\xa8\xc6\x30\xa0\x1a\
\x63\x18\xd0\x0f\x5a\x74\x1e\xb9\x62\xf7\xba\x49\x6a\x17\x03\xf9\
\x6c\xac\x6d\xe2\xa4\x53\xc3\x80\xbe\xdb\xb0\xcd\x64\x06\x34\x77\
\xdb\xe0\xf7\x8b\x9d\x74\xaa\x4f\x4c\xe3\x18\xdb\x97\x6f\xfc\x53\
\xf7\xeb\x45\xe3\x0e\x2b\x5a\x15\x60\xa1\xa4\x15\x4d\xa4\x75\x13\
\x5e\x9e\x9f\xea\x33\xd1\xe8\x25\x13\xf7\x5f\x0c\xba\xdc\x48\x99\
\xb2\x00\x48\xa4\x95\x86\x8c\x2b\x0e\xa5\x79\xc0\x66\x16\x2f\x00\
\xea\x4b\xb5\xdc\x4f\x92\x90\x98\x60\xa3\x5e\x49\x00\x92\x5b\xf4\
\xfb\xb2\x39\x7d\xdf\xee\x31\x30\x29\xa0\xd6\x56\xd6\xcf\xd5\x2c\
\x08\xc0\xff\x94\x2d\xe1\x75\x42\x3a\x4d\xf1\x11\xb7\x47\xcb\xae\
\x5f\x2d\xdb\xb5\x66\x8a\x3a\x25\x01\x90\xd8\xd9\xda\x3d\x6b\x5c\
\xad\xfe\x32\xe9\x7c\xca\xef\xa0\x0d\xde\x9a\x6a\x98\xf8\x2e\x0a\
\xa8\x62\xce\xa0\x29\x15\x4a\x14\x2e\x76\xd9\x78\xd9\xe4\x2f\xdb\
\xc9\x97\x22\x05\xde\xb9\x5e\xe3\xec\xbf\x17\x5a\xc4\xc6\xc5\x3a\
\x29\x51\x20\x60\x29\xac\xac\xac\x12\xca\x14\x2b\x7d\xaa\x66\xf9\
\x6a\xbf\xa7\x75\x9b\x0c\x57\x3d\xf1\x2a\xe6\x79\x5a\x9a\xcc\x5b\
\x1a\x80\xcc\x60\xdd\x30\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\
\x40\x01\x81\x11\x50\x40\x60\x19\x06\xf4\xdc\xb5\x0b\x2d\xe6\x6d\
\x59\xbc\x30\xf4\xf1\x03\x4f\x05\xea\x01\x2c\x4a\xdf\xb7\x7a\x0c\
\x7c\xf3\xf5\x16\x3f\xa4\x75\x7d\xaa\x80\x9e\xbc\x72\xf6\xad\x89\
\x2b\xbe\x4b\xf3\x77\x19\x00\xe6\xb3\x68\xdb\xb2\x39\xd2\x64\xbc\
\xdc\xba\x76\xf3\x79\x1f\xb7\xe9\xd9\xdf\x78\x39\x29\xa0\xdf\xae\
\x9a\xb5\xe9\x78\xc0\xa9\x76\x0a\xd7\x07\x20\x99\xed\xfe\x7b\x3e\
\x93\x26\x69\xe7\xd5\x49\xfb\x3b\x66\xd5\x3e\x40\x2c\x11\xcf\x22\
\x0b\x4a\xb9\x64\x21\x11\x20\x30\x7d\x42\x82\xfc\xed\x40\x9b\xd4\
\xf3\xd2\x35\xac\xe3\x69\xc6\x72\xc4\x94\x9d\x03\xb1\xf8\x0e\x6d\
\x6a\xc6\x4a\xc4\x34\x73\xc1\x61\xdd\xd3\xf0\x18\x59\xf7\x1d\xd6\
\xbf\x81\xce\xc9\xd1\xd6\xcc\x15\x89\x47\xee\x7b\x88\x0e\x0a\x08\
\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\
\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\
\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\
\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\
\xd3\x5b\x5b\xcb\x3f\xb2\xf6\xbe\xc3\x81\x86\x09\x69\xcb\xce\xee\
\x52\x2c\xc1\xb4\xb9\x87\xd4\x2e\x41\x68\x86\x0e\x5a\xd4\xb5\xc8\
\x3f\x77\x1f\xde\x2b\xab\x76\x31\xc8\xad\xa4\x9d\x42\x5a\x65\x78\
\x2b\x64\x9d\x21\xa0\xf3\x06\x4f\x2b\x77\xe5\xe6\xb5\x3a\x23\x16\
\xfa\x1d\x51\xbb\x20\xe4\x46\x84\xd3\xdc\x9a\x56\x6f\xf0\xcb\xc0\
\x0e\x1f\x7f\x94\xf4\x1d\xb4\xbc\x87\xf7\x51\xe3\x91\xb5\xd9\x4f\
\x2e\xa0\x8e\x85\x5f\xce\xf4\x2c\x9c\xbf\x50\xb0\xf1\xb2\xc9\x85\
\x44\xc6\xa0\x1a\xed\x39\x75\xa0\xf7\xe2\x3f\x96\x7f\x1f\x1d\x1b\
\xe3\x9c\xd3\x05\x02\x96\xe2\xfd\x66\x1d\xc7\x74\x6a\xdc\x76\x42\
\x7a\xb7\xc9\xd4\x52\xdc\xe6\xaf\x36\x5a\x2c\x4d\xe6\x29\x0b\x40\
\x66\xf1\x33\x0b\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\
\xc0\x08\x28\x20\xb0\x74\x03\x1a\x17\x1f\x6f\xbf\x6c\xd7\xea\xa9\
\x27\x2e\x9f\x69\xf3\x24\xf2\xa9\x9b\xf1\x67\x16\x67\x07\xa7\xc7\
\x8a\x54\x07\x68\x4c\x64\x74\x54\x7e\xe9\xd4\x56\xaf\x8f\x71\xb4\
\x73\x0c\xaf\xe8\x59\xfe\x60\x87\x86\x6d\xbe\x2d\x5b\xdc\xeb\x84\
\xa9\xdb\xa7\x0a\x68\x74\x6c\x74\x9e\xf7\xc6\xf7\x0d\xcf\xcc\x93\
\x00\x90\x47\x6a\x7e\x71\xf1\xe1\xf6\xc7\x2e\x9d\xec\x20\x4d\xc6\
\xf9\x3d\xdf\xe8\x3a\xbc\x5d\xfd\xb7\xa6\x19\x2f\xa7\x08\xe8\x8c\
\xf5\xf3\x56\x1d\xfa\xfb\xe8\x7b\x4a\x16\x0a\xe0\x7f\x96\xee\x5c\
\x33\x55\x9a\x8c\x2b\x0b\xa5\x08\x28\xe1\x04\xc4\xf0\xdb\x91\x1d\
\x43\xde\xa9\xdb\x6a\x26\x0b\x89\x00\x01\x49\xcb\x7c\xa4\xd3\x14\
\x01\x75\x71\xce\x17\x6a\xbc\x02\x80\x7a\xba\xb7\xe8\xfc\xb5\x74\
\x9a\x22\xa0\xbf\x8c\x98\xeb\xfe\xeb\xc1\xad\x5f\xaf\xd8\xbd\x6e\
\x92\x3a\x65\x01\x96\x2d\x7f\x1e\x97\x90\x9f\xbf\xfa\xa1\x88\xf1\
\x72\xaa\x8f\xb8\xef\x36\x6c\x33\x59\x9a\xa4\xf3\xdf\xff\xba\x60\
\xe9\xfe\xb3\x7f\xf5\x50\xb2\x40\xc0\xd2\xb8\x17\x70\x0b\x9c\xd8\
\x67\x54\x23\xd7\x7c\x05\x6f\xbd\x7c\x5d\xba\xdf\x41\x3f\x7f\xf7\
\x93\x9e\xd2\x94\x73\xa5\x01\x48\x0f\x0b\x89\x00\x81\x11\x50\x40\
\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x65\x3a\xa0\x5b\
\x8f\xec\x18\x1c\x70\xe3\x6a\xfd\x53\x57\xcf\xbd\x19\x1b\x17\xeb\
\x98\x93\x45\x01\x5a\x56\xdd\xdb\x67\x67\x99\xe2\x5e\x27\x3b\x36\
\x7a\x67\x92\xbd\xad\x5d\x54\x7a\xb7\x35\x19\xd0\x1f\xb7\x2c\x59\
\xb0\xeb\xe4\xbe\x8f\x73\xa6\x3c\x68\x12\xfb\xae\xce\xb4\x33\xd7\
\xce\xbf\x21\x4d\x1b\x0e\x6c\x19\x95\x7c\xbe\xb4\x03\x79\x69\x1f\
\xd5\xc9\xe7\x25\x05\x34\xfe\x79\xbc\x5d\xa7\x6f\x3e\x8a\x51\xaa\
\x48\x68\x0c\xe1\xcc\x36\xe9\xe8\x0e\xc6\x7d\x52\xcf\xec\x3f\xb1\
\x9a\x67\x91\x92\xe7\x0c\x01\x9d\xbc\x6a\xe6\x66\xff\x80\xd3\x6d\
\xd5\x2d\x0f\xb9\x17\xed\xd3\xdc\x86\xcc\x1d\x75\x56\xfa\xf8\x6b\
\x08\x28\xe1\x44\xf6\x10\xce\x9c\x10\x13\x17\xeb\xa4\x4f\x48\x48\
\xb0\x91\xfb\x00\x4d\xea\x79\xe9\x1a\xd6\xf1\x34\x63\x49\x62\xca\
\xce\x11\xca\x7c\x87\x36\x35\x63\x25\x62\x9a\xb9\xe0\xb0\xee\x69\
\xb8\xbc\x6f\x47\xc3\xfa\x37\xd0\x39\x39\xda\x9a\xb9\x22\xf1\xc8\
\x7d\x0f\xf1\x33\x0b\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\
\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\
\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\
\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\
\x28\x20\x30\x02\x0a\x08\x4c\x6f\x6d\x6d\xfd\x5c\xee\x9d\xf7\x1d\
\x0e\x34\x4c\x48\x5b\x76\x76\x97\x62\x09\xa6\xcd\x3d\xa4\x76\x09\
\x42\xa3\x83\xc2\x0c\xd8\xab\x5f\x4e\x31\x04\x74\xd3\xf8\xe5\x56\
\x1d\xc6\xf6\x78\x9e\x98\x98\x68\xad\x76\x41\xc8\x8d\x08\x67\x4e\
\x90\x72\x99\xd4\x41\x37\x8e\x5b\x66\xd8\xbb\xdf\xfb\x13\xfa\x3e\
\x7d\x16\x13\x9d\x57\xbd\xb2\x00\xcb\xb6\x7a\xcc\xa2\xbc\x0e\x76\
\x0e\x11\xd2\xf9\x54\x1f\x71\x57\x8d\x5e\x94\xcf\x78\xfe\xcc\x3f\
\x7f\xb7\x1a\xb7\x6c\xda\x76\x25\x8b\x13\x19\x1f\xe4\x90\x13\x3e\
\x7e\xbb\x67\xff\xd6\xaf\x35\x9f\x67\xea\xba\x74\xbf\x83\x56\x2f\
\x5b\x65\x87\xd4\x66\x73\xa6\x2c\x00\x19\x61\x21\x11\x20\x30\x02\
\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\
\x08\xcc\x64\x40\xfd\x96\x4e\xdd\x79\xf6\xda\xf9\x96\x4a\x17\x03\
\x58\xba\x97\x7f\xd6\x4c\x11\xd0\xab\xb7\xfe\x7d\xed\xab\x05\xdf\
\x1c\x53\xb6\x24\x00\x46\xed\xc7\x74\x4f\x7c\xb5\x5c\xd5\x3f\x46\
\x77\x1f\xfa\x96\x74\x39\x45\x40\x09\x27\xa0\xbe\x53\x57\xcf\xbd\
\x79\xee\xdf\x8b\xcd\xab\x96\xa9\xb4\x27\x29\xa0\xac\x7f\x0b\x88\
\xe3\xd0\xdf\x47\xdf\x4b\x11\x50\x47\x7b\x87\x70\x35\x0b\x02\xf0\
\x3f\x03\xda\xf7\xe9\x2d\x9d\xa6\xf8\x88\x2b\x7d\x41\x95\x3e\x03\
\xab\x53\x12\x00\x49\xf2\x05\x45\xa9\x96\xe2\x1a\xaf\x9c\xbc\x72\
\xe6\x66\xff\xcb\xa7\xdb\x2a\x59\x18\x60\xa9\xec\xf4\x76\xcf\xd6\
\xfa\x2e\x76\x7a\x79\xbe\x3e\x31\x31\xe3\x86\xc9\x86\xdc\x80\xf9\
\x59\x59\x59\x25\x64\x74\x9b\x34\x57\x54\x98\xb2\xfa\xfb\x8d\xc7\
\x2e\x9d\x6c\x6f\xde\x92\x00\xbc\xcc\xc5\x39\x5f\xe8\xe2\xe1\xb3\
\x8b\xdb\x58\xdb\xc4\xbf\x7c\x5d\xaa\x80\x3e\x8e\x78\xe2\xfe\xd1\
\x94\x01\xf7\x94\x29\x0d\xc0\x93\xc8\xa7\x6e\x1d\x7d\x3f\x8c\xeb\
\xde\xb2\xcb\x88\x0e\x0d\xde\x9e\x92\xfc\xba\x54\x01\x25\x9c\x80\
\x3a\x96\xef\x5a\xfb\xad\xb4\xab\x93\x37\x5f\x6b\x3e\xd7\x38\x2f\
\x45\x40\x59\x82\x0b\xa8\x6b\xd1\xef\x4b\x7f\x30\x19\xd0\xbb\x0f\
\x43\xbc\xd5\x29\x09\x40\x72\xd3\xd6\xcc\x59\x3f\xac\xeb\xc0\x4e\
\xd2\xf9\xa4\x80\xfe\xbc\x7d\xe5\x0c\xf5\x4a\x02\x60\x74\xe4\xa2\
\x7f\x47\xe3\xf9\xa4\x80\x9e\xb8\x72\xa6\x8d\x3a\xe5\x00\x48\x0b\
\xdb\x83\x02\x02\x4b\x0a\xa8\x77\xf1\xd2\x27\xaf\xdd\x0e\xaa\xa9\
\x66\x31\x00\x52\x4a\x0a\x68\xe7\xc6\xed\xc7\x4d\x5a\x39\xe3\x37\
\x35\x8b\x01\xa0\xd3\x95\x28\x5c\x2c\xc0\x78\x3e\x29\xa0\xb5\x5e\
\xa9\xbe\x55\x9d\x72\x00\x24\x37\xa9\xcf\x98\x06\xc6\xf3\x29\xbe\
\x83\x7e\xf3\xe1\x57\x2d\xbf\xf9\x65\xca\x2e\xe5\x4b\x02\x20\xa9\
\xe6\xed\xb3\x2b\xaf\x53\x9e\x87\xc6\xcb\x29\x02\x5a\xb5\x4c\xe5\
\xdd\x3d\x5b\xbd\x37\x6c\xe9\x8e\xd5\xd3\x94\x2f\x0d\xb0\x6c\x45\
\x0a\xba\xfd\xeb\xdb\x73\xf8\x1b\xc9\xe7\xa5\x5a\x8a\xdb\xae\xde\
\x9b\xd3\x5f\x7b\xe5\xd5\xcd\x9f\xcd\x1a\xfa\x8f\x72\xa5\x01\x96\
\xed\xa3\xd6\xdd\xbe\x78\xa7\x6e\xab\x99\x2f\xcf\x37\xf9\x33\x4b\
\x51\x57\xf7\x6b\xc6\xed\x42\xf7\x9f\xfd\xab\xfb\xbd\xb0\x50\xd6\
\x32\x02\xcc\x2c\x8f\x63\x9e\x87\x75\x2a\xd5\xfa\xd5\x35\x5f\x81\
\xdb\x69\xdd\x26\xc3\xdf\x41\x1b\x57\xab\xbf\xdc\xbc\x65\x01\xc8\
\x2c\x56\x54\x00\x04\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\
\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\
\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\x60\x59\x0e\xe8\xbd\xb0\
\xd0\x32\x0f\x9e\x3c\xf4\xc8\x89\x62\x72\x9b\xd0\x47\x0f\x4a\xe9\
\x74\x89\xd6\x6e\x05\x0a\x07\xa9\x5d\x8b\x88\x1e\x85\x3f\x2e\xfa\
\xf4\x59\x84\xab\x9b\x4b\xa1\x60\x0e\x6f\xa9\xd3\xd9\xea\x6d\x63\
\xca\x7b\x78\x1f\xcd\xca\x7d\x32\x0c\xe8\xe2\x3f\x56\xcc\xfa\xfd\
\xe8\xce\xcf\xe5\x97\xa5\x21\xd2\x6e\xbd\xad\x32\xbc\x15\x90\x29\
\xd2\xc6\xd9\x9f\xbf\xfb\x49\x8f\xfc\x79\x5c\x42\xd2\xba\x8d\xc9\
\x80\xde\x7e\x70\xb7\xfc\xe7\x73\xbe\x3e\xff\x3c\xe1\xb9\x6d\xce\
\x95\x97\x0b\x11\x4e\x98\xd1\xd9\x6b\xe7\x5b\x1a\x0f\xb5\x22\x05\
\xd5\xd4\x96\x63\x26\x8f\xcd\x22\x1d\x40\x49\x89\x02\x01\xfc\xe7\
\xfb\x5f\x17\x2c\x93\xa6\x35\x63\x17\x3b\xdb\xdb\xda\x45\x19\xe7\
\x27\x05\x34\x36\x2e\xd6\xb1\xcb\xb8\xde\x51\xa6\xef\x0e\x40\x09\
\x5d\xc7\xf5\x8e\x9c\xd6\xcf\xaf\xb6\x77\x71\xaf\x13\xd2\xe5\xa4\
\x80\x12\x4e\x40\x0c\xc3\xe6\xfb\xfa\xcf\xff\x62\x86\x97\x7b\x81\
\xc2\x41\x86\x80\x76\x18\xdb\xe3\xb9\xda\x45\x01\xf8\x9f\x7e\x33\
\xbe\x08\x94\x76\x3b\xa4\xbf\xff\xe4\x61\x49\x0e\x71\x0f\x88\x67\
\xe3\xc1\xad\x23\xf4\xb3\xd6\xff\xb8\x42\xed\x42\x00\xa4\xb6\x7c\
\xf7\xba\xc9\xfa\x4b\xc1\x57\x1a\x64\x7c\x53\x00\x6a\xc8\xd6\xaa\
\x7e\x3d\xbb\x54\x37\x57\x1d\xc2\x0a\xbc\x1e\xa6\x3b\x74\x3c\x58\
\xd6\x7d\x9b\xd4\xf3\xd2\x95\x2c\xe1\x62\xe6\x8a\xc4\xb3\x74\xed\
\x19\xd9\xf7\xb5\x84\xf7\x50\x76\xfe\x7f\xb2\x15\x50\x4f\x8f\x02\
\xd9\xb9\x7b\xae\x10\xf6\x24\x5a\xf6\x7d\x0b\x17\x72\xb6\x88\xff\
\xa3\xec\xb0\x84\xff\x1f\xbd\xde\x46\x17\x1f\x2f\x6f\x39\x2c\x2b\
\xcb\x03\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\
\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\
\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\
\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\
\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\
\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\
\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\
\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\
\x28\x20\x30\x02\x0a\x08\x2c\x5b\x01\xdd\x7f\x24\xc8\x5c\x75\x08\
\xeb\x5e\x48\xb8\xec\xfb\x5e\xbc\x1c\xa2\x0b\xb9\x1f\x61\xc6\x6a\
\xb4\xc7\x12\xde\x43\xf1\xf1\xcf\x65\xdf\x57\x9f\xd7\x29\xcf\xc3\
\xf0\xa8\x08\x57\x39\x77\x3e\x60\x01\xff\xb9\xd9\x71\xf1\x4a\xa8\
\xda\x25\x08\x8f\xf7\x50\xfa\xf4\x0d\xab\xd4\x5d\xb9\xed\xd8\xae\
\x41\x6a\x17\x02\x20\x35\x7d\x9f\xb7\xba\x7f\x4e\x40\x01\xf1\x0c\
\xeb\x3a\xb0\x93\xe1\x3b\xa8\x77\xf1\xd2\x27\xaf\xdd\x0e\xaa\xa9\
\x76\x41\x00\xfe\xa7\x6e\xa5\xda\x1b\x0c\x01\x9d\xd6\x6f\x5c\xad\
\xf6\x63\xba\x27\xaa\x5d\x10\x80\xff\x6c\x1a\xbf\xdc\x4a\x3a\xd5\
\x27\x9f\xf1\xfe\x84\xbe\x4f\x9f\xc5\x44\xe7\x55\xaf\x2c\x00\xc6\
\x70\x4a\x52\xfc\xcc\xb2\x6a\xf4\xa2\x7c\x1b\x0f\x6e\x1d\xb1\x7c\
\xf7\xba\xc9\xca\x97\x25\x30\xe9\xb3\x85\x55\x86\xb7\x02\xb2\xa5\
\x9a\xb7\xcf\x2e\xdf\x9e\xc3\xdf\x48\x3e\x2f\xd5\xef\xa0\x1d\x1a\
\xb6\xf9\x56\x9a\x16\xfe\xbe\xf4\x87\xed\xc7\xf7\xf4\x57\xae\x3c\
\x81\x11\x4e\xe4\xa0\x4a\x9e\xaf\x1c\x9c\xd0\x7b\x54\x23\x53\xd7\
\xa5\xb9\xa2\xc2\xc7\x6f\xf7\x1c\x20\x4d\x4f\x23\xc3\x0b\xcf\xdb\
\xb2\x78\xe1\xf1\x80\x53\xed\x72\xac\x42\xc0\xc2\x94\x2b\x51\xe6\
\x78\xdf\x17\xf9\x92\x16\xd0\xa6\x77\xbb\x0c\xd7\x24\xca\xe7\x9c\
\xf7\xfe\x88\xf7\x07\xb7\x37\x5f\x69\x00\x32\x8b\x75\x71\x01\x81\
\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\
\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\
\x40\x60\x04\x14\x10\x18\x01\x05\x04\x96\x6e\x40\x8f\x5c\xf4\xef\
\x28\x6d\x72\x76\xfd\xde\x8d\xaa\xcf\x62\x9e\xe5\x7d\x9e\x90\x60\
\xb8\xbd\xb3\x83\xd3\x63\x45\xaa\x03\x34\x26\x32\x3a\x2a\xbf\x95\
\x95\x55\xa2\xa3\x9d\x43\x78\xe1\xfc\x85\x82\x1b\x54\xa9\xb3\xfa\
\xdd\x86\x6d\xd2\xdc\xfe\xda\x64\x40\xd7\x1f\xd8\x32\x6a\xd5\x9e\
\x0d\x13\xd2\x7b\x12\x33\xd4\x0a\x58\xa4\xc4\xc4\x44\xab\xa8\x98\
\x67\xf9\x82\x43\x6e\xfa\x04\xef\xbe\xe9\xb3\x62\xf7\xba\x49\x65\
\x4b\x78\xf9\x4f\xfd\xc4\xef\xb5\x97\x6f\x9b\x2a\xa0\x23\x7f\x1a\
\x7f\x28\x20\xf8\x6a\x7d\x65\x4a\x05\x20\xf9\xe7\x56\x60\x6d\x69\
\xbf\x60\xc9\x77\x77\x22\x49\x11\xd0\x95\xbb\xd7\x4f\x24\x9c\x80\
\x7a\x3a\xfa\xf6\x8c\xdb\xe0\xb7\xd4\xd6\x78\x39\x45\x40\x37\x1c\
\xfc\x6d\xa4\xf2\x25\x01\x30\x92\x96\xf3\x9c\x0f\xbc\xd4\xd4\xc7\
\xab\xe2\x5e\xe9\x72\x52\x40\xb7\x1d\xdb\x35\x50\xbd\xb2\x00\x18\
\x8d\x5f\x36\xed\x8f\x75\xdf\xfc\xec\x20\x9d\x4f\x0a\xe8\xd6\x23\
\x3b\x86\xa8\x57\x12\x00\xa3\xb8\xe7\xf1\xf6\xc6\xf3\x49\x01\x0d\
\x79\x74\xbf\xb4\x3a\xe5\x00\x48\x4b\x52\x40\xed\x6d\xed\xa2\x62\
\xe2\x62\x9d\xd4\x2c\x06\x40\x4a\x49\x01\x6d\x54\xb5\xde\x8a\x5d\
\x27\xf7\x7d\xac\x66\x31\x00\x52\x4a\x0a\xe8\xa7\x6d\x7b\x7d\x42\
\x40\x01\xf5\x75\x6a\xdc\x36\x69\x25\xa1\x14\x3f\xb3\xb8\xe5\x2f\
\x74\x3d\xf4\xf1\x03\x4f\xc5\x2b\x02\x90\xe4\xfd\x66\x1d\xc7\x18\
\xcf\xa7\x08\xe8\x82\x2f\x67\x96\xe6\x28\x67\x80\x7a\xe6\x0d\x9e\
\x56\x2e\xf9\xe5\x54\xab\xfa\x49\xab\x1a\x11\x52\x40\x79\xf3\x86\
\x4c\x2f\x5b\xb4\xa0\xfb\xb5\xe4\xf3\x4c\xae\x2c\x2f\x85\xf4\x7c\
\xe0\xa5\x26\xdf\xae\x9a\xb5\x59\x5a\xa9\x57\x99\xf2\x00\xcb\xd4\
\xb1\x51\xdb\x89\xdd\x9a\x77\x1c\x6d\xea\xba\x34\x37\x37\xf3\xf1\
\xaa\xb8\x6f\xe5\xe8\x85\x2e\x39\x57\x16\x80\x8c\xb0\xc1\x36\x20\
\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\
\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\
\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x32\x15\xd0\x9b\xa1\xb7\
\x2b\x6e\xf7\xdf\xf3\x59\xe0\x9d\xeb\x35\xae\xdc\xbc\x56\x27\xa7\
\x8b\x12\x9d\xb4\x2d\x9e\x55\x86\xb7\x02\x52\x93\x76\x8a\x50\xa9\
\x74\x85\x03\xaf\x57\xac\xb9\xb1\xf6\x2b\x35\x7e\xcb\xe8\xf6\x69\
\x06\xf4\x49\xe4\x53\xb7\x81\xb3\xbf\xba\x14\x1e\x15\xe1\x6a\xde\
\x12\x73\x3f\xc2\x99\x16\xfe\x74\x65\x44\xda\x63\x49\xe8\x99\x43\
\x9e\xfb\xce\x1c\xea\x69\x9c\x37\xb6\xc7\xf0\x56\xd5\xcb\xfa\xec\
\x34\x75\xfb\x54\x01\x95\xb6\xff\xfc\x60\xc2\x27\x8f\x13\x75\x89\
\xfc\x4f\x23\x8b\x78\xcb\xc8\x31\x6e\xd9\xd4\x1d\xd2\xe9\x94\x8f\
\x7d\xeb\x94\xf3\xf0\x3e\x96\xfc\xba\x14\x01\x9d\xb7\x65\xf1\xc2\
\xdd\x27\xf7\xf7\x55\xb2\x38\x00\xff\xf9\x6a\xa1\xdf\x51\xf7\x02\
\x85\x83\xe6\x7f\x31\xc3\xcb\x38\x2f\x29\xa0\x63\x96\x4c\xda\x77\
\x21\x28\xa0\xb1\x2a\x95\x01\x30\x90\x76\x20\xff\xfe\x84\xbe\x4f\
\x57\x8d\x5e\x64\xd8\x93\x89\x21\xa0\xeb\xf6\x6f\x1e\x43\x38\x01\
\x31\x3c\x8b\x89\xce\x3b\xf8\x87\x91\x7f\xcf\x1a\x30\xa9\x8a\x21\
\xa0\xab\xff\xfc\x75\x9c\xda\x45\x01\xf8\x1f\xe9\xe0\xbe\xd2\x02\
\x5a\xfd\xca\x74\x8e\xa4\x0d\x40\x3d\xd2\xc1\xb4\xf5\xbf\x1f\xdd\
\x31\x58\xed\x42\x00\xa4\x76\xeb\xfe\x9d\x0a\xfa\xe8\xd8\x18\x67\
\xb5\x0b\x01\x60\x5a\xb6\x56\xf5\xf3\x1d\xda\xd4\x5c\x75\x08\xeb\
\xf4\x85\xbb\xba\xad\x3b\x02\x64\xdd\xb7\x73\x5b\x1f\x5d\x85\xb2\
\x85\xcd\x5c\x91\x78\xfc\xa6\xef\x95\x7d\x5f\x4b\x78\x0f\x4d\x9c\
\x75\x40\x17\x1f\xff\x5c\xd6\x7d\x59\x17\x17\x10\x18\x01\x05\x04\
\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\
\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\
\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\
\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\
\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\
\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\
\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\
\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\x60\
\x04\x14\x10\x58\xb6\x02\xea\x37\x7d\xaf\xb9\xea\xd0\xa4\x75\x5b\
\xce\xab\x5d\x82\xf0\x78\x0f\xa5\x4f\x5f\xbc\x50\xd1\x2b\xb7\x1f\
\xdc\x2d\xaf\x76\x21\x00\x52\xd3\x77\x6d\xda\xc1\xf7\xbb\x75\x73\
\xd7\xa8\x5d\x08\x80\x94\x8a\x17\x2a\x76\x59\x5f\xdf\xe7\xf5\xb5\
\x04\x14\x10\xcf\xf4\x4f\xc7\xd5\x34\x7c\x07\x9d\xd0\x7b\x54\xa3\
\xd1\x8b\x27\x1e\x50\xbb\x20\x00\xff\x79\xb5\x5c\xb5\x6d\x0e\x76\
\xf6\x91\x86\x80\x56\xf2\x7c\xe5\x60\xb7\xe6\x1d\x47\xaf\xdc\xb3\
\x61\x82\xda\x85\x89\x27\xf1\xc5\x64\xa5\x76\x11\xb0\x20\xc5\x5c\
\x8b\x5c\x1d\xdd\xfd\xcb\xb7\xa5\xf3\x49\x4b\x71\x3b\x36\x6a\x3b\
\xb1\xa8\x6b\x91\x7f\xa6\xaf\xfd\x61\xad\x7a\xa5\x89\x88\x70\x42\
\x39\x8d\xaa\xd6\x5b\x31\xb8\x63\xbf\xee\xc6\xcb\x29\x7e\x66\xa9\
\x57\xf9\xb5\x75\xd2\xd4\x7b\xea\xa0\xdb\x61\xe1\x8f\x8a\x29\x5f\
\x1e\x60\xb9\x56\x8c\x5a\x50\xc0\xd9\xc1\xe9\x71\xf2\x79\x26\x7f\
\x07\x5d\x3c\x7c\x76\x71\xe9\x74\xc2\xf2\xe9\xdb\x4e\x5d\x3d\xf7\
\xa6\x02\xb5\x41\x0b\xf8\x36\x90\x65\xa5\xdc\x3d\xce\x4f\xee\x3b\
\xb6\x9e\xa3\xbd\x43\xb8\xa9\xeb\xd3\x5d\x51\x61\x74\xf7\xa1\x6f\
\x19\xcf\xff\x71\x6c\xf7\x80\xbf\x2e\x1c\xeb\x12\x74\x37\xb8\x7a\
\x74\x6c\x8c\xb3\xb9\x0b\x85\x06\xfc\x7f\x38\xc9\x69\xda\xbc\x8a\
\x79\x9e\xae\x53\xb1\xe6\xc6\xb7\xeb\xbc\xf1\xbd\x83\x9d\x43\x44\
\x46\xb7\xcf\xf4\x9a\x44\x6f\xbe\xde\xe2\x07\x69\xca\x5e\x79\x00\
\xb2\x82\x75\x71\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\
\x04\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\
\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x96\
\x66\x40\x67\xac\x9b\xbb\xfa\xd0\xf9\x63\x5d\x95\x2c\x06\xb0\x44\
\x85\x5c\x5c\x6f\x2e\x1a\x3a\xab\xa4\xa9\xeb\x52\x05\x34\xe2\x59\
\x44\xc1\xee\x93\x3e\x7d\x98\xf3\x65\x01\x90\x3c\x78\xf2\xd0\xa3\
\xfd\x98\xee\x89\xbd\x5a\x77\xfb\xa2\x4d\xdd\x56\x33\x93\x5f\x97\
\x2a\xa0\x84\x13\x50\xc7\x92\xed\x2b\x67\xd8\xdb\xd9\x47\xb6\xac\
\xd9\x64\xa1\x71\x5e\x8a\x80\x4a\x29\x56\xbe\x2c\x00\x46\x3f\x6e\
\x59\xb2\xc0\x64\x40\x43\x1e\xdd\x2f\xad\x4e\x49\x00\x92\x93\x76\
\xdc\x37\xb4\xcb\x80\x2e\xd2\xf9\xa4\x80\x2e\xd9\xbe\x62\x66\xda\
\x77\x01\xa0\x94\xc3\x17\x8e\x77\x4e\x15\x50\xff\x80\xd3\x6d\xd5\
\x2b\x09\x80\x29\xfc\x0e\x0a\x08\x2c\x29\xa0\x5e\x45\x3d\x4f\x07\
\xde\xbd\x5e\x43\xcd\x62\x00\xa4\x94\x14\xd0\x2e\x4d\xdb\x8f\x9b\
\xbc\x72\xe6\x66\x15\x6b\x01\xa0\xfb\xef\xd0\x0f\xc6\xf3\x49\x01\
\xad\xfd\x4a\x8d\x2d\xea\x94\x03\x20\xb9\x49\x7d\xc7\x34\x30\x9e\
\x4f\xf1\x1d\x54\xda\x51\xb5\xb4\x37\x79\xe5\x4b\x02\x20\xf1\x29\
\x5d\x61\x9f\x8b\x73\xbe\x50\xe3\xe5\x14\x01\x7d\xb5\x5c\xd5\x3f\
\x3e\x68\xd1\x79\xe4\x8a\xdd\xeb\x26\x29\x5f\x1a\x60\xd9\x0a\xb9\
\xb8\xde\x18\xd7\x6b\x64\xd3\xe4\xf3\x52\x2d\xc5\x7d\xb7\x61\x9b\
\xc9\xd2\xc7\xdd\x41\x73\x46\x5c\x54\xae\x34\xc0\xb2\x75\x6f\xd9\
\x65\x44\x87\x06\x6f\x4f\x79\x79\xbe\x3e\x31\x51\xde\xda\x7d\x77\
\xc3\x42\xbc\x4f\x04\x9c\x7e\xe7\x52\xf0\x95\x06\x37\x42\x6f\x55\
\xbe\xfb\x30\xc4\x3b\xdb\x55\x02\x00\xa0\x80\x3c\x8e\xce\x61\x45\
\x0a\xba\xff\x5b\xa6\x98\xe7\xa9\x6a\xde\x3e\xbb\x5f\xaf\x58\x73\
\x63\x56\x1f\x23\x4b\xeb\x29\x48\x2b\xdb\x6f\x3d\xb2\x63\x48\x56\
\x9f\x04\x00\x00\x91\x44\x3c\x8b\x2c\x78\xed\x76\xa0\x34\xd5\xda\
\x79\x62\x6f\x3f\xe3\x7c\xf7\x82\x6e\x81\xbd\x5a\x77\x1b\x52\xfb\
\x95\x1a\xbf\x65\xf4\x18\x19\x36\xd0\xc7\x11\x4f\x8a\xf8\x2d\x9d\
\xba\xe3\xfa\xbd\x1b\x55\xb3\x5b\x30\x00\x00\x22\x0b\x09\x0b\xf5\
\x9a\xbc\x72\xa6\x61\xad\xa0\xce\x4d\xda\x8f\x7b\xaf\x69\x07\xdf\
\xb4\x6e\x9b\x6e\x03\x5d\xf2\xc7\x8a\x99\x5b\x8f\xee\x1c\x6c\xe6\
\xfa\x00\x00\x10\xde\xba\x7d\x9b\xc6\x4a\xd3\xf4\x4f\xc7\xd5\x2c\
\x53\xac\xf4\xa9\x97\xaf\x4f\xb3\x81\x4e\x59\xfd\xfd\xc6\x63\x97\
\x4e\xb6\xcf\xd9\xf2\x00\x00\x10\xdb\xd0\x1f\xc7\x9e\x94\x56\x83\
\x97\xd6\xb4\x4d\x3e\xdf\x64\x03\xdd\x72\xf8\x8f\x2f\x69\x9e\x00\
\x00\xfc\x67\xe2\x8a\xef\xb6\xae\x1d\xbb\xd8\xd9\x56\x6f\x1b\x6d\
\x9c\x67\xb2\x81\x6e\x3b\xba\x6b\x90\x72\x65\x01\x00\x20\xb6\xc4\
\xc4\x44\xeb\x6d\xc7\x76\x0f\x68\x57\xff\xcd\xe9\xc6\x79\xa9\x1a\
\x68\x78\x54\x84\xeb\xfd\x27\x0f\x4d\x1e\x20\x02\x00\x00\x4b\x15\
\x74\x2f\xb8\x5a\xf2\xcb\xa9\x1a\x68\x5e\xa7\x3c\x0f\x1d\xec\x1c\
\x22\xa2\x63\xa3\xf3\x28\x56\x15\x00\x00\x82\x73\xcb\x5f\x28\x38\
\xf9\x65\x93\x8b\x70\x9b\x54\xaf\xbf\x74\xfb\xf1\x3d\xfd\x95\x29\
\x09\x00\x00\xf1\x35\xaa\x5a\x6f\x45\xf2\xcb\x26\x1b\xe8\xc7\x6f\
\xf7\x1c\x70\xe2\xf2\x99\x77\xa4\xc3\x89\x2a\x53\x16\x00\x00\xe2\
\x7a\xbf\x59\xc7\x31\x25\x0a\x17\x0b\x48\x3e\x2f\xcd\xcd\x58\x16\
\x0d\x9d\x55\x72\xd0\xec\x11\x17\x6f\xde\xbf\x5d\x31\xe7\x4b\x03\
\x00\x40\x4c\x69\xee\x0b\x37\xbd\x3b\xcd\x1e\xf4\x6d\xa5\x6d\xc7\
\x76\x0d\xfc\x69\xdb\xf2\xd9\x39\x57\x1a\x00\x00\xe2\x29\x52\xd0\
\xed\xdf\x89\x7d\x46\x37\x2c\x98\xb7\xc0\x1d\x53\xd7\x67\xb8\x2b\
\xbf\xb7\x5e\x6f\x39\x47\x9a\x0e\x9c\x3b\xfc\xc1\xbc\x2d\x4b\x16\
\xc6\xc6\xc5\x3a\x9a\xbf\x4c\x00\x00\xc4\x20\xed\x30\x61\x60\x87\
\x8f\x3f\x4a\x7e\xec\x32\x53\x32\xbd\x33\x79\xe9\xc7\x53\xe3\x0f\
\xa8\x01\xc1\x57\xeb\xed\x3d\x73\xf0\xa3\xe3\x01\xa7\xda\x49\x9b\
\xbd\x64\xb7\x58\x00\x00\xd4\xe2\x5d\xbc\xf4\xc9\x7a\x95\x5f\x5f\
\xdb\xaa\x76\xd3\xf9\xd2\x56\x28\x99\xbd\x5f\x96\x8e\xc6\x62\x54\
\xa1\x54\xb9\xc3\xd2\xd4\xbf\x5d\x9f\x3e\x72\xee\x0f\x00\x40\x6e\
\x27\xab\x81\x02\x00\x60\xe9\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\
\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\
\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\
\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\
\x00\x32\x98\xb5\x81\xde\x0c\xbd\x5d\xf1\xec\xb5\xf3\x2d\xef\x86\
\x85\x78\x3f\x8e\x78\x52\x24\xec\xe9\xa3\x62\x41\xf7\x6e\x54\xe3\
\x10\x68\x90\x24\xbe\x98\xac\xd4\x2e\x02\x80\x45\x71\xcb\x5f\xe8\
\x7a\xf1\x42\x45\xaf\xe4\xcf\x9b\xff\x9e\x6b\xbe\x82\xb7\xca\x96\
\xf0\xf2\xaf\x5a\xa6\xf2\x1e\x7b\x5b\xbb\xa8\xec\x3e\xb6\xac\x06\
\xfa\x24\xf2\xa9\xdb\x1f\xc7\x76\x0f\xd8\xee\xbf\xe7\x33\x0e\x67\
\x86\xcc\xa2\x79\x22\x6b\xf8\xc8\x85\xec\x0b\x7d\xfc\xc0\x53\x9a\
\xd2\xbb\x4d\xed\x0a\x35\xb6\xb4\xaa\xd5\xfc\xc7\xea\x65\x7d\x76\
\x66\xe5\xb1\x33\xd5\x40\xa3\x62\x9e\xe5\x5b\xba\x63\xf5\xb4\xdd\
\x27\xf7\xf7\x4d\xd4\x25\xf2\x8e\x06\x90\x33\x52\xf4\x4c\xfe\xd4\
\x40\x19\xfe\x01\xa7\xdb\x4a\x93\xf1\x72\x51\xd7\x22\xff\x0c\x7e\
\xf7\x93\x1e\xe5\x3c\xbc\x8f\xa5\x77\xbf\x74\x1b\xe8\xbc\x2d\x8b\
\x17\x4a\x4d\xd3\x5c\x45\x02\x40\xba\xe8\x99\x10\xc0\xdd\x87\xf7\
\xca\x7e\xb5\xd0\xef\xa8\x74\xde\xbd\x40\xe1\xa0\x51\x1f\x7c\xf9\
\xb6\x87\x5b\xf1\x4b\x2f\xdf\x2e\x55\x03\x8d\x7f\x1e\x6f\xe7\xb7\
\x74\xea\xce\x0b\x41\x01\x8d\x15\xa8\x13\x00\x00\x61\x85\x3c\xba\
\x5f\x7a\xd0\x9c\x11\x17\x1d\xed\x1d\xc2\x27\xf5\x19\xd3\xc0\xb3\
\x48\xc9\x73\xc6\xeb\x52\x34\xd0\x75\xfb\x37\x8f\x59\xfd\xe7\xaf\
\xe3\x94\x2f\x11\x00\x00\x71\x3d\x8b\x89\xce\x3b\x64\xee\xa8\xb3\
\xa5\xdc\x3d\xce\x4f\xeb\xe7\x57\xcb\x56\x6f\x1b\x93\xd4\x40\x27\
\xaf\x9a\xb9\x39\xf9\x32\x60\x00\x50\x0e\x2b\x0c\x21\x77\x08\x0e\
\xb9\xe9\xd3\x7d\x52\xbf\xb0\x45\x43\xbf\x2f\x69\x68\xa0\x2b\xf7\
\x6c\x98\x40\xf3\x04\xa0\x1e\x9a\x27\x72\x8f\x98\xb8\x58\xa7\x91\
\x3f\x8d\x3f\x64\x68\xa0\xbf\x1f\xdd\x31\x58\xe5\x7a\x00\x00\xc8\
\x35\x6e\xdd\xbf\x53\xc1\xd0\x40\xa3\x63\x63\x9c\xd5\x2e\x06\x80\
\x25\x63\x11\x2e\x72\x1f\x7d\x42\x42\x82\x8d\x1a\x4f\xdc\xa4\x9e\
\x97\xae\x61\x1d\x4f\x35\x9e\x1a\x66\x76\xfa\xc2\x5d\xdd\xd6\x1d\
\x01\x8a\x3f\x6f\xe7\xb6\x3e\xba\x0a\x65\x0b\x2b\xfe\xbc\x30\xbf\
\x99\x0b\x0e\xeb\x9e\x86\xc7\x28\xfe\xbc\xc3\xfa\x37\xd0\x39\x39\
\xda\x2a\xfe\xbc\x30\xbf\x89\xb3\x0e\xe8\xe2\xe3\x9f\x2b\xfa\x9c\
\xec\x0b\x17\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\
\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\
\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\
\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\
\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\
\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\
\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\
\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\
\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\
\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\
\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\
\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\
\x00\x32\xd0\x40\x01\x00\x90\x41\x6f\x6d\x6d\xfd\x5c\x8d\x27\xde\
\x77\x38\xd0\x30\x01\x72\xad\xdb\x72\x5e\xed\x12\x60\x36\x89\x2f\
\x26\x2b\xc5\x9f\x75\xda\xdc\x43\x8a\x3f\x27\xb4\xc3\xf0\x0d\xd4\
\xab\x98\xe7\xe9\xc0\x3b\xd7\x6b\xa8\x5d\x0c\x00\x00\xb9\x85\xa1\
\x81\x7e\xd4\xea\xfd\x2f\xc7\x2c\x99\xb4\x4f\xed\x62\x00\x58\x2a\
\xe5\xbf\x7d\x02\xd9\xe1\xe3\x55\x71\xaf\xa1\x81\x56\x2e\x5d\x61\
\xff\xe0\x8e\xfd\xba\xcf\xda\x30\x7f\xb9\xda\x45\x01\x00\x20\xb2\
\x92\xee\x25\x2e\xf8\x7d\x38\xa2\x45\xd2\x4a\x44\x8d\xaa\xd6\x5b\
\x21\x75\xd4\xcf\xe7\x7c\x7d\x3e\xe2\x59\x64\x41\x35\x8b\x03\x00\
\x40\x44\x5d\x9b\x76\xf0\xed\xd2\xa4\xfd\x38\xe9\x7c\x8a\xb5\x70\
\x0b\xe6\x2d\x70\x67\xf9\xc8\xf9\xae\x67\xaf\x9d\x6f\xe9\xb7\x74\
\xea\x4e\x75\xca\x43\xee\xa3\xce\x0a\x20\x00\xa0\x94\xba\x95\x6a\
\x6f\x18\xd6\x75\x60\xa7\xe4\xf3\x4c\x6e\xc6\x52\xcd\xdb\x67\xd7\
\xa6\xf1\xcb\xad\x42\xc2\x42\xbd\xa6\xaf\x9b\xbb\xe6\xda\xed\xc0\
\x5a\xca\x94\x88\xdc\x89\xe6\x09\x40\x7b\x1c\xec\x1c\x22\x7a\xbd\
\xd9\x6d\x48\x8b\x57\x1b\xff\x64\xea\xfa\x74\xb7\x03\x75\x2f\xe8\
\x16\x38\xad\x9f\x5f\x6d\xe9\xbc\xd4\x4c\x97\xed\x5e\xfb\xed\x91\
\x0b\xfe\x9d\xd2\xbb\x0f\x2c\x1c\x5f\x46\x01\xe4\x62\xc5\x0b\x15\
\xbd\xd2\xa9\x71\xdb\x09\xd2\xcf\x9a\x19\xdd\x36\xd3\x3b\x52\x90\
\x9a\xe9\xb0\x2e\x03\x3b\xeb\xba\xfc\x77\x39\x32\x3a\x2a\xff\x9f\
\xa7\x0e\xf4\xfa\xeb\xc2\xb1\x2e\xff\xdc\x0a\xac\x2d\xbf\x5c\x68\
\xca\xff\x37\x4f\xfa\x28\xb2\x8b\xf7\x10\x72\x5a\xfe\x3c\x2e\xf7\
\xea\x55\x7e\x6d\x7d\xf3\x17\xdf\x30\x3d\x8b\x78\xfc\x9d\xd5\xfb\
\xcb\xde\x13\x91\xb3\x83\xd3\xe3\x77\xea\xb5\x9e\x21\x4d\x2f\x5f\
\x17\x74\x37\xb8\xda\x8d\xd0\x5b\x95\x6f\x3f\xb8\x5b\x3e\xec\xe9\
\xe3\x62\x8f\x23\x9e\xb8\x87\x3f\x8b\x70\x7d\x14\xfe\xb8\xe8\x93\
\xc8\xa7\x6e\xb1\x71\xb1\x8e\x72\x9f\x17\xb9\x43\xaa\x3f\x7c\xc6\
\xbf\x86\x2f\x9f\x02\x69\x78\xf9\xed\xc1\x5b\x06\x99\xe1\xe2\x9c\
\x2f\xf4\x45\x63\x0c\x91\x9a\xa3\x4b\x9e\x17\xe7\x9d\x5d\x42\x0a\
\xe7\x77\xbd\x51\xa6\x78\xe9\x93\x5e\x45\x3d\xcf\xd8\xdb\xda\x45\
\x99\xeb\xb9\x72\x64\x57\x7e\xa5\x8b\x96\x3a\x2b\x4d\x39\xf1\xd8\
\x00\x00\x88\x80\x7d\xe1\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\
\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\
\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\
\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\
\x03\x05\x00\x40\x86\x2c\x37\xd0\x7b\x61\xa1\x65\xb6\x1e\xd9\x3e\
\x64\xf7\xa9\x03\xbd\xe3\xe2\xe3\x1c\x72\xa2\x28\x00\x00\x94\xe4\
\x55\xb4\xd4\x99\x56\xb5\x9b\xfd\xd8\xa2\x66\x93\x45\x99\xbd\x4f\
\xa6\x1a\x68\x4c\x5c\x8c\xd3\x8c\xf5\xf3\x56\xf9\x07\x9c\x6e\x2b\
\xbf\x3c\x00\x00\xc4\x14\x78\x37\xb8\xfa\xbc\x2d\x4b\x16\x4a\x93\
\x74\xf9\x93\x36\x1f\x7e\x26\x35\xd4\xf4\xee\x93\x61\x03\x9d\xb0\
\x7c\xfa\xb6\x53\x57\xcf\xbd\x69\xae\x22\x01\x00\x10\xdd\x82\xad\
\xbf\xcc\x93\xa6\x81\xed\xfb\x7e\xd4\xb4\x46\xc3\x5f\x4c\xdd\x26\
\xcd\x06\x1a\x78\xe7\x7a\x8d\x2f\x7f\x1c\x73\x2a\xc7\xaa\x03\x00\
\x40\x70\x73\x36\x2d\xfa\xf9\x78\xc0\xa9\x76\x5f\x77\x1b\xd2\xee\
\xe5\xeb\x4c\x36\xd0\xfb\x8f\x1f\x96\xa4\x79\x02\x00\xa0\xd3\xf9\
\x5f\x3e\xdd\x76\xe6\xfa\x79\x2b\x87\x74\xfa\xac\x5b\xf2\xf9\x26\
\x1b\xe8\xe2\x3f\x56\xcc\x52\xa4\x2a\x00\x00\x72\x81\x83\x7f\x1f\
\x7d\xbf\xf9\xab\x8d\x17\xfb\x78\x55\xdc\x6b\x9c\x97\xaa\x81\x26\
\xea\x12\xad\x8e\x07\x9c\x6c\xaf\x6c\x69\x00\x00\x88\xed\x78\xc0\
\xa9\xb6\xe9\x36\xd0\x67\x31\xd1\x79\x95\x2d\x09\x00\x00\xf1\x3d\
\x8b\x4d\xd9\x1f\x53\x35\x50\x27\x7b\xc7\xa7\xa5\x8b\x94\x3c\x1b\
\x74\xef\x46\x35\xc5\xaa\x02\x00\x40\x70\x15\x4b\x95\xfb\x2b\xf9\
\x65\x93\xbf\x81\x76\x6a\xdc\x6e\xc2\xd4\x35\xb3\x37\x28\x53\x12\
\x00\x00\x62\x2b\x52\xd0\xed\xdf\x66\x35\x1a\x2d\x49\x3e\xcf\x64\
\x03\xad\x53\xa9\xd6\xaf\xef\x35\x7d\xd7\x77\xf5\xde\x5f\xfd\x94\
\x29\x0d\x00\x00\x71\x4d\xec\x3d\xba\xd1\xcb\xf3\xd2\xdc\x0e\xb4\
\x73\x93\x76\xe3\x4a\x15\xf1\xf8\xfb\xdb\x55\xb3\x36\xe5\x6c\x59\
\x00\x00\x88\xa9\x42\xc9\x72\x87\x27\xf5\x1d\x53\xdf\xd4\x75\xe9\
\xee\x89\xe8\xb5\x0a\xaf\x6e\xde\x34\x7e\xb9\xd5\x9a\xbd\x1b\xfd\
\xd6\xee\xdb\x34\x36\x67\xca\x03\x00\x40\x2c\xf9\xf3\xb8\x84\x8c\
\xfa\xe0\xcb\xb7\xbd\x8b\x97\x3e\x99\xd6\x6d\x32\xb5\x2f\xdc\xae\
\x4d\x3b\xf8\x4a\xd3\xf5\x7b\x37\xab\xac\xfa\x73\xfd\x84\x13\x97\
\xcf\xb4\x31\x5f\x99\x00\x00\x88\xa1\x4b\x93\xf6\x7e\x2f\xfa\xdd\
\x37\x99\xb9\x6d\x96\x8e\xc6\xe2\x59\xc4\xe3\xef\x91\xdd\xbe\x78\
\x27\xf9\xbc\x2b\x37\xaf\xbd\x7e\x23\xe4\xa6\xcf\x83\x27\x61\x1e\
\x91\xd1\x51\xf9\xe3\xe2\xe3\xec\xf5\x7a\x7d\x6c\x56\x1e\x17\x00\
\x00\xa5\x58\xe9\xac\x12\xf2\x3a\xe5\x09\x73\x2f\x50\x38\xc8\xab\
\xa8\xe7\xe9\x92\xee\x25\x2e\xc8\x79\x9c\x6c\x1f\x0f\xb4\xbc\x87\
\xf7\x31\x69\xca\xee\xe3\x00\x00\x90\x9b\x70\x40\x6d\x00\x00\x64\
\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\
\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\
\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\
\x03\x0d\x14\x00\x00\x19\x14\x69\xa0\xf7\xc2\x42\xcb\x3c\x78\xf2\
\xd0\x43\x89\xe7\x42\xee\x10\x19\x13\xe5\x12\x19\x15\x95\xff\x59\
\x5c\x74\x5e\x47\x5b\xfb\x48\x27\x07\xa7\xc7\x79\x1c\x9d\x1f\xa9\
\x5d\x17\x72\x87\xd8\xb8\x38\x87\xc8\xe8\xc8\x02\x91\xd1\x51\x2e\
\xcf\x13\x13\xf4\xce\xf6\x8e\x4f\x9c\x1d\x9c\x1f\x3b\xda\x3b\x84\
\xab\x5d\x1b\xc4\x60\xab\xb7\x8d\x29\xe5\xee\xf1\xb7\x83\x9d\x7d\
\x64\x4e\x3d\x47\xb6\x1b\xe8\xb9\x6b\x17\x5a\x9c\xbc\x7a\xf6\x2d\
\xff\x80\x53\x6d\x43\x1f\x3f\xf0\x34\x43\x4d\xd0\x8a\x44\x9d\x74\
\xd8\x03\x00\x10\x4e\x35\x6f\x9f\x5d\xb5\xca\x57\xdf\x5a\xb7\x72\
\xed\xf5\xd2\xb1\x3f\xe5\x3c\x46\x96\x1a\xe8\xed\x07\x77\xcb\x6f\
\x3c\xb8\x75\xc4\x81\x73\x47\xba\x3d\x4f\x78\x6e\x2b\xe7\x09\x61\
\x41\x68\x9e\xc8\x22\x3e\x73\x41\x29\x67\xaf\x9d\x6f\x29\x4d\x8b\
\xb6\x2d\x9b\x63\x9c\xf7\xe2\x1b\xeb\xf9\x76\xf5\xdf\x9c\xd6\xa0\
\x4a\x9d\xd5\x36\xd6\x36\xf1\x19\x3d\x46\x86\x0d\xf4\xe4\x95\xb3\
\x6f\xcd\xdd\xfc\xd3\xe2\xc7\x11\x4f\xdc\xb3\x5b\x30\x00\xa4\x87\
\xe6\x09\x35\x05\x87\xdc\xf4\xf9\xfe\xd7\x05\xcb\xa4\x49\xba\xdc\
\xba\x76\xf3\x79\x3d\x5b\xbd\x37\xcc\xde\xd6\x2e\xca\xd4\xed\x4d\
\x36\xd0\xd8\xb8\x58\xc7\x6f\x57\xcd\xda\x74\xe6\xda\xf9\x37\x72\
\xb2\x58\x00\x00\x44\xb5\xdd\x7f\xcf\x67\xd2\xe4\xe4\xe0\xf4\xc4\
\xef\xc3\xaf\x5a\x78\x17\xf7\x3a\x91\xfc\xfa\x54\x0d\x54\x6a\x9c\
\xc7\x03\x4e\xb5\x53\xac\x42\x00\x00\x04\x16\x15\x1d\xe5\x32\x6c\
\xbe\xaf\x7f\x1e\x47\xe7\xb0\xe9\x9f\x8e\xaf\xe9\x5e\xa0\x70\x90\
\x34\x3f\xa9\x81\xde\x79\x70\xaf\xdc\x80\xd9\xc3\x03\x12\x13\x13\
\xad\xd5\x2b\x13\x00\x00\x31\x45\x3c\x8b\x2c\xd8\x6f\xc6\x17\x81\
\x9d\x9b\xb4\x1f\xf7\x5e\xd3\x0e\xbe\x86\x06\x7a\xff\xc9\xc3\x92\
\xfd\xbf\x1f\x76\x45\xed\xe2\x00\x00\x10\xdd\xba\x7d\x9b\xc6\xda\
\xeb\x6d\x9f\x19\x1a\xe8\xac\xf5\x3f\xae\x50\xbb\x20\x00\x00\x72\
\x8b\xe5\xbb\xd7\x4d\x36\x34\xd0\x4b\xc1\x57\x1a\xa8\x5d\x0c\x00\
\x00\xb9\x89\x6a\xbb\xf2\xab\x5e\xb9\x98\xae\x4a\x25\xb6\x8c\xd1\
\x82\xc0\xeb\x61\xba\x43\xc7\x83\x15\x7f\xde\x26\xf5\xbc\x74\x25\
\x4b\xb8\x28\xfe\xbc\x30\xbf\x0d\x5b\x2f\xe8\x22\xa3\xe2\x14\x7f\
\xde\xae\xed\xaa\xe8\xec\xed\x6d\x14\x7f\x5e\x98\xdf\xd2\xb5\x67\
\x14\x7f\x4e\x7d\x42\x42\x82\x2a\xef\x9e\xfc\x2e\x0e\x3a\x4f\x8f\
\x02\x6a\x3c\x35\xcc\x2c\xec\x49\xb4\x2a\xcf\x5b\xb8\x90\x33\xef\
\x21\x8d\xb0\xb1\x51\x67\xdd\x45\x8f\xe2\x2e\x3a\x27\x47\xf6\x09\
\xa3\x05\x7a\xbd\x8d\x2e\x3e\xfe\xb9\xb2\xcf\xa9\xe8\xb3\x01\x00\
\xa0\x11\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\
\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\
\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\
\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\
\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\
\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\
\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\
\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\
\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\
\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\
\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\
\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\xf4\xd6\
\xd6\xd6\xcf\xd5\x78\xe2\xeb\x37\xc2\x74\x09\x89\x89\x6a\x3c\x35\
\xcc\xec\x5e\x48\xb8\x2a\xcf\x7b\xf1\x72\x88\x2e\xe4\x7e\x84\x2a\
\xcf\x0d\xf3\x8a\x89\x51\xe5\xcf\x90\xee\xb0\x7f\xb0\xce\xd6\xd6\
\x46\x95\xe7\x86\x79\xc5\xc7\x2b\xff\x1e\x32\x7c\x03\x75\xb2\x77\
\x7c\x1a\x15\xf3\x2c\x9f\x92\x4f\x1c\x74\xf3\xb1\x61\x02\xe4\xba\
\x78\x25\x54\xed\x12\x90\xcb\x1d\x39\x71\x43\xed\x12\x90\x8b\x19\
\x1a\x68\xed\x0a\xaf\x6e\xde\x7f\xf6\xaf\x1e\x6a\x17\x03\x00\x40\
\x6e\x61\x68\xa0\x03\xda\xf5\xe9\xe3\x7f\xf9\x74\xdb\xa8\xe8\x28\
\x17\xb5\x0b\x02\x00\x40\x74\xbd\x5a\x77\x1b\x62\x68\xa0\x36\x36\
\x36\x71\x4b\x86\xcf\x29\xf6\xc9\x77\x43\x82\x9e\x44\x3e\x75\x53\
\xbb\x30\x00\x96\x46\x5a\x1f\xc2\x4a\xed\x22\x80\x4c\xe9\xf7\xce\
\x87\x9f\xbe\x51\xab\xd9\xfc\xa4\xb5\x70\xed\x6d\xed\xa2\x7e\x19\
\x31\xd7\x7d\xe7\x89\x3f\xfb\xcd\xff\xed\x97\x1f\xd5\x2c\x0e\x80\
\xa5\xa1\x79\x42\x7c\x5e\x45\x4b\x9d\x99\xd0\x7b\x74\x23\x47\x7b\
\x07\xc3\x9a\x93\xa9\x36\x63\x91\xba\xaa\x34\xfd\x76\x78\xfb\x17\
\x3f\xef\x58\xf5\x9d\xf2\x25\x02\x00\x20\x8e\x32\xc5\x4a\x9f\xf2\
\xed\x39\xfc\x8d\xbc\x4e\x79\x1e\x26\x9f\x9f\xe6\x76\xa0\xef\xd4\
\x6b\x3d\x43\x9a\x1e\x3e\x7d\x54\xfc\x87\x4d\x8b\x96\x9c\xbd\x76\
\xbe\x65\xce\x97\x89\x5c\x8d\xa5\x70\x00\x34\x22\x9f\x53\xde\x07\
\x9f\xb6\xed\xf5\xc9\xeb\x15\x6b\x6e\x4c\xeb\x36\x19\xee\x48\xc1\
\x35\x5f\x81\xdb\x52\xe7\x35\x5e\xfe\xf3\xf4\xc1\x8f\x36\x1d\xda\
\x36\xfc\xf6\x83\x3b\xaf\x98\xab\x50\x68\x04\xcd\x13\x40\x2e\x25\
\xfd\x8c\xd9\xac\x46\xa3\x25\xed\x1b\xbc\x35\xb5\x90\x8b\xeb\xcd\
\xcc\xdc\x27\xcb\x7b\x22\x6a\x56\xa3\xe1\xcf\xd2\x64\xbc\x1c\x1d\
\x1b\x9d\xe7\xf4\x3f\x7f\xb7\x3a\x71\xf9\x4c\x9b\x93\x57\xce\xb4\
\x89\x78\x16\x59\x20\xab\x8f\x09\x00\x80\x52\x7c\xbc\x2a\xee\xad\
\x51\xb6\xea\xf6\x3a\x2f\xbe\x5d\xba\x17\x74\x0b\x94\xfb\x38\xd9\
\xde\x95\x9f\x83\x9d\x43\x44\xdd\x4a\xb5\x37\x48\x53\x76\x1f\x0b\
\x00\x80\xdc\x82\x7d\xe1\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\
\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\
\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\
\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\
\x03\x05\x00\x40\x06\xd9\x0d\xf4\xc1\x93\x87\x1e\x27\x2e\x9f\x79\
\xe7\x52\xf0\x95\x06\x37\x42\x6e\x56\x96\x8e\x1b\x1a\x1d\x1b\xe3\
\x9c\xe6\x1d\x38\xd4\x15\x00\x40\x49\x89\x69\x5f\xe5\x92\x27\xdf\
\x7d\xf7\x02\x85\x03\xa5\x83\x65\x57\xf3\xae\xbc\xfb\xd5\x72\xd5\
\xb6\x65\xf5\xe1\xb3\xd4\x40\x97\xef\x5e\x37\x79\xe3\xc1\xad\x23\
\xb2\xfa\x24\x00\x00\x88\x24\xec\xe9\xa3\x62\xd2\x14\x10\x7c\xb5\
\xfe\xef\x47\x77\x7e\x6e\x9c\x5f\xbc\x50\xd1\xcb\xbd\xde\xfc\x60\
\x48\x8d\xb2\x55\x76\x64\xf4\x18\x19\x36\xd0\xf0\xa8\x08\xd7\x71\
\xcb\xa6\x6d\xbf\x76\x3b\xb0\x56\x76\x0b\x06\x00\x40\x64\xb7\x1f\
\xdc\x7d\x65\xfc\x8b\x9e\x27\x9d\x7f\xbf\x59\xc7\x31\x9d\x1a\xb7\
\x9d\x90\xd6\x6d\xd3\x6d\xa0\x4b\x77\xae\x99\xba\xf9\xaf\x6d\xc3\
\xcc\x5d\x20\x00\x00\xa2\x5b\xf5\xe7\x86\xf1\xd2\x34\xb3\xff\xc4\
\x6a\x9e\x45\x4a\x9e\x7b\xf9\xfa\x34\x1b\xe8\xb4\x35\x73\xd6\x1f\
\xb9\xe8\xdf\x31\x67\xcb\x03\x00\x40\x6c\x43\xe6\x8e\x3a\x3b\xb6\
\xc7\xb0\x56\xd5\xcb\x56\xd9\x99\x7c\xbe\xc9\x06\xba\xf5\xe8\xce\
\xc1\x34\x4f\x00\x00\xfe\x33\x71\xc5\x77\xbf\xaf\x19\xbb\xd8\x59\
\x6f\xa3\x8f\x35\xce\x33\xdd\x40\x8f\xec\x18\xac\x58\x55\x00\x00\
\x08\xee\x79\x42\x82\xfe\x8f\xe3\xbb\x07\xbc\x53\xb7\xf5\x0c\xe3\
\xbc\x54\x0d\x54\x5a\x69\xe8\xfe\xe3\x07\xa5\x94\x2d\x0d\x00\x00\
\xb1\x05\xde\x09\xae\x9e\xfc\x72\xaa\x06\x9a\xd7\x29\xcf\x43\x7b\
\x5b\xfb\xc8\x98\xb8\x74\xb6\xe9\x04\x00\xc0\xc2\x14\x72\x29\x78\
\x33\xf9\x65\x93\x8b\x70\x9b\x54\xaf\xbf\x6c\x87\xff\x9f\x9f\x2a\
\x53\x12\x00\x00\xe2\x6b\x58\xb5\xee\xca\xe4\x97\x4d\x36\xd0\x4f\
\xda\x7c\xf8\x99\x7f\xc0\xe9\xb6\x61\xe1\x8f\x8a\x29\x53\x16\x00\
\x00\xe2\xea\xda\xb4\xc3\x37\x25\xdd\x4a\x5c\x4c\x3e\x2f\xcd\xcd\
\x58\x16\x0f\x9f\x5d\xbc\xff\xac\x61\x57\xee\x3c\xbc\x57\x2e\xe7\
\x4b\x03\x00\x40\x4c\xdd\x9a\x77\x1c\xdd\xb1\x51\xdb\x89\x2f\xcf\
\x4f\x77\x47\x0a\x73\x07\x4f\x2b\xff\xdb\xe1\xed\x5f\xfc\xbc\x63\
\xd5\x77\x39\x57\x1a\x00\x00\xe2\x29\xe4\xe2\x7a\x63\x52\xdf\x31\
\x0d\x0a\xbf\x38\x35\x75\x7d\x86\xbb\xf2\x7b\xa7\x5e\xeb\x19\xd2\
\xf4\xe7\xe9\x03\xbd\xe6\x6d\x5e\xb2\x30\x21\x31\xc1\xc6\xfc\x65\
\x02\x00\x20\x86\xaa\xde\x95\x77\x0f\x6c\xdf\xb7\x97\x6b\xbe\x82\
\xb7\xd2\xbb\x5d\xa6\x77\x26\xdf\xac\x46\xa3\x25\xd2\x24\x9d\x3f\
\x1f\x14\xd0\xe4\xcf\x53\x07\x7a\x1d\xbb\x74\xb2\x3d\x6b\xeb\x02\
\x00\x72\xb3\x92\x6e\x25\x2e\x34\xa8\x5a\x67\x75\xab\x5a\xcd\xe6\
\xe7\x71\x74\x0e\xcb\xec\xfd\xfe\x0f\x0b\x73\x2b\xcb\x04\x87\x68\
\x0e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0b\
\x0a\x17\x74\xe0\
\x00\x62\
\x00\x72\x00\x65\x00\x65\x00\x64\x00\x65\x00\x72\x00\x5f\x00\x6d\x00\x61\x00\x70\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7f\xfd\xcd\xc3\xb8\
"
# Compare version components numerically; as strings, '10' < '8' would misfire
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
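# qInitResources() runs on import, so importing this module is enough to make
# the embedded icon available through the Qt resource system. A minimal usage
# sketch (the resource path is decoded from qt_resource_name above):
#
#   from PyQt5 import QtGui
#   icon = QtGui.QIcon(":/plugins/breeder_map/icon.png")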
| 62.634538
| 103
| 0.727131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46,184
| 0.987091
|
c6e972384085a17d4254d8b48954d37e8355bbe9
| 5,503
|
py
|
Python
|
api/telegram.py
|
ongzhixian/python-apps
|
11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6
|
[
"MIT"
] | null | null | null |
api/telegram.py
|
ongzhixian/python-apps
|
11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6
|
[
"MIT"
] | null | null | null |
api/telegram.py
|
ongzhixian/python-apps
|
11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import pdb
import re
from helpers.app_helpers import *
from helpers.page_helpers import *
from helpers.jinja2_helpers import *
from helpers.telegram_helpers import *
#from main import *
#from flask import request
################################################################################
# Setup helper functions
################################################################################
def get_machine_status(log_string):
    # Raw string so the regex escapes are not mangled by Python string escaping
    rdp_re = re.compile(r"Machine \[(?P<box_name>.+)\] RDP session has \[(?P<box_ip>.*)\]")
    result = rdp_re.match(log_string)
    if result is None:
        return None
    box_name = result.group("box_name")
    box_ip = result.group("box_ip").split(":")[0]  # drop the port, keep the IP
    return (box_name, box_ip)
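# Illustrative only (the log line below is made up to show the expected shape):
#   get_machine_status("Machine [DEVBOX01] RDP session has [10.1.2.3:50123]")
#   -> ("DEVBOX01", "10.1.2.3")
#   get_machine_status("some unrelated log line") -> None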
def get_box_statuses():
    outfile_path = os.path.join(os.getcwd(), os.path.relpath("static/data/box_statuses.json"))
    box_statuses = None
    if os.path.exists(outfile_path):
        # Read the cached statuses file
        with open(outfile_path, "rb") as outfile:
            json_data = outfile.read()
        box_statuses = json.loads(json_data)
    else:
        box_statuses = {}
    return box_statuses
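# Assumed on-disk shape of static/data/box_statuses.json, inferred from
# update_box_statuses() below (values illustrative):
#   {"DEVBOX01": {"status": "In use", "comment": "10.1.2.3"}}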
def save_box_statuses(box_statuses):
    logging.debug("IN save_box_statuses()")
    outfile_path = os.path.join(os.getcwd(), os.path.relpath("static/data/box_statuses.json"))
    # Write the statuses back to file
    try:
        with open(outfile_path, "w+") as outfile:
            outfile.write(json.dumps(box_statuses))
        logging.debug("Saved!")
    except Exception as ex:
        logging.error(ex)
def update_box_statuses(log_string):
    logging.debug("IN update_box_statuses()")
    result = get_machine_status(log_string)
    if result is not None:
        logging.debug("IN result is not None")
        # We got a machine status log entry; update the JSON file
        box_statuses = get_box_statuses()
        box_name = result[0]
        box_ip = result[1]
        logging.debug("box_name: %s, box_ip: %s" % (box_name, box_ip))
        # Update box_statuses.json (dict.has_key() was removed in Python 3)
        if box_name not in box_statuses:
            box_statuses[box_name] = {}
        box_statuses[box_name]["status"] = "In use" if len(box_ip) > 0 else "Available"
        box_statuses[box_name]["comment"] = box_ip
        save_box_statuses(box_statuses)
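# End-to-end sketch (the machine name and log line are made up):
#   update_box_statuses("Machine [DEVBOX01] RDP session has []")
# records {"DEVBOX01": {"status": "Available", "comment": ""}} in
# static/data/box_statuses.json.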
################################################################################
# Setup routes
################################################################################
@route('/api/telegram/updates', method='POST')
def api_telegram_plato_dev_post():
logging.debug("IN api_telegram_plato_dev_post()")
    # TODO(ZX): support building a proper Update object from the request body?
# logging.info("should dump content here")
json_data = request.json
if json_data is None:
return None
try:
logging.info(str(json_data))
        message_text = ""
        # dict.has_key() was removed in Python 3; use the "in" operator instead
        if "message" in json_data:
            message_text = json_data["message"]["text"]
        if "channel_post" in json_data:
            message_text = json_data["channel_post"]["text"]
        logging.debug("message_text is: " + message_text)
update_box_statuses(message_text)
except Exception as ex:
logging.error(ex)
#send_message(appconfig["telegram"]["token"], "53274105", "i received message")
#return json.dumps("api_telegram_plato_dev_post")
return str(json_data)
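# Sketch of the smallest update payload this handler acts on (field names per
# the Telegram Bot API; the text value is made up):
#   {"update_id": 1,
#    "channel_post": {"text": "Machine [DEVBOX01] RDP session has [10.1.2.3:50123]"}}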
@route('/api/telegram/brahman-devops/sendMessage', method='POST')
def api_telegram_plato_dev_send_message_post():
logging.debug("IN api_telegram_plato_dev_send_message_post()")
    chat_id = None
    message = None
    if 'chat_id' in request.json:
        chat_id = request.json['chat_id']
    if 'message' in request.json:
        message = request.json['message']
if chat_id is None or message is None:
response.set_header('Content-Type', 'application/json')
return json.dumps("{}")
json_response_string = send_message(appconfig["telegram"]["token"], chat_id, message)
json_response_object = json.loads(json_response_string)
response.set_header('Content-Type', 'application/json')
return json_response_object
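# Example request body for this endpoint (the chat_id below is a placeholder):
#   POST /api/telegram/brahman-devops/sendMessage
#   {"chat_id": "123456789", "message": "DEVBOX01 is free again"}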
@route('/api/telegram/setWebhook', method='POST')
def api_telegram_set_webhook_post():
logging.debug("IN api_telegram_set_webhook_post()")
json_data = set_webhook(appconfig["telegram"]["token"])
response.set_header('Content-Type', 'application/json')
return json_data
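# Assumption: set_webhook() (from helpers.telegram_helpers) wraps the Bot API
# setWebhook call, so Telegram will start POSTing updates to the
# /api/telegram/updates route above once this endpoint has been hit.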
@route('/api/telegram/getme', method='POST')
def api_telegram_getme_get():
    # Sample getMe response:
    #   {"ok": true, "result": {"username": "plato_dev_bot", "first_name": "plato-dev-bot", "is_bot": true, "id": 407476479}}
logging.debug("IN api_telegram_getme_get()")
json_data = get_me(appconfig["telegram"]["token"])
response.set_header('Content-Type', 'application/json')
return json_data
| 35.050955
| 123
| 0.623296
| 0
| 0
| 0
| 0
| 2,849
| 0.517718
| 0
| 0
| 2,223
| 0.403961
|