hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4d6035a4fc60599c146da678702a9ea1e5501f20 | 417 | py | Python | src/python/pants/backend/python/lint/black/register.py | tpasternak/pants | edf5716283d449852309fff1a10dd351dfbf3493 | [
"Apache-2.0"
] | 1 | 2021-05-05T18:58:28.000Z | 2021-05-05T18:58:28.000Z | src/python/pants/backend/python/lint/black/register.py | tpasternak/pants | edf5716283d449852309fff1a10dd351dfbf3493 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/lint/black/register.py | tpasternak/pants | edf5716283d449852309fff1a10dd351dfbf3493 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.lint import python_format_target, python_lint_target
from pants.backend.python.lint.black import rules as black_rules
| 27.8 | 78 | 0.731415 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.lint import python_format_target, python_lint_target
from pants.backend.python.lint.black import rules as black_rules
def rules():
    """Aggregate all rules needed to activate Black in the Python backend.

    Combines Black's own rules with the generic Python format/lint target
    rules so this backend can be registered as a single unit.
    """
    combined = []
    for module in (black_rules, python_format_target, python_lint_target):
        combined.extend(module.rules())
    return tuple(combined)
| 116 | 0 | 23 |
ecc5045a1aea6b00cccf6b261440eedff9b159c0 | 2,921 | py | Python | Thomson_spectrometer_charge_to_mass_number_def.py | Similarities/Thomson-Parabola-analytical-plot | bd2761960ace28068efcd7eafbb4520c44b43994 | [
"MIT"
] | null | null | null | Thomson_spectrometer_charge_to_mass_number_def.py | Similarities/Thomson-Parabola-analytical-plot | bd2761960ace28068efcd7eafbb4520c44b43994 | [
"MIT"
] | null | null | null | Thomson_spectrometer_charge_to_mass_number_def.py | Similarities/Thomson-Parabola-analytical-plot | bd2761960ace28068efcd7eafbb4520c44b43994 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 01 11:56:10 2015
@author: Zombie.Soc//similarities
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
print ("Reconstruction of Thomson spectrometer parabolas - to check parameters")
#Parameters
# !!! 0 point coordinates in px
x0 = 825
y0 = 305
B = 0.21 #magnetic field strength in Tesla
D = 0.670 # Drift of spectrometer
q = 1.6027E-19 # elemental charge q
l = 0.05 # lenght of magnet in m
Abbildung = 0.00004521 # magnification factor m/pix
mp = 1.673E-27 # mass proton
E1 = 3.985E3 # Potential of Efield in applied Voltage
Le = 0.0098 # distance of field plates in m
EF = E1/Le
v0 = 1E7 #v0 in m/s
print("charge to mass ratio?")
ZA= input("Z/A:" ) # charge to mass number of ion
# 0.355 +/- 0.002 trace1 between C4+ und O5+
# 0.344 +/- 0.002 trace2 between C4+ und O5+
# 0.466 +/- 0.005 trace between C6+ und O7+
#0.428 +/- 0. trace after O7+
#0.405 +/-0.002 trace after C+5
#0.32 +/- 0.005 trace after C4+
#0.28 +/-0.005 trace after O5+
#now plot:
img = mpimg.imread('20140131_012ions.jpg')
imshow(img)
lum_img = img[:, :]
imgplot = plt.imshow(lum_img)
# spectral, hot, cool, ... tip was falsches ein.. dann kommen vorschläge :)
imgplot.set_cmap('binary_r')
plt.plot(xp(x0),parabely(x0,ZA),"r")
# plot - loop for elements (carbon C12, CC, O16, 015, N14, N15...)
C12 = 1.0/12.0107
C24 = 0.5/12.0107
O16 = 1.0/15.9949
O15 = 1.0/15.0031
N14 = 1.0/14.0067
N15 = 1.0/15.00011
C13 = 1.0/13.03355
C17 = 1.0/16.0226
C14 = 1.0/14.0334
C15 = 1.0/15.0106
F19 = 1.0/18.9984
#CH=1.0/(1.007276*2+12.0107)
for i in range(2,6+1):
#plt.plot(xp(x0), parabely(x0,C17*(i)),"y")
#plt.plot(xp(x0), parabely(x0,O15*(i+1)),"y")
#plt.plot(xp(x0), parabely(x0,C13*(i)),"g")
#plt.plot(xp(x0), parabely(x0,C15*(i)),"r")
#plt.plot(xp(x0), parabely(x0,C24*(i+6)),"y")
#plt.plot(xp(x0), parabely(x0,C12*(i)),"w")
plt.plot(xp(x0), parabely(x0,C12*(i)),"y")
#plt.plot(xp(x0), parabely(x0,C12*(i)+0.0015),"y--")
plt.plot(xp(x0), parabely(x0,O16*(i+1)),"b")
# plt.plot(xp(x0), parabely(x0,O16*(i+1)+0.0015),"b--")
#plt.plot(xp(x0), parabely(x0,N14*(i)),"g")
#print parabely(x0,i)
#plt.ylabel('C+',"i")0
plt.axis([0, 1032, 200, 756])
#plt.legend("+")
plt.show()
###
raw_input('press Return>')
| 19.473333 | 81 | 0.5481 |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 01 11:56:10 2015
@author: Zombie.Soc//similarities
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
print ("Reconstruction of Thomson spectrometer parabolas - to check parameters")
#Parameters
# !!! 0 point coordinates in px
x0 = 825
y0 = 305
B = 0.21 #magnetic field strength in Tesla
D = 0.670 # Drift of spectrometer
q = 1.6027E-19 # elemental charge q
l = 0.05 # lenght of magnet in m
Abbildung = 0.00004521 # magnification factor m/pix
mp = 1.673E-27 # mass proton
E1 = 3.985E3 # Potential of Efield in applied Voltage
Le = 0.0098 # distance of field plates in m
EF = E1/Le
v0 = 1E7 #v0 in m/s
print("charge to mass ratio?")
ZA= input("Z/A:" ) # charge to mass number of ion
# 0.355 +/- 0.002 trace1 between C4+ und O5+
# 0.344 +/- 0.002 trace2 between C4+ und O5+
# 0.466 +/- 0.005 trace between C6+ und O7+
#0.428 +/- 0. trace after O7+
#0.405 +/-0.002 trace after C+5
#0.32 +/- 0.005 trace after C4+
#0.28 +/-0.005 trace after O5+
def xp(x1):
    """Return the pixel x-coordinates [x1, x1-1, ..., 1] in descending order.

    Mirrors the original accumulation loop: the first element is x1 itself,
    followed by x1 - 1 down to 1 (empty tail when x1 <= 1).
    """
    return [x1] + list(range(x1 - 1, 0, -1))
def parabely(x1,ZA):
    """Return pixel y-coordinates of a Thomson-parabola trace for charge-to-mass ZA.

    Uses the module-level spectrometer constants (y0, mp, EF, q, l, D, B,
    Abbildung).  Index i is the horizontal pixel offset from the zero point,
    so the returned list pairs element-wise with the output of xp(x1).
    """
    hh = [y0]
    # Parabola coefficient from the analytic spectrometer relation:
    # y ~ (m * E-field) / ((Z/A) * q * l * D * B^2) * x^2.
    const = (mp * EF / (ZA *q * l * D * B ** 2))
    for i in range (1, x1):
        # NOTE(review): Abbildung is the magnification in m/pixel; only a
        # single factor (not squared) is applied to the squared pixel offset
        # here -- confirm the intended unit conversion.
        hh.append(y0 + const * ((i) **2 ) * Abbildung)
    return hh
#plt.plot(xp(x0), parabely(Zmax,x0))
#now plot:
img = mpimg.imread('20140131_012ions.jpg')
imshow(img)
lum_img = img[:, :]
imgplot = plt.imshow(lum_img)
# spectral, hot, cool, ... tip was falsches ein.. dann kommen vorschläge :)
imgplot.set_cmap('binary_r')
plt.plot(xp(x0),parabely(x0,ZA),"r")
# plot - loop for elements (carbon C12, CC, O16, 015, N14, N15...)
C12 = 1.0/12.0107
C24 = 0.5/12.0107
O16 = 1.0/15.9949
O15 = 1.0/15.0031
N14 = 1.0/14.0067
N15 = 1.0/15.00011
C13 = 1.0/13.03355
C17 = 1.0/16.0226
C14 = 1.0/14.0334
C15 = 1.0/15.0106
F19 = 1.0/18.9984
#CH=1.0/(1.007276*2+12.0107)
for i in range(2,6+1):
#plt.plot(xp(x0), parabely(x0,C17*(i)),"y")
#plt.plot(xp(x0), parabely(x0,O15*(i+1)),"y")
#plt.plot(xp(x0), parabely(x0,C13*(i)),"g")
#plt.plot(xp(x0), parabely(x0,C15*(i)),"r")
#plt.plot(xp(x0), parabely(x0,C24*(i+6)),"y")
#plt.plot(xp(x0), parabely(x0,C12*(i)),"w")
plt.plot(xp(x0), parabely(x0,C12*(i)),"y")
#plt.plot(xp(x0), parabely(x0,C12*(i)+0.0015),"y--")
plt.plot(xp(x0), parabely(x0,O16*(i+1)),"b")
# plt.plot(xp(x0), parabely(x0,O16*(i+1)+0.0015),"b--")
#plt.plot(xp(x0), parabely(x0,N14*(i)),"g")
#print parabely(x0,i)
#plt.ylabel('C+',"i")0
plt.axis([0, 1032, 200, 756])
#plt.legend("+")
plt.show()
###
raw_input('press Return>')
| 349 | 0 | 58 |
c013bc222bc2bb135874674a676063550952f9bd | 3,470 | py | Python | tests/browser.py | nickmeet/kanvas | 0fed091aa1b4011fc688ecc074e8525dd550b31d | [
"Apache-2.0"
] | null | null | null | tests/browser.py | nickmeet/kanvas | 0fed091aa1b4011fc688ecc074e8525dd550b31d | [
"Apache-2.0"
] | 5 | 2020-02-12T00:00:34.000Z | 2021-06-10T19:37:45.000Z | tests/browser.py | nicosmaris/kanvas | 0fed091aa1b4011fc688ecc074e8525dd550b31d | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
import sys
from os.path import join, abspath, dirname
import logging
from selenium.webdriver.remote.remote_connection import LOGGER as ghostdriver_logger
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from locators import homePage
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from datetime import datetime
from time import sleep
logging.basicConfig(filename="python.log", level=logging.DEBUG)
| 41.807229 | 138 | 0.65562 | from selenium import webdriver
import sys
from os.path import join, abspath, dirname
import logging
from selenium.webdriver.remote.remote_connection import LOGGER as ghostdriver_logger
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from locators import homePage
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from datetime import datetime
from time import sleep
logging.basicConfig(filename="python.log", level=logging.DEBUG)
class Browser:
    """Thin wrapper around a Selenium PhantomJS WebDriver.

    Bundles driver construction/teardown, element lookup via the shared
    ``locators`` module, and page loading with retries.  On any failure a
    timestamped screenshot is saved and the test is failed via an assertion
    (see :meth:`shoot`).
    """

    def __init__(self):
        # The PhantomJS binary ships next to the repo root; it must be on the
        # path when the suite is launched from an IDE such as PyCharm.
        path = abspath(join(dirname(dirname(__file__)), "phantomjs-2.1.1-64"))
        sys.path.append(path)  # phantomjs needs to be in path when running from pycharm
        cap = dict(DesiredCapabilities.PHANTOMJS)
        # Spoof a desktop Chrome user agent so sites serve their full markup.
        cap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0 ")
        service_args=["--webdriver-loglevel=DEBUG", "--cookies-file=ghostdriver.cookies"]
        ghostdriver_logger.setLevel(logging.DEBUG)
        #self.driver = webdriver.Firefox()
        self.driver = webdriver.PhantomJS(executable_path=path, desired_capabilities=cap, service_args=service_args)
        # Single place to tune the three Selenium timeout knobs (seconds).
        self.driver.timeout = {  # adds field to use only one of these values for a timeout
            "implicit": 10,
            "explicit": 10,
            "page_load": 30
        }
        self.driver.implicitly_wait(self.driver.timeout["implicit"])
        self.driver.set_window_size(1280, 768)
        self.driver.maximize_window()
        self.driver.set_page_load_timeout(self.driver.timeout["page_load"])  # driver.get uses this timeout when calling requests.get

    def close(self):
        """Close the current window and shut the driver process down."""
        self.driver.close()
        self.driver.quit()

    def shoot(self, msg, exception):
        """Save a timestamped screenshot, then fail with *msg* + *exception*."""
        self.driver.get_screenshot_as_file('%s.png' % datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        assert False, msg + str(exception)

    def locate(self, locator):
        """
        :param locator: key of a dictionary at module locators
        :return: http://selenium-python.readthedocs.io/api.html#selenium.webdriver.remote.webelement.WebElement
        """
        element = None
        wait = WebDriverWait(self.driver, self.driver.timeout["explicit"])
        # The lookup strategy is encoded as the last word of the locator key.
        method = locator.split(' ')[-1].lower()
        if method == 'xpath':
            wait.until(expected_conditions.presence_of_element_located((By.XPATH, homePage[locator])))
            element = self.driver.find_element_by_xpath(homePage[locator])
        elif method == 'id':
            element = wait.until(expected_conditions.presence_of_element_located((By.ID, homePage[locator])))
            #element = self.driver.find_element_by_id(homePage[locator])
        else:
            assert False, "Unknown method for locator " + str(locator)
        return element

    def load(self, url, iframe=None):
        """Load *url*, retrying up to five times; optionally enter *iframe*.

        Returns the page source.  Fails the test (with a screenshot) if the
        page never loads or the iframe switch fails.
        """
        last_error = None
        for attempt in range(5):
            try:
                self.driver.get(url)
            except Exception as exc:
                # Bug fix: the original referenced an undefined name `e` when
                # reporting a load failure, raising NameError instead of the
                # intended screenshot+assert.  Capture the exception so the
                # failure report below can use it.
                last_error = exc
                sleep(2)
            else:
                last_error = None
                break
        if last_error is not None:
            self.shoot("Failed to load page ", last_error)
        if iframe:
            try:
                self.driver.switch_to.frame(self.locate(iframe))
            except Exception as e:
                self.shoot("Failed to switch to frame ", e)
        return self.driver.page_source
| 1,869 | 1,006 | 23 |
5c2419e80cf571fe31532b5945a81bf47c134391 | 8,043 | py | Python | research/object_detection_app/mlb_detection/prepare_numpy.py | szhaofelicia/models-1.13.0-app | 6213767f02ecf309d2af91989b4c081bf85f3bf9 | [
"Apache-2.0"
] | null | null | null | research/object_detection_app/mlb_detection/prepare_numpy.py | szhaofelicia/models-1.13.0-app | 6213767f02ecf309d2af91989b4c081bf85f3bf9 | [
"Apache-2.0"
] | null | null | null | research/object_detection_app/mlb_detection/prepare_numpy.py | szhaofelicia/models-1.13.0-app | 6213767f02ecf309d2af91989b4c081bf85f3bf9 | [
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import math
import os
import numpy as np
from PIL import Image
from PIL import ImageFile
import tensorflow.compat.v2 as tf
from absl import logging
from absl import app
from absl import flags
import cv2
flags.DEFINE_string('acivity', 'swing', 'The class of test images.')
flags.DEFINE_string('input_dir', '/media/felicia/Data/mlb-youtube/%s_videos/rm_noise/videos', 'Path to videos.')
flags.DEFINE_string('name', 'image_bbgame_swing', 'Name of the dataset being created. This will'
'be used as a prefix.')
flags.DEFINE_string('file_pattern', '*.mp4', 'Pattern used to searh for files'
'in the given directory.')
flags.DEFINE_string('label_file', None, 'Provide a corresponding labels file'
'that stores per-frame or per-sequence labels. This info'
'will get stored.')
flags.DEFINE_string('output_dir', '/media/felicia/Data/object_detection/data/%s/', 'Output directory where'
'tfrecords will be stored.')
flags.DEFINE_integer('files_per_shard', 50, 'Number of videos to store in a'
'shard.')
flags.DEFINE_boolean('rotate', False, 'Rotate videos by 90 degrees before'
'creating tfrecords')
flags.DEFINE_boolean('resize', True, 'Resize videos to a given size.')
flags.DEFINE_integer('width', 1280, 'Width of frames in the TFRecord.')
flags.DEFINE_integer('height', 720, 'Height of frames in the TFRecord.')
flags.DEFINE_list(
'frame_labels', '', 'Comma separated list of descriptions '
'for labels given on a per frame basis. For example: '
'winding_up,early_cocking,acclerating,follow_through')
flags.DEFINE_integer('action_label',0 , 'Action label of all videos.') # swing:0, ball:1, strike:2, foul:3, hit:4
flags.DEFINE_integer('expected_segments', -1, 'Expected number of segments.')
flags.DEFINE_integer('fps', 10, 'Frames per second of video. If 0, fps will be '
'read from metadata of video.') # Original:
FLAGS = flags.FLAGS
gfile = tf.io.gfile
def video_to_frames(video_filename, rotate, fps=0, resize=False,
width=224, height=224):
"""Returns all frames from a video.
Args:
video_filename: string, filename of video.
rotate: Boolean: if True, rotates video by 90 degrees.
fps: Integer, frames per second of video. If 0, it will be inferred from
metadata of video.
resize: Boolean, if True resizes images to given size.
width: Integer, Width of image.
height: Integer, Height of image.
Raises:
ValueError: if fps is greater than the rate of video.
"""
logging.info('Loading %s', video_filename)
cap = cv2.VideoCapture(video_filename)
if fps == 0:
fps = cap.get(cv2.CAP_PROP_FPS)
keep_frequency = 1
else:
if fps > cap.get(cv2.CAP_PROP_FPS):
raise ValueError('Cannot sample at a frequency higher than FPS of video')
keep_frequency = int(float(cap.get(cv2.CAP_PROP_FPS)) / fps)
frames = []
timestamps = []
counter = 0
if cap.isOpened():
while True:
success, frame_bgr = cap.read()
if not success:
break
if counter % keep_frequency == 0:
# Convert BGR to RGB
frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
if resize:
frame_rgb = cv2.resize(frame_rgb, (width, height))
if rotate:
frame_rgb = cv2.transpose(frame_rgb)
frame_rgb = cv2.flip(frame_rgb, 1)
frames.append(frame_rgb)
timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0)
counter += 1
return frames, timestamps, fps
def create_numpy(name, output_dir, input_dir, label_file, input_pattern,
files_per_shard, action_label, frame_labels,
expected_segments, orig_fps, rotate, resize, width,
height):
"""Create Numpy file from videos in a given path.
Args:
name: string, name of the dataset being created.
output_dir: string, path to output directory.
input_dir: string, path to input videos directory.
label_file: None or string, JSON file that contains annotations.
input_pattern: string, regex pattern to look up videos in directory.
files_per_shard: int, number of files to keep in each shard.
action_label: int, Label of actions in video.
frame_labels: list, list of string describing each class. Class label is
the index in list.
expected_segments: int, expected number of segments.
orig_fps: int, frame rate at which tfrecord will be created.
rotate: boolean, if True rotate videos by 90 degrees.
resize: boolean, if True resize to given height and width.
width: int, Width of frames.
height: int, Height of frames.
Raises:
ValueError: If invalid args are passed.
"""
labels={
'swing':0,'ball':1
}
ACTIVITY=FLAGS.acivity
LABEL=labels[ACTIVITY]
input_dir=input_dir%ACTIVITY
output_path=output_dir%ACTIVITY
if not gfile.exists(output_path):
logging.info('Creating output directory: %s', output_path)
gfile.makedirs(output_path)
if not isinstance(input_pattern, list):
file_pattern = os.path.join(input_dir, input_pattern)
filenames = [os.path.basename(x) for x in gfile.glob(file_pattern)]
else:
filenames = []
for file_pattern in input_pattern:
file_pattern = os.path.join(input_dir, file_pattern)
filenames += [os.path.basename(x) for x in gfile.glob(file_pattern)]
num_shards = int(math.ceil(len(filenames)/files_per_shard))
len_num_shards = len(str(num_shards))
shard_id = 0
image_minibatch=list()
step_minibatch=list()
label_minibatch=list()
video_minibatch=list()
print('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')
print('shard_id',shard_id)
for i, filename in enumerate(filenames):
frames, video_timestamps, _ = video_to_frames(
os.path.join(input_dir, filename),
rotate,
orig_fps,
resize=resize,
width=width,
height=height)
vid_name=os.path.splitext(filename)[0]
vid_name=str.encode(vid_name)
image_minibatch.append(frames)
# duration=video_timestamps[1]
steps=np.array([x for x in range(len(video_timestamps))])
# print(i,filename,steps,video_timestamps)
step_minibatch.append(steps)
labels=[LABEL]*len(steps)
label_minibatch.append(labels)
vids=[vid_name]*len(steps)
video_minibatch+=vids
if (i + 1) % files_per_shard == 0 or i == len(filenames) - 1:
# if shard_id==2:
output_filename = os.path.join(
output_path,
'%s-%s-of-%s.npy' % (name,
str(shard_id).zfill(len_num_shards),
str(num_shards).zfill(len_num_shards)))
image_minibatch=np.concatenate(image_minibatch,axis=0)
step_minibatch=np.concatenate(step_minibatch,axis=0)
label_minibatch=np.concatenate(label_minibatch,axis=0)
numpy_dict={
'images':image_minibatch, # np.array: B*H*W*3
'activity':label_minibatch, # np.array: B*1
'steps':step_minibatch, # np.array:B*1
'videos':video_minibatch,# list
}
with open(output_filename,'wb') as file:
np.save(file,numpy_dict)
shard_id += 1
image_minibatch=list()
step_minibatch=list()
label_minibatch=list()
video_minibatch=list()
# print('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')
# print('shard_id',shard_id)
if __name__ == '__main__':
app.run(main) | 34.519313 | 113 | 0.659704 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import math
import os
import numpy as np
from PIL import Image
from PIL import ImageFile
import tensorflow.compat.v2 as tf
from absl import logging
from absl import app
from absl import flags
import cv2
flags.DEFINE_string('acivity', 'swing', 'The class of test images.')
flags.DEFINE_string('input_dir', '/media/felicia/Data/mlb-youtube/%s_videos/rm_noise/videos', 'Path to videos.')
flags.DEFINE_string('name', 'image_bbgame_swing', 'Name of the dataset being created. This will'
'be used as a prefix.')
flags.DEFINE_string('file_pattern', '*.mp4', 'Pattern used to searh for files'
'in the given directory.')
flags.DEFINE_string('label_file', None, 'Provide a corresponding labels file'
'that stores per-frame or per-sequence labels. This info'
'will get stored.')
flags.DEFINE_string('output_dir', '/media/felicia/Data/object_detection/data/%s/', 'Output directory where'
'tfrecords will be stored.')
flags.DEFINE_integer('files_per_shard', 50, 'Number of videos to store in a'
'shard.')
flags.DEFINE_boolean('rotate', False, 'Rotate videos by 90 degrees before'
'creating tfrecords')
flags.DEFINE_boolean('resize', True, 'Resize videos to a given size.')
flags.DEFINE_integer('width', 1280, 'Width of frames in the TFRecord.')
flags.DEFINE_integer('height', 720, 'Height of frames in the TFRecord.')
flags.DEFINE_list(
'frame_labels', '', 'Comma separated list of descriptions '
'for labels given on a per frame basis. For example: '
'winding_up,early_cocking,acclerating,follow_through')
flags.DEFINE_integer('action_label',0 , 'Action label of all videos.') # swing:0, ball:1, strike:2, foul:3, hit:4
flags.DEFINE_integer('expected_segments', -1, 'Expected number of segments.')
flags.DEFINE_integer('fps', 10, 'Frames per second of video. If 0, fps will be '
'read from metadata of video.') # Original:
FLAGS = flags.FLAGS
gfile = tf.io.gfile
def video_to_frames(video_filename, rotate, fps=0, resize=False,
                    width=224, height=224):
  """Decode a video into RGB frames sampled at the requested rate.

  Args:
    video_filename: string, filename of video.
    rotate: Boolean: if True, rotates video by 90 degrees.
    fps: Integer, frames per second of video. If 0, it will be inferred from
      metadata of video.
    resize: Boolean, if True resizes images to given size.
    width: Integer, Width of image.
    height: Integer, Height of image.

  Returns:
    A (frames, timestamps, fps) tuple: list of RGB frames, their timestamps
    in seconds, and the effective sampling rate.

  Raises:
    ValueError: if fps is greater than the rate of video.
  """
  logging.info('Loading %s', video_filename)
  cap = cv2.VideoCapture(video_filename)
  native_fps = cap.get(cv2.CAP_PROP_FPS)
  if fps == 0:
    # Keep every decoded frame at the video's native rate.
    fps = native_fps
    keep_frequency = 1
  else:
    if fps > native_fps:
      raise ValueError('Cannot sample at a frequency higher than FPS of video')
    # Keep one frame out of every keep_frequency decoded frames.
    keep_frequency = int(float(native_fps) / fps)
  frames = []
  timestamps = []
  frame_index = 0
  if cap.isOpened():
    while True:
      success, frame_bgr = cap.read()
      if not success:
        break
      if frame_index % keep_frequency == 0:
        # OpenCV decodes BGR; convert to RGB before further processing.
        frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
        if resize:
          frame_rgb = cv2.resize(frame_rgb, (width, height))
        if rotate:
          # Transpose followed by a horizontal flip == 90 degree rotation.
          frame_rgb = cv2.transpose(frame_rgb)
          frame_rgb = cv2.flip(frame_rgb, 1)
        frames.append(frame_rgb)
        timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0)
      frame_index += 1
  return frames, timestamps, fps
def create_numpy(name, output_dir, input_dir, label_file, input_pattern,
                 files_per_shard, action_label, frame_labels,
                 expected_segments, orig_fps, rotate, resize, width,
                 height):
  """Create Numpy file from videos in a given path.

  Args:
    name: string, name of the dataset being created.
    output_dir: string, path to output directory.
    input_dir: string, path to input videos directory.
    label_file: None or string, JSON file that contains annotations.
    input_pattern: string, regex pattern to look up videos in directory.
    files_per_shard: int, number of files to keep in each shard.
    action_label: int, Label of actions in video.
    frame_labels: list, list of string describing each class. Class label is
      the index in list.
    expected_segments: int, expected number of segments.
    orig_fps: int, frame rate at which tfrecord will be created.
    rotate: boolean, if True rotate videos by 90 degrees.
    resize: boolean, if True resize to given height and width.
    width: int, Width of frames.
    height: int, Height of frames.

  Raises:
    ValueError: If invalid args are passed.
  """
  # NOTE(review): label_file, action_label, frame_labels and expected_segments
  # are accepted but never used below -- the label comes from FLAGS.acivity.
  labels={
      'swing':0,'ball':1
  }
  ACTIVITY=FLAGS.acivity
  LABEL=labels[ACTIVITY]
  # Both directory templates contain a %s placeholder for the activity name.
  input_dir=input_dir%ACTIVITY
  output_path=output_dir%ACTIVITY
  if not gfile.exists(output_path):
    logging.info('Creating output directory: %s', output_path)
    gfile.makedirs(output_path)
  # input_pattern may be a single glob string or a list of glob strings.
  if not isinstance(input_pattern, list):
    file_pattern = os.path.join(input_dir, input_pattern)
    filenames = [os.path.basename(x) for x in gfile.glob(file_pattern)]
  else:
    filenames = []
    for file_pattern in input_pattern:
      file_pattern = os.path.join(input_dir, file_pattern)
      filenames += [os.path.basename(x) for x in gfile.glob(file_pattern)]
  num_shards = int(math.ceil(len(filenames)/files_per_shard))
  len_num_shards = len(str(num_shards))
  shard_id = 0
  # Per-shard accumulators, flushed to disk every files_per_shard videos.
  image_minibatch=list()
  step_minibatch=list()
  label_minibatch=list()
  video_minibatch=list()
  print('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')
  print('shard_id',shard_id)
  for i, filename in enumerate(filenames):
    frames, video_timestamps, _ = video_to_frames(
        os.path.join(input_dir, filename),
        rotate,
        orig_fps,
        resize=resize,
        width=width,
        height=height)
    # Store the video id as bytes so it serializes cleanly into the shard.
    vid_name=os.path.splitext(filename)[0]
    vid_name=str.encode(vid_name)
    image_minibatch.append(frames)
    # duration=video_timestamps[1]
    # One sequential step index per extracted frame.
    steps=np.array([x for x in range(len(video_timestamps))])
    # print(i,filename,steps,video_timestamps)
    step_minibatch.append(steps)
    # Every frame of this video shares the activity label.
    labels=[LABEL]*len(steps)
    label_minibatch.append(labels)
    vids=[vid_name]*len(steps)
    video_minibatch+=vids
    # Flush a shard after files_per_shard videos (or at the last video).
    if (i + 1) % files_per_shard == 0 or i == len(filenames) - 1:
      # if shard_id==2:
      output_filename = os.path.join(
          output_path,
          '%s-%s-of-%s.npy' % (name,
                               str(shard_id).zfill(len_num_shards),
                               str(num_shards).zfill(len_num_shards)))
      image_minibatch=np.concatenate(image_minibatch,axis=0)
      step_minibatch=np.concatenate(step_minibatch,axis=0)
      label_minibatch=np.concatenate(label_minibatch,axis=0)
      numpy_dict={
          'images':image_minibatch, # np.array: B*H*W*3
          'activity':label_minibatch, # np.array: B*1
          'steps':step_minibatch, # np.array:B*1
          'videos':video_minibatch,# list
      }
      # np.save pickles the dict; loading it back requires allow_pickle=True.
      with open(output_filename,'wb') as file:
        np.save(file,numpy_dict)
      shard_id += 1
      image_minibatch=list()
      step_minibatch=list()
      label_minibatch=list()
      video_minibatch=list()
      # print('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')
      # print('shard_id',shard_id)
def main(_):
  """CLI entry point: forward all command-line flags to create_numpy."""
  create_numpy(name=FLAGS.name,
               output_dir=FLAGS.output_dir,
               input_dir=FLAGS.input_dir,
               label_file=FLAGS.label_file,
               input_pattern=FLAGS.file_pattern,
               files_per_shard=FLAGS.files_per_shard,
               action_label=FLAGS.action_label,
               frame_labels=FLAGS.frame_labels,
               expected_segments=FLAGS.expected_segments,
               orig_fps=FLAGS.fps,
               rotate=FLAGS.rotate,
               resize=FLAGS.resize,
               width=FLAGS.width,
               height=FLAGS.height)
if __name__ == '__main__':
app.run(main) | 309 | 0 | 23 |
dbed26663e7828bd1d7de6032d7366ca4fa81a22 | 3,386 | py | Python | crypto/crypto_test_old.py | JohnOmernik/raspfarm | b187a81adea95dd39fe804a238105191f784c9be | [
"Apache-2.0"
] | null | null | null | crypto/crypto_test_old.py | JohnOmernik/raspfarm | b187a81adea95dd39fe804a238105191f784c9be | [
"Apache-2.0"
] | null | null | null | crypto/crypto_test_old.py | JohnOmernik/raspfarm | b187a81adea95dd39fe804a238105191f784c9be | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import sys
import os
keysdir = "./keys"
pubdir = keysdir + "/public"
pvtdir = keysdir + "/private"
pubkey = pubdir + "/mypub.pem"
pvtkey = pvtdir + "/mypvt.pem"
srvkey = pubdir + "/srvpub.pem"
public_key = ""
private_key = ""
server_public_key = ""
if __name__ == "__main__":
main()
| 35.642105 | 181 | 0.700827 | #!/usr/bin/python3
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import sys
import os
keysdir = "./keys"
pubdir = keysdir + "/public"
pvtdir = keysdir + "/private"
pubkey = pubdir + "/mypub.pem"
pvtkey = pvtdir + "/mypvt.pem"
srvkey = pubdir + "/srvpub.pem"
public_key = ""
private_key = ""
server_public_key = ""
def main():
    """Demo flow: ensure key dirs, load keys, sign a message with our private
    key, then encrypt message+signature for the server with its public key."""
    print("Hello")
    chkdirs()
    loadkeys()
    msg = b"Hello, my name is encryption"
    # RSA-PSS signature with SHA-256 over the raw message bytes.
    msg_signature = private_key.sign(msg,padding.PSS(mgf=padding.MGF1(hashes.SHA256()),salt_length=padding.PSS.MAX_LENGTH),hashes.SHA256())
    print("Message: %s" % msg)
    print("Message Length: %s" % len(msg))
    print("Message Signature Length: %s" % len(msg_signature))
    # NOTE(review): RSA-OAEP with a 2048-bit key and SHA-256 can encrypt at
    # most 190 bytes of plaintext, but msg + b"\n" + signature is larger than
    # that here -- this call presumably raises ValueError at runtime; confirm.
    # Hybrid encryption (encrypting a symmetric key) is the usual fix.
    encrypted_msg = server_public_key.encrypt(msg + b"\n" + msg_signature, padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),algorithm=hashes.SHA256(),label=None))
    print("Encrypted Message Length: %s " % len(encrypted_msg))
def chkdirs():
    """Ensure the key directories exist, creating any that are missing."""
    for path in (keysdir, pubdir, pvtdir):
        if not os.path.isdir(path):
            print("Directory of %s does not exist: Creating" % path)
            os.mkdir(path)
        else:
            print("Directory %s exists" % path)
def loadkeys():
    """Load (or generate on first run) this node's RSA keypair and the
    server's public key, storing them in the module-level globals."""
    global public_key
    global private_key
    global server_public_key
    if not os.path.exists(pvtkey) and not os.path.exists(pubkey):
        # First run: generate a fresh 2048-bit RSA keypair and persist both
        # halves as unencrypted PEM files.
        print("Public and Private Key pairs do not exist. Generating")
        my_private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
        my_public_key = my_private_key.public_key()
        my_private_pem = my_private_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.PKCS8,encryption_algorithm=serialization.NoEncryption())
        my_public_pem = my_public_key.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
        with open(pvtkey, 'wb') as f:
            f.write(my_private_pem)
            f.close()  # redundant: the with-block already closes the file
        with open(pubkey, 'wb') as f:
            f.write(my_public_pem)
            f.close()  # redundant: the with-block already closes the file
    elif os.path.exists(pvtkey) and os.path.exists(pubkey):
        # Both PEM files exist: load them back from disk.
        with open(pvtkey, "rb") as key_file:
            my_private_key = serialization.load_pem_private_key(key_file.read(),password=None,backend=default_backend())
        with open(pubkey, "rb") as key_file:
            my_public_key = serialization.load_pem_public_key(key_file.read(),backend=default_backend())
    elif not os.path.exists(pvtkey) or not os.path.exists(pubkey):
        # Exactly one of the pair exists -- refuse to guess and bail out.
        print("Either the public or private key exists, and the opposite does not. We exit here for you to fix")
        sys.exit(1)
    public_key = my_public_key
    private_key = my_private_key
    if os.path.exists(srvkey):
        with open(srvkey, "rb") as key_file:
            my_server_public_key = serialization.load_pem_public_key(key_file.read(),backend=default_backend())
        server_public_key = my_server_public_key
    else:
        print("WARNING - Server Public Key file not found at %s Stuff probably won't work" % srvkey)
if __name__ == "__main__":
main()
| 2,717 | 0 | 68 |
9149780a2fe4d9f493fbf30d587e4c87a5240349 | 1,645 | py | Python | backend/vehicules/migrations/0001_initial.py | RodrigoBLima/parking-project | 4444033980aec03f0cd7ba1947b24d487bff9131 | [
"MIT"
] | null | null | null | backend/vehicules/migrations/0001_initial.py | RodrigoBLima/parking-project | 4444033980aec03f0cd7ba1947b24d487bff9131 | [
"MIT"
] | 3 | 2021-09-12T01:03:31.000Z | 2022-02-27T06:47:30.000Z | backend/vehicules/migrations/0001_initial.py | RodrigoBLima/parking-project | 4444033980aec03f0cd7ba1947b24d487bff9131 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-15 22:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 47 | 198 | 0.647416 | # Generated by Django 3.0.7 on 2020-06-15 22:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the ``Veiculos`` (vehicles) table.

    Auto-generated by Django; edit with care so the recorded migration
    state stays consistent with the models.
    """

    initial = True

    dependencies = [
        # Needs the employees app's initial migration (FK to Empregados)
        # and the project's swappable user model (FK below).
        ('employees', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Veiculos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('placa', models.CharField(max_length=80, verbose_name='Placa veiculo')),
                ('modelo', models.CharField(max_length=80, verbose_name='Modelo do carro')),
                ('marca', models.CharField(max_length=80, verbose_name='Marca do veiculo')),
                ('ano', models.CharField(max_length=80, verbose_name='Ano de fabricacao')),
                ('cor', models.CharField(max_length=80, verbose_name='Cor do veiculo')),
                ('proprietario', models.CharField(max_length=80, verbose_name='Nome do dono')),
                ('h_entrada', models.DateTimeField()),
                ('h_saida', models.DateTimeField()),
                # NOTE(review): 'idEstacionamento' points at AUTH_USER_MODEL, so the
                # "parking lot" is modeled as a user account — confirm intent.
                ('idEstacionamento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='establishment_vei', to=settings.AUTH_USER_MODEL, verbose_name='Estacionamento id')),
                ('idFuncionario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employee_id', to='employees.Empregados', verbose_name='Funcionario id')),
            ],
        ),
    ]
| 0 | 1,465 | 23 |
7cbca312c6107409ee289dcff735a96fab311dd3 | 733 | py | Python | projects/speech_translation/lr_scheduler/subepoch_lr_scheduler.py | tran-khoa/fairseq | 558366b3c6970a5dd85ad1909581d43e41fdce9f | [
"MIT"
] | null | null | null | projects/speech_translation/lr_scheduler/subepoch_lr_scheduler.py | tran-khoa/fairseq | 558366b3c6970a5dd85ad1909581d43e41fdce9f | [
"MIT"
] | null | null | null | projects/speech_translation/lr_scheduler/subepoch_lr_scheduler.py | tran-khoa/fairseq | 558366b3c6970a5dd85ad1909581d43e41fdce9f | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
STEP_MODE_CHOICE = ChoiceEnum(['epoch', 'subepoch'])
@dataclass
| 26.178571 | 84 | 0.697135 | from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
STEP_MODE_CHOICE = ChoiceEnum(['epoch', 'subepoch'])
@dataclass
class SubepochLRSchedulerConfig(FairseqDataclass):
step_mode: STEP_MODE_CHOICE = field(
default="subepoch",
metadata={
"help": "lr scheduler step will be executed after each {subepoch,epoch}"
},
)
class SubepochLRScheduler(ABC):
def __init__(self, cfg, *args, **kwargs):
assert hasattr(cfg, 'step_mode')
self.step_mode = cfg.step_mode
@abstractmethod
def step_subepoch(self, subepoch: int, val_loss: float) -> float:
raise NotImplementedError()
| 180 | 303 | 45 |
853dc461e03b4829448d8970e799270c6aec43d6 | 179 | py | Python | Exercicios/ex006.py | Vitmambro/Python9 | d084e6fd8230b71e4dade87086a411210e131320 | [
"MIT"
] | null | null | null | Exercicios/ex006.py | Vitmambro/Python9 | d084e6fd8230b71e4dade87086a411210e131320 | [
"MIT"
] | null | null | null | Exercicios/ex006.py | Vitmambro/Python9 | d084e6fd8230b71e4dade87086a411210e131320 | [
"MIT"
] | null | null | null | n = int(input('Digite um numero: '))
# Double and triple of the number read above.
a = n * 2
b = n * 3
# Square root via exponent 1/2.
c = n ** (1/2)
# User-facing output is intentionally in Portuguese.
print('O dobro do numero seria {} \n O triplo seria {} \n A raiz quadrada seria {:.2f}'.format(a, b, c))
| 22.375 | 104 | 0.558659 | n = int(input('Digite um numero: '))
# Double and triple of the number read above.
a = n * 2
b = n * 3
# Square root via exponent 1/2.
c = n ** (1/2)
# User-facing output is intentionally in Portuguese.
print('O dobro do numero seria {} \n O triplo seria {} \n A raiz quadrada seria {:.2f}'.format(a, b, c))
| 0 | 0 | 0 |
1bed624c8eee0da3cdb05b47bd146d6d0a02a7d0 | 342 | py | Python | Feature/corner_fast.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Feature/corner_fast.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Feature/corner_fast.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | # FAST拐点检测,拐点检测Opencv有七种,且比skimage的方法快的多
from skimage import data, feature, img_as_float, io
import numpy as np
# Load the test CT image as a float image.
image = img_as_float(io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/10.jpg'))
# FAST corner response map (variable name is misleading: no gamma correction
# happens here), then non-maximum suppression to get corner coordinates.
gamma_corrected = feature.corner_fast(image)
points = feature.corner_peaks(gamma_corrected)
print(points)
io.imshow(image)
io.show()
| 31.090909 | 95 | 0.812865 | # FAST拐点检测,拐点检测Opencv有七种,且比skimage的方法快的多
from skimage import data, feature, img_as_float, io
import numpy as np
# Load the test CT image as a float image.
image = img_as_float(io.imread('/home/qiao/PythonProjects/Scikit-image_On_CT/Test_Img/10.jpg'))
# FAST corner response map (variable name is misleading: no gamma correction
# happens here), then non-maximum suppression to get corner coordinates.
gamma_corrected = feature.corner_fast(image)
points = feature.corner_peaks(gamma_corrected)
print(points)
io.imshow(image)
io.show()
| 0 | 0 | 0 |
4f5c7ba674e75571c35d8a525e280d946419130f | 2,090 | py | Python | hpimdm/packet/ReceivedPacket.py | pedrofran12/hpim_dm | fe949294b5e75ab544dcd40ff51ceafc1d3b2f0c | [
"MIT"
] | 1 | 2020-02-04T20:59:03.000Z | 2020-02-04T20:59:03.000Z | hpimdm/packet/ReceivedPacket.py | pedrofran12/hpim_dm | fe949294b5e75ab544dcd40ff51ceafc1d3b2f0c | [
"MIT"
] | 3 | 2020-06-09T16:37:01.000Z | 2021-08-30T00:31:12.000Z | hpimdm/packet/ReceivedPacket.py | pedrofran12/hpim_dm | fe949294b5e75ab544dcd40ff51ceafc1d3b2f0c | [
"MIT"
] | 1 | 2020-11-23T06:47:46.000Z | 2020-11-23T06:47:46.000Z | import socket
from .Packet import Packet
from .PacketIpHeader import PacketIpv4Header, PacketIpv6Header
from hpimdm.tree.hpim_globals import MSG_FORMAT
from hpimdm.utils import TYPE_CHECKING
if TYPE_CHECKING:
from hpimdm.Interface import Interface
if MSG_FORMAT == "BINARY":
from .PacketHPIMHeader import PacketHPIMHeader, PacketHPIMHeader_v6
else:
from .PacketHPIMHeader import PacketHPIMHeaderJson as PacketHPIMHeader
from .PacketHPIMHeader import PacketHPIMHeaderJson as PacketHPIMHeader_v6
| 40.192308 | 117 | 0.742105 | import socket
from .Packet import Packet
from .PacketIpHeader import PacketIpv4Header, PacketIpv6Header
from hpimdm.tree.hpim_globals import MSG_FORMAT
from hpimdm.utils import TYPE_CHECKING
if TYPE_CHECKING:
from hpimdm.Interface import Interface
if MSG_FORMAT == "BINARY":
from .PacketHPIMHeader import PacketHPIMHeader, PacketHPIMHeader_v6
else:
from .PacketHPIMHeader import PacketHPIMHeaderJson as PacketHPIMHeader
from .PacketHPIMHeader import PacketHPIMHeaderJson as PacketHPIMHeader_v6
class ReceivedPacket(Packet):
    """An inbound IPv4 packet parsed from raw bytes received on an interface."""

    # choose payload protocol class based on ip protocol number
    # (only protocol number 103 is supported; it maps to the HPIM parser).
    payload_protocol = {103: PacketHPIMHeader}

    def __init__(self, raw_packet: bytes, interface: 'Interface'):
        """Parse *raw_packet* (IP header + payload) received on *interface*.

        Raises KeyError if the IP protocol number is not in payload_protocol.
        """
        self.interface = interface
        # Parse packet and fill Packet super class
        ip_header = PacketIpv4Header.parse_bytes(raw_packet)
        protocol_number = ip_header.proto
        # Strip the IP header; the remainder is the protocol payload.
        packet_without_ip_hdr = raw_packet[ip_header.hdr_length:]
        payload = ReceivedPacket.payload_protocol[protocol_number].parse_bytes(packet_without_ip_hdr)
        super().__init__(ip_header=ip_header, payload=payload)
class ReceivedPacket_v6(Packet):
    """An inbound IPv6 packet built from payload bytes plus recvmsg ancillary data."""

    # choose payload protocol class based on ip protocol number
    payload_protocol_v6 = {103: PacketHPIMHeader_v6}

    def __init__(self, raw_packet: bytes, ancdata: list, src_addr: str, next_header: int, interface: 'Interface'):
        """Build the packet; *raw_packet* here is the payload only (no IP header).

        NOTE(review): despite the ``str`` annotation, *src_addr* is indexed
        like a tuple below — presumably the address tuple from recvmsg;
        confirm against the caller.
        """
        self.interface = interface
        # Parse packet and fill Packet super class
        dst_addr = "::"
        # Destination address comes from the IPV6_PKTINFO ancillary message;
        # its first 16 bytes are the destination IPv6 address.
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if cmsg_level == socket.IPPROTO_IPV6 and cmsg_type == socket.IPV6_PKTINFO:
                dst_addr = socket.inet_ntop(socket.AF_INET6, cmsg_data[:16])
                break
        # Keep only the host part and drop any "%scope" suffix.
        src_addr = src_addr[0].split("%")[0]
        ipv6_packet = PacketIpv6Header(ver=6, hop_limit=1, next_header=next_header, ip_src=src_addr, ip_dst=dst_addr)
        payload = ReceivedPacket_v6.payload_protocol_v6[next_header].parse_bytes(raw_packet)
        super().__init__(ip_header=ipv6_packet, payload=payload)
| 1,229 | 301 | 46 |
aa36ea2d7b5f61fd6a7f233730049f57e2580dd0 | 2,349 | py | Python | sensor/scripts/top_phat_button.py | steviebd/bme680-container | 9d3fb10e39d70f1e287ba3003f5567295b3da946 | [
"MIT"
] | null | null | null | sensor/scripts/top_phat_button.py | steviebd/bme680-container | 9d3fb10e39d70f1e287ba3003f5567295b3da946 | [
"MIT"
] | null | null | null | sensor/scripts/top_phat_button.py | steviebd/bme680-container | 9d3fb10e39d70f1e287ba3003f5567295b3da946 | [
"MIT"
] | null | null | null | import time
import RPi.GPIO as GPIO #Python Package Reference: https://pypi.org/project/RPi.GPIO/
# Code taken from https://learn.sparkfun.com/tutorials/raspberry-pi-safe-reboot-and-shutdown-button using the last example to minimize resources used (sleep function)
# Pin definition
reset_shutdown_pin = 17
# Suppress warnings
GPIO.setwarnings(False)
# Use "GPIO" pin numbering
GPIO.setmode(GPIO.BCM)
# Use built-in internal pullup resistor so the pin is not floating
# if using a momentary push button without a resistor.
#GPIO.setup(reset_shutdown_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Use Qwiic pHAT's pullup resistor so that the pin is not floating
GPIO.setup(reset_shutdown_pin, GPIO.IN)
# modular function to restart Pi
# modular function to shutdown Pi
# NOTE(review): shut_down() and restart() are called below but not defined
# in this copy — they must be provided before this loop runs; confirm.
while True:
    #short delay, otherwise this code will take up a lot of the Pi's processing power
    time.sleep(1)
    # wait for a button press with switch debounce on the falling edge so that this script
    # is not taking up too many resources in order to shutdown/reboot the Pi safely
    channel = GPIO.wait_for_edge(reset_shutdown_pin, GPIO.FALLING, bouncetime=200)
    if channel is None:
        print('Timeout occurred')
    else:
        print('Edge detected on channel', channel)
        # For troubleshooting, uncomment this line to output button status on command line
        #print('GPIO state is = ', GPIO.input(reset_shutdown_pin))
        counter = 0
        # Count half-seconds while the (active-low) button is held down.
        while GPIO.input(reset_shutdown_pin) == False:
            # For troubleshooting, uncomment this line to view the counter. If it reaches a value above 4, we will restart.
            #print(counter)
            counter += 1
            time.sleep(0.5)
        # long button press
        if counter > 4:
            shut_down()
        #if short button press, restart!
        restart()
| 32.625 | 166 | 0.696041 | import time
import RPi.GPIO as GPIO #Python Package Reference: https://pypi.org/project/RPi.GPIO/
# Code taken from https://learn.sparkfun.com/tutorials/raspberry-pi-safe-reboot-and-shutdown-button using the last example to minimize resources used (sleep function)
# Pin definition
reset_shutdown_pin = 17
# Suppress warnings
GPIO.setwarnings(False)
# Use "GPIO" pin numbering
GPIO.setmode(GPIO.BCM)
# Use built-in internal pullup resistor so the pin is not floating
# if using a momentary push button without a resistor.
#GPIO.setup(reset_shutdown_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Use Qwiic pHAT's pullup resistor so that the pin is not floating
GPIO.setup(reset_shutdown_pin, GPIO.IN)
# modular function to restart Pi
def restart():
    """Reboot the Pi immediately via the system shutdown command."""
    print("restarting Pi")
    import subprocess
    argv = "/usr/bin/sudo /sbin/shutdown -r now".split()
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
    captured = proc.communicate()[0]
    print(captured)
# modular function to shutdown Pi
def shut_down():
    """Power the Pi off immediately via the system shutdown command."""
    print("shutting down")
    import subprocess
    argv = "/usr/bin/sudo /sbin/shutdown -h now".split()
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
    captured = proc.communicate()[0]
    print(captured)
while True:
    #short delay, otherwise this code will take up a lot of the Pi's processing power
    time.sleep(1)
    # wait for a button press with switch debounce on the falling edge so that this script
    # is not taking up too many resources in order to shutdown/reboot the Pi safely
    channel = GPIO.wait_for_edge(reset_shutdown_pin, GPIO.FALLING, bouncetime=200)
    if channel is None:
        print('Timeout occurred')
    else:
        print('Edge detected on channel', channel)
        # For troubleshooting, uncomment this line to output button status on command line
        #print('GPIO state is = ', GPIO.input(reset_shutdown_pin))
        counter = 0
        # Count half-seconds while the (active-low) button is held down.
        while GPIO.input(reset_shutdown_pin) == False:
            # For troubleshooting, uncomment this line to view the counter. If it reaches a value above 4, we will restart.
            #print(counter)
            counter += 1
            time.sleep(0.5)
        # long button press
        if counter > 4:
            shut_down()
        #if short button press, restart!
        restart()
| 446 | 0 | 44 |
a021d9318c9c0717bf8b97d0ce6bd22979592ede | 755 | py | Python | src/adafruit-circuitpython-bundle-4.x-mpy-20190713/examples/thermistor_simpletest.py | mbaaba/solar_panel | 42059d8c61320494ad1298065dbc50cd9b3bd51e | [
"MIT"
] | 1 | 2020-04-13T16:10:53.000Z | 2020-04-13T16:10:53.000Z | infra/libs-400rc2-20190512/examples/thermistor_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | infra/libs-400rc2-20190512/examples/thermistor_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | import time
import board
import adafruit_thermistor
# these values work with the Adafruit CircuitPlayground Express.
# they may work with other thermistors as well, as they're fairly standard,
# though the pin will likely need to change (ie board.A1)
# pylint: disable=no-member
pin = board.TEMPERATURE
# Circuit constants: series resistor and nominal thermistor resistance in
# ohms, nominal temperature in Celsius, and the thermistor B coefficient.
resistor = 10000
resistance = 10000
nominal_temp = 25
b_coefficient = 3950
thermistor = adafruit_thermistor.Thermistor(pin, resistor, resistance, nominal_temp, b_coefficient)
# print the temperature in C and F to the serial console every second
while True:
    celsius = thermistor.temperature
    fahrenheit = (celsius * 9 / 5) + 32
    print('== Temperature ==\n{} *C\n{} *F\n'.format(celsius, fahrenheit))
    time.sleep(1)
| 32.826087 | 100 | 0.73245 | import time
import board
import adafruit_thermistor
# these values work with the Adafruit CircuitPlayground Express.
# they may work with other thermistors as well, as they're fairly standard,
# though the pin will likely need to change (ie board.A1)
# pylint: disable=no-member
pin = board.TEMPERATURE
# Circuit constants: series resistor and nominal thermistor resistance in
# ohms, nominal temperature in Celsius, and the thermistor B coefficient.
resistor = 10000
resistance = 10000
nominal_temp = 25
b_coefficient = 3950
thermistor = adafruit_thermistor.Thermistor(pin, resistor, resistance, nominal_temp, b_coefficient)
# print the temperature in C and F to the serial console every second
while True:
    celsius = thermistor.temperature
    fahrenheit = (celsius * 9 / 5) + 32
    print('== Temperature ==\n{} *C\n{} *F\n'.format(celsius, fahrenheit))
    time.sleep(1)
| 0 | 0 | 0 |
9e68d50f8a29ef3849f51edcab4ecba6940dc350 | 187 | py | Python | setup.py | NikNakk/jellylanguage | 40cc217ec50272370867fc5ea4fbd5c6126f1c11 | [
"MIT"
] | 392 | 2018-04-10T14:12:02.000Z | 2022-03-27T11:44:16.000Z | setup.py | NikNakk/jellylanguage | 40cc217ec50272370867fc5ea4fbd5c6126f1c11 | [
"MIT"
] | 33 | 2015-12-05T22:55:40.000Z | 2018-01-13T18:04:23.000Z | setup.py | NikNakk/jellylanguage | 40cc217ec50272370867fc5ea4fbd5c6126f1c11 | [
"MIT"
] | 58 | 2015-12-12T16:57:14.000Z | 2018-02-27T00:10:15.000Z | from distutils.core import setup
# Packaging metadata for the Jelly language; installs the 'jelly' package,
# the 'scripts/jelly' launcher, and depends on sympy at runtime.
setup(
    name = 'jellylanguage',
    version = '0.1.31',
    packages = [
        'jelly'
    ],
    scripts = [
        'scripts/jelly'
    ],
    install_requires = [
        'sympy'
    ]
)
| 11.6875 | 32 | 0.609626 | from distutils.core import setup
# Packaging metadata for the Jelly language; installs the 'jelly' package,
# the 'scripts/jelly' launcher, and depends on sympy at runtime.
setup(
    name = 'jellylanguage',
    version = '0.1.31',
    packages = [
        'jelly'
    ],
    scripts = [
        'scripts/jelly'
    ],
    install_requires = [
        'sympy'
    ]
)
| 0 | 0 | 0 |
e690d7a20c2b4f9af76c7b48a269e64c901a832f | 283 | py | Python | setup.py | duranbe/lev | 36a9199469480bbd343e1a95c1628ea7aed1a7c4 | [
"MIT"
] | 2 | 2021-07-03T21:52:53.000Z | 2021-07-05T15:57:06.000Z | setup.py | duranbe/lev | 36a9199469480bbd343e1a95c1628ea7aed1a7c4 | [
"MIT"
] | null | null | null | setup.py | duranbe/lev | 36a9199469480bbd343e1a95c1628ea7aed1a7c4 | [
"MIT"
] | null | null | null | from distutils.core import setup, Extension
if __name__ == "__main__":
main() | 25.727273 | 77 | 0.724382 | from distutils.core import setup, Extension
def main():
    """Packaging entry point: register the levenshtein C extension with distutils."""
    extension = Extension("levenshtein", ["src/levenshtein.c"])
    metadata = dict(
        name="levenshtein",
        version="0.0.1",
        description="Levenshtein Distance implemented as C Extension for Python 3",
        ext_modules=[extension],
    )
    setup(**metadata)
if __name__ == "__main__":
main() | 181 | 0 | 23 |
dda5c39d720198b73ef58145deafa5e98f64b03d | 6,587 | py | Python | src/bxgateway/services/block_queuing_service_manager.py | doubleukay/bxgateway | ac01fc9475c039cf4255576dd4ecd6bff6c48f69 | [
"MIT"
] | 21 | 2019-11-06T17:37:41.000Z | 2022-03-28T07:18:33.000Z | src/bxgateway/services/block_queuing_service_manager.py | doubleukay/bxgateway | ac01fc9475c039cf4255576dd4ecd6bff6c48f69 | [
"MIT"
] | 4 | 2019-11-06T22:08:00.000Z | 2021-12-08T06:20:51.000Z | src/bxgateway/services/block_queuing_service_manager.py | doubleukay/bxgateway | ac01fc9475c039cf4255576dd4ecd6bff6c48f69 | [
"MIT"
] | 10 | 2020-08-05T15:58:16.000Z | 2022-02-07T23:51:10.000Z | from typing import Optional, Dict, List, Tuple
from bxcommon.messages.abstract_block_message import AbstractBlockMessage
from bxcommon.network.ip_endpoint import IpEndpoint
from bxcommon.utils.expiring_dict import ExpiringDict
from bxcommon.utils.object_hash import Sha256Hash
from bxgateway import log_messages
from bxgateway.connections.abstract_gateway_blockchain_connection import AbstractGatewayBlockchainConnection
from bxgateway.services.abstract_block_queuing_service import AbstractBlockQueuingService
from bxutils import logging
logger = logging.get_logger(__name__)
| 46.062937 | 120 | 0.730682 | from typing import Optional, Dict, List, Tuple
from bxcommon.messages.abstract_block_message import AbstractBlockMessage
from bxcommon.network.ip_endpoint import IpEndpoint
from bxcommon.utils.expiring_dict import ExpiringDict
from bxcommon.utils.object_hash import Sha256Hash
from bxgateway import log_messages
from bxgateway.connections.abstract_gateway_blockchain_connection import AbstractGatewayBlockchainConnection
from bxgateway.services.abstract_block_queuing_service import AbstractBlockQueuingService
from bxutils import logging
logger = logging.get_logger(__name__)
class BlockQueuingServiceManager:
    """Fans block operations out to the per-peer block queuing services.

    Holds a shared (expiring) block store plus one queuing service per
    blockchain peer connection; one service is tracked as the "designated"
    service used for store operations.
    """

    # Shared storage of block messages, keyed by block hash.
    block_storage: ExpiringDict[Sha256Hash, Optional[AbstractBlockMessage]]
    # One queuing service per connected blockchain peer.
    blockchain_peer_to_block_queuing_service: Dict[AbstractGatewayBlockchainConnection, AbstractBlockQueuingService]
    # Service used for store_block_data; first added wins, re-picked on removal.
    designated_queuing_service: Optional[AbstractBlockQueuingService] = None

    def __init__(
        self,
        block_storage: ExpiringDict[Sha256Hash, Optional[AbstractBlockMessage]],
        blockchain_peer_to_block_queuing_service: Dict[AbstractGatewayBlockchainConnection, AbstractBlockQueuingService]
    ) -> None:
        self.block_storage = block_storage
        self.blockchain_peer_to_block_queuing_service = blockchain_peer_to_block_queuing_service

    def __iter__(self):
        """Iterate over all registered block queuing services."""
        for queuing_service in self.blockchain_peer_to_block_queuing_service.values():
            yield queuing_service

    def is_in_any_queuing_service(self, block_hash: Sha256Hash) -> bool:
        """
        :param block_hash:
        :return: if block hash is in any queuing service.
        """
        for queuing_service in self:
            if block_hash in queuing_service:
                return True
        return False

    def is_in_common_block_storage(self, block_hash: Sha256Hash) -> bool:
        """
        :param block_hash:
        :return: if block message is in common block storage for block hash
        """
        # A key present with a None value counts as "not stored".
        block = self.block_storage.contents.get(block_hash)
        if block is None:
            return False
        else:
            return True

    def add_block_queuing_service(
        self,
        connection: AbstractGatewayBlockchainConnection,
        block_queuing_service: AbstractBlockQueuingService
    ) -> None:
        """Register the queuing service for *connection*; first one becomes designated."""
        self.blockchain_peer_to_block_queuing_service[connection] = block_queuing_service
        if self.designated_queuing_service is None:
            self.designated_queuing_service = block_queuing_service

    def remove_block_queuing_service(self, connection: AbstractGatewayBlockchainConnection) -> None:
        """Unregister *connection*'s service; re-pick a designated service if needed."""
        if connection in self.blockchain_peer_to_block_queuing_service:
            designated_queuing_service = self.designated_queuing_service
            if designated_queuing_service is not None and designated_queuing_service.connection == connection:
                self.designated_queuing_service = None
            del self.blockchain_peer_to_block_queuing_service[connection]
            # Promote an arbitrary remaining service (the first iterated)
            # to designated; loop body returns immediately by design.
            for queuing_service in self.blockchain_peer_to_block_queuing_service.values():
                self.designated_queuing_service = queuing_service
                return
            return

    def get_block_queuing_service(
        self, connection: AbstractGatewayBlockchainConnection
    ) -> Optional[AbstractBlockQueuingService]:
        """Return *connection*'s queuing service, or None (logged) if unknown."""
        if connection in self.blockchain_peer_to_block_queuing_service:
            return self.blockchain_peer_to_block_queuing_service[connection]
        else:
            logger.error(log_messages.ATTEMPTED_FETCH_FOR_NONEXISTENT_QUEUING_SERVICE, connection)
            return None

    def get_designated_block_queuing_service(self) -> Optional[AbstractBlockQueuingService]:
        """Return the designated queuing service, or None (logged) if none is set."""
        if self.designated_queuing_service is not None:
            return self.designated_queuing_service
        else:
            logger.error(log_messages.ATTEMPTED_FETCH_FOR_NONEXISTENT_QUEUING_SERVICE, "(designated)")
            return None

    def push(
        self,
        block_hash: Sha256Hash, block_message: Optional[AbstractBlockMessage] = None,
        waiting_for_recovery: bool = False,
        node_received_from: Optional[AbstractGatewayBlockchainConnection] = None
    ) -> None:
        """Push the block to every queuing service except the peer it came from."""
        for node_conn, block_queuing_service in self.blockchain_peer_to_block_queuing_service.items():
            if node_received_from and node_conn == node_received_from:
                continue
            block_queuing_service.push(block_hash, block_message, waiting_for_recovery)

    def store_block_data(self, block_hash: Sha256Hash, block_message: AbstractBlockMessage) -> None:
        """Store the block via the designated service (logged error if none)."""
        if self.designated_queuing_service is not None:
            queuing_service = self.designated_queuing_service
            assert queuing_service is not None
            # Note: Stores to common block storage. Individual service used in order to call protocol specific method.
            queuing_service.store_block_data(block_hash, block_message)
        else:
            logger.error(log_messages.ATTEMPTED_FETCH_FOR_NONEXISTENT_QUEUING_SERVICE, "(designated)")

    def get_block_data(self, block_hash: Sha256Hash) -> Optional[AbstractBlockMessage]:
        """Return the stored block message for *block_hash*, or None."""
        if block_hash in self.block_storage:
            return self.block_storage[block_hash]
        else:
            return None

    def remove(self, block_hash: Sha256Hash) -> None:
        """Remove the block from every queuing service and from common storage."""
        for queuing_service in self:
            if block_hash in queuing_service:
                queuing_service.remove(block_hash)
        if block_hash in self.block_storage:
            del self.block_storage[block_hash]

    def update_recovered_block(
        self,
        block_hash: Sha256Hash,
        block_message: AbstractBlockMessage,
        node_received_from: Optional[AbstractGatewayBlockchainConnection] = None
    ) -> None:
        """Store the recovered block, then notify all services except the source peer."""
        self.store_block_data(block_hash, block_message)
        for node_conn, queuing_service in self.blockchain_peer_to_block_queuing_service.items():
            if node_received_from and node_conn == node_received_from:
                continue
            queuing_service.update_recovered_block(block_hash, block_message)

    def get_length_of_each_queuing_service_stats_format(self) -> str:
        """Return '[endpoint: queue_len, ...]' for all services, for stats logging."""
        queue_lengths_and_endpoints: List[Tuple[IpEndpoint, int]] = []
        for queuing_service in self:
            queue_lengths_and_endpoints.append((queuing_service.connection.endpoint, (len(queuing_service))))
        queue_lengths_str = ', '.join(
            [f"{str(ip_endpoint)}: {str(queue_length)}" for ip_endpoint, queue_length in queue_lengths_and_endpoints]
        )
        return f"[{queue_lengths_str}]"
| 4,712 | 1,272 | 23 |
e24db7c66349793decd4936a1352fb128cb6de8b | 979 | py | Python | batji/audio_lib.py | hiankun/py_sandbox | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | [
"MIT"
] | null | null | null | batji/audio_lib.py | hiankun/py_sandbox | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | [
"MIT"
] | null | null | null | batji/audio_lib.py | hiankun/py_sandbox | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | [
"MIT"
] | null | null | null | #!/usr/bin/python
#-*- mode: python; coding: utf-8 -*-
#import simpleaudio as sa # This need wav files
# avconv -i audio/d/dog.mp3 -acodec pcm_u8 -ar 8000 audio/d/dog.wav
import time
import os
from gtts import gTTS
import pyglet
from subprocess import call
| 25.102564 | 73 | 0.645557 | #!/usr/bin/python
#-*- mode: python; coding: utf-8 -*-
#import simpleaudio as sa # This need wav files
# avconv -i audio/d/dog.mp3 -acodec pcm_u8 -ar 8000 audio/d/dog.wav
import time
import os
from gtts import gTTS
import pyglet
from subprocess import call
def get_tts_mp3(lang, text, audio_path):
    """Synthesize *text* in language *lang* with Google TTS and save to *audio_path*.

    Requires network access (gTTS calls Google's TTS service).
    """
    tts = gTTS(text=text, lang=lang)
    tts.save(audio_path)
    print('>>> Generated and saved audio file to {}.'.format(audio_path))
    return
def play_audio(audio_path):
    """Play *audio_path* twice with mpg123, pausing 1.5 s between plays."""
    call(['mpg123', audio_path])
    time.sleep(1.5)
    call(['mpg123', audio_path])
    return
def get_audio(audio_path, text):
    """Play the audio file, generating it with TTS first if it is missing.

    NOTE(review): the *text* argument is never read — when the file is
    missing, the spoken text is re-derived from the file name instead.
    Confirm whether callers expect their *text* to be honored.
    """
    if os.path.isfile(audio_path):
        play_audio(audio_path)
    else:
        print('>>> No audio file named {}...'.format(audio_path))
        obj_name = os.path.splitext(os.path.basename(audio_path))[0]
        # e.g. "hot_dog.mp3" -> "hot dog"
        text = obj_name.replace('_', ' ')
        lang = 'en'
        get_tts_mp3(lang, text, audio_path)
        play_audio(audio_path)
    return
| 649 | 0 | 69 |
a93d838965225c6c8b6bc1e243dc5e816230b034 | 794 | py | Python | python/unit_test/get_data_test.py | aaronlam88/cmpe295 | dfe9fee8b11f0d2104d8879f578c4a6864314b76 | [
"MIT"
] | 5 | 2018-03-12T07:15:42.000Z | 2019-04-21T05:46:28.000Z | python/unit_test/get_data_test.py | aaronlam88/cmpe295 | dfe9fee8b11f0d2104d8879f578c4a6864314b76 | [
"MIT"
] | 52 | 2018-03-25T23:21:10.000Z | 2020-11-18T21:22:38.000Z | python/unit_test/get_data_test.py | aaronlam88/cmpe295 | dfe9fee8b11f0d2104d8879f578c4a6864314b76 | [
"MIT"
] | null | null | null | import sys
sys.path.insert(0, '../models')
from get_data import GetData
# from python.ultilities.get_data import GetData
import unittest
import csv
if __name__ == '__main__':
unittest.main() | 29.407407 | 58 | 0.68262 | import sys
sys.path.insert(0, '../models')
from get_data import GetData
# from python.ultilities.get_data import GetData
import unittest
import csv
class TestGetData(unittest.TestCase):
    """Unit tests for the GetData feature-extraction helper."""

    def test_getAllFeatures1(self):
        # Default construction must still yield a non-None feature set.
        getData = GetData()
        features = getData.getAllFeatures()
        self.assertIsNotNone(features)

    def test_getAllFeatures2(self):
        # NOTE(review): GetData(101) yielding exactly 100 features suggests
        # the constructor argument is an exclusive limit — confirm in GetData.
        getData = GetData(101)
        features = getData.getAllFeatures()
        self.assertIsNotNone(features)
        self.assertEqual(len(features), 100)

    def test_getAllFeatures3(self):
        getData = GetData(5)
        features = getData.getAllFeatures('open', 'close')
        self.assertIsNotNone(features)
        # Two requested columns -> inner feature vectors of length 2.
        self.assertEqual(len(features[0][0]), 2)
if __name__ == '__main__':
unittest.main() | 476 | 16 | 107 |
9cd9724c4f49dfcb109cc05f72977a2e1334cfa9 | 899 | py | Python | dbaas/physical/tests/test_engine_type.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | 303 | 2015-01-08T10:35:54.000Z | 2022-02-28T08:54:06.000Z | dbaas/physical/tests/test_engine_type.py | nouraellm/database-as-a-service | 5e655c9347bea991b7218a01549f5e44f161d7be | [
"BSD-3-Clause"
] | 124 | 2015-01-14T12:56:15.000Z | 2022-03-22T20:45:11.000Z | dbaas/physical/tests/test_engine_type.py | nouraellm/database-as-a-service | 5e655c9347bea991b7218a01549f5e44f161d7be | [
"BSD-3-Clause"
] | 110 | 2015-01-02T11:59:48.000Z | 2022-02-28T08:54:06.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django.contrib import admin
from physical.admin.engine_type import EngineTypeAdmin
from physical.models import EngineType
SEARCH_FIELDS = ('name', )
LIST_FILTER = ('is_in_memory', )
LIST_FIELDS = ('name', 'is_in_memory', 'created_at')
SAVE_ON_TOP = True
| 29 | 73 | 0.749722 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django.contrib import admin
from physical.admin.engine_type import EngineTypeAdmin
from physical.models import EngineType
SEARCH_FIELDS = ('name', )
LIST_FILTER = ('is_in_memory', )
LIST_FIELDS = ('name', 'is_in_memory', 'created_at')
SAVE_ON_TOP = True
class EngineTypeTestCase(TestCase):
    """Checks that EngineTypeAdmin matches the expected admin configuration."""

    def setUp(self):
        # Build the admin against a throwaway AdminSite (no registration needed).
        self.admin = EngineTypeAdmin(EngineType, admin.sites.AdminSite())

    def test_search_fields(self):
        self.assertEqual(SEARCH_FIELDS, self.admin.search_fields)

    def test_list_filters(self):
        self.assertEqual(LIST_FILTER, self.admin.list_filter)

    def test_list_fields(self):
        self.assertEqual(LIST_FIELDS, self.admin.list_display)

    def test_save_position(self):
        self.assertEqual(SAVE_ON_TOP, self.admin.save_on_top)
| 351 | 14 | 158 |
fa7eef9f715a1d26fda616e1b65f2fb5b7b6a5ed | 745 | py | Python | xxcmd/dbitem.py | grking/xxcmd | 287811a68d0bed0939ea888b555cacd1ac8b5a0e | [
"MIT"
] | null | null | null | xxcmd/dbitem.py | grking/xxcmd | 287811a68d0bed0939ea888b555cacd1ac8b5a0e | [
"MIT"
] | null | null | null | xxcmd/dbitem.py | grking/xxcmd | 287811a68d0bed0939ea888b555cacd1ac8b5a0e | [
"MIT"
] | null | null | null | # dbitem.py
import re
# Auto detect and split labels/cmd
# Return a string suitable for substring searching
| 26.607143 | 61 | 0.46443 | # dbitem.py
import re
class DBItem():
    """One database entry: a command plus an optional bracketed label and tags."""

    def __init__(self, line, tags=None):
        """Split *line* into a '[label]' (leading or trailing) and the command."""
        found = ""
        trailing = re.match(r'.*(\[(.*)\])$', line)
        leading = re.match(r'^(\[(.*)\])(.*)$', line)
        if trailing:
            # Label at the end of the line, e.g. "ls -la [list files]".
            found = trailing.group(2)
            line = re.sub(r'(\[(.*)\])$', '', line)
        elif leading:
            # Label at the start of the line, e.g. "[web] curl example.com".
            found = leading.group(2)
            line = re.sub(r'^(\[(.*)\])', '', line)
        self.label = found.strip()
        self.cmd = line.strip()
        # Copy the caller's list so later mutation does not leak in.
        self.tags = tags[:] if tags else []

    def search_key(self):
        """Lowercased 'label cmd' string for case-insensitive substring search."""
        return f"{self.label} {self.cmd}".lower()
| 557 | -6 | 75 |
3bc857abeeec95d09a9ac687261d090c0e95f042 | 878 | py | Python | tests/template_tests/test_base.py | shinshin86/django | 5cc81cd9eb69f5f7a711412c02039b435c393135 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2020-11-04T06:26:42.000Z | 2021-01-17T19:29:52.000Z | tests/template_tests/test_base.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 11 | 2020-03-24T15:46:05.000Z | 2022-03-11T23:20:58.000Z | tests/template_tests/test_base.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2018-01-08T08:14:29.000Z | 2020-11-04T08:46:29.000Z | from django.template.base import Variable, VariableDoesNotExist
from django.test import SimpleTestCase
| 38.173913 | 102 | 0.666287 | from django.template.base import Variable, VariableDoesNotExist
from django.test import SimpleTestCase
class VariableDoesNotExistTests(SimpleTestCase):
    """Tests for the VariableDoesNotExist exception."""

    def test_str(self):
        # str() must interpolate params into msg via %-formatting.
        exc = VariableDoesNotExist(msg='Failed lookup in %r', params=({'foo': 'bar'},))
        self.assertEqual(str(exc), "Failed lookup in {'foo': 'bar'}")
class VariableTests(SimpleTestCase):
    """Tests for template Variable literal resolution."""

    def test_integer_literals(self):
        # Very large integers must still resolve as numeric literals.
        self.assertEqual(Variable('999999999999999999999999999').literal, 999999999999999999999999999)

    def test_nonliterals(self):
        """Variable names that aren't resolved as literals."""
        # float() would accept 'inf'/'nan' spellings, but the template engine
        # must treat them as variable names, not numeric literals.
        var_names = []
        for var in ('inf', 'infinity', 'iNFiniTy', 'nan'):
            var_names.extend((var, '-' + var, '+' + var))
        for var in var_names:
            with self.subTest(var=var):
                self.assertIsNone(Variable(var).literal)
| 270 | 431 | 72 |
51f49434b72bf30b02042d658c8777e8e03fdea0 | 66 | py | Python | homebrain/agents/devicemanager/__init__.py | ErikBjare/Homebrain | 7e4dcc9d0e5f5ef6bde3d2cf31639527166ab124 | [
"MIT"
] | 1 | 2015-12-03T18:42:54.000Z | 2015-12-03T18:42:54.000Z | homebrain/agents/devicemanager/__init__.py | ErikBjare/Homebrain | 7e4dcc9d0e5f5ef6bde3d2cf31639527166ab124 | [
"MIT"
] | 14 | 2015-12-02T22:21:12.000Z | 2019-11-06T10:26:08.000Z | homebrain/agents/devicemanager/__init__.py | ErikBjare/Homebrain | 7e4dcc9d0e5f5ef6bde3d2cf31639527166ab124 | [
"MIT"
] | null | null | null | # Import the agent class
from .devicemanager import DeviceManager
| 22 | 40 | 0.833333 | # Import the agent class
from .devicemanager import DeviceManager
| 0 | 0 | 0 |
caf71cd01f3bb9c0494d877af8bc4b92a30c6065 | 1,480 | py | Python | ReconstructOriginaldigits.py | AndySamoil/Elite_Code | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | [
"MIT"
] | null | null | null | ReconstructOriginaldigits.py | AndySamoil/Elite_Code | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | [
"MIT"
] | null | null | null | ReconstructOriginaldigits.py | AndySamoil/Elite_Code | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | [
"MIT"
def originalDigits(self, s: str) -> str:
    """Reconstruct the ascending digit string whose English words were shuffled into *s*.

    credit to ZitaoWang for being a beast (adapted sol.)
    Each digit word is identified by a letter unique to it at processing
    time: z/w/u/x/g uniquely mark zero/two/four/six/eight; once those are
    removed, o/r/f/s/i uniquely mark one/three/five/seven/nine.

    Fix vs. original: removed a leftover debug ``print`` that dumped every
    consumed letter to stdout; counting simplified via ``dict.get``.
    """
    # Insertion order doubles as processing priority (globally unique
    # markers first, then the markers that become unique afterwards).
    char2word = {'z': 'zero', 'w': 'two', 'u': 'four', 'x': 'six', 'g': 'eight', 'o': 'one', 'r': 'three', 'f': 'five', 's': 'seven', 'i': 'nine'}
    word2int = {'zero': '0', 'one': '1', 'two': '2', 'three': '3',
                'four': '4', 'five': '5', 'six': '6', 'seven': '7',
                'eight': '8', 'nine': '9'}
    # Tally every letter of the input.
    counts = {}
    for letter in s:
        counts[letter] = counts.get(letter, 0) + 1
    digits = []
    # Peel off one word per occurrence of its marker letter, removing
    # the word's letters from the tally as we go.
    for marker, word in char2word.items():
        if marker not in counts:
            continue
        while counts[marker]:
            for letter in word:
                counts[letter] -= 1
            digits.append(word2int[word])
    digits.sort()
    return ''.join(digits)
"""
credit to ZitaoWang for being a beast (adapted sol.)
The key to this solution is in the set-up in identifying
that each number has its own unique character that defines
it. Some don't have this characteristic but they become
defined as the ones before it do
"""
char2word = {'z': 'zero', 'w': 'two', 'u': 'four', 'x': 'six', 'g': 'eight', 'o': 'one', 'r': 'three', 'f': 'five', 's': 'seven','i': 'nine'}
word2int = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', \
'four': '4', 'five': '5', 'six': '6', 'seven': '7', \
'eight': '8', 'nine': '9'}
kei = list(char2word.keys())
arr = []
all = dict()
# fill dict with all letters
for letter in s:
if letter in all:
all[letter] += 1
else:
all[letter] = 1
#iterate through each key
for key in kei:
if key in all:
while all[key] != 0:
# take the key and output it to an array while subtracting the letters from all
for i in range(len(char2word[key])):
print(char2word[key][i])
all[char2word[key][i]] -= 1
arr.append(word2int[char2word[key]])
arr.sort()
return ''.join(arr) | 0 | 0 | 0 |
1167d35a80949bd319acdab65f97e28ebc761d96 | 20,342 | py | Python | onepercentclub/settings/base.py | jfterpstra/onepercentclub-site | 43e8e01ac4d3d1ffdd5959ebd048ce95bb2dba0e | [
"BSD-3-Clause"
] | 7 | 2015-01-02T19:31:14.000Z | 2021-03-22T17:30:23.000Z | onepercentclub/settings/base.py | jfterpstra/onepercentclub-site | 43e8e01ac4d3d1ffdd5959ebd048ce95bb2dba0e | [
"BSD-3-Clause"
] | 1 | 2015-03-06T08:34:59.000Z | 2015-03-06T08:34:59.000Z | onepercentclub/settings/base.py | jfterpstra/onepercentclub-site | 43e8e01ac4d3d1ffdd5959ebd048ce95bb2dba0e | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
# Django settings for bluebottle project.
import os, datetime
# Import global settings for overriding without throwing away defaults
from django.conf import global_settings
from django.utils.translation import ugettext as _
from admin_dashboard import *
from .payments import *
# Set PROJECT_ROOT to the dir of the current file
# Find the project's containing directory and normalize it to refer to
# the project's root more easily
PROJECT_ROOT = os.path.dirname(os.path.normpath(os.path.join(__file__, '..', '..')))
# DJANGO_PROJECT: the short project name
# (defaults to the basename of PROJECT_ROOT)
DJANGO_PROJECT = os.path.basename(PROJECT_ROOT.rstrip('/'))
DEBUG = True
TEST_MEMCACHE = False
TEMPLATE_DEBUG = True
COMPRESS_TEMPLATES = False
ADMINS = (
('Team Error', 'errors@onepercentclub.com'),
)
CONTACT_EMAIL = 'info@onepercentclub.com'
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.onepercentclub.com', '.1procentclub.nl', 'localhost']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Available user interface translations
# Ref: https://docs.djangoproject.com/en/1.4/ref/settings/#languages
#
# Default language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
LANGUAGES = (
('nl', gettext_noop('Dutch')),
('en', gettext_noop('English'))
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# First one is for apps the second for the main templates
LOCALE_PATHS = ('../locale', 'locale')
# If you set this to False, Django will not use timezone-aware datetimes.
# pytz is in requirements.txt because it's "highly recommended" when using
# timezone support.
# https://docs.djangoproject.com/en/1.4/topics/i18n/timezones/
USE_TZ = True
# Static Files and Media
# ======================
#
# For staticfiles and media, the following convention is used:
#
# * '/static/media/': Application media default path
# * '/static/global/': Global static media
# * '/static/assets/<app_name>/': Static assets after running `collectstatic`
#
# The respective URL's (available only when `DEBUG=True`) are in `urls.py`.
#
# More information:
# https://docs.djangoproject.com/en/1.4/ref/contrib/staticfiles/
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'static', 'media')
# Absolute filesystem path to the directory that will hold PRIVATE user-uploaded files.
PRIVATE_MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'private', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/static/media/'
PRIVATE_MEDIA_URL = '/private/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static', 'assets')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# You can also name this tuple like: ('css', '/path/to/css')
(os.path.join(PROJECT_ROOT, 'static', 'global')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'apptemplates.Loader', # extend AND override templates
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# These are basically the default values from the Django configuration, written
# as a list for easy manipulation. This way one can:
#
# 1. Easily add, remove or replace elements in the list, ie. overriding.
# 2. Know what the defaults are, if you want to change them right here. This
# way you won't have to look them up every time you want to change.
#
# Note: The first three middleware classes need to be in this order: Session, Locale, Common
# http://stackoverflow.com/questions/8092695/404-on-requests-without-trailing-slash-to-i18n-urls
MIDDLEWARE_CLASSES = [
'bluebottle.auth.middleware.UserJwtTokenMiddleware',
'apps.redirects.middleware.RedirectHashCompatMiddleware',
'bluebottle.auth.middleware.AdminOnlyCsrf',
# Have a middleware to make sure old cookies still work after we switch to domain-wide cookies.
'bluebottle.utils.middleware.SubDomainSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'bluebottle.auth.middleware.AdminOnlySessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'bluebottle.auth.middleware.AdminOnlyAuthenticationMiddleware',
'bluebottle.bb_accounts.middleware.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# https://docs.djangoproject.com/en/1.4/ref/clickjacking/
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'apps.redirects.middleware.RedirectFallbackMiddleware',
'apps.crawlable.middleware.HashbangMiddleware',
'django_tools.middlewares.ThreadLocal.ThreadLocalMiddleware',
'bluebottle.auth.middleware.SlidingJwtTokenMiddleware'
]
# Browsers will block our pages from loading in an iframe no matter which site
# made the request. This setting can be overridden on a per response or a per
# view basis with the @xframe decorators.
X_FRAME_OPTIONS = 'DENY'
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
# Makes the 'request' variable (the current HttpRequest) available in templates.
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'bluebottle.utils.context_processors.installed_apps_context_processor',
'bluebottle.utils.context_processors.git_commit',
'bluebottle.utils.context_processors.conf_settings',
'bluebottle.utils.context_processors.google_maps_api_key',
'bluebottle.utils.context_processors.google_analytics_code',
'bluebottle.utils.context_processors.sentry_dsn',
'bluebottle.utils.context_processors.facebook_auth_settings',
'bluebottle.utils.context_processors.mixpanel_settings',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
ROOT_URLCONF = 'onepercentclub.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'onepercentclub.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates')
)
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'django_extensions',
'django_extensions.tests',
'raven.contrib.django.raven_compat',
'djcelery',
'south',
# 'django_nose',
'compressor',
'sorl.thumbnail',
'taggit',
'taggit_autocomplete_modified',
'micawber.contrib.mcdjango', # Embedding videos
'templatetag_handlebars',
'rest_framework',
'rest_framework.authtoken',
'polymorphic',
'registration',
'filetransfers',
'loginas',
#'social_auth',
'social.apps.django_app.default',
# Onepercent app to send POST requests to AFOM
'onepercent_afom',
#Widget
'bluebottle.widget',
# CMS page contents
'fluent_contents',
'fluent_contents.plugins.text',
'fluent_contents.plugins.oembeditem',
'fluent_contents.plugins.rawhtml',
'django_wysiwyg',
'tinymce',
'statici18n',
'django.contrib.humanize',
'django_tools',
# FB Auth
'bluebottle.auth',
# Password auth from old PHP site.
'legacyauth',
# Plain Bluebottle apps
'bluebottle.wallposts',
'bluebottle.utils',
'bluebottle.common',
'bluebottle.contentplugins',
'bluebottle.contact',
'bluebottle.geo',
'bluebottle.pages',
'bluebottle.news',
'bluebottle.slides',
'bluebottle.quotes',
'bluebottle.payments',
'bluebottle.payments_docdata',
'bluebottle.payments_logger',
'bluebottle.payments_voucher',
'bluebottle.redirects',
# Apps extending Bluebottle base models
    # These should come before their Bluebottle parents so the templates are overridden
'apps.members',
'apps.tasks',
'apps.projects',
'apps.organizations',
'apps.payouts',
# apps overriding bluebottle functionality should come before the bluebottle entries
# (template loaders pick the first template they find)
'apps.core',
'apps.bluebottle_salesforce',
'apps.bluebottle_dashboard',
'apps.contentplugins',
'apps.campaigns',
'apps.hbtemplates',
'apps.statistics',
'apps.homepage',
'apps.partners',
'apps.crawlable',
'apps.mchanga',
'apps.recurring_donations',
# Bluebottle apps with abstract models
'bluebottle.bb_accounts',
'bluebottle.bb_organizations',
'bluebottle.bb_projects',
'bluebottle.bb_tasks',
'bluebottle.bb_fundraisers',
'bluebottle.bb_donations',
'bluebottle.bb_orders',
'bluebottle.bb_payouts',
# Basic Bb implementations
'bluebottle.fundraisers',
'bluebottle.donations',
'bluebottle.orders',
# FIXME: Keep these just for migrations
'apps.fund',
'apps.cowry',
'apps.cowry_docdata',
# FIXME: Reimplement these apps
'apps.vouchers',
# 'apps.sepa',
# 'apps.csvimport',
# 'apps.accounting',
# Custom dashboard
'fluent_dashboard',
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.admindocs',
)
# Custom User model
AUTH_USER_MODEL = 'members.Member'
PROJECTS_PROJECT_MODEL = 'projects.Project'
PROJECTS_PHASELOG_MODEL = 'projects.ProjectPhaseLog'
FUNDRAISERS_FUNDRAISER_MODEL = 'fundraisers.FundRaiser'
TASKS_TASK_MODEL = 'tasks.Task'
TASKS_SKILL_MODEL = 'tasks.Skill'
TASKS_TASKMEMBER_MODEL = 'tasks.TaskMember'
TASKS_TASKFILE_MODEL = 'tasks.TaskFile'
ORGANIZATIONS_ORGANIZATION_MODEL = 'organizations.Organization'
ORGANIZATIONS_DOCUMENT_MODEL = 'organizations.OrganizationDocument'
ORGANIZATIONS_MEMBER_MODEL = 'organizations.OrganizationMember'
ORDERS_ORDER_MODEL = 'orders.Order'
DONATIONS_DONATION_MODEL = 'donations.Donation'
PAYOUTS_PROJECTPAYOUT_MODEL = 'payouts.ProjectPayout'
PAYOUTS_ORGANIZATIONPAYOUT_MODEL = 'payouts.OrganizationPayout'
SOCIAL_AUTH_USER_MODEL = 'members.Member'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email', 'user_friends', 'public_profile', 'user_birthday']
SOCIAL_AUTH_FACEBOOK_EXTRA_DATA = [('birthday', 'birthday')]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'payment_logs': {
'level': 'INFO',
'class': 'bluebottle.payments_logger.handlers.PaymentLogHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'bluebottle.salesforce': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'payments.payment': {
'handlers': ['mail_admins', 'payment_logs'],
'level': 'INFO',
'propagate': True,
},
}
}
# log errors & warnings
import logging
logging.basicConfig(level=logging.WARNING, format='[%(asctime)s] %(levelname)-8s %(message)s', datefmt="%d/%b/%Y %H:%M:%S")
# Django Celery - asynchronous task server
import djcelery
djcelery.setup_loader()
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# We're using nose because it limits the tests to our apps (i.e. no Django and
# 3rd party app tests). We need this because tests in contrib.auth.user are
# failing in Django 1.4.1. Here's the ticket for the failing test:
# https://code.djangoproject.com/ticket/17966
# The new test runner in Django 1.5 will be more flexible:
#https://code.djangoproject.com/ticket/17365
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--detailed-errors',
'--nologcapture',
]
SKIP_BB_FUNCTIONAL_TESTS = True
SOUTH_TESTS_MIGRATE = False # Make south shut up during tests
# django-compressor http://pypi.python.org/pypi/django_compressor
# Compressor is enabled whenever DEBUG is False.
STATICFILES_FINDERS += [
# django-compressor staticfiles
'compressor.finders.CompressorFinder',
]
# TODO Enable compass here.
COMPRESS_OUTPUT_DIR = 'compressed'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
#'compressor.filters.datauri.DataUriFilter',
'compressor.filters.cssmin.CSSMinFilter'
]
# Automagic CSS precompilation
#COMPRESS_PRECOMPILERS = (
# ('text/coffeescript', 'coffee --compile --stdio'),
# ('text/less', 'lessc {infile} {outfile}'),
# ('text/x-sass', 'sass {infile} {outfile}'),
# ('text/x-scss', 'sass --scss {infile} {outfile}'),
#)
# The default URL to send users to after login. This will be used when the
# 'next' URL parameter hasn't been set.
LOGIN_REDIRECT_URL = '/'
# Blog/news content configuration
FLUENT_CONTENTS_CACHE_OUTPUT = True
FLUENT_TEXT_CLEAN_HTML = True
FLUENT_TEXT_SANITIZE_HTML = True
DJANGO_WYSIWYG_FLAVOR = 'tinymce_advanced'
# Required for handlebars_template to work properly
USE_EMBER_STYLE_ATTRS = True
# Sorl Thumbnail settings
# http://sorl-thumbnail.readthedocs.org/en/latest/reference/settings.html
THUMBNAIL_QUALITY = 85
# TODO: Configure Sorl with Redis.
REST_FRAMEWORK = {
'FILTER_BACKEND': 'rest_framework.filters.DjangoFilterBackend',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
# JSON Web Token authentication settings (consumed by the JWT
# authentication class configured in REST_FRAMEWORK above).
JWT_AUTH = {
    # Tokens expire seven days after being issued.
    'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
    # No clock-skew allowance when validating token timestamps.
    'JWT_LEEWAY': 0,
    # Verify the token signature and reject expired tokens.
    'JWT_VERIFY': True,
    'JWT_VERIFY_EXPIRATION': True,
    # Clients may exchange a still-valid token for a fresh one.
    'JWT_ALLOW_TOKEN_RENEWAL': True,
    # After the renewal limit it isn't possible to request a token refresh
    # => time token first created + renewal limit.
    'JWT_TOKEN_RENEWAL_LIMIT': datetime.timedelta(days=90),
}
# Time between attempts to refresh the jwt token automatically on standard request
# TODO: move this setting into the JWT_AUTH settings.
JWT_TOKEN_RENEWAL_DELTA = datetime.timedelta(minutes=30)
# Base for the return URL the payment provider redirects back to
# (local-development default; production settings should override this).
COWRY_RETURN_URL_BASE = 'http://127.0.0.1:8000'
# Payment methods offered through the cowry payment app.  Each entry maps
# a method id to its DocData profile, a display name, and which payment
# flows (single vs. recurring) it supports.
COWRY_PAYMENT_METHODS = {
    'dd-webmenu': {
        'profile': 'webmenu',
        'name': 'DocData Web Menu',
        # One-off payments only.
        'supports_recurring': False,
        'supports_single': True,
    },
    'dd-webdirect': {
        'profile': 'webdirect',
        'name': 'DocData WebDirect Direct Debit',
        # Direct debit is restricted to the Netherlands.
        'restricted_countries': ('NL',),
        'supports_recurring': True,
        'supports_single': False,
    },
}
# Default VAT percentage as string (used in payouts)
VAT_RATE = '0.21'
# Settings for organization bank account. Please set this in secrets.py
# SEPA = {
# 'iban': '',
# 'bic': '',
# 'name': '',
# 'id': ''
# }
# Salesforce app settings
SALESFORCE_QUERY_TIMEOUT = 3
DATABASE_ROUTERS = [
"salesforce.router.ModelRouter"
]
# E-mail settings
DEFAULT_FROM_EMAIL = '<website@onepercentclub.com> 1%Club'
# Django-registration settings
ACCOUNT_ACTIVATION_DAYS = 4
HTML_ACTIVATION_EMAIL = True # Note this setting is from our forked version.
# Functional testing
# Selenium and Splinter settings
SELENIUM_TESTS = True
SELENIUM_WEBDRIVER = 'phantomjs' # Can be any of chrome, firefox, phantomjs
FIXTURE_DIRS = [
os.path.join(DJANGO_PROJECT, 'fixtures')
]
# PhantomJS for flat page generation.
# NOTE: This has nothing to do with testing against phantomjs.
CRAWLABLE_PHANTOMJS_DEDICATED_MODE = True
# If dedicated mode is enabled, configure the port:
CRAWLABLE_PHANTOMJS_DEDICATED_PORT = 8910
# If dedicated mode is disabled, you can specify arguments to start phantomjs.
CRAWLABLE_PHANTOMJS_ARGS = []
# Use HTTPS for PhantomJS requests.
CRAWLABLE_FORCE_HTTPS = True
# Send email to console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICI18N_ROOT = os.path.join(PROJECT_ROOT, 'static', 'global')
SESSION_COOKIE_NAME = 'bb-session-id'
# Support legacy passwords
PASSWORD_HASHERS = global_settings.PASSWORD_HASHERS + (
'legacyauth.hashers.LegacyPasswordHasher',
)
# Twitter handles, per language
TWITTER_HANDLES = {
'nl': '1procentclub',
'en': '1percentclub',
}
DEFAULT_TWITTER_HANDLE = TWITTER_HANDLES['nl']
MINIMAL_PAYOUT_AMOUNT = 21.00
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'bluebottle.auth.utils.save_profile_picture',
'bluebottle.auth.utils.get_extra_facebook_data',
'bluebottle.auth.utils.send_welcome_mail_pipe'
)
AFOM_ENABLED = False
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email', 'first_name', 'last_name', ]
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SEND_WELCOME_MAIL = True
| 31.984277 | 123 | 0.723528 | # coding=utf-8
# Django settings for bluebottle project.
import os, datetime
# Import global settings for overriding without throwing away defaults
from django.conf import global_settings
from django.utils.translation import ugettext as _
from admin_dashboard import *
from .payments import *
# Set PROJECT_ROOT to the dir of the current file
# Find the project's containing directory and normalize it to refer to
# the project's root more easily
PROJECT_ROOT = os.path.dirname(os.path.normpath(os.path.join(__file__, '..', '..')))
# DJANGO_PROJECT: the short project name
# (defaults to the basename of PROJECT_ROOT)
DJANGO_PROJECT = os.path.basename(PROJECT_ROOT.rstrip('/'))
DEBUG = True
TEST_MEMCACHE = False
TEMPLATE_DEBUG = True
COMPRESS_TEMPLATES = False
ADMINS = (
('Team Error', 'errors@onepercentclub.com'),
)
CONTACT_EMAIL = 'info@onepercentclub.com'
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.onepercentclub.com', '.1procentclub.nl', 'localhost']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Available user interface translations
# Ref: https://docs.djangoproject.com/en/1.4/ref/settings/#languages
#
# Default language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
LANGUAGES = (
('nl', gettext_noop('Dutch')),
('en', gettext_noop('English'))
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# First one is for apps the second for the main templates
LOCALE_PATHS = ('../locale', 'locale')
# If you set this to False, Django will not use timezone-aware datetimes.
# pytz is in requirements.txt because it's "highly recommended" when using
# timezone support.
# https://docs.djangoproject.com/en/1.4/topics/i18n/timezones/
USE_TZ = True
# Static Files and Media
# ======================
#
# For staticfiles and media, the following convention is used:
#
# * '/static/media/': Application media default path
# * '/static/global/': Global static media
# * '/static/assets/<app_name>/': Static assets after running `collectstatic`
#
# The respective URL's (available only when `DEBUG=True`) are in `urls.py`.
#
# More information:
# https://docs.djangoproject.com/en/1.4/ref/contrib/staticfiles/
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'static', 'media')
# Absolute filesystem path to the directory that will hold PRIVATE user-uploaded files.
PRIVATE_MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'private', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/static/media/'
PRIVATE_MEDIA_URL = '/private/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static', 'assets')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# You can also name this tuple like: ('css', '/path/to/css')
(os.path.join(PROJECT_ROOT, 'static', 'global')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'apptemplates.Loader', # extend AND override templates
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# These are basically the default values from the Django configuration, written
# as a list for easy manipulation. This way one can:
#
# 1. Easily add, remove or replace elements in the list, ie. overriding.
# 2. Know what the defaults are, if you want to change them right here. This
# way you won't have to look them up every time you want to change.
#
# Note: The first three middleware classes need to be in this order: Session, Locale, Common
# http://stackoverflow.com/questions/8092695/404-on-requests-without-trailing-slash-to-i18n-urls
MIDDLEWARE_CLASSES = [
'bluebottle.auth.middleware.UserJwtTokenMiddleware',
'apps.redirects.middleware.RedirectHashCompatMiddleware',
'bluebottle.auth.middleware.AdminOnlyCsrf',
# Have a middleware to make sure old cookies still work after we switch to domain-wide cookies.
'bluebottle.utils.middleware.SubDomainSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'bluebottle.auth.middleware.AdminOnlySessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'bluebottle.auth.middleware.AdminOnlyAuthenticationMiddleware',
'bluebottle.bb_accounts.middleware.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# https://docs.djangoproject.com/en/1.4/ref/clickjacking/
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'apps.redirects.middleware.RedirectFallbackMiddleware',
'apps.crawlable.middleware.HashbangMiddleware',
'django_tools.middlewares.ThreadLocal.ThreadLocalMiddleware',
'bluebottle.auth.middleware.SlidingJwtTokenMiddleware'
]
# Browsers will block our pages from loading in an iframe no matter which site
# made the request. This setting can be overridden on a per response or a per
# view basis with the @xframe decorators.
X_FRAME_OPTIONS = 'DENY'
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
# Makes the 'request' variable (the current HttpRequest) available in templates.
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'bluebottle.utils.context_processors.installed_apps_context_processor',
'bluebottle.utils.context_processors.git_commit',
'bluebottle.utils.context_processors.conf_settings',
'bluebottle.utils.context_processors.google_maps_api_key',
'bluebottle.utils.context_processors.google_analytics_code',
'bluebottle.utils.context_processors.sentry_dsn',
'bluebottle.utils.context_processors.facebook_auth_settings',
'bluebottle.utils.context_processors.mixpanel_settings',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
ROOT_URLCONF = 'onepercentclub.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'onepercentclub.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates')
)
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'django_extensions',
'django_extensions.tests',
'raven.contrib.django.raven_compat',
'djcelery',
'south',
# 'django_nose',
'compressor',
'sorl.thumbnail',
'taggit',
'taggit_autocomplete_modified',
'micawber.contrib.mcdjango', # Embedding videos
'templatetag_handlebars',
'rest_framework',
'rest_framework.authtoken',
'polymorphic',
'registration',
'filetransfers',
'loginas',
#'social_auth',
'social.apps.django_app.default',
# Onepercent app to send POST requests to AFOM
'onepercent_afom',
#Widget
'bluebottle.widget',
# CMS page contents
'fluent_contents',
'fluent_contents.plugins.text',
'fluent_contents.plugins.oembeditem',
'fluent_contents.plugins.rawhtml',
'django_wysiwyg',
'tinymce',
'statici18n',
'django.contrib.humanize',
'django_tools',
# FB Auth
'bluebottle.auth',
# Password auth from old PHP site.
'legacyauth',
# Plain Bluebottle apps
'bluebottle.wallposts',
'bluebottle.utils',
'bluebottle.common',
'bluebottle.contentplugins',
'bluebottle.contact',
'bluebottle.geo',
'bluebottle.pages',
'bluebottle.news',
'bluebottle.slides',
'bluebottle.quotes',
'bluebottle.payments',
'bluebottle.payments_docdata',
'bluebottle.payments_logger',
'bluebottle.payments_voucher',
'bluebottle.redirects',
# Apps extending Bluebottle base models
    # These should come before their Bluebottle parents so the templates are overridden
'apps.members',
'apps.tasks',
'apps.projects',
'apps.organizations',
'apps.payouts',
# apps overriding bluebottle functionality should come before the bluebottle entries
# (template loaders pick the first template they find)
'apps.core',
'apps.bluebottle_salesforce',
'apps.bluebottle_dashboard',
'apps.contentplugins',
'apps.campaigns',
'apps.hbtemplates',
'apps.statistics',
'apps.homepage',
'apps.partners',
'apps.crawlable',
'apps.mchanga',
'apps.recurring_donations',
# Bluebottle apps with abstract models
'bluebottle.bb_accounts',
'bluebottle.bb_organizations',
'bluebottle.bb_projects',
'bluebottle.bb_tasks',
'bluebottle.bb_fundraisers',
'bluebottle.bb_donations',
'bluebottle.bb_orders',
'bluebottle.bb_payouts',
# Basic Bb implementations
'bluebottle.fundraisers',
'bluebottle.donations',
'bluebottle.orders',
# FIXME: Keep these just for migrations
'apps.fund',
'apps.cowry',
'apps.cowry_docdata',
# FIXME: Reimplement these apps
'apps.vouchers',
# 'apps.sepa',
# 'apps.csvimport',
# 'apps.accounting',
# Custom dashboard
'fluent_dashboard',
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.admindocs',
)
# Custom User model
AUTH_USER_MODEL = 'members.Member'
PROJECTS_PROJECT_MODEL = 'projects.Project'
PROJECTS_PHASELOG_MODEL = 'projects.ProjectPhaseLog'
FUNDRAISERS_FUNDRAISER_MODEL = 'fundraisers.FundRaiser'
TASKS_TASK_MODEL = 'tasks.Task'
TASKS_SKILL_MODEL = 'tasks.Skill'
TASKS_TASKMEMBER_MODEL = 'tasks.TaskMember'
TASKS_TASKFILE_MODEL = 'tasks.TaskFile'
ORGANIZATIONS_ORGANIZATION_MODEL = 'organizations.Organization'
ORGANIZATIONS_DOCUMENT_MODEL = 'organizations.OrganizationDocument'
ORGANIZATIONS_MEMBER_MODEL = 'organizations.OrganizationMember'
ORDERS_ORDER_MODEL = 'orders.Order'
DONATIONS_DONATION_MODEL = 'donations.Donation'
PAYOUTS_PROJECTPAYOUT_MODEL = 'payouts.ProjectPayout'
PAYOUTS_ORGANIZATIONPAYOUT_MODEL = 'payouts.OrganizationPayout'
SOCIAL_AUTH_USER_MODEL = 'members.Member'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email', 'user_friends', 'public_profile', 'user_birthday']
SOCIAL_AUTH_FACEBOOK_EXTRA_DATA = [('birthday', 'birthday')]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'payment_logs': {
'level': 'INFO',
'class': 'bluebottle.payments_logger.handlers.PaymentLogHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'bluebottle.salesforce': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'payments.payment': {
'handlers': ['mail_admins', 'payment_logs'],
'level': 'INFO',
'propagate': True,
},
}
}
# log errors & warnings
import logging
logging.basicConfig(level=logging.WARNING, format='[%(asctime)s] %(levelname)-8s %(message)s', datefmt="%d/%b/%Y %H:%M:%S")
# Django Celery - asynchronous task server
import djcelery
djcelery.setup_loader()
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# We're using nose because it limits the tests to our apps (i.e. no Django and
# 3rd party app tests). We need this because tests in contrib.auth.user are
# failing in Django 1.4.1. Here's the ticket for the failing test:
# https://code.djangoproject.com/ticket/17966
# The new test runner in Django 1.5 will be more flexible:
#https://code.djangoproject.com/ticket/17365
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--detailed-errors',
'--nologcapture',
]
SKIP_BB_FUNCTIONAL_TESTS = True
SOUTH_TESTS_MIGRATE = False # Make south shut up during tests
# django-compressor http://pypi.python.org/pypi/django_compressor
# Compressor is enabled whenever DEBUG is False.
STATICFILES_FINDERS += [
# django-compressor staticfiles
'compressor.finders.CompressorFinder',
]
# TODO Enable compass here.
COMPRESS_OUTPUT_DIR = 'compressed'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
#'compressor.filters.datauri.DataUriFilter',
'compressor.filters.cssmin.CSSMinFilter'
]
# Automagic CSS precompilation
#COMPRESS_PRECOMPILERS = (
# ('text/coffeescript', 'coffee --compile --stdio'),
# ('text/less', 'lessc {infile} {outfile}'),
# ('text/x-sass', 'sass {infile} {outfile}'),
# ('text/x-scss', 'sass --scss {infile} {outfile}'),
#)
# The default URL to send users to after login. This will be used when the
# 'next' URL parameter hasn't been set.
LOGIN_REDIRECT_URL = '/'
# Blog/news content configuration
FLUENT_CONTENTS_CACHE_OUTPUT = True
FLUENT_TEXT_CLEAN_HTML = True
FLUENT_TEXT_SANITIZE_HTML = True
DJANGO_WYSIWYG_FLAVOR = 'tinymce_advanced'
# Required for handlebars_template to work properly
USE_EMBER_STYLE_ATTRS = True
# Sorl Thumbnail settings
# http://sorl-thumbnail.readthedocs.org/en/latest/reference/settings.html
THUMBNAIL_QUALITY = 85
# TODO: Configure Sorl with Redis.
REST_FRAMEWORK = {
'FILTER_BACKEND': 'rest_framework.filters.DjangoFilterBackend',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_LEEWAY': 0,
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_ALLOW_TOKEN_RENEWAL': True,
# After the renewal limit it isn't possible to request a token refresh
# => time token first created + renewal limit.
'JWT_TOKEN_RENEWAL_LIMIT': datetime.timedelta(days=90),
}
# Time between attempts to refresh the jwt token automatically on standard request
# TODO: move this setting into the JWT_AUTH settings.
JWT_TOKEN_RENEWAL_DELTA = datetime.timedelta(minutes=30)
COWRY_RETURN_URL_BASE = 'http://127.0.0.1:8000'
COWRY_PAYMENT_METHODS = {
'dd-webmenu': {
'profile': 'webmenu',
'name': 'DocData Web Menu',
'supports_recurring': False,
'supports_single': True,
},
'dd-webdirect': {
'profile': 'webdirect',
'name': 'DocData WebDirect Direct Debit',
'restricted_countries': ('NL',),
'supports_recurring': True,
'supports_single': False,
},
}
# Default VAT percentage as string (used in payouts)
VAT_RATE = '0.21'
# Settings for organization bank account. Please set this in secrets.py
# SEPA = {
# 'iban': '',
# 'bic': '',
# 'name': '',
# 'id': ''
# }
# Salesforce app settings
SALESFORCE_QUERY_TIMEOUT = 3
DATABASE_ROUTERS = [
"salesforce.router.ModelRouter"
]
# E-mail settings
DEFAULT_FROM_EMAIL = '<website@onepercentclub.com> 1%Club'
# Django-registration settings
ACCOUNT_ACTIVATION_DAYS = 4
HTML_ACTIVATION_EMAIL = True # Note this setting is from our forked version.
# Functional testing
# Selenium and Splinter settings
SELENIUM_TESTS = True
SELENIUM_WEBDRIVER = 'phantomjs' # Can be any of chrome, firefox, phantomjs
FIXTURE_DIRS = [
os.path.join(DJANGO_PROJECT, 'fixtures')
]
# PhantomJS for flat page generation.
# NOTE: This has nothing to do with testing against phantomjs.
CRAWLABLE_PHANTOMJS_DEDICATED_MODE = True
# If dedicated mode is enabled, configure the port:
CRAWLABLE_PHANTOMJS_DEDICATED_PORT = 8910
# If dedicated mode is disabled, you can specify arguments to start phantomjs.
CRAWLABLE_PHANTOMJS_ARGS = []
# Use HTTPS for PhantomJS requests.
CRAWLABLE_FORCE_HTTPS = True
# Send email to console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICI18N_ROOT = os.path.join(PROJECT_ROOT, 'static', 'global')
SESSION_COOKIE_NAME = 'bb-session-id'
# Support legacy passwords
PASSWORD_HASHERS = global_settings.PASSWORD_HASHERS + (
'legacyauth.hashers.LegacyPasswordHasher',
)
# Twitter handles, per language
TWITTER_HANDLES = {
'nl': '1procentclub',
'en': '1percentclub',
}
DEFAULT_TWITTER_HANDLE = TWITTER_HANDLES['nl']
MINIMAL_PAYOUT_AMOUNT = 21.00
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'bluebottle.auth.utils.save_profile_picture',
'bluebottle.auth.utils.get_extra_facebook_data',
'bluebottle.auth.utils.send_welcome_mail_pipe'
)
AFOM_ENABLED = False
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email', 'first_name', 'last_name', ]
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SEND_WELCOME_MAIL = True
| 0 | 0 | 0 |
e607a44a1aa94201d10998a2a66e5d422aaaebba | 3,084 | py | Python | Semestre_2016_1/Sol_Parcial_III/p1.py | SherylA/Archivo_Fundamentos | 4b40cca6d808efdb9c96fa62dabb453931965882 | [
"CC-BY-4.0"
] | null | null | null | Semestre_2016_1/Sol_Parcial_III/p1.py | SherylA/Archivo_Fundamentos | 4b40cca6d808efdb9c96fa62dabb453931965882 | [
"CC-BY-4.0"
] | null | null | null | Semestre_2016_1/Sol_Parcial_III/p1.py | SherylA/Archivo_Fundamentos | 4b40cca6d808efdb9c96fa62dabb453931965882 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Los valores reales eran
#La altura inicial es 0.5 [m]
#La gravedad es 9.8 [m/s^2]
#El ángulo es 25 [grados]
#La velocidad inicial es 5 [m/s]
#El Xmax es 2.724 [m]
#El Ymax es 0.728 [m]
import math, sys
#PARA REVISAR RÁPIDAMENTE
n=15
x=[0.100,0.250,0.400,0.550,0.700,0.850,1.000,1.150,1.300,1.450,1.600,1.750,1.900,2.050,2.200]
y=[0.553,0.605,0.657,0.693,0.711,0.733,0.734,0.724,0.710,0.676,0.639,0.594,0.525,0.462,0.380]
t=[0.029,0.064,0.091,0.130,0.158,0.193,0.225,0.262,0.288,0.326,0.353,0.392,0.421,0.458,0.494]
#POR ENTRADA POR CONSOLA
#n=int(input("¿Cuántos datos son?"))
#x=[]
#y=[]
#t=[]
#
#if n<=0:
# print("El número de datos debe ser mayor a cero")
# sys.exit()
#else:
# for i in range(0,n):
# print("Ingrese el dato x%d") % (i)
# x.append(float(input()))
# print("Ingrese el dato y%d") % (i)
# y.append(float(input()))
# print("Ingrese el dato t%d") % (i)
# t.append(float(input()))
sol1=reglineal(x,t,n)
sol2=regcuad(y,t,n)
print sol1
#sol1[0]-->m
#sol1[1]-->b
#sol2[0]-->cc
#sol2[1]-->bc
#sol2[2]-->ac
y0 = sol2[2]
g = -2.0*sol2[0]
theta = math.atan(sol2[1]/sol1[0]) #En radianes
V0 = sol1[0]/math.cos(theta)
if ((V0*math.sin(theta))**2.0+2.0*g*y0)==0:
print("Los datos arrojan tiempos complejos")
sys.exit()
else:
tmax1 = (-1.0*V0*math.sin(theta) + math.sqrt((V0*math.sin(theta))**2.0+2.0*g*y0))/(-1.0*g)
tmax2 = (-1.0*V0*math.sin(theta) - math.sqrt((V0*math.sin(theta))**2.0+2.0*g*y0))/(-1.0*g)
if tmax1>0:
xmax = V0*math.cos(theta)*tmax1
elif tmax2>0:
xmax = V0*math.cos(theta)*tmax2
else:
print("Tiempos negativos hay un error en sus datos")
sys.exit()
ts = V0*math.sin(theta)/g
if ts<0:
print("Tiempos negativos hay un error en sus datos")
else:
ymax = y0 - 0.5*g*ts**2 + V0*math.sin(theta)*ts
print ("La altura inicial es %.3f [m]\nLa gravedad es %.3f [m/s^2]\nEl ángulo es %.3f [grados] \nLa velocidad inicial es %.3f [m/s] \nEl Xmax es %.3f [m] \nEl Ymax es %.3f [m]") % (y0,g,math.degrees(theta),V0,xmax,ymax)
| 21.87234 | 219 | 0.590467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Los valores reales eran
#La altura inicial es 0.5 [m]
#La gravedad es 9.8 [m/s^2]
#El ángulo es 25 [grados]
#La velocidad inicial es 5 [m/s]
#El Xmax es 2.724 [m]
#El Ymax es 0.728 [m]
import math, sys
def reglineal(y, x, n):
    """Least-squares straight-line fit y = m*x + b over the first n points.

    Returns [m, b]. Aborts the whole program (prints a message and calls
    sys.exit) when the denominator vanishes, i.e. the slope is undefined.
    """
    # Accumulate the four sums of the normal equations; start each at 0.0
    # so the arithmetic stays in floating point even for integer input.
    sum_x = sum((x[i] for i in range(n)), 0.0)
    sum_y = sum((y[i] for i in range(n)), 0.0)
    sum_xy = sum((x[i] * y[i] for i in range(n)), 0.0)
    sum_xx = sum((x[i] * x[i] for i in range(n)), 0.0)
    denom = sum_x * sum_x - n * sum_xx
    if denom == 0:
        print("La pendiente es indeterminada. Los datos se ajustan a una recta de la familia x=a")
        sys.exit()
    slope = (sum_x * sum_y - n * sum_xy) / denom
    intercept = (sum_y - slope * sum_x) / n
    return [slope, intercept]
def regcuad(y, x, n):
    """Least-squares parabola fit y = c2*x**2 + c1*x + c0 over n points.

    Returns [c2, c1, c0]. Aborts the whole program (prints a message and
    calls sys.exit) when the normal-equation determinant vanishes.
    """
    # Raw power sums needed by the degree-2 normal equations.
    sx = sy = sxy = sxx = sxxy = sxxx = sxxxx = 0.0
    for k in range(n):
        xk = x[k]
        yk = y[k]
        sx += xk
        sy += yk
        sxy += xk * yk
        sxx += xk * xk
        sxxy += xk * xk * yk
        sxxx += xk * xk * xk
        sxxxx += xk * xk * xk * xk
    inv_n = 1.0 / n
    # Mean-centered moment terms.
    m_xx = sxx - sx * sx * inv_n
    m_x4 = sxxxx - sxx * sxx * inv_n
    # NOTE: the cross term uses (sum x^3 - sum x^2 * sum x / n); an earlier
    # blackboard version mistakenly used sum(y) in place of sum(x) here.
    m_x3 = sxxx - sxx * sx * inv_n
    m_xy = sxy - sx * sy * inv_n
    m_xxy = sxxy - sxx * sy * inv_n
    det = m_xx * m_x4 - m_x3 ** 2.0
    if det == 0:
        print("Los valores son indeterminados. Los datos no se ajustan a un polinomio grado 2")
        sys.exit()
    c2 = (m_xx * m_xxy - m_x3 * m_xy) / det
    c1 = (m_xy * m_x4 - m_xxy * m_x3) / det
    c0 = (sy - c1 * sx - c2 * sxx) * inv_n
    return [c2, c1, c0]
# --- Driver: recover projectile-motion parameters from sampled (x, y, t) data ---
# NOTE: this script is Python 2 (bare `print` statement below).
# Quick-check mode: hard-coded sample positions x, y [m] and times t [s].
n=15
x=[0.100,0.250,0.400,0.550,0.700,0.850,1.000,1.150,1.300,1.450,1.600,1.750,1.900,2.050,2.200]
y=[0.553,0.605,0.657,0.693,0.711,0.733,0.734,0.724,0.710,0.676,0.639,0.594,0.525,0.462,0.380]
t=[0.029,0.064,0.091,0.130,0.158,0.193,0.225,0.262,0.288,0.326,0.353,0.392,0.421,0.458,0.494]
# Alternative: read the data from the console (disabled).
#n=int(input("¿Cuántos datos son?"))
#x=[]
#y=[]
#t=[]
#
#if n<=0:
#    print("El número de datos debe ser mayor a cero")
#    sys.exit()
#else:
#    for i in range(0,n):
#        print("Ingrese el dato x%d") % (i)
#        x.append(float(input()))
#        print("Ingrese el dato y%d") % (i)
#        y.append(float(input()))
#        print("Ingrese el dato t%d") % (i)
#        t.append(float(input()))
# Fit x(t) linearly and y(t) quadratically.
sol1=reglineal(x,t,n)
sol2=regcuad(y,t,n)
print sol1
# sol1 = [m, b]       : slope and intercept of the linear fit x(t).
# sol2 = [cc, bc, ac] : quadratic, linear and constant coefficients of y(t).
# Physical parameters: y(t) = y0 + V0*sin(theta)*t - g/2*t^2, x(t) = V0*cos(theta)*t.
y0 = sol2[2]
g = -2.0*sol2[0]
theta = math.atan(sol2[1]/sol1[0]) # in radians
V0 = sol1[0]/math.cos(theta)
# NOTE(review): only an exactly-zero discriminant is caught here; a negative
# one still reaches math.sqrt below and raises ValueError — consider `< 0`.
if ((V0*math.sin(theta))**2.0+2.0*g*y0)==0:
    print("Los datos arrojan tiempos complejos")
    sys.exit()
else:
    # Roots of y(t) = 0: the positive one is the time of flight.
    tmax1 = (-1.0*V0*math.sin(theta) + math.sqrt((V0*math.sin(theta))**2.0+2.0*g*y0))/(-1.0*g)
    tmax2 = (-1.0*V0*math.sin(theta) - math.sqrt((V0*math.sin(theta))**2.0+2.0*g*y0))/(-1.0*g)
if tmax1>0:
    xmax = V0*math.cos(theta)*tmax1
elif tmax2>0:
    xmax = V0*math.cos(theta)*tmax2
else:
    print("Tiempos negativos hay un error en sus datos")
    sys.exit()
# Time of apex: vertical velocity zero.
ts = V0*math.sin(theta)/g
# NOTE(review): when ts < 0 the script keeps running and the final print
# raises NameError because ymax is never assigned.
if ts<0:
    print("Tiempos negativos hay un error en sus datos")
else:
    ymax = y0 - 0.5*g*ts**2 + V0*math.sin(theta)*ts
print ("La altura inicial es %.3f [m]\nLa gravedad es %.3f [m/s^2]\nEl ángulo es %.3f [grados] \nLa velocidad inicial es %.3f [m/s] \nEl Xmax es %.3f [m] \nEl Ymax es %.3f [m]") % (y0,g,math.degrees(theta),V0,xmax,ymax)
| 989 | 0 | 47 |
5db3d3e7396889be388b655ff404041e12f5a1ff | 1,736 | py | Python | test/test_orthonormal_basis_symbolic.py | wzh-code/basix | a5dc28dc4ed33e12518d8460b9d85b9f2da108d8 | [
"MIT"
] | 3 | 2020-11-19T19:17:06.000Z | 2020-12-04T11:00:26.000Z | test/test_orthonormal_basis_symbolic.py | wzh-code/basix | a5dc28dc4ed33e12518d8460b9d85b9f2da108d8 | [
"MIT"
] | 33 | 2020-11-08T18:55:27.000Z | 2020-12-14T10:08:19.000Z | test/test_orthonormal_basis_symbolic.py | wzh-code/basix | a5dc28dc4ed33e12518d8460b9d85b9f2da108d8 | [
"MIT"
] | 1 | 2020-11-23T19:40:31.000Z | 2020-11-23T19:40:31.000Z | # Copyright (c) 2020 Chris Richardson & Matthew Scroggs
# FEniCS Project
# SPDX-License-Identifier: MIT
import sympy
import basix
import numpy as np
| 26.707692 | 77 | 0.553571 | # Copyright (c) 2020 Chris Richardson & Matthew Scroggs
# FEniCS Project
# SPDX-License-Identifier: MIT
import sympy
import basix
import numpy as np
def P_interval(n, x):
    """Gram-Schmidt orthonormalisation of the monomials 1, x, ..., x**n
    on [0, 1] with respect to the L2 inner product, built with sympy."""
    basis = []
    for degree in range(n + 1):
        candidate = x ** degree
        # Remove the projection onto every polynomial built so far.
        for prev in basis:
            candidate -= (candidate * prev).integrate((x, 0, 1)) * prev
        norm = sympy.sqrt((candidate * candidate).integrate((x, 0, 1)))
        basis.append(candidate / norm)
    return basis
def test_symbolic_interval():
    """Check basix's tabulated orthonormal polynomial set on the interval.

    Values and the first `nderiv` x-derivatives at lattice points are
    compared against the sympy Gram-Schmidt reference from `P_interval`.
    """
    n = 7
    nderiv = 7
    x = sympy.Symbol("x")
    wd = P_interval(n, x)
    cell = basix.CellType.interval
    pts0 = basix.create_lattice(cell, 10, basix.LatticeType.equispaced, True)
    wtab = basix._basixcpp.tabulate_polynomial_set(cell, n, nderiv, pts0)
    for k in range(nderiv + 1):
        wsym = np.zeros_like(wtab[k])
        for i in range(n + 1):
            for j, p in enumerate(pts0):
                wsym[j, i] = wd[i].subs(x, p[0])
            # Differentiate in place so pass k evaluates the k-th derivative.
            wd[i] = sympy.diff(wd[i], x)
        assert np.allclose(wtab[k], wsym)
def test_symbolic_quad():
    """Check basix's orthonormal polynomial set on the quadrilateral.

    The reference basis is the tensor product of 1-D Gram-Schmidt bases in
    x and y; every mixed derivative with kx + ky < nderiv is compared.
    """
    n = 5
    nderiv = 5
    idx = basix.index
    x = sympy.Symbol("x")
    y = sympy.Symbol("y")
    # Tensor-product reference basis of (n + 1)**2 polynomials.
    w = [wx * wy for wx in P_interval(n, x) for wy in P_interval(n, y)]
    m = (n + 1)**2
    cell = basix.CellType.quadrilateral
    pts0 = basix.create_lattice(cell, 2, basix.LatticeType.equispaced, True)
    wtab = basix._basixcpp.tabulate_polynomial_set(cell, n, nderiv, pts0)
    for kx in range(nderiv):
        for ky in range(0, nderiv - kx):
            wsym = np.zeros_like(wtab[0])
            for i in range(m):
                wd = sympy.diff(w[i], x, kx, y, ky)
                for j, p in enumerate(pts0):
                    wsym[j, i] = wd.subs([(x, p[0]), (y, p[1])])
            assert np.allclose(wtab[idx(kx, ky)], wsym)
| 1,514 | 0 | 69 |
291e5f10a8abe29a41b76b7aa9d6af7ab6c029b3 | 114 | py | Python | libalgopy/common/enums/binary_heap_type.py | PotapenkoOleg/libalgopy | ac625c0f874918c1967218c302c6fcb200db0271 | [
"MIT"
] | null | null | null | libalgopy/common/enums/binary_heap_type.py | PotapenkoOleg/libalgopy | ac625c0f874918c1967218c302c6fcb200db0271 | [
"MIT"
] | null | null | null | libalgopy/common/enums/binary_heap_type.py | PotapenkoOleg/libalgopy | ac625c0f874918c1967218c302c6fcb200db0271 | [
"MIT"
] | null | null | null | from enum import Enum
if __name__ == '__main__':
pass
| 10.363636 | 27 | 0.622807 | from enum import Enum
class BinaryHeapType(Enum):
    """Ordering discipline for a binary heap."""
    MIN = 1  # min-heap: smallest key at the root
    MAX = 2  # max-heap: largest key at the root
if __name__ == '__main__':
pass
| 0 | 30 | 23 |
b5c99aad807c9c047b440b75f64ba73da15b30d0 | 6,535 | py | Python | visualize.py | tiskw/gaussian-process-bootstrapping-layer | a1c20232ba286aa3245e6aab575a9aaaf274931f | [
"MIT"
] | null | null | null | visualize.py | tiskw/gaussian-process-bootstrapping-layer | a1c20232ba286aa3245e6aab575a9aaaf274931f | [
"MIT"
] | null | null | null | visualize.py | tiskw/gaussian-process-bootstrapping-layer | a1c20232ba286aa3245e6aab575a9aaaf274931f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Visualize results of Gaussian process bootstrapping.
"""
import argparse
import math
import os
import pathlib
import pandas as pd
import matplotlib.pyplot as mpl
def parse_args():
    """Build the command-line parser and return the parsed options."""
    parser = argparse.ArgumentParser(description=__doc__.strip())
    # (flag, extra kwargs) pairs; help strings match the CLI contract.
    for flag, kwargs in [
        ("--dirpath", dict(type=str, default="results", help="path to results directory")),
        ("--save", dict(action="store_true", help="save figures under output directory")),
        ("--output", dict(type=str, default="figures", help="output directory")),
    ]:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def read_result_tsv(path_tsv, length=300):
    """Parse one result TSV file.

    Args:
        path_tsv (pathlib.Path): log file where test rows start with
            "Test: " and carry a "name=value" accuracy in the first
            tab-separated field.
        length (int): the returned score list is zero-padded or truncated
            to exactly this many entries.

    Returns:
        (str, list of float): file basename without its suffix, and the
        per-epoch test accuracies.
    """
    def is_test_row(line):
        """
        Returns true if the given line is a row of test score.
        """
        return line.startswith("Test: ")

    def get_test_accuracy(line):
        """
        Returns test accuracy based on the supposition that the given `line` is a test score row.
        """
        return float(line.split("\t")[0].split("=")[-1].strip())

    # Compute base name.
    basename = path_tsv.name[:-len(path_tsv.suffix)]

    # Read file and get test accuracy scores.  The original left the file
    # handle to the garbage collector; close it deterministically instead.
    with path_tsv.open() as stream:
        scores = [get_test_accuracy(line) for line in stream if is_test_row(line)]

    # Normalize list length.
    if len(scores) < length: scores += [0.0] * (length - len(scores))
    else                   : scores = scores[:length]

    return (basename, scores)
def read_result_txt(path_txt, apply_sqrt=True):
    """Parse a variance dump file (one float per numeric line).

    Args:
        path_txt (pathlib.Path): text file to read.  Lines containing any
            character outside a float literal are skipped.
        apply_sqrt (bool): when True (default) return standard deviations
            (sqrt of each variance) instead of the raw variances.

    Returns:
        list of float
    """
    def is_variance_row(line):
        """
        Returns true if the given line holds a single float literal.
        """
        stripped = line.strip()
        # `all()` over an empty string is True, so blank lines must be
        # rejected explicitly or float("") below would raise ValueError.
        return bool(stripped) and all(char in ".0123456789e+-" for char in stripped)

    # Close the file deterministically (the original left it to the GC).
    with path_txt.open("rt") as stream:
        output = [float(line.strip()) for line in stream if is_variance_row(line)]
    if apply_sqrt: return list(map(math.sqrt, output))
    else         : return output
def plot_trends(scores, keys, header, args):
    """
    Plot test accuracy plotting for both with/without data augmentation.

    Args:
        scores (pd.DataFrame): accuracy trends in [0, 1], one column per
            experiment; columns suffixed "_da" are the data-augmented runs.
        keys (list): column names (without the "_da" suffix) to draw.
        header (str): title prefix, also used to build the output filename.
        args (argparse.Namespace): needs `save` (bool) and `output` (dir).
    """
    # Plot accuracies without data augmentation (scaled to percent).
    mpl.figure(figsize=(8, 5))
    mpl.title(f"{header}: without data augmentation")
    for key in keys:
        mpl.plot(100 * scores[key], "-o", label=key, lw=1, markersize=2, alpha=0.8)
    mpl.xlim(0, 150)
    mpl.ylim(60, 75)
    mpl.legend(loc="lower right", ncol=2)
    mpl.grid(linestyle="dotted")
    mpl.xlabel("Epoch")
    mpl.ylabel("Accuracy on CIFAR10 [%]")
    if args.save:
        filepath = "trend_" + header.lower().replace(" ", "_") + ".png"
        mpl.savefig(os.path.join(args.output, filepath))
    # Plot accuracies with data augmentation (the "_da" columns).
    mpl.figure(figsize=(8, 5))
    mpl.title(f"{header}: with data augmentation")
    for key in keys:
        mpl.plot(100 * scores[key + "_da"], "-o", label=key, lw=1, markersize=2, alpha=0.8)
    mpl.xlim(0, 300)
    mpl.ylim(75, 85)
    mpl.legend(loc="lower right", ncol=2)
    mpl.grid(linestyle="dotted")
    mpl.xlabel("Epoch")
    mpl.ylabel("Accuracy on CIFAR10 [%]")
    if args.save:
        filepath = "trend_" + header.lower().replace(" ", "_") + "_da.png"
        mpl.savefig(os.path.join(args.output, filepath))
def plot_topval(scores, keys, header, args):
    """
    Plot top accuracy of test data for both with/without data augmentation.

    Args:
        scores (pd.DataFrame): accuracy trends in [0, 1]; the best value of
            each column is plotted as one bar.
        keys (list): column names (without the "_da" suffix) to draw.
        header (str): title prefix, also used to build the output filename.
        args (argparse.Namespace): needs `save` (bool) and `output` (dir).
    """
    # Plot top accuracies without data augmentation (percent scale).
    ks = keys
    xs = list(range(len(ks)))
    ys = [100 * max(scores[key]) for key in ks]
    mpl.figure(figsize=(8, 5))
    mpl.title(f"{header}: top scores without data augmentation")
    mpl.bar(xs, ys, color=[f"C{x}" for x in xs])
    for x, y in enumerate(ys):
        # Annotate each bar with its value, matching the bar color.
        mpl.text(x - 0.25, y + 0.1, f"{y:.2f}", color=f"C{x}", fontweight="bold")
    mpl.xticks(xs, ks, rotation=20)
    mpl.ylim(70, 75)
    mpl.grid(linestyle="dotted")
    mpl.ylabel("Accuracy on CIFAR10 [%]")
    if args.save:
        filepath = "topscore_" + header.lower().replace(" ", "_") + ".png"
        mpl.savefig(os.path.join(args.output, filepath))
    # Plot top accuracies with data augmentation (the "_da" columns).
    ks = [k + "_da" for k in keys]
    xs = list(range(len(ks)))
    ys = [100 * max(scores[key]) for key in ks]
    mpl.figure(figsize=(8, 5))
    mpl.title(f"{header}: top scores with data augmentation")
    mpl.bar(xs, ys, color=[f"C{x}" for x in xs])
    for x, y in enumerate(ys):
        mpl.text(x - 0.25, y + 0.1, f"{y:.2f}", color=f"C{x}", fontweight="bold")
    mpl.xticks(xs, ks, rotation=20)
    mpl.ylim(80, 82.5)
    mpl.grid(linestyle="dotted")
    mpl.ylabel("Accuracy on CIFAR10 [%]")
    if args.save:
        filepath = "topscore_" + header.lower().replace(" ", "_") + "_da.png"
        mpl.savefig(os.path.join(args.output, filepath))
def plot_stdhist(std_plain, std_gpb_b, args):
    """
    Plot histogram of feature standard deviation.

    Args:
        std_plain (list of float): standard deviations of the plain model.
        std_gpb_b (list of float): standard deviations of the gpb_b model.
        args (argparse.Namespace): needs `save` (bool) and `output` (dir).
    """
    mpl.figure(figsize=(8, 5))
    mpl.title("Histogram of feature standard deviation (N = 10,000)")
    # Overlaid step histograms so both distributions stay visible.
    mpl.hist(std_plain, bins=101, histtype="step", label="plain")
    mpl.hist(std_gpb_b, bins=101, histtype="step", label="gpb_b")
    mpl.grid(linestyle="dotted")
    mpl.xlabel("Standard deviation of features")
    mpl.ylabel("Frequency")
    mpl.legend()
    if args.save:
        filepath = "histogram_standard_deviation.png"
        mpl.savefig(os.path.join(args.output, filepath))
def main(args):
    """
    Main function: load all result files under args.dirpath and draw the
    trend, top-score and standard-deviation figures (optionally saving them).
    """
    # Path to the result directory.
    dirpath = pathlib.Path(args.dirpath)
    # Read test accuracy scores: one DataFrame column per *.tsv result file.
    scores = pd.DataFrame({name:score for name, score in map(read_result_tsv, sorted(dirpath.glob("*.tsv")))})
    # Target base names: the compact main comparison and the full sweep.
    keys1 = ["baseline", "gpb_tmx", "gpb_xmb", "gpb_tmb"]
    keys2 = ["baseline", "gpb_txx", "gpb_xmx", "gpb_xxb", "gpb_xmb", "gpb_txb", "gpb_tmx", "gpb_tmb"]
    # Plot accuracy trend and top-score figures.
    plot_trends(scores, keys1, "Main results", args)
    plot_trends(scores, keys2, "Exhaustive results", args)
    plot_topval(scores, keys2, "Exhaustive results", args)
    # Read feature variances and convert them to standard deviations.
    std_plain = read_result_txt(dirpath / "variance_plain_da.txt", apply_sqrt=True)
    std_gpb_b = read_result_txt(dirpath / "variance_gpb_b_da.txt", apply_sqrt=True)
    # Plot the standard-deviation histogram and show all figures.
    plot_stdhist(std_plain, std_gpb_b, args)
    mpl.show()
if __name__ == "__main__":
main(parse_args())
# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
| 33.172589 | 110 | 0.636113 | #!/usr/bin/env python3
"""
Visualize results of Gaussian process bootstrapping.
"""
import argparse
import math
import os
import pathlib
import pandas as pd
import matplotlib.pyplot as mpl
def parse_args():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument("--dirpath", type=str, default="results", help="path to results directory")
parser.add_argument("--save", action="store_true", help="save figures under output directory")
parser.add_argument("--output", type=str, default="figures", help="output directory")
return parser.parse_args()
def read_result_tsv(path_tsv, length=300):
"""
Parse TSV file and returns basename of file and list of test accuracy.
"""
def is_test_row(line):
"""
Returns true if the given line is a row of test score.
"""
return line.startswith("Test: ")
def get_test_accuracy(line):
"""
Returns test accuracy based on the supposition that the given `line` is a test score row.
"""
return float(line.split("\t")[0].split("=")[-1].strip())
# Compute base name.
basename = path_tsv.name[:-len(path_tsv.suffix)]
# Read file and get test accuracy scores.
scores = [get_test_accuracy(line) for line in path_tsv.open() if is_test_row(line)]
# Normalize list length.
if len(scores) < length: scores += [0.0] * (length - len(scores))
else : scores = scores[:length]
return (basename, scores)
def read_result_txt(path_txt, apply_sqrt=True):
"""
Parse TXT file and returns a list of variance.
"""
def is_variance_row(line):
"""
Returns true if the given line is a row of test score.
"""
return all(char in ".0123456789e+-" for char in line.strip())
output = [float(line.strip()) for line in path_txt.open("rt") if is_variance_row(line)]
if apply_sqrt: return list(map(math.sqrt, output))
else : return output
def plot_trends(scores, keys, header, args):
"""
Plot test accuracy plotting for both with/without data augmentation.
"""
# Plot accuracies without data augmentation.
mpl.figure(figsize=(8, 5))
mpl.title(f"{header}: without data augmentation")
for key in keys:
mpl.plot(100 * scores[key], "-o", label=key, lw=1, markersize=2, alpha=0.8)
mpl.xlim(0, 150)
mpl.ylim(60, 75)
mpl.legend(loc="lower right", ncol=2)
mpl.grid(linestyle="dotted")
mpl.xlabel("Epoch")
mpl.ylabel("Accuracy on CIFAR10 [%]")
if args.save:
filepath = "trend_" + header.lower().replace(" ", "_") + ".png"
mpl.savefig(os.path.join(args.output, filepath))
# Plot accuracies with data augmentation.
mpl.figure(figsize=(8, 5))
mpl.title(f"{header}: with data augmentation")
for key in keys:
mpl.plot(100 * scores[key + "_da"], "-o", label=key, lw=1, markersize=2, alpha=0.8)
mpl.xlim(0, 300)
mpl.ylim(75, 85)
mpl.legend(loc="lower right", ncol=2)
mpl.grid(linestyle="dotted")
mpl.xlabel("Epoch")
mpl.ylabel("Accuracy on CIFAR10 [%]")
if args.save:
filepath = "trend_" + header.lower().replace(" ", "_") + "_da.png"
mpl.savefig(os.path.join(args.output, filepath))
def plot_topval(scores, keys, header, args):
"""
Plot top accuracy of test data for both with/without data augmentation.
"""
# Plot top accuracies without data augmentation.
ks = keys
xs = list(range(len(ks)))
ys = [100 * max(scores[key]) for key in ks]
mpl.figure(figsize=(8, 5))
mpl.title(f"{header}: top scores without data augmentation")
mpl.bar(xs, ys, color=[f"C{x}" for x in xs])
for x, y in enumerate(ys):
mpl.text(x - 0.25, y + 0.1, f"{y:.2f}", color=f"C{x}", fontweight="bold")
mpl.xticks(xs, ks, rotation=20)
mpl.ylim(70, 75)
mpl.grid(linestyle="dotted")
mpl.ylabel("Accuracy on CIFAR10 [%]")
if args.save:
filepath = "topscore_" + header.lower().replace(" ", "_") + ".png"
mpl.savefig(os.path.join(args.output, filepath))
# Plot top accuracies with data augmentation.
ks = [k + "_da" for k in keys]
xs = list(range(len(ks)))
ys = [100 * max(scores[key]) for key in ks]
mpl.figure(figsize=(8, 5))
mpl.title(f"{header}: top scores with data augmentation")
mpl.bar(xs, ys, color=[f"C{x}" for x in xs])
for x, y in enumerate(ys):
mpl.text(x - 0.25, y + 0.1, f"{y:.2f}", color=f"C{x}", fontweight="bold")
mpl.xticks(xs, ks, rotation=20)
mpl.ylim(80, 82.5)
mpl.grid(linestyle="dotted")
mpl.ylabel("Accuracy on CIFAR10 [%]")
if args.save:
filepath = "topscore_" + header.lower().replace(" ", "_") + "_da.png"
mpl.savefig(os.path.join(args.output, filepath))
def plot_stdhist(std_plain, std_gpb_b, args):
"""
Plot histogram of feature standard deviation.
"""
mpl.figure(figsize=(8, 5))
mpl.title("Histogram of feature standard deviation (N = 10,000)")
mpl.hist(std_plain, bins=101, histtype="step", label="plain")
mpl.hist(std_gpb_b, bins=101, histtype="step", label="gpb_b")
mpl.grid(linestyle="dotted")
mpl.xlabel("Standard deviation of features")
mpl.ylabel("Frequency")
mpl.legend()
if args.save:
filepath = "histogram_standard_deviation.png"
mpl.savefig(os.path.join(args.output, filepath))
def main(args):
"""
Main function.
"""
# Path to the result directory.
dirpath = pathlib.Path(args.dirpath)
# Read test accuracy scores.
scores = pd.DataFrame({name:score for name, score in map(read_result_tsv, sorted(dirpath.glob("*.tsv")))})
# Target base names.
keys1 = ["baseline", "gpb_tmx", "gpb_xmb", "gpb_tmb"]
keys2 = ["baseline", "gpb_txx", "gpb_xmx", "gpb_xxb", "gpb_xmb", "gpb_txb", "gpb_tmx", "gpb_tmb"]
# Plot figures and show them.
plot_trends(scores, keys1, "Main results", args)
plot_trends(scores, keys2, "Exhaustive results", args)
plot_topval(scores, keys2, "Exhaustive results", args)
# Read test accuracy scores.
std_plain = read_result_txt(dirpath / "variance_plain_da.txt", apply_sqrt=True)
std_gpb_b = read_result_txt(dirpath / "variance_gpb_b_da.txt", apply_sqrt=True)
# Plot figures and show them.
plot_stdhist(std_plain, std_gpb_b, args)
mpl.show()
if __name__ == "__main__":
main(parse_args())
# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
| 0 | 0 | 0 |
29c0d8a6d52b7b776decec9e70f2971d2f22cee9 | 222 | py | Python | exercises/01.python-for-everybody/chapter06/exercise04.py | Fabricio-Lopees/computer-science-learning | e8cfcd468f9fdbaa1cacf803d0dade04a99eb19a | [
"MIT"
] | null | null | null | exercises/01.python-for-everybody/chapter06/exercise04.py | Fabricio-Lopees/computer-science-learning | e8cfcd468f9fdbaa1cacf803d0dade04a99eb19a | [
"MIT"
] | null | null | null | exercises/01.python-for-everybody/chapter06/exercise04.py | Fabricio-Lopees/computer-science-learning | e8cfcd468f9fdbaa1cacf803d0dade04a99eb19a | [
"MIT"
] | null | null | null | text = input('Enter a text: ')
character = input('Enter a character that you can search in "'+text+'": ')
result = text.count(character)
print('The character "'+character+'" appears '+str(result)+' times in "'+text+'".') | 37 | 83 | 0.662162 | text = input('Enter a text: ')
character = input('Enter a character that you can search in "'+text+'": ')
result = text.count(character)
print('The character "'+character+'" appears '+str(result)+' times in "'+text+'".') | 0 | 0 | 0 |
351f0855d402f27478b354263c225351c8121de8 | 5,807 | py | Python | test.py | chenrz925/pytorch_fft | 459cb20291717398df2def466faa2f3495b2454f | [
"Apache-2.0"
] | 322 | 2017-05-25T08:42:23.000Z | 2022-03-28T02:32:25.000Z | test.py | bloodmage/pytorch_fft | 34057d19563c939cc49b116ff8570f95747e552c | [
"Apache-2.0"
] | 37 | 2017-06-16T20:05:53.000Z | 2021-03-11T08:04:09.000Z | test.py | bloodmage/pytorch_fft | 34057d19563c939cc49b116ff8570f95747e552c | [
"Apache-2.0"
] | 54 | 2017-05-26T02:20:54.000Z | 2022-01-19T12:40:18.000Z | import torch
torch.manual_seed(0)
# from _ext import th_fft
import pytorch_fft.fft as cfft
import pytorch_fft.fft.autograd as afft
import numpy as np
import numpy.fft as nfft
if __name__ == "__main__":
    # The whole suite exercises CUDA kernels, so skip without a GPU.
    if torch.cuda.is_available():
        # numpy references for the 3-D transforms: transform the last three
        # axes, keeping axis 0 as the batch dimension.
        nfft3 = lambda x: nfft.fftn(x,axes=(1,2,3))
        nifft3 = lambda x: nfft.ifftn(x,axes=(1,2,3))
        # Complex-to-complex: pair each cuda transform with its numpy
        # reference and the matching inverse pair, then run every 4-tuple.
        cfs = [cfft.fft, cfft.fft2, cfft.fft3]
        nfs = [nfft.fft, nfft.fft2, nfft3]
        cifs = [cfft.ifft, cfft.ifft2, cfft.ifft3]
        nifs = [nfft.ifft, nfft.ifft2, nifft3]
        for args in zip(cfs, nfs, cifs, nifs):
            test_c2c(*args)
        # Real-to-complex transforms, same pairing scheme.
        nrfft3 = lambda x: nfft.rfftn(x,axes=(1,2,3))
        nirfft3 = lambda x: nfft.irfftn(x,axes=(1,2,3))
        cfs = [cfft.rfft, cfft.rfft2, cfft.rfft3]
        nfs = [nfft.rfft, nfft.rfft2, nrfft3]
        cifs = [cfft.irfft, cfft.irfft2, cfft.irfft3]
        nifs = [nfft.irfft, nfft.irfft2, nirfft3]
        for args in zip(cfs, nfs, cifs, nifs):
            test_r2c(*args)
        test_expand()
        # Autograd gradient checks for every transform variant.
        test_fft_gradcheck()
        test_ifft_gradcheck()
        test_fft2d_gradcheck()
        test_ifft2d_gradcheck()
        test_fft3d_gradcheck()
        test_ifft3d_gradcheck()
        test_rfft_gradcheck()
        test_irfft_gradcheck()
        test_rfft2d_gradcheck()
        test_irfft2d_gradcheck()
        test_rfft3d_gradcheck()
        test_irfft3d_gradcheck()
    else:
        print("Cuda not available, cannot test.")
| 32.623596 | 93 | 0.6456 | import torch
torch.manual_seed(0)
# from _ext import th_fft
import pytorch_fft.fft as cfft
import pytorch_fft.fft.autograd as afft
import numpy as np
import numpy.fft as nfft
def run_c2c(x, z, _f1, _f2, _if1, _if2, atol):
    """Check a complex-to-complex CUDA FFT against its numpy reference.

    Args:
        x, z: real and imaginary input parts (CUDA tensors).
        _f1, _if1: cuda forward/inverse transforms returning (real, imag).
        _f2, _if2: numpy forward/inverse reference transforms.
        atol: absolute tolerance for np.allclose.

    NOTE: _f2 only sees the real part; callers pass z == 0.
    """
    y1, y2 = _f1(x, z)
    x_np = x.cpu().numpy().squeeze()
    y_np = _f2(x_np)
    assert np.allclose(y1.cpu().numpy(), y_np.real, atol=atol)
    assert np.allclose(y2.cpu().numpy(), y_np.imag, atol=atol)
    # Round-trip: the inverse transform must recover the input.
    x0, z0 = _if1(y1, y2)
    x0_np = _if2(y_np)
    assert np.allclose(x0.cpu().numpy(), x0_np.real, atol=atol)
    assert np.allclose(z0.cpu().numpy(), x0_np.imag, atol=atol)
def test_c2c(_f1, _f2, _if1, _if2):
    """Run run_c2c on a random (3, 4, 5, 7) batch, in single then double
    precision (tolerances 1e-6 and 1e-14 respectively)."""
    batch = 3
    nch = 4
    n = 5
    m = 7
    x = torch.randn(batch*nch*n*m).view(batch, nch, n, m).cuda()
    z = torch.zeros(batch, nch, n, m).cuda()  # zero imaginary part
    run_c2c(x, z, _f1, _f2, _if1, _if2, 1e-6)
    run_c2c(x.double(), z.double(), _f1, _f2, _if1, _if2, 1e-14)
def run_r2c(x, _f1, _f2, _if1, _if2, atol):
    """Check a real-to-complex CUDA FFT against its numpy reference.

    Args:
        x: real input (CUDA tensor).
        _f1: cuda forward transform returning (real, imag).
        _if1: cuda inverse transform returning a real tensor.
        _f2, _if2: numpy forward/inverse reference transforms.
        atol: absolute tolerance for np.allclose.
    """
    y1, y2 = _f1(x)
    x_np = x.cpu().numpy().squeeze()
    y_np = _f2(x_np)
    assert np.allclose(y1.cpu().numpy(), y_np.real, atol=atol)
    assert np.allclose(y2.cpu().numpy(), y_np.imag, atol=atol)
    # Round-trip: the inverse transform must recover the real input.
    x0 = _if1(y1, y2)
    x0_np = _if2(y_np)
    assert np.allclose(x0.cpu().numpy(), x0_np.real, atol=atol)
def test_r2c(_f1, _f2, _if1, _if2):
    """Run run_r2c on a random (3, 2, 2, 4) batch, in single then double
    precision (tolerances 1e-6 and 1e-14 respectively)."""
    batch = 3
    nch = 2
    n = 2
    m = 4
    x = torch.randn(batch*nch*n*m).view(batch, nch, n, m).cuda()
    run_r2c(x, _f1, _f2, _if1, _if2, 1e-6)
    run_r2c(x.double(), _f1, _f2, _if1, _if2, 1e-14)
def test_expand():
    """Check cfft.expand rebuilds the full fft spectrum from rfft's half
    spectrum, for both even (4) and odd (5) last dimensions."""
    X = torch.randn(2,2,4,4).cuda().double()
    zeros = torch.zeros(2,2,4,4).cuda().double()
    r1, r2 = cfft.rfft2(X)
    c1, c2 = cfft.fft2(X, zeros)
    assert np.allclose(cfft.expand(r1).cpu().numpy(), c1.cpu().numpy())
    assert np.allclose(cfft.expand(r2, imag=True).cpu().numpy(), c2.cpu().numpy())
    r1, r2 = cfft.rfft3(X)
    c1, c2 = cfft.fft3(X, zeros)
    assert np.allclose(cfft.expand(r1).cpu().numpy(), c1.cpu().numpy())
    assert np.allclose(cfft.expand(r2, imag=True).cpu().numpy(), c2.cpu().numpy())
    # Odd last dimension requires the explicit odd=True flag.
    X = torch.randn(2,2,5,5).cuda().double()
    zeros = torch.zeros(2,2,5,5).cuda().double()
    r1, r2 = cfft.rfft3(X)
    c1, c2 = cfft.fft3(X, zeros)
    assert np.allclose(cfft.expand(r1, odd=True).cpu().numpy(), c1.cpu().numpy())
    assert np.allclose(cfft.expand(r2, imag=True, odd=True).cpu().numpy(), c2.cpu().numpy())
def create_real_var(*args):
    """Return a 1-tuple holding one random double CUDA tensor wrapped in the
    legacy Variable API with requires_grad=True (input for gradcheck)."""
    return (torch.autograd.Variable(torch.randn(*args).double().cuda(), requires_grad=True),)
def create_complex_var(*args):
    """Return a (real, imag) pair of independent random double CUDA
    Variables with requires_grad=True (input for gradcheck)."""
    return (torch.autograd.Variable(torch.randn(*args).double().cuda(), requires_grad=True),
            torch.autograd.Variable(torch.randn(*args).double().cuda(), requires_grad=True))
def test_fft_gradcheck():
    """Numerically verify gradients of the 1-D complex FFT."""
    assert torch.autograd.gradcheck(afft.Fft(), create_complex_var(5, 10))


def test_ifft_gradcheck():
    """Numerically verify gradients of the 1-D complex inverse FFT."""
    assert torch.autograd.gradcheck(afft.Ifft(), create_complex_var(5, 10))


def test_fft2d_gradcheck():
    """Numerically verify gradients of the 2-D complex FFT."""
    assert torch.autograd.gradcheck(afft.Fft2d(), create_complex_var(5, 5, 5))


def test_ifft2d_gradcheck():
    """Numerically verify gradients of the 2-D complex inverse FFT."""
    assert torch.autograd.gradcheck(afft.Ifft2d(), create_complex_var(5, 5, 5))


def test_fft3d_gradcheck():
    """Numerically verify gradients of the 3-D complex FFT."""
    assert torch.autograd.gradcheck(afft.Fft3d(), create_complex_var(5, 3, 3, 3))


def test_ifft3d_gradcheck():
    """Numerically verify gradients of the 3-D complex inverse FFT."""
    assert torch.autograd.gradcheck(afft.Ifft3d(), create_complex_var(5, 3, 3, 3))
def test_rfft_gradcheck():
    """Numerically verify gradients of the 1-D real FFT for even and odd lengths."""
    for shape in ((5, 10), (5, 11)):
        assert torch.autograd.gradcheck(afft.Rfft(), create_real_var(*shape))


def test_rfft2d_gradcheck():
    """Numerically verify gradients of the 2-D real FFT for even and odd sizes."""
    for shape in ((5, 6, 6), (5, 5, 5)):
        assert torch.autograd.gradcheck(afft.Rfft2d(), create_real_var(*shape))


def test_rfft3d_gradcheck():
    """Numerically verify gradients of the 3-D real FFT for even and odd sizes."""
    for shape in ((5, 4, 4, 4), (5, 3, 3, 3)):
        assert torch.autograd.gradcheck(afft.Rfft3d(), create_real_var(*shape))
def test_irfft_gradcheck():
    """Numerically verify gradients of the 1-D inverse real FFT."""
    assert torch.autograd.gradcheck(afft.Irfft(), create_complex_var(5, 11))


def test_irfft2d_gradcheck():
    """Numerically verify gradients of the 2-D inverse real FFT."""
    assert torch.autograd.gradcheck(afft.Irfft2d(), create_complex_var(5, 5, 5))


def test_irfft3d_gradcheck():
    """Numerically verify gradients of the 3-D inverse real FFT."""
    assert torch.autograd.gradcheck(afft.Irfft3d(), create_complex_var(5, 3, 3, 3))
# Manual test driver: compares the cfft CUDA transforms against the
# reference implementations in `nfft` (presumably numpy.fft -- confirm the
# module alias at the top of the file), then runs every gradcheck test.
# Requires a CUDA device.
if __name__ == "__main__":
    if torch.cuda.is_available():
        # 3-D references restricted to the last three axes so the batch
        # dimension (axis 0) is left untouched.
        nfft3 = lambda x: nfft.fftn(x,axes=(1,2,3))
        nifft3 = lambda x: nfft.ifftn(x,axes=(1,2,3))
        # complex-to-complex: lists are position-paired (1-D, 2-D, 3-D).
        cfs = [cfft.fft, cfft.fft2, cfft.fft3]
        nfs = [nfft.fft, nfft.fft2, nfft3]
        cifs = [cfft.ifft, cfft.ifft2, cfft.ifft3]
        nifs = [nfft.ifft, nfft.ifft2, nifft3]
        for args in zip(cfs, nfs, cifs, nifs):
            test_c2c(*args)
        # real-to-complex: same position-pairing for the rfft family.
        nrfft3 = lambda x: nfft.rfftn(x,axes=(1,2,3))
        nirfft3 = lambda x: nfft.irfftn(x,axes=(1,2,3))
        cfs = [cfft.rfft, cfft.rfft2, cfft.rfft3]
        nfs = [nfft.rfft, nfft.rfft2, nrfft3]
        cifs = [cfft.irfft, cfft.irfft2, cfft.irfft3]
        nifs = [nfft.irfft, nfft.irfft2, nirfft3]
        for args in zip(cfs, nfs, cifs, nifs):
            test_r2c(*args)
        test_expand()
        # gradient checks for every autograd Function wrapper
        test_fft_gradcheck()
        test_ifft_gradcheck()
        test_fft2d_gradcheck()
        test_ifft2d_gradcheck()
        test_fft3d_gradcheck()
        test_ifft3d_gradcheck()
        test_rfft_gradcheck()
        test_irfft_gradcheck()
        test_rfft2d_gradcheck()
        test_irfft2d_gradcheck()
        test_rfft3d_gradcheck()
        test_irfft3d_gradcheck()
    else:
        print("Cuda not available, cannot test.")
| 3,893 | 0 | 437 |
5c94c4181bfdc440bbcd269985eebfcc3512956e | 1,705 | py | Python | one_fm/accommodation/doctype/accommodation_unit/accommodation_unit.py | askmetoo/One-FM | c93ed63695a3e62ee8129bd9adf563116b749030 | [
"MIT"
] | 16 | 2021-06-14T23:56:47.000Z | 2022-03-22T12:05:06.000Z | one_fm/accommodation/doctype/accommodation_unit/accommodation_unit.py | askmetoo/One-FM | c93ed63695a3e62ee8129bd9adf563116b749030 | [
"MIT"
] | 119 | 2020-08-17T16:27:45.000Z | 2022-03-28T12:42:56.000Z | one_fm/accommodation/doctype/accommodation_unit/accommodation_unit.py | askmetoo/One-FM | c93ed63695a3e62ee8129bd9adf563116b749030 | [
"MIT"
] | 12 | 2021-05-16T13:35:40.000Z | 2022-02-21T12:41:04.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
| 35.520833 | 113 | 0.781232 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class AccommodationUnit(Document):
    """Frappe document controller for a single Accommodation Unit.

    Hooks into the Frappe document lifecycle (``validate``,
    ``before_insert``, ``autoname``) to derive the unit's title, code and
    document name from its parent Accommodation, floor and type.
    """
    def validate(self):
        # Lifecycle hook: refresh the display title on every save.
        self.set_title()
    def before_insert(self):
        # Lifecycle hook: enforce the parent's unit-count limit before creation.
        self.validate_no_of_accommodation_unit()
    def validate_no_of_accommodation_unit(self):
        # Block creation once the parent Accommodation already holds its
        # configured maximum number of units.
        allowed_no_of_unit = frappe.db.get_value('Accommodation', self.accommodation, 'total_no_of_accommodation_unit')
        if frappe.db.count('Accommodation Unit', {'accommodation': self.accommodation}) >= allowed_no_of_unit:
            frappe.throw(_("Only {0} Accommodation Unit is allowed in Accommodation {1}"
                .format(allowed_no_of_unit, self.accommodation_name)))
    def set_title(self):
        # Title format: "<accommodation name>-<type>-<floor name> Floor".
        self.title = '-'.join([self.accommodation_name, self.type, self.floor_name+' Floor'])
    def autoname(self):
        # Document name: <accommodation id><2-digit floor><unit code>.
        self.set_accommodation_unit_code()
        self.name = self.accommodation+str(int(self.floor)).zfill(2)+self.accommodation_unit_code
    def set_accommodation_unit_code(self):
        # Derive the code once: 2-digit floor + next per-floor sequence digit.
        if not self.accommodation_unit_code:
            self.accommodation_unit_code = str(int(self.floor)).zfill(2)+get_latest_accommodation_unit_code(self)
def get_latest_accommodation_unit_code(doc):
    """Return the next unit-code digit for the document's accommodation/floor.

    Looks up the highest existing ``accommodation_unit_code`` for the same
    accommodation and floor, adds one (starting at 1 when the floor has no
    units yet) and returns only its last digit as a string, matching the
    single-character suffix scheme used by ``autoname``.
    """
    query = """
        select
            accommodation_unit_code+1
        from
            `tabAccommodation Unit`
        where
            accommodation=%s and floor=%s
        order by
            accommodation_unit_code desc limit 1
    """
    # Pass values as query parameters instead of str.format: the previous
    # string interpolation was vulnerable to SQL injection and broke on
    # values containing quotes.
    accommodation_unit_code = frappe.db.sql(query, (doc.accommodation, doc.floor))
    new_accommodation_unit_code = accommodation_unit_code[0][0] if accommodation_unit_code else 1
    return str(int(new_accommodation_unit_code))[-1]
| 1,260 | 13 | 189 |
3f4cfc16abf8cfe74758513ff7729c9e659a4a30 | 2,501 | py | Python | PDFer/PDFer/PDFer.py | lucadalbosco/PDFer | e7ee45691044d50bb8e72258191e07e457dc7f50 | [
"MIT"
] | null | null | null | PDFer/PDFer/PDFer.py | lucadalbosco/PDFer | e7ee45691044d50bb8e72258191e07e457dc7f50 | [
"MIT"
] | null | null | null | PDFer/PDFer/PDFer.py | lucadalbosco/PDFer | e7ee45691044d50bb8e72258191e07e457dc7f50 | [
"MIT"
] | null | null | null | import pdf2txt
#import time
import csv
from PySide import QtGui
import os
import os.path
# Batch-extracts surface-roughness values (Ra/Rz/Rt) from measurement
# report PDFs in a user-chosen folder and writes them to one CSV file.
# (Python 2 script: uses `print` statements and raw_input.)
# scratch note ("prove" = test)
my_dir = QtGui.QFileDialog.getExistingDirectory(None,'Scegli una cartella','\\srveuro\Ufficio_Tecnico\VALIDAZIONE\S10')
# Recursively collect every PDF under the chosen directory.
file_list = []
for dirpath, dirnames, filenames in os.walk(my_dir):
    for filename in [f for f in filenames if f.endswith(".pdf")]:
        file_list.append(os.path.join(dirpath, filename))
# Ask the user for the output CSV name (without extension).
nome_file = raw_input('Nome del file CSV:')
# --- dead experiment code kept from an earlier CSV-based workflow ---
#nome=[]
#nome.append('Ore')
#
#my_dir = QtGui.QFileDialog.getExistingDirectory(None,'Scegli una cartella','D:\Documenti\Lavoro\Eurocoating\Logs')
#
#
## Get folder path containing text files
#file_list = glob.glob(my_dir + '/*.csv')
#
#
#lunghezza = 5 #len(file_list)
lunghezza = len(file_list)
# One row of 7 fields per PDF: date, name, norm, Ra, Rz, Rt, source path.
excel = [[0 for x in range(7)] for y in range(lunghezza)]
for f in range(0,lunghezza):
#for f in range(0,len(file_list)):
    print f
    reportpdf = file_list[f]
    # Convert the PDF to a temporary text file, then parse it line by line.
    reporttxt = my_dir + '\\temp.txt'
    pdf2txt.main(['', '-o', reporttxt, reportpdf])
    data_raw = []
    testo = open(reporttxt, 'r')
    with testo as myfile:
        for line in myfile:
            data_raw.append(line)
    #for i in range(0,len(data_raw)):
    #    print i, data_raw[i]
    testo.close()
    ############## Data ##############
    # Line 3 of the report holds the date (strip the trailing newline).
    excel[f][0] = data_raw[2][:-1]
    ############## Nome #############
    # The sample name sits two lines after the '[mm]' marker.
    index = data_raw.index('[mm]\n')
    excel[f][1] = data_raw[index+2][:-1]
    ############## Norma #############
    # The value 3 lines above 'GAUSS' selects the measurement standard.
    index = data_raw.index('GAUSS\n')
    if int(data_raw[index-3][:-1]) == 10:
        excel[f][2] = 'EC'
    if int(data_raw[index-3][:-1]) == 5:
        excel[f][2] = 'NORMA'
    ############## Ra #############
    # Ra/Rz/Rt values follow the 'Ra' marker at fixed offsets; the last
    # three characters (unit + newline) are stripped before float().
    index = data_raw.index('Ra\n')
    excel[f][3] = float(data_raw[index+10][:-3])
    ############## Rz #############
    excel[f][4] = float(data_raw[index+11][:-3])
    ############## Rt #############
    excel[f][5] = float(data_raw[index+12][:-3])
    ############## Nome file #############
    excel[f][6] = reportpdf
# Write all collected rows as a semicolon-delimited CSV in the same folder.
nome_csv = my_dir + "\\" + nome_file +".csv"
with open(nome_csv, "wb") as c:
    writer = csv.writer(c, delimiter=";")
    writer.writerows(excel)
print "Finito!" | 22.132743 | 120 | 0.497001 | import pdf2txt
#import time
import csv
from PySide import QtGui
import os
import os.path
# prove prove
my_dir = QtGui.QFileDialog.getExistingDirectory(None,'Scegli una cartella','\\srveuro\Ufficio_Tecnico\VALIDAZIONE\S10')
file_list = []
for dirpath, dirnames, filenames in os.walk(my_dir):
for filename in [f for f in filenames if f.endswith(".pdf")]:
file_list.append(os.path.join(dirpath, filename))
nome_file = raw_input('Nome del file CSV:')
#nome=[]
#nome.append('Ore')
#
#my_dir = QtGui.QFileDialog.getExistingDirectory(None,'Scegli una cartella','D:\Documenti\Lavoro\Eurocoating\Logs')
#
#
## Get folder path containing text files
#file_list = glob.glob(my_dir + '/*.csv')
#
#
#lunghezza = 5 #len(file_list)
lunghezza = len(file_list)
excel = [[0 for x in range(7)] for y in range(lunghezza)]
for f in range(0,lunghezza):
#for f in range(0,len(file_list)):
print f
reportpdf = file_list[f]
reporttxt = my_dir + '\\temp.txt'
pdf2txt.main(['', '-o', reporttxt, reportpdf])
data_raw = []
testo = open(reporttxt, 'r')
with testo as myfile:
for line in myfile:
data_raw.append(line)
#for i in range(0,len(data_raw)):
# print i, data_raw[i]
testo.close()
############## Data ##############
excel[f][0] = data_raw[2][:-1]
############## Nome #############
index = data_raw.index('[mm]\n')
excel[f][1] = data_raw[index+2][:-1]
############## Norma #############
index = data_raw.index('GAUSS\n')
if int(data_raw[index-3][:-1]) == 10:
excel[f][2] = 'EC'
if int(data_raw[index-3][:-1]) == 5:
excel[f][2] = 'NORMA'
############## Ra #############
index = data_raw.index('Ra\n')
excel[f][3] = float(data_raw[index+10][:-3])
############## Rz #############
excel[f][4] = float(data_raw[index+11][:-3])
############## Rt #############
excel[f][5] = float(data_raw[index+12][:-3])
############## Nome file #############
excel[f][6] = reportpdf
nome_csv = my_dir + "\\" + nome_file +".csv"
with open(nome_csv, "wb") as c:
writer = csv.writer(c, delimiter=";")
writer.writerows(excel)
print "Finito!" | 0 | 0 | 0 |
b98c9e42fb41fb4d56962db2e910d1d60853fcb4 | 5,074 | py | Python | stellar_sdk/account.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | null | null | null | stellar_sdk/account.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | 27 | 2022-01-12T10:55:38.000Z | 2022-03-28T01:38:24.000Z | stellar_sdk/account.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | 2 | 2021-12-02T12:42:03.000Z | 2021-12-07T20:53:10.000Z | from typing import Any, Dict, List, Optional, Union
from .muxed_account import MuxedAccount
from .sep.ed25519_public_key_signer import Ed25519PublicKeySigner
from .type_checked import type_checked
__all__ = ["Account"]
@type_checked
class Account:
"""The :class:`Account` object represents a single
account on the Stellar network and its sequence number.
Account tracks the sequence number as it is used
by :class:`TransactionBuilder <stellar_sdk.transaction_builder.TransactionBuilder>`.
Normally, you can get an :class:`Account` instance through :func:`stellar_sdk.server.Server.load_account`
or :func:`stellar_sdk.server_async.ServerAsync.load_account`.
An example::
from stellar_sdk import Keypair, Server
server = Server(horizon_url="https://horizon-testnet.stellar.org")
source = Keypair.from_secret("SBFZCHU5645DOKRWYBXVOXY2ELGJKFRX6VGGPRYUWHQ7PMXXJNDZFMKD")
# `account` can also be a muxed account
source_account = server.load_account(account=source.public_key)
See `Accounts <https://developers.stellar.org/docs/glossary/accounts/>`__ for
more information.
:param account: Account Id of the
account (ex. ``"GB3KJPLFUYN5VL6R3GU3EGCGVCKFDSD7BEDX42HWG5BWFKB3KQGJJRMA"``)
or muxed account (ex. ``"MBZSQ3YZMZEWL5ZRCEQ5CCSOTXCFCMKDGFFP4IEQN2KN6LCHCLI46AAAAAAAAAAE2L2QE"``)
:param sequence: Current sequence number of the account.
:param raw_data: Raw horizon response data.
"""
@property
def universal_account_id(self) -> str:
"""Get the universal account id,
if `account` is ed25519 public key, it will return ed25519
public key (ex. ``"GDGQVOKHW4VEJRU2TETD6DBRKEO5ERCNF353LW5WBFW3JJWQ2BRQ6KDD"``),
otherwise it will return muxed
account (ex. ``"MAAAAAAAAAAAJURAAB2X52XFQP6FBXLGT6LWOOWMEXWHEWBDVRZ7V5WH34Y22MPFBHUHY"``)
.. note::
SEP-0023 support is not enabled by default, if you want to enable it,
please set `ENABLE_SEP_0023` to ``true`` in the environment variable,
on Linux and MacOS, generally you can use ``export ENABLE_SEP_0023=true`` to set it.
:raises: :exc:`FeatureNotEnabledError <stellar_sdk.exceptions.FeatureNotEnabledError>`:
if `account_id` is a muxed account and `ENABLE_SEP_0023` is not set to ``true``.
"""
return self.account.universal_account_id
def increment_sequence_number(self) -> None:
"""Increments sequence number in this object by one."""
self.sequence += 1
@property
def load_ed25519_public_key_signers(self) -> List[Ed25519PublicKeySigner]:
"""Load ed25519 public key signers."""
if self.raw_data is None:
raise ValueError('"raw_data" is None, unable to get signers from it.')
signers = self.raw_data["signers"]
ed25519_public_key_signers = []
for signer in signers:
if signer["type"] == "ed25519_public_key":
ed25519_public_key_signers.append(
Ed25519PublicKeySigner(signer["key"], signer["weight"])
)
return ed25519_public_key_signers
@type_checked
| 38.439394 | 109 | 0.669294 | from typing import Any, Dict, List, Optional, Union
from .muxed_account import MuxedAccount
from .sep.ed25519_public_key_signer import Ed25519PublicKeySigner
from .type_checked import type_checked
__all__ = ["Account"]
@type_checked
class Account:
    """The :class:`Account` object represents a single
    account on the Stellar network and its sequence number.
    Account tracks the sequence number as it is used
    by :class:`TransactionBuilder <stellar_sdk.transaction_builder.TransactionBuilder>`.
    Normally, you can get an :class:`Account` instance through :func:`stellar_sdk.server.Server.load_account`
    or :func:`stellar_sdk.server_async.ServerAsync.load_account`.
    An example::
        from stellar_sdk import Keypair, Server
        server = Server(horizon_url="https://horizon-testnet.stellar.org")
        source = Keypair.from_secret("SBFZCHU5645DOKRWYBXVOXY2ELGJKFRX6VGGPRYUWHQ7PMXXJNDZFMKD")
        # `account` can also be a muxed account
        source_account = server.load_account(account=source.public_key)
    See `Accounts <https://developers.stellar.org/docs/glossary/accounts/>`__ for
    more information.
    :param account: Account Id of the
        account (ex. ``"GB3KJPLFUYN5VL6R3GU3EGCGVCKFDSD7BEDX42HWG5BWFKB3KQGJJRMA"``)
        or muxed account (ex. ``"MBZSQ3YZMZEWL5ZRCEQ5CCSOTXCFCMKDGFFP4IEQN2KN6LCHCLI46AAAAAAAAAAE2L2QE"``)
    :param sequence: Current sequence number of the account.
    :param raw_data: Raw horizon response data.
    """
    def __init__(
        self,
        account: Union[str, MuxedAccount],
        sequence: int,
        raw_data: Optional[Dict[str, Any]] = None,
    ) -> None:
        # Accept either a plain account-id string or a pre-built MuxedAccount.
        if isinstance(account, str):
            self.account: MuxedAccount = MuxedAccount.from_account(account)
        else:
            self.account = account
        self.sequence: int = sequence
        self.raw_data: Optional[Dict[str, Any]] = raw_data
    @property
    def universal_account_id(self) -> str:
        """Get the universal account id,
        if `account` is ed25519 public key, it will return ed25519
        public key (ex. ``"GDGQVOKHW4VEJRU2TETD6DBRKEO5ERCNF353LW5WBFW3JJWQ2BRQ6KDD"``),
        otherwise it will return muxed
        account (ex. ``"MAAAAAAAAAAAJURAAB2X52XFQP6FBXLGT6LWOOWMEXWHEWBDVRZ7V5WH34Y22MPFBHUHY"``)
        .. note::
            SEP-0023 support is not enabled by default, if you want to enable it,
            please set `ENABLE_SEP_0023` to ``true`` in the environment variable,
            on Linux and MacOS, generally you can use ``export ENABLE_SEP_0023=true`` to set it.
        :raises: :exc:`FeatureNotEnabledError <stellar_sdk.exceptions.FeatureNotEnabledError>`:
            if `account_id` is a muxed account and `ENABLE_SEP_0023` is not set to ``true``.
        """
        return self.account.universal_account_id
    def increment_sequence_number(self) -> None:
        """Increments sequence number in this object by one."""
        self.sequence += 1
    @property
    def thresholds(self):
        """Account thresholds parsed from the raw horizon response.

        :raises ValueError: if this instance was built without ``raw_data``.
        """
        if self.raw_data is None:
            raise ValueError('"raw_data" is None, unable to get thresholds from it.')
        return Thresholds(
            self.raw_data["thresholds"]["low_threshold"],
            self.raw_data["thresholds"]["med_threshold"],
            self.raw_data["thresholds"]["high_threshold"],
        )
    def load_ed25519_public_key_signers(self) -> List[Ed25519PublicKeySigner]:
        """Load ed25519 public key signers.

        :return: the signers of type ``ed25519_public_key`` found in ``raw_data``.
        :raises ValueError: if this instance was built without ``raw_data``.
        """
        if self.raw_data is None:
            raise ValueError('"raw_data" is None, unable to get signers from it.')
        signers = self.raw_data["signers"]
        ed25519_public_key_signers = []
        for signer in signers:
            if signer["type"] == "ed25519_public_key":
                ed25519_public_key_signers.append(
                    Ed25519PublicKeySigner(signer["key"], signer["weight"])
                )
        return ed25519_public_key_signers
    def __eq__(self, other: object) -> bool:
        # Two accounts are equal when both the account id and sequence match.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.account == other.account and self.sequence == other.sequence
    def __str__(self):
        return f"<Account [account={self.account}, sequence={self.sequence}]>"
@type_checked
class Thresholds:
    """Operation thresholds (low/medium/high) of an account, as returned by horizon."""
    def __init__(
        self, low_threshold: int, med_threshold: int, high_threshold: int
    ) -> None:
        self.low_threshold = low_threshold
        self.med_threshold = med_threshold
        self.high_threshold = high_threshold
    def __eq__(self, other: object) -> bool:
        # Value equality over all three threshold fields.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.low_threshold == other.low_threshold
            and self.med_threshold == other.med_threshold
            and self.high_threshold == other.high_threshold
        )
    def __str__(self):
        return (
            f"<Thresholds [low_threshold={self.low_threshold}, med_threshold={self.med_threshold}, "
            f"high_threshold={self.high_threshold}]>"
        )
| 1,673 | -4 | 209 |
d9dd94963a22d1ef9ae39d0995e3510f4b30f57c | 135 | py | Python | pastepwn/api/__init__.py | SaFiSec/pastepwn | 9009874f9c9edd2be86399cc90802117ae28d3ce | [
"MIT"
] | 1 | 2019-04-28T17:48:34.000Z | 2019-04-28T17:48:34.000Z | pastepwn/api/__init__.py | 0xSaFi/pastepwn | 9009874f9c9edd2be86399cc90802117ae28d3ce | [
"MIT"
] | null | null | null | pastepwn/api/__init__.py | 0xSaFi/pastepwn | 9009874f9c9edd2be86399cc90802117ae28d3ce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .apiserver import APIServer
from .apiresponse import APIResponse
__all__ = ['APIServer', 'APIResponse']
| 19.285714 | 38 | 0.718519 | # -*- coding: utf-8 -*-
from .apiserver import APIServer
from .apiresponse import APIResponse
__all__ = ['APIServer', 'APIResponse']
| 0 | 0 | 0 |
3c8c7884ceeb6744551087c925736f38a5581eb0 | 1,667 | py | Python | sky130/custom/scripts/fix_gpiov2_gds.py | lekez2005/open_pdks | e8416d541c839e4ec51187feb37f456f35a9b35b | [
"Apache-2.0"
] | null | null | null | sky130/custom/scripts/fix_gpiov2_gds.py | lekez2005/open_pdks | e8416d541c839e4ec51187feb37f456f35a9b35b | [
"Apache-2.0"
] | null | null | null | sky130/custom/scripts/fix_gpiov2_gds.py | lekez2005/open_pdks | e8416d541c839e4ec51187feb37f456f35a9b35b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# fix_gpiov2_gds.py ---
#
# Special-purpose script that does the work of what ought to be a simple
# binary diff and patch. Except that no such thing exists as a standard
# offering on most Linux systems, so instead of adding another OS
# package requirement, I'm just writing a binary search-and-replace in
# python.
#
# The purpose of the patch is to correct the coordinates of the deep nwell
# and nwell layers in the cell amux_switch_1v2b, as the SkyWater cell
# contains DRC errors.
#
# Specifically, DNWELL coordinate (34.450, 0.035) is moved to (34.905, 0.035)
# and NWELL coordinate (35.055, -0.365) is moved to (35.390, -0.365)
import sys

# Expect exactly one argument: the GDS file to patch in place.
if len(sys.argv) != 2:
    print('Usage: fix_gpiov2_gds.py <filename>')
    sys.exit(1)
else:
    file_name = sys.argv[1]

# (original, replacement) byte patterns; the header comments above explain
# the DNWELL/NWELL coordinate moves these encode.
patches = [
    (b'\x00\x00\x86\x92\x00\x00\x00\x23\x00\x00\x86\x92',
     b'\x00\x00\x88\x59\x00\x00\x00\x23\x00\x00\x88\x59'),
    (b'\x00\x00\x88\xef\xff\xff\xff\x8d\x00\x00\x47\xef\xff\xff\xff\x8d',
     b'\x00\x00\x8a\x3e\x00\x00\x00\x91\x00\x00\x47\xef\x00\x00\x00\x91'),
    (b'\x00\x00\x88\xef\xff\xff\xfe\x93\x00\x00\x88\xef\xff\xff\xff\x8d',
     b'\x00\x00\x8a\x3e\xff\xff\xfe\x93\x00\x00\x8a\x3e\x00\x00\x00\x91'),
]

# Read the whole file, apply every binary patch, and write it back in place.
with open(file_name, 'rb') as ifile:
    data = ifile.read()
for needle, replacement in patches:
    data = data.replace(needle, replacement)
with open(file_name, 'wb') as ofile:
    ofile.write(data)
print("Done!")
| 34.729167 | 84 | 0.716857 | #!/usr/bin/env python3
#
# fix_gpiov2_gds.py ---
#
# Special-purpose script that does the work of what ought to be a simple
# binary diff and patch. Except that no such thing exists as a standard
# offering on most Linux systems, so instead of adding another OS
# package requirement, I'm just writing a binary search-and-replace in
# python.
#
# The purpose of the patch is to correct the coordinates of the deep nwell
# and nwell layers in the cell amux_switch_1v2b, as the SkyWater cell
# contains DRC errors.
#
# Specifically, DNWELL coordinate (34.450, 0.035) is moved to (34.905, 0.035)
# and NWELL coordinate (35.055, -0.365) is moved to (35.390, -0.365)
import sys
if len(sys.argv) != 2:
print('Usage: fix_gpiov2_gds.py <filename>')
sys.exit(1)
else:
file_name = sys.argv[1]
orig_data_1 = b'\x00\x00\x86\x92\x00\x00\x00\x23\x00\x00\x86\x92'
replace_data_1 = b'\x00\x00\x88\x59\x00\x00\x00\x23\x00\x00\x88\x59'
orig_data_2 = b'\x00\x00\x88\xef\xff\xff\xff\x8d\x00\x00\x47\xef\xff\xff\xff\x8d'
replace_data_2 = b'\x00\x00\x8a\x3e\x00\x00\x00\x91\x00\x00\x47\xef\x00\x00\x00\x91'
orig_data_3 = b'\x00\x00\x88\xef\xff\xff\xfe\x93\x00\x00\x88\xef\xff\xff\xff\x8d'
replace_data_3 = b'\x00\x00\x8a\x3e\xff\xff\xfe\x93\x00\x00\x8a\x3e\x00\x00\x00\x91'
# This is not efficient, but only needs to be done once.
with open(file_name, 'rb') as ifile:
data = ifile.read()
data = data.replace(orig_data_1, replace_data_1)
data = data.replace(orig_data_2, replace_data_2)
data = data.replace(orig_data_3, replace_data_3)
# Write back into the same file
with open(file_name, 'wb') as ofile:
ofile.write(data)
print("Done!")
| 0 | 0 | 0 |
320b6fa95fbb616d5b44ecaf14d38d8a64b3ac3f | 11,340 | py | Python | utils/geometry_helper.py | matsuren/crownconv360depth | 8f8f30b6739409e5cd762af92206fe72d74b0d54 | [
"MIT"
] | 35 | 2020-07-12T15:31:56.000Z | 2022-02-13T01:33:20.000Z | utils/geometry_helper.py | matsuren/crownconv360depth | 8f8f30b6739409e5cd762af92206fe72d74b0d54 | [
"MIT"
] | 5 | 2020-07-17T09:22:59.000Z | 2022-02-17T16:02:58.000Z | utils/geometry_helper.py | matsuren/crownconv360depth | 8f8f30b6739409e5cd762af92206fe72d74b0d54 | [
"MIT"
] | 6 | 2020-10-27T15:23:21.000Z | 2021-07-16T06:52:46.000Z | import math
import os
from copy import copy
from functools import lru_cache
from functools import partial
from os.path import join
import igl
import numpy as np
from numpy.linalg import norm
#
@lru_cache(maxsize=None)
@lru_cache(maxsize=None)
@lru_cache(maxsize=None)
@lru_cache()
def distort_unfold_to_imgcoord(distort_unfold, drop_NE=True):
"""
Parameters
----------
distort_unfold :
distorted unfold
drop_NE : bool
drop north and east as in [1]
References
----------
[1] orientation-aware semantic segmentation on icosahedron spheres, ICCV2019
"""
vertex_num = len(distort_unfold)
level = round(math.log((vertex_num - 2) // 10, 4))
width = 2 ** level + 1
height = 2 * width - 1
unfold_pts_set = set() # (vertex_id, x, y)
# remove duplicate
for key, arr in distort_unfold.items():
for val in arr:
unfold_pts_set.add((key, val[0], val[1]))
# sort
unfold_pts_set = sorted(unfold_pts_set, key=lambda x: (x[1], x[2]))
# to image coorinate
img_coord = {}
for (vertex_id, x, y) in unfold_pts_set:
rect_idxs = get_rect_idxs(x, y)
for key in rect_idxs:
if key not in img_coord:
img_coord[key] = []
img_coord[key].append(vertex_id)
# to numpy
for key in img_coord:
img_coord[key] = np.array(img_coord[key]).reshape(width, height).T
if drop_NE:
# orientation-aware semantic segmentation on icosahedron spheres form
for key in img_coord:
img_coord[key] = img_coord[key][1:, :-1]
return img_coord
@lru_cache()
| 29.608355 | 105 | 0.587213 | import math
import os
from copy import copy
from functools import lru_cache
from functools import partial
from os.path import join
import igl
import numpy as np
from numpy.linalg import norm
#
@lru_cache(maxsize=None)
def get_icosahedron(level=0):
    """Return (vertices, faces) of an icosphere subdivided ``level`` times.

    Results are memoized; each level is one 4-to-1 subdivision of the
    previous level's mesh re-projected onto the unit sphere.
    """
    if level == 0:
        return get_base_icosahedron()
    coarse_v, coarse_f = get_icosahedron(level - 1)
    return subdivision(coarse_v, coarse_f, 1)
@lru_cache(maxsize=None)
def get_unfold_icosahedron(level=0):
    """Return the unfolded (planar) icosahedron vertices and faces at ``level``.

    Memoized recursion mirroring :func:`get_icosahedron`, but operating on
    the 2-D unfolding via :func:`unfold_subdivision`.
    """
    if level == 0:
        return get_base_unfold()
    coarse_unfold, coarse_f = get_unfold_icosahedron(level - 1)
    return unfold_subdivision(coarse_unfold, coarse_f)
@lru_cache(maxsize=None)
def get_unfold_imgcoord(level=0, drop_NE=True):
    """Return the per-chart vertex-id images for the unfolded icosahedron.

    Results are memoized in-process and also persisted as an ``.npz`` file
    next to this module, keyed by ``level`` and ``drop_NE``.
    """
    # return cache if it exists
    cache_dir = os.path.dirname(os.path.realpath(__file__))
    cache_file = join(cache_dir, f'cache_unfold_imgcoord{level}_{drop_NE}.npz')
    if os.path.exists(cache_file):
        # allow_pickle: the stored arr_0 is a dict of arrays, not a plain array
        img_coord = np.load(cache_file, allow_pickle=True)['arr_0'][()]
        return img_coord
    # no cache
    unfold_v, new_faces = get_unfold_icosahedron(level)
    distort_unfold = distort_grid(unfold_v)
    img_coord = distort_unfold_to_imgcoord(distort_unfold, drop_NE)
    # save cache for next time
    np.savez(cache_file, img_coord)
    return img_coord
@lru_cache()
def get_vertexid_to_loc(level):
    """Map every vertex id to its (chart, row, col) location in the unfolded images.

    Returns an ``(n_vertices, 3)`` int array where row ``vid`` holds
    (image index 0-4, y, x) for that vertex; entries stay -1 for vertex ids
    that never appear in any chart.
    """
    img_coord = get_unfold_imgcoord(level)
    cat_img_coord = np.stack([img_coord[i] for i in range(5)])
    num, h, w = cat_img_coord.shape
    v_len = 2 + 10 * 4 ** level
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24 -- use the
    # builtin int (same dtype) instead.
    vertexid_to_loc = np.full((v_len, 3), -1, dtype=int)
    hw = h * w
    for i, it in enumerate(cat_img_coord.ravel()):
        # equivalent to np.unravel_index(i, (num, h, w)), computed inline
        vertexid_to_loc[it] = [i // hw, (i // w) % h, i % w]
    return vertexid_to_loc
def get_base_icosahedron():
    """Return (vertices, faces) of a unit-circumradius icosahedron.

    Vertices are scaled to radius 1 and rotated so that vertex 0 sits at
    (0, -1, 0) and vertex 1 lies on the yz-plane.
    """
    phi = (1.0 + 5.0 ** .5) / 2.0
    vertices = np.array([
        [-1, phi, 0], [1, phi, 0], [0, 1, phi], [-phi, 0, 1],
        [-phi, 0, -1], [0, 1, -phi], [phi, 0, -1], [phi, 0, 1],
        [0, -1, phi], [-1, -phi, 0], [0, -1, -phi], [1, -phi, 0],
    ], dtype=float)
    faces = np.array([
        [0, 2, 1], [0, 3, 2], [0, 4, 3], [0, 5, 4], [0, 1, 5],
        [1, 7, 6], [1, 2, 7], [2, 8, 7], [2, 3, 8], [3, 9, 8],
        [3, 4, 9], [4, 10, 9], [4, 5, 10], [5, 6, 10], [5, 1, 6],
        [6, 7, 11], [7, 8, 11], [8, 9, 11], [9, 10, 11], [10, 6, 11],
    ])
    # scale: the raw golden-ratio coordinates have circumradius 2*sin(2*pi/5)
    vertices /= np.sin(2 * np.pi / 5) * 2
    # build an orthonormal frame so v0 -> (0, -1, 0) and v1 gets x == 0
    axis_y = -vertices[0]
    axis_x = np.cross(axis_y, vertices[1])
    axis_x /= np.linalg.norm(axis_x)
    axis_z = np.cross(axis_x, axis_y)
    rotation = np.stack([axis_x, axis_y, axis_z])
    return vertices.dot(rotation.T), faces
def subdivision(v, f, level=1):
    """Apply ``level`` rounds of 4-to-1 mesh subdivision, re-projecting onto the unit sphere."""
    for _ in range(level):
        v, f = igl.upsample(v, f)
        # normalize each vertex back onto the sphere
        v = v / np.linalg.norm(v, axis=1, keepdims=True)
    return v, f
def get_base_unfold():
    """Return the planar unfolding of the base icosahedron.

    Maps each of the 12 vertex ids to a list of five 2-D positions -- one
    per chart the vertex appears in once the surface is cut open -- plus
    the (unchanged) face array from :func:`get_base_icosahedron`.
    """
    v, f = get_base_icosahedron()
    unfold_v = {i: [] for i in range(12)}
    # edge length
    edge_len = 1 / np.sin(2 * np.pi / 5)
    # height of an equilateral triangle with that edge
    h = 3 ** 0.5 * edge_len / 2
    # v0: pole vertex, one copy per chart along the top row
    for i in range(5):
        unfold_v[0].append([i * edge_len, 0])
    # v1
    for _ in range(5):
        unfold_v[1].append([-0.5 * edge_len, h])
    # copies 1 and 4 of v1 wrap around to the right edge of the layout
    unfold_v[1][1] = [-0.5 * edge_len + 5 * edge_len, h]
    unfold_v[1][4] = [-0.5 * edge_len + 5 * edge_len, h]
    # v2-v5
    for i in range(2, 6):
        for _ in range(5):
            unfold_v[i].append([(0.5 + i - 2) * edge_len, h])
    # v6
    for _ in range(5):
        unfold_v[6].append([-edge_len, 2 * h])
    # copies 1, 2 and 4 of v6 wrap around to the right edge of the layout
    unfold_v[6][1] = [-edge_len + 5 * edge_len, 2 * h]
    unfold_v[6][2] = [-edge_len + 5 * edge_len, 2 * h]
    unfold_v[6][4] = [-edge_len + 5 * edge_len, 2 * h]
    # v7-v10
    for i in range(7, 11):
        for _ in range(5):
            unfold_v[i].append([(i - 7) * edge_len, 2 * h])
    # v11: opposite pole, one copy per chart along the bottom row
    for i in range(5):
        unfold_v[11].append([(-0.5 + i) * edge_len, 3 * h])
    # to numpy
    for i in range(len(unfold_v)):
        unfold_v[i] = np.array(unfold_v[i])
    return unfold_v, f
class UnfoldVertex(object):
    """Sequential accessor over per-vertex unfold positions.

    Indexing ``obj[vid]`` returns the next stored position for vertex
    ``vid``, advancing an internal per-vertex cursor; :meth:`reset`
    rewinds every cursor to the first stored copy.
    """
    def __init__(self, unfold_v):
        self.unfold_v = unfold_v
        self.reset()
    def __getitem__(self, item):
        cursor = self.cnt[item]
        self.cnt[item] = cursor + 1
        return self.unfold_v[item][cursor]
    def reset(self):
        # one cursor per vertex id, all pointing at the first stored copy
        self.cnt = dict.fromkeys(self.unfold_v, 0)
class VertexIdxManager(object):
    """Allocates stable indices for midpoint vertices created during subdivision.

    The midpoint of edge (a, b) always receives the same index regardless
    of the order the endpoints are passed in; new indices start right
    after the existing vertex count.
    """
    def __init__(self, unfold_v):
        self.reg_v = {}
        self.next_v_index = len(unfold_v)
    def get_next(self, a, b):
        lo, hi = (a, b) if a <= b else (b, a)
        key = f'{lo},{hi}'
        if key not in self.reg_v:
            self.reg_v[key] = self.next_v_index
            self.next_v_index += 1
        return self.reg_v[key]
def unfold_subdivision(unfold_v, faces):
    """One 4-to-1 subdivision step on the planar unfolding.

    Each face (a, b, c) is split into four sub-faces using edge-midpoint
    vertices; midpoint ids are allocated once per edge by VertexIdxManager,
    and per-vertex position copies are consumed in face order via
    UnfoldVertex so each chart uses its own coordinates.
    """
    v_idx_manager = VertexIdxManager(unfold_v)
    new_faces = []
    new_unfold = copy(unfold_v)
    v_obj = UnfoldVertex(unfold_v)
    for (a, b, c) in faces:
        # consume this face's copy of each corner position
        a_pos = v_obj[a]
        b_pos = v_obj[b]
        c_pos = v_obj[c]
        new_a = v_idx_manager.get_next(a, b)
        new_b = v_idx_manager.get_next(b, c)
        new_c = v_idx_manager.get_next(c, a)
        new_a_pos = (a_pos + b_pos) / 2
        new_b_pos = (b_pos + c_pos) / 2
        new_c_pos = (c_pos + a_pos) / 2
        # new faces
        new_faces.append([a, new_a, new_c])
        new_faces.append([b, new_b, new_a])
        new_faces.append([new_a, new_b, new_c])
        new_faces.append([new_b, c, new_c])
        # new vertex
        indices = [new_a, new_b, new_c]
        poses = [new_a_pos, new_b_pos, new_c_pos]
        for (idx, pos) in zip(indices, poses):
            if idx not in new_unfold:
                new_unfold[idx] = []
                # NOTE(review): the 3-copy append runs only when idx is first
                # created here, so the second face sharing this edge adds no
                # copies -- confirm this indentation against upstream.
                for _ in range(3):
                    new_unfold[idx].append(pos)
    return new_unfold, new_faces
def distort_grid(unfold_v):
    """Apply a shear + rotation + scale to align the unfolded vertices to a grid.

    Builds a 2x2 transform T from the first copies of vertices 0, 1 and 2 and
    applies it to every stored position; results are rounded to 9 decimals so
    the later lexicographic sort is numerically stable.
    """
    np_round = partial(np.round, decimals=9)
    # calculate transform matrix
    new_x = unfold_v[2][0] - unfold_v[0][0]
    edge_len = np.linalg.norm(new_x)
    new_x /= edge_len
    # 2-D perpendicular of new_x (z-cross trick, then drop the z component)
    new_y = np.cross([0, 0, 1], np.append(new_x, 0))[:2]
    R = np.stack([new_x, new_y])
    a = unfold_v[2][0] - unfold_v[0][0]
    b = unfold_v[1][0] - unfold_v[0][0]
    skew = np.eye(2)
    # shear by the angle between the two base edges
    skew[0, 1] = -1 / np.tan(np.arccos(a.dot(b) / norm(a) / norm(b)))
    skew[0] /= norm(skew[0])
    T = skew.dot(R)
    # scale adjust
    scale = np.linalg.det(skew) * edge_len
    T /= scale
    # to numpy array for efficient computation
    # np_round to alleviate numerical error when sorting
    distort_unfold = copy(unfold_v)
    five_neighbor = [distort_unfold[i] for i in range(12)]
    five_neighbor = np.array(five_neighbor)
    # Transform
    five_neighbor = np_round(five_neighbor.dot(T.T))
    # the same procedure for six_neighbor if len(unfold_v) > 12
    if len(unfold_v) > 12:
        six_neighbor = [distort_unfold[i] for i in range(12, len(unfold_v))]
        six_neighbor = np.array(six_neighbor)
        six_neighbor = np_round(six_neighbor.dot(T.T))
    # rebuild the dict shape: vertex id -> transformed position array
    distort_unfold = {}
    cnt = 0
    for it in five_neighbor:
        distort_unfold[cnt] = it
        cnt += 1
    if len(unfold_v) > 12:
        for it in six_neighbor:
            distort_unfold[cnt] = it
            cnt += 1
    return distort_unfold
def get_rect_idxs(x, y):
    """Return the indices (0-4) of the unfolded rectangles containing (x, y).

    Rectangle ``i`` spans ``i <= x <= i + 1`` and ``-i <= y <= -i + 2``;
    points on shared edges belong to every touching rectangle.
    """
    return [i for i in range(5) if i <= x <= i + 1 and -i <= y <= -i + 2]
def distort_unfold_to_imgcoord(distort_unfold, drop_NE=True):
    """Convert distorted unfold positions into five per-chart vertex-id images.

    Parameters
    ----------
    distort_unfold :
        distorted unfold (mapping: vertex id -> array of grid positions)
    drop_NE : bool
        drop north and east as in [1]

    References
    ----------
    [1] orientation-aware semantic segmentation on icosahedron spheres, ICCV2019
    """
    vertex_num = len(distort_unfold)
    # vertex count determines the subdivision level (10 * 4**level + 2 vertices)
    level = round(math.log((vertex_num - 2) // 10, 4))
    width = 2 ** level + 1
    height = 2 * width - 1
    unfold_pts_set = set()  # (vertex_id, x, y)
    # remove duplicate
    for key, arr in distort_unfold.items():
        for val in arr:
            unfold_pts_set.add((key, val[0], val[1]))
    # sort lexicographically by (x, y) so each chart fills column-major
    unfold_pts_set = sorted(unfold_pts_set, key=lambda x: (x[1], x[2]))
    # to image coordinate
    img_coord = {}
    for (vertex_id, x, y) in unfold_pts_set:
        rect_idxs = get_rect_idxs(x, y)
        for key in rect_idxs:
            if key not in img_coord:
                img_coord[key] = []
            img_coord[key].append(vertex_id)
    # to numpy
    for key in img_coord:
        img_coord[key] = np.array(img_coord[key]).reshape(width, height).T
    if drop_NE:
        # orientation-aware semantic segmentation on icosahedron spheres form
        for key in img_coord:
            img_coord[key] = img_coord[key][1:, :-1]
    return img_coord
@lru_cache()
def get_unfold_imgcoord_row(level=0, drop_NE=False):
    """Stitch each chart with the start of the next chart into five (w, h) arrays.

    Only ``drop_NE=False`` is supported; results are memoized per
    (level, drop_NE).
    """
    if drop_NE is True:
        raise ValueError("Not implemented for drop_NE = True")
    imgcoord = get_unfold_imgcoord(level=level, drop_NE=False)
    h, w = imgcoord[0].shape[-2:]
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24 -- use the
    # builtin int (same dtype) instead.
    imgcoord_row = [np.zeros((w, h), dtype=int) for _ in range(5)]
    for key in range(5):
        next_key = (key + 1) % 5
        imgcoord_row[key][:, :w] = imgcoord[key][:w, :]
        imgcoord_row[key][:, w:] = imgcoord[next_key][-w:, 1:]
    return imgcoord_row
def weight_for_triangle_interpolation(v, indices, pts_c):
    """Compute per-point interpolation weights over triangles of vertices.

    Parameters: ``v`` is the (V, 3) vertex array, ``indices`` an (N, >=4)
    array of candidate neighbor vertex indices per query point, and
    ``pts_c`` the (N, 3) projected query points. Returns
    ``(v0_idx, v1_idx, v2_idx), (weight_0, weight_1, weight_2)``.
    """
    # ------------------------------------
    # calculate weights from the first three candidate neighbors
    v0_idx, v1_idx, v2_idx = indices[:, 0], indices[:, 1], indices[:, 2]
    weight_0, weight_1, weight_2 = _weight_from_three_nearest(v[v0_idx], v[v1_idx], v[v2_idx], pts_c)
    # weight_0 <= 0 marks points that fell outside triangle (v0, v1, v2)
    outside_flag = (weight_0 <= 0)
    if outside_flag.sum() == 0:
        return (v0_idx, v1_idx, v2_idx), (weight_0, weight_1, weight_2)
    else:
        # sometimes a wrong index is included in the three neighbors;
        # retry those points with the 4th candidate v3 replacing v2:
        # v1 -- v0 -- v2
        #  vproj|
        #       |
        #       v3
        v3_idx = indices[:, 3]
        tmpv0 = v[v0_idx[outside_flag]]
        tmpv1 = v[v1_idx[outside_flag]]
        tmpv3 = v[v3_idx[outside_flag]]
        tmpvproj = pts_c[outside_flag]
        tmp_w0, tmp_w1, tmp_w2 = _weight_from_three_nearest(tmpv0, tmpv1, tmpv3, tmpvproj)
        # update weight and index in place only for the outside points
        weight_0[outside_flag] = tmp_w0
        weight_1[outside_flag] = tmp_w1
        weight_2[outside_flag] = tmp_w2
        v2_idx[outside_flag] = v3_idx[outside_flag]
        return (v0_idx, v1_idx, v2_idx), (weight_0, weight_1, weight_2)
def _weight_from_three_nearest(v0, v1, v2, vproj):
v01 = v1 - v0 # v0->v1 vector
v02 = v2 - v0 # v0->v2 vector
v0proj = vproj - v0 # v0->v_proj vector
# total area
total_area = norm(np.cross(v01, v02), axis=1) / 2
# area v0-v2-v_proj
v02proj_area = norm(np.cross(v0proj, v02), axis=1) / 2
# area v0-v1-v_proj
v01proj_area = norm(np.cross(v0proj, v01), axis=1) / 2
# calculate weight
weight_0 = (total_area - v02proj_area - v01proj_area) / total_area
weight_1 = v02proj_area / total_area
weight_2 = v01proj_area / total_area
return weight_0, weight_1, weight_2
| 9,189 | 16 | 473 |
cd3b9182d0f7f375965ecf2411fe35384ab62ab0 | 553 | py | Python | banix_server/src/initialize_database.py | 4rund3v/banix | d67a4b6883a86e6f36476c103fba821eeb03b87a | [
"CC0-1.0"
] | null | null | null | banix_server/src/initialize_database.py | 4rund3v/banix | d67a4b6883a86e6f36476c103fba821eeb03b87a | [
"CC0-1.0"
] | null | null | null | banix_server/src/initialize_database.py | 4rund3v/banix | d67a4b6883a86e6f36476c103fba821eeb03b87a | [
"CC0-1.0"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from configuration import DATABASE_FILE
import os
import sys
# custom module imports
from src.models import Base
# Ensure the directory holding the SQLite file exists before connecting.
if not os.path.exists(os.path.dirname(DATABASE_FILE)):
    print(f"[initialize_database] Creating the database dir : {os.path.dirname(DATABASE_FILE)}")
    os.makedirs(os.path.dirname(DATABASE_FILE))
# echo=True logs every emitted SQL statement, useful for initial setup.
engine = create_engine('sqlite:///{}'.format(DATABASE_FILE), echo=True)
Session = sessionmaker(bind=engine)
session = Session()
# Create every table declared on the shared declarative Base (no-op for
# tables that already exist).
Base.metadata.create_all(engine)
| 32.529412 | 96 | 0.792043 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from configuration import DATABASE_FILE
import os
import sys
# custom module imports
from src.models import Base
if not os.path.exists(os.path.dirname(DATABASE_FILE)):
print(f"[initialize_database] Creating the database dir : {os.path.dirname(DATABASE_FILE)}")
os.makedirs(os.path.dirname(DATABASE_FILE))
engine = create_engine('sqlite:///{}'.format(DATABASE_FILE), echo=True)
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
| 0 | 0 | 0 |
d6983dcba30f5c74adcaa1c06debc7b1e1a4d1d6 | 428 | py | Python | myalgorithms/VertexForAlgorithm.py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | [
"MIT"
] | 2 | 2020-01-27T11:58:54.000Z | 2020-03-30T10:54:08.000Z | myalgorithms/VertexForAlgorithm.py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | [
"MIT"
] | null | null | null | myalgorithms/VertexForAlgorithm.py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | [
"MIT"
] | null | null | null | from Vertex import Vertex
INF = 10**9
| 20.380952 | 37 | 0.628505 | from Vertex import Vertex
INF = 10**9
class VertexForAlgorithms(Vertex):
    """Vertex augmented with shortest-path bookkeeping (distance + source).

    ``distance`` starts at INF (vertex not reached yet) and ``source`` at
    None (no known predecessor), as used by Dijkstra/Bellman-Ford style
    algorithms.

    Fix: the original defined methods named ``distance``/``source`` that
    were shadowed by the instance attributes set in ``__init__`` (calling
    ``v.distance()`` raised TypeError). They are now properties backed by
    private attributes; plain attribute reads/writes and the ``set_*``
    methods keep working exactly as before.
    """

    def __init__(self, key):
        super().__init__(key)
        self._distance = INF   # best known distance from the start vertex
        self._source = None    # predecessor vertex on the best known path

    def set_distance(self, distance):
        """Record a new best distance for this vertex."""
        self._distance = distance

    @property
    def distance(self):
        """Best known distance from the start vertex (INF if unreached)."""
        return self._distance

    @distance.setter
    def distance(self, value):
        self._distance = value

    def set_source(self, source):
        """Record the predecessor vertex on the best known path."""
        self._source = source

    @property
    def source(self):
        """Predecessor vertex on the best known path (None if unset)."""
        return self._source

    @source.setter
    def source(self, value):
        self._source = value
e3acee8508f7303664692ddbee629864707b816f | 703 | py | Python | pig-latin/pig_latin.py | j-mak/exercism.io | 2092ee17c899abbcfb8e98521ac2fa21368f504f | [
"MIT"
] | null | null | null | pig-latin/pig_latin.py | j-mak/exercism.io | 2092ee17c899abbcfb8e98521ac2fa21368f504f | [
"MIT"
] | null | null | null | pig-latin/pig_latin.py | j-mak/exercism.io | 2092ee17c899abbcfb8e98521ac2fa21368f504f | [
"MIT"
] | null | null | null | VOWELS = ('a', 'e', 'i', 'o', 'u', 'yt', 'xr')
CONSONANTS = ('b', 'c', 'd', 'f', 'g',
'h', 'j', 'k', 'l', 'm',
'n', 'p', 'q', 'r', 's',
't', 'v', 'w', 'x', 'y',
'z', 'sh', 'sch', 'zz', 'gh',
'ch', 'th', 'qu', 'thr',
'squ')
| 29.291667 | 62 | 0.385491 | VOWELS = ('a', 'e', 'i', 'o', 'u', 'yt', 'xr')
CONSONANTS = ('b', 'c', 'd', 'f', 'g',
'h', 'j', 'k', 'l', 'm',
'n', 'p', 'q', 'r', 's',
't', 'v', 'w', 'x', 'y',
'z', 'sh', 'sch', 'zz', 'gh',
'ch', 'th', 'qu', 'thr',
'squ')
def translate(sentence):
    """Translate a space-separated sentence into Pig Latin.

    Words are matched against the longest consonant cluster first
    (3 letters, then 2, then 1); the cluster is moved to the end and
    'ay' is appended. Words starting with a vowel sound (including the
    special 'yt'/'xr' prefixes) just get 'ay' appended.
    """
    translated = []
    for word in sentence.split(' '):
        for size in (3, 2):
            if word[:size] in CONSONANTS:
                word = word[size:] + word[:size]
                break
        else:
            # No multi-letter cluster matched: move a single leading
            # consonant unless the word starts with a vowel sound.
            if word[0] in CONSONANTS and word[:2] not in VOWELS:
                word = word[1:] + word[0]
        translated.append(word + 'ay')
    return ' '.join(translated)
| 371 | 0 | 23 |
f999aabed81f7dc07d7b32f0db118169cb07d83b | 555 | py | Python | khan_mini/settings.py | tobykurien/ase | 42520a60aa373293527a15097b2ffd9e3816b415 | [
"MIT"
] | null | null | null | khan_mini/settings.py | tobykurien/ase | 42520a60aa373293527a15097b2ffd9e3816b415 | [
"MIT"
] | null | null | null | khan_mini/settings.py | tobykurien/ase | 42520a60aa373293527a15097b2ffd9e3816b415 | [
"MIT"
] | null | null | null | import os
# Base endpoints for the Khan services (all on the same LAN host).
KHAN_BASE_URL = "http://192.168.1.2:8080" # instance used by khan mini to access API
KHAN_MINI_BASE_URL = "http://192.168.1.2:8081"
KHAN_BASE_URL2 = "http://192.168.1.2" # excluding port, which will be randomly assigned from below
# Candidate ports for KHAN_BASE_URL2; one is picked at random by the caller.
KHAN_INSTANCES = [8082,8083,8084,8085,8086]
# Config files resolved relative to this settings module.
# NOTE(review): "enlishessayconf" looks like a typo for "englishessayconf";
# renaming would break importers, so it is left as-is.
khanconf = os.path.join(os.path.dirname(__file__), 'khanmini.conf')
enlishessayconf = os.path.join(os.path.dirname(__file__), 'englishessay.conf')
current_dir = os.path.dirname(os.path.abspath(__file__))
# SQLAlchemy-style DB URL. NOTE(review): credentials are hard-coded in
# source; consider moving them to environment variables.
ESSAY_DB = "mysql://ase:asep4s5@localhost/ase"
| 42.692308 | 108 | 0.731532 | import os
KHAN_BASE_URL = "http://192.168.1.2:8080" # instance used by khan mini to access API
KHAN_MINI_BASE_URL = "http://192.168.1.2:8081"
KHAN_BASE_URL2 = "http://192.168.1.2" # excluding port, which will be randomly assigned from below
KHAN_INSTANCES = [8082,8083,8084,8085,8086]
khanconf = os.path.join(os.path.dirname(__file__), 'khanmini.conf')
enlishessayconf = os.path.join(os.path.dirname(__file__), 'englishessay.conf')
current_dir = os.path.dirname(os.path.abspath(__file__))
ESSAY_DB = "mysql://ase:asep4s5@localhost/ase"
| 0 | 0 | 0 |
7417cf5e6ce0b9948c0a5bbc3a8d6bdfbf2115a4 | 2,456 | py | Python | BackendAPI/customuser/admin.py | silvioramalho/django-startup-rest-api | b984ee6be27990d29ab3df7cdd446bb63ee3ee34 | [
"MIT"
] | 9 | 2020-05-23T14:42:00.000Z | 2022-03-04T12:21:00.000Z | BackendAPI/customuser/admin.py | silvioramalho/django-startup-rest-api | b984ee6be27990d29ab3df7cdd446bb63ee3ee34 | [
"MIT"
] | 6 | 2020-05-14T21:34:09.000Z | 2021-09-22T19:01:15.000Z | BackendAPI/customuser/admin.py | silvioramalho/django-startup-rest-api | b984ee6be27990d29ab3df7cdd446bb63ee3ee34 | [
"MIT"
] | 1 | 2022-03-04T12:20:52.000Z | 2022-03-04T12:20:52.000Z | from django.contrib import admin
from django.contrib.admin.actions import delete_selected
from .models import CustomUser
from djoser import signals, utils
from djoser.compat import get_user_email
from djoser.conf import settings
make_activate.short_description = "Ativar Usuarios"
send_confirmation_email.short_description = "Enviar Email de Confirmacao"
send_resend_activation_email.short_description = "Reenviar Email de Ativação"
send_reset_password_email.short_description = "Enviar link para Troca de Senha"
block_user.short_description = "Bloquear Usuário"
# Exclui globalmente a ação de apagar selecionados
# É necessário adicionar apenas nos que quiser conforme abaixo
admin.site.disable_action('delete_selected')
delete_selected.short_description = "Apagar Selecionados"
admin.site.site_header = 'Titulo do Topo'
admin.site.index_title = 'Titulo do Index'
admin.site.site_title = 'Titulo do Site(HTML)'
admin.site.register(CustomUser, CustomUserAdmin)
| 36.656716 | 79 | 0.736156 | from django.contrib import admin
from django.contrib.admin.actions import delete_selected
from .models import CustomUser
from djoser import signals, utils
from djoser.compat import get_user_email
from djoser.conf import settings
def make_activate(modeladmin, request, queryset):
    """Admin action: bulk-activate the selected users (is_active=True)."""
    queryset.update(is_active=True)
def block_user(modeladmin, request, queryset):
    """Admin action: bulk-block the selected users (is_blocked=True)."""
    queryset.update(is_blocked=True)
def send_confirmation_email(modeladmin, request, queryset):
    """Admin action: send the Djoser confirmation e-mail to each selected user."""
    # SEND_CONFIRMATION_EMAIL is a global Djoser setting and does not change
    # per user, so check it once instead of on every loop iteration.
    if not settings.SEND_CONFIRMATION_EMAIL:
        return
    for user in queryset:
        context = {"user": user}
        to = [get_user_email(user)]
        settings.EMAIL.confirmation(request, context).send(to)
def send_resend_activation_email(modeladmin, request, queryset):
    """Admin action: resend the Djoser activation e-mail to each selected user."""
    # SEND_CONFIRMATION_EMAIL is a global Djoser setting and does not change
    # per user, so check it once instead of on every loop iteration.
    if not settings.SEND_CONFIRMATION_EMAIL:
        return
    for user in queryset:
        context = {"user": user}
        to = [get_user_email(user)]
        settings.EMAIL.activation(request, context).send(to)
def send_reset_password_email(modeladmin, request, queryset):
    """Admin action: send the Djoser password-reset link to each selected user."""
    # SEND_CONFIRMATION_EMAIL is a global Djoser setting and does not change
    # per user, so check it once instead of on every loop iteration.
    if not settings.SEND_CONFIRMATION_EMAIL:
        return
    for user in queryset:
        context = {"user": user}
        to = [get_user_email(user)]
        settings.EMAIL.password_reset(request, context).send(to)
# Portuguese labels shown in the admin "Actions" dropdown for each action.
make_activate.short_description = "Ativar Usuarios"
send_confirmation_email.short_description = "Enviar Email de Confirmacao"
send_resend_activation_email.short_description = "Reenviar Email de Ativação"
send_reset_password_email.short_description = "Enviar link para Troca de Senha"
block_user.short_description = "Bloquear Usuário"
# Globally removes the built-in "delete selected" action;
# it must then be re-added explicitly only where wanted (see below).
admin.site.disable_action('delete_selected')
delete_selected.short_description = "Apagar Selecionados"
class CustomUserAdmin(admin.ModelAdmin):
    """Admin configuration for CustomUser: columns, filters and bulk actions."""
    list_display = ('email', 'is_active', 'is_blocked', 'is_admin', 'is_staff')
    # Single fieldset row: email plus all flags edited on one change-form line.
    fields = [('email', 'is_active', 'is_blocked', 'is_admin', 'is_staff')]
    list_filter = ['is_active', 'is_blocked', 'is_admin', 'is_staff']
    search_fields = ['email']
    # 'delete_selected' is re-enabled here only (it was disabled globally above).
    actions = [
        make_activate, send_confirmation_email, block_user,
        send_resend_activation_email, send_reset_password_email,
        'delete_selected']
# Branding of the admin UI (header, index page title, browser <title>).
admin.site.site_header = 'Titulo do Topo'
admin.site.index_title = 'Titulo do Index'
admin.site.site_title = 'Titulo do Site(HTML)'
# Register the custom user model with its admin configuration.
admin.site.register(CustomUser, CustomUserAdmin)
| 907 | 443 | 138 |
f2b13eab6e221386027678d317dbab5f8bb2dc64 | 467 | py | Python | applicationServer/main.py | YkBastidas/REST-distribuidos | f43fbb13f081f84b91a2aabe6eedb6ca695acda0 | [
"MIT"
] | 1 | 2022-02-12T16:00:43.000Z | 2022-02-12T16:00:43.000Z | applicationServer/main.py | YkBastidas/REST-distribuidos | f43fbb13f081f84b91a2aabe6eedb6ca695acda0 | [
"MIT"
] | null | null | null | applicationServer/main.py | YkBastidas/REST-distribuidos | f43fbb13f081f84b91a2aabe6eedb6ca695acda0 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_restful import Api
from objects import Objects
from replicate import Replicate
from restore import Restore
from object import Object
# Flask app wrapped with Flask-RESTful for resource-based routing.
app = Flask(__name__)
api = Api(app)
# REST routes: collection, replication control, per-server restore,
# and single-object access by name.
api.add_resource(Objects, "/api/objects/")
api.add_resource(Replicate, "/api/replicate/<action>")
api.add_resource(Restore, "/api/restore/<server>")
api.add_resource(Object, "/api/objects/<name>")
if __name__ == "__main__":
    # Binds to a fixed LAN address/port when run directly.
    app.run("172.26.208.232", 5000)
| 27.470588 | 54 | 0.760171 | from flask import Flask
from flask_restful import Api
from objects import Objects
from replicate import Replicate
from restore import Restore
from object import Object
app = Flask(__name__)
api = Api(app)
api.add_resource(Objects, "/api/objects/")
api.add_resource(Replicate, "/api/replicate/<action>")
api.add_resource(Restore, "/api/restore/<server>")
api.add_resource(Object, "/api/objects/<name>")
if __name__ == "__main__":
app.run("172.26.208.232", 5000)
| 0 | 0 | 0 |
1e4ad983cebf259060996e755982865fa7dd25b3 | 433 | py | Python | CursoIntensivoPython/curso-intensivo-python-master/capitulo_04/exercicios/buffet.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | CursoIntensivoPython/curso-intensivo-python-master/capitulo_04/exercicios/buffet.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | CursoIntensivoPython/curso-intensivo-python-master/capitulo_04/exercicios/buffet.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | pratos = ('bolinho de bacalhau',
'francesinha',
'caldo verde',
'pastel de belém',
'feijoada')
# Print the main menu, one title-cased dish per indented line.
print('Cardápio principal:')
for prato in pratos:
    print(f'\t{prato.title()}')
# Rebind the tuple with the revised menu (tuples are immutable, so the
# name is reassigned rather than mutated) and print it the same way.
pratos = ('arroz de forno',
          'farofa crocante',
          'caldo verde',
          'pastel de belém',
          'feijoada')
print('\nCardápio revisado:')
for prato in pratos:
    print(f'\t{prato.title()}')
| 24.055556 | 32 | 0.549654 | pratos = ('bolinho de bacalhau',
'francesinha',
'caldo verde',
'pastel de belém',
'feijoada')
print('Cardápio principal:')
for prato in pratos:
print(f'\t{prato.title()}')
pratos = ('arroz de forno',
'farofa crocante',
'caldo verde',
'pastel de belém',
'feijoada')
print('\nCardápio revisado:')
for prato in pratos:
print(f'\t{prato.title()}')
| 0 | 0 | 0 |
248ce9e2a1ed33df23443a2674dd4d800fcb7be1 | 4,176 | py | Python | blockchain_connector/ethereum/ethereum_connector/eth_connector_service.py | san-lab/avalon_dalion | 7dcd8ce46366caa51b658e33eac88d2853a5b595 | [
"Apache-2.0"
] | null | null | null | blockchain_connector/ethereum/ethereum_connector/eth_connector_service.py | san-lab/avalon_dalion | 7dcd8ce46366caa51b658e33eac88d2853a5b595 | [
"Apache-2.0"
] | 1 | 2021-02-03T07:57:06.000Z | 2021-02-13T13:53:49.000Z | blockchain_connector/ethereum/ethereum_connector/eth_connector_service.py | pankajgoyal2/trusted-compute-framework | c060755995864f05516206e98c46e00e3826e425 | [
"Apache-2.0"
] | 4 | 2021-06-09T08:55:26.000Z | 2021-11-26T16:25:48.000Z | # Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import argparse
import config.config as pconfig
from avalon_sdk.connector.blockchains.ethereum.ethereum_work_order \
import EthereumWorkOrderProxyImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_worker_registry \
import EthereumWorkerRegistryImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_wrapper \
import EthereumWrapper
from ethereum_connector.ethereum_connector \
import EthereumConnector
import logging
# -----------------------------------------------------------------
# -----------------------------------------------------------------
TCF_HOME = os.environ.get("TCF_HOME", "../../../")
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
main()
| 32.625 | 76 | 0.613506 | # Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import argparse
import config.config as pconfig
from avalon_sdk.connector.blockchains.ethereum.ethereum_work_order \
import EthereumWorkOrderProxyImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_worker_registry \
import EthereumWorkerRegistryImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_wrapper \
import EthereumWrapper
from ethereum_connector.ethereum_connector \
import EthereumConnector
import logging
# -----------------------------------------------------------------
# -----------------------------------------------------------------
TCF_HOME = os.environ.get("TCF_HOME", "../../../")
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def _parse_config_file(config_file):
    """Parse the connector TOML config and return it as a dictionary.

    Falls back to the default tcf_connector.toml under TCF_HOME when no
    path is given. Returns None when parsing fails.
    """
    if config_file:
        conf_files = [config_file]
    else:
        conf_files = [TCF_HOME +
                      "/sdk/avalon_sdk/tcf_connector.toml"]
    conf_paths = ["."]
    try:
        config = pconfig.parse_configuration_files(conf_files, conf_paths)
        # Round-trip through JSON to fail fast on non-serializable values.
        json.dumps(config)
    except pconfig.ConfigurationException as e:
        # Fix: `logger` was never defined in this module (NameError on the
        # error path); use the root logger set up by logging.basicConfig().
        logging.error(str(e))
        config = None
    return config
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def main(args=None):
    """Entry point: parse CLI args, load config and start the Ethereum connector.

    Exits with status -1 when the configuration file cannot be parsed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config",
        help="The config file containing the Ethereum contract information",
        type=str)
    parser.add_argument(
        "-u", "--uri",
        help="Direct API listener endpoint",
        default="http://avalon-listener:1947",
        type=str)
    options = parser.parse_args(args)
    config = _parse_config_file(options.config)
    if config is None:
        # Fix: `logger` was never defined in this module (NameError on this
        # path); use the root logger, matching the logging.info call below.
        logging.error("\n Error in parsing config file: {}\n".format(
            options.config
        ))
        sys.exit(-1)
    # Http JSON RPC listener uri
    uri = options.uri
    if uri:
        config["tcf"]["json_rpc_uri"] = uri
    logging.info("About to start Ethereum connector service")
    eth_client = EthereumWrapper(config)
    # Resolve and instantiate the worker-registry contract.
    worker_reg_contract_file = TCF_HOME + "/" + \
        config["ethereum"]["worker_registry_contract_file"]
    worker_reg_contract_address = \
        config["ethereum"]["worker_registry_contract_address"]
    worker_reg_contract_instance,\
        worker_reg_contract_instance_evt = eth_client\
        .get_contract_instance(
            worker_reg_contract_file, worker_reg_contract_address)
    worker_registry = EthereumWorkerRegistryImpl(config)
    # Resolve and instantiate the work-order contract.
    work_order_contract_file = TCF_HOME + "/" + \
        config["ethereum"]["work_order_contract_file"]
    work_order_contract_address = \
        config["ethereum"]["work_order_contract_address"]
    work_order_contract_instance,\
        wo_contract_instance_evt = eth_client\
        .get_contract_instance(
            work_order_contract_file, work_order_contract_address)
    work_order_proxy = EthereumWorkOrderProxyImpl(config)
    # Only the work-order event contract instance is consumed by the
    # connector; the registry/work-order impls wrap their own instances.
    eth_connector_svc = EthereumConnector(
        config, None,
        worker_registry, work_order_proxy, None,
        wo_contract_instance_evt)
    eth_connector_svc.start()
# -----------------------------------------------------------------
# -----------------------------------------------------------------
main()
| 2,354 | 0 | 46 |
59161acdbbfe0086ad74e6b4f04b3ab3f51c44b3 | 2,366 | py | Python | test/test_membalancer.py | dcsparkes/adventofcode | e8bf8cef1d8757ad8981dde8dc76f8f7ec396be5 | [
"Unlicense"
] | null | null | null | test/test_membalancer.py | dcsparkes/adventofcode | e8bf8cef1d8757ad8981dde8dc76f8f7ec396be5 | [
"Unlicense"
] | null | null | null | test/test_membalancer.py | dcsparkes/adventofcode | e8bf8cef1d8757ad8981dde8dc76f8f7ec396be5 | [
"Unlicense"
] | null | null | null | """
https://adventofcode.com/2017/day/6
"""
import unittest
from _2017 import d06_memreallocation
if __name__ == '__main__':
unittest.main()
| 30.333333 | 57 | 0.62891 | """
https://adventofcode.com/2017/day/6
"""
import unittest
from _2017 import d06_memreallocation
class TestMemBalancer(unittest.TestCase):
    """Tests for the 2017 day-6 memory-reallocation MemBalancer.

    Covers input-file parsing, single `_rebalance` steps against the
    puzzle's worked example ([0,2,7,0] -> ... -> [2,4,1,2]), and the full
    `balance()` cycle detection (total steps and loop length) for both the
    example and the real puzzle input file.
    """
    fInput = "input2017_06a.txt"   # real puzzle input fixture
    fTest1 = "test2017_06a.txt"    # worked-example fixture: banks [0, 2, 7, 0]
    def test_readFile_fTest1_len(self):
        mb = d06_memreallocation.MemBalancer(self.fTest1)
        self.assertEqual(4, len(mb.memState))
    def test_readFile_fTest1_state(self):
        mb = d06_memreallocation.MemBalancer(self.fTest1)
        self.assertEqual([0, 2, 7, 0], mb.memState)
    def test_rebalance_0270(self):
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [0, 2, 7, 0]
        mb._rebalance()
        expected = [2, 4, 1, 2]
        self.assertEqual(expected, mb.memState)
    def test_rebalance_2412(self):
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [2, 4, 1, 2]
        mb._rebalance()
        expected = [3, 1, 2, 3]
        self.assertEqual(expected, mb.memState)
    def test_rebalance_3123(self):
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [3, 1, 2, 3]
        mb._rebalance()
        expected = [0, 2, 3, 4]
        self.assertEqual(expected, mb.memState)
    def test_rebalance_0234(self):
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [0, 2, 3, 4]
        mb._rebalance()
        expected = [1, 3, 4, 1]
        self.assertEqual(expected, mb.memState)
    def test_rebalance_1341(self):
        # [1, 3, 4, 1] rebalances back to [2, 4, 1, 2]: the cycle closes.
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [1, 3, 4, 1]
        mb._rebalance()
        expected = [2, 4, 1, 2]
        self.assertEqual(expected, mb.memState)
    def test_balance_fInput_count(self):
        mb = d06_memreallocation.MemBalancer(self.fInput)
        self.assertEqual(7864, mb.balance()[0])
    def test_balance_fInput_interim(self):
        # balance() returns (total steps, step where the repeated state was
        # first seen); their difference is the loop length (part 2 answer).
        mb = d06_memreallocation.MemBalancer(self.fInput)
        count, last = mb.balance()
        self.assertEqual(1695, count - last)
    def test_balance_fTest1_count(self):
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [0, 2, 7, 0]
        self.assertEqual(5, mb.balance()[0])
    def test_balance_fTest1_interim(self):
        mb = d06_memreallocation.MemBalancer()
        mb.memState = [0, 2, 7, 0]
        count, last = mb.balance()
        self.assertEqual(4, count - last)
if __name__ == '__main__':
unittest.main()
| 1,813 | 382 | 23 |
c34ae911dc7634a0bfbf6a8c8b14453dc7a1f47b | 10,411 | py | Python | harvester/agent_manager.py | DrInfy/TheHarvester | dd21194ab2220c8edb73352c299d2bfb0f11d7d6 | [
"MIT"
] | 6 | 2020-03-08T21:04:47.000Z | 2021-05-29T07:14:25.000Z | harvester/agent_manager.py | DrInfy/TheHarvester | dd21194ab2220c8edb73352c299d2bfb0f11d7d6 | [
"MIT"
] | 5 | 2020-04-20T08:41:48.000Z | 2021-01-04T18:15:39.000Z | harvester/agent_manager.py | DrInfy/TheHarvester | dd21194ab2220c8edb73352c299d2bfb0f11d7d6 | [
"MIT"
] | 2 | 2021-01-18T21:07:56.000Z | 2021-11-22T15:24:21.000Z | from random import randint
from typing import Optional
from harvester.zerg_action import ZergAction
from sc2 import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId
from sharpy.interfaces import IGameAnalyzer, IEnemyUnitsManager
from sharpy.managers.extensions.game_states import AirArmy
from sharpy.managers.extensions.game_states.advantage import at_least_disadvantage, at_least_advantage
from tactics.ml.base_agent_manager import BaseAgentManager
from tactics.ml.ml_build import MlBuild
| 41.478088 | 114 | 0.621074 | from random import randint
from typing import Optional
from harvester.zerg_action import ZergAction
from sc2 import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId
from sharpy.interfaces import IGameAnalyzer, IEnemyUnitsManager
from sharpy.managers.extensions.game_states import AirArmy
from sharpy.managers.extensions.game_states.advantage import at_least_disadvantage, at_least_advantage
from tactics.ml.base_agent_manager import BaseAgentManager
from tactics.ml.ml_build import MlBuild
class ZergAgentManager(BaseAgentManager):
    """Scripted Zerg macro policy layered on the ML agent manager.

    ``scripted_action`` walks a fixed decision tree each step and returns a
    ``ZergAction`` choice. Most thresholds (worker counts, army sizes,
    timings, unit-composition switches) are randomized once per game in
    ``__init__`` so successive games explore slightly different builds.
    """
    game_analyzer: IGameAnalyzer
    enemy_units_manager: IEnemyUnitsManager
    def __init__(self, agent: str, build_str: str, build: MlBuild) -> None:
        """Initialize the manager and roll this game's random thresholds."""
        super().__init__(agent, build_str, build)
        # Worker count that triggers the 2nd base / pool timing.
        self.hatcheryworkers1 = randint(13, 22)
        self.poolworkers = randint(16, 22)
        # Worker count that triggers the 3rd base.
        self.hatcheryworkers2 = randint(30, 50)
        # Stop making workers beyond this supply.
        self.cap_workers = randint(45, 90)
        # Minimum army supply to keep before 8 minutes.
        self.mid_game_army = randint(25, 90)
        # Hydralisk count target after hydra_time (0, 8 or 16).
        self.mid_game_hydras = randint(0, 2) * 8
        self.hydra_time = (randint(0, 4) + 5) * 60  # 5-9 minutes, in seconds
        self.max_queens = randint(1, 3) * 3         # 3, 6 or 9
        self.corruptors = randint(0, 5) > 3         # ~1/3 chance: prefer air answers
        self.upgrades_start = randint(5, 12) * 60   # earliest upgrade time, seconds
        self.banelings = randint(0, 5) > 3          # ~1/3 chance: allow banelings
    async def start(self, knowledge: "Knowledge"):
        """Resolve the sharpy managers this policy reads every step."""
        await super().start(knowledge)
        self.game_analyzer = knowledge.get_required_manager(IGameAnalyzer)
        self.enemy_units_manager = knowledge.get_required_manager(IEnemyUnitsManager)
    def scripted_action(self) -> int:
        """Pick the next macro action (a ZergAction value) for this step."""
        # Mineral bank overflowing: periodically dump it into army (or
        # drones when low on both workers and gas).
        if self.ai.minerals > 1000 and self.ai.time % 5 > 3 and self.ai.supply_used < 199:
            if self.ai.supply_workers < 50 and self.ai.vespene < 50:
                return ZergAction.Drones
            return self.build_army()
        if self.ai.supply_workers < self.hatcheryworkers1 and self.ai.supply_workers < self.poolworkers:
            return ZergAction.Drones
        if self.ai.supply_workers >= self.hatcheryworkers1 and self.cache.own_townhalls().amount == 1:
            return ZergAction.Bases
        # Keep one queen per townhall up to the per-game max_queens cap.
        if (
            self.ai.already_pending(UnitTypeId.QUEEN) + self.cache.own(UnitTypeId.QUEEN).amount
            < min(self.max_queens, self.cache.own_townhalls().amount)
            and self.cache.own_townhalls.idle
        ):
            return ZergAction.Queens
        if (
            self.ai.townhalls.amount == 2
            and self.ai.time < 5 * 60
            and self.game_analyzer.army_at_least_small_disadvantage
        ):
            # Defense vs rush
            if self.cache.own(UnitTypeId.SPINECRAWLER).amount == 0:
                return ZergAction.Spines
            return ZergAction.Lings
        upgrade = self.upgrades()
        if upgrade:
            return upgrade
        # Behind on army but ahead on income: convert the lead into units.
        if (
            self.game_analyzer.army_at_least_small_disadvantage
            and self.game_analyzer.our_income_advantage in at_least_advantage
        ):
            return self.build_army()
        if self.ai.supply_workers >= self.hatcheryworkers2 and self.cache.own_townhalls().amount == 2:
            return ZergAction.Bases
        if self.game_analyzer.our_income_advantage in at_least_disadvantage:
            return self.build_economy()
        if self.game_analyzer.our_army_predict in at_least_disadvantage:
            return self.build_army()
        if self.ai.time < 8 * 60 and self.ai.supply_army < self.mid_game_army:
            return self.build_army()
        if self.ai.supply_workers < self.cap_workers:
            return self.build_economy()
        return self.build_army()
    def build_economy(self) -> ZergAction:
        """Choose between drones and a new base from free harvester slots."""
        idle_space = 0
        for townhall in self.ai.townhalls:
            idle_space += townhall.ideal_harvesters - townhall.assigned_harvesters
        if idle_space > 10:
            # Plenty of mining room left: make drones (or upgrades if maxed).
            if self.ai.supply_used >= 200:
                action = self.upgrades()
                if action:
                    return action
            return ZergAction.Drones
        return ZergAction.Bases
    def upgrades(self) -> Optional[ZergAction]:
        """Return an upgrade action when one is due, otherwise None.

        Only fires after ``upgrades_start`` and on roughly half of game
        seconds (time % 10 <= 4) to avoid re-issuing every step.
        """
        if self.ai.time < self.upgrades_start or self.ai.time % 10 > 4:
            return None
        # already_pending_upgrade returns research progress; `% 1 == 0`
        # means "not currently researching" (either 0.0 or done at 1.0),
        # and `== 0` on the last level means it hasn't been started.
        def allow_melee_upgrade():
            return (
                self.ai.already_pending_upgrade(UpgradeId.ZERGMELEEWEAPONSLEVEL1) % 1 == 0
                and self.ai.already_pending_upgrade(UpgradeId.ZERGMELEEWEAPONSLEVEL2) % 1 == 0
                and self.ai.already_pending_upgrade(UpgradeId.ZERGMELEEWEAPONSLEVEL3) == 0
            )
        def allow_ranged_upgrade():
            return (
                self.ai.already_pending_upgrade(UpgradeId.ZERGMISSILEWEAPONSLEVEL1) % 1 == 0
                and self.ai.already_pending_upgrade(UpgradeId.ZERGMISSILEWEAPONSLEVEL2) % 1 == 0
                and self.ai.already_pending_upgrade(UpgradeId.ZERGMISSILEWEAPONSLEVEL3) == 0
            )
        def allow_armor_upgrade():
            return (
                self.ai.already_pending_upgrade(UpgradeId.ZERGGROUNDARMORSLEVEL1) % 1 == 0
                and self.ai.already_pending_upgrade(UpgradeId.ZERGGROUNDARMORSLEVEL2) % 1 == 0
                and self.ai.already_pending_upgrade(UpgradeId.ZERGGROUNDARMORSLEVEL3) == 0
            )
        if self.ai.time > self.upgrades_start:
            # Weigh melee vs ranged upgrades by current army composition.
            melee_count = self.cache.own(UnitTypeId.ZERGLING).amount + self.cache.own(UnitTypeId.BANELING).amount
            ranged_count = (
                self.cache.own(UnitTypeId.ROACH).amount
                + self.cache.own(UnitTypeId.HYDRALISK).amount
                + self.cache.own(UnitTypeId.RAVAGER).amount
            )
            if (
                self.cache.own(UnitTypeId.SPIRE).idle
                and self.ai.already_pending_upgrade(UpgradeId.ZERGFLYERARMORSLEVEL3) == 0
                and self.game_analyzer.our_power.air_presence > 5
            ):
                return ZergAction.AirUpgrades
            evos = self.cache.own(UnitTypeId.EVOLUTIONCHAMBER)
            # No evolution chamber yet: requesting the upgrade also queues
            # the building, so still pick the composition-matching branch.
            if not evos:
                if melee_count * 2 > ranged_count:
                    if allow_melee_upgrade():
                        return ZergAction.MeleeUpgrades
                else:
                    if allow_ranged_upgrade():
                        return ZergAction.RangedUpgrades
            if evos.idle:
                if melee_count * 2 > ranged_count:
                    if allow_melee_upgrade():
                        return ZergAction.MeleeUpgrades
                else:
                    if allow_ranged_upgrade():
                        return ZergAction.RangedUpgrades
            if (evos.amount < 2 or evos.idle) and allow_armor_upgrade():
                return ZergAction.ArmorUpgrades
        return None
    def build_army(self) -> ZergAction:
        """Pick the army unit to build, countering scouted enemy composition."""
        larva = self.cache.own(UnitTypeId.LARVA).amount
        # Hard counters to specific enemy air threats first.
        if self.enemy_units_manager.unit_count(UnitTypeId.VOIDRAY) > 2:
            if (
                self.cache.own(UnitTypeId.HYDRALISK).amount
                < self.enemy_units_manager.unit_count(UnitTypeId.VOIDRAY) * 2
            ):
                return ZergAction.Hydras
        if self.enemy_units_manager.unit_count(UnitTypeId.BANSHEE) > 1:
            # corruptors flag picks mutas vs hydras as the banshee answer.
            if (
                self.corruptors
                and self.cache.own(UnitTypeId.MUTALISK).amount
                < self.enemy_units_manager.unit_count(UnitTypeId.BANSHEE) * 2
            ):
                return ZergAction.Mutalisks
            if (
                not self.corruptors
                and self.cache.own(UnitTypeId.HYDRALISK).amount
                < self.enemy_units_manager.unit_count(UnitTypeId.BANSHEE) * 2
            ):
                return ZergAction.Hydras
        # Enemy is (almost) all air: mass anti-air.
        if self.game_analyzer.enemy_air == AirArmy.AllAir or self.game_analyzer.enemy_air == AirArmy.AlmostAllAir:
            if not self.corruptors:
                if len(self.cache.own(UnitTypeId.HYDRALISK)) * 0.1 > len(self.cache.own(UnitTypeId.INFESTOR)) + 2:
                    return ZergAction.Infestors
                return ZergAction.Hydras
            return ZergAction.Corruptors
        # Mixed air/ground: alternate anti-air with ground units.
        if self.game_analyzer.enemy_air == AirArmy.Mixed:
            if not self.corruptors:
                if len(self.cache.own(UnitTypeId.HYDRALISK)) > len(self.cache.own(UnitTypeId.ROACH)):
                    return ZergAction.Roaches
                return ZergAction.Hydras
            if len(self.cache.own(UnitTypeId.CORRUPTOR)) > len(self.cache.own(UnitTypeId.ROACH)):
                return ZergAction.Roaches
            return ZergAction.Corruptors
        # Early game, or almost no larva and few lings: default to lings.
        if self.ai.time < 240 or (self.cache.own(UnitTypeId.ZERGLING).amount < 12 and larva < 2):
            return ZergAction.Lings
        # Banelings against massed light ground units (if enabled this game).
        if self.banelings and (
            self.enemy_units_manager.unit_count(UnitTypeId.ZERGLING) > 25
            or self.enemy_units_manager.unit_count(UnitTypeId.MARINE) > 15
            or self.enemy_units_manager.unit_count(UnitTypeId.ZEALOT) > 10
        ):
            if self.cache.own(UnitTypeId.BANELING).amount < 10:
                return ZergAction.Banelings
        # Lings to swarm slow armored ground (marauders/stalkers).
        if (
            self.enemy_units_manager.unit_count(UnitTypeId.MARAUDER) > 6
            or self.enemy_units_manager.unit_count(UnitTypeId.STALKER) > 5
        ):
            if self.cache.own(UnitTypeId.ZERGLING).amount < 30:
                return ZergAction.Lings
        # Corruptors against capital ships.
        if (
            self.enemy_units_manager.unit_count(UnitTypeId.BATTLECRUISER) > 1
            or self.enemy_units_manager.unit_count(UnitTypeId.TEMPEST) > 1
            or self.enemy_units_manager.unit_count(UnitTypeId.CARRIER) > 1
            or self.enemy_units_manager.unit_count(UnitTypeId.BROODLORD) > 1
        ):
            if self.cache.own(UnitTypeId.CORRUPTOR).amount < 5:
                return ZergAction.Corruptors
        if self.ai.time > self.hydra_time and self.mid_game_hydras < len(self.cache.own(UnitTypeId.HYDRALISK)):
            return ZergAction.Hydras
        # Gas-heavy tech options when the bank allows and enemy is grounded.
        if (
            self.ai.vespene > 100
            and self.game_analyzer.enemy_air < AirArmy.SomeAir
            and len(self.cache.own(UnitTypeId.HYDRALISK)) > 6
            and len(self.cache.own(UnitTypeId.LURKER)) < 5
        ):
            return ZergAction.Lurkers
        if (
            self.ai.vespene > 100
            and len(self.cache.own(UnitTypeId.ROACH)) > len(self.cache.own(UnitTypeId.RAVAGER)) + 2
        ):
            return ZergAction.Ravagers
        return ZergAction.Roaches
| 9,634 | 259 | 23 |
09e2d8ae0fd803e5a993783454003f4eba5dc1d2 | 275 | py | Python | sunnysouth/marketplace/serializers/categories.py | EdwinBaeza05/django-genricsl-app | a8759d609957e80883cca79f0694d494364775a4 | [
"MIT"
] | null | null | null | sunnysouth/marketplace/serializers/categories.py | EdwinBaeza05/django-genricsl-app | a8759d609957e80883cca79f0694d494364775a4 | [
"MIT"
] | null | null | null | sunnysouth/marketplace/serializers/categories.py | EdwinBaeza05/django-genricsl-app | a8759d609957e80883cca79f0694d494364775a4 | [
"MIT"
] | null | null | null | # Django
from rest_framework import serializers
# Models
from sunnysouth.marketplace.models import Category
| 21.153846 | 59 | 0.730909 | # Django
from rest_framework import serializers
# Models
from sunnysouth.marketplace.models import Category
class CategoryModelSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the Category model.

    The uuid is server-generated and therefore read-only to API clients.
    """

    class Meta:
        model = Category
        fields = '__all__'
        read_only_fields = ['uuid']
| 0 | 142 | 23 |
9fc23fc2e4a05ea6f915783e7573f4e1dddd686a | 1,666 | py | Python | launch_xml/test/launch_xml/test_include.py | bedieber/launch | 4dfe69763379e405df7a21bde536aad7e39fdd93 | [
"Apache-2.0"
] | 67 | 2015-06-12T21:17:24.000Z | 2022-03-30T07:19:52.000Z | launch_xml/test/launch_xml/test_include.py | bedieber/launch | 4dfe69763379e405df7a21bde536aad7e39fdd93 | [
"Apache-2.0"
] | 516 | 2015-03-20T02:22:59.000Z | 2022-03-30T12:33:33.000Z | launch_xml/test/launch_xml/test_include.py | bedieber/launch | 4dfe69763379e405df7a21bde536aad7e39fdd93 | [
"Apache-2.0"
] | 101 | 2016-01-12T16:56:54.000Z | 2022-03-09T12:35:37.000Z | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test parsing an include action."""
import io
from pathlib import Path
import textwrap
from launch import LaunchService
from launch.actions import IncludeLaunchDescription
from launch.frontend import Parser
from launch.launch_description_sources import AnyLaunchDescriptionSource
def test_include():
"""Parse node xml example."""
# Always use posix style paths in launch XML files.
path = (Path(__file__).parent / 'executable.xml').as_posix()
xml_file = \
"""\
<launch>
<include file="{}"/>
</launch>
""".format(path) # noqa: E501
xml_file = textwrap.dedent(xml_file)
root_entity, parser = Parser.load(io.StringIO(xml_file))
ld = parser.parse_description(root_entity)
include = ld.entities[0]
assert isinstance(include, IncludeLaunchDescription)
assert isinstance(include.launch_description_source, AnyLaunchDescriptionSource)
ls = LaunchService(debug=True)
ls.include_launch_description(ld)
assert 0 == ls.run()
if __name__ == '__main__':
test_include()
| 33.32 | 84 | 0.729292 | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test parsing an include action."""
import io
from pathlib import Path
import textwrap
from launch import LaunchService
from launch.actions import IncludeLaunchDescription
from launch.frontend import Parser
from launch.launch_description_sources import AnyLaunchDescriptionSource
def test_include():
    """Parse an XML launch file with an <include> action and execute it."""
    # Launch XML files always use posix style paths, even on Windows.
    executable_path = (Path(__file__).parent / 'executable.xml').as_posix()
    launch_xml = textwrap.dedent(
        """\
        <launch>
            <include file="{}"/>
        </launch>
        """
    ).format(executable_path)
    root_entity, parser = Parser.load(io.StringIO(launch_xml))
    launch_description = parser.parse_description(root_entity)
    include_action = launch_description.entities[0]
    assert isinstance(include_action, IncludeLaunchDescription)
    assert isinstance(
        include_action.launch_description_source, AnyLaunchDescriptionSource)
    launch_service = LaunchService(debug=True)
    launch_service.include_launch_description(launch_description)
    assert 0 == launch_service.run()


if __name__ == '__main__':
    test_include()
| 0 | 0 | 0 |
fdb67a622240f2063fb76151882d0b104415b1aa | 651 | py | Python | scrapple/utils/dynamicdispatch.py | scrappleapp/scrapple | 0c19c7a2606f6ef5a1225a337c6ab08cb8431906 | [
"MIT"
] | 331 | 2015-02-04T18:12:31.000Z | 2015-10-02T18:55:38.000Z | scrapple/utils/dynamicdispatch.py | harishb93/scrapple | 0c19c7a2606f6ef5a1225a337c6ab08cb8431906 | [
"MIT"
] | 19 | 2015-01-24T13:44:11.000Z | 2015-05-29T10:49:19.000Z | scrapple/utils/dynamicdispatch.py | harishb93/scrapple | 0c19c7a2606f6ef5a1225a337c6ab08cb8431906 | [
"MIT"
] | 35 | 2015-12-17T06:32:02.000Z | 2022-02-13T18:33:43.000Z | """
scrapple.utils.dynamicdispatch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions related to dynamic dispatch of objects
"""
def get_command_class(command):
"""
Called from runCLI() to select the command class for the selected command.
:param command: The command to be implemented
:return: The command class corresponding to the selected command
"""
from scrapple.commands import genconfig, generate, run, web
commandMapping = {
'genconfig': genconfig,
'generate': generate,
'run': run,
'web': web
}
cmdClass = getattr(commandMapping.get(command), command.title() + 'Command')
return cmdClass
| 27.125 | 80 | 0.658986 | """
scrapple.utils.dynamicdispatch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions related to dynamic dispatch of objects
"""
def get_command_class(command):
    """
    Called from runCLI() to select the command class for the selected command.

    :param command: The command to be implemented
    :return: The command class corresponding to the selected command
    """
    from scrapple.commands import genconfig, generate, run, web
    modules = {
        'genconfig': genconfig,
        'generate': generate,
        'run': run,
        'web': web,
    }
    # Command classes follow the "<Name>Command" naming convention.
    class_name = command.title() + 'Command'
    return getattr(modules.get(command), class_name)
| 0 | 0 | 0 |
597451557300b887a44f7f8772fc32f6fbfec8f5 | 274 | py | Python | api_providing/api_currency_exchange.py | xyla-io/almacen | 7b7f235dc7939777f971f1b5eadd5621e980c15e | [
"MIT"
] | 2 | 2020-10-15T22:12:17.000Z | 2020-10-26T07:17:17.000Z | api_providing/api_currency_exchange.py | xyla-io/almacen | 7b7f235dc7939777f971f1b5eadd5621e980c15e | [
"MIT"
] | null | null | null | api_providing/api_currency_exchange.py | xyla-io/almacen | 7b7f235dc7939777f971f1b5eadd5621e980c15e | [
"MIT"
] | null | null | null | import tasks
from . import base
from jones import FixerAPI | 30.444444 | 95 | 0.79927 | import tasks
from . import base
from jones import FixerAPI
class CurrencyExchangeAPIProvider(base.APIProvider[tasks.FetchBaseCurrencyExchangeReportTask]):
    """Equips the currency-exchange fetch task with a configured Fixer client."""

    def provide(self):
        # The task carries its own credentials; build the client from them.
        self.task.api = FixerAPI(api_key=self.task.api_credentials['api_key'])
b6c2b4befaf3d438e97b81d9007068d1ea6a1911 | 1,877 | py | Python | asax/lj.py | sirmarcel/asax | f12d7b4837e807506a62daad9a3e4ff38ba6b777 | [
"MIT"
] | 1 | 2022-02-10T18:40:13.000Z | 2022-02-10T18:40:13.000Z | asax/lj.py | sirmarcel/asax | f12d7b4837e807506a62daad9a3e4ff38ba6b777 | [
"MIT"
] | 4 | 2021-05-11T11:10:07.000Z | 2021-11-11T21:48:18.000Z | asax/lj.py | sirmarcel/asax | f12d7b4837e807506a62daad9a3e4ff38ba6b777 | [
"MIT"
] | 1 | 2021-04-27T15:01:53.000Z | 2021-04-27T15:01:53.000Z | from typing import Tuple
import jax.numpy as jnp
from ase import units
from jax_md import energy
from jax_md.energy import NeighborFn
from .calculator import Calculator
from .jax_utils import EnergyFn
class LennardJones(Calculator):
"""Lennard-Jones Potential"""
implemented_properties = ["energy", "energies", "forces", "stress"]
def __init__(
self,
epsilon=1.0,
sigma=1.0,
rc=None,
ro=None,
stress=False,
dr_threshold=1 * units.Angstrom,
**kwargs
):
"""
Parameters:
sigma: The potential minimum is at 2**(1/6) * sigma, default 1.0
epsilon: The potential depth, default 1.0
rc: Cut-off for the NeighborList is set to 3 * sigma if None, the default.
ro: Onset of the cutoff function. Set to 0.8*rc if None, the default.
stress: Compute stress tensor (periodic systems only)
"""
super().__init__(**kwargs, stress=stress)
self.epsilon = epsilon
self.sigma = sigma
if rc is None:
rc = 3 * self.sigma
self.rc = rc
if ro is None:
ro = 0.8 * self.rc
self.ro = ro
self.dr_threshold = dr_threshold
| 30.274194 | 86 | 0.606287 | from typing import Tuple
import jax.numpy as jnp
from ase import units
from jax_md import energy
from jax_md.energy import NeighborFn
from .calculator import Calculator
from .jax_utils import EnergyFn
class LennardJones(Calculator):
    """Lennard-Jones potential calculator backed by jax-md neighbor lists."""

    implemented_properties = ["energy", "energies", "forces", "stress"]

    def __init__(
        self,
        epsilon=1.0,
        sigma=1.0,
        rc=None,
        ro=None,
        stress=False,
        dr_threshold=1 * units.Angstrom,
        **kwargs
    ):
        """
        Parameters:
            sigma: The potential minimum is at 2**(1/6) * sigma, default 1.0
            epsilon: The potential depth, default 1.0
            rc: Cut-off for the NeighborList is set to 3 * sigma if None, the default.
            ro: Onset of the cutoff function. Set to 0.8*rc if None, the default.
            stress: Compute stress tensor (periodic systems only)
        """
        super().__init__(**kwargs, stress=stress)
        self.epsilon = epsilon
        self.sigma = sigma
        # Cutoff and onset default to multiples of sigma / rc when omitted.
        self.rc = 3 * self.sigma if rc is None else rc
        self.ro = 0.8 * self.rc if ro is None else ro
        self.dr_threshold = dr_threshold

    def get_energy_function(self) -> Tuple[NeighborFn, EnergyFn]:
        """Build the per-particle LJ energy function with its neighbor list.

        Distances handed to jax-md are normalized by sigma, hence the
        rescaled onset/cutoff below.
        """
        def as_param(value):
            return jnp.array(value, dtype=self.global_dtype)

        return energy.lennard_jones_neighbor_list(
            self.displacement,
            self.box,
            sigma=as_param(self.sigma),
            epsilon=as_param(self.epsilon),
            r_onset=as_param(self.ro / self.sigma),
            r_cutoff=as_param(self.rc / self.sigma),
            per_particle=True,
            dr_threshold=self.dr_threshold,
        )
| 599 | 0 | 27 |
5265017dcc3f9ea3967279e14b5718bf28920abb | 11,197 | py | Python | drake_pytorch/symbolic.py | DAIRLab/drake-pytorch | 3c7e33d58f1ad26008bd89f3e0fe1951b5175d3b | [
"BSD-3-Clause"
] | 9 | 2022-01-17T04:24:41.000Z | 2022-02-11T17:53:04.000Z | drake_pytorch/symbolic.py | DAIRLab/drake-pytorch | 3c7e33d58f1ad26008bd89f3e0fe1951b5175d3b | [
"BSD-3-Clause"
] | null | null | null | drake_pytorch/symbolic.py | DAIRLab/drake-pytorch | 3c7e33d58f1ad26008bd89f3e0fe1951b5175d3b | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from typing import Tuple, Callable, List, Union, cast
from typing_extensions import Protocol
import types
import torch
import pydrake.symbolic as sym
import pdb
import sympy
from sympy import Symbol, sympify, simplify, trigsimp
from functools import reduce
import operator
from enum import Enum
# string formatting literals
TORCH_ARGS = 'torch_args'
INTERMEDIATE_VARIABLE_PREFIX = 'var'
SYMPY_SYMBOL_PREFIX = 's'
# simplification settings
_simplifier_map = {
Simplifier.ALL: simplify,
Simplifier.TRIGONLY: trigsimp,
Simplifier.QUICKTRIG: lambda x: trigsimp(x, quick=True),
Simplifier.NONE: lambda x: x
}
# Convert an expression to a function in pytorch. The arguments for the
# returned function are be `x[i]`, where index `i` is derived from the
# ordering in `sym_args`. Returns [func, func_string]
#
# @param expr Either a pydrake.symbolic.Expression, or a list of expressions
# @param *sym_args The arguments to the symbolic expression. Multiple arguments
# can be provided.
#
# @return func A function object, which can be evaluated on a pytorch Tensor
# The function takes a variable number of arguments, matching the list in
# sym_args
# @return A string expressing the entire python function
# # # # # # # # # # # # # # # # # # # # # # # #
# helper functions / data for drake -> sympy #
# # # # # # # # # # # # # # # # # # # # # # # #
_constant_dict = {1.0: "1", -1.0: "-1"}
sympy_ops = {
sym.ExpressionKind.Add: sympy.Add,
sym.ExpressionKind.Mul: sympy.Mul,
sym.ExpressionKind.Div: lambda x, y: x / y,
sym.ExpressionKind.Log: sympy.log,
sym.ExpressionKind.Abs: sympy.Abs,
sym.ExpressionKind.Exp: sympy.exp,
sym.ExpressionKind.Pow: _fastpow,
sym.ExpressionKind.Sin: sympy.sin,
sym.ExpressionKind.Cos: sympy.cos,
sym.ExpressionKind.Tan: sympy.tan,
sym.ExpressionKind.Asin: sympy.asin,
sym.ExpressionKind.Acos: sympy.acos,
sym.ExpressionKind.Atan: sympy.atan,
sym.ExpressionKind.Atan2: sympy.atan2,
sym.ExpressionKind.Sinh: sympy.sinh,
sym.ExpressionKind.Cosh: sympy.cosh,
sym.ExpressionKind.Tanh: sympy.tanh,
sym.ExpressionKind.Min: sympy.Min,
sym.ExpressionKind.Max: sympy.Max,
sym.ExpressionKind.Ceil: sympy.ceiling,
sym.ExpressionKind.Floor: sympy.floor
}
# # # # # # # # # # # # # # # # # # # # # # # #
# helper functions / data for sympy -> torch #
# # # # # # # # # # # # # # # # # # # # # # # #
_torch_ops = {
sympy.Add: _reduction('+'),
sympy.Mul: _reduction('*'),
sympy.div: sympy_to_pytorch_simple_fun('div'),
sympy.log: sympy_to_pytorch_simple_fun('log'),
sympy.Abs: sympy_to_pytorch_simple_fun('abs'),
sympy.exp: sympy_to_pytorch_simple_fun('exp'),
sympy.Pow: _fastpow_string,
sympy.sin: sympy_to_pytorch_simple_fun('sin'),
sympy.cos: sympy_to_pytorch_simple_fun('cos'),
sympy.tan: sympy_to_pytorch_simple_fun('tan'),
sympy.asin: sympy_to_pytorch_simple_fun('asin'),
sympy.acos: sympy_to_pytorch_simple_fun('acos'),
sympy.atan: sympy_to_pytorch_simple_fun('atan'),
sympy.atan2: sympy_to_pytorch_simple_fun('atan2'),
sympy.sinh: sympy_to_pytorch_simple_fun('sinh'),
sympy.cosh: sympy_to_pytorch_simple_fun('cosh'),
sympy.tanh: sympy_to_pytorch_simple_fun('tanh'),
sympy.Min: sympy_to_pytorch_simple_fun('min'),
sympy.Max: sympy_to_pytorch_simple_fun('max'),
sympy.ceiling: sympy_to_pytorch_simple_fun('ceil'),
sympy.floor: sympy_to_pytorch_simple_fun('floor'),
}
# Convert a drake symbolic expression to sympy
#
# @param expr The drake expression to convert
# @param vardict Dictionary which corresponds drake variables to sympy Symbols
# @return The sympy expression
# Convert a sympy expression to a string. The arguments for the returned
# string will be `x[i]`, where index `i` is derived from the ordering
# in `sym_args`
#
# @param expr The sympy to convert
# @param memos Memoization dictionary of previously-seen expressions
# @param lines python lines which calculated memoized expressions
# @param vardict Dictionary which corresponds sympy Symbols to drake variables
# @param sym_args An ordered list of drake symbolic variables
# @return The variable name the expression is stored in to
| 35.321767 | 80 | 0.650889 | import numpy as np
from typing import Tuple, Callable, List, Union, cast
from typing_extensions import Protocol
import types
import torch
import pydrake.symbolic as sym
import pdb
import sympy
from sympy import Symbol, sympify, simplify, trigsimp
from functools import reduce
import operator
from enum import Enum
# string formatting literals
TORCH_ARGS = 'torch_args'
INTERMEDIATE_VARIABLE_PREFIX = 'var'
SYMPY_SYMBOL_PREFIX = 's'
# simplification settings
class Simplifier(Enum):
    """Strategy for simplifying sympy expressions before code generation.

    Bug fix: QUICKTRIG and NONE previously shared the value 4, which made
    NONE an *alias* of QUICKTRIG. As a consequence the later entry in
    ``_simplifier_map`` (the identity lambda) silently overwrote the
    quick-trigsimp entry, so QUICKTRIG performed no simplification at all.
    The member values are now distinct.
    """
    ALL = 1        # full sympy.simplify
    TRIGONLY = 2   # sympy.trigsimp
    QUICKTRIG = 3  # sympy.trigsimp(quick=True)
    NONE = 4       # no simplification
# Maps each Simplifier strategy to the sympy routine that implements it.
# NOTE(review): if two Simplifier members share a value they alias to one
# member, and the later dict entry silently overwrites the earlier one --
# verify the enum values are distinct.
_simplifier_map = {
    Simplifier.ALL: simplify,
    Simplifier.TRIGONLY: trigsimp,
    Simplifier.QUICKTRIG: lambda x: trigsimp(x, quick=True),
    Simplifier.NONE: lambda x: x
}
# Convert an expression to a function in pytorch. The arguments for the
# returned function are be `x[i]`, where index `i` is derived from the
# ordering in `sym_args`. Returns [func, func_string]
#
# @param expr Either a pydrake.symbolic.Expression, or a list of expressions
# @param *sym_args The arguments to the symbolic expression. Multiple arguments
# can be provided.
#
# @return func A function object, which can be evaluated on a pytorch Tensor
# The function takes a variable number of arguments, matching the list in
# sym_args
# @return A string expressing the entire python function
class TensorCallable(Protocol):
    """Structural type: a callable mapping torch Tensors to a torch Tensor."""

    def __call__(self, *args: torch.Tensor) -> torch.Tensor: ...
def sym_to_pytorch(
    expr: Union[sym.Expression, np.ndarray, List],
    *sym_args: np.ndarray,
    simplify_computation: Simplifier = Simplifier.ALL
) -> Tuple[TensorCallable, str]:
    """Compile drake symbolic expression(s) into a pytorch function.

    Args:
        expr: a single drake Expression, or a list/ndarray of them.
        sym_args: ordered arrays of drake Variables; the generated function
            takes one torch Tensor per entry, in the same order.
        simplify_computation: sympy simplification strategy applied to each
            expression before code generation.

    Returns:
        (func, func_string): the compiled callable and the python source it
        was compiled from.

    Raises:
        ValueError: if expr is neither an Expression nor a list/ndarray.
    """
    simplifier = _simplifier_map[simplify_computation]
    str_list = []
    # The generated source uses a one-space indent throughout.
    str_list.append(f'def my_func(*{TORCH_ARGS}):\n')
    # detect batch dimension list from first argument and assume the rest follow
    str_list.append(
        f' batch_dims = {TORCH_ARGS}[0].shape[:-{len(sym_args[0].shape)}]\n')
    if isinstance(expr, sym.Expression):
        # Single expression: drake -> sympy -> pytorch source lines.
        python_lines = []
        vardict = {}
        sympy_expr_i = sym_to_sympy(expr, vardict)
        # NOTE(review): enum members are always truthy, so this branch is
        # taken even for Simplifier.NONE (whose map entry is the identity).
        if simplify_computation:
            sympy_expr_i = simplifier(sympy_expr_i)
        rev_vardict = {vardict[k]: k for k in vardict}
        expr_name = sympy_to_pytorch_string(sympy_expr_i, {}, python_lines,
                                            rev_vardict, sym_args)
        str_list.extend(python_lines)
        str_list.append(f' return {expr_name}\n')
    elif isinstance(expr, list) or isinstance(expr, np.ndarray):
        # Multiple expressions: flatten, translate each one with a shared
        # memoization table, then stack and reshape the results.
        if isinstance(expr, np.ndarray):
            shape = expr.shape
            nonbatch_dims = len(shape)  # NOTE(review): unused local
            expr = np.reshape(expr, -1)
        else:
            # A plain list is treated as a column vector.
            shape = (len(expr), 1)
        vardict = {}
        memos = {}
        python_lines = []
        expr_names = []
        for i, expr_i in enumerate(expr):
            sympy_expr_i = sym_to_sympy(expr_i, vardict)
            if simplify_computation:
                sympy_expr_i = simplifier(sympy_expr_i)
            rev_vardict = {vardict[k]: k for k in vardict}
            expr_names.append(
                sympy_to_pytorch_string(sympy_expr_i, memos, python_lines,
                                        rev_vardict, sym_args))
        str_list.extend(python_lines)
        expr_indices = ', '.join(expr_names)
        str_list.append(f' ret = torch.stack(({expr_indices},), dim = -1)\n')
        str_list.append(f' return torch.reshape(ret, batch_dims + {shape})')
    else:
        raise ValueError('expr must be a drake symbolic Expression or a list')
    func_string = ''.join(str_list)
    # Compile the assembled source; the function object is the first constant
    # of the compiled module code.
    code = compile(func_string, 'tmp.py', 'single')
    func = cast(TensorCallable,
                types.FunctionType(code.co_consts[0], globals()))
    return func, func_string
# # # # # # # # # # # # # # # # # # # # # # # #
# helper functions / data for drake -> sympy #
# # # # # # # # # # # # # # # # # # # # # # # #
def _fastpow(*x):
#print(x[1], type(x[1]), float(x[1]) == 2.0)
if float(x[1]) == 2.0:
return x[0] * x[0]
else:
return x[0]**x[1]
_constant_dict = {1.0: "1", -1.0: "-1"}
def _sympy_constant_cast(x):
    """Sympify a numeric constant, using canonical strings for +/-1."""
    key = float(x)
    if key in _constant_dict:
        return sympify(_constant_dict[key])
    return sympify(str(x))
# Dispatch table: drake ExpressionKind -> the sympy constructor implementing
# it. Pow routes through _fastpow so that squaring lowers to multiplication.
sympy_ops = {
    sym.ExpressionKind.Add: sympy.Add,
    sym.ExpressionKind.Mul: sympy.Mul,
    sym.ExpressionKind.Div: lambda x, y: x / y,
    sym.ExpressionKind.Log: sympy.log,
    sym.ExpressionKind.Abs: sympy.Abs,
    sym.ExpressionKind.Exp: sympy.exp,
    sym.ExpressionKind.Pow: _fastpow,
    sym.ExpressionKind.Sin: sympy.sin,
    sym.ExpressionKind.Cos: sympy.cos,
    sym.ExpressionKind.Tan: sympy.tan,
    sym.ExpressionKind.Asin: sympy.asin,
    sym.ExpressionKind.Acos: sympy.acos,
    sym.ExpressionKind.Atan: sympy.atan,
    sym.ExpressionKind.Atan2: sympy.atan2,
    sym.ExpressionKind.Sinh: sympy.sinh,
    sym.ExpressionKind.Cosh: sympy.cosh,
    sym.ExpressionKind.Tanh: sympy.tanh,
    sym.ExpressionKind.Min: sympy.Min,
    sym.ExpressionKind.Max: sympy.Max,
    sym.ExpressionKind.Ceil: sympy.ceiling,
    sym.ExpressionKind.Floor: sympy.floor
}
# # # # # # # # # # # # # # # # # # # # # # # #
# helper functions / data for sympy -> torch #
# # # # # # # # # # # # # # # # # # # # # # # #
def _reduction(delim):
return lambda x: f' {delim} '.join(x)
def sympy_to_pytorch_simple_fun(name):
    """Return a renderer producing 'torch.<name>(arg0, arg1, ...)'."""
    def render(operands):
        joined = ', '.join(operands)
        return f'torch.{name}({joined})'
    return render
def _fastpow_string(xpower):
x = xpower[0]
power = xpower[1]
return f'{x} ** {power}'
def _sympy_constant_string(x):
return f'{float(x)} * torch.ones(batch_dims)'
def _sympy_expression_key(expr):
    """Memoization key: numeric constants collapse to a Float, others map to
    themselves (so e.g. Integer(2) and Float(2.0) share one key)."""
    head = expr.func
    is_number = (issubclass(head, sympy.Number)
                 or issubclass(head, sympy.NumberSymbol))
    return sympy.Float(expr) if is_number else expr
# Dispatch table: sympy node type -> renderer producing the equivalent
# pytorch source fragment for that node's already-rendered operands.
# NOTE(review): sympy.div (polynomial division) is used here purely as a
# dict key; sympy_to_pytorch_string looks it up when rewriting x**-1 into a
# torch.div call -- confirm no sympy tree ever has sympy.div as its head.
_torch_ops = {
    sympy.Add: _reduction('+'),
    sympy.Mul: _reduction('*'),
    sympy.div: sympy_to_pytorch_simple_fun('div'),
    sympy.log: sympy_to_pytorch_simple_fun('log'),
    sympy.Abs: sympy_to_pytorch_simple_fun('abs'),
    sympy.exp: sympy_to_pytorch_simple_fun('exp'),
    sympy.Pow: _fastpow_string,
    sympy.sin: sympy_to_pytorch_simple_fun('sin'),
    sympy.cos: sympy_to_pytorch_simple_fun('cos'),
    sympy.tan: sympy_to_pytorch_simple_fun('tan'),
    sympy.asin: sympy_to_pytorch_simple_fun('asin'),
    sympy.acos: sympy_to_pytorch_simple_fun('acos'),
    sympy.atan: sympy_to_pytorch_simple_fun('atan'),
    sympy.atan2: sympy_to_pytorch_simple_fun('atan2'),
    sympy.sinh: sympy_to_pytorch_simple_fun('sinh'),
    sympy.cosh: sympy_to_pytorch_simple_fun('cosh'),
    sympy.tanh: sympy_to_pytorch_simple_fun('tanh'),
    sympy.Min: sympy_to_pytorch_simple_fun('min'),
    sympy.Max: sympy_to_pytorch_simple_fun('max'),
    sympy.ceiling: sympy_to_pytorch_simple_fun('ceil'),
    sympy.floor: sympy_to_pytorch_simple_fun('floor'),
}
# Convert a drake symbolic expression to sympy
#
# @param expr The drake expression to convert
# @param vardict Dictionary which corresponds drake variables to sympy Symbols
# @return The sympy expression
def sym_to_sympy(expr, vardict):
    """Convert a drake symbolic expression to an equivalent sympy expression.

    Fixes: removed a live ``pdb.set_trace()`` debugger breakpoint that fired
    whenever a converted child was a tuple, the unused local ``str_list``,
    and stale commented-out debug statements.

    Args:
        expr: drake Expression (or a plain float sub-expression) to convert.
        vardict: mutable dict mapping drake variable ids -> sympy Symbols;
            it grows as new variables are encountered and is shared across
            calls so repeated variables reuse one Symbol.

    Returns:
        The equivalent sympy expression.

    Raises:
        ValueError: for a malformed Constant or an unsupported expression kind.
    """
    # Plain floats can appear as sub-expressions; cast them directly.
    if isinstance(expr, float):
        if expr == 1.0:
            return sympify("1")
        return _sympy_constant_cast(expr)

    # switch based on the expression kind
    kind = expr.get_kind()
    ctor, expr_args = expr.Unapply()

    if kind == sym.ExpressionKind.Constant:
        if len(expr_args) != 1:
            raise ValueError('Unexpected symbolic Constant of length != 1')
        return _sympy_constant_cast(expr_args[0])
    elif kind == sym.ExpressionKind.Var:
        # Reuse the Symbol for a drake variable we have already seen;
        # otherwise mint a fresh one named s_<index>.
        var_id = expr_args[0].get_id()
        if var_id in vardict:
            out = vardict[var_id]
        else:
            out = Symbol(f"{SYMPY_SYMBOL_PREFIX}_{len(vardict)}")
            vardict[var_id] = out
        return out
    else:
        # Interior node: convert the children first, then apply the matching
        # sympy constructor from the dispatch table.
        sympy_args = [sym_to_sympy(expr_arg, vardict) for expr_arg in expr_args]
        try:
            return sympy_ops[kind](*sympy_args)
        except KeyError:
            raise ValueError('Unsupported expression type ' + str(kind))
# Convert a sympy expression to a string. The arguments for the returned
# string will be `x[i]`, where index `i` is derived from the ordering
# in `sym_args`
#
# @param expr The sympy to convert
# @param memos Memoization dictionary of previously-seen expressions
# @param lines python lines which calculated memoized expressions
# @param vardict Dictionary which corresponds sympy Symbols to drake variables
# @param sym_args An ordered list of drake symbolic variables
# @return The variable name the expression is stored in to
def sympy_to_pytorch_string(expr, memos, lines, vardict, sym_args):
    """Recursively render a sympy expression into pytorch source lines.

    Each distinct sub-expression is emitted once as `var_<i> = ...` (appended
    to `lines`) and memoized in `memos`, so shared sub-trees are computed a
    single time in the generated function. Returns the variable name holding
    this expression's value.
    """
    # Memoization: identical sub-expressions reuse the already-emitted line.
    expr_key = _sympy_expression_key(expr)
    if expr_key in memos:
        return memos[expr_key]
    expr_top_level = expr.func
    if issubclass(expr_top_level, sympy.Number) or \
       issubclass(expr_top_level, sympy.NumberSymbol):
        # number, add float
        value = _sympy_constant_string(expr)
    elif issubclass(expr_top_level, sympy.Symbol):
        # variable, index into
        value = substitute_variable_id_string(vardict[expr], sym_args)
    else:
        # get equivalent torch operation
        torch_string_callback = _torch_ops[expr_top_level]
        # squaring and inversion hacks: x**2 -> x*x, x**-1 -> div(1, x)
        expr_args = expr.args
        if torch_string_callback is _fastpow_string:
            if issubclass(expr_args[1].func, sympy.Integer) \
               and int(expr_args[1]) == 2:
                expr_args = [expr_args[0], expr_args[0]]
                torch_string_callback = _reduction('*')
            # NOTE(review): after the squaring rewrite above, expr_args[1] is
            # the base, not the exponent -- this second check likely should
            # be an elif; confirm intended behavior for Integer bases.
            if issubclass(expr_args[1].func, sympy.Integer) \
               and int(expr_args[1]) == -1:
                expr_args = [_sympy_constant_cast(1.0), expr_args[0]]
                torch_string_callback = _torch_ops[sympy.div]
        args = [sympy_to_pytorch_string(arg, memos, lines, vardict, sym_args) \
            for arg in expr_args]
        value = torch_string_callback(args)
    # Emit the assignment (one-space indent in the generated function) and
    # record the variable name for future reuse.
    name = f'{INTERMEDIATE_VARIABLE_PREFIX}_{len(memos)}'
    memos[expr_key] = name
    lines.append(f' {name} = {value}\n')
    return name
def substitute_variable_id_string(var_id, sym_args):
    """Render the indexing string into TORCH_ARGS for one drake variable id."""
    get_ids = np.vectorize(sym.Variable.get_id)
    for arg_position, variables in enumerate(sym_args):
        if len(variables) == 0:
            continue
        match = np.where(get_ids(variables) == var_id)
        if match[0].size > 0:
            indices = ', '.join(str(axis_hits[0]) for axis_hits in match)
            return f'{TORCH_ARGS}[{arg_position}][..., {indices}]'
    raise ValueError('Expression contains variable id not in sym_args list: ' +
                     str(var_id))
| 6,554 | 72 | 320 |
4050934dd9bda1e21ef90f323f11b488f12c6b09 | 446 | py | Python | Mesh/System/Entity/Function/Actuate.py | ys-warble/Mesh | 115e7391d19ea09db3c627d8b8ed90b3e3bef9b5 | [
"MIT"
] | null | null | null | Mesh/System/Entity/Function/Actuate.py | ys-warble/Mesh | 115e7391d19ea09db3c627d8b8ed90b3e3bef9b5 | [
"MIT"
] | 2 | 2019-02-25T00:10:15.000Z | 2019-03-22T20:13:32.000Z | Mesh/System/Entity/Function/Actuate.py | ys-warble/Mesh | 115e7391d19ea09db3c627d8b8ed90b3e3bef9b5 | [
"MIT"
] | null | null | null | from Mesh.System.Entity.Function import BaseFunction
from Mesh.System.Entity.Function.Tasked import TaskName
| 18.583333 | 55 | 0.650224 | from Mesh.System.Entity.Function import BaseFunction
from Mesh.System.Entity.Function.Tasked import TaskName
class Actuate(BaseFunction):
    """Actuation capability of an entity; handles the ACTUATE task."""

    tasks = [TaskName.ACTUATE]

    def __init__(self, entity):
        super().__init__(entity)

    def eval(self):
        # No per-step evaluation needed for actuation.
        pass

    def init(self):
        pass

    def terminate(self):
        pass

    def actuate(self, space, location, orientation):
        # Concrete entities must supply their own actuation behavior.
        raise NotImplementedError
| 126 | 187 | 23 |
49cfc822a3160fcdfc4c0fcae521eafb316d87e2 | 2,177 | py | Python | python/landsat8_browseimage.py | vightel/FloodMapsWorkshop | ebc5246fbca52f2ee181a07663b859b2f6f59532 | [
"Apache-2.0"
] | 24 | 2015-02-07T06:34:14.000Z | 2021-11-06T08:06:40.000Z | python/landsat8_browseimage.py | vightel/FloodMapsWorkshop | ebc5246fbca52f2ee181a07663b859b2f6f59532 | [
"Apache-2.0"
] | 1 | 2018-07-23T01:54:23.000Z | 2018-07-23T01:55:05.000Z | python/landsat8_browseimage.py | vightel/FloodMapsWorkshop | ebc5246fbca52f2ee181a07663b859b2f6f59532 | [
"Apache-2.0"
] | 15 | 2015-03-10T06:39:10.000Z | 2020-01-06T19:54:16.000Z | #!/usr/bin/env python
#
# Created on 07/11/2014 Pat Cappelaere - Vightel Corporation
#
# Output: Landsat 8 browse image
#
import os, inspect, sys
import argparse
import numpy
import scipy
import math
from scipy import ndimage
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
from which import *
import config
force = 0
verbose = 0
#
# execute with verbose option
#
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Landsat8 Floodmap vectors')
apg_input = parser.add_argument_group('Input')
apg_input.add_argument("-f", "--force", action='store_true', help="Forces new product to be generated")
apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose on/off")
apg_input.add_argument("-s", "--scene", help="Landsat Scene")
options = parser.parse_args()
force = options.force
verbose = options.verbose
scene = options.scene
outdir = os.path.join(config.LANDSAT8_DIR,scene)
app = Landsat8(outdir, scene)
app.process() | 28.272727 | 119 | 0.716123 | #!/usr/bin/env python
#
# Created on 07/11/2014 Pat Cappelaere - Vightel Corporation
#
# Output: Landsat 8 browse image
#
import os, inspect, sys
import argparse
import numpy
import scipy
import math
from scipy import ndimage
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
from which import *
import config
force = 0
verbose = 0
class Landsat8:
    """Builds a Landsat 8 browse image by compositing the watermap over RGB.

    NOTE: this file is Python 2 era code; shell commands are run via
    os.system with paths taken from the CLI.
    """

    def __init__(self, outpath, scene):
        self.scene = scene
        self.composite_file = os.path.join(outpath, scene + "_COMPOSITE_432_4326.tif")
        self.watermap_file = os.path.join(outpath, scene + "_WATERMAP.tif.hand.tif")
        self.browse_file = os.path.join(outpath, scene + "_watermap_browseimage.tif")
        self.thn_browse_file = os.path.join(outpath, scene + "_watermap_browseimage.thn.png")

    def execute(self, cmd):
        # Echo the command first when the module-level verbose flag is set.
        if verbose:
            print(cmd)
        os.system(cmd)

    def process(self):
        # Generate the RGB composite once if it does not exist yet.
        if not os.path.isfile(self.composite_file):
            cmd = "landsat8_composite_toa.py --scene {0} --red 4 --green 3 --blue 2".format(self.scene)
            self.execute(cmd)

        # Overlay the watermap onto the composite (honors the force flag).
        if force or not os.path.isfile(self.browse_file):
            cmd = "composite -gravity center {0} {1} {2}".format(
                self.watermap_file, self.composite_file, self.browse_file)
            self.execute(cmd)

        # Downscale to a 10% thumbnail.
        if force or not os.path.isfile(self.thn_browse_file):
            cmd = "convert {0} -resize 10% {1}".format(self.browse_file, self.thn_browse_file)
            self.execute(cmd)

        # The full-size browse image is only an intermediate; remove it.
        if os.path.isfile(self.browse_file):
            cmd = "rm {0}".format(self.browse_file)
            self.execute(cmd)
if __name__ == '__main__':
    # CLI entry point: parse options into the module-level flags used by
    # Landsat8.execute/process, then build the browse image for the scene.
    parser = argparse.ArgumentParser(description='Generate Landsat8 Floodmap vectors')
    apg_input = parser.add_argument_group('Input')
    apg_input.add_argument("-f", "--force", action='store_true', help="Forces new product to be generated")
    apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose on/off")
    apg_input.add_argument("-s", "--scene", help="Landsat Scene")

    options = parser.parse_args()

    force = options.force
    verbose = options.verbose
    scene = options.scene

    # Products are written under the configured Landsat 8 output directory.
    outdir = os.path.join(config.LANDSAT8_DIR,scene)
    app = Landsat8(outdir, scene)
    app.process()
c676c877a24ab95600634a7a7acbc4f2edc0df63 | 208 | py | Python | String/ex004.py | Raissadg/Python | de5ba694291f48152433237d2a14bbea931b423c | [
"MIT"
] | null | null | null | String/ex004.py | Raissadg/Python | de5ba694291f48152433237d2a14bbea931b423c | [
"MIT"
] | null | null | null | String/ex004.py | Raissadg/Python | de5ba694291f48152433237d2a14bbea931b423c | [
"MIT"
] | null | null | null | nome = input('Digite seu nome: ').upper()
print(f'Seu nome possui {nome.count("A")} letra A')
print(f'O primeiro A está na posição {nome.find("A")+1}')
print(f'O último A está na posição {nome.rfind("A")+1}') | 52 | 57 | 0.668269 | nome = input('Digite seu nome: ').upper()
print(f'Seu nome possui {nome.count("A")} letra A')
print(f'O primeiro A está na posição {nome.find("A")+1}')
print(f'O último A está na posição {nome.rfind("A")+1}') | 0 | 0 | 0 |
2ed971517d20113ba8073aec719f3751603fa79f | 3,143 | py | Python | kopf/clients/events.py | yashbhutwala/kopf | 4ad77dae699d8516ee7c189b11c6cedbe9224975 | [
"MIT"
] | null | null | null | kopf/clients/events.py | yashbhutwala/kopf | 4ad77dae699d8516ee7c189b11c6cedbe9224975 | [
"MIT"
] | null | null | null | kopf/clients/events.py | yashbhutwala/kopf | 4ad77dae699d8516ee7c189b11c6cedbe9224975 | [
"MIT"
] | null | null | null | import asyncio
import datetime
import logging
import pykube
import requests
from kopf import config
from kopf.clients import auth
from kopf.structs import hierarchies
logger = logging.getLogger(__name__)
MAX_MESSAGE_LENGTH = 1024
CUT_MESSAGE_INFIX = '...'
async def post_event(*, obj=None, ref=None, type, reason, message=''):
    """
    Issue an event for the object.

    This is where they can also be accumulated, aggregated, grouped,
    and where the rate-limits should be maintained. It can (and should)
    be done by the client library, as it is done in the Go client.

    Exactly one of ``obj`` (an object to build a reference from) or ``ref``
    (a pre-built object-reference dict) must be passed; ``TypeError`` is
    raised otherwise. ``type``/``reason``/``message`` populate the matching
    event fields; over-long messages are truncated around the middle.
    HTTP failures while posting are logged and swallowed.
    """

    # Object reference - similar to the owner reference, but different.
    if obj is not None and ref is not None:
        raise TypeError("Only one of obj= and ref= is allowed for a posted event. Got both.")
    if obj is None and ref is None:
        raise TypeError("One of obj= and ref= is required for a posted event. Got none.")
    if ref is None:
        ref = hierarchies.build_object_reference(obj)

    now = datetime.datetime.utcnow()

    # See #164. For cluster-scoped objects, use the current namespace from the current context.
    # It could be "default", but in some systems, we are limited to one specific namespace only.
    namespace = ref.get('namespace') or auth.get_pykube_cfg().namespace
    if not ref.get('namespace'):
        ref = dict(ref, namespace=namespace)

    # Prevent a common case of event posting errors by shortening the message:
    # keep the head and tail halves and splice CUT_MESSAGE_INFIX in between.
    if len(message) > MAX_MESSAGE_LENGTH:
        infix = CUT_MESSAGE_INFIX
        prefix = message[:MAX_MESSAGE_LENGTH // 2 - (len(infix) // 2)]
        suffix = message[-MAX_MESSAGE_LENGTH // 2 + (len(infix) - len(infix) // 2):]
        message = f'{prefix}{infix}{suffix}'

    body = {
        'metadata': {
            'namespace': namespace,
            'generateName': 'kopf-event-',
        },

        'action': 'Action?',  # NOTE(review): placeholder value -- confirm intended.
        'type': type,
        'reason': reason,
        'message': message,

        'reportingComponent': 'kopf',
        'reportingInstance': 'dev',
        'source' : {'component': 'kopf'},  # used in the "From" column in `kubectl describe`.

        'involvedObject': ref,

        'firstTimestamp': now.isoformat() + 'Z',  # '2019-01-28T18:25:03.000000Z' -- seen in `kubectl describe ...`
        'lastTimestamp': now.isoformat() + 'Z',  # '2019-01-28T18:25:03.000000Z' - seen in `kubectl get events`
        'eventTime': now.isoformat() + 'Z',  # '2019-01-28T18:25:03.000000Z'
    }

    try:
        # pykube's create() is synchronous; run it on the workers' executor so
        # the event loop is not blocked while the HTTP call is in flight.
        api = auth.get_pykube_api()
        obj = pykube.Event(api, body)

        loop = asyncio.get_running_loop()
        await loop.run_in_executor(config.WorkersConfig.get_syn_executor(), obj.create)

    except (requests.exceptions.HTTPError, pykube.exceptions.HTTPError) as e:
        # Events are helpful but auxiliary, they should not fail the handling cycle.
        # Yet we want to notice that something went wrong (in logs).
        logger.warning("Failed to post an event. Ignoring and continuing. "
                       f"Error: {e!r}. "
                       f"Event: type={type!r}, reason={reason!r}, message={message!r}.")
| 36.976471 | 115 | 0.641744 | import asyncio
import datetime
import logging
import pykube
import requests
from kopf import config
from kopf.clients import auth
from kopf.structs import hierarchies
logger = logging.getLogger(__name__)
MAX_MESSAGE_LENGTH = 1024
CUT_MESSAGE_INFIX = '...'
async def post_event(*, obj=None, ref=None, type, reason, message=''):
    """
    Issue an event for the object.

    This is where they can also be accumulated, aggregated, grouped,
    and where the rate-limits should be maintained. It can (and should)
    be done by the client library, as it is done in the Go client.
    """
    # Exactly one of obj= / ref= must be supplied. An object reference is
    # similar to the owner reference, but a different structure.
    if obj is not None:
        if ref is not None:
            raise TypeError("Only one of obj= and ref= is allowed for a posted event. Got both.")
        ref = hierarchies.build_object_reference(obj)
    elif ref is None:
        raise TypeError("One of obj= and ref= is required for a posted event. Got none.")

    now = datetime.datetime.utcnow()
    stamp = now.isoformat() + 'Z'  # e.g. '2019-01-28T18:25:03.000000Z'

    # See #164. For cluster-scoped objects, use the current namespace from the
    # current context. It could be "default", but in some systems, we are
    # limited to one specific namespace only.
    namespace = ref.get('namespace') or auth.get_pykube_cfg().namespace
    if not ref.get('namespace'):
        ref = dict(ref, namespace=namespace)

    # Prevent a common case of event posting errors by shortening the message:
    # keep the head and the tail of the message, cut the middle out.
    if len(message) > MAX_MESSAGE_LENGTH:
        infix = CUT_MESSAGE_INFIX
        head = message[:MAX_MESSAGE_LENGTH // 2 - (len(infix) // 2)]
        tail = message[-MAX_MESSAGE_LENGTH // 2 + (len(infix) - len(infix) // 2):]
        message = head + infix + tail

    body = {
        'metadata': {
            'namespace': namespace,
            'generateName': 'kopf-event-',
        },
        'action': 'Action?',
        'type': type,
        'reason': reason,
        'message': message,
        'reportingComponent': 'kopf',
        'reportingInstance': 'dev',
        'source': {'component': 'kopf'},  # used in the "From" column in `kubectl describe`.
        'involvedObject': ref,
        'firstTimestamp': stamp,  # seen in `kubectl describe ...`
        'lastTimestamp': stamp,   # seen in `kubectl get events`
        'eventTime': stamp,
    }

    try:
        api = auth.get_pykube_api()
        event = pykube.Event(api, body)
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(config.WorkersConfig.get_syn_executor(), event.create)
    except (requests.exceptions.HTTPError, pykube.exceptions.HTTPError) as e:
        # Events are helpful but auxiliary, they should not fail the handling cycle.
        # Yet we want to notice that something went wrong (in logs).
        logger.warning("Failed to post an event. Ignoring and continuing. "
                       f"Error: {e!r}. "
                       f"Event: type={type!r}, reason={reason!r}, message={message!r}.")
| 0 | 0 | 0 |
f15869078cd0dd7a1fe8a9ab38ea4519aefe6a92 | 195 | py | Python | pypcl/__init__.py | r9y9/pypcl | 0528aacb8cfa1ceea2cbb3c7e3f20a1f066f23bf | [
"MIT"
] | null | null | null | pypcl/__init__.py | r9y9/pypcl | 0528aacb8cfa1ceea2cbb3c7e3f20a1f066f23bf | [
"MIT"
] | null | null | null | pypcl/__init__.py | r9y9/pypcl | 0528aacb8cfa1ceea2cbb3c7e3f20a1f066f23bf | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division, print_function, absolute_import
import pkg_resources
__version__ = pkg_resources.get_distribution('pypcl').version
from pypcl.common import *
| 19.5 | 64 | 0.815385 | # coding: utf-8
from __future__ import division, print_function, absolute_import
import pkg_resources
__version__ = pkg_resources.get_distribution('pypcl').version
from pypcl.common import *
| 0 | 0 | 0 |
10490a3f72ab52863674a818ec6c6176fa89e87f | 587 | py | Python | app/main/views.py | marknesh/Blogging-website | e8baca97136dae0adc4c7bf895bf076152430bbc | [
"MIT"
] | null | null | null | app/main/views.py | marknesh/Blogging-website | e8baca97136dae0adc4c7bf895bf076152430bbc | [
"MIT"
] | 7 | 2020-02-20T13:58:44.000Z | 2021-02-08T20:40:59.000Z | app/main/views.py | marknesh/Blogging-website | e8baca97136dae0adc4c7bf895bf076152430bbc | [
"MIT"
] | null | null | null | from . import main
from flask import render_template,abort
from flask_login import login_required
from ..models import User,Blog
from app.request import get_quote
@main.route('/')
@main.route('/user/<jina>',methods=['GET','POST'])
| 22.576923 | 74 | 0.715503 | from . import main
from flask import render_template,abort
from flask_login import login_required
from ..models import User,Blog
from app.request import get_quote
@main.route('/')
def index():
neew=Blog.get_blog(id)
qoute=get_quote()
return render_template('home.html',pitch=neew,qoute=qoute)
@main.route('/user/<jina>',methods=['GET','POST'])
def profile(jina):
user=User.query.filter_by(username= jina).first()
if user is None:
abort(404)
pitchess = pitch.get_pitch(id)
return render_template('profile/profile.html',user=user,lol=pitchess)
| 304 | 0 | 44 |
2938f72bf2bac145173d9e763a2c2c54feec089d | 349 | py | Python | 17_lambda.py | JimBae/pythonTips | 5541569d9534536fc6da00f065707c176e7b95f4 | [
"MIT"
] | null | null | null | 17_lambda.py | JimBae/pythonTips | 5541569d9534536fc6da00f065707c176e7b95f4 | [
"MIT"
] | null | null | null | 17_lambda.py | JimBae/pythonTips | 5541569d9534536fc6da00f065707c176e7b95f4 | [
"MIT"
] | null | null | null |
# A lambda is a one-line anonymous function.
# Syntax: lambda argument: manipulate(argument)
# Example:
add = lambda x, y: x + y
print(add(2,4))
# Sort a list of tuples by the second element of each tuple,
# using a lambda as the sort key.
a = [(1,2), (4,1), (9,10), (13,-3)]
a.sort(key = lambda x:x[1])
print(a)
# Sort two lists in parallel: zip them into pairs, sort the pairs,
# then unzip back into two (still paired-up) lists.
list1 = [1,2,3,4,5]
list2 = [9,8,5,3,1]
data = list(zip(list1, list2))
print(data)
data.sort()
list1, list2 = map(lambda t: list(t), zip(*data))
# lambda 는 1줄 함수.
# lambda argument: manipulate(argument)
# ex
add = lambda x, y: x + y
print(add(2,4))
# sort
a = [(1,2), (4,1), (9,10), (13,-3)]
a.sort(key = lambda x:x[1])
print(a)
# 병렬로 sort list
list1 = [1,2,3,4,5]
list2 = [9,8,5,3,1]
data = list(zip(list1, list2))
print(data)
data.sort()
list1, list2 = map(lambda t: list(t), zip(*data))
| 0 | 0 | 0 |
5efaa4c5f3d7db8ce10787be7825583c4c7ec0e1 | 4,546 | py | Python | model_training/loss/loss.py | fluke-tracker/fluke-tracker | 1f9c5842f2100ed5ab449cd437ba74b880418624 | [
"MIT"
] | 2 | 2021-07-13T07:22:08.000Z | 2021-07-13T08:18:32.000Z | model_training/loss/loss.py | fluke-tracker/fluke-tracker | 1f9c5842f2100ed5ab449cd437ba74b880418624 | [
"MIT"
] | null | null | null | model_training/loss/loss.py | fluke-tracker/fluke-tracker | 1f9c5842f2100ed5ab449cd437ba74b880418624 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class TripletLoss(object):
"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
Loss for Person Re-Identification'."""
| 34.439394 | 97 | 0.639683 | import torch
import torch.nn as nn
import torch.nn.functional as F
def l2_norm(input, axis=1):
    """Scale *input* to unit L2 length along *axis* (norm kept broadcastable)."""
    length = torch.norm(input, 2, axis, True)
    return torch.div(input, length)
def euclidean_dist(x, y):
    """Pairwise Euclidean distances between the rows of x and y.

    Args:
      x: tensor of shape [m, d].
      y: tensor of shape [n, d].
    Returns:
      Distance matrix of shape [m, n]: dist[i, j] = ||x_i - y_j||_2.
    """
    m, n = x.size(0), y.size(0)
    # ||x||^2 and ||y||^2, broadcast to [m, n].
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # dist = 1*dist + (-2) * x @ y.T  -> ||x||^2 + ||y||^2 - 2*x.y
    # FIX: the old positional form addmm_(1, -2, x, y.t()) is deprecated
    # (and removed in recent PyTorch); use keyword beta=/alpha= instead.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist
def hard_example_mining(dist_mat, labels, return_inds=False):
    """For each anchor, mine the hardest positive and hardest negative.

    Args:
      dist_mat: pairwise distance matrix, shape [N, N].
      labels: identity labels, shape [N].
      return_inds: whether to also return the indices of the selected
        hardest positive / hardest negative per anchor.
    Returns:
      dist_ap, dist_an (each shape [N]); plus p_inds, n_inds when
      return_inds is True.
    NOTE(review): the view(N, -1) reshapes below require every identity to
      occur the same number of times in the batch (PK-style sampling) —
      otherwise the boolean-masked selection is not rectangular.
    """
    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)
    # shape [N, N]
    is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
    is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
    # `dist_ap` means distance(anchor, positive)
    # both `dist_ap` and `relative_p_inds` with shape [N, 1]
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # `dist_an` means distance(anchor, negative)
    # both `dist_an` and `relative_n_inds` with shape [N, 1]
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    # shape [N]
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)
    if return_inds:
        # Map the relative (within-row) indices back to absolute sample
        # indices in the batch.
        # shape [N, N]
        ind = (labels.new().resize_as_(labels)
               .copy_(torch.arange(0, N).long())
               .unsqueeze(0).expand(N, N))
        # shape [N, 1]
        p_inds = torch.gather(
            ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
        n_inds = torch.gather(
            ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
        # shape [N]
        p_inds = p_inds.squeeze(1)
        n_inds = n_inds.squeeze(1)
        return dist_ap, dist_an, p_inds, n_inds
    return dist_ap, dist_an
class TripletLoss(object):
    """Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
    Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
    Loss for Person Re-Identification'."""

    def __init__(self, margin=None):
        # With a margin, use the classic hinge ranking loss; without one,
        # fall back to the soft-margin (softplus-based) formulation.
        self.margin = margin
        if margin is not None:
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            self.ranking_loss = nn.SoftMarginLoss()

    def __call__(self, global_feat, labels):
        # L2-normalize the features so that distances live on the unit sphere.
        global_feat = l2_norm(global_feat)
        dist_mat = euclidean_dist(global_feat, global_feat)
        dist_ap, dist_an = hard_example_mining(dist_mat, labels)
        # Target +1 means "dist_an must rank above dist_ap".
        # FIX: torch.ones_like replaces the long-deprecated
        # new().resize_as_().fill_(1) idiom (same dtype/device, same values).
        y = torch.ones_like(dist_an)
        if self.margin is not None:
            loss = self.ranking_loss(dist_an, dist_ap, y)
        else:
            loss = self.ranking_loss(dist_an - dist_ap, y)
        return loss
def softmax_loss(results, labels):
    """Mean cross-entropy between logits `results` and integer `labels`."""
    labels = labels.view(-1)
    # FIX: `reduce=True` is deprecated; reduction='mean' is the equivalent.
    loss = F.cross_entropy(results, labels, reduction='mean')
    return loss
def focal_loss(input, target, class_num, OHEM_percent=None):
    """Multi-label focal loss on raw logits, with optional OHEM top-k.

    Args:
      input: raw logits (same shape as `target`).
      target: binary {0, 1} targets.
      class_num: total class count; sizes the OHEM top-k.
      OHEM_percent: if given, average only the hardest
        int(class_num * OHEM_percent) per-class losses of each sample.
    """
    gamma = 2
    assert target.size() == input.size()
    # Numerically stable binary cross-entropy with logits:
    # max(x, 0) - x*t + log(exp(-max(x, 0)) + exp(-x - max(x, 0))).
    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
    # Focal modulation (1 - p_t)^gamma, computed via logsigmoid for stability.
    invprobs = F.logsigmoid(-input * (target * 2 - 1))
    loss = (invprobs * gamma).exp() * loss
    if OHEM_percent is None:
        return loss.mean()
    else:
        # Online hard example mining: keep only the largest losses per row.
        OHEM, _ = loss.topk(k=int(class_num * OHEM_percent), dim=1, largest=True, sorted=True)
        return OHEM.mean()
def bce_loss(input, target, class_num, OHEM_percent=None):
    """Binary cross-entropy on logits, with optional OHEM top-k.

    Args:
      input: raw logits (same shape as `target`).
      target: binary {0, 1} targets.
      class_num: total class count; sizes the OHEM top-k.
      OHEM_percent: if given, average only the hardest
        int(class_num * OHEM_percent) per-class losses of each sample.
    """
    if OHEM_percent is None:
        # FIX: `reduce=True` is deprecated; reduction='mean' is the equivalent.
        loss = F.binary_cross_entropy_with_logits(input, target, reduction='mean')
        return loss
    else:
        # FIX: `reduce=False` is deprecated; reduction='none' is the equivalent.
        loss = F.binary_cross_entropy_with_logits(input, target, reduction='none')
        value, index = loss.topk(int(class_num * OHEM_percent), dim=1, largest=True, sorted=True)
        return value.mean()
def focal_OHEM(results, labels, labels_onehot, class_num, OHEM_percent=100):
    """Combined BCE + focal loss (both OHEM-pruned) plus a positive-logit term.

    Args:
      results: logits, shape [batch, classes].
      labels: integer class ids, where the value `classes` marks samples
        with no ground-truth class (excluded from the third term).
      labels_onehot: multi-label binary targets matching `results`.
      class_num: total class count, forwarded to the OHEM top-k sizing.
      OHEM_percent: fraction of hardest per-class losses kept per sample.
        NOTE(review): the default of 100 looks odd for a fraction
        (class_num * 100 exceeds the row size) — callers presumably pass
        a value <= 1; TODO confirm against call sites.
    """
    batch_size, classes = results.shape
    labels = labels.view(-1)
    loss0 = bce_loss(results, labels_onehot, class_num, OHEM_percent)
    loss1 = focal_loss(results, labels_onehot, class_num, OHEM_percent)
    # Indices of samples that do have a ground-truth class.
    indexs_ = (labels != classes).nonzero().view(-1)
    if len(indexs_) == 0:
        return loss0 + loss1
    # Extra focal term on just the ground-truth logit of each labeled sample,
    # pushing that single logit towards the positive target.
    results_ = results[torch.arange(0, len(results))[indexs_], labels[indexs_]].contiguous()
    loss2 = focal_loss(results_, torch.ones_like(results_).float(), class_num)
    return loss0 + loss1 + loss2
533064bd6976699dc1dffdd6facee75002254ea6 | 11,953 | py | Python | AppServer/google/appengine/api/channel/channel_service_stub.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/google/appengine/api/channel/channel_service_stub.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/google/appengine/api/channel/channel_service_stub.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Channel API, queues messages and writes them to a log."""
import hashlib
import logging
import random
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api.channel import channel_service_pb
from google.appengine.runtime import apiproxy_errors
def _GenerateTokenHash(token):
"""Returns a MD5 hash of a token for integrity checking."""
return hashlib.md5(token).hexdigest()
class InvalidTokenError(Error):
"""A stub method was called with a syntactically invalid token."""
pass
class TokenTimedOutError(Error):
"""A stub method was called with a token that has expired or never existed."""
pass
class ChannelServiceStub(apiproxy_stub.APIProxyStub):
"""Python only channel service stub.
This stub does not use a browser channel to push messages to a client.
Instead it queues messages internally.
"""
THREADSAFE = True
CHANNEL_TIMEOUT_SECONDS = 2
XMPP_PUBLIC_IP = '0.1.0.10'
CHANNEL_TOKEN_DEFAULT_DURATION = 120
CHANNEL_TOKEN_IDENTIFIER = 'channel'
def __init__(self, log=logging.debug, service_name='channel',
time_func=time.time, request_data=None):
"""Initializer.
Args:
log: A logger, used for dependency injection.
service_name: Service name expected for all calls.
time_func: function to get the current time in seconds.
request_data: A request_info.RequestInfo instance. If None, a
request_info._LocalRequestInfo instance will be used.
"""
apiproxy_stub.APIProxyStub.__init__(self, service_name,
request_data=request_data)
self._log = log
self._time_func = time_func
self._connected_channel_messages = {}
def _Dynamic_CreateChannel(self, request, response):
"""Implementation of channel.create_channel.
Args:
request: A ChannelServiceRequest.
response: A ChannelServiceResponse
"""
client_id = request.application_key()
if not client_id:
raise apiproxy_errors.ApplicationError(
channel_service_pb.ChannelServiceError.INVALID_CHANNEL_KEY)
if request.has_duration_minutes():
duration = request.duration_minutes()
else:
duration = ChannelServiceStub.CHANNEL_TOKEN_DEFAULT_DURATION
expiration_sec = long(self._time_func() + duration * 60) + 1
raw_token = '-'.join([ChannelServiceStub.CHANNEL_TOKEN_IDENTIFIER,
str(random.randint(0, 2 ** 32)),
str(expiration_sec),
client_id])
token = '-'.join([_GenerateTokenHash(raw_token), raw_token])
self._log('Creating channel token %s with client id %s and duration %s',
token, request.application_key(), duration)
response.set_token(token)
@apiproxy_stub.Synchronized
def _Dynamic_SendChannelMessage(self, request, response):
"""Implementation of channel.send_message.
Queues a message to be retrieved by the client when it polls.
Args:
request: A SendMessageRequest.
response: A VoidProto.
"""
client_id = self.client_id_from_token(request.application_key())
if client_id is None:
client_id = request.application_key()
if not request.message():
raise apiproxy_errors.ApplicationError(
channel_service_pb.ChannelServiceError.BAD_MESSAGE)
if client_id in self._connected_channel_messages:
self._log('Sending a message (%s) to channel with key (%s)',
request.message(), client_id)
self._connected_channel_messages[client_id].append(request.message())
else:
self._log('SKIPPING message (%s) to channel with key (%s): '
'no clients connected',
request.message(), client_id)
def client_id_from_token(self, token):
"""Returns the client id from a given token.
Args:
token: A string representing an instance of a client connection to a
client id, returned by CreateChannel.
Returns:
A string representing the client id used to create this token,
or None if this token is incorrectly formed and doesn't map to a
client id.
"""
try:
return self.validate_token_and_extract_client_id(token)
except (InvalidTokenError, TokenTimedOutError):
return None
def validate_token_and_extract_client_id(self, token):
"""Ensures token is well-formed and hasn't expired, and extracts client_id.
Args:
token: a token returned by CreateChannel.
Returns:
A client_id, which is the value passed to CreateChannel.
Raises:
InvalidTokenError: The token is syntactically invalid.
TokenTimedOutError: The token expired or does not exist.
"""
pieces = token.split('-', 1)
if len(pieces) != 2 or _GenerateTokenHash(pieces[1]) != pieces[0]:
raise InvalidTokenError()
raw_token = pieces[1]
pieces = raw_token.split('-', 3)
if len(pieces) != 4:
raise InvalidTokenError()
constant_id, unused_random_id, expiration_sec, client_id = pieces
if (constant_id != ChannelServiceStub.CHANNEL_TOKEN_IDENTIFIER
or not expiration_sec.isdigit()):
raise InvalidTokenError()
if long(expiration_sec) <= self._time_func():
raise TokenTimedOutError()
return client_id
@apiproxy_stub.Synchronized
def get_channel_messages(self, token):
"""Returns the pending messages for a given channel.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
List of messages, or None if the channel doesn't exist. The messages are
strings.
"""
self._log('Received request for messages for channel: ' + token)
client_id = self.client_id_from_token(token)
if client_id in self._connected_channel_messages:
return self._connected_channel_messages[client_id]
return None
@apiproxy_stub.Synchronized
def has_channel_messages(self, token):
"""Checks to see if the given channel has any pending messages.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
True if the channel exists and has pending messages.
"""
client_id = self.client_id_from_token(token)
has_messages = (client_id in self._connected_channel_messages and
bool(self._connected_channel_messages[client_id]))
self._log('Checking for messages on channel (%s) (%s)',
token, has_messages)
return has_messages
@apiproxy_stub.Synchronized
def pop_first_message(self, token):
"""Returns and clears the first message from the message queue.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
The first message in the queue (a string), or None if no messages.
"""
if self.has_channel_messages(token):
client_id = self.client_id_from_token(token)
self._log('Popping first message of queue for channel (%s)', token)
return self._connected_channel_messages[client_id].pop(0)
return None
@apiproxy_stub.Synchronized
def clear_channel_messages(self, token):
"""Clears all messages from the channel.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
"""
client_id = self.client_id_from_token(token)
if client_id:
self._log('Clearing messages on channel (' + client_id + ')')
if client_id in self._connected_channel_messages:
self._connected_channel_messages[client_id] = []
else:
self._log('Ignoring clear messages for nonexistent token (' +
token + ')')
def add_connect_event(self, client_id):
"""Tell the application that the client has connected."""
self.request_data.get_dispatcher().add_async_request(
'POST', '/_ah/channel/connected/',
[('Content-Type', 'application/x-www-form-urlencoded')],
'from=%s' % client_id,
ChannelServiceStub.XMPP_PUBLIC_IP)
@apiproxy_stub.Synchronized
def disconnect_channel_event(self, client_id):
"""Removes the channel from the list of connected channels."""
self._log('Removing channel %s', client_id)
if client_id in self._connected_channel_messages:
del self._connected_channel_messages[client_id]
self.request_data.get_dispatcher().add_async_request(
'POST', '/_ah/channel/disconnected/',
[('Content-Type', 'application/x-www-form-urlencoded')],
'from=%s' % client_id,
ChannelServiceStub.XMPP_PUBLIC_IP)
def add_disconnect_event(self, client_id):
"""Add an event to notify the app if a client has disconnected.
Args:
client_id: A client ID used for a particular channel.
"""
timeout = self._time_func() + ChannelServiceStub.CHANNEL_TIMEOUT_SECONDS
self.request_data.get_dispatcher().add_event(
DefineDisconnectCallback(client_id), timeout, 'channel-disconnect',
client_id)
@apiproxy_stub.Synchronized
def connect_channel(self, token):
"""Marks the channel identified by the token (token) as connected.
If the channel has not yet been connected, this triggers a connection event
to let the application know that the channel has been connected to.
If the channel has already been connected, this refreshes the channel's
timeout so that it will not disconnect. This should be done at regular
intervals to avoid automatic disconnection.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Raises:
InvalidTokenError: The token is syntactically invalid.
TokenTimedOutError: The token expired or does not exist.
"""
client_id = self.validate_token_and_extract_client_id(token)
if client_id in self._connected_channel_messages:
timeout = self._time_func() + ChannelServiceStub.CHANNEL_TIMEOUT_SECONDS
self.request_data.get_dispatcher().update_event(
timeout, 'channel-disconnect', client_id)
return
self._connected_channel_messages[client_id] = []
self.add_connect_event(client_id)
self.add_disconnect_event(client_id)
@apiproxy_stub.Synchronized
def connect_and_pop_first_message(self, token):
"""Atomically performs a connect_channel and a pop_first_message.
This is designed to be called after the channel has already been connected,
so that it refreshes the channel's timeout, and retrieves a message, in a
single atomic operation.
Args:
token: A string representing the channel. Note that this is the token
returned by CreateChannel, not the client id.
Returns:
The first message in the queue (a string), or None if no messages.
Raises:
InvalidTokenError: The token is syntactically invalid.
TokenTimedOutError: The token expired or does not exist.
"""
self.connect_channel(token)
return self.pop_first_message(token)
| 30.886305 | 80 | 0.705597 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Channel API, queues messages and writes them to a log."""
import hashlib
import logging
import random
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api.channel import channel_service_pb
from google.appengine.runtime import apiproxy_errors
def _GenerateTokenHash(token):
"""Returns a MD5 hash of a token for integrity checking."""
return hashlib.md5(token).hexdigest()
class Error(Exception):
  """Base class for all channel service stub errors."""
  pass
class InvalidTokenError(Error):
  """A stub method was called with a syntactically invalid token.

  Raised by validate_token_and_extract_client_id when the token does not
  match the expected '<hash>-channel-<random>-<expiration>-<client_id>' form.
  """
  pass
class TokenTimedOutError(Error):
  """A stub method was called with a token that has expired or never existed.

  Raised by validate_token_and_extract_client_id when the embedded
  expiration timestamp is not in the future.
  """
  pass
class ChannelServiceStub(apiproxy_stub.APIProxyStub):
  """Python only channel service stub.
  This stub does not use a browser channel to push messages to a client.
  Instead it queues messages internally.
  """
  # State-mutating methods below are wrapped with @apiproxy_stub.Synchronized,
  # so the stub is safe to call from multiple request threads.
  THREADSAFE = True
  # Seconds a connected channel may go without a keep-alive (connect_channel)
  # before its disconnect event fires.
  CHANNEL_TIMEOUT_SECONDS = 2
  # Source IP used when dispatching /_ah/channel/... callbacks to the app.
  XMPP_PUBLIC_IP = '0.1.0.10'
  # Default token lifetime, in minutes (see _Dynamic_CreateChannel).
  CHANNEL_TOKEN_DEFAULT_DURATION = 120
  # Constant marker embedded in every token; checked during validation.
  CHANNEL_TOKEN_IDENTIFIER = 'channel'
  def __init__(self, log=logging.debug, service_name='channel',
               time_func=time.time, request_data=None):
    """Initializer.
    Args:
      log: A logger, used for dependency injection.
      service_name: Service name expected for all calls.
      time_func: function to get the current time in seconds.
      request_data: A request_info.RequestInfo instance. If None, a
        request_info._LocalRequestInfo instance will be used.
    """
    apiproxy_stub.APIProxyStub.__init__(self, service_name,
                                        request_data=request_data)
    self._log = log
    self._time_func = time_func
    # Maps client_id -> list of queued, not-yet-delivered messages. A key is
    # present iff the channel is currently considered connected.
    self._connected_channel_messages = {}
  def _Dynamic_CreateChannel(self, request, response):
    """Implementation of channel.create_channel.
    Args:
      request: A ChannelServiceRequest.
      response: A ChannelServiceResponse
    """
    client_id = request.application_key()
    if not client_id:
      raise apiproxy_errors.ApplicationError(
          channel_service_pb.ChannelServiceError.INVALID_CHANNEL_KEY)
    if request.has_duration_minutes():
      duration = request.duration_minutes()
    else:
      duration = ChannelServiceStub.CHANNEL_TOKEN_DEFAULT_DURATION
    expiration_sec = long(self._time_func() + duration * 60) + 1
    # Token layout: '<md5(raw)>-channel-<random>-<expiration_sec>-<client_id>'.
    raw_token = '-'.join([ChannelServiceStub.CHANNEL_TOKEN_IDENTIFIER,
                          str(random.randint(0, 2 ** 32)),
                          str(expiration_sec),
                          client_id])
    token = '-'.join([_GenerateTokenHash(raw_token), raw_token])
    self._log('Creating channel token %s with client id %s and duration %s',
              token, request.application_key(), duration)
    response.set_token(token)
  @apiproxy_stub.Synchronized
  def _Dynamic_SendChannelMessage(self, request, response):
    """Implementation of channel.send_message.
    Queues a message to be retrieved by the client when it polls.
    Args:
      request: A SendMessageRequest.
      response: A VoidProto.
    """
    # The application key may be either a full token or a bare client id;
    # fall back to treating it as a client id if token parsing fails.
    client_id = self.client_id_from_token(request.application_key())
    if client_id is None:
      client_id = request.application_key()
    if not request.message():
      raise apiproxy_errors.ApplicationError(
          channel_service_pb.ChannelServiceError.BAD_MESSAGE)
    if client_id in self._connected_channel_messages:
      self._log('Sending a message (%s) to channel with key (%s)',
                request.message(), client_id)
      self._connected_channel_messages[client_id].append(request.message())
    else:
      self._log('SKIPPING message (%s) to channel with key (%s): '
                'no clients connected',
                request.message(), client_id)
  def client_id_from_token(self, token):
    """Returns the client id from a given token.
    Args:
       token: A string representing an instance of a client connection to a
       client id, returned by CreateChannel.
    Returns:
       A string representing the client id used to create this token,
       or None if this token is incorrectly formed and doesn't map to a
       client id.
    """
    try:
      return self.validate_token_and_extract_client_id(token)
    except (InvalidTokenError, TokenTimedOutError):
      return None
  def validate_token_and_extract_client_id(self, token):
    """Ensures token is well-formed and hasn't expired, and extracts client_id.
    Args:
      token: a token returned by CreateChannel.
    Returns:
      A client_id, which is the value passed to CreateChannel.
    Raises:
      InvalidTokenError: The token is syntactically invalid.
      TokenTimedOutError: The token expired or does not exist.
    """
    # First split: integrity hash vs. the raw payload it must match.
    pieces = token.split('-', 1)
    if len(pieces) != 2 or _GenerateTokenHash(pieces[1]) != pieces[0]:
      raise InvalidTokenError()
    raw_token = pieces[1]
    # Second split: '<identifier>-<random>-<expiration_sec>-<client_id>'.
    pieces = raw_token.split('-', 3)
    if len(pieces) != 4:
      raise InvalidTokenError()
    constant_id, unused_random_id, expiration_sec, client_id = pieces
    if (constant_id != ChannelServiceStub.CHANNEL_TOKEN_IDENTIFIER
        or not expiration_sec.isdigit()):
      raise InvalidTokenError()
    if long(expiration_sec) <= self._time_func():
      raise TokenTimedOutError()
    return client_id
  @apiproxy_stub.Synchronized
  def get_channel_messages(self, token):
    """Returns the pending messages for a given channel.
    Args:
      token: A string representing the channel. Note that this is the token
        returned by CreateChannel, not the client id.
    Returns:
      List of messages, or None if the channel doesn't exist. The messages are
      strings.
    """
    self._log('Received request for messages for channel: ' + token)
    client_id = self.client_id_from_token(token)
    if client_id in self._connected_channel_messages:
      return self._connected_channel_messages[client_id]
    return None
  @apiproxy_stub.Synchronized
  def has_channel_messages(self, token):
    """Checks to see if the given channel has any pending messages.
    Args:
      token: A string representing the channel. Note that this is the token
        returned by CreateChannel, not the client id.
    Returns:
      True if the channel exists and has pending messages.
    """
    client_id = self.client_id_from_token(token)
    has_messages = (client_id in self._connected_channel_messages and
                    bool(self._connected_channel_messages[client_id]))
    self._log('Checking for messages on channel (%s) (%s)',
              token, has_messages)
    return has_messages
  @apiproxy_stub.Synchronized
  def pop_first_message(self, token):
    """Returns and clears the first message from the message queue.
    Args:
      token: A string representing the channel. Note that this is the token
        returned by CreateChannel, not the client id.
    Returns:
      The first message in the queue (a string), or None if no messages.
    """
    if self.has_channel_messages(token):
      client_id = self.client_id_from_token(token)
      self._log('Popping first message of queue for channel (%s)', token)
      return self._connected_channel_messages[client_id].pop(0)
    return None
  @apiproxy_stub.Synchronized
  def clear_channel_messages(self, token):
    """Clears all messages from the channel.
    Args:
      token: A string representing the channel. Note that this is the token
        returned by CreateChannel, not the client id.
    """
    client_id = self.client_id_from_token(token)
    if client_id:
      self._log('Clearing messages on channel (' + client_id + ')')
      if client_id in self._connected_channel_messages:
        self._connected_channel_messages[client_id] = []
    else:
      self._log('Ignoring clear messages for nonexistent token (' +
                token + ')')
  def add_connect_event(self, client_id):
    """Tell the application that the client has connected."""
    # Fires the app's /_ah/channel/connected/ handler asynchronously.
    self.request_data.get_dispatcher().add_async_request(
        'POST', '/_ah/channel/connected/',
        [('Content-Type', 'application/x-www-form-urlencoded')],
        'from=%s' % client_id,
        ChannelServiceStub.XMPP_PUBLIC_IP)
  @apiproxy_stub.Synchronized
  def disconnect_channel_event(self, client_id):
    """Removes the channel from the list of connected channels."""
    self._log('Removing channel %s', client_id)
    if client_id in self._connected_channel_messages:
      del self._connected_channel_messages[client_id]
    # Fires the app's /_ah/channel/disconnected/ handler asynchronously.
    self.request_data.get_dispatcher().add_async_request(
        'POST', '/_ah/channel/disconnected/',
        [('Content-Type', 'application/x-www-form-urlencoded')],
        'from=%s' % client_id,
        ChannelServiceStub.XMPP_PUBLIC_IP)
  def add_disconnect_event(self, client_id):
    """Add an event to notify the app if a client has disconnected.
    Args:
      client_id: A client ID used for a particular channel.
    """
    timeout = self._time_func() + ChannelServiceStub.CHANNEL_TIMEOUT_SECONDS
    # Bind client_id at definition time so the deferred callback disconnects
    # this specific client when the timeout fires.
    def DefineDisconnectCallback(client_id):
      return lambda: self.disconnect_channel_event(client_id)
    self.request_data.get_dispatcher().add_event(
        DefineDisconnectCallback(client_id), timeout, 'channel-disconnect',
        client_id)
  @apiproxy_stub.Synchronized
  def connect_channel(self, token):
    """Marks the channel identified by the token (token) as connected.
    If the channel has not yet been connected, this triggers a connection event
    to let the application know that the channel has been connected to.
    If the channel has already been connected, this refreshes the channel's
    timeout so that it will not disconnect. This should be done at regular
    intervals to avoid automatic disconnection.
    Args:
      token: A string representing the channel. Note that this is the token
        returned by CreateChannel, not the client id.
    Raises:
      InvalidTokenError: The token is syntactically invalid.
      TokenTimedOutError: The token expired or does not exist.
    """
    client_id = self.validate_token_and_extract_client_id(token)
    if client_id in self._connected_channel_messages:
      # Already connected: just push the disconnect deadline further out.
      timeout = self._time_func() + ChannelServiceStub.CHANNEL_TIMEOUT_SECONDS
      self.request_data.get_dispatcher().update_event(
          timeout, 'channel-disconnect', client_id)
      return
    # First connection: create the queue and notify the application.
    self._connected_channel_messages[client_id] = []
    self.add_connect_event(client_id)
    self.add_disconnect_event(client_id)
  @apiproxy_stub.Synchronized
  def connect_and_pop_first_message(self, token):
    """Atomically performs a connect_channel and a pop_first_message.
    This is designed to be called after the channel has already been connected,
    so that it refreshes the channel's timeout, and retrieves a message, in a
    single atomic operation.
    Args:
      token: A string representing the channel. Note that this is the token
        returned by CreateChannel, not the client id.
    Returns:
      The first message in the queue (a string), or None if no messages.
    Raises:
      InvalidTokenError: The token is syntactically invalid.
      TokenTimedOutError: The token expired or does not exist.
    """
    self.connect_channel(token)
    return self.pop_first_message(token)
| 81 | 9 | 50 |
db40152567bfed7b9803d41dc20e56428f9891c1 | 3,308 | py | Python | eax_stream.py | jkmnt/tiny_eax_mode | 370c4dac55fc45b50cc592fe87e7417abd25a705 | [
"Unlicense"
] | null | null | null | eax_stream.py | jkmnt/tiny_eax_mode | 370c4dac55fc45b50cc592fe87e7417abd25a705 | [
"Unlicense"
] | null | null | null | eax_stream.py | jkmnt/tiny_eax_mode | 370c4dac55fc45b50cc592fe87e7417abd25a705 | [
"Unlicense"
] | null | null | null | from eax import gf_double, xorstrings
# these classes simulate the way it could be done in C in online mode, byte by byte
# wrappers for the online stream api to test it
| 31.807692 | 84 | 0.570133 | from eax import gf_double, xorstrings
# these classes simulate the way it could be done in C in online mode, byte by byte
class OMAC_stream:
    """Online (byte-at-a-time) OMAC/CMAC-style MAC, mirroring a C streaming API.

    The final block needs special masking (L2 for a complete block, L4 for a
    padded partial block), so one full block is always held back in
    ``readyblock`` until we know whether it is the last one.
    """
    def __init__(self, cfg, key, tweak):
        enc = cfg.ECB(key)
        # L = E_K(0^n); its GF(2^n) doublings L2/L4 are the OMAC subkey masks.
        L = enc.run(bytes([0] * cfg.BLOCKSIZE))
        L_int = int.from_bytes(L, cfg.ENDIAN, signed=False)
        L2_int = gf_double(L_int, cfg.BLOCKSIZE)
        L4_int = gf_double(L2_int, cfg.BLOCKSIZE)
        self.cfg = cfg
        self.L2 = L2_int.to_bytes(cfg.BLOCKSIZE, cfg.ENDIAN)
        self.L4 = L4_int.to_bytes(cfg.BLOCKSIZE, cfg.ENDIAN)
        self.enc = enc
        # EAX tweak (0/1/2) encoded as the implicit first block, big-endian.
        self.readyblock = int.to_bytes(tweak, cfg.BLOCKSIZE, 'big')
        # Running CBC-MAC state and partial-block accumulator.
        self.mac = bytes(cfg.BLOCKSIZE)
        self.buf = bytes([])
    def process_byte(self, byte):
        """Absorb one byte; only folds a block into the MAC once a newer full
        block exists, so the true last block is never folded prematurely."""
        cfg = self.cfg
        self.buf += bytes([byte])
        if len(self.buf) == cfg.BLOCKSIZE: # full buf collected, ok. process prev
            xorred = xorstrings(self.readyblock, self.mac)
            self.mac = self.enc.run(xorred)
            self.readyblock = self.buf
            self.buf = bytes([])
    def digest(self):
        """Finalize and return the MAC (does not mutate the stream state)."""
        readyblock = self.readyblock
        buf = self.buf
        mac = self.mac
        cfg = self.cfg
        if not buf: # readyblock is last
            # Complete final block: mask with L2 before the last encryption.
            readyblock = xorstrings(readyblock, self.L2)
        xorred = xorstrings(readyblock, mac)
        mac = self.enc.run(xorred)
        if buf:
            # Partial final block: 10* pad to a full block, mask with L4.
            buf += bytes([0x80])
            buf = buf.ljust((len(buf) + cfg.BLOCKSIZE - 1) & -cfg.BLOCKSIZE, b'\0')
            xorred = xorstrings(xorstrings(buf, self.L4), mac)
            mac = self.enc.run(xorred)
        return mac
class CTR_stream:
    """Online (byte-at-a-time) CTR-mode keystream XOR, mirroring a C streaming API."""
    def __init__(self, cfg, key, nonce):
        enc = cfg.ECB(key)
        nonce_int = int.from_bytes(nonce, cfg.ENDIAN, signed=False)
        self.cfg = cfg
        self.enc = enc
        self.nonce = nonce_int
        # Absolute byte position in the stream; block index = pos // BLOCKSIZE.
        self.pos = 0
        # Keystream block for the current position, refreshed at block edges.
        self.xorbuf = None
    def process_byte(self, byte):
        """XOR one byte with the keystream and return it as a 1-byte bytes object."""
        cfg = self.cfg
        if self.pos % cfg.BLOCKSIZE == 0:
            # New block: counter = (nonce + block index) mod 2^blockbits.
            counter = (self.nonce + self.pos // cfg.BLOCKSIZE) & cfg.BLOCKSIZE_MASK
            counter = counter.to_bytes(cfg.BLOCKSIZE, cfg.ENDIAN)
            self.xorbuf = self.enc.run(counter)
        pt = self.xorbuf[self.pos % cfg.BLOCKSIZE] ^ byte
        self.pos += 1
        return bytes([pt])
# wrappers for the online stream api to test it
def omac_stream(cfg, key, data, k):
    """Compute the OMAC tag of *data* under tweak *k* via the online stream API."""
    mac = OMAC_stream(cfg, key, k)
    for octet in data:
        mac.process_byte(octet)
    return mac.digest()
def ctr_stream(cfg, key, nonce, data):
    """CTR-transform *data* one byte at a time via the online stream API."""
    cipher = CTR_stream(cfg, key, nonce)
    # Collect the 1-byte chunks and join once instead of repeated bytes +=.
    chunks = [cipher.process_byte(octet) for octet in data]
    return b''.join(chunks)
def eax_enc(cfg, key, nonce, header, pt):
    """EAX encryption built on the streaming primitives.

    Returns (ciphertext, tag); tag = OMAC0(nonce) ^ OMAC2(ct) ^ OMAC1(header).
    """
    nonce_mac = omac_stream(cfg, key, nonce, 0)
    header_mac = omac_stream(cfg, key, header, 1)
    ct = ctr_stream(cfg, key, nonce_mac, pt)
    ct_mac = omac_stream(cfg, key, ct, 2)
    return (ct, xorstrings(xorstrings(nonce_mac, ct_mac), header_mac))
def eax_dec(cfg, key, nonce, header, ct):
    """EAX decryption built on the streaming primitives.

    Returns (plaintext, locally-computed tag); the caller compares the tag
    against the transmitted one.
    """
    nonce_mac = omac_stream(cfg, key, nonce, 0)
    header_mac = omac_stream(cfg, key, header, 1)
    ct_mac = omac_stream(cfg, key, ct, 2)
    expected_tag = xorstrings(xorstrings(nonce_mac, ct_mac), header_mac)
    pt = ctr_stream(cfg, key, nonce_mac, ct)
    return (pt, expected_tag)
| 2,847 | -7 | 287 |
50244aa53e4bcb1d6cbdbfa6cf55283bec699087 | 817 | py | Python | setup.py | romenrg/body-mass-index | 462c9984a21cfc306c9466b20aafd184e6b1be37 | [
"MIT"
] | 2 | 2020-12-18T10:03:59.000Z | 2021-01-16T12:50:15.000Z | setup.py | romenrg/body-mass-index | 462c9984a21cfc306c9466b20aafd184e6b1be37 | [
"MIT"
] | null | null | null | setup.py | romenrg/body-mass-index | 462c9984a21cfc306c9466b20aafd184e6b1be37 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="body-mass-index",
version="1.0.1",
author="Romen Rodriguez-Gil",
author_email="contact@romenrg.com",
description="Utilities related to the Body Mass Index (BMI): Calculating it, calculating healthy weight, ranges, " +
"boundaries,...",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/romenrg/body-mass-index",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 32.68 | 120 | 0.662179 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="body-mass-index",
version="1.0.1",
author="Romen Rodriguez-Gil",
author_email="contact@romenrg.com",
description="Utilities related to the Body Mass Index (BMI): Calculating it, calculating healthy weight, ranges, " +
"boundaries,...",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/romenrg/body-mass-index",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 0 | 0 | 0 |
1cf19256d4d17d4dc98a553f94b4a0742aa4154b | 431 | py | Python | algotrade-bot-main/populate.py | ChoiceCoin/DeFi | 8ee334d43f3709dba9e09fb65cdf9d57797502d8 | [
"Apache-2.0"
] | 2 | 2022-03-23T00:19:04.000Z | 2022-03-24T19:10:49.000Z | algotrade-bot-main/populate.py | ChoiceCoin/DeFi | 8ee334d43f3709dba9e09fb65cdf9d57797502d8 | [
"Apache-2.0"
] | null | null | null | algotrade-bot-main/populate.py | ChoiceCoin/DeFi | 8ee334d43f3709dba9e09fb65cdf9d57797502d8 | [
"Apache-2.0"
] | null | null | null | from db.models import Trade, Asset
ass1 = Asset(name="Algo", asset_id="0", network="mainnet")
ass2 = Asset(name="Choice", asset_id="297995609", network="mainnet")
ass1.save()
ass2.save()
trade = Trade(
wallet_address="Replace With Address",
asset1=ass1,
asset2=ass2,
asset_in=ass2,
asset_in_amount=10,
slippage=0.5,
min_sell_price=0.003,
do_redeem=False,
network="mainnet"
)
trade.save() | 20.52381 | 68 | 0.672854 | from db.models import Trade, Asset
ass1 = Asset(name="Algo", asset_id="0", network="mainnet")
ass2 = Asset(name="Choice", asset_id="297995609", network="mainnet")
ass1.save()
ass2.save()
trade = Trade(
wallet_address="Replace With Address",
asset1=ass1,
asset2=ass2,
asset_in=ass2,
asset_in_amount=10,
slippage=0.5,
min_sell_price=0.003,
do_redeem=False,
network="mainnet"
)
trade.save() | 0 | 0 | 0 |
fbf51807ae76b0f1f34461dc9bf4c73186f2503c | 931 | py | Python | examples/featherwing_alphanum_simpletest.py | iraytrace/Adafruit_CircuitPython_FeatherWing | 2e398df58d4f0d679890f1af0167ae66e60a4c33 | [
"MIT"
] | null | null | null | examples/featherwing_alphanum_simpletest.py | iraytrace/Adafruit_CircuitPython_FeatherWing | 2e398df58d4f0d679890f1af0167ae66e60a4c33 | [
"MIT"
] | null | null | null | examples/featherwing_alphanum_simpletest.py | iraytrace/Adafruit_CircuitPython_FeatherWing | 2e398df58d4f0d679890f1af0167ae66e60a4c33 | [
"MIT"
] | null | null | null | """This example changes the fill, brightness, blink rates,
shows number and text printing, displays a counter
and then shows off the new marquee features."""
from time import sleep
from adafruit_featherwing import alphanum_featherwing
display = alphanum_featherwing.AlphaNumFeatherWing()
#Fill and empty all segments
for count in range(0, 3):
display.fill(True)
sleep(0.5)
display.fill(False)
sleep(0.5)
#Display a number and text
display.print(1234)
sleep(1)
display.print('Text')
#Change brightness
for brightness in range(0, 16):
display.brightness = brightness
sleep(0.1)
#Change blink rate
for blink_rate in range(3, 0, -1):
display.blink_rate = blink_rate
sleep(4)
display.blink_rate = 0
#Show a counter using decimals
count = 975.0
while count < 1025:
count += 1
display.print(count)
sleep(0.1)
#Show the Marquee
display.marquee('This is a really long message!!! ', 0.2)
| 22.166667 | 58 | 0.726101 | """This example changes the fill, brightness, blink rates,
shows number and text printing, displays a counter
and then shows off the new marquee features."""
from time import sleep
from adafruit_featherwing import alphanum_featherwing
display = alphanum_featherwing.AlphaNumFeatherWing()
#Fill and empty all segments
for count in range(0, 3):
display.fill(True)
sleep(0.5)
display.fill(False)
sleep(0.5)
#Display a number and text
display.print(1234)
sleep(1)
display.print('Text')
#Change brightness
for brightness in range(0, 16):
display.brightness = brightness
sleep(0.1)
#Change blink rate
for blink_rate in range(3, 0, -1):
display.blink_rate = blink_rate
sleep(4)
display.blink_rate = 0
#Show a counter using decimals
count = 975.0
while count < 1025:
count += 1
display.print(count)
sleep(0.1)
#Show the Marquee
display.marquee('This is a really long message!!! ', 0.2)
| 0 | 0 | 0 |
a935e3065f38ba3f1eebd53f899074b9cc088b4c | 4,134 | py | Python | src/panda_arm_motion_planning/scripts/end_to_end_learning.py | rajathkmanjunath/Motion-Planning-Network | e53ee8c35b8349e78c5141d42670061f00a198d4 | [
"MIT"
] | 3 | 2020-04-13T03:43:30.000Z | 2021-11-03T09:08:17.000Z | src/panda_arm_motion_planning/scripts/end_to_end_learning.py | rajathkmanjunath/Motion-Planning-Network | e53ee8c35b8349e78c5141d42670061f00a198d4 | [
"MIT"
] | null | null | null | src/panda_arm_motion_planning/scripts/end_to_end_learning.py | rajathkmanjunath/Motion-Planning-Network | e53ee8c35b8349e78c5141d42670061f00a198d4 | [
"MIT"
] | 4 | 2021-09-16T06:06:56.000Z | 2021-11-10T08:45:34.000Z | import argparse
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils import data
from utils.mpnet import MPNet
from utils.plan_class import plan_dataset
if __name__ == '__main__':
    # CLI entry point: collect training hyper-parameters and hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, default='/scratch/rkm350/mpnet/dataset',
                        help='location of dataset directory')
    parser.add_argument('--num_epochs', type=int, default=100, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--num_files', type=int, default=10000, help='num of files')
    parser.add_argument('--num_workers', type=int, default=6, help='number of sub processes for loading data')
    parser.add_argument('--lam', type=float, default=0.001, help='lambda value for the CAE network')
    # Pass '--cuda cuda' to run on GPU; any other value keeps tensors on CPU.
    parser.add_argument('--cuda', type=str, default='cuda', help='Cuda for processing the network')
    args = parser.parse_args()
    main(args)
| 35.637931 | 110 | 0.579584 | import argparse
import numpy as np
import os
import torch
from torch import nn
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils import data
from utils.mpnet import MPNet
from utils.plan_class import plan_dataset
def main(args):
    """Train the end-to-end MPNet planner and report test accuracy.

    Args:
        args: argparse.Namespace with path, num_epochs, batch_size,
            learning_rate, num_files, num_workers and cuda fields.

    Side effects: reads point_cloud.npy and the plan dataset from args.path,
    saves weights to ./end_to_end_weights.pt, prints progress and accuracy.
    """
    cudnn.benchmark = True
    # DataLoader parameters; only training data is shuffled.
    train_params = {'batch_size': args.batch_size,
                    'shuffle': True,
                    'num_workers': args.num_workers}
    test_params = {'batch_size': args.batch_size,
                   'shuffle': False,
                   'num_workers': args.num_workers}
    # 90/10 split over 1-based file indices.
    partition = {'train': [i + 1 for i in range(int(0.9 * args.num_files))],
                 'test': [i + 1 for i in range(int(0.9 * args.num_files), args.num_files)]}
    point_cloud = np.load(os.path.join(args.path, 'point_cloud.npy'))
    point_cloud = torch.from_numpy(point_cloud)
    if (args.cuda == 'cuda'):
        point_cloud = point_cloud.cuda().float()
    training_set = plan_dataset(partition['train'], args.path)
    train_loader = data.DataLoader(training_set, **train_params)
    test_set = plan_dataset(partition['test'], args.path)
    test_loader = data.DataLoader(test_set, **test_params)
    mse = nn.MSELoss()
    planner = MPNet(88920, 14, 7)
    if (args.cuda == 'cuda'):
        planner = planner.cuda()
    parameters = planner.parameters()
    optimizer = torch.optim.Adam(parameters, lr=args.learning_rate)
    n_total_steps = len(train_loader)
    for epoch in range(args.num_epochs):
        for i, (states, plan) in enumerate(train_loader):
            states = states.float()
            plan = plan.float()
            states = Variable(states)
            # Broadcast the single point cloud across the batch dimension.
            ones = torch.ones(states.shape[0], 1)
            if (args.cuda == 'cuda'):
                states = states.cuda()
                plan = plan.cuda()
                ones = ones.cuda()
            pc = point_cloud * ones
            prediction = planner(pc, states)
            loss = mse(plan, prediction)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if ((i + 1) % 1 == 0):
                print('epoch {0}/{1}, step {2}/{3}, loss = {4:4f}'.format(epoch + 1, args.num_epochs, i + 1,
                                                                          n_total_steps,
                                                                          loss.item()))
    torch.save(planner.state_dict(), os.path.join(os.path.curdir, 'end_to_end_weights.pt'))
    # Evaluation pass over the held-out 10%.
    with torch.no_grad():
        n_correct = 0
        n_samples = 0
        for (states, plan) in test_loader:
            states = states.float()
            plan = plan.float()
            ones = torch.ones(states.shape[0], 1)
            if (args.cuda == 'cuda'):
                states = states.cuda()
                plan = plan.cuda()
                ones = ones.cuda()
            pc = point_cloud * ones
            prediction = planner(pc, states)
            print(prediction[0], plan[0])
            n_samples += plan.shape[0]
            # BUG FIX: was 'n_correct = ...', which overwrote the running
            # count every batch; accumulate across batches instead.
            # NOTE(review): this counts element-wise matches (batch * 7 per
            # batch) against a per-sample denominator — confirm the intended
            # accuracy definition.
            n_correct += (abs(prediction - plan) <= 0.01).sum().item()
        acc = 100.0 * n_correct / n_samples
        print('accuracy = {0}'.format(acc))
if __name__ == '__main__':
    # Parse training hyper-parameters from the command line and run main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, default='/scratch/rkm350/mpnet/dataset',
                        help='location of dataset directory')
    parser.add_argument('--num_epochs', type=int, default=100, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--num_files', type=int, default=10000, help='num of files')
    parser.add_argument('--num_workers', type=int, default=6, help='number of sub processes for loading data')
    parser.add_argument('--lam', type=float, default=0.001, help='lambda value for the CAE network')
    # '--cuda cuda' selects GPU execution; any other string runs on CPU.
    parser.add_argument('--cuda', type=str, default='cuda', help='Cuda for processing the network')
    args = parser.parse_args()
    main(args)
| 2,940 | 0 | 23 |
8c5b050b28a617f800cf70829f393e5c2794b87e | 23,318 | py | Python | homepage/views.py | tommyfuu/BioCalculator | 9501e7ce1420eb47a591ecf3c855e9e1b0330c60 | [
"MIT"
] | 3 | 2021-02-06T18:58:25.000Z | 2021-06-25T16:02:04.000Z | homepage/views.py | tommyfuu/BioCalculator | 9501e7ce1420eb47a591ecf3c855e9e1b0330c60 | [
"MIT"
] | 9 | 2021-06-15T23:38:31.000Z | 2021-10-08T21:58:08.000Z | homepage/views.py | tommyfuu/BioCalculator | 9501e7ce1420eb47a591ecf3c855e9e1b0330c60 | [
"MIT"
] | 1 | 2021-02-27T19:07:14.000Z | 2021-02-27T19:07:14.000Z | from django.shortcuts import render, HttpResponse, HttpResponseRedirect
# importing calculators
from .calculatorDilution import DilutionForm
from .calculatorDilution import *
from .calculatorPCR import PCRForm
from .calculatorPCR import *
from .calculatorUnitConvert import ConversionForm
from .calculatorUnitConvert import *
from .calculatorCuttingReaction import CuttingEdgeForm
from .calculatorCuttingReaction import *
from .opentrons import RandomNumGenerator
from .opentrons import *
# from .models import Person
import time
# Create your views here.
# CALCULATORS
# DILUTION CALCULATOR
# GLOBAL VARIABLES
INPUTVOL = None
INPUTCONC = None
INPUTSOLUTE = None
FINALVOL = None
FINALCONC = None
ADDEDSOLUTE = None
ADDEDWATER = None
MOLARMASS = None
# PCR CALCULATOR
# GLOBAL VARIABLES
RESULTtotalVol = None
RESULTwaterVol = None
RESULTPCRBufferVol = None
RESULTPCRBufferInitConc = None
RESULTPCRBufferFinalConc = None
RESULTpolymeraseVol = None
RESULTpolymeraseConc = None
RESULTdNTPVol = None
RESULTdNTPConc = None
RESULTMgCl2Vol = None
RESULTMgCl2Conc = None
RESULTforwardPrimerVol = None
RESULTforwardPrimerConc = None
RESULTbackwardPrimerVol = None
RESULTbackwardPrimerConc = None
RESULTtemplateDNAVol = None
RESULTtemplateDNAConc = None
RESULTDMSOOptionalVol = None
RESULTDMSOOptionalConc = None
ERRORMSG = ""
# UNIT CONVERSION CALCULATOR
# GLOBAL VARIABLES
INPUTVALUE = None
INPUTUNIT = None
OUTPUTVALUE = None
OUTPUTUNIT = None
MOLARMASS = None
ERROR = False
######################################## CUTTING REACTION CALCULATOR #######################################
# GLOBAL VARIABLES
TOTALVOL = None
TEMPLATEDNAVOL = None
TEMPLATEDNAINITCONC = None
TEMPLATEDNAFINALMASS = None
BUFFERVOL = None
BUFFERCONC = None
RESTRICTIONENZYMEVOL = None
RESTRICTIONENZYMECONC = None
WATERVOL = None
ERRORMSG = ""
# totalVol = cuttingform.cleaned_data['TOTALVOL']
# templateDNAVol = cuttingform.cleaned_data['templateDNAVol']
# templateDNAInitConc = cuttingform.cleaned_data['templateDNAInitConc']
# templateDNAFinalMass = cuttingform.cleaned_data['templateDNAFinalMass']
# bufferVol = cuttingform.cleaned_data['bufferVol']
# bufferConc = cuttingform.cleaned_data['bufferConc']
# restrictionEnzymeVol = cuttingform.cleaned_data['restrictionEnzymeVol']
# restrictionEnzymeConc = cuttingform.cleaned_data['restrictionEnzymeConc']
# TODO: Define global variables --> Work on the results page
######################################## AR OPENTRONS CALCULATOR #######################################
# global variable
FLOOR = None
CEILING = None
OPENTRONS_RESULT = None
# ####### TRIAL DEPENDENT DROPDOWN AAA AAAA #########
# from django.views.generic import ListView, CreateView, UpdateView
# from django.urls import reverse_lazy
# class PersonListView(ListView):
# model = Person
# context_object_name = 'people'
# class PersonCreateView(CreateView):
# model = Person
# form_class = PersonForm
# success_url = reverse_lazy('person_changelist')
# class PersonUpdateView(UpdateView):
# model = Person
# form_class = PersonForm
# success_url = reverse_lazy('person_changelist')
| 38.039152 | 203 | 0.596063 | from django.shortcuts import render, HttpResponse, HttpResponseRedirect
# importing calculators
from .calculatorDilution import DilutionForm
from .calculatorDilution import *
from .calculatorPCR import PCRForm
from .calculatorPCR import *
from .calculatorUnitConvert import ConversionForm
from .calculatorUnitConvert import *
from .calculatorCuttingReaction import CuttingEdgeForm
from .calculatorCuttingReaction import *
from .opentrons import RandomNumGenerator
from .opentrons import *
# from .models import Person
import time
# Create your views here.
def home(request):
    """Render the landing page."""
    # return HttpResponse("Home page!")
    return render(request, "home.html", {})
def calculators(request):
    """Render the calculator index page."""
    # return HttpResponse("Calculators page!")
    return render(request, "calculators.html", {})
def faq(request):
    """Render the FAQ page."""
    # return HttpResponse("FAQ page!")
    return render(request, "faq.html", {})
def about(request):
    """Render the about page."""
    # return HttpResponse("About page!")
    return render(request, "about.html", {})
# CALCULATORS
# DILUTION CALCULATOR
# GLOBAL VARIABLES
# Module-level defaults for the dilution calculator context.
# NOTE(review): dilution_input_view assigns same-named *local* variables
# without a `global` statement, so these stay None — verify the result/error
# views below are actually reachable with meaningful data.
INPUTVOL = None
INPUTCONC = None
INPUTSOLUTE = None
FINALVOL = None
FINALCONC = None
ADDEDSOLUTE = None
ADDEDWATER = None
MOLARMASS = None
def dilution_input_view(request):
    """Dilution calculator: show the form on GET, compute on valid POST.

    On success renders dilutionCalcResult.html; ERROR True renders the
    generic error page; a string ERROR code renders dilutionCalcSolute.html
    with a matching message.
    """
    # if this is a POST request we need to process the form data
    if request.method == "POST":
        # create a form instance and populate it with data from the request:
        form = DilutionForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            print("Extracting values")
            inputVol = form.cleaned_data["INPUTVOL"]
            inputConc = form.cleaned_data["INPUTCONC"]
            inputSolute = form.cleaned_data["INPUTSOLUTE"]
            finalVol = form.cleaned_data["FINALVOL"]
            finalConc = form.cleaned_data["FINALCONC"]
            inputSoluteUnit = form.cleaned_data["INPUTSOLUTEUNIT"]
            molarMass = form.cleaned_data["MOLARMASS"]
            inputVolUnit = form.cleaned_data["INPUTVOLUNIT"]
            inputConcUnit = form.cleaned_data["INPUTCONCUNIT"]
            finalVolUnit = form.cleaned_data["FINALVOLUNIT"]
            finalConcUnit = form.cleaned_data["FINALCONCUNIT"]
            outputVolUnit = form.cleaned_data["OUTPUTVOLUNIT"]
            outputConcUnit = form.cleaned_data["OUTPUTCONCUNIT"]
            outputSoluteUnit = form.cleaned_data["OUTPUTSOLUTEUNIT"]
            # addedSoluteVol = form.cleaned_data['ADDEDSOLUTE']
            # waterVol = form.cleaned_data['ADDEDWATER']
            # Added-solute/water amounts are solved for, not taken as input.
            addedSoluteVol = None
            waterVol = None
            # NOTE(review): this binds a *local* MOLARMASS, not the module
            # global of the same name.
            MOLARMASS = molarMass
            # INPUTVOL, INPUTCONC, INPUTSOLUTE, FINALVOL, FINALCONC, ADDEDSOLUTE, ADDEDWATER, ERROR = changeConcentrationTable(
            #     inputVol, inputConc, finalVol, finalConc, inputSolute, addedSoluteVol, waterVol)
            (
                INPUTVOL,
                INPUTCONC,
                INPUTSOLUTE,
                FINALVOL,
                FINALCONC,
                ADDEDSOLUTE,
                ADDEDWATER,
                OUTPUTVOLUNIT,
                OUTPUTCONCUNIT,
                OUTPUTSOLUTEUNIT,
                ERROR,
            ) = changeConcentrationTable(
                inputVol,
                inputVolUnit,
                inputConc,
                inputConcUnit,
                inputSolute,
                inputSoluteUnit,
                molarMass,
                finalVol,
                finalVolUnit,
                finalConc,
                finalConcUnit,
                outputVolUnit,
                outputConcUnit,
                outputSoluteUnit,
                addedSoluteVol,
                waterVol,
            )
            print("Here are the calculated input values for your desired output:")
            if ERROR == False:
                print("GOTORESULTPAGE")
                return render(
                    request,
                    "dilutionCalcResult.html",
                    {
                        "inputVol": INPUTVOL,
                        "inputConc": INPUTCONC,
                        "inputSolute": INPUTSOLUTE,
                        "finalVol": FINALVOL,
                        "finalConc": FINALCONC,
                        "addedSolute": ADDEDSOLUTE,
                        "addedWater": ADDEDWATER,
                        "outputVolUnit": OUTPUTVOLUNIT,
                        "outputConcUnit": OUTPUTCONCUNIT,
                        "outputSoluteUnit": OUTPUTSOLUTEUNIT,
                        "molarMass": MOLARMASS
                    },
                )
            elif ERROR == True:
                # not enough inputs
                return render(request, "dilutionCalcError.html", {})
            else:
                # ERROR is a string code; map it to a user-facing message.
                if ERROR == "solute":
                    info = "Error: Input solution concentration not the same as the concentration value calculated with inputSolute and inputVol."
                if ERROR == "unachievable":
                    info = "Error: Computation unachievable. The amount of solute in the final solution is smaller than the amount of solute in the input solution."
                if ERROR == "inputVol==0":
                    info = "Error: input volume = 0, invalid input solution."
                if ERROR == "zeroMolarMass":
                    info = "Error: zero molar mass. You should either NOT input molar mass if your calculation does not involve molar conversion, or you should enter a numerical molar mass value."
                if ERROR == "displayUnit":
                    info = "Error: inputted input liquid concentration but not molar mass. This way the amount of solute cannot be displayed in mass, which is problematic for our current implementation."
                return render(request, "dilutionCalcSolute.html", {"error": info})
    # if a GET (or any other method) we'll create a blank form
    else:
        form = DilutionForm()
    return render(request, "dilutionCalc.html", {"form": form})
def dilution_result_view(request):
    """Render the dilution result page from module-level globals.

    NOTE(review): the globals referenced here are initialized to None and
    the input view assigns only locals, so this page renders empty values —
    confirm whether this route is still used.
    """
    # return HttpResponse("dilution calculator result page")
    return render(
        request,
        "dilutionCalcResult.html",
        {
            "inputVol": INPUTVOL,
            "inputConc": INPUTCONC,
            "inputSolute": INPUTSOLUTE,
            "finalVol": FINALVOL,
            "finalConc": FINALCONC,
            "addedSolute": ADDEDSOLUTE,
            "addedWater": ADDEDWATER,
        },
    )
def dilution_error_view(request):
    """Render the dilution error page with the module-level ERRORMSG."""
    # return HttpResponse("dilution calculator error page")
    return render(request, "dilutionCalcError.html", {"errorMsg": ERRORMSG})
# PCR CALCULATOR
# GLOBAL VARIABLES
# Module-level defaults for the PCR calculator context (one entry per
# reagent volume/concentration rendered on the result page).
# NOTE(review): pcr_input_view assigns same-named locals without `global`,
# so these remain None for pcr_result_view.
RESULTtotalVol = None
RESULTwaterVol = None
RESULTPCRBufferVol = None
RESULTPCRBufferInitConc = None
RESULTPCRBufferFinalConc = None
RESULTpolymeraseVol = None
RESULTpolymeraseConc = None
RESULTdNTPVol = None
RESULTdNTPConc = None
RESULTMgCl2Vol = None
RESULTMgCl2Conc = None
RESULTforwardPrimerVol = None
RESULTforwardPrimerConc = None
RESULTbackwardPrimerVol = None
RESULTbackwardPrimerConc = None
RESULTtemplateDNAVol = None
RESULTtemplateDNAConc = None
RESULTDMSOOptionalVol = None
RESULTDMSOOptionalConc = None
ERRORMSG = ""
def pcr_input_view(request):
    """PCR mix calculator: show the form on GET, compute on valid POST.

    Extracts all reagent fields, delegates to getVolumesPCR, and renders
    the result page on success or the error page otherwise.
    """
    # if this is a POST request we need to process the form data
    if request.method == "POST":
        # create a form instance and populate it with data from the request:
        pcrform = PCRForm(request.POST)
        # check whether it's valid:
        if pcrform.is_valid():
            totalVol = pcrform.cleaned_data["totalVol"]
            waterVol = pcrform.cleaned_data["waterVol"]
            PCRBufferVol = pcrform.cleaned_data["PCRBufferVol"]
            PCRBufferInitConc = pcrform.cleaned_data["PCRBufferInitConc"]
            PCRBufferFinalConc = pcrform.cleaned_data["PCRBufferFinalConc"]
            polymeraseVol = pcrform.cleaned_data["polymeraseVol"]
            polymeraseConc = pcrform.cleaned_data["polymeraseConc"]
            dNTPVol = pcrform.cleaned_data["dNTPVol"]
            dNTPConc = pcrform.cleaned_data["dNTPConc"]
            MgCl2Vol = pcrform.cleaned_data["MgCl2Vol"]
            MgCl2Conc = pcrform.cleaned_data["MgCl2Conc"]
            forwardPrimerVol = pcrform.cleaned_data["forwardPrimerVol"]
            forwardPrimerConc = pcrform.cleaned_data["forwardPrimerConc"]
            backwardPrimerVol = pcrform.cleaned_data["backwardPrimerVol"]
            backwardPrimerConc = pcrform.cleaned_data["backwardPrimerConc"]
            templateDNAVol = pcrform.cleaned_data["templateDNAVol"]
            templateDNAConc = pcrform.cleaned_data["templateDNAConc"]
            DMSOOptionalVol = pcrform.cleaned_data["DMSOOptionalVol"]
            DMSOOptionalConc = pcrform.cleaned_data["DMSOOptionalConc"]
            # Solve for the missing volumes/concentrations.
            results = getVolumesPCR(
                totalVol,
                waterVol,
                PCRBufferVol,
                PCRBufferInitConc,
                PCRBufferFinalConc,
                polymeraseVol,
                polymeraseConc,
                dNTPVol,
                dNTPConc,
                MgCl2Vol,
                MgCl2Conc,
                forwardPrimerVol,
                forwardPrimerConc,
                backwardPrimerVol,
                backwardPrimerConc,
                templateDNAVol,
                templateDNAConc,
                DMSOOptionalVol,
                DMSOOptionalConc,
            )
            # Unpack the 19 computed values plus the trailing error flag.
            (
                RESULTtotalVol,
                RESULTwaterVol,
                RESULTPCRBufferVol,
                RESULTPCRBufferInitConc,
                RESULTPCRBufferFinalConc,
                RESULTpolymeraseVol,
                RESULTpolymeraseConc,
                RESULTdNTPVol,
                RESULTdNTPConc,
                RESULTMgCl2Vol,
                RESULTMgCl2Conc,
                RESULTforwardPrimerVol,
                RESULTforwardPrimerConc,
                RESULTbackwardPrimerVol,
                RESULTbackwardPrimerConc,
                RESULTtemplateDNAVol,
                RESULTtemplateDNAConc,
                RESULTDMSOOptionalVol,
                RESULTDMSOOptionalConc,
                ERROR,
            ) = results
            # ERROR = False
            if ERROR == False:
                return render(
                    request,
                    "calcPCRResult.html",
                    {
                        "RESULTtotalVol": RESULTtotalVol,
                        "RESULTwaterVol": RESULTwaterVol,
                        "RESULTPCRBufferVol": RESULTPCRBufferVol,
                        "RESULTPCRBufferInitConc": RESULTPCRBufferInitConc,
                        "RESULTPCRBufferFinalConc": RESULTPCRBufferFinalConc,
                        "RESULTpolymeraseVol": RESULTpolymeraseVol,
                        "RESULTpolymeraseConc": RESULTpolymeraseConc,
                        "RESULTdNTPVol": RESULTdNTPVol,
                        "RESULTdNTPConc": RESULTdNTPConc,
                        "RESULTMgCl2Vol": RESULTMgCl2Vol,
                        "RESULTMgCl2Conc": RESULTMgCl2Conc,
                        "RESULTforwardPrimerVol": RESULTforwardPrimerVol,
                        "RESULTforwardPrimerConc": RESULTforwardPrimerConc,
                        "RESULTbackwardPrimerVol": RESULTbackwardPrimerVol,
                        "RESULTbackwardPrimerConc": RESULTbackwardPrimerConc,
                        "RESULTtemplateDNAVol": RESULTtemplateDNAVol,
                        "RESULTtemplateDNAConc": RESULTtemplateDNAConc,
                        "RESULTDMSOOptionalVol": RESULTDMSOOptionalVol,
                        "RESULTDMSOOptionalConc": RESULTDMSOOptionalConc,
                    },
                )
            else:
                ERRORMSG = "There's some error"
                # return render(request, 'calcPCR.html', {'pcrform': pcrform})
                return render(request, "calcPCRError.html", {"errorMsg": ERRORMSG})
    else:
        pcrform = PCRForm()
    # Reached on GET and on invalid POST (form re-rendered with errors).
    return render(request, "calcPCR.html", {"pcrform": pcrform})
def pcr_result_view(request):
    """Render the PCR result page from module-level RESULT* globals.

    NOTE(review): those globals are never reassigned at module scope by
    pcr_input_view, so this renders None values — confirm this route is used.
    """
    # return HttpResponse("PCR result page!")
    return render(
        request,
        "calcPCRResult.html",
        {
            "RESULTtotalVol": RESULTtotalVol,
            "RESULTwaterVol": RESULTwaterVol,
            "RESULTPCRBufferVol": RESULTPCRBufferVol,
            "RESULTPCRBufferInitConc": RESULTPCRBufferInitConc,
            "RESULTPCRBufferFinalConc": RESULTPCRBufferFinalConc,
            "RESULTpolymeraseVol": RESULTpolymeraseVol,
            "RESULTpolymeraseConc": RESULTpolymeraseConc,
            "RESULTdNTPVol": RESULTdNTPVol,
            "RESULTdNTPConc": RESULTdNTPConc,
            "RESULTMgCl2Vol": RESULTMgCl2Vol,
            "RESULTMgCl2Conc": RESULTMgCl2Conc,
            "RESULTforwardPrimerVol": RESULTforwardPrimerVol,
            "RESULTforwardPrimerConc": RESULTforwardPrimerConc,
            "RESULTbackwardPrimerVol": RESULTbackwardPrimerVol,
            "RESULTbackwardPrimerConc": RESULTbackwardPrimerConc,
            "RESULTtemplateDNAVol": RESULTtemplateDNAVol,
            "RESULTtemplateDNAConc": RESULTtemplateDNAConc,
            "RESULTDMSOOptionalVol": RESULTDMSOOptionalVol,
            "RESULTDMSOOptionalConc": RESULTDMSOOptionalConc,
        },
    )
def pcr_error_view(request):
    """Render the PCR error page with the module-level ERRORMSG."""
    # return HttpResponse("PCR error page!")
    return render(request, "calcPCRError.html", {"errorMsg": ERRORMSG})
# UNIT CONVERSION CALCULATOR
# GLOBAL VARIABLES
# Module-level defaults for the unit-conversion calculator context.
# NOTE(review): unit_convert_input_view binds same-named locals only,
# so these defaults are what unit_convert_result_view renders.
INPUTVALUE = None
INPUTUNIT = None
OUTPUTVALUE = None
OUTPUTUNIT = None
MOLARMASS = None
ERROR = False
def unit_convert_input_view(request):
    """Unit-conversion calculator: show the form on GET, convert on POST.

    unitTable returns the echoed inputs plus ERROR; an empty-string ERROR
    means success.
    """
    # if this is a POST request we need to process the form data
    if request.method == "POST":
        # create a form instance and populate it with data from the request:
        conversionform = ConversionForm(request.POST)
        # check whether it's valid:
        if conversionform.is_valid():
            inputValue = conversionform.cleaned_data["INPUTVALUE"]
            inputUnit = conversionform.cleaned_data["INPUTUNIT"]
            outputValue = conversionform.cleaned_data["OUTPUTVALUE"]
            outputUnit = conversionform.cleaned_data["OUTPUTUNIT"]
            molarMass = conversionform.cleaned_data["MOLARMASS"]
            results = unitTable(
                inputValue, inputUnit, outputValue, outputUnit, molarMass
            )
            print("Here is conversion value for your input:")
            INPUTVALUE, INPUTUNIT, OUTPUTVALUE, OUTPUTUNIT, MOLARMASS, ERROR = results
            if ERROR == "":
                return render(
                    request,
                    "calcUnitConvertResult.html",
                    {
                        "inputValue": INPUTVALUE,
                        "inputUnit": INPUTUNIT,
                        "outputValue": OUTPUTVALUE,
                        "outputUnit": OUTPUTUNIT,
                        "molarMass": MOLARMASS,
                    },
                )
            else:
                return render(request, "calcUnitConvertError.html", {"errorMsg": ERROR})
        else:
            # Invalid form data is routed to the error template with the form.
            return render(
                request, "calcUnitConvertError.html", {"conversionform": conversionform}
            )
    else:
        conversionform = ConversionForm()
    return render(request, "calcUnitConvert.html", {"conversionform": conversionform})
def unit_convert_result_view(request):
    """Render the unit-conversion result page from module-level globals.

    NOTE(review): these globals default to None/False and are not updated
    at module scope by the input view — confirm this route is still used.
    """
    # return HttpResponse("unit conversion result page!")
    return render(
        request,
        "calcUnitConvertResult.html",
        {
            "inputValue": INPUTVALUE,
            "inputUnit": INPUTUNIT,
            "outputValue": OUTPUTVALUE,
            "outputUnit": OUTPUTUNIT,
            "molarMass": MOLARMASS,
        },
    )
def unit_convert_error_view(request):
    """Render the unit-conversion error page with the module-level ERRORMSG."""
    # return HttpResponse("unit conversion error page!")
    return render(request, "calcUnitConvertError.html", {"errorMsg": ERRORMSG})
######################################## CUTTING REACTION CALCULATOR #######################################
# GLOBAL VARIABLES
# Module-level defaults for the cutting (restriction digest) calculator.
# These are what cutting_reaction_result_view renders when nothing has been
# computed.
TOTALVOL = None
TEMPLATEDNAVOL = None
TEMPLATEDNAINITCONC = None
TEMPLATEDNAFINALMASS = None
BUFFERVOL = None
BUFFERCONC = None
RESTRICTIONENZYMEVOL = None
RESTRICTIONENZYMECONC = None
WATERVOL = None
ERRORMSG = ""
# totalVol = cuttingform.cleaned_data['TOTALVOL']
# templateDNAVol = cuttingform.cleaned_data['templateDNAVol']
# templateDNAInitConc = cuttingform.cleaned_data['templateDNAInitConc']
# templateDNAFinalMass = cuttingform.cleaned_data['templateDNAFinalMass']
# bufferVol = cuttingform.cleaned_data['bufferVol']
# bufferConc = cuttingform.cleaned_data['bufferConc']
# restrictionEnzymeVol = cuttingform.cleaned_data['restrictionEnzymeVol']
# restrictionEnzymeConc = cuttingform.cleaned_data['restrictionEnzymeConc']
def cutting_reaction_input_view(request):
    """Cutting-reaction (restriction digest) calculator view.

    GET: render a blank form. POST: validate, delegate to
    getVolumesCuttingReaction, and render the result page on success,
    the error page on a computation error, or the form again on
    validation failure.
    """
    if request.method == "POST":
        # Bind the submitted data to the form for validation.
        cuttingform = CuttingEdgeForm(request.POST)
        if cuttingform.is_valid():
            totalVol = cuttingform.cleaned_data["totalVol"]
            templateDNAVol = cuttingform.cleaned_data["templateDNAVol"]
            templateDNAInitConc = cuttingform.cleaned_data["templateDNAInitConc"]
            templateDNAFinalMass = cuttingform.cleaned_data["templateDNAFinalMass"]
            bufferVol = cuttingform.cleaned_data["bufferVol"]
            bufferInitConc = cuttingform.cleaned_data["bufferInitConc"]
            bufferFinalConc = cuttingform.cleaned_data["bufferFinalConc"]
            restrictionEnzymeVol = cuttingform.cleaned_data["restrictionEnzymeVol"]
            restrictionEnzymeInitConc = cuttingform.cleaned_data[
                "restrictionEnzymeInitConc"
            ]
            restrictionEnzymeFinalConc = cuttingform.cleaned_data[
                "restrictionEnzymeFinalConc"
            ]
            # Solve for the missing volumes/concentrations.
            results = getVolumesCuttingReaction(
                totalVol,
                templateDNAVol,
                templateDNAInitConc,
                templateDNAFinalMass,
                bufferVol,
                bufferInitConc,
                bufferFinalConc,
                restrictionEnzymeVol,
                restrictionEnzymeInitConc,
                restrictionEnzymeFinalConc,
            )
            # Unpack the computed values plus the trailing error flag.
            (
                totalVol,
                templateDNAVol,
                templateDNAInitConc,
                templateDNAFinalMass,
                bufferVol,
                bufferInitConc,
                bufferFinalConc,
                restrictionEnzymeVol,
                restrictionEnzymeInitConc,
                restrictionEnzymeFinalConc,
                waterVol,
                ERROR,
            ) = results
            if ERROR == False:
                return render(
                    request,
                    "cuttingReactionCalcResult.html",
                    {
                        "totalVol": totalVol,
                        "templateDNAVol": templateDNAVol,
                        "templateDNAInitConc": templateDNAInitConc,
                        "templateDNAFinalMass": templateDNAFinalMass,
                        "bufferVol": bufferVol,
                        "bufferInitConc": bufferInitConc,
                        "bufferFinalConc": bufferFinalConc,
                        "restrictionEnzymeVol": restrictionEnzymeVol,
                        "restrictionEnzymeInitConc": restrictionEnzymeInitConc,
                        "restrictionEnzymeFinalConc": restrictionEnzymeFinalConc,
                        "waterVol": waterVol,
                    },
                )
            # BUG FIX: this branch previously fell through and the view
            # returned None, which makes Django raise "The view ... didn't
            # return an HttpResponse". Render the error page instead.
            return render(
                request,
                "cuttingReactionCalcError.html",
                {"errorMsg": "There's some error"},
            )
        # BUG FIX: an invalid POST also used to fall through and return None.
        # Redisplay the bound form so field validation errors are shown.
        return render(request, "cuttingReactionCalc.html", {"cuttingform": cuttingform})
    cuttingform = CuttingEdgeForm()
    return render(request, "cuttingReactionCalc.html", {"cuttingform": cuttingform})
# TODO: Define global variables --> Work on the results page
def cutting_reaction_result_view(request):
    """Redisplay the cutting-reaction result page.

    The context is taken from the module-level ``*VOL``/``*CONC`` globals
    (``None`` until something assigns them).
    """
    context = {
        "totalVol": TOTALVOL,
        "templateDNAVol": TEMPLATEDNAVOL,
        "templateDNAInitConc": TEMPLATEDNAINITCONC,
        "templateDNAFinalMass": TEMPLATEDNAFINALMASS,
        "bufferVol": BUFFERVOL,
        "bufferConc": BUFFERCONC,
        "restrictionEnzymeVol": RESTRICTIONENZYMEVOL,
        "restrictionEnzymeConc": RESTRICTIONENZYMECONC,
        "waterVol": WATERVOL,
    }
    return render(request, "cuttingReactionCalcResult.html", context)
######################################## AR OPENTRONS CALCULATOR #######################################
# global variable
# Module-level cache of the most recent random-number request; rendered by
# opentrons_result_view.
FLOOR = None
CEILING = None
OPENTRONS_RESULT = None
def opentrons_view(request):
    """Random-number generator form for the AR opentrons page.

    GET: render an empty ``RandomNumGenerator`` form.
    POST: validate, generate the number and render the result page.  The
    inputs and result are stored in the module-level FLOOR / CEILING /
    OPENTRONS_RESULT globals so ``opentrons_result_view`` can redisplay them.
    (Stray debug ``print`` calls removed.)
    """
    # BUGFIX: these were plain local assignments before, which shadowed the
    # module globals and left opentrons_result_view rendering stale Nones.
    global FLOOR, CEILING, OPENTRONS_RESULT
    if request.method == "POST":
        # create a form instance and populate it with data from the request:
        randomForm = RandomNumGenerator(request.POST)
        # check whether it's valid:
        if randomForm.is_valid():
            FLOOR = randomForm.cleaned_data["floor"]
            CEILING = randomForm.cleaned_data["ceiling"]
            # call python functions from your py file
            OPENTRONS_RESULT = randomNumGenerator(FLOOR, CEILING)
            return render(
                request,
                "opentronsResult.html",
                {"floor": FLOOR, "ceiling": CEILING, "result": OPENTRONS_RESULT},
            )
    # else clause should be 'else' compared to "if request.method == 'POST':"
    else:
        randomForm = RandomNumGenerator()
    # GET requests and invalid POSTs (re-)render the input form.
    return render(request, "opentrons.html", {"randomForm": randomForm})
def opentrons_result_view(request):
    """Render the opentrons result page from the module-level globals."""
    context = {"floor": FLOOR, "ceiling": CEILING, "result": OPENTRONS_RESULT}
    return render(request, "opentronsResult.html", context)
# ####### TRIAL DEPENDENT DROPDOWN AAA AAAA #########
# from django.views.generic import ListView, CreateView, UpdateView
# from django.urls import reverse_lazy
# class PersonListView(ListView):
# model = Person
# context_object_name = 'people'
# class PersonCreateView(CreateView):
# model = Person
# form_class = PersonForm
# success_url = reverse_lazy('person_changelist')
# class PersonUpdateView(UpdateView):
# model = Person
# form_class = PersonForm
# success_url = reverse_lazy('person_changelist')
| 19,702 | 0 | 390 |
d3c44721938c2e001d9a0ea64b9e887be6780370 | 1,293 | py | Python | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
] | null | null | null | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
] | null | null | null | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
] | 1 | 2021-02-16T03:06:38.000Z | 2021-02-16T03:06:38.000Z | # scrapes Townes van Zandt lyrics
# sample code so I don't have to remember all of this stuff
# the next time I want to source some verses
from bs4 import BeautifulSoup as soup
import requests
import string
# Translation table that deletes every ASCII punctuation character.
punctuation_trans_table = str.maketrans("", "", string.punctuation)
# Root of the lyrics site; song pages are fetched relative to this URL.
base_url = "http://ippc2.orst.edu/coopl/lyrics/"
# Fetch and parse the album index page (runs at import time).
index = requests.get(base_url + "albums.html")
parsed_index = soup(index.text)
all_links = parsed_index.find_all("a")  # get all <a> tags
links = [l for l in all_links if l.text]  # filter out image links
def to_filename(s, path="texts/townes_van_zandt/"):
    """Snake-case a song title into an output file path under *path*."""
    title = s.replace("&", "and")  # special case, "Poncho & Lefty"
    title = strip_punctuation(title).lower()
    return path + title.replace(" ", "_") + ".txt"
| 28.108696 | 67 | 0.657386 | # scrapes Townes van Zandt lyrics
# sample code so I don't have to remember all of this stuff
# the next time I want to source some verses
from bs4 import BeautifulSoup as soup
import requests
import string
punctuation_trans_table = str.maketrans("", "", string.punctuation)
def strip_punctuation(s):
    """Return *s* with every ASCII punctuation character removed."""
    # punctuation_trans_table (module level) maps punctuation to deletion.
    cleaned = s.translate(punctuation_trans_table)
    return cleaned
# Base URL of the lyrics site; song pages are relative to this.
base_url = "http://ippc2.orst.edu/coopl/lyrics/"
# Download and parse the album index (executed at import time).
index = requests.get(base_url + "albums.html")
parsed_index = soup(index.text)
all_links = parsed_index.find_all("a")  # get all <a> tags
links = [l for l in all_links if l.text]  # filter out image links
def to_filename(s, path="texts/townes_van_zandt/"):
    """Turn a song title into a snake_cased ``.txt`` path below *path*."""
    # "&" would otherwise be deleted outright by strip_punctuation
    # (special case, "Poncho & Lefty").
    cleaned = strip_punctuation(s.replace("&", "and"))
    return "{}{}.txt".format(path, cleaned.lower().replace(" ", "_"))
def process_link(link):
    """Download the song page behind *link* and write its verses to a file.

    The output file name is derived from the link text via ``to_filename()``;
    verses are the ``<font>`` elements that carry a ``size`` attribute, and
    are written separated by a blank line.
    """
    title = link.text
    remote_file = link.get("href")
    song_file = requests.get(base_url + remote_file)
    verses = [l for l in soup(song_file.text).find_all("font")
              if l.get("size")]
    # BUGFIX: the output file was opened without ever being closed; the
    # context manager guarantees it is flushed and closed even on error.
    with open(to_filename(title), "w") as f:
        for verse in verses:
            if verse.text:
                f.writelines("\n".join(verse.stripped_strings))
                f.write("\n\n")
| 433 | 0 | 46 |
ad6a3927e81b1954002958e1e12bc5b7e6b8edec | 592 | py | Python | charity/management/commands/import_charities.py | A-jha383/Charity_backend | 2f985dac9de41af80b593210e74bd1890022a435 | [
"MIT"
] | 1 | 2021-06-10T03:36:22.000Z | 2021-06-10T03:36:22.000Z | charity/management/commands/import_charities.py | A-jha383/Charity_backend | 2f985dac9de41af80b593210e74bd1890022a435 | [
"MIT"
] | null | null | null | charity/management/commands/import_charities.py | A-jha383/Charity_backend | 2f985dac9de41af80b593210e74bd1890022a435 | [
"MIT"
] | null | null | null | from django.core import management
from django.core.management.base import BaseCommand
| 29.6 | 89 | 0.594595 | from django.core import management
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Run every charity import scraper, then refresh org ids and geodata."""

    # Scrapers to run, in order; "oscr" is currently disabled.
    scrapers = [
        "ccew",
        "ccni",
        # "oscr",
    ]

    def handle(self, *args, **options):
        """Best-effort run: a failing scraper is reported, not fatal."""
        for name in self.scrapers:
            try:
                management.call_command("import_{}".format(name))
            except Exception:
                self.stdout.write(self.style.ERROR("Command {} failed".format(name)))
        management.call_command("update_orgids")
        management.call_command("update_geodata")
| 363 | 114 | 24 |
a6bf6944f817db921b3a6da6d9784ed05684654a | 301 | py | Python | examples/ssids.py | AndersBlomdell/python-networkmanager | 5842142a3777acdeab1740623b07e7152eba6706 | [
"Zlib"
] | 128 | 2015-01-13T12:42:31.000Z | 2022-02-19T11:21:53.000Z | examples/ssids.py | smuething/python-networkmanager | d3018f4e24abf929f322c26e90afe4e364c5fd5b | [
"Zlib"
] | 75 | 2015-01-06T15:47:56.000Z | 2022-03-15T08:43:04.000Z | examples/ssids.py | smuething/python-networkmanager | d3018f4e24abf929f322c26e90afe4e364c5fd5b | [
"Zlib"
] | 85 | 2015-01-22T08:59:33.000Z | 2022-03-23T17:05:43.000Z | """
Display all visible SSIDs
"""
import NetworkManager
for dev in NetworkManager.NetworkManager.GetDevices():
    # Only wifi devices expose access points; skip everything else.
    if dev.DeviceType != NetworkManager.NM_DEVICE_TYPE_WIFI:
        continue
    # One line per visible access point: SSID, frequency, signal strength %.
    for ap in dev.GetAccessPoints():
        print('%-30s %dMHz %d%%' % (ap.Ssid, ap.Frequency, ap.Strength))
| 25.083333 | 72 | 0.694352 | """
Display all visible SSIDs
"""
import NetworkManager
# Print one line (SSID, frequency, signal strength %) per visible access
# point on every wifi device.
wifi_devices = (
    device
    for device in NetworkManager.NetworkManager.GetDevices()
    if device.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI
)
for device in wifi_devices:
    for access_point in device.GetAccessPoints():
        print('%-30s %dMHz %d%%' % (access_point.Ssid,
                                    access_point.Frequency,
                                    access_point.Strength))
| 0 | 0 | 0 |
9e584b561484c57289fc7a5484fd3660c7b2b173 | 5,255 | py | Python | src/utility/GetStationCategoryNameLocation.py | Jinglebear/db-ripper | dbea55349fbc37df8836cefc6206fa5972f34d99 | [
"MIT"
] | 1 | 2021-06-28T18:37:27.000Z | 2021-06-28T18:37:27.000Z | src/utility/GetStationCategoryNameLocation.py | Jinglebear/db-ripper | dbea55349fbc37df8836cefc6206fa5972f34d99 | [
"MIT"
] | 2 | 2021-07-12T18:01:43.000Z | 2021-07-26T15:26:56.000Z | src/utility/GetStationCategoryNameLocation.py | Jinglebear/db-ripper | dbea55349fbc37df8836cefc6206fa5972f34d99 | [
"MIT"
] | 1 | 2021-06-25T08:29:29.000Z | 2021-06-25T08:29:29.000Z | import requests
import time
import csv
import sys
from itertools import groupby
import Utils
# ====================================================================
# **Description**
# This script takes the list of train stations we found on wikipedia
# --> Extracts: train station number, train station name, train station category
# --> Calls the db-stations API to recieve:
# --> GeoJSON (Point) [longitude, latitude] for every train station
# --> Saves the successfull recieved data in a new .csv file
# --> Saves the train stations where the request failed in a new .csv file
# (Depending on the size of the failed request, repeat the steps)
# read train stations from csv file (where data begins in second row)
# function to handle the api call and save the data
#utility function to quickly check if all values of the array are equal
# function to handle the auth tokens
# array with the important station data
stations = read_station("/home/bigdata/db-ripper/misc/Mappe1.csv")
# the URL for db API requests regarding train station data
base_request_string = "https://api.deutschebahn.com/stada/v2/stations/"
# the array of successfull collected train station data
resultArr = []
# the array of unsuccessfull collected train station data
failArr = []
# the array of auth tokens that are subscribed to the db-api stations
tokenArr = Utils.tokens_timetable_parking
# the array of headers needed for the request containing the auth tokens
headers_arr = []
for token in tokenArr:
    header = {'Accept': 'application/json', 'Authorization': token}
    headers_arr.append(header)
# the array of counters for each auth token, to make sure every auth token is only used n-times before
# the next token has to be used, if all tokens have been used n-times the loop needs to sleep to garant access
counter_arr = []
# NOTE(review): 30 counters are created; presumably this equals
# len(tokenArr) -- confirm, otherwise counters and tokens drift apart.
for i in range(30):
    counter_arr.append(0)
# work
compute_geo_data(stations, base_request_string, headers_arr,
                 resultArr, failArr, counter_arr=counter_arr)
#write successes data in a new file 'test_table_result.csv'
with open("/home/bigdata/db-ripper/misc/test_table_result.csv", "w", newline='', encoding='utf-8') as resultfile:
    writer = csv.writer(resultfile)
    for result in resultArr:
        writer.writerow(result)
#write failures in a new file 'test_table_fail.csv'
with open("/home/bigdata/db-ripper/misc/test_table_fail.csv", "w", newline='', encoding='utf-8') as failfile:
    writer = csv.writer(failfile)
    for fail in failArr:
        writer.writerow(fail)
| 40.423077 | 113 | 0.64548 | import requests
import time
import csv
import sys
from itertools import groupby
import Utils
# ====================================================================
# **Description**
# This script takes the list of train stations we found on wikipedia
# --> Extracts: train station number, train station name, train station category
# --> Calls the db-stations API to recieve:
# --> GeoJSON (Point) [longitude, latitude] for every train station
# --> Saves the successfull recieved data in a new .csv file
# --> Saves the train stations where the request failed in a new .csv file
# (Depending on the size of the failed request, repeat the steps)
# read train stations from csv file (where data begins in second row)
def read_station(filename):
    """Parse the exported station list, skipping the header row.

    Each data row is expected to arrive as a single quoted CSV field whose
    content is itself comma-separated; returns a list of
    (station_number, station_name, station_category) tuples.
    """
    parsed = []
    with open(filename, encoding='utf-8') as fh:
        for index, row in enumerate(csv.reader(fh)):
            if index == 0:
                continue  # first row is the header
            fields = row[0].split(',')
            # fields[0] -> name, fields[1] -> number, fields[4] -> category
            parsed.append((fields[1], fields[0], fields[4]))
    return parsed
# function to handle the api call and save the data
def get_geo_data(station, base_request_string, header, result_arr, fail_arr):
    """Query the stations API for one station.

    On HTTP 200 appends (number, category, city, geographicCoordinates) to
    *result_arr*; otherwise appends the original *station* to *fail_arr*.
    """
    with requests.Session() as session:
        raw = session.get(base_request_string + station[1], headers=header)
        status = raw.status_code
        payload = raw.json()  # json encoding of response
        if status != 200:
            print("ERROR: " + str(status), flush=True)  # debug
            fail_arr.append(station)
            return
        record = payload['result'][0]
        city = record['mailingAddress']['city']
        geographic_coordinates = record['evaNumbers'][0]['geographicCoordinates']
        result_arr.append((station[0], station[2], city, geographic_coordinates))
#utility function to quickly check if all values of the array are equal
def all_equal(counter_arr):
    """Return True when every element of *counter_arr* compares equal
    (vacuously True for an empty sequence)."""
    iterator = iter(counter_arr)
    try:
        first = next(iterator)
    except StopIteration:
        return True
    return all(value == first for value in iterator)
# function to handle the auth tokens
def compute_geo_data(stations, base_request_string, headers, result_arr, fail_arr, counter_arr):
    """Fetch geo data for every station, rotating through the API tokens.

    Each token (``headers[i]``) is used at most 100 times per window; when
    all counters are equal (i.e. every token hit the cap) the loop sleeps
    61s and resets them.  Stations that fail are collected in *fail_arr*.
    """
    control = 0
    for station in stations:
        control += 1
        print(control, flush=True)  # progress indicator
        try:
            # use the first token that still has quota in this window
            for i in range(len(counter_arr)):
                if counter_arr[i] < 100:
                    counter_arr[i] += 1
                    get_geo_data(station=station, base_request_string=base_request_string,
                                 header=headers[i], result_arr=result_arr, fail_arr=fail_arr)
                    break
            # every token exhausted -> wait out the rate-limit window
            if all_equal(counter_arr=counter_arr):
                print("<<<<<<<<<<<<<<SLEEPING>>>>>>>>>>>>>>>>", flush=True)
                time.sleep(61)
                for j in range(len(counter_arr)):
                    counter_arr[j] = 0
        except IndexError:
            print("ERROR: IndexError", flush=True)  # debug
            fail_arr.append(station)
            fail_arr.append("(IndexError)")
        except Exception as e:
            # BUGFIX: was a bare `except:` that also swallowed SystemExit /
            # KeyboardInterrupt and never recorded the failed station.
            print(e)
            fail_arr.append(station)
# array with the important station data
stations = read_station("/home/bigdata/db-ripper/misc/Mappe1.csv")
# the URL for db API requests regarding train station data
base_request_string = "https://api.deutschebahn.com/stada/v2/stations/"
# the array of successfull collected train station data
resultArr = []
# the array of unsuccessfull collected train station data
failArr = []
# the array of auth tokens that are subscribed to the db-api stations
tokenArr = Utils.tokens_timetable_parking
# the array of headers needed for the request containing the auth tokens
headers_arr = []
for token in tokenArr:
    header = {'Accept': 'application/json', 'Authorization': token}
    headers_arr.append(header)
# the array of counters for each auth token, to make sure every auth token is only used n-times before
# the next token has to be used, if all tokens have been used n-times the loop needs to sleep to garant access
counter_arr = []
# NOTE(review): hard-coded 30 counters -- presumably matches len(tokenArr);
# verify, since compute_geo_data indexes headers by the same index.
for i in range(30):
    counter_arr.append(0)
# work
compute_geo_data(stations, base_request_string, headers_arr,
                 resultArr, failArr, counter_arr=counter_arr)
#write successes data in a new file 'test_table_result.csv'
with open("/home/bigdata/db-ripper/misc/test_table_result.csv", "w", newline='', encoding='utf-8') as resultfile:
    writer = csv.writer(resultfile)
    for result in resultArr:
        writer.writerow(result)
#write failures in a new file 'test_table_fail.csv'
with open("/home/bigdata/db-ripper/misc/test_table_fail.csv", "w", newline='', encoding='utf-8') as failfile:
    writer = csv.writer(failfile)
    for fail in failArr:
        writer.writerow(fail)
| 2,642 | 0 | 88 |
5d41f03e0a4e0e25064b6ab9df87cfc137c1462d | 19,283 | py | Python | curt/cli.py | mittagessen/curt | f8c5fa8b785a87a126cdbbbaae3dba2be8acaa3b | [
"Apache-2.0"
] | null | null | null | curt/cli.py | mittagessen/curt | f8c5fa8b785a87a126cdbbbaae3dba2be8acaa3b | [
"Apache-2.0"
] | null | null | null | curt/cli.py | mittagessen/curt | f8c5fa8b785a87a126cdbbbaae3dba2be8acaa3b | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import glob
import time
import torch
import click
import os.path
import random
import logging
import pathlib
import datetime
import numpy as np
import torchvision.transforms as tf
from PIL import Image, ImageDraw
from pathlib import Path
from rich.logging import RichHandler
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, StochasticWeightAveraging
from curt.models import CurtCurveModel, MaskedCurtCurveModel
from curt.dataset import CurveDataModule
from curt.progress import KrakenTrainProgressBar
from curt.util.misc import NestedTensor
from curt.transforms import BezierCoeff
# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2
# route warnings.warn() output through the logging system
logging.captureWarnings(True)
# root logger; handlers/level are configured elsewhere
logger = logging.getLogger()
# presumably avoids fd exhaustion with many DataLoader workers -- confirm
torch.multiprocessing.set_sharing_strategy('file_system')
def _validate_merging(ctx, param, value):
"""
Maps baseline/region merging to a dict of merge structures.
"""
if not value:
return None
merge_dict = {}
try:
for m in value:
k, v = m.split(':')
merge_dict[v] = k # type: ignore
except Exception:
raise click.BadParameter('Mappings must be in format target:src')
return merge_dict
@click.group()
@click.pass_context
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
'ensure reproducible random splits of data')
@cli.command('polytrain')
@click.pass_context
@click.option('--precision', default='32', type=click.Choice(['64', '32', '16', 'bf16']), help='set tensor precision')
@click.option('-lr', '--learning-rate', default=0.0006, help='Learning rate')
@click.option('-blr', '--backbone-learning-rate', default=0.00006, help='Learning rate')
@click.option('-B', '--batch-size', default=1, help='Batch size')
@click.option('-w', '--weight-decay', default=0.01, help='Weight decay in optimizer')
@click.option('-N', '--epochs', default=25, help='Number of epochs to train for')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-lr-drop', '--lr-drop', default=15, help='Reduction factor of learning rate over time')
@click.option('--dropout', default=0.1, help='Dropout applied in the transformer')
@click.option('--match-cost-class', default=1.0, help='Class coefficient in the matching cost')
@click.option('--match-cost-curve', default=5.0, help='L1 curve coefficient in the matching cost')
@click.option('--curve-loss-coef', default=5.0, help='L1 curve coefficient in the loss')
@click.option('--eos-coef', default=0.1, help='Relative classification weight of the no-object class')
@click.option('--mask-loss-coef', default=1.0, help='Mask loss coefficient')
@click.option('--dice-loss-coef', default=1.0, help='Mask dice loss coefficient')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='curt_model', help='Pytorch lightning output directory')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('--merge-all-baselines/--no-merge-baselines',
show_default=True,
default=False,
help='Merge all baseline types into `default`')
@click.option('--workers', show_default=True, default=2, help='Number of data loader workers.')
@click.option('-d', '--device', show_default=True, default='1', help='Select device to use (1, ...)')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
@cli.command('train')
@click.pass_context
@click.option('--precision', default='32', type=click.Choice(['64', '32', '16', 'bf16']), help='set tensor precision')
@click.option('-lr', '--learning-rate', default=1e-4, help='Learning rate')
@click.option('-blr', '--backbone-learning-rate', default=1e-4, help='Learning rate')
@click.option('-B', '--batch-size', default=1, help='Batch size')
@click.option('-w', '--weight-decay', default=1e-4, help='Weight decay in optimizer')
@click.option('-N', '--epochs', default=300, help='Number of epochs to train for')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-lr-drop', '--lr-drop', default=200, help='Reduction factor of learning rate over time')
@click.option('--encoder', default='mit_b0', type=click.Choice(['mit_b0', 'mit_b1', 'mit_b2', 'mit_b3', 'mit_b4', 'mit_b5']), help='Encoding max transformers architecture')
@click.option('-dl', '--decoder-layers', default=3, help='Number of decoder layers in the transformer')
@click.option('-dff', '--dim-ff', default=2048, help='Intermediate size of the feedforward layers in the transformer block')
@click.option('-edd', '--embedding-dim', default=256, help='Size of the embeddings (dimension of the transformer')
@click.option('--dropout', default=0.1, help='Dropout applied in the transformer')
@click.option('-nh', '--num-heads', default=8, help="Number of attention heads inside the transformer's attentions")
@click.option('-nq', '--num-queries', default=500, help='Number of query slots (#lines + #regions detectable in an image)')
@click.option('--match-cost-class', default=1.0, help='Class coefficient in the matching cost')
@click.option('--match-cost-curve', default=5.0, help='L1 curve coefficient in the matching cost')
@click.option('--curve-loss-coef', default=5.0, help='L1 curve coefficient in the loss')
@click.option('--eos-coef', default=0.1, help='Relative classification weight of the no-object class')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='curt_model', help='Pytorch lightning output directory')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('--merge-all-baselines/--no-merge-baselines',
show_default=True,
default=False,
help='Merge all baseline types into `default`')
@click.option('--set-matcher/--dummy-matcher', show_default=True, default=True, help='Use the set criterion or dummy matching.')
@click.option('--aux-loss/--no-aux-loss', show_default=True, default=True, help='Enable auxiliary losses in decoder.')
@click.option('--workers', show_default=True, default=2, help='Number of data loader workers.')
@click.option('-d', '--device', show_default=True, default='1', help='Select device to use')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
@cli.command('pred')
@click.pass_context
@click.option('-i', '--load', help='Input model')
@click.option('-o', '--suffix', default='.overlay.png', show_default=True, help='Suffix for output files')
@click.option('-t', '--threshold', default=0.9, show_default=True, help='Minimum score for objectness')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.argument('input_files', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
if __name__ == '__main__':
cli()
| 49.955959 | 172 | 0.620858 | #! /usr/bin/env python
import glob
import time
import torch
import click
import os.path
import random
import logging
import pathlib
import datetime
import numpy as np
import torchvision.transforms as tf
from PIL import Image, ImageDraw
from pathlib import Path
from rich.logging import RichHandler
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, StochasticWeightAveraging
from curt.models import CurtCurveModel, MaskedCurtCurveModel
from curt.dataset import CurveDataModule
from curt.progress import KrakenTrainProgressBar
from curt.util.misc import NestedTensor
from curt.transforms import BezierCoeff
def set_logger(logger=None, level=logging.ERROR):
    """Attach a rich-traceback handler to *logger* and set its level.

    BUGFIX: the old body dereferenced ``logger`` unconditionally, so calling
    it with the documented default of ``None`` raised AttributeError; fall
    back to the root logger instead.
    """
    if logger is None:
        logger = logging.getLogger()
    logger.addHandler(RichHandler(rich_tracebacks=True))
    logger.setLevel(level)
# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2
# forward warnings.warn() output into the logging system
logging.captureWarnings(True)
# root logger; set_logger() attaches the handler and level later
logger = logging.getLogger()
# presumably avoids fd exhaustion with many DataLoader workers -- confirm
torch.multiprocessing.set_sharing_strategy('file_system')
def _expand_gt(ctx, param, value):
images = []
for expression in value:
images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
return images
def _validate_manifests(ctx, param, value):
images = []
for manifest in value:
for entry in manifest.readlines():
im_p = entry.rstrip('\r\n')
if os.path.isfile(im_p):
images.append(im_p)
else:
logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
return images
def _validate_merging(ctx, param, value):
"""
Maps baseline/region merging to a dict of merge structures.
"""
if not value:
return None
merge_dict = {}
try:
for m in value:
k, v = m.split(':')
merge_dict[v] = k # type: ignore
except Exception:
raise click.BadParameter('Mappings must be in format target:src')
return merge_dict
@click.group()
@click.pass_context
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
              help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
                   'ensure reproducible random splits of data')
def cli(ctx, verbose, seed):
    # Root command group: seeds the RNGs (when requested) and configures
    # logging verbosity before any subcommand runs.
    # NOTE(review): `if seed:` skips an explicit seed of 0 -- confirm intended.
    if seed:
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
    ctx.meta['verbose'] = verbose
    # each -v lowers the log level from WARNING (30) toward DEBUG (10)
    set_logger(logger, level=30 - min(10 * verbose, 20))
@cli.command('polytrain')
@click.pass_context
@click.option('--precision', default='32', type=click.Choice(['64', '32', '16', 'bf16']), help='set tensor precision')
@click.option('-lr', '--learning-rate', default=0.0006, help='Learning rate')
@click.option('-blr', '--backbone-learning-rate', default=0.00006, help='Learning rate')
@click.option('-B', '--batch-size', default=1, help='Batch size')
@click.option('-w', '--weight-decay', default=0.01, help='Weight decay in optimizer')
@click.option('-N', '--epochs', default=25, help='Number of epochs to train for')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-lr-drop', '--lr-drop', default=15, help='Reduction factor of learning rate over time')
@click.option('--dropout', default=0.1, help='Dropout applied in the transformer')
@click.option('--match-cost-class', default=1.0, help='Class coefficient in the matching cost')
@click.option('--match-cost-curve', default=5.0, help='L1 curve coefficient in the matching cost')
@click.option('--curve-loss-coef', default=5.0, help='L1 curve coefficient in the loss')
@click.option('--eos-coef', default=0.1, help='Relative classification weight of the no-object class')
@click.option('--mask-loss-coef', default=1.0, help='Mask loss coefficient')
@click.option('--dice-loss-coef', default=1.0, help='Mask dice loss coefficient')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='curt_model', help='Pytorch lightning output directory')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('--merge-all-baselines/--no-merge-baselines',
show_default=True,
default=False,
help='Merge all baseline types into `default`')
@click.option('--workers', show_default=True, default=2, help='Number of data loader workers.')
@click.option('-d', '--device', show_default=True, default='1', help='Select device to use (1, ...)')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def polytrain(ctx, precision, learning_rate, backbone_learning_rate,
              batch_size, weight_decay, epochs, freq, lr_drop, dropout,
              match_cost_class, match_cost_curve, curve_loss_coef, eos_coef,
              mask_loss_coef, dice_loss_coef, load, output, partition,
              training_files, evaluation_files, valid_baselines, merge_baselines,
              merge_all_baselines, workers, device, ground_truth):
    """Train the instance-mask head on top of a pretrained curve detection model.

    Bug fixes relative to the previous revision:
      * The pretrained checkpoint is loaded *before* the data module is
        built; previously ``CurveDataModule(..., max_lines=curt_model.num_queries)``
        referenced ``curt_model`` before assignment and raised at runtime.
      * ``batches_per_epoch`` is no longer passed to ``Trainer`` — it is a
        constructor argument of ``MaskedCurtCurveModel``, not of the
        Lightning ``Trainer`` (compare the ``train`` command below).
    """
    if evaluation_files:
        # An explicit evaluation set overrides the train/validation split.
        partition = 1
    ground_truth = list(ground_truth)
    if training_files:
        ground_truth.extend(training_files)
    if len(ground_truth) == 0:
        raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
    # freq > 1 selects "validate every int(freq) epochs"; freq <= 1 selects a
    # fractional intra-epoch validation interval.
    if freq > 1:
        val_check_interval = {'check_val_every_n_epoch': int(freq)}
    else:
        val_check_interval = {'val_check_interval': freq}
    if not valid_baselines:
        valid_baselines = None
    # Mask-head training requires pretrained curve weights, and the data
    # module is sized from the pretrained model's query count, so the
    # checkpoint must be loaded before constructing the data module.
    if not load:
        raise click.UsageError('No pretrained weights given for mask head training.')
    curt_model = CurtCurveModel.load_from_checkpoint(load).model
    data_module = CurveDataModule(train_files=ground_truth,
                                  val_files=evaluation_files,
                                  partition=partition,
                                  valid_baselines=valid_baselines,
                                  merge_baselines=merge_baselines,
                                  merge_all_baselines=merge_all_baselines,
                                  max_lines=curt_model.num_queries,
                                  batch_size=batch_size,
                                  num_workers=workers,
                                  masks=True)
    model = MaskedCurtCurveModel(curt_model,
                                 learning_rate=learning_rate,
                                 weight_decay=weight_decay,
                                 lr_drop=lr_drop,
                                 match_cost_class=match_cost_class,
                                 match_cost_curve=match_cost_curve,
                                 curve_loss_coef=curve_loss_coef,
                                 mask_loss_coef=mask_loss_coef,
                                 dice_loss_coef=dice_loss_coef,
                                 eos_coef=eos_coef,
                                 batches_per_epoch=len(data_module.train_dataloader()))
    click.echo("Line types: There's only one.")
    # for k, v in data_module.curve_train.dataset.class_mapping.items():
    #    click.echo(f'{k}\t{v}')
    checkpoint_cb = ModelCheckpoint(monitor='loss', save_top_k=5, mode='min')
    # Lightning expects numeric precision values for everything except 'bf16'.
    if precision != 'bf16':
        precision = int(precision)
    trainer = Trainer(default_root_dir=output,
                      precision=precision,
                      max_epochs=epochs,
                      auto_select_gpus=True,
                      accelerator='gpu',
                      devices=device,
                      strategy='ddp',
                      callbacks=[KrakenTrainProgressBar(), checkpoint_cb, StochasticWeightAveraging(swa_epoch_start=0.8, annealing_epochs=int(0.2*epochs))],
                      **val_check_interval)
    trainer.fit(model, data_module)
@cli.command('train')
@click.pass_context
@click.option('--precision', default='32', type=click.Choice(['64', '32', '16', 'bf16']), help='set tensor precision')
@click.option('-lr', '--learning-rate', default=1e-4, help='Learning rate')
@click.option('-blr', '--backbone-learning-rate', default=1e-4, help='Learning rate')
@click.option('-B', '--batch-size', default=1, help='Batch size')
@click.option('-w', '--weight-decay', default=1e-4, help='Weight decay in optimizer')
@click.option('-N', '--epochs', default=300, help='Number of epochs to train for')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs '
'during training. If frequency is >1 it must be an integer, '
'i.e. running validation every n-th epoch.')
@click.option('-lr-drop', '--lr-drop', default=200, help='Reduction factor of learning rate over time')
@click.option('--encoder', default='mit_b0', type=click.Choice(['mit_b0', 'mit_b1', 'mit_b2', 'mit_b3', 'mit_b4', 'mit_b5']), help='Encoding max transformers architecture')
@click.option('-dl', '--decoder-layers', default=3, help='Number of decoder layers in the transformer')
@click.option('-dff', '--dim-ff', default=2048, help='Intermediate size of the feedforward layers in the transformer block')
@click.option('-edd', '--embedding-dim', default=256, help='Size of the embeddings (dimension of the transformer')
@click.option('--dropout', default=0.1, help='Dropout applied in the transformer')
@click.option('-nh', '--num-heads', default=8, help="Number of attention heads inside the transformer's attentions")
@click.option('-nq', '--num-queries', default=500, help='Number of query slots (#lines + #regions detectable in an image)')
@click.option('--match-cost-class', default=1.0, help='Class coefficient in the matching cost')
@click.option('--match-cost-curve', default=5.0, help='L1 curve coefficient in the matching cost')
@click.option('--curve-loss-coef', default=5.0, help='L1 curve coefficient in the loss')
@click.option('--eos-coef', default=0.1, help='Relative classification weight of the no-object class')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='curt_model', help='Pytorch lightning output directory')
@click.option('-p', '--partition', show_default=True, default=0.9,
help='Ground truth data partition ratio between train/validation set')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True,
help='Valid baseline types in training data. May be used multiple times.')
@click.option('-mb',
'--merge-baselines',
show_default=True,
default=None,
help='Baseline type merge mapping. Same syntax as `--merge-regions`',
multiple=True,
callback=_validate_merging)
@click.option('--merge-all-baselines/--no-merge-baselines',
show_default=True,
default=False,
help='Merge all baseline types into `default`')
@click.option('--set-matcher/--dummy-matcher', show_default=True, default=True, help='Use the set criterion or dummy matching.')
@click.option('--aux-loss/--no-aux-loss', show_default=True, default=True, help='Enable auxiliary losses in decoder.')
@click.option('--workers', show_default=True, default=2, help='Number of data loader workers.')
@click.option('-d', '--device', show_default=True, default='1', help='Select device to use')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, precision, learning_rate, backbone_learning_rate, batch_size,
          weight_decay, epochs, freq, lr_drop, encoder, decoder_layers, dim_ff,
          embedding_dim, dropout, num_heads, num_queries, match_cost_class,
          match_cost_curve, curve_loss_coef, eos_coef, load, output, partition,
          training_files, evaluation_files, valid_baselines, merge_baselines,
          merge_all_baselines, set_matcher, aux_loss, workers, device,
          ground_truth):
    """Train a curve detection model from scratch or resume from a checkpoint.

    Assembles the ground-truth file list, builds a ``CurveDataModule``,
    constructs (or loads) a ``CurtCurveModel``, and runs a Lightning
    ``Trainer`` with DDP on GPU devices selected by ``--device``.
    """
    if evaluation_files:
        # An explicit evaluation set overrides the train/validation split
        # ratio: all remaining data goes into the training partition.
        partition = 1
    ground_truth = list(ground_truth)
    if training_files:
        ground_truth.extend(training_files)
    if len(ground_truth) == 0:
        raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
    # freq > 1 selects "validate every int(freq) epochs"; freq <= 1 selects a
    # fractional intra-epoch validation interval. The resulting single-entry
    # dict is splatted into the Trainer constructor below.
    if freq > 1:
        val_check_interval = {'check_val_every_n_epoch': int(freq)}
    else:
        val_check_interval = {'val_check_interval': freq}
    # click's `multiple=True` yields an empty tuple when unset; normalize to
    # None, which the data module interprets as "accept all baseline types".
    if not valid_baselines:
        valid_baselines = None
    data_module = CurveDataModule(train_files=ground_truth,
                                  val_files=evaluation_files,
                                  partition=partition,
                                  valid_baselines=valid_baselines,
                                  merge_baselines=merge_baselines,
                                  merge_all_baselines=merge_all_baselines,
                                  max_lines=num_queries,
                                  batch_size=batch_size,
                                  num_workers=workers)
    click.echo("Line types: There's only one.")
    # for k, v in data_module.curve_train.dataset.class_mapping.items():
    #    click.echo(f'{k}\t{v}')
    if load:
        # Resume: all hyperparameters are restored from the checkpoint; the
        # CLI architecture options are ignored in this branch.
        model = CurtCurveModel.load_from_checkpoint(load)
    else:
        model = CurtCurveModel(data_module.num_classes,
                               num_queries=num_queries,
                               learning_rate=learning_rate,
                               backbone_learning_rate=backbone_learning_rate,
                               weight_decay=weight_decay,
                               lr_drop=lr_drop,
                               match_cost_class=match_cost_class,
                               match_cost_curve=match_cost_curve,
                               curve_loss_coef=curve_loss_coef,
                               eos_coef=eos_coef,
                               embedding_dim=embedding_dim,
                               dropout=dropout,
                               num_heads=num_heads,
                               dim_ff=dim_ff,
                               encoder=encoder,
                               decoder_layers=decoder_layers,
                               set_matcher=set_matcher,
                               aux_loss=aux_loss)
    # Keep the five best checkpoints by (training) loss.
    checkpoint_cb = ModelCheckpoint(monitor='loss', save_top_k=5, mode='min')
    # Lightning expects numeric precision values for everything except 'bf16'.
    if precision != 'bf16':
        precision = int(precision)
    trainer = Trainer(default_root_dir=output,
                      precision=precision,
                      max_epochs=epochs,
                      auto_select_gpus=True,
                      accelerator='gpu',
                      devices=device,
                      strategy='ddp',
                      callbacks=[KrakenTrainProgressBar(), checkpoint_cb, StochasticWeightAveraging(swa_epoch_start=0.8, annealing_epochs=int(0.2*epochs))],
                      **val_check_interval)
    trainer.fit(model, data_module)
@cli.command('pred')
@click.pass_context
@click.option('-i', '--load', help='Input model')
@click.option('-o', '--suffix', default='.overlay.png', show_default=True, help='Suffix for output files')
@click.option('-t', '--threshold', default=0.9, show_default=True, help='Minimum score for objectness')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.argument('input_files', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def pred(ctx, load, suffix, threshold, device, input_files):
    """Run curve inference on images and write overlay renderings.

    For each input image, runs the pretrained curve model, keeps predictions
    classified as a line (label 1) with score above ``threshold``, draws the
    Bezier curves onto the image, and saves it as ``<name><suffix>``.

    Fix relative to the previous revision: the unused ``open(file, 'rb')``
    context manager was removed — its handle was never read and
    ``Image.open(file)`` opens the path itself.
    """
    curt_model = CurtCurveModel.load_from_checkpoint(load).model
    curt_model = curt_model.to(device)
    # Preprocessing pipeline: resize + ImageNet mean/std normalization.
    transforms = tf.Compose([tf.Resize(800),
                             tf.ToTensor(),
                             tf.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    for file in input_files:
        click.echo(f'Processing {file}')
        file = pathlib.Path(file)
        im = Image.open(file)
        with open(file.with_suffix(suffix), 'wb') as fo:
            with torch.no_grad():
                i = transforms(im).to(device).unsqueeze(0)
                # All-zero padding mask: the whole single image is valid.
                mask = torch.zeros((1,) + i.shape[2:], device=device)
                i = NestedTensor(i, mask)
                o = curt_model(i)
            draw = ImageDraw.Draw(im)
            samples = np.linspace(0, 1, 20)
            curves, logits = o['pred_curves'], o['pred_logits']
            # Keep only predictions classified as "line" (label 1) whose
            # confidence exceeds the threshold.
            scores, labels = logits.softmax(-1).max(-1)
            keep = labels.eq(1) & (scores > threshold)
            curves = curves[keep]
            curves = curves.to('cpu')
            for line in curves:
                # Curves are normalized control points; `im.size * 4` repeats
                # the (w, h) tuple four times so the flat 8-vector scales to
                # pixel coordinates, then is reshaped to 4 (x, y) points.
                line = (np.array(line) * (im.size * 4))
                line.resize(4, 2)
                draw.line([tuple(x) for x in np.array(BezierCoeff(samples)).dot(line)], fill=(0, 130, 200, 255), width=2, joint='curve')
            del draw
            im.save(fo, format='png')
# Entry point when this module is executed directly as a script.
if __name__ == '__main__':
    cli()
| 9,201 | 0 | 157 |
69886e37db12958e6460c2b4e7e9a2b9adf662f5 | 430 | py | Python | panGraphViewerWeb/pangraphviewer/models.py | q623928815/panGraphViewer | 05ca341d74d3cbabcbbaa16f5d1be5292c217447 | [
"MIT"
] | 13 | 2021-08-05T09:01:35.000Z | 2022-03-05T17:20:08.000Z | panGraphViewerWeb/pangraphviewer/models.py | q623928815/panGraphViewer | 05ca341d74d3cbabcbbaa16f5d1be5292c217447 | [
"MIT"
] | 2 | 2021-08-05T13:01:58.000Z | 2022-01-09T09:11:45.000Z | panGraphViewerWeb/pangraphviewer/models.py | q623928815/panGraphViewer | 05ca341d74d3cbabcbbaa16f5d1be5292c217447 | [
"MIT"
] | 2 | 2021-08-14T03:58:25.000Z | 2022-01-04T05:45:27.000Z | from django.db import models
# Create your models here.
| 26.875 | 60 | 0.725581 | from django.db import models
# Create your models here.
class Upload(models.Model):
    """A single user-uploaded file, stored under MEDIA_ROOT/files/."""
    #image = models.ImageField(upload_to='images')
    image = models.FileField(upload_to='files')  # despite the name, any file type is accepted
    def __str__(self):
        """Return the primary key as the human-readable representation."""
        return str(self.pk)
class Profile(models.Model):
    """Per-user settings: maps a unique username to a working base directory."""
    user_id = models.AutoField(primary_key=True)  # explicit surrogate primary key
    username = models.CharField(max_length=500, unique=True)
    work_base_dir = models.CharField(max_length=500)  # presumably the user's file workspace root — confirm against callers
| 25 | 302 | 46 |
39fa3e4bb022ce4e7de1242c59424ed31de363f0 | 1,598 | py | Python | util/archives.py | roteroktober/stromx | e081a35114f68a77e99a4761946b8b8c64eb591a | [
"Apache-2.0"
] | null | null | null | util/archives.py | roteroktober/stromx | e081a35114f68a77e99a4761946b8b8c64eb591a | [
"Apache-2.0"
] | null | null | null | util/archives.py | roteroktober/stromx | e081a35114f68a77e99a4761946b8b8c64eb591a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import os
import re
import shutil
import zipfile
# Walk a directory tree and either extract every .zip/.stromx archive into a
# sibling "<name>.extracted" directory, or (with --compress) re-pack such
# directories back into archives and delete them.
# NOTE(review): this is Python 2 code (print statements).
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, nargs=1)
# Exactly one of --extract / --compress must be given.
group = parser.add_mutually_exclusive_group(required = True)
group.add_argument('--extract', action='store_true')
group.add_argument('--compress', action='store_true')
args = parser.parse_args()
for root, dirs, files in os.walk(args.directory[0]):
    if args.extract:
        for f in files:
            # NOTE(review): pattern is unanchored — matches extensions anywhere
            # in the name; '.' also matches any char. Confirm this is intended.
            if re.match(r".+\.(zip|stromx)", f):
                path = os.path.join(root, f)
                try:
                    with zipfile.ZipFile(path, 'r') as archive:
                        dirname = "{0}.extracted".format(f)
                        dirpath = os.path.join(root, dirname)
                        os.mkdir(dirpath)
                        archive.extractall(dirpath)
                        print "Created {0}".format(dirpath)
                except zipfile.BadZipfile:
                    print "Failed to open {0}".format(path)
    else:
        for d in dirs:
            if re.match(r".+\.(zip|stromx).extracted", d):
                # Strip the ".extracted" suffix to recover the archive name.
                f = d[:-len(".extracted")]
                filePath = os.path.join(root, f)
                dirPath = os.path.join(root, d)
                with zipfile.ZipFile(filePath, 'w') as archive:
                    # NOTE(review): only top-level entries are re-archived;
                    # nested directories inside the extracted tree would be
                    # added as entries but their contents lost — confirm the
                    # archives are known to be flat.
                    for member in os.listdir(dirPath):
                        memberPath = os.path.join(root, d, member)
                        archive.write(memberPath, member)
                # Remove the extracted directory once the archive is rebuilt.
                shutil.rmtree(dirPath)
print "Created {0}".format(filePath) | 38.047619 | 66 | 0.536921 | # -*- coding: utf-8 -*-
import argparse
import os
import re
import shutil
import zipfile
# CLI: given a directory, either extract all .zip/.stromx archives found in
# the tree into "<name>.extracted" directories, or re-compress those
# directories back into archives (deleting the directories afterwards).
# NOTE(review): Python 2 syntax (print statements).
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, nargs=1)
# --extract and --compress are mutually exclusive and one is required.
group = parser.add_mutually_exclusive_group(required = True)
group.add_argument('--extract', action='store_true')
group.add_argument('--compress', action='store_true')
args = parser.parse_args()
for root, dirs, files in os.walk(args.directory[0]):
    if args.extract:
        for f in files:
            # Unanchored match on the archive extension (see re.match docs).
            if re.match(r".+\.(zip|stromx)", f):
                path = os.path.join(root, f)
                try:
                    with zipfile.ZipFile(path, 'r') as archive:
                        dirname = "{0}.extracted".format(f)
                        dirpath = os.path.join(root, dirname)
                        os.mkdir(dirpath)
                        archive.extractall(dirpath)
                        print "Created {0}".format(dirpath)
                except zipfile.BadZipfile:
                    # Corrupt or non-zip file: report and continue.
                    print "Failed to open {0}".format(path)
    else:
        for d in dirs:
            if re.match(r".+\.(zip|stromx).extracted", d):
                # Recover the original archive filename.
                f = d[:-len(".extracted")]
                filePath = os.path.join(root, f)
                dirPath = os.path.join(root, d)
                with zipfile.ZipFile(filePath, 'w') as archive:
                    # Only the top level of the extracted directory is packed
                    # — assumes flat archives; TODO confirm.
                    for member in os.listdir(dirPath):
                        memberPath = os.path.join(root, d, member)
                        archive.write(memberPath, member)
                shutil.rmtree(dirPath)
print "Created {0}".format(filePath) | 0 | 0 | 0 |
8dc0317b820267511cd7dbce5551f965f80a08c5 | 4,594 | py | Python | tests/core/test_prediction_problem_generator.py | FeatureLabs/Trane | 04c18c2f2e2351706d2434ea355c01892bab22ae | [
"MIT"
] | 31 | 2018-04-02T15:46:28.000Z | 2022-01-22T02:41:44.000Z | tests/core/test_prediction_problem_generator.py | FeatureLabs/Trane | 04c18c2f2e2351706d2434ea355c01892bab22ae | [
"MIT"
] | 17 | 2018-04-26T19:17:28.000Z | 2018-11-20T19:11:47.000Z | tests/core/test_prediction_problem_generator.py | FeatureLabs/Trane | 04c18c2f2e2351706d2434ea355c01892bab22ae | [
"MIT"
] | 13 | 2018-05-03T23:41:40.000Z | 2021-09-27T00:53:48.000Z | """TESTING STRATEGY
Function: generate(self)
1. Ensure the correct # of problems are generated.
2. Generated problems are of proper type
3. Ensure prediction problems are generated in order of
Filter->Row->Transformation->Aggregation
"""
import unittest
import pandas as pd
from mock import MagicMock, call, patch
from trane.core.prediction_problem_generator import PredictionProblemGenerator
from trane.ops.aggregation_ops import * # noqa
from trane.ops.filter_ops import * # noqa
from trane.ops.row_ops import * # noqa
from trane.ops.transformation_ops import * # noqa
from trane.utils.table_meta import TableMeta
class TestPredictionProblemGeneratorValidation(unittest.TestCase):
    """Separate test case for input validation: unlike the other tests, the
    ``ensure_valid_inputs`` method cannot be mocked out here."""

    def create_patch(self, name, return_value=None):
        """Activate a patch for ``name``, schedule its removal on teardown,
        and return the started mock (optionally pre-loading ``return_value``)."""
        active_patcher = patch(name)
        started_mock = active_patcher.start()
        self.addCleanup(active_patcher.stop)
        if return_value:
            started_mock.return_value = return_value
        return started_mock
| 34.80303 | 79 | 0.614497 | """TESTING STRATEGY
Function: generate(self)
1. Ensure the correct # of problems are generated.
2. Generated problems are of proper type
3. Ensure prediction problems are generated in order of
Filter->Row->Transformation->Aggregation
"""
import unittest
import pandas as pd
from mock import MagicMock, call, patch
from trane.core.prediction_problem_generator import PredictionProblemGenerator
from trane.ops.aggregation_ops import * # noqa
from trane.ops.filter_ops import * # noqa
from trane.ops.row_ops import * # noqa
from trane.ops.transformation_ops import * # noqa
from trane.utils.table_meta import TableMeta
class TestPredictionProblemGenerator(unittest.TestCase):
    # Tests PredictionProblemGenerator with mocked metadata (setUp) and with a
    # real TableMeta + DataFrame (prep_for_integration / test_generate).
    def setUp(self):
        # Column names matching the synthetic taxi dataset used below.
        self.table_meta_mock = MagicMock()
        self.entity_col = "taxi_id"
        self.label_col = "fare"
        self.filter_col = "taxi_id"
        # Bypass input validation so the mocked table_meta is accepted.
        self.ensure_valid_inputs_patch = self.create_patch(
            'trane.core.PredictionProblemGenerator.ensure_valid_inputs')
        self.generator = PredictionProblemGenerator(
            table_meta=self.table_meta_mock,
            entity_col=self.entity_col,
            label_col=self.label_col,
            filter_col=self.filter_col)
    def prep_for_integration(self):
        '''
        Creates a full fledged prediction problem generator without
        a mocked out ensure_valid_inputs method
        '''
        # Line-continued JSON literal describing the synthetic taxi table
        # schema; parsed by TableMeta.from_json below.
        meta_json_str = ' \
            {"path": "", \
            "tables": [ \
                {"path": "synthetic_taxi_data.csv",\
                "name": "taxi_data", \
                "fields": [ \
                    {"name": "vendor_id", "type": "id"},\
                    {"name": "taxi_id", "type": "id"}, \
                    {"name": "trip_id", "type": "datetime"}, \
                    {"name": "distance", "type": "number", "subtype": "float"}, \
                    {"name": "duration", "type": "number", "subtype": "float"}, \
                    {"name": "fare", "type": "number", "subtype": "float"}, \
                    {"name": "num_passengers", "type": "number", \
                        "subtype": "float"} \
                ]}]}'
        self.table_meta = TableMeta.from_json(meta_json_str)
        # Three synthetic taxi trips matching the schema above.
        self.df = pd.DataFrame(
            [(0, 0, 0, 5.32, 19.7, 53.89, 1),
             (0, 0, 1, 1.08, 6.78, 18.89, 2),
             (0, 0, 2, 4.69, 14.11, 41.35, 4)],
            columns=["vendor_id", "taxi_id", "trip_id", "distance", "duration",
                     "fare", "num_passengers"])
        # Rebind self.generator with the real (unmocked) metadata.
        self.generator = PredictionProblemGenerator(
            table_meta=self.table_meta,
            entity_col=self.entity_col,
            label_col=self.label_col,
            filter_col=self.filter_col)
    def create_patch(self, name, return_value=None):
        '''helper method for creating patches'''
        patcher = patch(name)
        thing = patcher.start()
        # Ensure the patch is undone automatically at test teardown.
        self.addCleanup(patcher.stop)
        if return_value:
            thing.return_value = return_value
        return thing
    def test_generate(self):
        # Smoke test: generate() runs without raising on real data.
        self.prep_for_integration()
        self.assertIsNotNone(self.generator.generate)
        self.generator.generate(self.df)
class TestPredictionProblemGeneratorValidation(unittest.TestCase):
    '''
    TestPredictionProblemGeneratorValidation has its own class, because unlike
    other tests, the ensure_valid_inputs method cannot be mocked out.
    '''
    def create_patch(self, name, return_value=None):
        '''helper method for creating patches'''
        patcher = patch(name)
        thing = patcher.start()
        # Undo the patch automatically at test teardown.
        self.addCleanup(patcher.stop)
        if return_value:
            thing.return_value = return_value
        return thing
    # NOTE(review): method name contains a typo ("imputs" -> "inputs");
    # renaming would change which test id unittest discovers, so it is
    # deliberately left as-is here.
    def test_ensure_valid_imputs(self):
        table_meta_mock = MagicMock()
        entity_col = "taxi_id"
        label_col = "fare"
        filter_col = "taxi_id"
        # set up table_meta types
        table_meta_mock.get_type.return_value = True
        table_meta_patch = self.create_patch(
            'trane.core.prediction_problem_generator.TableMeta', 'tm_patch')
        # Make every TYPE_* constant truthy so the validation's type checks pass.
        table_meta_patch.TYPE_IDENTIFIER = True
        table_meta_patch.TYPE_FLOAT = True
        table_meta_patch.TYPE_TIME = True
        # create generator
        generator = PredictionProblemGenerator(
            table_meta=table_meta_mock,
            entity_col=entity_col,
            label_col=label_col,
            filter_col=filter_col)
        # NOTE(review): this asserts the bound method exists, not that it
        # succeeds — a bound method is always non-None. Validation itself runs
        # inside the constructor above.
        self.assertIsNotNone(generator.ensure_valid_inputs)
        table_meta_mock.get_type.assert_has_calls([
            call(entity_col),
            call(label_col)], any_order=True)
| 1,520 | 1,867 | 50 |
d102d09522f569e78de5c05b74c53dceac5c7cdc | 17,668 | py | Python | source/pydwf/core/api/digital_spi.py | sidneycadot/pydwf | cd9eba8b45d990f09095bec62b20115f0757baba | [
"MIT"
] | 14 | 2021-05-10T16:19:45.000Z | 2022-03-13T08:30:12.000Z | source/pydwf/core/api/digital_spi.py | sidneycadot/pydwf | cd9eba8b45d990f09095bec62b20115f0757baba | [
"MIT"
] | 22 | 2021-05-01T09:51:09.000Z | 2021-11-13T12:35:36.000Z | source/pydwf/core/api/digital_spi.py | sidneycadot/pydwf | cd9eba8b45d990f09095bec62b20115f0757baba | [
"MIT"
] | 2 | 2021-05-02T12:13:16.000Z | 2022-03-11T21:15:07.000Z | """The |pydwf.core.api.digital_spi| module implements a single class: |DigitalSpi|."""
from typing import List
from pydwf.core.dwf_device_subapi import AbstractDwfDeviceSubApi
from pydwf.core.auxiliary.typespec_ctypes import typespec_ctypes
from pydwf.core.auxiliary.constants import RESULT_SUCCESS
from pydwf.core.auxiliary.enum_types import DwfDigitalOutIdle
class DigitalSpi(AbstractDwfDeviceSubApi):
    """The |DigitalSpi| class provides access to the SPI protocol functionality of a |DwfDevice:link|.

    Attention:
        Users of |pydwf| should not create instances of this class directly.

        It is instantiated during initialization of a |DwfDevice| and subsequently assigned to its public
        |digitalSpi:link| attribute for access by the user.

    Implementation note:
        The 8/16/32-bit word-transfer methods differ only in the underlying C
        entry point and ctypes buffer element type; they share the private
        ``_spi_write_read``, ``_spi_read``, and ``_spi_write`` helpers.
    """

    def _check_result(self, result: int) -> None:
        """Raise the library's pending exception if *result* signals failure."""
        if result != RESULT_SUCCESS:
            raise self.dwf.exception()

    def _spi_write_read(self, func, element_type, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
        """Shared write/read implementation for 8-, 16-, and 32-bit data-words.

        *func* is the C entry point; *element_type* is the ctypes element type
        matching the word width.
        """
        tx_list = list(tx)
        number_of_words = len(tx_list)
        buffer_type = element_type * number_of_words
        tx_buffer = buffer_type(*tx_list)
        rx_buffer = buffer_type()
        self._check_result(func(
            self.hdwf, transfer_type, bits_per_word,
            tx_buffer, number_of_words, rx_buffer, number_of_words))
        return list(rx_buffer)

    def _spi_read(self, func, element_type, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
        """Shared read implementation for 8-, 16-, and 32-bit data-words."""
        rx_buffer = (element_type * number_of_words)()
        self._check_result(func(self.hdwf, transfer_type, bits_per_word, rx_buffer, number_of_words))
        return list(rx_buffer)

    def _spi_write(self, func, element_type, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
        """Shared write implementation for 8-, 16-, and 32-bit data-words."""
        tx_list = list(tx)
        number_of_words = len(tx_list)
        tx_buffer = (element_type * number_of_words)(*tx_list)
        self._check_result(func(self.hdwf, transfer_type, bits_per_word, tx_buffer, number_of_words))

    def reset(self) -> None:
        """Reset the SPI protocol support.

        Raises:
            DwfLibraryError: An error occurred while executing the *reset* operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiReset(self.hdwf))

    def frequencySet(self, frequency: float) -> None:
        """Set the SPI frequency, in Hz.

        Parameters:
            frequency (float): SPI frequency, in Hz.

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiFrequencySet(self.hdwf, frequency))

    def clockSet(self, channel_index: int) -> None:
        """Set the digital channel (pin) for the SPI clock signal.

        Parameters:
            channel_index (int): The digital channel (pin) where the SPI clock signal will be generated.

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiClockSet(self.hdwf, channel_index))

    def dataSet(self, spi_data_bit: int, channel_index: int) -> None:
        """Set the digital channel (pin) for an SPI data bit.

        Parameters:
            spi_data_bit (int): The data bit to configure:
                0 — DQ0 / MOSI / SISO; 1 — DQ1 / MISO; 2 — DQ2; 3 — DQ3.
            channel_index (int): The digital channel (pin) for this data bit.

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiDataSet(self.hdwf, spi_data_bit, channel_index))

    def idleSet(self, spi_data_bit: int, idle_mode: DwfDigitalOutIdle) -> None:
        """Set the idle behavior for an SPI data bit.

        Parameters:
            spi_data_bit (int): The data bit to configure:
                0 — DQ0 / MOSI / SISO; 1 — DQ1 / MISO; 2 — DQ2; 3 — DQ3.
            idle_mode (DwfDigitalOutIdle): The idle behavior of this bit.

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiIdleSet(self.hdwf, spi_data_bit, idle_mode.value))

    def modeSet(self, spi_mode: int) -> None:
        """Set the SPI mode (clock polarity and phase).

        Parameters:
            spi_mode (int): The values for CPOL (polarity) and CPHA (phase) to use with the attached
                slave device:

                * 0 — CPOL = 0, CPHA = 0
                * 1 — CPOL = 0, CPHA = 1
                * 2 — CPOL = 1, CPHA = 0
                * 3 — CPOL = 1, CPHA = 1

                Refer to the slave device's datasheet to select the correct value.

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiModeSet(self.hdwf, spi_mode))

    def orderSet(self, bit_order: int) -> None:
        """Set the SPI data bit order.

        Parameters:
            bit_order (int): 1 — MSB first, LSB last; 0 — LSB first, MSB last.

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiOrderSet(self.hdwf, bit_order))

    def select(self, channel_index: int, level: int) -> None:
        """Set the chip select (CS) status.

        The CS (chip select) signal is usually active-low, from the SPI bus master to a specific
        slave device. Before starting a bus request, the master should set CS to 0 for the chip it
        wants to talk to. Usually each slave on an SPI bus has its own CS line; at most one of them
        should be selected at any time.

        Parameters:
            channel_index (int): The digital channel (pin) for the Chip Select signal.
            level (int): The Chip Select level to configure:
                0 — low; 1 — high; -1 — Z (high impedance).

        Raises:
            DwfLibraryError: An error occurred while executing the operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiSelect(self.hdwf, channel_index, level))

    def writeRead(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
        """Write and read multiple SPI data-words, with up to 8 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…8).
            tx (List[int]): The data-words to write.

        Returns:
            List[int]: The data-words received.

        Raises:
            DwfLibraryError: An error occurred while executing the write/read operation.
        """
        return self._spi_write_read(self.lib.FDwfDigitalSpiWriteRead,
                                    typespec_ctypes.c_unsigned_char,
                                    transfer_type, bits_per_word, tx)

    def writeRead16(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
        """Write and read multiple SPI data-words, with up to 16 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…16).
            tx (List[int]): The data-words to write.

        Returns:
            List[int]: The data-words received.

        Raises:
            DwfLibraryError: An error occurred while executing the write/read operation.
        """
        return self._spi_write_read(self.lib.FDwfDigitalSpiWriteRead16,
                                    typespec_ctypes.c_unsigned_short,
                                    transfer_type, bits_per_word, tx)

    def writeRead32(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
        """Write and read multiple SPI data-words, with up to 32 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…32).
            tx (List[int]): The data-words to write.

        Returns:
            List[int]: The data-words received.

        Raises:
            DwfLibraryError: An error occurred while executing the write/read operation.
        """
        return self._spi_write_read(self.lib.FDwfDigitalSpiWriteRead32,
                                    typespec_ctypes.c_unsigned_int,
                                    transfer_type, bits_per_word, tx)

    def read(self, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
        """Read multiple SPI data-words, with up to 8 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…8).
            number_of_words (int): The number of data-words to read.

        Returns:
            List[int]: The data-words received.

        Raises:
            DwfLibraryError: An error occurred while executing the read operation.
        """
        return self._spi_read(self.lib.FDwfDigitalSpiRead,
                              typespec_ctypes.c_unsigned_char,
                              transfer_type, bits_per_word, number_of_words)

    def read16(self, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
        """Read multiple SPI data-words, with up to 16 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…16).
            number_of_words (int): The number of data-words to read.

        Returns:
            List[int]: The data-words received.

        Raises:
            DwfLibraryError: An error occurred while executing the read operation.
        """
        return self._spi_read(self.lib.FDwfDigitalSpiRead16,
                              typespec_ctypes.c_unsigned_short,
                              transfer_type, bits_per_word, number_of_words)

    def read32(self, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
        """Read multiple SPI data-words, with up to 32 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…32).
            number_of_words (int): The number of data-words to read.

        Returns:
            List[int]: The data-words received.

        Raises:
            DwfLibraryError: An error occurred while executing the read operation.
        """
        return self._spi_read(self.lib.FDwfDigitalSpiRead32,
                              typespec_ctypes.c_unsigned_int,
                              transfer_type, bits_per_word, number_of_words)

    def readOne(self, transfer_type: int, bits_per_word: int) -> int:
        """Read a single SPI data-word, with up to 32 bits.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits of the data-word (1…32).

        Returns:
            int: The data-word received.

        Raises:
            DwfLibraryError: An error occurred while executing the read operation.
        """
        c_rx = typespec_ctypes.c_unsigned_int()
        self._check_result(self.lib.FDwfDigitalSpiReadOne(self.hdwf, transfer_type, bits_per_word, c_rx))
        return c_rx.value

    def write(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
        """Write multiple SPI data-words, with up to 8 bits per data-word.

        Note:
            The previous docstring claimed up to 32 bits per data-word, but this
            method marshals through an 8-bit (unsigned char) buffer; use
            :py:meth:`write16` or :py:meth:`write32` for wider data-words.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…8).
            tx (List[int]): The data-words to write.

        Raises:
            DwfLibraryError: An error occurred while executing the write operation.
        """
        self._spi_write(self.lib.FDwfDigitalSpiWrite,
                        typespec_ctypes.c_unsigned_char,
                        transfer_type, bits_per_word, tx)

    def write16(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
        """Write multiple SPI data-words, with up to 16 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…16).
            tx (List[int]): The data-words to write.

        Raises:
            DwfLibraryError: An error occurred while executing the write operation.
        """
        self._spi_write(self.lib.FDwfDigitalSpiWrite16,
                        typespec_ctypes.c_unsigned_short,
                        transfer_type, bits_per_word, tx)

    def write32(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
        """Write multiple SPI data-words, with up to 32 bits per data-word.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits per data-word (1…32).
            tx (List[int]): The data-words to write.

        Raises:
            DwfLibraryError: An error occurred while executing the write operation.
        """
        self._spi_write(self.lib.FDwfDigitalSpiWrite32,
                        typespec_ctypes.c_unsigned_int,
                        transfer_type, bits_per_word, tx)

    def writeOne(self, transfer_type: int, bits_per_word: int, tx: int) -> None:
        """Write a single SPI data-word, with up to 32 bits.

        Parameters:
            transfer_type (int): 0 — SISO; 1 — MOSI/MISO; 2 — dual; 4 — quad.
            bits_per_word (int): The number of bits of the data-word (1…32).
            tx (int): The data-word to write.

        Raises:
            DwfLibraryError: An error occurred while executing the write operation.
        """
        self._check_result(self.lib.FDwfDigitalSpiWriteOne(self.hdwf, transfer_type, bits_per_word, tx))
| 31.050967 | 105 | 0.553883 | """The |pydwf.core.api.digital_spi| module implements a single class: |DigitalSpi|."""
from typing import List
from pydwf.core.dwf_device_subapi import AbstractDwfDeviceSubApi
from pydwf.core.auxiliary.typespec_ctypes import typespec_ctypes
from pydwf.core.auxiliary.constants import RESULT_SUCCESS
from pydwf.core.auxiliary.enum_types import DwfDigitalOutIdle
class DigitalSpi(AbstractDwfDeviceSubApi):
"""The |DigitalSpi| class provides access to the SPI protocol functionality of a |DwfDevice:link|.
Attention:
Users of |pydwf| should not create instances of this class directly.
It is instantiated during initialization of a |DwfDevice| and subsequently assigned to its public
|digitalSpi:link| attribute for access by the user.
"""
def reset(self) -> None:
"""Reset the SPI protocol support.
Raises:
DwfLibraryError: An error occurred while executing the *reset* operation.
"""
result = self.lib.FDwfDigitalSpiReset(self.hdwf)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def frequencySet(self, frequency: float) -> None:
"""Set the SPI frequency, in Hz.
Parameters:
frequency (float): SPI frequency, in Hz.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiFrequencySet(self.hdwf, frequency)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def clockSet(self, channel_index: int) -> None:
"""Set the digital channel (pin) for the SPI clock signal.
Parameters:
channel_index (int):
The digital channel (pin) where the SPI clock signal will be generated.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiClockSet(self.hdwf, channel_index)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def dataSet(self, spi_data_bit: int, channel_index: int) -> None:
"""Set the digital channel (pin) for an SPI data bit.
Parameters:
spi_data_bit (int):
The data bit to configure:
* 0 — DQ0 / MOSI / SISO
* 1 — DQ1 / MISO
* 2 — DQ2
* 3 — DQ3
channel_index (int):
The digital channel (pin) for this data bit.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiDataSet(self.hdwf, spi_data_bit, channel_index)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def idleSet(self, spi_data_bit: int, idle_mode: DwfDigitalOutIdle) -> None:
"""Set the idle behavior for an SPI data bit.
Parameters:
spi_data_bit (int):
The data bit to configure:
* 0 — DQ0 / MOSI / SISO
* 1 — DQ1 / MISO
* 2 — DQ2
* 3 — DQ3
idle_mode (DwfDigitalOutIdle):
The idle behavior of this bit.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiIdleSet(self.hdwf, spi_data_bit, idle_mode.value)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def modeSet(self, spi_mode: int) -> None:
"""Set the SPI mode.
Parameters:
spi_mode (int):
The values for CPOL (polarity) and CPHA (phase) to use with the attached slave device:
* 0 — CPOL = 0, CPHA = 0
* 1 — CPOL = 0, CPHA = 1
* 2 — CPOL = 1, CPHA = 0
* 3 — CPOL = 1, CPHA = 1
Refer to the slave device's datasheet to select the correct value.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiModeSet(self.hdwf, spi_mode)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def orderSet(self, bit_order: int) -> None:
"""Set the SPI data bit order.
Parameters:
bit_order (int):
Select the bit order of each word sent out:
* 1 — MSB first, LSB last
* 0 — LSB first, MSB last
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiOrderSet(self.hdwf, bit_order)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def select(self, channel_index: int, level: int) -> None:
"""Set the chip select (CS) status.
Parameters:
channel_index (int):
The digital channel (pin) for the Chip Select signal.
level (int):
The Chip Select level to configure.
* 0 — low
* 1 — high
* -1 — Z (high impedance)
The CS (chip select) is usually an active-low signal, from the
SPI bus master to a specific SPI slave device. Before starting a bus
request, the master should set CS to 0 for the chip it wants to talk to.
Usually, each slave on an SPI bus has its own CS line. At most one of them
should be selected at any time.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfDigitalSpiSelect(self.hdwf, channel_index, level)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def writeRead(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
"""Write and read multiple SPI data-words, with up to 8 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…8).
tx (List[int]):
The data-words to write.
Returns:
List[int]: The data-words received.
Raises:
DwfLibraryError: An error occurred while executing the write/read operation.
"""
tx_list = list(tx)
number_of_words = len(tx_list)
buffer_type = typespec_ctypes.c_unsigned_char * number_of_words
tx_buffer = buffer_type(*tx_list)
rx_buffer = buffer_type()
result = self.lib.FDwfDigitalSpiWriteRead(
self.hdwf,
transfer_type,
bits_per_word,
tx_buffer,
number_of_words,
rx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx_list = list(rx_buffer)
return rx_list
def writeRead16(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
"""Write and read multiple SPI data-words, with up to 16 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…16).
tx (List[int]):
The data-words to write.
Returns:
List[int]: The data-words received.
Raises:
DwfLibraryError: An error occurred while executing the write/read operation.
"""
tx_list = list(tx)
number_of_words = len(tx_list)
buffer_type = typespec_ctypes.c_unsigned_short * number_of_words
tx_buffer = buffer_type(*tx_list)
rx_buffer = buffer_type()
result = self.lib.FDwfDigitalSpiWriteRead16(
self.hdwf,
transfer_type,
bits_per_word,
tx_buffer,
number_of_words,
rx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx_list = list(rx_buffer)
return rx_list
def writeRead32(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> List[int]:
"""Write and read multiple SPI data-words, with up to 32 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…32).
tx (List[int]):
The data-words to write.
Returns:
List[int]: The data-words received.
Raises:
DwfLibraryError: An error occurred while executing the write/read operation.
"""
tx_list = list(tx)
number_of_words = len(tx_list)
buffer_type = typespec_ctypes.c_unsigned_int * number_of_words
tx_buffer = buffer_type(*tx_list)
rx_buffer = buffer_type()
result = self.lib.FDwfDigitalSpiWriteRead32(
self.hdwf,
transfer_type,
bits_per_word,
tx_buffer,
number_of_words,
rx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx_list = list(rx_buffer)
return rx_list
def read(self, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
"""Read multiple SPI data-words, with up to 8 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…8).
number_of_words (int):
The number of data-words to read.
Returns:
List[int]: The data-words received.
Raises:
DwfLibraryError: An error occurred while executing the read operation.
"""
buffer_type = typespec_ctypes.c_unsigned_char * number_of_words
rx_buffer = buffer_type()
result = self.lib.FDwfDigitalSpiRead(
self.hdwf,
transfer_type,
bits_per_word,
rx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx_list = list(rx_buffer)
return rx_list
def read16(self, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
"""Read multiple SPI data-words, with up to 16 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…16).
number_of_words (int):
The number of data-words to read.
Returns:
List[int]: The data-words received.
Raises:
DwfLibraryError: An error occurred while executing the read operation.
"""
buffer_type = typespec_ctypes.c_unsigned_short * number_of_words
rx_buffer = buffer_type()
result = self.lib.FDwfDigitalSpiRead16(
self.hdwf,
transfer_type,
bits_per_word,
rx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx_list = list(rx_buffer)
return rx_list
def read32(self, transfer_type: int, bits_per_word: int, number_of_words: int) -> List[int]:
"""Read multiple SPI data-words, with up to 32 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…32).
number_of_words (int):
The number of data-words to read.
Returns:
List[int]: The data-words received.
Raises:
DwfLibraryError: An error occurred while executing the read operation.
"""
buffer_type = typespec_ctypes.c_unsigned_int * number_of_words
rx_buffer = buffer_type()
result = self.lib.FDwfDigitalSpiRead32(
self.hdwf,
transfer_type,
bits_per_word,
rx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx_list = list(rx_buffer)
return rx_list
def readOne(self, transfer_type: int, bits_per_word: int) -> int:
"""Read a single SPI data-word, with up to 32 bits.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits of the data-word (1…32).
Returns:
int: The data-word received.
Raises:
DwfLibraryError: An error occurred while executing the read operation.
"""
c_rx = typespec_ctypes.c_unsigned_int()
result = self.lib.FDwfDigitalSpiReadOne(self.hdwf, transfer_type, bits_per_word, c_rx)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
rx = c_rx.value
return rx
def write(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
"""Write multiple SPI data-words, with up to 32 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…32).
tx (List[int]):
The data-words to write.
Raises:
DwfLibraryError: An error occurred while executing the write operation.
"""
tx_list = list(tx)
number_of_words = len(tx_list)
buffer_type = typespec_ctypes.c_unsigned_char * number_of_words
tx_buffer = buffer_type(*tx_list)
result = self.lib.FDwfDigitalSpiWrite(
self.hdwf,
transfer_type,
bits_per_word,
tx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def write16(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
"""Write multiple SPI data-words, with up to 16 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…16).
tx (List[int]):
The data-words to write.
Raises:
DwfLibraryError: An error occurred while executing the write operation.
"""
tx_list = list(tx)
number_of_words = len(tx_list)
buffer_type = typespec_ctypes.c_unsigned_short * number_of_words
tx_buffer = buffer_type(*tx_list)
result = self.lib.FDwfDigitalSpiWrite16(
self.hdwf,
transfer_type,
bits_per_word,
tx_buffer,
number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def write32(self, transfer_type: int, bits_per_word: int, tx: List[int]) -> None:
"""Write multiple SPI data-words, with up to 32 bits per data-word.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits per data-word (1…32).
tx (List[int]):
The data-words to write.
Raises:
DwfLibraryError: An error occurred while executing the write operation.
"""
tx_list = list(tx)
number_of_words = len(tx_list)
buffer_type = typespec_ctypes.c_unsigned_int * number_of_words
tx_buffer = buffer_type(*tx_list)
result = self.lib.FDwfDigitalSpiWrite32(
self.hdwf,
transfer_type,
bits_per_word,
tx_buffer, number_of_words)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def writeOne(self, transfer_type: int, bits_per_word: int, tx: int) -> None:
"""Write a single SPI data-word, with up to 32 bits.
Parameters:
transfer_type (int):
* 0 — SISO
* 1 — MOSI/MISO
* 2 — dual
* 4 — quad
bits_per_word (int):
The number of bits of the data-word (1…32).
tx (int):
The data-word to write.
Raises:
DwfLibraryError: An error occurred while executing the write operation.
"""
result = self.lib.FDwfDigitalSpiWriteOne(self.hdwf, transfer_type, bits_per_word, tx)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
| 0 | 0 | 0 |
01b3e8f71a828e37adfa40af2be365af344a02c3 | 2,591 | py | Python | buildProjectDocumentation.py | pktrigg/DocumentDelivery | fabcd39441653797d2083edb1cf642ebf1196d01 | [
"Apache-2.0"
] | 2 | 2019-04-30T02:13:44.000Z | 2019-04-30T02:13:54.000Z | buildProjectDocumentation.py | pktrigg/DocumentDelivery | fabcd39441653797d2083edb1cf642ebf1196d01 | [
"Apache-2.0"
] | null | null | null | buildProjectDocumentation.py | pktrigg/DocumentDelivery | fabcd39441653797d2083edb1cf642ebf1196d01 | [
"Apache-2.0"
] | null | null | null | #created p.kennedy@fugro.com
#created: May 2015
#Based on twitter bootstrap
import os
import sys
import re
import time
import datetime
# set an include path
sys.path.append("./_bootstrap/_scripts")
import zBMS
import scan
import whatsNew
# import repairMenu
# Flat build script: stitches static HTML fragments, generated menus and a
# folder scan into a single index.html documentation portal.
outputFileName = "index.html"
# NOTE(review): '__pychache__' looks like a typo for '__pycache__' -- confirm
# which folder name actually exists on disk before changing it.
ignoreFolders = ['trash', '_trash', '_bootstrap', '__pychache__', 'aspnet_client', '.vscode', '.git']
print("Starting to compile the Project Documentation...")
print("compiling static parts..")
projectconfig = scan.loadCSVFile('config.txt')
# Seed index.html with the head fragment (plain open/close; a `with` block
# would be the idiomatic form).
fin = open("./_bootstrap/_parts/1head.html", "r")
document = fin.read()
fin.close()
fout = open(outputFileName, "w")
fout.write(document)
fout.close()
scan.mergeFile(outputFileName, "./_bootstrap/_parts/2bodyStart.html")
# build the menu system
scan.mergeFile(outputFileName,"./_bootstrap/_parts/34menuHeader.html")
scan.buildDropDownMenu("dropdownmenu.txt", "./", ignoreFolders)
scan.mergeFile(outputFileName, "dropdownmenu.txt")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/35menuSideBarHeader.html")
scan.buildSideBarMenu("sidebarmenu.txt", "./", ignoreFolders)
scan.mergeFile(outputFileName, "sidebarmenu.txt")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/5title.html")
#create the whatsnew section
whatsNew.whatsnew("tmp.txt", "./", ignoreFolders)
# scan folders and sub folders and build all links...
scan.scanFolders("tmp.txt", "./", ignoreFolders)
# Substitute project metadata placeholders embedded in the HTML fragments.
now = datetime.datetime.now()
compileStr = projectconfig['projectid'] + " Updated: " + now.strftime("%d/%m/%y %H:%M")
print(compileStr)
scan.wordSearchReplace(outputFileName, "compileInfo", compileStr )
scan.wordSearchReplace(outputFileName, "projectname", projectconfig['projectname'] )
scan.wordSearchReplace(outputFileName, "projectsummary", projectconfig['projectsummary'] )
scan.wordSearchReplace(outputFileName, "browsertabname", projectconfig['browsertabname'] )
scan.mergeFile(outputFileName,"tmp.txt")
os.remove("tmp.txt")
#Overview pages...
scan.mergeFile(outputFileName,"./_bootstrap/_parts/about.html")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/contact.html")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/overview.html")
# Clean up the temporary menu files generated above.
os.remove("dropdownmenu.txt")
os.remove("sidebarmenu.txt")
#readme section with 2 boxes
print("Adding readme...")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/readme.html")
print("Adding the closing tags to the page...")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/99lastPart.html")
# print("Zipping the entire Portal into a all-in-one so users can self serve...")
# zBMS.zipEntireBMS(".")
print("Complete;-)")
| 31.597561 | 101 | 0.764956 | #created p.kennedy@fugro.com
#created: May 2015
#Based on twitter bootstrap
import os
import sys
import re
import time
import datetime
# set an include path
sys.path.append("./_bootstrap/_scripts")
import zBMS
import scan
import whatsNew
# import repairMenu
outputFileName = "index.html"
ignoreFolders = ['trash', '_trash', '_bootstrap', '__pychache__', 'aspnet_client', '.vscode', '.git']
print("Starting to compile the Project Documentation...")
print("compiling static parts..")
projectconfig = scan.loadCSVFile('config.txt')
fin = open("./_bootstrap/_parts/1head.html", "r")
document = fin.read()
fin.close()
fout = open(outputFileName, "w")
fout.write(document)
fout.close()
scan.mergeFile(outputFileName, "./_bootstrap/_parts/2bodyStart.html")
# build the menu system
scan.mergeFile(outputFileName,"./_bootstrap/_parts/34menuHeader.html")
scan.buildDropDownMenu("dropdownmenu.txt", "./", ignoreFolders)
scan.mergeFile(outputFileName, "dropdownmenu.txt")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/35menuSideBarHeader.html")
scan.buildSideBarMenu("sidebarmenu.txt", "./", ignoreFolders)
scan.mergeFile(outputFileName, "sidebarmenu.txt")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/5title.html")
#create the whatsnew section
whatsNew.whatsnew("tmp.txt", "./", ignoreFolders)
# scan folders and sub folders and build all links...
scan.scanFolders("tmp.txt", "./", ignoreFolders)
now = datetime.datetime.now()
compileStr = projectconfig['projectid'] + " Updated: " + now.strftime("%d/%m/%y %H:%M")
print(compileStr)
scan.wordSearchReplace(outputFileName, "compileInfo", compileStr )
scan.wordSearchReplace(outputFileName, "projectname", projectconfig['projectname'] )
scan.wordSearchReplace(outputFileName, "projectsummary", projectconfig['projectsummary'] )
scan.wordSearchReplace(outputFileName, "browsertabname", projectconfig['browsertabname'] )
scan.mergeFile(outputFileName,"tmp.txt")
os.remove("tmp.txt")
#Overview pages...
scan.mergeFile(outputFileName,"./_bootstrap/_parts/about.html")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/contact.html")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/overview.html")
os.remove("dropdownmenu.txt")
os.remove("sidebarmenu.txt")
#readme section with 2 boxes
print("Adding readme...")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/readme.html")
print("Adding the closing tags to the page...")
scan.mergeFile(outputFileName,"./_bootstrap/_parts/99lastPart.html")
# print("Zipping the entire Portal into a all-in-one so users can self serve...")
# zBMS.zipEntireBMS(".")
print("Complete;-)")
| 0 | 0 | 0 |
1845773d797a18574b396466b48f7eead0777656 | 1,121 | py | Python | f1tenth_gym/envs/tasks.py | axelbr/f1tenth_gym | ee220fef9ed3973259fd164756b4a99be9a3f632 | [
"MIT"
] | null | null | null | f1tenth_gym/envs/tasks.py | axelbr/f1tenth_gym | ee220fef9ed3973259fd164756b4a99be9a3f632 | [
"MIT"
] | null | null | null | f1tenth_gym/envs/tasks.py | axelbr/f1tenth_gym | ee220fef9ed3973259fd164756b4a99be9a3f632 | [
"MIT"
] | null | null | null | import abc
from abc import ABC
from typing import Any
import numpy as np
| 23.851064 | 82 | 0.624442 | import abc
from abc import ABC
from typing import Any
import numpy as np
class Task(ABC):
    """Abstract interface for a reinforcement-learning task definition."""
    @abc.abstractmethod
    def reward(self, state: Any, action: Any, next_state: Any = None) -> float:
        """Return the scalar reward for taking *action* in *state*."""
        pass
    @abc.abstractmethod
    def done(self, state: Any) -> bool:
        """Return True when *state* is terminal for this task."""
        pass
    def reset(self) -> None:
        """Reset any per-episode task state. Default implementation: no-op."""
        pass
class TimestepRewardTask(Task):
    """Task that grants a fixed reward per simulation timestep.

    The episode is done when the ego agent collides, or when every agent
    has completed the requested number of laps.
    """
    def __init__(self, ego_index: int = 0, laps: int = 2, timestep: float = 0.01):
        self._ego_idx = ego_index    # index of the ego agent in the state arrays
        self._timestep = timestep    # constant reward paid on every step
        self._laps = laps            # laps all agents must finish to end the episode
    def reward(self, state: Any, action: Any, next_state: Any = None) -> float:
        """Return the constant per-step reward."""
        return self._timestep
    def done(self, state: Any) -> bool:
        """True when the ego agent collided or all agents finished their laps."""
        collisions = state['collisions']
        lap_counts = state['lap_counts']
        # bool(...) so the declared `-> bool` contract holds; the original
        # returned a raw element of the collisions array (e.g. a numpy float).
        return bool(collisions[self._ego_idx]) or bool(np.all(lap_counts >= self._laps))
class MaximizeProgressTask(Task):
    """Unimplemented stub for a track-progress-maximising task.

    NOTE(review): every method body is `pass`, so instances currently
    violate the Task contract (reward/done return None). Implement before use.
    """
    def __init__(self, progress_map: np.ndarray):
        pass
    def reward(self, state: Any, action: Any, next_state: Any = None) -> float:
        pass
    def done(self, state: Any) -> bool:
        pass
19fb694bd712315309184b4c64de1903dd99d3d8 | 2,374 | py | Python | srcs/mazes_main.py | ChampiB/POMCP | af6b7f9df3476126abad2adf21cc618e1d9898d1 | [
"MIT"
] | null | null | null | srcs/mazes_main.py | ChampiB/POMCP | af6b7f9df3476126abad2adf21cc618e1d9898d1 | [
"MIT"
] | null | null | null | srcs/mazes_main.py | ChampiB/POMCP | af6b7f9df3476126abad2adf21cc618e1d9898d1 | [
"MIT"
] | null | null | null | from srcs.environments.MazeEnv import MazeEnv
from srcs.agent.POMCP import POMCP
import time
import math
if __name__ == "__main__":
    # Runs repeated POMCP planning episodes in a maze environment and reports
    # timing plus the distribution of final outcomes.
    # Create the environment
    MAZE_FILE_NAME = "5.maze"
    env = MazeEnv("./mazes/" + MAZE_FILE_NAME)
    LOCAL_MINIMA = env.get_local_minima(MAZE_FILE_NAME)
    S = env.states_list()
    A = env.actions_list()
    O = env.observations_list()
    # Hyper-parameters
    NB_SIMULATIONS = 100
    NB_ACTION_PERCEPTION_CYCLES = 20
    GAMMA = 0.9
    TIMEOUT = 500
    NO_PARTICLES = 100
    EXP_CONST = 5
    # Performance tracking variables
    exec_times = []
    # perf layout (inferred from the printout below -- confirm against
    # env.track): perf[0] = other outcomes, perf[1..k] = local minima,
    # perf[-1] = global optimum.
    perf = [0 for i in range(0, len(LOCAL_MINIMA) + 2)]
    # Run the simulations
    for j in range(0, NB_SIMULATIONS):
        # Create the agent
        agent = POMCP(S, A, O, env, c=EXP_CONST, gamma=GAMMA, timeout=TIMEOUT, no_particles=NO_PARTICLES)
        # Action-perception cycles
        start = time.time()
        # env.render() # TODO
        for t in range(0, NB_ACTION_PERCEPTION_CYCLES):
            action = agent.search()
            obs, _, done = env.step(action)
            # env.render() # TODO
            if done:
                break
            agent.update_belief(action, obs)
            agent.tree.prune_after_action(action, obs)
        # Track execution time and performance
        exec_times.append(time.time() - start)
        perf = env.track(perf, LOCAL_MINIMA)
        # Reset the environment to its initial state
        env.reset()
    # Display hyperparameters setting
    print("MAZE_FILE_NAME={}".format(MAZE_FILE_NAME))
    print("NB_SIMULATIONS={}".format(NB_SIMULATIONS))
    print("NB_ACTION_PERCEPTION_CYCLES={}".format(NB_ACTION_PERCEPTION_CYCLES))
    print("GAMMA={}".format(GAMMA))
    print("TIMEOUT={}".format(TIMEOUT))
    print("NO_PARTICLES={}".format(NO_PARTICLES))
    print("EXP_CONST={}".format(EXP_CONST))
    print()
    # Display execution time and performance of the POMCP agent
    avg = sum(exec_times) / len(exec_times)
    # Sample (n-1) variance of the per-episode wall-clock times.
    var = sum((x - avg)**2 for x in exec_times) / (len(exec_times) - 1)
    print("Time: {} +/- {}\n".format(avg, math.sqrt(var)))
    total = sum(perf)
    print("P(global): {}".format(perf[len(perf) - 1] / total))
    for i in range(0, len(LOCAL_MINIMA)):
        print("P(local {}): {}".format(i + 1, perf[i + 1] / total))
    print("P(other): {}".format(perf[0] / total))
| 32.520548 | 105 | 0.627211 | from srcs.environments.MazeEnv import MazeEnv
from srcs.agent.POMCP import POMCP
import time
import math
if __name__ == "__main__":
# Create the environment
MAZE_FILE_NAME = "5.maze"
env = MazeEnv("./mazes/" + MAZE_FILE_NAME)
LOCAL_MINIMA = env.get_local_minima(MAZE_FILE_NAME)
S = env.states_list()
A = env.actions_list()
O = env.observations_list()
# Hyper-parameters
NB_SIMULATIONS = 100
NB_ACTION_PERCEPTION_CYCLES = 20
GAMMA = 0.9
TIMEOUT = 500
NO_PARTICLES = 100
EXP_CONST = 5
# Performance tracking variables
exec_times = []
perf = [0 for i in range(0, len(LOCAL_MINIMA) + 2)]
# Run the simulations
for j in range(0, NB_SIMULATIONS):
# Create the agent
agent = POMCP(S, A, O, env, c=EXP_CONST, gamma=GAMMA, timeout=TIMEOUT, no_particles=NO_PARTICLES)
# Action-perception cycles
start = time.time()
# env.render() # TODO
for t in range(0, NB_ACTION_PERCEPTION_CYCLES):
action = agent.search()
obs, _, done = env.step(action)
# env.render() # TODO
if done:
break
agent.update_belief(action, obs)
agent.tree.prune_after_action(action, obs)
# Track execution time and performance
exec_times.append(time.time() - start)
perf = env.track(perf, LOCAL_MINIMA)
# Reset the environment to its initial state
env.reset()
# Display hyperparameters setting
print("MAZE_FILE_NAME={}".format(MAZE_FILE_NAME))
print("NB_SIMULATIONS={}".format(NB_SIMULATIONS))
print("NB_ACTION_PERCEPTION_CYCLES={}".format(NB_ACTION_PERCEPTION_CYCLES))
print("GAMMA={}".format(GAMMA))
print("TIMEOUT={}".format(TIMEOUT))
print("NO_PARTICLES={}".format(NO_PARTICLES))
print("EXP_CONST={}".format(EXP_CONST))
print()
# Display execution time and performance of the POMCP agent
avg = sum(exec_times) / len(exec_times)
var = sum((x - avg)**2 for x in exec_times) / (len(exec_times) - 1)
print("Time: {} +/- {}\n".format(avg, math.sqrt(var)))
total = sum(perf)
print("P(global): {}".format(perf[len(perf) - 1] / total))
for i in range(0, len(LOCAL_MINIMA)):
print("P(local {}): {}".format(i + 1, perf[i + 1] / total))
print("P(other): {}".format(perf[0] / total))
| 0 | 0 | 0 |
69ca2ca35b80c9e593d1ce4f32abc074a09604df | 1,518 | py | Python | utils.py | Avashist1998/Morning_view_mode | d597b517e714fcf9a16286482bcc58dc023ad2f8 | [
"MIT"
] | 1 | 2021-06-04T07:20:55.000Z | 2021-06-04T07:20:55.000Z | utils.py | Avashist1998/Morning_view_mode | d597b517e714fcf9a16286482bcc58dc023ad2f8 | [
"MIT"
] | null | null | null | utils.py | Avashist1998/Morning_view_mode | d597b517e714fcf9a16286482bcc58dc023ad2f8 | [
"MIT"
] | null | null | null | import numpy as np
from os import getcwd, path
from cv2 import imread, IMREAD_COLOR
def read_image(path=None):
    '''
    Read an image from *path* with OpenCV (BGR color).

    Parameters:
        path: Relative (to the current working directory) or absolute
            path of the image file. ``None`` means "no file".

    Returns:
        The image array from ``cv2.imread`` on success, otherwise ``None``
        (missing argument or non-existent file).
    '''
    # Local import: the `path` parameter shadows `os.path` from the
    # module-level `from os import getcwd, path`, which made the original
    # `path.join(...)` / `path.exists(...)` calls crash.
    import os
    if path is None:
        print('No path was given, so no image can be read.')
        return None
    base_path = os.getcwd()
    full_path = os.path.join(base_path, path)
    if base_path in path:
        # `path` already contains the working directory: treat it as absolute.
        full_path = path
    if not os.path.exists(full_path):
        # was: print('...').fromat(full_path) -- a typo ("fromat") calling a
        # method on print()'s None return, so the message never formatted.
        print('The path "{}" does not exist. Make sure that the file exists.'.format(full_path))
        return None
    image = imread(full_path, IMREAD_COLOR)
    return image
| 30.36 | 96 | 0.620553 | import numpy as np
from os import getcwd, path
from cv2 import imread, IMREAD_COLOR
def read_image(path=None):
    '''
    Read an image from *path* with OpenCV (BGR color).

    Parameters:
        path: Relative (to the current working directory) or absolute
            path of the image file. ``None`` means "no file".

    Returns:
        The image array from ``cv2.imread`` on success, otherwise ``None``
        (missing argument or non-existent file).
    '''
    # Local import: the `path` parameter shadows `os.path` from the
    # module-level `from os import getcwd, path`, which made the original
    # `path.join(...)` / `path.exists(...)` calls crash.
    import os
    if path is None:
        print('No path was given, so no image can be read.')
        return None
    base_path = os.getcwd()
    full_path = os.path.join(base_path, path)
    if base_path in path:
        # `path` already contains the working directory: treat it as absolute.
        full_path = path
    if not os.path.exists(full_path):
        # was: print('...').fromat(full_path) -- a typo ("fromat") calling a
        # method on print()'s None return, so the message never formatted.
        print('The path "{}" does not exist. Make sure that the file exists.'.format(full_path))
        return None
    image = imread(full_path, IMREAD_COLOR)
    return image
def rgb_min_image(image):
    """Collapse the color axis by taking the per-pixel channel minimum.

    Returns a 2-D (grayscale-like) array holding, for every pixel, the
    smallest of its channel values.
    """
    return np.min(image, axis=2)
def min_filter(image):
    """Apply a 15x15 minimum filter to each of the 3 channels of *image*.

    Parameters:
        image: H x W x 3 numpy array (assumes exactly 3 channels).

    Returns:
        A new array of the same shape where every channel has been
        minimum-filtered over a 15x15 window with reflected borders.
    """
    # Build the output once: the original re-created it inside the channel
    # loop, which discarded the filtered results of channels 0 and 1.
    filtered = image.copy()
    for channel in range(3):
        plane = image[:, :, channel]
        rows, cols = plane.shape
        # Reflect-pad 14 pixels on every side. np.pad mode='symmetric'
        # duplicates the edge pixel, matching cv2.BORDER_REFLECT; the
        # original called the `cv2` module, which was never imported.
        padded = np.pad(plane, 14, mode='symmetric')
        # NOTE(review): with a 14-pixel pad and a window starting at row i,
        # the window is not centred on (i, j) -- kept as in the original.
        for i in range(rows):
            for j in range(cols):
                filtered[i, j, channel] = padded[i:i + 15, j:j + 15].min()
    return filtered
def soft_matting(L, image, t_map):
    """Refine a transmission map with the matting-Laplacian regulariser.

    Computes ``t_map * (L + lamda * I) / lamda`` elementwise, where ``I``
    is the identity matrix matching ``L`` and ``lamda`` is a small
    regularisation constant.

    Parameters:
        L: Square matting-Laplacian matrix (numpy array).
        image: Unused; kept for backward compatibility with existing callers
            (the original only made an unused copy of it).
        t_map: Transmission map, broadcastable against ``L``.

    Returns:
        The refined transmission-map matrix.
    """
    lamda = 10 ** (-4)
    identity = np.identity(L.shape[0])
    return t_map * (L + lamda * identity) / lamda
b5152d0b0885d2eca41687730c8b29cfd3d8aac3 | 27,039 | py | Python | tests/tape/interfaces/test_tape_autograd.py | anthayes92/pennylane | 57734bbefadd7628d72b58bb2b0efea195cfd286 | [
"Apache-2.0"
] | 1 | 2020-10-15T01:09:27.000Z | 2020-10-15T01:09:27.000Z | tests/tape/interfaces/test_tape_autograd.py | anthayes92/pennylane | 57734bbefadd7628d72b58bb2b0efea195cfd286 | [
"Apache-2.0"
] | null | null | null | tests/tape/interfaces/test_tape_autograd.py | anthayes92/pennylane | 57734bbefadd7628d72b58bb2b0efea195cfd286 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the autograd interface"""
import pytest
from pennylane import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape
from pennylane.tape.interfaces.autograd import AutogradInterface
class TestAutogradQuantumTape:
"""Test the autograd interface applied to a tape"""
def test_interface_str(self):
"""Test that the interface string is correctly identified as autograd"""
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RX(0.5, wires=0)
qml.expval(qml.PauliX(0))
assert tape.interface == "autograd"
assert isinstance(tape, AutogradInterface)
    def test_get_parameters(self):
        """Test that the get_parameters function correctly gets the trainable parameters and all
        parameters, depending on the trainable_only argument"""
        a = np.array(0.1, requires_grad=True)
        b = np.array(0.2, requires_grad=False)
        c = np.array(0.3, requires_grad=True)
        d = np.array(0.4, requires_grad=False)
        with AutogradInterface.apply(JacobianTape()) as tape:
            qml.Rot(a, b, c, wires=0)
            qml.RX(d, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliX(0))
        # Parameter order on the tape is (a, b, c, d); only a (index 0) and
        # c (index 2) carry requires_grad=True.
        assert tape.trainable_params == {0, 2}
        assert np.all(tape.get_parameters(trainable_only=True) == [a, c])
        assert np.all(tape.get_parameters(trainable_only=False) == [a, b, c, d])
    def test_execution(self):
        """Test execution"""
        a = np.array(0.1, requires_grad=True)
        b = np.array(0.2, requires_grad=False)
        dev = qml.device("default.qubit", wires=1)
        # NOTE(review): `cost` is not defined in this excerpt -- the original
        # test defines it inline (removed by the dataset's no-docstring-function
        # filter); restore that definition before running.
        res = cost(a, b, device=dev)
        assert res.shape == (1,)
    def test_scalar_jacobian(self, tol):
        """Test scalar jacobian calculation"""
        a = np.array(0.1, requires_grad=True)
        dev = qml.device("default.qubit", wires=2)
        # NOTE(review): `cost` is undefined here -- its inline definition was
        # removed by the dataset filter; restore before running.
        res = qml.jacobian(cost)(a, device=dev)
        assert res.shape == (1,)
        # compare to standard tape jacobian
        with JacobianTape() as tape:
            qml.RY(a, wires=0)
            qml.expval(qml.PauliZ(0))
        tape.trainable_params = {0}
        expected = tape.jacobian(dev)
        assert expected.shape == (1, 1)
        assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
    def test_jacobian(self, tol):
        """Test jacobian calculation"""
        a = np.array(0.1, requires_grad=True)
        b = np.array(0.2, requires_grad=True)
        dev = qml.device("default.qubit", wires=2)
        # NOTE(review): `cost` is undefined here -- its inline definition was
        # removed by the dataset filter; restore before running.
        res = cost(a, b, device=dev)
        expected = [np.cos(a), -np.cos(a) * np.sin(b)]
        assert np.allclose(res, expected, atol=tol, rtol=0)
        res = qml.jacobian(cost)(a, b, device=dev)
        assert res.shape == (2, 2)
        expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]]
        assert np.allclose(res, expected, atol=tol, rtol=0)
    def test_jacobian_options(self, mocker, tol):
        """Test setting jacobian options"""
        spy = mocker.spy(JacobianTape, "numeric_pd")
        a = np.array([0.1, 0.2], requires_grad=True)
        dev = qml.device("default.qubit", wires=1)
        # NOTE(review): `cost` is undefined here (inline definition removed by
        # the dataset filter). `res` is intentionally unused: the assertions
        # below inspect the spied numeric_pd keyword arguments instead.
        res = qml.jacobian(cost)(a, device=dev)
        for args in spy.call_args_list:
            assert args[1]["order"] == 2
            assert args[1]["h"] == 1e-8
def test_reusing_quantum_tape(self, tol):
"""Test re-using a quantum tape by passing new parameters"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
assert tape.trainable_params == {0, 1}
jac_fn = qml.jacobian(cost)
jac = jac_fn(a, b)
a = np.array(0.54, requires_grad=True)
b = np.array(0.8, requires_grad=True)
res2 = cost(2 * a, b)
expected = [np.cos(2 * a), -np.cos(2 * a) * np.sin(b)]
assert np.allclose(res2, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(lambda a, b: cost(2 * a, b))
jac = jac_fn(a, b)
expected = [
[-2 * np.sin(2 * a), 0],
[2 * np.sin(2 * a) * np.sin(b), -np.cos(2 * a) * np.cos(b)],
]
assert np.allclose(jac, expected, atol=tol, rtol=0)
def test_classical_processing(self, tol):
"""Test classical processing within the quantum tape"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
c = np.array(0.3, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
res = qml.jacobian(cost)(a, b, c, device=dev)
assert res.shape == (1, 2)
def test_no_trainable_parameters(self, tol):
"""Test evaluation and Jacobian if there are no trainable parameters"""
a = np.array(0.1, requires_grad=False)
b = np.array(0.2, requires_grad=False)
dev = qml.device("default.qubit", wires=2)
res = cost(a, b, device=dev)
assert res.shape == (2,)
res = qml.jacobian(cost)(a, b, device=dev)
assert not res
with pytest.warns(UserWarning, match="Output seems independent"):
res = qml.grad(loss)(a, b)
assert not res
def test_matrix_parameter(self, tol):
"""Test that the autograd interface works correctly
with a matrix parameter"""
U = np.array([[0, 1], [1, 0]], requires_grad=False)
a = np.array(0.1, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
res = cost(a, U, device=dev)
assert np.allclose(res, -np.cos(a), atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(a, U, device=dev)
assert np.allclose(res, np.sin(a), atol=tol, rtol=0)
def test_differentiable_expand(self, mocker, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
mock = mocker.patch.object(qml.operation.Operation, "do_check_domain", False)
a = np.array(0.1, requires_grad=False)
p = np.array([0.1, 0.2, 0.3], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
res = cost_fn(a, p, device=dev)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost_fn)
res = jac_fn(a, p, device=dev)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_probability_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob outputs"""
dev = qml.device("default.qubit", wires=2)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[
[np.cos(x / 2) ** 2, np.sin(x / 2) ** 2],
[(1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
assert res.shape == (2, 2, 2)
expected = np.array(
[
[[-np.sin(x) / 2, 0], [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]],
[
[np.sin(x) / 2, 0],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_ragged_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[np.cos(x), (1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
expected = np.array(
[
[-np.sin(x), 0],
[-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_sampling(self):
"""Test sampling works as expected"""
dev = qml.device("default.qubit", wires=2, shots=10)
x = np.array(0.543, requires_grad=True)
res = cost(x, device=dev)
assert res.shape == (2, 10)
class TestAutogradPassthru:
"""Test that the quantum tape works with an autograd passthru
device.
These tests are very similar to the tests above, with three key differences:
* We do **not** apply the autograd interface. These tapes simply use passthru
backprop, no custom gradient registration needed.
* We do not test the trainable_params attribute. Since these tapes have no
autograd interface, the tape does not need to bookkeep which parameters
are trainable; this is done by autograd internally.
* We use mock.spy to ensure that the tape's Jacobian method is not being called.
"""
def test_execution(self):
"""Test execution"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
dev = qml.device("default.qubit.autograd", wires=1)
res = cost(a, b, device=dev)
assert res.shape == (1,)
def test_scalar_jacobian(self, tol, mocker):
"""Test scalar jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=True)
dev = qml.device("default.qubit.autograd", wires=2)
res = qml.jacobian(cost)(a, device=dev)
spy.assert_not_called()
assert res.shape == (1,)
# compare to standard tape jacobian
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
expected = tape.jacobian(dev)
assert expected.shape == (1, 1)
assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
def test_jacobian(self, mocker, tol):
"""Test jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=True)
dev = qml.device("default.qubit.autograd", wires=2)
res = qml.jacobian(cost)(a, b, device=dev)
spy.assert_not_called()
assert res.shape == (2, 2)
# compare to standard tape jacobian
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(0))
expected = tape.jacobian(dev)
assert expected.shape == (2, 2)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_classical_processing(self, mocker, tol):
"""Test classical processing within the quantum tape"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
c = np.array(0.3, requires_grad=True)
dev = qml.device("default.qubit.autograd", wires=2)
res = qml.jacobian(cost)(a, b, c, device=dev)
assert res.shape == (1, 2)
spy.assert_not_called()
def test_no_trainable_parameters(self, mocker, tol):
"""Test evaluation and Jacobian if there are no trainable parameters"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=False)
b = np.array(0.2, requires_grad=False)
dev = qml.device("default.qubit.autograd", wires=2)
res = cost(a, b, device=dev)
assert res.shape == (2,)
spy.assert_not_called()
res = qml.jacobian(cost)(a, b, device=dev)
assert not res
with pytest.warns(UserWarning, match="Output seems independent"):
res = qml.grad(loss)(a, b)
assert not res
def test_matrix_parameter(self, mocker, tol):
"""Test jacobian computation when the tape includes a matrix parameter"""
spy = mocker.spy(JacobianTape, "jacobian")
U = np.array([[0, 1], [1, 0]], requires_grad=False)
a = np.array(0.1, requires_grad=True)
dev = qml.device("default.qubit.autograd", wires=2)
res = cost(a, U, device=dev)
assert np.allclose(res, -np.cos(a), atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(a, U, device=dev)
assert np.allclose(res, np.sin(a), atol=tol, rtol=0)
spy.assert_not_called()
def test_differentiable_expand(self, mocker, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
spy = mocker.spy(JacobianTape, "jacobian")
mock = mocker.patch.object(qml.operation.Operation, "do_check_domain", False)
a = np.array(0.1, requires_grad=False)
p = np.array([0.1, 0.2, 0.3], requires_grad=True)
dev = qml.device("default.qubit.autograd", wires=1)
res = cost_fn(a, p, device=dev)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost_fn)
res = jac_fn(a, p, device=dev)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_probability_differentiation(self, mocker, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
spy = mocker.spy(JacobianTape, "jacobian")
dev = qml.device("default.qubit.autograd", wires=2)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[
[np.cos(x / 2) ** 2, np.sin(x / 2) ** 2],
[(1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
assert res.shape == (2, 2, 2)
expected = np.array(
[
[[-np.sin(x) / 2, 0], [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]],
[
[np.sin(x) / 2, 0],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_ragged_differentiation(self, mocker, monkeypatch, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
spy = mocker.spy(JacobianTape, "jacobian")
dev = qml.device("default.qubit.autograd", wires=2)
# The current DefaultQubitAutograd device provides an _asarray method that does
# not work correctly for ragged arrays. For ragged arrays, we would like _asarray to
# flatten the array. Here, we patch the _asarray method on the device to achieve this
# behaviour; once the tape has moved from the beta folder, we should implement
# this change directly in the device.
monkeypatch.setattr(dev, "_asarray", _asarray)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[np.cos(x), (1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
expected = np.array(
[
[-np.sin(x), 0],
[-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_sampling(self):
"""Test sampling works as expected"""
dev = qml.device("default.qubit.autograd", wires=2, shots=10)
x = np.array(0.543, requires_grad=True)
res = cost(x, device=dev)
assert res.shape == (2, 10)
| 36.888131 | 101 | 0.522246 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the autograd interface"""
import pytest
from pennylane import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape
from pennylane.tape.interfaces.autograd import AutogradInterface
class TestAutogradQuantumTape:
"""Test the autograd interface applied to a tape"""
def test_interface_str(self):
"""Test that the interface string is correctly identified as autograd"""
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RX(0.5, wires=0)
qml.expval(qml.PauliX(0))
assert tape.interface == "autograd"
assert isinstance(tape, AutogradInterface)
def test_get_parameters(self):
"""Test that the get_parameters function correctly gets the trainable parameters and all
parameters, depending on the trainable_only argument"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
c = np.array(0.3, requires_grad=True)
d = np.array(0.4, requires_grad=False)
with AutogradInterface.apply(JacobianTape()) as tape:
qml.Rot(a, b, c, wires=0)
qml.RX(d, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliX(0))
assert tape.trainable_params == {0, 2}
assert np.all(tape.get_parameters(trainable_only=True) == [a, c])
assert np.all(tape.get_parameters(trainable_only=False) == [a, b, c, d])
def test_execution(self):
"""Test execution"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
def cost(a, b, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
assert tape.trainable_params == {0}
return tape.execute(device)
dev = qml.device("default.qubit", wires=1)
res = cost(a, b, device=dev)
assert res.shape == (1,)
def test_scalar_jacobian(self, tol):
"""Test scalar jacobian calculation"""
a = np.array(0.1, requires_grad=True)
def cost(a, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
assert tape.trainable_params == {0}
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
res = qml.jacobian(cost)(a, device=dev)
assert res.shape == (1,)
# compare to standard tape jacobian
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
tape.trainable_params = {0}
expected = tape.jacobian(dev)
assert expected.shape == (1, 1)
assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
def test_jacobian(self, tol):
"""Test jacobian calculation"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=True)
def cost(a, b, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
assert tape.trainable_params == {0, 1}
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
res = cost(a, b, device=dev)
expected = [np.cos(a), -np.cos(a) * np.sin(b)]
assert np.allclose(res, expected, atol=tol, rtol=0)
res = qml.jacobian(cost)(a, b, device=dev)
assert res.shape == (2, 2)
expected = [[-np.sin(a), 0], [np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]]
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_jacobian_options(self, mocker, tol):
"""Test setting jacobian options"""
spy = mocker.spy(JacobianTape, "numeric_pd")
a = np.array([0.1, 0.2], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
def cost(a, device):
with AutogradInterface.apply(JacobianTape()) as qtape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
qtape.jacobian_options = {"h": 1e-8, "order": 2}
return qtape.execute(dev)
res = qml.jacobian(cost)(a, device=dev)
for args in spy.call_args_list:
assert args[1]["order"] == 2
assert args[1]["h"] == 1e-8
def test_reusing_quantum_tape(self, tol):
"""Test re-using a quantum tape by passing new parameters"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=True)
dev = qml.device("default.qubit", wires=2)
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(1))
assert tape.trainable_params == {0, 1}
def cost(a, b):
tape.set_parameters([a, b])
return tape.execute(dev)
jac_fn = qml.jacobian(cost)
jac = jac_fn(a, b)
a = np.array(0.54, requires_grad=True)
b = np.array(0.8, requires_grad=True)
res2 = cost(2 * a, b)
expected = [np.cos(2 * a), -np.cos(2 * a) * np.sin(b)]
assert np.allclose(res2, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(lambda a, b: cost(2 * a, b))
jac = jac_fn(a, b)
expected = [
[-2 * np.sin(2 * a), 0],
[2 * np.sin(2 * a) * np.sin(b), -np.cos(2 * a) * np.cos(b)],
]
assert np.allclose(jac, expected, atol=tol, rtol=0)
def test_classical_processing(self, tol):
"""Test classical processing within the quantum tape"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
c = np.array(0.3, requires_grad=True)
def cost(a, b, c, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a * c, wires=0)
qml.RZ(b, wires=0)
qml.RX(c + c ** 2 + np.sin(a), wires=0)
qml.expval(qml.PauliZ(0))
assert tape.trainable_params == {0, 2}
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
res = qml.jacobian(cost)(a, b, c, device=dev)
assert res.shape == (1, 2)
def test_no_trainable_parameters(self, tol):
"""Test evaluation and Jacobian if there are no trainable parameters"""
a = np.array(0.1, requires_grad=False)
b = np.array(0.2, requires_grad=False)
def cost(a, b, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
assert tape.trainable_params == set()
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
res = cost(a, b, device=dev)
assert res.shape == (2,)
res = qml.jacobian(cost)(a, b, device=dev)
assert not res
def loss(a, b):
return np.sum(cost(a, b, device=dev))
with pytest.warns(UserWarning, match="Output seems independent"):
res = qml.grad(loss)(a, b)
assert not res
def test_matrix_parameter(self, tol):
"""Test that the autograd interface works correctly
with a matrix parameter"""
U = np.array([[0, 1], [1, 0]], requires_grad=False)
a = np.array(0.1, requires_grad=True)
def cost(a, U, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.QubitUnitary(U, wires=0)
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
assert tape.trainable_params == {1}
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
res = cost(a, U, device=dev)
assert np.allclose(res, -np.cos(a), atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(a, U, device=dev)
assert np.allclose(res, np.sin(a), atol=tol, rtol=0)
def test_differentiable_expand(self, mocker, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
mock = mocker.patch.object(qml.operation.Operation, "do_check_domain", False)
class U3(qml.U3):
def expand(self):
tape = JacobianTape()
theta, phi, lam = self.data
wires = self.wires
tape._ops += [
qml.Rot(lam, theta, -lam, wires=wires),
qml.PhaseShift(phi + lam, wires=wires),
]
return tape
def cost_fn(a, p, device):
tape = JacobianTape()
with tape:
qml.RX(a, wires=0)
U3(*p, wires=0)
qml.expval(qml.PauliX(0))
tape = AutogradInterface.apply(tape.expand())
assert tape.trainable_params == {1, 2, 3, 4}
assert [i.name for i in tape.operations] == ["RX", "Rot", "PhaseShift"]
assert np.all(np.array(tape.get_parameters()) == [p[2], p[0], -p[2], p[1] + p[2]])
return tape.execute(device=device)
a = np.array(0.1, requires_grad=False)
p = np.array([0.1, 0.2, 0.3], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
res = cost_fn(a, p, device=dev)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost_fn)
res = jac_fn(a, p, device=dev)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_probability_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob outputs"""
def cost(x, y, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0])
qml.probs(wires=[1])
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[
[np.cos(x / 2) ** 2, np.sin(x / 2) ** 2],
[(1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
assert res.shape == (2, 2, 2)
expected = np.array(
[
[[-np.sin(x) / 2, 0], [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]],
[
[np.sin(x) / 2, 0],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_ragged_differentiation(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
def cost(x, y, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[1])
return tape.execute(device)
dev = qml.device("default.qubit", wires=2)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[np.cos(x), (1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
expected = np.array(
[
[-np.sin(x), 0],
[-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_sampling(self):
"""Test sampling works as expected"""
def cost(x, device):
with AutogradInterface.apply(JacobianTape()) as tape:
qml.Hadamard(wires=[0])
qml.CNOT(wires=[0, 1])
qml.sample(qml.PauliZ(0))
qml.sample(qml.PauliX(1))
return tape.execute(device)
dev = qml.device("default.qubit", wires=2, shots=10)
x = np.array(0.543, requires_grad=True)
res = cost(x, device=dev)
assert res.shape == (2, 10)
class TestAutogradPassthru:
"""Test that the quantum tape works with an autograd passthru
device.
These tests are very similar to the tests above, with three key differences:
* We do **not** apply the autograd interface. These tapes simply use passthru
backprop, no custom gradient registration needed.
* We do not test the trainable_params attribute. Since these tapes have no
autograd interface, the tape does not need to bookkeep which parameters
are trainable; this is done by autograd internally.
* We use mock.spy to ensure that the tape's Jacobian method is not being called.
"""
def test_execution(self):
"""Test execution"""
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
def cost(a, b, device):
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=1)
res = cost(a, b, device=dev)
assert res.shape == (1,)
def test_scalar_jacobian(self, tol, mocker):
"""Test scalar jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=True)
def cost(a, device):
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2)
res = qml.jacobian(cost)(a, device=dev)
spy.assert_not_called()
assert res.shape == (1,)
# compare to standard tape jacobian
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
expected = tape.jacobian(dev)
assert expected.shape == (1, 1)
assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)
def test_jacobian(self, mocker, tol):
"""Test jacobian calculation"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=True)
def cost(a, b, device):
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(0))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2)
res = qml.jacobian(cost)(a, b, device=dev)
spy.assert_not_called()
assert res.shape == (2, 2)
# compare to standard tape jacobian
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliY(0))
expected = tape.jacobian(dev)
assert expected.shape == (2, 2)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_classical_processing(self, mocker, tol):
"""Test classical processing within the quantum tape"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=True)
b = np.array(0.2, requires_grad=False)
c = np.array(0.3, requires_grad=True)
def cost(a, b, c, device):
with JacobianTape() as tape:
qml.RY(a * c, wires=0)
qml.RZ(b, wires=0)
qml.RX(c + c ** 2 + np.sin(a), wires=0)
qml.expval(qml.PauliZ(0))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2)
res = qml.jacobian(cost)(a, b, c, device=dev)
assert res.shape == (1, 2)
spy.assert_not_called()
def test_no_trainable_parameters(self, mocker, tol):
"""Test evaluation and Jacobian if there are no trainable parameters"""
spy = mocker.spy(JacobianTape, "jacobian")
a = np.array(0.1, requires_grad=False)
b = np.array(0.2, requires_grad=False)
def cost(a, b, device):
with JacobianTape() as tape:
qml.RY(a, wires=0)
qml.RX(b, wires=0)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2)
res = cost(a, b, device=dev)
assert res.shape == (2,)
spy.assert_not_called()
res = qml.jacobian(cost)(a, b, device=dev)
assert not res
def loss(a, b):
return np.sum(cost(a, b, device=dev))
with pytest.warns(UserWarning, match="Output seems independent"):
res = qml.grad(loss)(a, b)
assert not res
def test_matrix_parameter(self, mocker, tol):
"""Test jacobian computation when the tape includes a matrix parameter"""
spy = mocker.spy(JacobianTape, "jacobian")
U = np.array([[0, 1], [1, 0]], requires_grad=False)
a = np.array(0.1, requires_grad=True)
def cost(a, U, device):
with JacobianTape() as tape:
qml.QubitUnitary(U, wires=0)
qml.RY(a, wires=0)
qml.expval(qml.PauliZ(0))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2)
res = cost(a, U, device=dev)
assert np.allclose(res, -np.cos(a), atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(a, U, device=dev)
assert np.allclose(res, np.sin(a), atol=tol, rtol=0)
spy.assert_not_called()
def test_differentiable_expand(self, mocker, tol):
"""Test that operation and nested tapes expansion
is differentiable"""
spy = mocker.spy(JacobianTape, "jacobian")
mock = mocker.patch.object(qml.operation.Operation, "do_check_domain", False)
class U3(qml.U3):
def expand(self):
tape = JacobianTape()
theta, phi, lam = self.data
wires = self.wires
tape._ops += [
qml.Rot(lam, theta, -lam, wires=wires),
qml.PhaseShift(phi + lam, wires=wires),
]
return tape
def cost_fn(a, p, device):
tape = JacobianTape()
with tape:
qml.RX(a, wires=0)
U3(*p, wires=0)
qml.expval(qml.PauliX(0))
tape = tape.expand()
assert [i.name for i in tape.operations] == ["RX", "Rot", "PhaseShift"]
assert np.all(tape.get_parameters() == [a, p[2], p[0], -p[2], p[1] + p[2]])
return tape.execute(device=device)
a = np.array(0.1, requires_grad=False)
p = np.array([0.1, 0.2, 0.3], requires_grad=True)
dev = qml.device("default.qubit.autograd", wires=1)
res = cost_fn(a, p, device=dev)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost_fn)
res = jac_fn(a, p, device=dev)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_probability_differentiation(self, mocker, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
spy = mocker.spy(JacobianTape, "jacobian")
def cost(x, y, device):
with JacobianTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0])
qml.probs(wires=[1])
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[
[np.cos(x / 2) ** 2, np.sin(x / 2) ** 2],
[(1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
assert res.shape == (2, 2, 2)
expected = np.array(
[
[[-np.sin(x) / 2, 0], [-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2]],
[
[np.sin(x) / 2, 0],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_ragged_differentiation(self, mocker, monkeypatch, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
spy = mocker.spy(JacobianTape, "jacobian")
dev = qml.device("default.qubit.autograd", wires=2)
def _asarray(args, dtype=np.float64):
return np.hstack(args).flatten()
# The current DefaultQubitAutograd device provides an _asarray method that does
# not work correctly for ragged arrays. For ragged arrays, we would like _asarray to
# flatten the array. Here, we patch the _asarray method on the device to achieve this
# behaviour; once the tape has moved from the beta folder, we should implement
# this change directly in the device.
monkeypatch.setattr(dev, "_asarray", _asarray)
def cost(x, y, device):
with JacobianTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[1])
return tape.execute(device)
x = np.array(0.543, requires_grad=True)
y = np.array(-0.654, requires_grad=True)
res = cost(x, y, device=dev)
expected = np.array(
[np.cos(x), (1 + np.cos(x) * np.cos(y)) / 2, (1 - np.cos(x) * np.cos(y)) / 2]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
jac_fn = qml.jacobian(cost)
res = jac_fn(x, y, device=dev)
expected = np.array(
[
[-np.sin(x), 0],
[-np.sin(x) * np.cos(y) / 2, -np.cos(x) * np.sin(y) / 2],
[np.cos(y) * np.sin(x) / 2, np.cos(x) * np.sin(y) / 2],
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy.assert_not_called()
def test_sampling(self):
"""Test sampling works as expected"""
def cost(x, device):
with JacobianTape() as tape:
qml.Hadamard(wires=[0])
qml.CNOT(wires=[0, 1])
qml.sample(qml.PauliZ(0))
qml.sample(qml.PauliX(1))
return tape.execute(device)
dev = qml.device("default.qubit.autograd", wires=2, shots=10)
x = np.array(0.543, requires_grad=True)
res = cost(x, device=dev)
assert res.shape == (2, 10)
| 7,039 | -8 | 961 |
e92a98bf4986fdb2e9e8cb59b7d590d3a5b15061 | 2,082 | py | Python | tests/endpoints/test_account_token.py | eruvanos/warehouse14 | a03c2b8e287368b77338cc1eb283c5a762f23104 | [
"MIT"
] | 2 | 2021-12-09T18:29:54.000Z | 2022-02-23T13:59:36.000Z | tests/endpoints/test_account_token.py | eruvanos/warehouse14 | a03c2b8e287368b77338cc1eb283c5a762f23104 | [
"MIT"
] | 5 | 2021-07-13T04:29:42.000Z | 2021-10-14T08:31:39.000Z | tests/endpoints/test_account_token.py | eruvanos/warehouse14 | a03c2b8e287368b77338cc1eb283c5a762f23104 | [
"MIT"
] | 1 | 2021-07-15T07:05:06.000Z | 2021-07-15T07:05:06.000Z | import requests_html
from pytest import fixture
from requests_html import HTMLResponse
from wsgiadapter import WSGIAdapter
from tests.endpoints import login
from warehouse14 import create_app
@fixture
@fixture
| 26.35443 | 87 | 0.68828 | import requests_html
from pytest import fixture
from requests_html import HTMLResponse
from wsgiadapter import WSGIAdapter
from tests.endpoints import login
from warehouse14 import create_app
@fixture
def app(db, storage, authenticator):
    """Warehouse14 Flask app wired to the test db/storage/auth fixtures."""
    app = create_app(db, storage, authenticator)
    app.debug = True  # surface tracebacks in test responses
    return app
@fixture
def html_client(app) -> requests_html.HTMLSession:
    """HTML session whose http://localhost traffic is routed into the WSGI app."""
    session = requests_html.HTMLSession()
    session.mount("http://localhost", WSGIAdapter(app))
    return session
def test_tokens_list_empty(html_client, app):
    """The account page shows no token names for a fresh user."""
    login(html_client, "user1")
    res: HTMLResponse = html_client.get("http://localhost/manage/account")
    assert res.status_code == 200
    tokens = list(map(lambda e: e.text, res.html.find(".account-token-name")))
    assert tokens == []
def test_tokens_list(html_client, app, db):
    """A token added directly to the db is listed on the account page."""
    login(html_client, "user1")
    db.account_token_add("user1", "token-1", "token-1-name", "key")
    res: HTMLResponse = html_client.get("http://localhost/manage/account")
    assert res.status_code == 200
    tokens = list(map(lambda e: e.text, res.html.find(".account-token-name")))
    assert tokens == ["token-1-name"]
def test_tokens_add(html_client, app, db):
    """Posting the token form creates a token visible through the db API."""
    login(html_client, "user1")
    res: HTMLResponse = html_client.post(
        "http://localhost/manage/account/tokens_form", data={"name": "token-2"}
    )
    assert res.status_code == 200
    tokens = db.account_token_list("user1")
    assert len(tokens) == 1
    assert tokens[0].name == "token-2"
def test_tokens_remove(html_client, app, db):
    """The delete endpoint removes the token and redirects back to the account page."""
    login(html_client, "user1")
    db.account_token_add("user1", "token-1", "token-1-name", "key")
    res: HTMLResponse = html_client.get(
        "http://localhost/manage/account/tokens/delete", params={"token_id": "token-1"}
    )
    assert res.status_code == 200
    assert res.url == "http://localhost/manage/account"
    tokens = list(map(lambda e: e.text, res.html.find(".account-token-name")))
    assert tokens == []
    tokens = db.account_token_list("user1")
    assert len(tokens) == 0
| 1,727 | 0 | 136 |
bf221672f7c8a4902f6b355383fd12f3f97c36b4 | 252 | py | Python | answers/Nitish1702/Day6/Answer1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Nitish1702/Day6/Answer1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Nitish1702/Day6/Answer1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | l=input("CANDIES: ").lstrip("[").rstrip("]")
# Kids With the Greatest Number of Candies: for each kid, report whether
# giving them all `n` extra candies reaches the current maximum count.
t = l.split(",")
boolist = []
n = int(input("EXTRA CANDIES: "))
# Bug fix: max(t) compared the *strings* lexicographically, which is wrong
# for multi-digit counts (e.g. max("2", "10") == "2").  Compare against the
# numeric maximum, computed once instead of on every iteration.
most = max(int(c) for c in t)
for c in t:
    if int(c) + n >= most:
        boolist.append("True")
    else:
        boolist.append("False")
print(boolist)
| 22.909091 | 44 | 0.559524 | l=input("CANDIES: ").lstrip("[").rstrip("]")
# Kids With the Greatest Number of Candies: for each kid, report whether
# giving them all `n` extra candies reaches the current maximum count.
t = l.split(",")
boolist = []
n = int(input("EXTRA CANDIES: "))
# Bug fix: max(t) compared the *strings* lexicographically, which is wrong
# for multi-digit counts (e.g. max("2", "10") == "2").  Compare against the
# numeric maximum, computed once instead of on every iteration.
most = max(int(c) for c in t)
for c in t:
    if int(c) + n >= most:
        boolist.append("True")
    else:
        boolist.append("False")
print(boolist)
| 0 | 0 | 0 |
427ed1b45bd9acc71f91723b1a57e63ae7c1f115 | 5,268 | py | Python | autovizwidget/autovizwidget/tests/test_plotlygraphs.py | sciserver/sparkmagic | ac0852cbe88a41faa368cf1e1c89045a2de973bf | [
"RSA-MD"
] | 1,141 | 2015-09-21T20:52:00.000Z | 2022-03-31T14:15:51.000Z | autovizwidget/autovizwidget/tests/test_plotlygraphs.py | sciserver/sparkmagic | ac0852cbe88a41faa368cf1e1c89045a2de973bf | [
"RSA-MD"
] | 605 | 2015-09-23T23:27:43.000Z | 2022-03-16T07:46:52.000Z | autovizwidget/autovizwidget/tests/test_plotlygraphs.py | sciserver/sparkmagic | ac0852cbe88a41faa368cf1e1c89045a2de973bf | [
"RSA-MD"
] | 442 | 2015-09-23T21:31:28.000Z | 2022-03-13T15:19:57.000Z | import pandas as pd
from mock import MagicMock
from ..plotlygraphs.graphbase import GraphBase
from ..plotlygraphs.piegraph import PieGraph
from ..plotlygraphs.datagraph import DataGraph
from ..widget.encoding import Encoding
from ..widget.invalidencodingerror import InvalidEncodingError
| 43.180328 | 120 | 0.64104 | import pandas as pd
from mock import MagicMock
from ..plotlygraphs.graphbase import GraphBase
from ..plotlygraphs.piegraph import PieGraph
from ..plotlygraphs.datagraph import DataGraph
from ..widget.encoding import Encoding
from ..widget.invalidencodingerror import InvalidEncodingError
def test_graph_base_display_methods():
    """GraphBase advertises all four axis-control widgets."""
    assert GraphBase.display_x()
    assert GraphBase.display_y()
    assert GraphBase.display_logarithmic_x_axis()
    assert GraphBase.display_logarithmic_y_axis()
def test_graphbase_get_x_y_values():
    """_get_x_y_values aggregates y per x for each aggregation mode.

    Also checks that non-numeric y columns and numeric-x/string-y
    mismatches raise InvalidEncodingError.
    """
    records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12, u"str": "str"},
               {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0, u"str": "str"},
               {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11, u"str": "str"},
               {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5, u"str": "str"},
               {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19, u"str": "str"},
               {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32, u"str": "str"}]
    df = pd.DataFrame(records)
    expected_xs = [u'6/1/13', u'6/1/14', u'6/1/15', u'6/1/16', u'6/1/17']
    # 6/1/13 appears twice (12 and 0), so each aggregation differs there.
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum)
    xs, yx = GraphBase._get_x_y_values(df, encoding)
    assert xs == expected_xs
    assert yx == [12, 11, 5, 19, 32]
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_avg)
    xs, yx = GraphBase._get_x_y_values(df, encoding)
    assert xs == expected_xs
    assert yx == [6, 11, 5, 19, 32]
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_max)
    xs, yx = GraphBase._get_x_y_values(df, encoding)
    assert xs == expected_xs
    assert yx == [12, 11, 5, 19, 32]
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_min)
    xs, yx = GraphBase._get_x_y_values(df, encoding)
    assert xs == expected_xs
    assert yx == [0, 11, 5, 19, 32]
    # No aggregation: duplicate x values are kept.
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_none)
    xs, yx = GraphBase._get_x_y_values(df, encoding)
    assert xs == [u'6/1/13', u'6/1/13', u'6/1/14', u'6/1/15', u'6/1/16', u'6/1/17']
    assert yx == [12, 0, 11, 5, 19, 32]
    try:
        encoding = Encoding(chart_type=Encoding.chart_type_line, x="buildingID", y="date",
                            y_aggregation=Encoding.y_agg_avg)
        GraphBase._get_x_y_values(df, encoding)
        assert False
    except InvalidEncodingError:
        pass
    try:
        encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="str",
                            y_aggregation=Encoding.y_agg_avg)
        GraphBase._get_x_y_values(df, encoding)
        assert False
    except InvalidEncodingError:
        pass
def test_pie_graph_display_methods():
    """Pie charts expose x/y selectors but no logarithmic-axis toggles."""
    assert PieGraph.display_x()
    assert PieGraph.display_y()
    assert not PieGraph.display_logarithmic_x_axis()
    assert not PieGraph.display_logarithmic_y_axis()
def test_pie_graph_get_values_labels():
    """_get_x_values_labels counts x occurrences when y is None, else sums y per x."""
    records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12},
               {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0},
               {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11},
               {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5},
               {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19},
               {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32}]
    df = pd.DataFrame(records)
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y=None, y_aggregation=Encoding.y_agg_sum)
    values, labels = PieGraph._get_x_values_labels(df, encoding)
    assert values == [2, 1, 1, 1, 1]
    assert labels == ["6/1/13", "6/1/14", "6/1/15", "6/1/16", "6/1/17"]
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum)
    values, labels = PieGraph._get_x_values_labels(df, encoding)
    assert values == [12, 11, 5, 19, 32]
    assert labels == ["6/1/13", "6/1/14", "6/1/15", "6/1/16", "6/1/17"]
def test_data_graph_render():
    """DataGraph.render emits two HTML fragments through the display shim."""
    records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12},
               {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0},
               {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11},
               {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5},
               {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19},
               {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32}]
    df = pd.DataFrame(records)
    encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum)
    display = MagicMock()
    data = DataGraph(display)
    data.render(df, encoding, MagicMock())
    assert display.html.call_count == 2
def test_data_graph_display_methods():
    """The raw-data view exposes no axis-control widgets at all."""
    assert not DataGraph.display_x()
    assert not DataGraph.display_y()
    assert not DataGraph.display_logarithmic_x_axis()
    assert not DataGraph.display_logarithmic_y_axis()
| 4,835 | 0 | 138 |
aba521846c16917007362546f863e685b09e3a92 | 963 | py | Python | galaxy/main/migrations/0002_auto_20150824_1342.py | SamyCoenen/galaxy | 7c17ef45e53b0fc2fe8a2c70a99f3947604e0b0e | [
"Apache-2.0"
] | null | null | null | galaxy/main/migrations/0002_auto_20150824_1342.py | SamyCoenen/galaxy | 7c17ef45e53b0fc2fe8a2c70a99f3947604e0b0e | [
"Apache-2.0"
] | null | null | null | galaxy/main/migrations/0002_auto_20150824_1342.py | SamyCoenen/galaxy | 7c17ef45e53b0fc2fe8a2c70a99f3947604e0b0e | [
"Apache-2.0"
] | null | null | null | # NOTE(cutwater): This migration is replaced by v2_4_0 and should be
# deleted once superseding migration is merged into master.
from django.db import migrations
| 25.342105 | 68 | 0.556594 | # NOTE(cutwater): This migration is replaced by v2_4_0 and should be
# deleted once superseding migration is merged into master.
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the per-category vote/rating fields from RoleRating.

    See the module-level NOTE: this migration is superseded by v2_4_0 and
    is slated for deletion once that migration lands on master.
    """
    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='rolerating',
            name='code_quality',
        ),
        migrations.RemoveField(
            model_name='rolerating',
            name='documentation',
        ),
        migrations.RemoveField(
            model_name='rolerating',
            name='down_votes',
        ),
        migrations.RemoveField(
            model_name='rolerating',
            name='reliability',
        ),
        migrations.RemoveField(
            model_name='rolerating',
            name='up_votes',
        ),
        migrations.RemoveField(
            model_name='rolerating',
            name='wow_factor',
        ),
    ]
| 0 | 775 | 23 |
13211b63c9ef6ff265882d203f5807eb77a45c16 | 7,973 | py | Python | landgrab.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | [
"MIT"
] | 20 | 2015-02-26T15:55:42.000Z | 2021-07-30T00:19:31.000Z | landgrab.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | [
"MIT"
] | 1 | 2018-04-02T12:13:30.000Z | 2021-10-04T00:59:38.000Z | landgrab.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | [
"MIT"
] | 5 | 2015-03-03T23:31:39.000Z | 2018-01-17T03:13:34.000Z | # todo: handle cases where the boundary crosses the dateline
# usage:
# python landgrab.py [osm id] [zoom level] [optional list-only flag]
import requests, json, math, sys, os
import xml.etree.ElementTree as ET
import pprint
if len(sys.argv) < 3:
print "At least 2 arguments needed - please enter an OSM ID and zoom level."
sys.exit()
# get the OpenStreetMap ID
OSMID=sys.argv[1]
zoom=[]
# handle multi-part ranges, eg "3-4, 5, 6-7"
for part in sys.argv[2].split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
zoom.extend(range(a, b + 1))
else:
a = int(part)
zoom.append(a)
coordsonly=0
if len(sys.argv) > 3:
coordsonly=int(sys.argv[3])
print coordsonly
success = False
try:
INFILE = 'http://www.openstreetmap.org/api/0.6/relation/'+OSMID+'/full'
print "Downloading", INFILE
r = requests.get(INFILE)
r.raise_for_status()
success = True
except Exception, e:
print e
if not success:
try:
INFILE = 'http://www.openstreetmap.org/api/0.6/way/'+OSMID+'/full'
print "Downloading", INFILE
r = requests.get(INFILE)
r.raise_for_status()
success = True
except Exception, e:
print e
if not success:
try:
INFILE = 'http://www.openstreetmap.org/api/0.6/node/'+OSMID
print "Downloading", INFILE
r = requests.get(INFILE)
r.raise_for_status()
success = True
except Exception, e:
print e
print "Element not found, exiting"
sys.exit()
# print r.encoding
open('outfile.xml', 'w').close() # clear existing OUTFILE
with open('outfile.xml', 'w') as fd:
fd.write(r.text.encode("UTF-8"))
fd.close()
try:
tree = ET.parse('outfile.xml')
except Exception, e:
print e
print "XML parse failed, please check outfile.xml"
sys.exit()
root = tree.getroot()
##
## HELPER FUNCTIONS
##
tile_size = 256
half_circumference_meters = 20037508.342789244;
# Convert lat-lng to mercator meters
# convert from tile-space coords to meters, depending on zoom
# Given a point in mercator meters and a zoom level, return the tile X/Y/Z that the point lies in
# Convert tile location to mercator meters - multiply by pixels per tile, then by meters per pixel, adjust for map origin
##
## PROCESSING points
##
print "Processing:"
points = []
for node in root:
if node.tag == "node":
lat = float(node.attrib["lat"])
lon = float(node.attrib["lon"])
points.append({'y':lat, 'x':lon})
##
## GET TILES for all zoom levels
##
for z in zoom:
getTiles(points,z)
| 31.023346 | 154 | 0.572056 | # todo: handle cases where the boundary crosses the dateline
# usage:
# python landgrab.py [osm id] [zoom level] [optional list-only flag]
import requests, json, math, sys, os
import xml.etree.ElementTree as ET
import pprint
# Parse CLI args, then download the OSM element as XML into outfile.xml.
if len(sys.argv) < 3:
    print "At least 2 arguments needed - please enter an OSM ID and zoom level."
    sys.exit()
# get the OpenStreetMap ID
OSMID=sys.argv[1]
zoom=[]
# handle multi-part ranges, eg "3-4, 5, 6-7"
for part in sys.argv[2].split(','):
    if '-' in part:
        a, b = part.split('-')
        a, b = int(a), int(b)
        zoom.extend(range(a, b + 1))
    else:
        a = int(part)
        zoom.append(a)
coordsonly=0
if len(sys.argv) > 3:
    coordsonly=int(sys.argv[3])
print coordsonly
# Try the OSM API element types in order: relation, then way, then node.
success = False
try:
    INFILE = 'http://www.openstreetmap.org/api/0.6/relation/'+OSMID+'/full'
    print "Downloading", INFILE
    r = requests.get(INFILE)
    r.raise_for_status()
    success = True
except Exception, e:
    print e
if not success:
    try:
        INFILE = 'http://www.openstreetmap.org/api/0.6/way/'+OSMID+'/full'
        print "Downloading", INFILE
        r = requests.get(INFILE)
        r.raise_for_status()
        success = True
    except Exception, e:
        print e
if not success:
    try:
        INFILE = 'http://www.openstreetmap.org/api/0.6/node/'+OSMID
        print "Downloading", INFILE
        r = requests.get(INFILE)
        r.raise_for_status()
        success = True
    except Exception, e:
        print e
        print "Element not found, exiting"
        sys.exit()
# print r.encoding
open('outfile.xml', 'w').close() # clear existing OUTFILE
with open('outfile.xml', 'w') as fd:
    fd.write(r.text.encode("UTF-8"))
    fd.close()
try:
    tree = ET.parse('outfile.xml')
except Exception, e:
    print e
    print "XML parse failed, please check outfile.xml"
    sys.exit()
root = tree.getroot()
## HELPER FUNCTIONS
##
tile_size = 256
half_circumference_meters = 20037508.342789244;
# Convert lat-lng to mercator meters
def latLngToMeters(coords):
    """Project {'x': lng, 'y': lat} (degrees) into web-mercator meters."""
    lng = float(coords['x'])
    lat = float(coords['y'])
    # Forward mercator projection for latitude, scaled so that the map
    # spans +/-half_circumference_meters.
    merc_y = (math.log(math.tan(lat * math.pi / 360 + math.pi / 4)) / math.pi) * half_circumference_meters
    # Longitude is linear in degrees.
    merc_x = lng * (half_circumference_meters / 180)
    return {"x": merc_x, "y": merc_y}
# convert from tile-space coords to meters, depending on zoom
def tile_to_meters(zoom):
    """Return the width of one map tile in mercator meters at `zoom`.

    Zoom 0 is a single tile covering the full ~40075 km circumference;
    each additional zoom level halves the tile width.
    """
    circumference = 40075016.68557849
    return circumference / float(2 ** zoom)
# Given a point in mercator meters and a zoom level, return the tile X/Y/Z that the point lies in
def tileForMeters(coords, zoom):
    """Return the {'x','y','z'} tile containing a web-mercator point at `zoom`.

    Tile Y grows southward (north edge = row 0), hence the negated y term.
    """
    y = float(coords['y'])
    x = float(coords['x'])
    return {
        "x": math.floor((x + half_circumference_meters) / (half_circumference_meters * 2 / pow(2, zoom))),
        "y": math.floor((-y + half_circumference_meters) / (half_circumference_meters * 2 / pow(2, zoom))),
        "z": zoom
    }
# Convert tile location to mercator meters - multiply by pixels per tile, then by meters per pixel, adjust for map origin
def metersForTile(tile):
    """Return the mercator-meter coordinates of a tile's origin.

    Inverse of tileForMeters(): scales the tile index by the tile width
    at its zoom level, then shifts by the map origin.
    """
    # Bug fix: the zoom level was read with attribute access (tile.z) even
    # though tiles are plain dicts read by key everywhere else in this file
    # ({'x': ..., 'y': ..., 'z': ...}), so any call raised AttributeError.
    return {
        "x": tile['x'] * half_circumference_meters * 2 / pow(2, tile['z']) - half_circumference_meters,
        "y": -(tile['y'] * half_circumference_meters * 2 / pow(2, tile['z']) - half_circumference_meters)
    }
def getTiles(_points,_zoom):
    """Compute the tile set covering `_points` at `_zoom`, then list or fetch it.

    Maps each point to its tile, de-dupes, fills gaps between same-row /
    same-column tiles, then either pretty-prints the tile coords
    (coordsonly == 1) or downloads each tile's vector JSON into tiles/.
    """
    tiles = []
    ## find the tile which contains each point
    for point in _points:
        tiles.append(tileForMeters(latLngToMeters({'x':point['x'],'y':point['y']}), _zoom))
    ## de-dupe
    tiles = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in tiles)]
    ## patch holes in tileset
    ## get min and max tiles for lat and long
    # set min vals to maximum tile #s + 1 at zoom 21
    minx = 2097152
    maxx = -1
    miny = 2097152
    maxy = -1
    print "tiles:"+str(tiles)
    for tile in tiles:
        minx = min(minx, tile['x'])
        maxx = max(maxx, tile['x'])
        miny = min(miny, tile['y'])
        maxy = max(maxy, tile['y'])
    # print miny, minx, maxy, maxx
    newtiles = []
    for tile in tiles:
        # find furthest tiles from this tile on x and y axes
        # todo: check across the dateline, maybe with some kind of mod(360) -
        # if a closer value is found, use that instead and warp across the antimeridian
        x = tile['x']
        lessx = 2097152
        morex = -1
        y = tile['y']
        lessy = 2097152
        morey = -1
        for t in tiles:
            if int(t['x']) == int(tile['x']):
                # check on y axis
                lessy = min(lessy, t['y'])
                morey = max(morey, t['y'])
            if int(t['y']) == int(tile['y']):
                # check on x axis
                lessx = min(lessx, t['x'])
                morex = max(morex, t['x'])
        # if a tile is found which is not directly adjacent, add all the tiles between the two
        if (lessy + 2) < tile['y']:
            for i in range(int(lessy+1), int(tile['y'])):
                newtiles.append({'x':tile['x'],'y':i, 'z':_zoom})
        if (morey - 2) > tile['y']:
            for i in range(int(morey-1), int(tile['y'])):
                newtiles.append({'x':tile['x'],'y':i, 'z':_zoom})
        if (lessx + 2) < tile['x']:
            for i in range(int(lessx+1), int(tile['x'])):
                newtiles.append({'x':i,'y':tile['y'], 'z':_zoom})
        if (morex - 2) > tile['x']:
            for i in range(int(morex-1), int(tile['x'])):
                newtiles.append({'x':i,'y':tile['y'], 'z':_zoom})
    ## de-dupe
    newtiles = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in newtiles)]
    ## add fill tiles to boundary tiles
    tiles = tiles + newtiles
    ## de-dupe
    tiles = [dict(tupleized) for tupleized in set(tuple(item.items()) for item in tiles)]
    if coordsonly == 1:
        ## output coords
        pprint.pprint(tiles)
        print "\nFinished: %i tiles at zoom level %i\n" % (len(tiles), _zoom)
    else:
        ## download tiles
        print "\nDownloading %i tiles at zoom level %i" % (len(tiles), _zoom)
        ## make/empty the tiles folder
        folder = "tiles"
        if not os.path.exists(folder):
            os.makedirs(folder)
        # for the_file in os.listdir(folder):
        #     file_path = os.path.join(folder, the_file)
        #     try:
        #         if os.path.isfile(file_path):
        #             os.unlink(file_path)
        #     except Exception, e:
        #         print e
        total = len(tiles)
        if total == 0:
            print("Error: no tiles")
            exit()
        count = 0
        # Simple carriage-return progress meter.
        sys.stdout.write("\r%d%%" % (float(count)/float(total)*100.))
        sys.stdout.flush()
        for tile in tiles:
            tilename = "%i-%i-%i.json" % (_zoom,tile['x'],tile['y'])
            r = requests.get("https://tile.nextzen.org/tilezen/vector/v1/all/%i/%i/%i.json?api_key=tsINU1vsQnKLU1jjCimtVw" % (_zoom, tile['x'],tile['y']))
            j = json.loads(r.text)
            # extract only buildings layer - nextzen vector tile files are collections of geojson objects -
            # doing this turns each file into a valid standalone geojson files -
            # you can replace "buildings" with whichever layer you want
            # j = json.dumps(j["buildings"])
            # use this dumps() command instead for the original feature collection with all the data
            j = json.dumps(j);
            with open('tiles/'+tilename, 'w') as fd:
                fd.write(j.encode("UTF-8"))
                fd.close()
            count += 1
            sys.stdout.write("\r%d%%" % (float(count)/float(total)*100.))
            sys.stdout.flush()
##
## PROCESSING points
##
print "Processing:"
# Collect every <node> element's lat/lon as {'x': lon, 'y': lat}.
points = []
for node in root:
    if node.tag == "node":
        lat = float(node.attrib["lat"])
        lon = float(node.attrib["lon"])
        points.append({'y':lat, 'x':lon})
##
## GET TILES for all zoom levels
##
for z in zoom:
    getTiles(points,z)
| 5,241 | 0 | 111 |
0233556465378f52f8e0c9b96010bffb6dee39d0 | 714 | py | Python | e-gov Hackathon/ReplyBot1/test.py | chellabeatrixkiddo/MTechAssignments | 327159c44e91872ff539ec15d6c8e066b6cfef4c | [
"CC0-1.0"
] | null | null | null | e-gov Hackathon/ReplyBot1/test.py | chellabeatrixkiddo/MTechAssignments | 327159c44e91872ff539ec15d6c8e066b6cfef4c | [
"CC0-1.0"
] | null | null | null | e-gov Hackathon/ReplyBot1/test.py | chellabeatrixkiddo/MTechAssignments | 327159c44e91872ff539ec15d6c8e066b6cfef4c | [
"CC0-1.0"
] | null | null | null | import tweepy
from keys import keys
# Twitter credentials live in keys.py, outside source control.
CONSUMER_KEY = keys['consumer_key']
CONSUMER_SECRET = keys['consumer_secret']
ACCESS_TOKEN = keys['access_token']
ACCESS_TOKEN_SECRET = keys['access_token_secret']
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# Poll the search API forever and auto-reply to exact-match tweets.
# NOTE(review): no sleep between polls -- this will hit Twitter rate
# limits quickly; consider a time.sleep() per iteration.
while 1:
    twt = api.search(q="Jyo_Test_1")
    #list of specific strings we want to check for in Tweets
    t = ['Jyo_Test_1']
    for s in twt:
        for i in t:
            if i == s.text:
                sn = s.user.screen_name
                m = "@%s Hello! We will resolve your complaint" % (sn)
                s = api.update_status(m, s.id)
| 27.461538 | 70 | 0.645658 | import tweepy
from keys import keys
# Twitter credentials live in keys.py, outside source control.
CONSUMER_KEY = keys['consumer_key']
CONSUMER_SECRET = keys['consumer_secret']
ACCESS_TOKEN = keys['access_token']
ACCESS_TOKEN_SECRET = keys['access_token_secret']
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# Poll the search API forever and auto-reply to exact-match tweets.
# NOTE(review): no sleep between polls -- this will hit Twitter rate
# limits quickly; consider a time.sleep() per iteration.
while 1:
    twt = api.search(q="Jyo_Test_1")
    #list of specific strings we want to check for in Tweets
    t = ['Jyo_Test_1']
    for s in twt:
        for i in t:
            if i == s.text:
                sn = s.user.screen_name
                m = "@%s Hello! We will resolve your complaint" % (sn)
                s = api.update_status(m, s.id)
| 0 | 0 | 0 |
31798a13dc457f168afe39f5703df56d9635b651 | 74 | py | Python | cobain/structure/__init__.py | gecheline/cobain-code | 4f1c0e9c08d23bf7b5f57cc5af0fbf904d5109f7 | [
"MIT"
] | 2 | 2018-04-24T14:13:16.000Z | 2018-04-24T14:13:19.000Z | cobain/structure/__init__.py | gecheline/cobain01 | 4f1c0e9c08d23bf7b5f57cc5af0fbf904d5109f7 | [
"MIT"
] | 1 | 2018-10-22T17:08:40.000Z | 2018-10-22T17:08:40.000Z | cobain/structure/__init__.py | gecheline/cobain01 | 4f1c0e9c08d23bf7b5f57cc5af0fbf904d5109f7 | [
"MIT"
] | null | null | null | from constants import *
from polytropes import *
from potentials import *
| 18.5 | 24 | 0.797297 | from constants import *
from polytropes import *
from potentials import *
| 0 | 0 | 0 |
0a569b30a97a6e6fe704519e61b6543be9162f24 | 77 | py | Python | File Vault/src/webApp/utils/app/__init__.py | comvx/File-Vault | 305166a647f9cb001274250a96ea19c7dba1e369 | [
"MIT",
"Unlicense"
] | 3 | 2021-01-20T17:26:41.000Z | 2022-03-14T17:39:44.000Z | File Vault/dev enviroment/src/webApp/utils/app/__init__.py | comvx/File-Vault | 305166a647f9cb001274250a96ea19c7dba1e369 | [
"MIT",
"Unlicense"
] | null | null | null | File Vault/dev enviroment/src/webApp/utils/app/__init__.py | comvx/File-Vault | 305166a647f9cb001274250a96ea19c7dba1e369 | [
"MIT",
"Unlicense"
] | null | null | null | from webApp.utils.app.routes import *
from webApp.utils.app.forms import auth | 38.5 | 39 | 0.818182 | from webApp.utils.app.routes import *
from webApp.utils.app.forms import auth | 0 | 0 | 0 |
1b65f7b9f00421ed6ead25d24ae20f352063576b | 2,669 | py | Python | gateware/rtl/video/framer.py | gregdavill/DiVA-firmware | 2d67d8e5dd15c4a7863616979309700c3aeb74a2 | [
"BSD-2-Clause"
] | 9 | 2020-11-07T02:50:52.000Z | 2021-11-29T19:46:18.000Z | gateware/rtl/video/framer.py | gregdavill/DiVA-firmware | 2d67d8e5dd15c4a7863616979309700c3aeb74a2 | [
"BSD-2-Clause"
] | 17 | 2020-11-01T00:30:17.000Z | 2021-09-19T05:39:44.000Z | gateware/rtl/video/framer.py | gregdavill/DiVA-firmware | 2d67d8e5dd15c4a7863616979309700c3aeb74a2 | [
"BSD-2-Clause"
] | 2 | 2020-09-27T13:39:46.000Z | 2021-06-18T20:54:48.000Z | # This file is Copyright (c) 2020 Gregory Davill <greg.davill@gmail.com>
# License: BSD
from migen import *
from litex.soc.interconnect.stream import Endpoint
from rtl.edge_detect import EdgeDetect
from litex.soc.interconnect.csr import AutoCSR, CSR, CSRStatus, CSRStorage
from litex.soc.interconnect.stream import Endpoint, EndpointDescription, AsyncFIFO | 30.329545 | 87 | 0.498689 | # This file is Copyright (c) 2020 Gregory Davill <greg.davill@gmail.com>
# License: BSD
from migen import *
from litex.soc.interconnect.stream import Endpoint
from rtl.edge_detect import EdgeDetect
from litex.soc.interconnect.csr import AutoCSR, CSR, CSRStatus, CSRStorage
from litex.soc.interconnect.stream import Endpoint, EndpointDescription, AsyncFIFO
def framer_params():
    """Stream-parameter layout for the Framer: the active-window bounds.

    Each field is a 16-bit coordinate; per the comparisons in Framer,
    (x_start, y_start) is inclusive and (x_stop, y_stop) is exclusive.
    """
    _fields = ("x_start", "y_start", "x_stop", "y_stop")
    return [(name, 16) for name in _fields]
class Framer(Module, AutoCSR):
    """Crops an incoming 32-bit pixel stream to an active display window.

    Window bounds arrive on the `params` stream (see framer_params());
    hsync/vsync falling edges advance/reset the pixel and line counters.
    Inside the window the sink's data drives the RGB outputs; if the sink
    has no valid data, a debug color (0xFF, 0x77, 0xFF) is emitted.
    Pixel logic runs in the "video" clock domain.
    """
    def __init__(self):
        self.sink = sink = Endpoint([("data", 32)])
        self.params = params = Endpoint(framer_params())
        self.hsync = hsync = Signal()
        self.vsync = vsync = Signal()
        # VGA output
        self.red = red = Signal(8)
        self.green = green = Signal(8)
        self.blue = blue = Signal(8)
        self.data_valid = data_valid = Signal()
        # parameters
        pixel_counter = Signal(14)
        line_counter = Signal(14)
        # Falling-edge detectors for the sync signals, in the video domain.
        h_det = EdgeDetect(mode="fall", input_cd="video", output_cd="video")
        v_det = EdgeDetect(mode="fall", input_cd="video", output_cd="video")
        self.submodules += h_det, v_det
        self.comb += [
            h_det.i.eq(hsync),
            v_det.i.eq(vsync),
        ]
        # Consume from the sink only while inside the active window.
        self.comb += [
            If((line_counter >= params.y_start) & (line_counter < params.y_stop),
                If((pixel_counter >= params.x_start) & (pixel_counter < params.x_stop),
                    sink.ready.eq(1)
                )
            )
        ]
        self.sync.video += [
            # Default values
            red.eq(0),
            green.eq(0),
            blue.eq(0),
            data_valid.eq(0),
            # Show pixels
            If((line_counter >= params.y_start) & (line_counter < params.y_stop),
                If((pixel_counter >= params.x_start) & (pixel_counter < params.x_stop),
                    data_valid.eq(1),
                    If(sink.valid,
                        red.eq(sink.data[0:8]),
                        green.eq(sink.data[8:16]),
                        blue.eq(sink.data[16:24])
                    ).Else(
                        # Sink underrun: emit a conspicuous debug color.
                        red.eq(0xFF),
                        green.eq(0x77),
                        blue.eq(0xFF)
                    )
                )
            ),
            # Horizontal timing for one line
            pixel_counter.eq(pixel_counter + 1),
            If(h_det.o,
                pixel_counter.eq(0),
                line_counter.eq(line_counter + 1),
            ),
            If(v_det.o,
                line_counter.eq(0),
            )
        ]
] | 2,231 | 9 | 72 |
6b06fbd8744ffcbc1d05513c367e6f1c03a4f40c | 17,612 | py | Python | alife/stats.py | flags/Reactor-3 | b41a2904c9ec8cc14bcee03611602d0e568acf12 | [
"MIT"
] | 56 | 2015-04-20T08:31:29.000Z | 2021-12-19T14:05:18.000Z | alife/stats.py | HexDecimal/Reactor-3 | b41a2904c9ec8cc14bcee03611602d0e568acf12 | [
"MIT"
] | 2 | 2018-07-24T11:24:41.000Z | 2021-05-16T03:04:53.000Z | alife/stats.py | HexDecimal/Reactor-3 | b41a2904c9ec8cc14bcee03611602d0e568acf12 | [
"MIT"
] | 9 | 2015-11-03T02:56:20.000Z | 2021-04-28T08:19:57.000Z | from globals import *
import life as lfe
import historygen
import judgement
import survival
import speech
import groups
import combat
import camps
import sight
import brain
import zones
import bad_numbers
import logging
import random
MAX_INFLUENCE_FROM = 80
MAX_INTROVERSION = 10
MAX_CHARISMA = 9 | 25.824047 | 152 | 0.718544 | from globals import *
import life as lfe
import historygen
import judgement
import survival
import speech
import groups
import combat
import camps
import sight
import brain
import zones
import bad_numbers
import logging
import random
MAX_INFLUENCE_FROM = 80
MAX_INTROVERSION = 10
MAX_CHARISMA = 9
def init(life):
	"""Generate and attach the starting `stats` dict for a new ALife."""
	life['stats'] = historygen.create_background(life)
	#life['stats']['charisma'] = random.randint(1, MAX_CHARISMA)
def desires_job(life):
	"""Return True if this ALife is currently willing to take a job.

	Refusals use a 'wont_work' brain flag that counts down on each call;
	lone wolves refuse and re-arm the flag at 1000.
	"""
	#TODO: We recalculate this, but the answer is always the same.
	_wont = brain.get_flag(life, 'wont_work')
	if life['job'] or _wont:
		if _wont:
			# Tick the refusal timer down by one.
			_wont = brain.flag(life, 'wont_work', value=_wont-1)
		return False
	if not life['stats']['lone_wolf']:
		return True
	brain.flag(life, 'wont_work', value=1000)
	return False
def desires_life(life, life_id):
	"""Return True if `life`'s judge script finds factors to let `life_id` live."""
	return bool(lfe.execute_raw(life, 'judge', 'factors', life_id=life_id))
def desires_interaction(life):
	"""Return True if this ALife's talk script says it wants social interaction."""
	if not lfe.execute_raw(life, 'talk', 'desires_interaction'):
		return False
	return True
def desires_first_contact_with(life, life_id):
	"""Return True if `life` wants to initiate first contact with `life_id`.

	Only neutral-aligned targets are approached; non-leader group members
	stay quiet within 100 units of their leader.  Otherwise crime motive
	(>=4) or sociability (>=6) drives the decision.
	"""
	#print life['name'], LIFE[life_id]['name'],brain.knows_alife_by_id(life, life_id)['alignment']
	if not brain.knows_alife_by_id(life, life_id)['alignment'] == 'neutral':
		return False
	if life['group'] and not groups.is_leader(life, life['group'], life['id']):
		#Don't talk if we're in a group and near our leader.
		#TODO: #judgement Even then, we should consider having group members avoid non-members regardless.
		#TODO: #judgement How do group types play into this?
		_leader = brain.knows_alife_by_id(life, groups.get_leader(life, life['group']))
		if _leader:
			#TODO: #judgement Placeholder for future logic.
			if bad_numbers.distance(life['pos'], _leader['life']['pos']) < 100:
				return False
	if life['stats']['motive_for_crime']>=4:
		return True
	if life['stats']['sociability']>=6:
		return True
	return False
def desires_conversation_with(life, life_id):
	"""Return True if `life` wants to converse with the known ALife `life_id`."""
	_knows = brain.knows_alife_by_id(life, life_id)
	if not _knows:
		# Callers are expected to have established familiarity first.
		logging.error('FIXME: Improperly Used Function: Doesn\'t know talking target.')
		return False
	if not lfe.execute_raw(life, 'talk', 'desires_conversation_with', life_id=life_id):
		return False
	if not judgement.can_trust(life, life_id):
		return False
	return True
def desires_to_create_group(life):
	"""Return True if this ungrouped ALife's group script wants to found a group."""
	if life['group']:
		return False
	if not lfe.execute_raw(life, 'group', 'create_group'):
		return False
	return True
def wants_to_abandon_group(life, group_id):
	"""Return True if hostile-aligned members outnumber the rest.

	Tallies every other member's alignment as seen by `life`.
	NOTE(review): assumes `life` already knows every member --
	an unknown member would make knows_alife_by_id() return None here.
	"""
	_trusted = 0
	_hostile = 0
	for member in groups.get_group(life, group_id)['members']:
		if life['id'] == member:
			continue
		_knows = brain.knows_alife_by_id(life, member)
		if _knows['alignment'] == 'hostile':
			_hostile += 1
		else:
			_trusted += 1
	return _hostile>_trusted
def desires_group(life, group_id):
	"""Return True if `life` would want to join the group `group_id`."""
	if life['group']:
		# Already grouped: only desirable if we'd abandon the current one.
		# Bug fix: wants_to_abandon_group() takes only (life, group_id); the
		# old call passed a nonexistent `with_new_group_in_mind` keyword and
		# raised TypeError whenever a grouped ALife judged another group.
		# TODO: actually weigh the candidate group against the current one.
		return wants_to_abandon_group(life, life['group'])
	if judgement.judge_group(life, group_id)>get_minimum_group_score(life):
		return True
	return False
def desires_to_create_camp(life):
	"""Return True if a campless group leader with >1 member wants a camp."""
	if not 'CAN_GROUP' in life['life_flags']:
		return False
	if life['group'] and not groups.get_camp(life['group']) and groups.is_leader(life, life['group'], life['id']):
		if len(groups.get_group(life, life['group'])['members'])>1:
			return True
	return False
def desires_help_from(life, life_id):
	"""Return True if `life_id` is trusted and tension with them is tolerable."""
	return judgement.can_trust(life, life_id) and judgement.get_tension_with(life, life_id)<=judgement.get_max_tension_with(life, life_id)
def desires_shelter(life):
	"""Return True if this ALife's discover script says it wants shelter."""
	if not lfe.execute_raw(life, 'discover', 'desires_shelter'):
		return False
	#TODO: Why?
	if life['state'] == 'needs':
		return False
	return True
def desires_to_join_camp(life, camp_id):
	"""Return True if this loner wants to join camp `camp_id`.

	Refuses when grouped, already camped, a lone wolf, distrustful of the
	remembered founder, or after having already asked to join this camp.
	"""
	if life['group']:
		return False
	if life['camp']:
		print life['name'],'already has camp',camps.knows_founder(life, life['camp'])
		return False
	if life['stats']['lone_wolf']:
		return False
	_memories = lfe.get_memory(life, matches={'text': 'heard_about_camp', 'camp': camp_id, 'founder': '*'})
	if _memories:
		# Judge the camp by its remembered founder.
		_memory = _memories.pop()
		if not judgement.can_trust(life, _memory['founder']):
			print life['name'],'Cant trust founder' * 10
			return False
	if lfe.get_memory(life, matches={'text': 'ask_to_join_camp', 'camp': camp_id}):
		return False
	return True
def desires_weapon(life):
	"""Return True if this ALife currently has no weapon equipped."""
	if not combat.get_equipped_weapons(life):
		return True
	#if life['stats']['firearms'] >= 5:
	return False
def battle_cry(life):
	"""Perform this ALife's battle cry if its talk script defines an action one."""
	_battle_cry = lfe.execute_raw(life, 'talk', 'battle_cry')
	if _battle_cry == 'action':
		_battle_cry_action = lfe.execute_raw(life, 'talk', 'battle_cry_action')
		lfe.say(life, _battle_cry_action, action=True)
def get_melee_skill(life):
	"""Return the melee stat scaled to a 0.1-1.0 multiplier."""
	return bad_numbers.clip((life['stats']['melee'])/10.0, 0.1, 1)
def get_firearm_accuracy(life):
	"""Return the firearms stat scaled to a 0.35-1.0 accuracy multiplier."""
	return bad_numbers.clip((life['stats']['firearms'])/10.0, 0.35, 1)
def get_recoil_recovery_rate(life):
	"""Return recoil recovery: 20% of the clipped (0.4-1.0) firearms fraction."""
	return bad_numbers.clip(life['stats']['firearms']/10.0, 0.4, 1)*.2
def get_antisocial_percentage(life):
	"""Return introversion as a 0.0-1.0 fraction of MAX_INTROVERSION."""
	return life['stats']['introversion']/float(MAX_INTROVERSION)
def get_minimum_group_score(life):
	"""Return the score a candidate group must beat: the current group's score, else 0."""
	if life['group']:
		return judgement.judge_group(life, life['group'])
	return 0
def get_employability(life):
	"""Return how employable this ALife is.

	TODO: Placeholder -- currently a flat score for everyone; `life` is
	accepted only for interface parity with the other stat getters.
	"""
	_flat_score = 50
	return _flat_score
def get_group_motive(life):
	#Rank the character's driving motive from its stat sheet.
	#Crime only wins outright when strong (>=6) and not overshadowed
	#by an equally notable desire for wealth (>=5).
	_stats = life['stats']
	if _stats['motive_for_crime'] >= 6:
		return 'wealth' if _stats['motive_for_wealth'] >= 5 else 'crime'
	if _stats['motive_for_wealth'] >= 5:
		return 'wealth'
	return 'survival'
def get_minimum_camp_score(life):
	#Group leaders demand a camp big enough for every member; everyone else settles for 3.
	if life['group'] and groups.is_leader(life, life['group'], life['id']):
		return len(groups.get_group(life, life['group'])['members'])
	return 3
def wants_group_member(life, life_id):
	#True when `life` leads a group, knows and trusts `life_id`,
	#`life_id` is not already a member, and the 'group' script hook agrees.
	if not life['group']:
		return False
	if groups.is_member(life, life['group'], life_id):
		return False
	if not groups.is_leader(life, life['group'], life['id']):
		return False
	if not lfe.execute_raw(life, 'group', 'wants_group_member', life_id=life_id):
		return False
	_know = brain.knows_alife_by_id(life, life_id)
	if not _know:
		return False
	if not judgement.can_trust(life, life_id):
		return False
	return True
def will_obey(life, life_id):
	#Obeys only people it knows and trusts.
	_know = brain.knows_alife_by_id(life, life_id)
	if not _know:
		return False
	if judgement.can_trust(life, life_id):
		return True
	return False
def can_talk_to(life, life_id):
	#Target must be awake, alive, and the 'talk' script hook must allow it.
	if LIFE[life_id]['asleep'] or LIFE[life_id]['dead']:
		return False
	if not lfe.execute_raw(life, 'talk', 'can_talk_to', life_id=life_id):
		return False
	return True
def can_camp(life):
	#Delegates entirely to the 'camp' script hook.
	if not lfe.execute_raw(life, 'camp', 'can_camp'):
		return False
	return True
def can_create_camp(life):
	#Delegates entirely to the 'camp' script hook.
	if not lfe.execute_raw(life, 'camp', 'can_create_camp'):
		return False
	return True
def can_bite(life):
	#Return the first melee limb flagged CAN_BITE.
	#False: no melee limbs at all; None: melee limbs exist but none can bite.
	_limbs = lfe.get_melee_limbs(life)
	if not _limbs:
		return False
	return next((_limb for _limb in _limbs if 'CAN_BITE' in lfe.get_limb(life, _limb)['flags']), None)
def can_scratch(life):
	#Return the first melee limb flagged SHARP.
	#False: no melee limbs at all; None: melee limbs exist but none are sharp.
	_melee_limbs = lfe.get_melee_limbs(life)
	if not _melee_limbs:
		print life['name'],'no melee limbs'
		return False
	for limb in _melee_limbs:
		if 'SHARP' in lfe.get_limb(life, limb)['flags']:
			return limb
	print life['name'],'cant scratch'
	return None
def is_nervous(life, life_id):
	#Nervous when the 'judge' script hook agrees AND the target is within
	#half of the TARGET's own vision range (i.e. they could plausibly see us).
	if not lfe.execute_raw(life, 'judge', 'nervous', life_id=life_id):
		return False
	_dist = bad_numbers.distance(life['pos'], LIFE[life_id]['pos'])
	if _dist <= sight.get_vision(LIFE[life_id])/2:
		return True
	return False
def is_aggravated(life, life_id):
	#Pure delegation to the 'judge' script hook.
	if lfe.execute_raw(life, 'judge', 'aggravated', life_id=life_id):
		return True
	return False
def is_incapacitated(life):
	#Incapacitated when accumulated limb damage (cut flags plus pain values)
	#reaches 35% of the body's total size.
	_size = sum([lfe.get_limb(life, l)['size'] for l in life['body']])
	_count = 0
	for limb in life['body']:
		_count += lfe.limb_is_cut(life, limb)
		_count += lfe.get_limb_pain(life, limb)
	if (_count/float(_size))>=.35:
		return True
	return False
def is_intimidated_by(life, life_id):
	#Pure delegation to the 'safety' script hook.
	if lfe.execute_raw(life, 'safety', 'intimidated', life_id=life_id):
		return True
	return False
def is_intimidated(life):
	#Intimidated when any current (non-escaped) threat intimidates us.
	#for target_id in judgement.get_targets(life, ignore_escaped=True):
	#	if is_intimidated_by(life, target_id):
	#		return True
	for target_id in judgement.get_threats(life, ignore_escaped=True):
		if is_intimidated_by(life, target_id):
			return True
	return False
def is_injured(life):
	#Injured means at least one bleeding limb.
	return len(lfe.get_bleeding_limbs(life)) > 0
def is_confident(life):
	#Weigh the combat rating of self + trusted allies against known threats.
	#Confident while the friendly total stays within 2 points of the threat total.
	#Players never use this AI path.
	if 'player' in life:
		return False
	_friendly_confidence = judgement.get_ranged_combat_rating_of_target(life, life['id'])
	_threat_confidence = 0
	for target_id in judgement.get_trusted(life, visible=False):
		_knows = brain.knows_alife_by_id(life, target_id)
		if _knows['dead'] or _knows['asleep']:
			continue
		#Stale sightings (>30 ticks) reuse the cached 'threat_score' flag,
		#decayed linearly to zero over 300 ticks; fresh sightings recompute it.
		if _knows['last_seen_time']>30:
			if brain.get_alife_flag(life, target_id, 'threat_score'):
				_recent_mod = 1-(bad_numbers.clip(_knows['last_seen_time'], 0, 300)/300.0)
				_score = brain.get_alife_flag(life, target_id, 'threat_score')
				_friendly_confidence += _score*_recent_mod
			else:
				_friendly_confidence += 1
		else:
			_score = judgement.get_ranged_combat_rating_of_target(life, target_id)
			brain.flag_alife(life, target_id, 'threat_score', value=_score)
			_friendly_confidence += _score
	for target_id in judgement.get_threats(life, ignore_escaped=False):
		_knows = brain.knows_alife_by_id(life, target_id)
		if _knows['dead'] or _knows['asleep']:
			continue
		#Threats decay over 600 ticks, but only once unseen for >50 ticks.
		#Threats never actually seen (last_seen_time falsy) get scored without
		#an inventory check since their gear is unknown.
		if _knows['last_seen_time']:
			if brain.get_alife_flag(life, target_id, 'threat_score'):
				if _knows['last_seen_time']>50:
					_recent_mod = 1-(bad_numbers.clip(_knows['last_seen_time'], 0, 600)/600.0)
				else:
					_recent_mod = 1
				_score = brain.get_alife_flag(life, target_id, 'threat_score')
				_threat_confidence += _score*_recent_mod
			else:
				_threat_confidence += 1
		else:
			_score = judgement.get_ranged_combat_rating_of_target(life, target_id, inventory_check=False)
			brain.flag_alife(life, target_id, 'threat_score', value=_score)
			_threat_confidence += _score
	return _friendly_confidence-_threat_confidence>=-2
def is_threat_too_close(life):
	#True when the nearest known threat was seen recently (<100 ticks ago)
	#and its last-known position is inside this character's danger-close range.
	_nearest_threat = judgement.get_nearest_threat(life)
	if not _nearest_threat:
		return False
	_knows = brain.knows_alife_by_id(life, _nearest_threat)
	#Bug fix: this guard previously re-tested `_nearest_threat`, leaving the
	#memory lookup itself unchecked.
	if not _knows:
		return False
	if _knows['last_seen_time'] >= 100:
		return False
	_danger_close_range = int(lfe.execute_raw(life, 'safety', 'danger_close_range'))
	if bad_numbers.distance(life['pos'], _knows['last_seen_at'])<_danger_close_range:
		return True
	return False
def has_threat_in_combat_range(life):
	#True when any known threat's last-seen position is within engagement distance.
	_engage_distance = combat.get_engage_distance(life)
	for target_id in judgement.get_threats(life):
		_target = brain.knows_alife_by_id(life, target_id)
		if bad_numbers.distance(life['pos'], _target['last_seen_at']) <= _engage_distance:
			return True
	return False
def is_same_species(life, life_id):
	#Simple species-string comparison.
	if life['species'] == LIFE[life_id]['species']:
		return True
	return False
def is_family(life, life_id):
	#True when any kinship flag is set for a known alife.
	_know = brain.knows_alife_by_id(life, life_id)
	if not _know:
		return False
	for relation in ['son', 'daughter', 'mother', 'father', 'sibling']:
		if brain.get_alife_flag(life, life_id, relation):
			return True
	return False
def is_child_of(life, life_id):
	#True when `life_id` is flagged as this character's mother or father.
	_know = brain.knows_alife_by_id(life, life_id)
	if not _know:
		return False
	#A parent known to be dead (and not merely escaped) no longer counts.
	if not _know['escaped'] and _know['life']['dead']:
		return False
	for relation in ['mother', 'father']:
		if brain.get_alife_flag(life, life_id, relation):
			return True
	return False
def is_parent_of(life, life_id):
	#True when `life_id` is flagged as this character's son or daughter.
	_know = brain.knows_alife_by_id(life, life_id)
	if not _know:
		return False
	for relation in ['son', 'daughter']:
		if brain.get_alife_flag(life, life_id, relation):
			return True
	return False
def has_parent(life):
	#Scan everyone we know for a living parent.
	for life_id in life['know'].keys():
		if is_child_of(life, life_id):
			return True
	return False
def has_child(life):
	#Scan everyone we know for a child.
	for life_id in life['know'].keys():
		if is_parent_of(life, life_id):
			return True
	return False
def is_safe_in_shelter(life, life_id):
	#NOTE(review): both branches return True, so this always reports "safe";
	#the un-sheltered branch looks like it was meant to differ -- confirm intent.
	#`life_id` is accepted but unused.
	if not lfe.is_in_shelter(life):
		return True
	return True
def is_born_leader(life):
	#Innate leadership flag from character generation.
	return life['stats']['is_leader']
def is_psychotic(life):
	#Innate psychosis flag from character generation.
	return life['stats']['psychotic']
def _has_attacked(life, life_id, target_list):
	#True if `life` remembers hearing that `life_id` attacked anyone in `target_list`.
	for memory in lfe.get_memory(life, matches={'text': 'heard about attack', 'attacker': life_id}):
		if memory['target'] in target_list:
			return True
	return False
def has_attacked_trusted(life, life_id):
	#Has `life_id` attacked anyone this character trusts?
	return _has_attacked(life, life_id, judgement.get_trusted(life))
def has_attacked_self(life, life_id):
	#True if `life` remembers being shot by `life_id`.
	return len(lfe.get_memory(life, matches={'text': 'shot_by', 'target': life_id}))>0
def react_to_attack(life, life_id):
	#Being attacked: mark the attacker hostile (via dialog) if not already,
	#and warn group members who saw them recently.
	_knows = brain.knows_alife_by_id(life, life_id)
	if not _knows['alignment'] == 'hostile':
		speech.start_dialog(life, _knows['life']['id'], 'establish_hostile')
	if life['group']:
		#Only members who saw the attacker within the last 30 ticks are told,
		#and the announcement is suppressed if repeated within 150 ticks.
		groups.announce(life,
			life['group'],
			'attacked_by_hostile',
			target_id=_knows['life']['id'],
			filter_if=lambda life_id: brain.knows_alife_by_id(life, life_id)['last_seen_time']<=30,
			ignore_if_said_in_last=150)
def react_to_tension(life, life_id):
	#Escalation ladder toward an armed, non-hostile stranger:
	#confront -> threaten (after ~135 ticks) -> hostile (after ~185 ticks).
	#Already-hostile targets are handled elsewhere.
	if brain.knows_alife_by_id(life, life_id)['alignment'] in ['hostile']:
		return False
	#Followers defer when both they and the target can see their leader.
	if life['group'] and not groups.is_leader(life, life['group'], life['id']) and groups.get_leader(life, life['group']):
		if sight.can_see_target(life, groups.get_leader(life, life['group'])) and sight.can_see_target(LIFE[life_id], groups.get_leader(life, life['group'])):
			return False
	#'disarm' flag stores the tick at which we started demanding they drop weapons.
	_disarm = brain.get_alife_flag(life, life_id, 'disarm')
	if _disarm:
		#For now...
		#Lost sight of the target mid-confrontation: alert the group instead.
		if not sight.can_see_position(life, LIFE[life_id]['pos']):
			groups.announce(life,
				life['group'],
				'attacked_by_hostile',
				filter_if=lambda life_id: brain.knows_alife_by_id(life, life_id)['last_seen_time']<=30,
				target_id=life_id)
			return False
		#If the target no longer shows a gun, stand down and tell them so.
		for item_uid in lfe.get_all_visible_items(LIFE[life_id]):
			if ITEMS[item_uid]['type'] == 'gun':
				break
		else:
			brain.unflag_alife(life, life_id, 'disarm')
			speech.start_dialog(life, life_id, 'clear_drop_weapon')
			return False
		_time_elapsed = WORLD_INFO['ticks']-_disarm
		if _time_elapsed>135 and not speech.has_sent(life, life_id, 'threaten'):
			speech.start_dialog(life, life_id, 'threaten')
			speech.send(life, life_id, 'threaten')
		elif _time_elapsed>185:
			speech.start_dialog(life, life_id, 'establish_hostile')
	elif not speech.has_sent(life, life_id, 'confront'):
		speech.start_dialog(life, life_id, 'confront')
		speech.send(life, life_id, 'confront')
def ask_for_help(life, life_id):
	#Open a 'hurt' dialog with `life_id`, at most once per listener.
	#(Removed an unused `_bleeding_limbs` local; injury severity was computed
	#but never used here.)
	if not speech.has_sent(life, life_id, 'hurt'):
		speech.start_dialog(life, life_id, 'hurt')
		speech.send(life, life_id, 'hurt')
def wants_alignment_change(life, life_id):
	#Being healed by someone we only pretended to trust upgrades them
	#to genuine 'trust'. Returns the new alignment or None.
	_target = brain.knows_alife_by_id(life, life_id)
	for memory in lfe.get_memory(life, matches={'text': 'healed_by'}):
		if memory['target'] == life_id:
			if _target['alignment'] == 'feign_trust':
				return 'trust'
	return None
def distance_from_pos_to_pos(life, pos1, pos2):
	#Plain positional distance; `life` is unused (kept for the shared callback signature).
	return bad_numbers.distance(pos1, pos2)
def get_goal_alignment_for_target(life, life_id):
	#Score two competing stances toward `life_id` (genuine vs malicious, both
	#starting at 100) and map the outcome to an alignment string.
	#Returns 'hostile', 'trust', 'feign_trust', or False when neither stance
	#reaches the minimum threshold.
	_genuine = 100
	_malicious = 100
	#Psychotics are hostile to everyone outside their own group.
	if is_psychotic(life):
		if life['group']:
			if not life['group'] == LIFE[life_id]['group']:
				return 'hostile'
		else:
			return 'hostile'
	_malicious*=life['stats']['motive_for_crime']/10.0
	if life['stats']['lone_wolf']:
		_malicious*=.65
		_genuine*=.65
	if life['stats']['self_absorbed']:
		_malicious*=.85
	if not _genuine>=50 and not _malicious>=50:
		return False
	#Strong pulls in both directions -> pretend to trust while staying wary.
	if _malicious>=75 and _genuine>=75:
		return 'feign_trust'
	if _genuine>_malicious:
		return 'trust'
	return 'hostile'
def change_alignment(life, life_id, alignment):
	#Set `life`'s remembered alignment toward `life_id`, introducing the
	#two characters first if they have never met.
	_knows = brain.knows_alife_by_id(life, life_id)
	if not _knows:
		brain.meet_alife(life, LIFE[life_id])
		_knows = brain.knows_alife_by_id(life, life_id)
	logging.debug('%s changed alignment of %s: %s' % (' '.join(life['name']), ' '.join(LIFE[life_id]['name']), alignment))
	_knows['alignment'] = alignment
#Thin named wrappers over change_alignment(), one per alignment value.
def establish_trust(life, life_id):
	change_alignment(life, life_id, 'trust')
def establish_feign_trust(life, life_id):
	change_alignment(life, life_id, 'feign_trust')
def establish_aggressive(life, life_id):
	change_alignment(life, life_id, 'aggressive')
def establish_hostile(life, life_id):
	change_alignment(life, life_id, 'hostile')
def establish_scared(life, life_id):
	change_alignment(life, life_id, 'scared')
def declare_group_target(life, target_id, alignment):
	#Adopt the alignment personally, then broadcast the target to the group.
	change_alignment(life, target_id, alignment)
	groups.announce(life, life['group'], 'add_group_target', target_id=target_id)
def declare_group(life, group_id, alignment):
	#Record an alignment toward a whole group and apply it to every
	#member this character remembers belonging to it.
	groups.update_group_memory(life, group_id, 'alignment', alignment)
	for member in groups.get_group_memory(life, group_id, 'members'):
		change_alignment(life, member, alignment)
	logging.debug('%s declared group %s %s.' % (' '.join(life['name']), group_id, alignment))
#Thin named wrappers over declare_group().
def declare_group_trusted(life, group_id):
	declare_group(life, group_id, 'trust')
def declare_group_hostile(life, group_id):
	declare_group(life, group_id, 'hostile')
def declare_group_scared(life, group_id):
	declare_group(life, group_id, 'scared')
6a7d4a0082b0eb7a570ad9c0c1c8abfa45a66d5b | 1,930 | py | Python | ndvi.py | ismailsunni/docker-qgis3-model | 04d98564b04253dc19810ebc6ebb1d679292cd13 | [
"MIT"
] | 2 | 2019-06-03T20:17:39.000Z | 2020-09-11T08:40:24.000Z | ndvi.py | ismailsunni/docker-qgis3-model | 04d98564b04253dc19810ebc6ebb1d679292cd13 | [
"MIT"
] | null | null | null | ndvi.py | ismailsunni/docker-qgis3-model | 04d98564b04253dc19810ebc6ebb1d679292cd13 | [
"MIT"
] | 1 | 2019-06-27T21:59:29.000Z | 2019-06-27T21:59:29.000Z | from qgis.core import QgsProcessing
from qgis.core import QgsProcessingAlgorithm
from qgis.core import QgsProcessingMultiStepFeedback
from qgis.core import QgsProcessingParameterRasterLayer
from qgis.core import QgsProcessingParameterRasterDestination
import processing
| 38.6 | 150 | 0.691192 | from qgis.core import QgsProcessing
from qgis.core import QgsProcessingAlgorithm
from qgis.core import QgsProcessingMultiStepFeedback
from qgis.core import QgsProcessingParameterRasterLayer
from qgis.core import QgsProcessingParameterRasterDestination
import processing
class Calculate_ndvi(QgsProcessingAlgorithm):
    """QGIS processing algorithm that computes NDVI from NIR and red rasters.

    NDVI = (NIR - RED) / (NIR + RED), evaluated with the QGIS raster
    calculator over the extent of the NIR input.
    """
    def initAlgorithm(self, config=None):
        # Two raster inputs (NIR and red bands) and one raster output.
        self.addParameter(QgsProcessingParameterRasterLayer('inputnirband', 'INPUT_NIR_BAND', defaultValue=None))
        self.addParameter(QgsProcessingParameterRasterLayer('inputredband', 'INPUT_RED_BAND', defaultValue=None))
        self.addParameter(QgsProcessingParameterRasterDestination('Output', 'OUTPUT', createByDefault=True, defaultValue=None))
    def processAlgorithm(self, parameters, context, model_feedback):
        # Use a multi-step feedback, so that individual child algorithm progress reports are adjusted for the
        # overall progress through the model
        feedback = QgsProcessingMultiStepFeedback(1, model_feedback)
        results = {}
        outputs = {}
        # Raster calculator
        # NOTE(review): band references ("@1") assume each input is single-band
        # or that band 1 is the one wanted -- confirm for multi-band inputs.
        alg_params = {
            'CELLSIZE': 30,
            'CRS': 'ProjectCrs',
            'EXPRESSION': ' ( \"INPUT_NIR_BAND@1\" - \"INPUT_RED_BAND@1\" ) / ( \"INPUT_NIR_BAND@1\" + \"INPUT_RED_BAND@1\" ) ',
            'EXTENT': parameters['inputnirband'],
            'LAYERS': [],
            'OUTPUT': parameters['Output']
        }
        outputs['RasterCalculator'] = processing.run('qgis:rastercalculator', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
        results['Output'] = outputs['RasterCalculator']['OUTPUT']
        return results
    def name(self):
        # Unique algorithm id.
        return 'Calculate_NDVI'
    def displayName(self):
        # Human-readable name shown in the toolbox.
        return 'Calculate_NDVI'
    def group(self):
        # Toolbox group label.
        return 'AWF'
    def groupId(self):
        # Toolbox group id.
        return 'AWF'
    def createInstance(self):
        # Factory used by the QGIS processing framework.
        return Calculate_ndvi()
| 1,423 | 24 | 212 |
03b515f57dd956e4947044b58411862c28b988fe | 6,241 | py | Python | bof_torch.py | xujli/cbof_torch | ed8d67dd7a41b6345305d970d0f8fa0892f8ccee | [
"MIT"
] | null | null | null | bof_torch.py | xujli/cbof_torch | ed8d67dd7a41b6345305d970d0f8fa0892f8ccee | [
"MIT"
] | null | null | null | bof_torch.py | xujli/cbof_torch | ed8d67dd7a41b6345305d970d0f8fa0892f8ccee | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import torch
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
import numpy as np
def initialize_bof_layers(model, data_loader, n_samples=100, n_feature_samples=5000, batch_size=32, k_means_max_iters=300,
                          k_means_n_init=4):
    """
    Initializes the BoF layers of a model by k-means clustering of sampled features.
    :param model: the model
    :param data_loader: data loader used for initializing the model
    :param n_samples: number of data samples used for the initialization
    :param n_feature_samples: number of feature vectors to be used for the clustering process
    :param batch_size: batch size assumed when deciding how many batches to draw
    :param k_means_max_iters: the maximum number of iterations for the clustering algorithm (k-means)
    :param k_means_n_init: defines how many times to run the k-means algorithm
    :return: None (the BoF layers' V and sigmas parameters are set in place)
    """
    features = {}

    # Bug fix: this forward-pre-hook factory was missing, which made the
    # register_forward_pre_hook(get_features(name)) call below a NameError.
    def get_features(name):
        def hook(module, input):
            # Collect the (N*H*W, C) feature vectors entering the BoF layer.
            if len(input) == 1:
                data = input[0].cpu().detach().permute([0, 2, 3, 1]).numpy()
                features[name].append(data.reshape(-1, data.shape[-1]))
        return hook

    iternum = int(n_samples / batch_size + 0.5)
    for name, layer in model.named_modules():
        if isinstance(layer, BoF_Pooling):
            print("Found BoF layer (layer %s), initializing..." % name)
            features[name] = []
            handler = layer.register_forward_pre_hook(get_features(name))
            # Iterate the dataset to trigger the hook and gather features.
            for i in range(iternum):
                # Bug fix: `.next()` is Python 2 only; use the builtin next().
                # (Note: re-creating the iterator each pass re-reads from the start.)
                data, labels = next(iter(data_loader))
                if len(list(data.shape)) == 5:
                    data = data[:, 0]
                if torch.cuda.is_available():
                    data = data.cuda()
                output = model(data)
            handler.remove()
            layer_features = np.concatenate(features[name])
            np.random.shuffle(layer_features)
            layer_features = layer_features[:n_feature_samples]
            # Cluster the features; the centroids become the codebook V.
            kmeans = KMeans(n_clusters=layer.N_k, n_init=k_means_n_init, max_iter=k_means_max_iters)
            kmeans.fit(layer_features)
            V = kmeans.cluster_centers_
            V = V.reshape((V.shape[0], V.shape[1], 1, 1))
            # Set the value for the codebook
            layer.V.data = torch.tensor(np.float32(V)).cuda() if torch.cuda.is_available() else \
                torch.tensor(np.float32(V))
            # Mean pairwise distance of a feature sample initializes the RBF widths.
            mean_dist = np.mean(pairwise_distances(layer_features[:100]))
            sigmas = np.ones((1, layer.N_k, 1, 1)) * (mean_dist ** 2)
            layer.sigmas.data = torch.tensor(np.float32(sigmas)).cuda() if torch.cuda.is_available() else \
                torch.tensor(np.float32(sigmas))
if __name__ == '__main__':
    # Smoke test: pool a constant feature map and print the mean histogram value.
    # NOTE(review): relies on a BoF_Pooling definition being present in this module.
    x = torch.ones(size=(32, 32, 11, 11)) * 0.5
    model = BoF_Pooling(64, features=32, spatial_level=1)
    y = model(x)
    print(y.mean())
| 43.340278 | 122 | 0.617529 | import torch.nn as nn
import torch.nn.functional as F
import torch
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import pairwise_distances
import numpy as np
class BoF_Pooling(nn.Module):
    """Bag-of-Features (CBoF) pooling layer.

    Soft-assigns every spatial feature vector of the input feature map to a
    learnable codebook of `n_codewords` centers (RBF kernel with learnable
    widths) and pools the assignments into a histogram.

    :param n_codewords: number of codewords (histogram bins)
    :param features: number of input channels (codeword dimensionality)
    :param spatial_level: 0 -> global pooling (output: n_codewords),
        1 -> 2x2 spatial grid with a shared codebook (output: 4 * n_codewords)
    """

    def __init__(self, n_codewords, features, spatial_level=0, **kwargs):
        super(BoF_Pooling, self).__init__()
        self.N_k = n_codewords
        self.spatial_level = spatial_level
        self.V, self.sigmas = None, None
        self.relu = nn.ReLU()
        self.init(features)
        self.softmax = nn.Softmax(dim=1)

    def init(self, features):
        # Codebook stored as a 1x1 conv kernel: (n_codewords, features, 1, 1).
        self.V = nn.Parameter(nn.init.uniform_(torch.empty((self.N_k, features, 1, 1), requires_grad=True)))
        # Per-codeword RBF widths, broadcast over batch and spatial dims.
        self.sigmas = nn.Parameter(nn.init.constant_(torch.empty((1, self.N_k, 1, 1), requires_grad=True), 0.1))

    def forward(self, input):
        # Squared pairwise distances ||x - v||^2 = ||x||^2 + ||v||^2 - 2<x, v>,
        # computed per spatial location via a 1x1 convolution.
        # Bug fix: the ||x||^2 term must sum the *squared* input over channels;
        # the original summed the raw input, breaking the distance computation.
        x_square = torch.sum(input ** 2, dim=1, keepdim=True)
        y_square = torch.sum(self.V ** 2, dim=1, keepdim=True).permute([3, 0, 1, 2])
        dists = x_square + y_square - 2 * F.conv2d(input, self.V)
        # Clamp small negative values from floating-point error while keeping
        # the op differentiable (ReLU in place of torch.maximum with zeros).
        dists = self.relu(dists)
        # Soft quantization: RBF membership, normalized across codewords.
        quantized_features = self.softmax(- dists / (self.sigmas ** 2))
        # Pool memberships into a histogram.
        if self.spatial_level == 0:
            histogram = torch.mean(quantized_features, dim=[2, 3])
        elif self.spatial_level == 1:
            # 2x2 spatial pyramid: one histogram per quadrant, concatenated.
            shape = quantized_features.shape
            mid_1 = int(shape[2] / 2)
            mid_2 = int(shape[3] / 2)
            histogram1 = torch.mean(quantized_features[:, :, :mid_1, :mid_2], [2, 3])
            histogram2 = torch.mean(quantized_features[:, :, mid_1:, :mid_2], [2, 3])
            histogram3 = torch.mean(quantized_features[:, :, :mid_1, mid_2:], [2, 3])
            histogram4 = torch.mean(quantized_features[:, :, mid_1:, mid_2:], [2, 3])
            histogram = torch.stack([histogram1, histogram2, histogram3, histogram4], 1)
            histogram = torch.reshape(histogram, (-1, 4 * self.N_k))
        else:
            # No other spatial level is currently supported (it is trivial to extend the code)
            assert False
        # Rescale so each histogram sums to N_k instead of 1 (avoids tiny activations).
        return histogram * self.N_k

    def compute_output_shape(self, input_shape):
        # Output feature count: N_k at spatial level 0, 4 * N_k at level 1.
        if self.spatial_level == 0:
            return (input_shape[0], self.N_k)
        elif self.spatial_level == 1:
            return (input_shape[0], 4 * self.N_k)
def initialize_bof_layers(model, data_loader, n_samples=100, n_feature_samples=5000, batch_size=32, k_means_max_iters=300,
                          k_means_n_init=4):
    """
    Initializes the BoF layers of a model by k-means clustering of sampled features.
    :param model: the model
    :param data_loader: data loader used for initializing the model
    :param n_samples: number of data samples used for the initialization
    :param n_feature_samples: number of feature vectors to be used for the clustering process
    :param batch_size: batch size assumed when deciding how many batches to draw
    :param k_means_max_iters: the maximum number of iterations for the clustering algorithm (k-means)
    :param k_means_n_init: defines how many times to run the k-means algorithm
    :return: None (the BoF layers' V and sigmas parameters are set in place)
    """
    features = {}

    def get_features(name):
        # Forward-pre-hook factory: records the (N*H*W, C) feature vectors
        # entering the BoF layer under `name`.
        def hook(module, input):
            if len(input) == 1:
                data = input[0].cpu().detach().permute([0, 2, 3, 1]).numpy()
                features[name].append(data.reshape(-1, data.shape[-1]))
        return hook

    iternum = int(n_samples / batch_size + 0.5)
    for name, layer in model.named_modules():
        if isinstance(layer, BoF_Pooling):
            print("Found BoF layer (layer %s), initializing..." % name)
            features[name] = []
            handler = layer.register_forward_pre_hook(get_features(name))
            # Iterate the dataset to trigger the hook and gather features.
            for i in range(iternum):
                # Bug fix: `.next()` is Python 2 only; use the builtin next().
                # (Note: re-creating the iterator each pass re-reads from the start.)
                data, labels = next(iter(data_loader))
                if len(list(data.shape)) == 5:
                    data = data[:, 0]
                if torch.cuda.is_available():
                    data = data.cuda()
                output = model(data)
            handler.remove()
            layer_features = np.concatenate(features[name])
            np.random.shuffle(layer_features)
            layer_features = layer_features[:n_feature_samples]
            # Cluster the features; the centroids become the codebook V.
            kmeans = KMeans(n_clusters=layer.N_k, n_init=k_means_n_init, max_iter=k_means_max_iters)
            kmeans.fit(layer_features)
            V = kmeans.cluster_centers_
            V = V.reshape((V.shape[0], V.shape[1], 1, 1))
            # Set the value for the codebook
            layer.V.data = torch.tensor(np.float32(V)).cuda() if torch.cuda.is_available() else \
                torch.tensor(np.float32(V))
            # Mean pairwise distance of a feature sample initializes the RBF widths.
            mean_dist = np.mean(pairwise_distances(layer_features[:100]))
            sigmas = np.ones((1, layer.N_k, 1, 1)) * (mean_dist ** 2)
            layer.sigmas.data = torch.tensor(np.float32(sigmas)).cuda() if torch.cuda.is_available() else \
                torch.tensor(np.float32(sigmas))
if __name__ == '__main__':
    # Smoke test: pool a constant feature map and print the mean histogram value.
    x = torch.ones(size=(32, 32, 11, 11)) * 0.5
    model = BoF_Pooling(64, features=32, spatial_level=1)
    y = model(x)
    print(y.mean())
| 3,091 | 8 | 156 |
1f52ae0846d6b5f387a3df82623f2baf524401d1 | 810 | py | Python | examples/ping_pong.py | arago/gevent-tiny-actorsystem | f55abee8bd2089a9965a517e9fc6126ab5476f87 | [
"MIT"
] | null | null | null | examples/ping_pong.py | arago/gevent-tiny-actorsystem | f55abee8bd2089a9965a517e9fc6126ab5476f87 | [
"MIT"
] | null | null | null | examples/ping_pong.py | arago/gevent-tiny-actorsystem | f55abee8bd2089a9965a517e9fc6126ab5476f87 | [
"MIT"
] | 2 | 2019-11-28T22:07:17.000Z | 2021-07-01T09:01:15.000Z | #!/usr/bin/env python3
from gevent import monkey; monkey.patch_all()
from arago.actors import Actor, Root
import arago.actors.pattern_matching as matching
from arago.common.logging import getCustomLogger
logger = getCustomLogger(level="WARNING")
players = [PingPong(name="Player One"), PingPong(name="Player Two")]
players[0].serve(opponent=players[1])
Root(name="root", children=players)
| 27 | 68 | 0.739506 | #!/usr/bin/env python3
from gevent import monkey; monkey.patch_all()
from arago.actors import Actor, Root
import arago.actors.pattern_matching as matching
from arago.common.logging import getCustomLogger
logger = getCustomLogger(level="WARNING")
class PingPong(Actor):
	"""Actor that answers every "Ping" with a "Pong" and vice versa, forever."""
	def serve(self, opponent):
		#Start the rally by sending the first "Ping" to the opponent.
		opponent.tell("Ping", sender=self)
	#NOTE(review): the three `handle` definitions deliberately share a name --
	#presumably the pattern-matching decorators register each variant before
	#the next definition shadows it; confirm in arago.actors.pattern_matching.
	@matching.match(msg = "Ping", sender = matching.isoftype(Actor))
	def handle(self, msg, payload, sender):
		#Answer a ping with a pong.
		sender.tell("Pong")
	@matching.match(msg = "Pong", sender = matching.isoftype(Actor))
	def handle(self, msg, payload, sender):
		#Answer a pong with a ping.
		sender.tell("Ping")
	@matching.default
	def handle(self, msg, payload, sender):
		#Ignore anything that is neither ping nor pong.
		pass
# Wire up two players, serve the first ball, and supervise them under a root
# actor (presumably Root blocks / keeps the process alive -- confirm in arago.actors).
players = [PingPong(name="Player One"), PingPong(name="Player Two")]
players[0].serve(opponent=players[1])
Root(name="root", children=players)
| 147 | 247 | 23 |
ddbd2047a7b0a039506309a52d105624b98a0fcf | 553 | py | Python | labs/lab-05/scripts/sb_sample_app.py | AndreyVoytuk/bigdata-playground-master | 9faf3ca7ca5642a737f477768d4ddee9e5e81009 | [
"Apache-2.0"
] | 4 | 2019-09-16T07:02:57.000Z | 2020-07-09T10:12:58.000Z | labs/lab-05/scripts/sb_sample_app.py | AndreyVoytuk/bigdata-playground-master | 9faf3ca7ca5642a737f477768d4ddee9e5e81009 | [
"Apache-2.0"
] | null | null | null | labs/lab-05/scripts/sb_sample_app.py | AndreyVoytuk/bigdata-playground-master | 9faf3ca7ca5642a737f477768d4ddee9e5e81009 | [
"Apache-2.0"
] | 4 | 2019-09-15T20:44:51.000Z | 2019-09-27T18:31:23.000Z | import pyspark
import random
if __name__ == "__main__":
    # Monte Carlo estimate of pi on a standalone Spark cluster:
    # sample points in the unit square and count those inside the unit circle.
    sc_conf = pyspark.SparkConf()
    sc_conf.setAppName("pi-app")
    sc_conf.setMaster('spark://spark-master-01:7077')
    sc_conf.set('spark.executor.cores','1')
    sc_conf.set("spark.executor.memory", '1g')
    sc = pyspark.SparkContext(conf=sc_conf)
    num_samples = 1000000000

    # Bug fix: the sampling predicate `inside` was missing, which made the
    # `.filter(inside)` call below raise a NameError.
    def inside(p):
        x, y = random.random(), random.random()
        return x*x + y*y < 1

    count = sc.parallelize(range(0, num_samples)).filter(inside).count()
    pi = 4 * count / num_samples
    print(pi)
    sc.stop()
| 21.269231 | 70 | 0.669078 | import pyspark
import random
if __name__ == "__main__":
    # Monte Carlo estimate of pi on a standalone Spark cluster:
    # sample points in the unit square and count those inside the unit circle.
    sc_conf = pyspark.SparkConf()
    sc_conf.setAppName("pi-app")
    sc_conf.setMaster('spark://spark-master-01:7077')
    sc_conf.set('spark.executor.cores','1')
    sc_conf.set("spark.executor.memory", '1g')
    sc = pyspark.SparkContext(conf=sc_conf)
    num_samples = 1000000000
    # Predicate: does a random point in the unit square land inside the circle?
    # (The argument `p` is ignored; each call draws a fresh random point.)
    def inside(p):
        x, y = random.random(), random.random()
        return x*x + y*y < 1
    count = sc.parallelize(range(0, num_samples)).filter(inside).count()
    pi = 4 * count / num_samples
    print(pi)
    sc.stop()
| 67 | 0 | 25 |
e7adba8b2cf3c64b73de2096ac44fc8cf879a089 | 4,772 | py | Python | bika/lims/jsonapi/read.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/jsonapi/read.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/jsonapi/read.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | from Products.CMFPlone.utils import safe_unicode
from bika.lims import logger, to_utf8
from bika.lims.interfaces import IJSONReadExtender
from bika.lims.jsonapi import get_include_fields
from plone.jsonapi.core import router
from plone.jsonapi.core.interfaces import IRouteProvider
from plone.protect.authenticator import AuthenticatorView
from bika.lims.jsonapi import load_brain_metadata
from bika.lims.jsonapi import load_field_values
from Products.CMFCore.utils import getToolByName
from zope import interface
from zope.component import getAdapters
import re
import App
| 32.910345 | 80 | 0.64627 | from Products.CMFPlone.utils import safe_unicode
from bika.lims import logger, to_utf8
from bika.lims.interfaces import IJSONReadExtender
from bika.lims.jsonapi import get_include_fields
from plone.jsonapi.core import router
from plone.jsonapi.core.interfaces import IRouteProvider
from plone.protect.authenticator import AuthenticatorView
from bika.lims.jsonapi import load_brain_metadata
from bika.lims.jsonapi import load_field_values
from Products.CMFCore.utils import getToolByName
from zope import interface
from zope.component import getAdapters
import re
import App
def read(context, request):
    """Search a portal catalog with filters taken from the request and return
    a JSON-ready dict: matching objects (one page), paging info, and a CSRF
    authenticator token.

    Request parameters: catalog_name (default portal_catalog), any catalog
    index (scalar or "index[]" list form), limit, sort_on, sort_order,
    page_nr, page_size, and include_fields.
    """
    # Extract the CSRF token value out of the rendered <input> tag.
    # NOTE(review): re.match returning None (tag format change) would raise
    # AttributeError here -- confirm the expected tag shape.
    tag = AuthenticatorView(context, request).authenticator()
    pattern = '<input .*name="(\w+)".*value="(\w+)"'
    _authenticator = re.match(pattern, tag).groups()[1]
    ret = {
        "url": router.url_for("read", force_external=True),
        "success": True,
        "error": False,
        "objects": [],
        "_authenticator": _authenticator,
    }
    debug_mode = App.config.getConfiguration().debug_mode
    catalog_name = request.get("catalog_name", "portal_catalog")
    if not catalog_name:
        raise ValueError("bad or missing catalog_name: " + catalog_name)
    catalog = getToolByName(context, catalog_name)
    indexes = catalog.indexes()
    # Build the catalog query from any request key matching an index name.
    contentFilter = {}
    for index in indexes:
        if index in request:
            # Skip JSON-looking review_state values (handled elsewhere).
            if index == 'review_state' and "{" in request[index]:
                continue
            contentFilter[index] = safe_unicode(request[index])
        # "index[]" form carries multi-valued filters.
        if "%s[]"%index in request:
            value = request["%s[]"%index]
            if type(value) in (list, tuple):
                contentFilter[index] = [safe_unicode(v) for v in value]
            else:
                contentFilter[index] = value
    # Non-numeric limits are silently ignored.
    if 'limit' in request:
        try:
            contentFilter['sort_limit'] = int(request["limit"])
        except ValueError:
            pass
    sort_on = request.get('sort_on', 'id')
    contentFilter['sort_on'] = sort_on
    # sort order
    sort_order = request.get('sort_order', '')
    if sort_order:
        contentFilter['sort_order'] = sort_order
    else:
        sort_order = 'ascending'
        contentFilter['sort_order'] = 'ascending'
    include_fields = get_include_fields(request)
    if debug_mode:
        logger.info("contentFilter: " + str(contentFilter))
    # Get matching objects from catalog
    proxies = catalog(**contentFilter)
    # batching items
    page_nr = int(request.get("page_nr", 0))
    try:
        page_size = int(request.get("page_size", 10))
    except ValueError:
        page_size = 10
    # page_size == 0: show all
    if page_size == 0:
        page_size = len(proxies)
    first_item_nr = page_size * page_nr
    # Out-of-range pages fall back to the first page.
    if first_item_nr > len(proxies):
        first_item_nr = 0
    page_proxies = proxies[first_item_nr:first_item_nr + page_size]
    for proxy in page_proxies:
        obj_data = {}
        # Place all proxy attributes into the result.
        obj_data.update(load_brain_metadata(proxy, include_fields))
        # Place all schema fields ino the result.
        obj = proxy.getObject()
        obj_data.update(load_field_values(obj, include_fields))
        obj_data['path'] = "/".join(obj.getPhysicalPath())
        # call any adapters that care to modify this data.
        adapters = getAdapters((obj, ), IJSONReadExtender)
        for name, adapter in adapters:
            adapter(request, obj_data)
        ret['objects'].append(obj_data)
    # Paging metadata for the caller.
    ret['total_objects'] = len(proxies)
    ret['first_object_nr'] = first_item_nr
    last_object_nr = first_item_nr + len(page_proxies)
    if last_object_nr > ret['total_objects']:
        last_object_nr = ret['total_objects']
    ret['last_object_nr'] = last_object_nr
    if debug_mode:
        logger.info("{0} objects returned".format(len(ret['objects'])))
    return ret
class Read(object):
    """Route provider exposing the module-level read() at /@@API/read."""
    interface.implements(IRouteProvider)
    def initialize(self, context, request):
        # No per-request setup required.
        pass
    @property
    def routes(self):
        # Single route, accepting both GET and POST.
        return (
            ("/read", "read", self.read, dict(methods=['GET', 'POST'])),
        )
    def read(self, context, request):
        """/@@API/read: Search the catalog and return data for all objects found

        Optional parameters:

            - catalog_name: uses portal_catalog if unspecified
            - limit  default=1
            - All catalog indexes are searched for in the request.

        {
            runtime: Function running time.
            error: true or string(message) if error. false if no error.
            success: true or string(message) if success. false if no success.
            objects: list of dictionaries, containing catalog metadata
        }
        """
        return read(context, request)
| 3,394 | 756 | 46 |
481f134705f71ab0bcdea34d192dfce72053ecc1 | 2,112 | py | Python | cpgames/modules/core/tankwar/modules/interfaces/switchinterface.py | Wasabii88/Games | 33262ca1958207a24e57e3532feded7e275b1dd1 | [
"MIT"
] | 1 | 2022-02-27T10:33:41.000Z | 2022-02-27T10:33:41.000Z | cpgames/modules/core/tankwar/modules/interfaces/switchinterface.py | beiwei365/Games | f6499f378802d3212a08aeca761191b58714b7f0 | [
"MIT"
] | null | null | null | cpgames/modules/core/tankwar/modules/interfaces/switchinterface.py | beiwei365/Games | f6499f378802d3212a08aeca761191b58714b7f0 | [
"MIT"
] | null | null | null | '''
Function:
关卡切换界面
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import pygame
from .....utils import QuitGame
'''关卡切换界面''' | 39.111111 | 161 | 0.671875 | '''
Function:
关卡切换界面
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import pygame
from .....utils import QuitGame
'''关卡切换界面'''
def SwitchLevelIterface(screen, cfg, resource_loader, level_next=1):
    """Level-switch (loading) screen shown between tank-war levels.

    Draws the game logo, a "loading" prompt for `level_next`, and a progress
    bar along which a tank sprite crawls left-to-right; returns once the tank
    reaches the right edge of the bar.

    Args:
        screen: pygame display surface to draw on.
        cfg: game config object (provides SCREENSIZE and FPS).
        resource_loader: loader exposing .images and .fonts dictionaries.
        level_next (int): number of the level about to start (shown on screen).
    """
    background_img = resource_loader.images['others']['background']
    color_white = (255, 255, 255)
    color_gray = (192, 192, 192)
    font = resource_loader.fonts['switch']
    logo_img = resource_loader.images['others']['logo']
    logo_img = pygame.transform.scale(logo_img, (446, 70))
    logo_rect = logo_img.get_rect()
    logo_rect.centerx, logo_rect.centery = cfg.SCREENSIZE[0] / 2, cfg.SCREENSIZE[1] // 4
    # loading-game prompt text
    font_render = font.render('Loading game data, You will enter Level-%s' % level_next, True, color_white)
    font_rect = font_render.get_rect()
    font_rect.centerx, font_rect.centery = cfg.SCREENSIZE[0] / 2, cfg.SCREENSIZE[1] / 2
    # loading progress bar; a tank sprite acts as the moving cursor
    gamebar = resource_loader.images['others']['gamebar'].convert_alpha()
    gamebar_rect = gamebar.get_rect()
    gamebar_rect.centerx, gamebar_rect.centery = cfg.SCREENSIZE[0] / 2, cfg.SCREENSIZE[1] / 1.4
    tank_cursor = resource_loader.images['player']['player1'][0].convert_alpha().subsurface((0, 144), (48, 48))
    tank_rect = tank_cursor.get_rect()
    tank_rect.left = gamebar_rect.left
    tank_rect.centery = gamebar_rect.centery
    # frames remaining until loading "finishes" (tank reaches the bar's right edge)
    load_time_left = gamebar_rect.right - tank_rect.right + 8
    # main loop
    clock = pygame.time.Clock()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                QuitGame()
        if load_time_left <= 0:
            return
        screen.blit(background_img, (0, 0))
        screen.blit(logo_img, logo_rect)
        screen.blit(font_render, font_rect)
        screen.blit(gamebar, gamebar_rect)
        screen.blit(tank_cursor, tank_rect)
        # fill the bar behind the tank with gray to show progress
        pygame.draw.rect(screen, color_gray, (gamebar_rect.left+8, gamebar_rect.top+8, tank_rect.left-gamebar_rect.left-8, tank_rect.bottom-gamebar_rect.top-16))
        tank_rect.left += 1
        load_time_left -= 1
        pygame.display.update()
clock.tick(cfg.FPS) | 2,002 | 0 | 22 |
19a72e22b7b667c3b7c010898f69b0fa5b3d7a52 | 10,009 | py | Python | entso_data_clean.py | hungchristine/ReDyFEV | b40d9df43c7b2611fba9f34501e69c2df6b5b5b2 | [
"BSD-3-Clause"
] | null | null | null | entso_data_clean.py | hungchristine/ReDyFEV | b40d9df43c7b2611fba9f34501e69c2df6b5b5b2 | [
"BSD-3-Clause"
] | null | null | null | entso_data_clean.py | hungchristine/ReDyFEV | b40d9df43c7b2611fba9f34501e69c2df6b5b5b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Reads in the raw data fetched from ENTSO-E Transparency Platform, refactors the
data and fills in missing data
"""
import pandas as pd
import numpy as np
from datetime import datetime, date, time, timezone, timedelta
import pickle
import os
import logging
#%%
def aggregate_entso(dictionary, start=None, end=None, start2=None):
    """ Receives dictionary of raw data queried from ENTSO-E Transparency Portal
    and start and end of sub-annual period desired (optional)
    Calculates the size in timesteps between samples (rows) and checks
    if they are identical (i.e., even timesteps throughout time series)

    Args:
        dictionary: mapping of country code (or (exporter, importer) tuple for
            trade flows) to a pandas DataFrame/Series of power values in MW,
            indexed by timestamp. Float values are not sliced.
        start, end: optional datetimes bounding a sub-annual slice.
        start2: optional fallback timestamp (nearest whole hour) used when the
            slice at `start` comes back empty.

    Returns:
        tuple: (summed_dict, new_dict) where summed_dict maps each key to its
        energy total in TWh (power * timestep / 1e6), and new_dict maps each
        key to the sliced (and possibly resampled) time series.
    """
    new_dict = {}
    summed_dict = {}
    no_trade = []  # (exporter, importer) pairs with no data in the window
    for key, value in dictionary.items():
        timestep_list = []
        # Optionally restrict the series to the requested sub-annual window.
        if (start is not None) and (end is not None) and (not isinstance(value, float)):
            try:
                value_tmp = value.loc[start:end]
                if value_tmp.empty and (start2 is not None):
                    print(f'{key} does not have an entry for {start}. Sampling closest hour instead')
                    value_tmp = value.loc[start2:start2 + timedelta(minutes=1)]
                # entsotime = value.index
            except KeyError:
                if isinstance(key, tuple):
                    print(f'No trade for {key} at {start}')
            except AttributeError:
                if isinstance(key, tuple):
                    no_trade.append(key)
                else:
                    print(f'No dataframe for {key}, cannot take slice!')
                    raise
            except Exception as e:
                print('Ran into a problem!')
                print(e)
                raise
            value = value_tmp
        # try:
        if isinstance(value, pd.DataFrame) or isinstance(value, pd.Series):
            # we don't need timesteps if we have a single time period (i.e., footprint analysis)
            for i in range(0, value.shape[0]-1):
                try:
                    timestep = (value.index[i+1] - value.index[i]).total_seconds() / 3600 # get timestep in hours
                    timestep_list.append(timestep)
                except IndexError:
                    print(f'IndexError {i}')
                except Exception as e:
                    print('Could not perform!')
                    print(e)
            # Make sure timesteps are all equal length before calculating
            # NB: this is made obsolete by using bentso's fullyear=True
            if checkEqual(timestep_list):
                if (isinstance(value, pd.DataFrame)):
                    # ENTSO-E data from 2020 introduces MultiIndex headers
                    if type(value.columns) == pd.MultiIndex:
                        value = value.loc(axis=1)[:, 'Actual Aggregated'] # drop "Actual Consumption"
                        value.columns = value.columns.droplevel(1)
                    if (value.columns.str.contains('Hydro Pumped Storage').sum() == 1):
                        # check if value is the production matrix and if so, if the country has pumped storage
                        logging.info(f'Correcting for negative pumped hydropower in {key}')
                        value = value.apply(distribute_neg_hydro, axis=1) # correct for negative pumped hydro
                if value.shape[0] -1 > 0:
                    summed_dict[key] = (value * timestep_list[0] / 1e6).sum() # To calculate electricity generated; take power per timestep and multiply by length of time period. Sum over whole year
                elif value.shape[0] - 1 == 0:
                    # single-row slice: no timestep to weight by, scale MW -> TWh only
                    summed_dict[key] = (value / 1e6).sum()
                new_dict[key] = value
            else:
                # Uneven sampling: interpolate onto the finest observed grid.
                print(f'warning: unequal time steps for {key}')
                logging.warning(f'unequal time steps for %s', key)
                if min(timestep_list) == 1:
                    logging.info(f'resampling data to 1 hour increments in {key}')
                    new_dict[key] = value.resample('1H').interpolate()
                    # NOTE(review): the sum still weights by timestep_list[0],
                    # not the resampled 1 h step -- confirm intended
                    summed_dict[key] = (new_dict[key] * timestep_list[0] / 1e6).sum()
                    print('1 hour')
                elif min(timestep_list) == 0.25:
                    logging.info(f'resampling data to 15 minute increments in {key}')
                    new_dict[key] = value.resample('15T').interpolate()
                    summed_dict[key] = (new_dict[key] * timestep_list[0] / 1e6).sum()
                    print('15 minutes')
                else:
                    print('very uneven timesteps')
            #value = (value*timestep_list[0]/1000).sum()
        # except Exception as e:
        #     print(f'something went wrong in {key}')
        #     print(e)
    return summed_dict, new_dict #, entsotime
| 46.124424 | 198 | 0.593866 | # -*- coding: utf-8 -*-
"""
Reads in the raw data fetched from ENTSO-E Transparency Platform, refactors the
data and fills in missing data
"""
import pandas as pd
import numpy as np
from datetime import datetime, date, time, timezone, timedelta
import pickle
import os
import logging
#%%
def checkEqual(iterator):
    """Return True when every element of *iterator* is identical (an empty
    iterable also counts as equal)."""
    distinct_values = set(iterator)
    return len(distinct_values) <= 1
def distribute_neg_hydro(time_per):
    """Redistribute negative pumped-hydro generation across a production mix.

    ENTSO-E reports pumped-storage pumping (consumption) as negative
    generation. This zeroes a negative 'Hydro Pumped Storage' entry and
    subtracts the pumping power from all technologies in proportion to their
    share of the period's production mix, i.e. the pumping load is "supplied"
    by the mix itself.

    Args:
        time_per (pd.Series): production by technology for one time period
            (one DataFrame row when used via ``DataFrame.apply(..., axis=1)``).

    Returns:
        pd.Series: the adjusted (or unchanged) production mix. Bug fix: the
        error paths previously fell through and implicitly returned None,
        which silently dropped the row under ``DataFrame.apply``; they now
        return the row unchanged.
    """
    try:
        if time_per['Hydro Pumped Storage'] < 0:
            neg_val = time_per['Hydro Pumped Storage']
            time_per['Hydro Pumped Storage'] = 0
            # "supply" pumping power proportional to the production mix of the
            # time period
            time_per = time_per + (time_per / time_per.sum()) * neg_val
        return time_per
    except KeyError:
        # Row has no pumped-storage column; nothing to correct.
        print('No pumped hydro')
        return time_per
    except Exception as e:
        print('Error in correcting for pumped hydro')
        print(e)
        return time_per
def aggregate_entso(dictionary, start=None, end=None, start2=None):
    """ Receives dictionary of raw data queried from ENTSO-E Transparency Portal
    and start and end of sub-annual period desired (optional)
    Calculates the size in timesteps between samples (rows) and checks
    if they are identical (i.e., even timesteps throughout time series)

    Args:
        dictionary: mapping of country code (or (exporter, importer) tuple for
            trade flows) to a pandas DataFrame/Series of power values in MW,
            indexed by timestamp. Float values are not sliced.
        start, end: optional datetimes bounding a sub-annual slice.
        start2: optional fallback timestamp (nearest whole hour) used when the
            slice at `start` comes back empty.

    Returns:
        tuple: (summed_dict, new_dict) where summed_dict maps each key to its
        energy total in TWh (power * timestep / 1e6), and new_dict maps each
        key to the sliced (and possibly resampled) time series.
    """
    new_dict = {}
    summed_dict = {}
    no_trade = []  # (exporter, importer) pairs with no data in the window
    for key, value in dictionary.items():
        timestep_list = []
        # Optionally restrict the series to the requested sub-annual window.
        if (start is not None) and (end is not None) and (not isinstance(value, float)):
            try:
                value_tmp = value.loc[start:end]
                if value_tmp.empty and (start2 is not None):
                    print(f'{key} does not have an entry for {start}. Sampling closest hour instead')
                    value_tmp = value.loc[start2:start2 + timedelta(minutes=1)]
                # entsotime = value.index
            except KeyError:
                if isinstance(key, tuple):
                    print(f'No trade for {key} at {start}')
            except AttributeError:
                if isinstance(key, tuple):
                    no_trade.append(key)
                else:
                    print(f'No dataframe for {key}, cannot take slice!')
                    raise
            except Exception as e:
                print('Ran into a problem!')
                print(e)
                raise
            value = value_tmp
        # try:
        if isinstance(value, pd.DataFrame) or isinstance(value, pd.Series):
            # we don't need timesteps if we have a single time period (i.e., footprint analysis)
            for i in range(0, value.shape[0]-1):
                try:
                    timestep = (value.index[i+1] - value.index[i]).total_seconds() / 3600 # get timestep in hours
                    timestep_list.append(timestep)
                except IndexError:
                    print(f'IndexError {i}')
                except Exception as e:
                    print('Could not perform!')
                    print(e)
            # Make sure timesteps are all equal length before calculating
            # NB: this is made obsolete by using bentso's fullyear=True
            if checkEqual(timestep_list):
                if (isinstance(value, pd.DataFrame)):
                    # ENTSO-E data from 2020 introduces MultiIndex headers
                    if type(value.columns) == pd.MultiIndex:
                        value = value.loc(axis=1)[:, 'Actual Aggregated'] # drop "Actual Consumption"
                        value.columns = value.columns.droplevel(1)
                    if (value.columns.str.contains('Hydro Pumped Storage').sum() == 1):
                        # check if value is the production matrix and if so, if the country has pumped storage
                        logging.info(f'Correcting for negative pumped hydropower in {key}')
                        value = value.apply(distribute_neg_hydro, axis=1) # correct for negative pumped hydro
                if value.shape[0] -1 > 0:
                    summed_dict[key] = (value * timestep_list[0] / 1e6).sum() # To calculate electricity generated; take power per timestep and multiply by length of time period. Sum over whole year
                elif value.shape[0] - 1 == 0:
                    # single-row slice: no timestep to weight by, scale MW -> TWh only
                    summed_dict[key] = (value / 1e6).sum()
                new_dict[key] = value
            else:
                # Uneven sampling: interpolate onto the finest observed grid.
                print(f'warning: unequal time steps for {key}')
                logging.warning(f'unequal time steps for %s', key)
                if min(timestep_list) == 1:
                    logging.info(f'resampling data to 1 hour increments in {key}')
                    new_dict[key] = value.resample('1H').interpolate()
                    # NOTE(review): the sum still weights by timestep_list[0],
                    # not the resampled 1 h step -- confirm intended
                    summed_dict[key] = (new_dict[key] * timestep_list[0] / 1e6).sum()
                    print('1 hour')
                elif min(timestep_list) == 0.25:
                    logging.info(f'resampling data to 15 minute increments in {key}')
                    new_dict[key] = value.resample('15T').interpolate()
                    summed_dict[key] = (new_dict[key] * timestep_list[0] / 1e6).sum()
                    print('15 minutes')
                else:
                    print('very uneven timesteps')
            #value = (value*timestep_list[0]/1000).sum()
        # except Exception as e:
        #     print(f'something went wrong in {key}')
        #     print(e)
    return summed_dict, new_dict #, entsotime
def build_trade_mat(trade_dict):
    """Pivot a {(from_country, to_country): volume} mapping into a matrix.

    Rows are exporting countries, columns are importing countries; pairs
    absent from `trade_dict` come out as NaN.
    """
    country_pairs = trade_dict.keys()
    long_form = pd.DataFrame(trade_dict.values(), index=country_pairs)
    trade_mat = long_form.unstack()
    # unstack() keeps the (single) value-column label as an outer level;
    # retain only the importing-country labels
    trade_mat.columns = trade_mat.columns.get_level_values(1)
    return trade_mat
def clean_entso(year=None, start=None, end=None, country=None):
    """Load pickled ENTSO-E generation/trade data, aggregate it, and export.

    Reads 'entso_export_trade_<year>.pkl' and 'entso_export_gen_<year>.pkl'
    from ./output/entsoe, aggregates them with aggregate_entso(), patches in
    special cases (Cyprus; countries with trade but no generation data), and
    writes the results back as pickles and CSVs in the same directory.

    Args:
        year: data year used to locate the input/output files.
        start, end: optional datetimes bounding a sub-annual window.
        country: optional country code; when given, the single sampling
            period closest to `start` for that country is analyzed instead.

    Returns:
        add_countries (list): countries added with zero (NaN) production;
        plus entsotime (the actual sampled timestamp) when `country` is given.

    Side effects: changes the working directory while running (restored at
    the end) and writes four files per call.
    """
    fp = os.path.abspath(os.path.curdir)
    fp_output = os.path.join(os.path.curdir, 'output', 'entsoe')
    os.chdir(fp_output)
    ## Load previous results
    with open(r'entso_export_trade_' + str(year) + '.pkl', 'rb') as handle:
        trade_dict = pickle.load(handle)
    with open(r'entso_export_gen_' + str(year) + '.pkl', 'rb') as handle:
        gen_dict = pickle.load(handle)
    generation = gen_dict.copy()
    trade = trade_dict.copy()
    # start = datetime(2019, 1, 1, 0)  # test values for sub-annual periods
    # end = datetime(2019, 6, 30, 23)
    if country is not None:
        # find closest timestamp to query date
        start = generation[country].iloc[generation[country].index.get_loc(start, method='nearest')].name
        if start.minute != 00:
            # some countries only have hourly sampling; go to nearest hour
            if start.minute > 30:
                start2 = start + timedelta(minutes=60-start.minute)
            else:
                start2 = start - timedelta(minutes=start.minute)
        else:
            start2 = None
        end = start + timedelta(minutes=1)  # single-period window
        # summed_gen = generation[country].loc[entsotime]
        # summed_trade = trade[country].loc[entsotime]
    else:
        start2 = None
    summed_gen, new_gen = aggregate_entso(generation, start, end, start2)
    summed_trade, new_trade = aggregate_entso(trade, start, end, start2)
    trade_df = build_trade_mat(summed_trade)
    gen_df = pd.DataFrame.from_dict(summed_gen, orient='index')
    """ Read in Ireland's data manually from .csv export from Transparency Platform;
    'country' (which Bentso fetches) and the 'bidding zone' classifications differ,
    for which the latter has higher production values (which agree better with statistical
    production from 2018)
    """
    # if year == 2019:
    #     fp_ie = os.path.join(fp, 'data', 'gen_IE.csv')
    #     new_ie = pd.read_csv(fp_ie)
    #     new_ie = new_ie.set_index('MTU', drop=True, append=False).drop('Area', axis=1)
    #     new_ie = new_ie.replace('n/e', np.nan)
    #     new_ie = (new_ie * 0.5).sum() / 1e6  # samples on the half hour; MWh to TWh
    #     new_ie = new_ie.drop(index='Marine - Actual Aggregated [MW]')
    #     new_ie.index = gen_df.columns
    #     gen_df.loc['IE'] = new_ie
    #     """ Aggregate GB and GB_NIR regions """
    #     gen_df = (gen_df.reset_index().replace({'index': {'GB-NIR':'GB'}}).groupby('index', sort=False).sum())
    #     trade_df = (trade_df.reset_index().replace({'index': {'GB-NIR':'GB'}}).groupby('index', sort=False).sum())  # aggregate trade rows
    #     trade_df = ((trade_df.T).reset_index().replace({'index': {'GB-NIR':'GB'}}).groupby('index', sort=False).sum()).T  # aggregate trade column
    # elif year == 2021:
    #     new_ie = generation['IE']
    #     new_ie = new_ie.loc(axis=1)[:, 'Actual Aggregated']  # drop "Actual Consumption"
    #     new_ie.columns = new_ie.columns.droplevel(1)
    #     new_ie *= 2  # for whatever reason, results from IE are half of what they should be....
    #     new_ie = (new_ie * 0.5).sum() / 1e6  # from MWh to TWh
    #     new_ie.rename('IE', inplace=True)
    #     gen_df.drop(index='IE', inplace=True)
    #     gen_df = gen_df.append(new_ie)
    """ Add Cyprus to trade df (no trade relationships) """
    trade_df['CY'] = 0
    trade_df.loc['CY'] = 0
    # Add in countries with trade relationships, but no generation data; assume 0 production
    add_countries = list(set(trade_df.index) - set(gen_df.index))
    for count in add_countries:
        gen_df.loc[count] = 0
    gen_df.replace(0, np.nan, inplace=True)  # zero production is treated as missing data
    with open(r'trade_final_' + str(year) + '.pkl', 'wb') as handle:
        pickle.dump(trade_df, handle)
    with open(r'gen_final_' + str(year) + '.pkl', 'wb') as handle:
        pickle.dump(gen_df, handle)
    trade_df.to_csv('trades_' + str(year) + '.csv')
    gen_df.to_csv('ENTSO_production_volumes_' + str(year) + '.csv')
    logging.info('Completed export of ENTSO-E data')
    os.chdir(fp)
    if country is not None:
        entsotime = start  # report the ENTSO sampling period used
        return add_countries, entsotime
    else:
        return add_countries
01aaf6de57071dcdf6b605b4f73dddb353bd903a | 949 | py | Python | engine/dictionary.py | blubits/unscramble | a90da49b593a29b8968799e1995814e9c9df379c | [
"MIT"
] | null | null | null | engine/dictionary.py | blubits/unscramble | a90da49b593a29b8968799e1995814e9c9df379c | [
"MIT"
] | null | null | null | engine/dictionary.py | blubits/unscramble | a90da49b593a29b8968799e1995814e9c9df379c | [
"MIT"
] | null | null | null | """
A dictionary of words.
:Author: Maded Batara III
:Version: v20181010
"""
from collections import Counter
from .dictionary_query import DictionaryQuery
class Dictionary(DictionaryQuery):
    """A dictionary of words.

    As this is a subclass of DictionaryQuery, this class supports all its
    methods."""

    def __init__(self, filename):
        """
        Initializes a new dictionary.

        Args:
            filename (str): Filename of the dictionary to be loaded in.
        """
        with open(filename) as source:
            entries = source.read().splitlines()
        # Map each word to a letter-frequency Counter for fast anagram queries.
        self.words = {entry: Counter(entry) for entry in entries}

    def __repr__(self):
        """Implements repr(Dictionary)."""
        return "Dictionary(words={0})".format(len(self))

    def __str__(self):
        """Implements str(Dictionary)."""
        return "Dictionary with {0} entries".format(len(self))
| 24.973684 | 73 | 0.61117 | """
A dictionary of words.
:Author: Maded Batara III
:Version: v20181010
"""
from collections import Counter
from .dictionary_query import DictionaryQuery
class Dictionary(DictionaryQuery):
    """A dictionary of words.

    As this is a subclass of DictionaryQuery, this class supports all its
    methods."""

    def __init__(self, filename):
        """
        Initializes a new dictionary.

        Args:
            filename (str): Filename of the dictionary to be loaded in.
        """
        with open(filename) as source:
            entries = source.read().splitlines()
        # Map each word to a letter-frequency Counter for fast anagram queries.
        self.words = {entry: Counter(entry) for entry in entries}

    def __repr__(self):
        """Implements repr(Dictionary)."""
        return "Dictionary(words={0})".format(len(self))

    def __str__(self):
        """Implements str(Dictionary)."""
        return "Dictionary with {0} entries".format(len(self))
| 0 | 0 | 0 |
161ba4ce1425c6adbab5f5ea95ba45d85ab47903 | 6,770 | py | Python | emulator/cr.py | Adancurusul/UR408_Core | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 | [
"MIT"
] | 4 | 2020-07-13T03:12:19.000Z | 2021-08-03T02:09:28.000Z | emulator/cr.py | Adancurusul/UR408_Core | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 | [
"MIT"
] | null | null | null | emulator/cr.py | Adancurusul/UR408_Core | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 | [
"MIT"
] | null | null | null | from myhdl import *
@block
def cr(pc_next,branch_offset,r6_r7_data,cr_data,clk,rst,int0,int1,int2,int3,mem_read,mem_write,mem_ok,branch,selector,cr_write,ret,apc,jmp,bra
,main_state):
'''
:param clk: 1 in clk
:param rst: 1 in rst
:param int0: 1 in interrupt
:param int1: 1 in interrupt
:param int2: 1 in interrupt
:param int3: 1 in interrupt
:param mem_read: 1 in memory read
:param mem_write: 1 in memory write
:param mem_ok: 1 in memory ok
:param branch: 1 in branch
:param selector: 3 in selectors
:param cr_write:1 in cr write
:param ret: 1 in return
:param apc: 1 in apc
:param jmp: 1 in jmp
:param bra: 1 in branch if
:param main_state: 1 out main state
:param pc_next: 16 out program counter next
:param branch_offset: 16 in branch offset
:param r6_r7_data: 16 in data from r6 and r7 (pointer
:param cr_data: 16 out cr register data
:return:
'''
states = enum ('status','ie','epc','cpc','tvec0','tvec1','tvec2','tvec3')
CPC = Signal(intbv(0)[16:])
#TEMP = Signal(intbv(0)[16:])
TVEC0 = Signal(intbv(0)[16:])
TVEC1 = Signal(intbv(0)[16:])
TVEC2 = Signal(intbv(0)[16:])
TVEC3 = Signal(intbv(0)[16:])
EPC = Signal(intbv(0)[16:])
PC = Signal(intbv(0)[16:])
GIE = Signal(bool(0))
PGIE = Signal(bool(0))
IE0 = Signal(bool(0))
IE1 = Signal(bool(0))
IE2 = Signal(bool(0))
IE3 = Signal(bool(0))
int_acc = Signal(bool(0))
tvec = Signal(intbv(0)[16:])
int0_acc = Signal(bool(0))
int1_acc = Signal(bool(0))
int2_acc = Signal(bool(0))
int3_acc = Signal(bool(0))
@always_comb
@always_comb
'''
@always_comb
def comb_logic3():
if selector=='000000001':
cr_data[16:2].next = intbv(0)[14:]
cr_data[1].next = PGIE
cr_data[0].next = GIE
elif selector=='000000010':
cr_data[16:4].next = intbv(0)[12:]
cr_data[3].next = IE3
cr_data[2].next = IE2
cr_data[1].next = IE1
cr_data[0].next = IE0
elif selector=='000000100':
cr_data.next = EPC
elif selector=='000001000':
cr_data.next = CPC
elif selector == '000010000':
cr_data.next = TVEC0
elif selector == '000100000':
cr_data.next = TVEC1
elif selector == '001000000':
cr_data.next = TVEC2
elif selector == '010000000':
cr_data.next = TVEC3
else:
cr_data.next = 0
'''
@always_comb
@always_seq(clk.posedge,reset = rst)
@always_seq(clk.posedge, reset=rst)
@always_seq(clk.posedge, reset=rst)
@always_seq(clk.posedge, reset=rst)
@always_seq(clk.posedge, reset=rst)
@always_seq(clk.posedge, reset=rst)
@always_seq(clk.posedge, reset=rst)
@always_comb
@always_comb
return instances()
| 29.181034 | 142 | 0.538109 | from myhdl import *
@block
def cr(pc_next,branch_offset,r6_r7_data,cr_data,clk,rst,int0,int1,int2,int3,mem_read,mem_write,mem_ok,branch,selector,cr_write,ret,apc,jmp,bra
       ,main_state):
    '''
    Control-register file and program counter for the core: holds the status
    bits (GIE/PGIE), per-interrupt enables, EPC/CPC, four trap vectors, and
    sequences the PC (interrupt entry/return, jumps, branches, and a wait
    state for memory accesses).

    :param clk: 1 in clk
    :param rst: 1 in rst
    :param int0: 1 in interrupt
    :param int1: 1 in interrupt
    :param int2: 1 in interrupt
    :param int3: 1 in interrupt
    :param mem_read: 1 in memory read
    :param mem_write: 1 in memory write
    :param mem_ok: 1 in memory ok
    :param branch: 1 in branch
    :param selector: 3 in selectors
    :param cr_write:1 in cr write
    :param ret: 1 in return
    :param apc: 1 in apc
    :param jmp: 1 in jmp
    :param bra: 1 in branch if
    :param main_state: 1 out main state
    :param pc_next: 16 out program counter next
    :param branch_offset: 16 in branch offset
    :param r6_r7_data: 16 in data from r6 and r7 (pointer
    :param cr_data: 16 out cr register data
    :return:
    '''
    # Symbolic selector values for addressing individual control registers.
    states = enum ('status','ie','epc','cpc','tvec0','tvec1','tvec2','tvec3')
    CPC = Signal(intbv(0)[16:])   # "copy of PC" register (captured on apc)
    #TEMP = Signal(intbv(0)[16:])
    TVEC0 = Signal(intbv(0)[16:])  # trap vector for interrupt 0
    TVEC1 = Signal(intbv(0)[16:])  # trap vector for interrupt 1
    TVEC2 = Signal(intbv(0)[16:])  # trap vector for interrupt 2
    TVEC3 = Signal(intbv(0)[16:])  # trap vector for interrupt 3
    EPC = Signal(intbv(0)[16:])    # exception PC (interrupt return address)
    PC = Signal(intbv(0)[16:])     # program counter
    GIE = Signal(bool(0))          # global interrupt enable
    PGIE = Signal(bool(0))         # previous GIE, saved on interrupt entry
    IE0 = Signal(bool(0))          # per-interrupt enable bits
    IE1 = Signal(bool(0))
    IE2 = Signal(bool(0))
    IE3 = Signal(bool(0))
    int_acc = Signal(bool(0))      # an interrupt is accepted this cycle
    tvec = Signal(intbv(0)[16:])   # trap vector of the accepted interrupt
    int0_acc = Signal(bool(0))     # per-interrupt "enabled and pending" flags
    int1_acc = Signal(bool(0))
    int2_acc = Signal(bool(0))
    int3_acc = Signal(bool(0))
    # Priority mux: lowest-numbered accepted interrupt selects the trap vector.
    @always_comb
    def comb_logic():
        if int0_acc:
            tvec.next = TVEC0
        elif int1_acc:
            tvec.next = TVEC1
        elif int2_acc:
            tvec.next = TVEC2
        else:
            tvec.next = TVEC3
    # Next-PC mux: return address, branch target, jump target, or current PC.
    @always_comb
    def comb_logic2():
        if ret:
            pc_next.next = EPC
        elif branch :
            pc_next.next = PC+ branch_offset
        elif jmp:
            pc_next.next = r6_r7_data
        else:
            pc_next.next = PC
    '''
    @always_comb
    def comb_logic3():
        if selector=='000000001':
            cr_data[16:2].next = intbv(0)[14:]
            cr_data[1].next = PGIE
            cr_data[0].next = GIE
        elif selector=='000000010':
            cr_data[16:4].next = intbv(0)[12:]
            cr_data[3].next = IE3
            cr_data[2].next = IE2
            cr_data[1].next = IE1
            cr_data[0].next = IE0
        elif selector=='000000100':
            cr_data.next = EPC
        elif selector=='000001000':
            cr_data.next = CPC
        elif selector == '000010000':
            cr_data.next = TVEC0
        elif selector == '000100000':
            cr_data.next = TVEC1
        elif selector == '001000000':
            cr_data.next = TVEC2
        elif selector == '010000000':
            cr_data.next = TVEC3
        else:
            cr_data.next = 0
    '''
    # Read mux: drive cr_data from the control register picked by `selector`.
    @always_comb
    def comb_logic3():
        if selector==states.status:#'000000001'
            cr_data[16:2].next = intbv(0)[14:]
            cr_data[1].next = PGIE
            cr_data[0].next = GIE
        elif selector==states.ie:
            cr_data[16:4].next = intbv(0)[12:]
            cr_data[3].next = IE3
            cr_data[2].next = IE2
            cr_data[1].next = IE1
            cr_data[0].next = IE0
        elif selector==states.epc:
            cr_data.next = EPC
        elif selector==states.cpc:
            cr_data.next = CPC
        elif selector == states.tvec0:
            cr_data.next = TVEC0
        elif selector == states.tvec1:
            cr_data.next = TVEC1
        elif selector == states.tvec2:
            cr_data.next = TVEC2
        elif selector == states.tvec3:
            cr_data.next = TVEC3
        else:
            cr_data.next = 0
    # Two-state wait FSM: enter the wait state on a memory access, leave on mem_ok.
    @always_seq(clk.posedge,reset = rst)
    def cr_logic():
        # main_state
        if not main_state:
            if mem_read | mem_write:
                main_state.next = intbv(bool(1))
            else:
                main_state.next = intbv(bool(0))
        elif main_state:
            if mem_ok:
                main_state.next = intbv(bool(0))
            else:
                main_state.next = intbv(bool(1))
    # status
    # GIE is cleared on interrupt entry (PGIE saves it) and restored on return.
    @always_seq(clk.posedge, reset=rst)
    def cr_logic2():
        if int_acc:
            GIE.next = 0
        elif ret:
            GIE.next = PGIE
        elif selector==states.status and cr_write:
            GIE.next = r6_r7_data[0]
        if int_acc:
            PGIE.next = GIE
        elif selector == states.status and cr_write:
            PGIE.next = r6_r7_data[1]
    # Software write of the four interrupt-enable bits.
    @always_seq(clk.posedge, reset=rst)
    def cr_logic3():
        #ie
        if selector == states.ie and cr_write:
            IE0.next = r6_r7_data[0]
            IE1.next = r6_r7_data[1]
            IE2.next = r6_r7_data[2]
            IE3.next = r6_r7_data[3]
    # EPC: latches PC on interrupt entry; software-writable otherwise.
    @always_seq(clk.posedge, reset=rst)
    def cr_logic4():
        # epc
        if int_acc:
            EPC.next = PC
        elif selector == states.epc and cr_write:
            EPC.next = r6_r7_data
    # CPC: software-writable; otherwise captures PC when `apc` is asserted.
    @always_seq(clk.posedge, reset=rst)
    def cr_logic5():
        # cpc
        if selector == states.cpc and cr_write:
            CPC.next = r6_r7_data
        elif apc:
            CPC.next = PC
    # PC sequencing: trap entry, return, jump, branch, or increment.
    # The PC stalls while a memory access is in flight and not yet acknowledged.
    @always_seq(clk.posedge, reset=rst)
    def cr_logic6():
        # pc
        if int_acc:
            PC.next = tvec + 1
        elif ret:
            PC.next = EPC + 1
        elif jmp:
            PC.next = r6_r7_data + 1
        elif branch:
            PC.next = PC + branch_offset + 1
        else:
            if ((not main_state) or not (mem_read or mem_write)) or (main_state and mem_ok):
                PC.next = PC + 1
            else:
                PC.next = PC
    # Software writes of the four trap-vector registers.
    @always_seq(clk.posedge, reset=rst)
    def cr_logic7():
        #tvec0
        if selector == states.tvec0 and cr_write:
            TVEC0.next = r6_r7_data
        # tvec1
        if selector == states.tvec1 and cr_write:
            TVEC1.next = r6_r7_data
        # tvec2
        if selector == states.tvec2 and cr_write:
            TVEC2.next = r6_r7_data
        # tvec3
        if selector == states.tvec3 and cr_write:
            TVEC3.next = r6_r7_data
    # Per-interrupt accept: pending line gated by its enable and global enable.
    @always_comb
    def int_logic():
        int0_acc.next = GIE & int0 & IE0
        int1_acc.next = GIE & int1 & IE1
        int2_acc.next = GIE & int2 & IE2
        int3_acc.next = GIE & int3 & IE3
    # NOTE(review): Python's '&' binds tighter than 'not', so this evaluates as
    # not((bra|jmp|ret|mem_read|mem_write) & (int0_acc|...)); verify the intent
    # was (not busy) & (any interrupt pending) -- if so, parentheses are missing.
    @always_comb
    def int_plus_logic():
        int_acc.next = not (bra | jmp | ret | mem_read | mem_write) & (int0_acc | int1_acc | int2_acc | int3_acc)
    return instances()
| 3,522 | 0 | 312 |
837c2bb49c43c7827b7da66e16ade1f183903c41 | 3,994 | py | Python | CinderMetrics.py | dloucasfx/collectd-openstack | 05a0e4657ff28a9c2cccb13b61144c702ec766cc | [
"Apache-2.0"
] | null | null | null | CinderMetrics.py | dloucasfx/collectd-openstack | 05a0e4657ff28a9c2cccb13b61144c702ec766cc | [
"Apache-2.0"
] | null | null | null | CinderMetrics.py | dloucasfx/collectd-openstack | 05a0e4657ff28a9c2cccb13b61144c702ec766cc | [
"Apache-2.0"
] | null | null | null | import sys
from os import path
import inspect
from keystoneauth1 import identity
from keystoneauth1 import session
from cinderclient import client
import re
METRIC_NAME_PREFIX = "openstack."
CINDER_LIMIT_PREFIX = "cinder.limit."
CINDER_VOLUME_PREFIX = "cinder.volume."
CINDER_SNAPSHOT_PREFIX = "cinder.snapshot."
DEFAULT_CINDER_CLIENT_VERSION = "2.0"
| 32.209677 | 85 | 0.593891 | import sys
from os import path
import inspect
from keystoneauth1 import identity
from keystoneauth1 import session
from cinderclient import client
import re
METRIC_NAME_PREFIX = "openstack."
CINDER_LIMIT_PREFIX = "cinder.limit."
CINDER_VOLUME_PREFIX = "cinder.volume."
CINDER_SNAPSHOT_PREFIX = "cinder.snapshot."
DEFAULT_CINDER_CLIENT_VERSION = "2.0"
class CinderMetrics:
    """Collects OpenStack Cinder (block storage) metrics for one project.

    Authenticates against Keystone with password credentials and queries the
    Cinder API for volume, snapshot, and quota-limit figures, returning them
    as (metric_name, value) pairs plus dimensions/properties for reporting.
    """
    def __init__(
        self,
        auth_url,
        username,
        password,
        project_name,
        project_domain_id,
        user_domain_id,
        region_name,
        ssl_verify
    ):
        # Keystone/Cinder connection parameters; kept for later reporting too.
        self._auth_url = auth_url
        self._username = username
        self._password = password
        self._project_name = project_name
        self._project_domain_id = project_domain_id
        self._user_domain_id = user_domain_id
        self._region_name = region_name
        self._ssl_verify = ssl_verify
        # Password-based Keystone auth plugin + session shared by the client.
        self.auth = identity.Password(
            auth_url=self._auth_url,
            username=self._username,
            password=self._password,
            project_name=self._project_name,
            project_domain_id=self._project_domain_id,
            user_domain_id=self._user_domain_id
        )
        self.sess = session.Session(auth=self.auth, verify=self._ssl_verify)
        self.cinder = client.Client(
            DEFAULT_CINDER_CLIENT_VERSION,
            session=self.sess,
            region_name=self._region_name
        )
    def collect_cinder_metrics(self):
        """Gather all Cinder metrics and return {'0': (metrics, dims, props)}.

        metrics is a list of (name, value) tuples; dims carries the project
        id; props carries project/domain (and optionally region) names.
        """
        metrics = []
        dims = {}
        props = {}
        self.collect_volume_metrics(metrics)
        self.collect_snapshot_metrics(metrics)
        self.collect_limit_metrics(metrics)
        dims["project_id"] = self.cinder.client.get_project_id()
        props["project_name"] = self._project_name
        props["project_domain_name"] = self._project_domain_id
        props["user_domain_name"] = self._user_domain_id
        if self._region_name:
            props["region_name"] = self._region_name
        return {'0': (metrics, dims, props)}
    def collect_volume_metrics(self, metrics):
        """Append total volume count and size (bytes) across all tenants."""
        volumes = self.cinder.volumes.list(search_opts={'all_tenants': 1})
        data_tenant = dict()
        data_tenant['volumes'] = {'count': 0, 'bytes': 0}
        for volume in volumes:
            data_tenant['volumes']['count'] += 1
            # volume.size is reported in GiB; convert to bytes
            data_tenant['volumes']['bytes'] += (volume.size * 1024 * 1024 * 1024)
        # NOTE(review): data_tenant is always a dict here, so this guard is
        # always true -- likely vestigial.
        if data_tenant is not None:
            metrics.append(("{0}{1}{2}".format(
                METRIC_NAME_PREFIX,
                CINDER_VOLUME_PREFIX,
                'count'
            ), data_tenant['volumes']['count']))
            metrics.append(("{0}{1}{2}".format(
                METRIC_NAME_PREFIX,
                CINDER_VOLUME_PREFIX,
                'size'
            ), data_tenant['volumes']['bytes']))
    def collect_snapshot_metrics(self, metrics):
        """Append total snapshot count and size (bytes) across all tenants."""
        snapshots = self.cinder.volume_snapshots.list(search_opts={'all_tenants': 1})
        data_tenant = dict()
        data_tenant['snapshot'] = {'count': 0, 'bytes': 0}
        for snapshot in snapshots:
            data_tenant['snapshot']['count'] += 1
            # snapshot.size is reported in GiB; convert to bytes
            data_tenant['snapshot']['bytes'] += (snapshot.size * 1024 * 1024 * 1024)
        # NOTE(review): always-true guard, same as in collect_volume_metrics.
        if data_tenant is not None:
            metrics.append(("{0}{1}{2}".format(
                METRIC_NAME_PREFIX,
                CINDER_SNAPSHOT_PREFIX,
                'count'
            ), data_tenant['snapshot']['count']))
            metrics.append(("{0}{1}{2}".format(
                METRIC_NAME_PREFIX,
                CINDER_SNAPSHOT_PREFIX,
                'size'
            ), data_tenant['snapshot']['bytes']))
    def collect_limit_metrics(self, metrics):
        """Append every absolute quota limit reported by the Cinder API."""
        limits = self.cinder.limits.get().to_dict()['absolute']
        for limit in limits:
            metrics.append(("{0}{1}{2}".format(
                METRIC_NAME_PREFIX,
                CINDER_LIMIT_PREFIX,
                limit
            ), limits[limit]))
| 3,484 | -1 | 157 |