hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e2ecc399d23bb87715d2e0a2a17198c925eecbf0 | 1,418 | py | Python | build_world_clouds.py | amercer1/jebe | dd922f6e12c43a96c550b39a0c2eb0154ad5ec5b | [
"MIT"
] | 1 | 2015-06-28T06:35:26.000Z | 2015-06-28T06:35:26.000Z | build_world_clouds.py | amercer1/jebe | dd922f6e12c43a96c550b39a0c2eb0154ad5ec5b | [
"MIT"
] | null | null | null | build_world_clouds.py | amercer1/jebe | dd922f6e12c43a96c550b39a0c2eb0154ad5ec5b | [
"MIT"
] | null | null | null | import os
import re
import random
from scipy.misc import imread
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
# Resolve the script's directory so input/output paths work regardless of CWD.
d = os.path.dirname(__file__)
text_files = os.path.join(d, 'text_files')
images = os.path.join(d, 'images')
# Filenames (not full paths) of every file directly inside text_files/.
paths = [fn for fn in next(os.walk(text_files))[2]]
# Create normal word cloud images
for path in paths:
    png_filename = re.sub(r'\.txt$', '.png', path)
    # Use a context manager so the text-file handle is closed promptly
    # (the original left the handle open for the garbage collector).
    with open(os.path.join(text_files, path)) as text_file:
        text = text_file.read()
    # generate word cloud (the original called generate() a second time on
    # the already-generated cloud, repeating the same work for no effect)
    wc = WordCloud(width=1000, height=500, margin=10).generate(text)
    # store to file
    wc.to_file(os.path.join(images, png_filename))
# read the mask image
# taken from
# http://www.clker.com/cliparts/Q/I/V/k/y/2/black-basketball-hi.png
# NOTE(review): scipy.misc.imread is deprecated and was removed in SciPy 1.2+;
# consider imageio.imread (or matplotlib.pyplot.imread) -- confirm the dtype
# WordCloud expects for masks before switching.
mask = imread(os.path.join(images, "basketball-silhouette.png"))
def orange_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """Color function for WordCloud.recolor: a random light-orange HSL color.

    All positional/keyword arguments besides the randomness are accepted for
    API compatibility with wordcloud's color_func protocol and ignored.
    """
    lightness = random.randint(60, 100)
    return "hsl(25, 100%%, %d%%)" % lightness
# Create basketball-masked, orange-recolored word cloud images
for path in paths:
    png_filename = re.sub(r'\.txt$', '.png', path)
    png_filename = re.sub(r'game', 'basketball_game', png_filename)
    # Close the text file deterministically instead of leaking the handle.
    with open(os.path.join(text_files, path)) as text_file:
        text = text_file.read()
    # generate word cloud shaped by the basketball mask
    wc = WordCloud(max_words=1000, mask=mask,
                   random_state=1).generate(text)
    wc.recolor(color_func=orange_color_func, random_state=3)
    wc.to_file(os.path.join(images, png_filename))
| 32.227273 | 80 | 0.688999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.220028 |
e2eeb62862c41ce4f6916bdfc40e2594722e7ae9 | 13,176 | py | Python | track.py | ahrnbom/guts | 9134e7f6568a24b435841e5934a640bdbe329a68 | [
"MIT"
] | null | null | null | track.py | ahrnbom/guts | 9134e7f6568a24b435841e5934a640bdbe329a68 | [
"MIT"
] | null | null | null | track.py | ahrnbom/guts | 9134e7f6568a24b435841e5934a640bdbe329a68 | [
"MIT"
] | null | null | null | """
Copyright (C) 2022 Martin Ahrnbom
Released under MIT License. See the file LICENSE for details.
This module describes 2D/3D tracks. GUTS's output is a list of instances
of these classes.
"""
import numpy as np
from filter import filter2D, filter3D
from options import Options, Filter2DParams, Filter3DParams
from position import Position, Position3D
from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle
from activecorners import activecorners
from world import World
# Module-level counter backing next_id() / reset_id().
curr_id = 0


def next_id():
    """Return a fresh, strictly increasing track identifier (1, 2, 3, ...)."""
    global curr_id
    curr_id = curr_id + 1
    return curr_id


def reset_id():
    """Restart the identifier sequence from the beginning."""
    global curr_id
    curr_id = 0
class Track:
    """Base class for a tracked road user. Holds the per-frame state history
    and bookkeeping shared by the 2D and 3D track variants; ``self.filter``
    is created by the subclasses."""
    def __init__(self, options:Options, class_name:str, world:World,
                 current_time=None, det=None):
        self.options = options
        self.type = None # either "2D" or "3D"; set by the subclass
        self.history = dict() # frame number -> copy of the filter state vector
        self.times = dict() # total amount of seconds as a float, for each frame
        self.class_name = class_name
        self.world = world
        self.last_updated = current_time
        self.last_updated_frame = None
        self.id = next_id()
        self.should_die = False # set when the filter becomes numerically unstable
    def is_numerically_stable(self):
        """Return False when the filter state exploded or contains NaN."""
        # Bad numerics can sometimes make Kalman numbers grow very large or NaN
        # We are not interested in such tracks
        c1 = np.any(np.abs(self.filter.x) > 1e8)
        c2 = np.any(np.isnan(self.filter.x))
        return (not c1) and (not c2)
    def finalize(self):
        """Clean up the history once tracking ends: keep only frames up to the
        last real update, drop NaN states, and wipe the history entirely if
        the track never moved significantly (i.e. it was probably noise)."""
        if self.last_updated_frame is None:
            self.history = {}
        else:
            # Discard predicted-only frames after the last observed update.
            self.history = {key:val for (key,val) in self.history.items() if
                key <= self.last_updated_frame}
            self.history = {key:val for (key,val) in self.history.items() if
                not np.any(np.isnan(val))}
        # Remove the track if it never moves significantly
        has_significant_motion = False
        has_significant_motion_counter = 0
        first_pos = None
        prev_frame = None
        for frame_no, x_vec in self.history.items():
            pos = x_vec[0:2]
            if first_pos is None:
                first_pos = pos
            else:
                # History is expected to iterate in increasing frame order.
                assert frame_no > prev_frame
                dist = vector_dist(pos, first_pos)
                if dist > self.options.significant_motion_distance:
                    # Require several consecutive "far from start" frames
                    # before accepting the motion as significant (noise guard).
                    has_significant_motion_counter += 1
                    if has_significant_motion_counter > 8:
                        has_significant_motion = True
                        break
                else:
                    has_significant_motion_counter = 0
            prev_frame = frame_no
        if not has_significant_motion:
            self.history = dict()
class Track2D(Track):
    """A road user tracked in the image plane (axis-aligned box center and
    size) with a Kalman filter; may later be promoted to a Track3D."""
    def __init__(self, pos:Position, **kwargs):
        super().__init__(**kwargs)
        self.type = '2D'
        p:Filter2DParams = kwargs['options'].params2D
        # Initialise the filter from the detection's axis-aligned box:
        # state starts as [center x, center y, width, height, ...].
        x1, y1, x2, y2 = pos.aabb
        x = (x1+x2)/2
        y = (y1+y2)/2
        self.filter = filter2D([x, y], [x2-x1, y2-y1],
                               P_factor=p.P_factor, Q_c=p.Q_c, Q_s=p.Q_s,
                               Q_v=p.Q_v, Q_ds=p.Q_ds, Q_a=p.Q_a, Q_cov=p.Q_cov,
                               Q_scov=p.Q_scov, R_c=p.R_c, R_s=p.R_s)
        if not self.options.tracks2D:
            raise ValueError("Tried to create a 2D track when not allowed")
    def store_history(self, frame_no:int, time:float):
        """Record the current filter state (and timestamp) for *frame_no*."""
        if frame_no in self.history:
            raise ValueError(f"Frame number {frame_no} already exists!!")
        self.history[frame_no] = self.filter.x.copy()
        self.times[frame_no] = time
    def predict(self):
        """Advance the filter one time step; mark the track for removal if
        the state became numerically unstable."""
        self.filter.predict()
        if not self.is_numerically_stable():
            self.should_die = True
    def get_x(self):
        """Return the current filter state vector."""
        return self.filter.x
    def update(self, det:Position, dt:float, frame_no:int, current_time:float):
        """Fuse a new 2D detection (measured center x/y, width, height) into
        the filter and record the update time/frame."""
        x1, y1, x2, y2 = det.aabb
        w = x2-x1
        h = y2-y1
        x = (x1+x2)/2
        y = (y1+y2)/2
        z = np.array([x, y, w, h], dtype=np.float32)
        self.filter.update(z, dt)
        assert current_time > self.last_updated
        self.last_updated = current_time
        self.last_updated_frame = frame_no
    # Determine if track has sufficient amount of movement to be converted to a
    # 3D track instead
    def saom(self, current_time:float):
        """Return True when the track moved far enough relative to its box
        size (and was updated recently enough) to be promoted to 3D."""
        # 2D tracks need to have been recently updated for SAOM to trigger
        # otherwise drifting nonsense tracks become 3D tracks
        max_time = 2.01*(1.0/self.options.frame_rate)
        if self.history and (current_time-self.last_updated)<=max_time:
            first = min(self.history.keys())
            xf, yf, wf, hf = self.history[first][0:4]
            xn, yn, wn, hn = self.filter.x[0:4]
            typical_size = np.mean([wf, hf, wn, hn])
            dist = vector_dist([xf, yf], [xn, yn])
            ratio = dist/typical_size
            if ratio > self.options.saom_thresh:
                return True
        return False
    # Convert to 3D track
    def to3D(self, current_time:float):
        """Try to convert this track into a Track3D via active corners, using
        the first and the current box. Returns self when the conversion fails
        (so the caller can simply retry later)."""
        first = min(self.history.keys())
        dt = current_time - self.times[first]
        assert dt > 0
        xf, yf, wf, hf = self.history[first][0:4]
        xn, yn, wn, hn = self.filter.x[0:4]
        aabb_first = to_aabb(xf, yf, wf, hf)
        aabb_now = to_aabb(xn, yn, wn, hn)
        pos_first = Position(aabb=aabb_first, class_name=self.class_name)
        pos_now = Position(aabb=aabb_now, class_name=self.class_name)
        out = activecorners(pos1=pos_first, pos2=pos_now,
                            class_name=self.class_name,
                            world=self.world, dt=dt)
        if out is None:
            # Conversion to 3D failed, try again later
            return self
        else:
            X, Y, l, w, h, v, phi = out
            pos3D=np.array([X, Y], dtype=np.float32)
            shape=np.array([l, w, h], dtype=np.float32)
            # The 2D history doubles as the new track's AABB history: its
            # first four state entries are exactly [x, y, w, h].
            new_track = Track3D(pos3D, shape, phi, v,
                                world=self.world, class_name=self.class_name,
                                options=self.options, current_time=current_time,
                                aabb_history=dict_copy(self.history),
                                old_times=self.times)
            # Same ID to clearly mark that this 3D track inherits from 2D track
            # Unintended side effect is that the track counter is increased
            new_track.id = self.id
            return new_track
class Track3D(Track):
    """A road user tracked in 3D world coordinates: position, planar shape,
    heading (phi) and speed live in the filter; height is smoothed outside
    of it. Can be fed either 3D detections or 2D boxes (via active corners)."""
    def __init__(self, pos3D:np.ndarray, shape:np.ndarray, phi:float,
                 v:float, aabb_history:dict, old_times:dict, **kwargs):
        super().__init__(**kwargs)
        self.type = '3D'
        self.tau = 1.0 / kwargs['world'].frame_rate # seconds per frame
        self.options = kwargs['options']
        self.height = shape[-1] # tracked separately from the filter state
        self.aabb_history = aabb_history # frame -> [x, y, w, h] image-plane box
        self.times = dict_merge(self.times, old_times)
        self.previous_detection = None
        self.old_phi = None # previous heading, used for angle smoothing
        if phi is None:
            # If the road user is standing still, we still want to let
            # activecorners work (or do we?)
            self.init_filter(pos3D, shape, phi, v, self.tau)
        elif np.isnan(phi):
            # If we don't have phi yet, wait to create filter until we do
            # which should happen at next update
            # For now, just store the position which we'll need to compute phi
            # This is only done in GUTS, active corners should never output NaN
            self.filter = None
            self.previous_detection = kwargs['det']
        else:
            self.init_filter(pos3D, shape, phi, v, self.tau)
    def __repr__(self):
        """Short summary: class name, track id and covered frame span."""
        frames = list(self.history.keys())
        if frames:
            frames.sort()
            start = frames[0]
            stop = frames[-1]
        else:
            start = '?'
            stop = '?'
        return f"Track3D {self.class_name} {self.id}, {start}-{stop}"
    def init_filter(self, pos3D, shape, phi, v, tau):
        """Create the 3D filter from initial position/shape/heading/speed."""
        p:Filter3DParams = self.options.params3D
        self.filter = filter3D(pos3D[0:2], shape[0:2], phi, v, tau=tau,
                               kappa=p.kappa, P_factor=p.P_factor,
                               Q_c=p.Q_c, Q_s=p.Q_s, Q_phi=p.Q_phi, Q_v=p.Q_v,
                               Q_omega=p.Q_omega, Q_cov=p.Q_cov,
                               R_c=p.R_c, R_s=p.R_s, R_phi=p.R_phi,
                               min_v_for_rotate=self.options.min_v_for_rotate)
    def store_history(self, frame_no:int, time:float):
        """Record the current filter state for *frame_no*; a no-op while the
        filter has not been created yet (phi was NaN at construction)."""
        if self.filter is None:
            return
        if frame_no in self.history:
            raise ValueError(f"Frame number {frame_no} already exists!!")
        self.history[frame_no] = self.filter.x.copy()
        self.times[frame_no] = time
    def predict(self):
        """Advance the filter one time step; flag the track for removal on
        numerically unstable states. No-op while the filter does not exist."""
        if self.filter is None:
            return
        self.filter.predict()
        if not self.is_numerically_stable():
            self.should_die = True
    def get_x(self):
        """Return the filter state, or a synthetic [x, y, l, w, NaN] vector
        built from the stored detection while the filter does not exist."""
        if self.filter is None:
            x = np.array([*self.previous_detection.pos3D.flatten()[0:2],
                          *self.previous_detection.shape[0:2], float("nan")],
                         dtype=np.float32)
            return x
        else:
            return self.filter.x
    def vector_for_scoring(self, frame_no:int):
        """Return the stored state restricted to the scoring components."""
        X = self.history[frame_no]
        # Scoring vector should be x, y, l, w, phi
        return X[0:5]
    def suitable_previous_aabb_time(self, current_time:float):
        """Pick a past frame from the AABB history (a few frames back when
        enough exist) and return (frame_no, age_in_seconds)."""
        good_number_of_frames = 5
        l = len(self.aabb_history)
        if l <= good_number_of_frames:
            frame_no = min(self.aabb_history.keys())
            return frame_no, current_time-self.times[frame_no]
        else:
            frame_nos = list(self.aabb_history.keys())
            frame_nos.sort()
            # Hopefully not too distant and also not too recent..?
            frame_no = frame_nos[-good_number_of_frames]
            return frame_no, current_time-self.times[frame_no]
    def update(self, det, dt:float, frame_no:int, current_time:float):
        """Fuse a new detection into the track.

        A Position3D updates the filter directly, deriving a heading from the
        motion since the previous detection (skipped when the motion is too
        small to give a reliable angle). A plain 2D Position is first lifted
        to 3D with active corners. Any other type raises ValueError.
        """
        assert current_time >= self.last_updated
        self.last_updated = current_time
        self.last_updated_frame = frame_no
        if isinstance(det, Position3D):
            X, Y = det.pos3D[0:2]
            x, y = self.previous_detection.pos3D[0:2]
            dist = vector_dist([X,Y], [x,y])
            if dist > self.options.min_dist_for_phi:
                # Heading from the displacement; optionally smoothed against
                # the previous heading to damp jitter.
                phi = np.arctan2(Y-y, X-x)
                factor = self.options.phi_smoothing_factor
                if factor > 0.0 and (self.old_phi is not None):
                    phi = weighted_angle(self.old_phi, phi, factor)
                if self.filter is None:
                    # First usable heading: create the filter now.
                    v = dist/self.tau
                    self.init_filter(det.pos3D, det.shape, phi, v, self.tau)
                else:
                    z = np.array([*det.pos3D[0:2], *det.shape[0:2], phi],
                                 dtype=np.float32)
                    self.filter.update(z)
                self.old_phi = phi
        elif isinstance(det, Position):
            # Lift the 2D box to a 3D measurement via active corners, using a
            # somewhat older box for a usable motion baseline.
            before, before_dt = self.suitable_previous_aabb_time(current_time)
            xb, yb, wb, hb = self.aabb_history[before][0:4]
            aabb_before = to_aabb(xb, yb, wb, hb)
            pos_before = Position(aabb=aabb_before, class_name=self.class_name)
            out = activecorners(pos_before, det,
                                self.class_name, self.world,
                                before_dt)
            if out is None:
                # Don't update the filter if active corners fail!
                pass
            else:
                X, Y, l, w, h, v, phi = out
                # Fall back to the current estimates for any component active
                # corners could not determine.
                if l is None or w is None:
                    l, w = self.filter.x[2:4]
                if h is None:
                    h = self.height
                if phi is None:
                    phi = self.filter.x[4]
                z = np.array([X, Y, l, w, phi], dtype=np.float32).flatten()
                self.filter.update(z)
                # Gradually update the height
                self.height = 0.9 * self.height + 0.1 * h
            # Store new AABB in AABB history, because this isn't done elsewhere
            x1, y1, x2, y2 = det.aabb
            xn = (x1+x2)/2.0
            yn = (y1+y2)/2.0
            wn = x2-x1
            hn = y2-y1
            to_be_stored = np.array([xn, yn, wn, hn], dtype=np.float32)
            self.aabb_history[frame_no] = to_be_stored
        else:
            raise ValueError(f"Detection was of unknown type {type(det)}")
        self.previous_detection = det
| 36.39779 | 80 | 0.539921 | 12,458 | 0.945507 | 0 | 0 | 0 | 0 | 0 | 0 | 1,692 | 0.128415 |
e2f024dc0a72d06171bf91a03cad26deda82fb0a | 753 | py | Python | corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from datetime import datetime, timedelta
from dimagi.utils import parsing as dateparse
from casexml.apps.stock.consumption import (
ConsumptionConfiguration,
compute_daily_consumption_from_transactions,
)
to_ts = dateparse.json_format_datetime # NOTE(review): appears unused here; presumably a convenience alias for importers
now = datetime.utcnow() # frozen at import time; all relative times below are anchored to this instant
def ago(days):
    """Return the datetime *days* days before the module-level ``now``."""
    return now - timedelta(days=days)
# note that you must add inferred consumption transactions manually to txdata
def mock_consumption(txdata, window, params=None):
    """Compute daily consumption from *txdata* over the last *window* days.

    Entries in *params* (if given) override the default
    ConsumptionConfiguration settings of min_window=0 / min_periods=0.
    """
    settings = {'min_window': 0, 'min_periods': 0}
    settings.update(params or {})
    config = ConsumptionConfiguration(**settings)
    return compute_daily_consumption_from_transactions(
        txdata,
        ago(window),
        config,
    )
| 25.965517 | 77 | 0.746348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.135458 |
e2f1711a65821686f4006c1b21450b10f12a25e1 | 1,007 | py | Python | multi-cluster-rescheduler/mcr.py | moule3053/mck8s | 9555b87c6cfb47c22a3c5f51ebb786587dddd233 | [
"Apache-2.0"
] | 57 | 2021-07-05T07:27:56.000Z | 2022-02-23T10:56:35.000Z | multi-cluster-rescheduler/mcr.py | 00ahui/mck8s | d7742e11a19b34c21095e5a6b10b3e4d07f8a5e7 | [
"Apache-2.0"
] | 4 | 2021-08-19T08:22:04.000Z | 2021-08-19T09:03:29.000Z | multi-cluster-rescheduler/mcr.py | 00ahui/mck8s | d7742e11a19b34c21095e5a6b10b3e4d07f8a5e7 | [
"Apache-2.0"
] | 6 | 2021-07-22T11:31:17.000Z | 2021-12-03T21:17:26.000Z | import kopf
import time
from utils import get_all_federation_clusters, rescheduleApp
# Create app rescheduler
@kopf.daemon('fogguru.eu', 'v1', 'appreschedulers', initial_delay=5)
def create_fn(stopped, **kwargs):
    """Kopf daemon for 'appreschedulers' resources: until the handler is
    stopped, periodically runs rescheduleApp() whenever the federation
    contains at least one cluster whose name includes 'cloud'."""
    CHECK_PERIOD = 60 # seconds between federation polls when no cloud cluster exists
    RESCHEDULE_PERIOD = 31 * 60 # seconds to wait before running a rescheduling pass
    while not stopped:
        # for now, just reschedule from cloud to fog
        # TO DO: reschedule pods to users' preferred locations
        # Check if there is a cloud cluster
        all_clusters = get_all_federation_clusters()
        if not any('cloud' in s for s in all_clusters):
            print("There are no cloud clusters. Going to next cycle ....", CHECK_PERIOD)
            time.sleep(CHECK_PERIOD)
        else:
            print("Cloud cluster found. Will start rescheduling after " + str(RESCHEDULE_PERIOD) + " seconds .....")
            time.sleep(RESCHEDULE_PERIOD)
            rescheduleApp()
            print("Sleep for " + str(CHECK_PERIOD) + " secs until next cycle .........")
            time.sleep(CHECK_PERIOD)
| 38.730769 | 116 | 0.655412 | 0 | 0 | 0 | 0 | 895 | 0.888779 | 0 | 0 | 367 | 0.364449 |
e2f1d37bd8721b99e8bd17fdefb5d3f548a12c16 | 538 | py | Python | retuo.py | Azi-Dahaka/- | 8d47d8e18a4b4fcfee4d2649c8efa819d4cd357e | [
"MIT"
] | 1 | 2021-11-25T03:28:30.000Z | 2021-11-25T03:28:30.000Z | retuo.py | Azi-Dahaka/- | 8d47d8e18a4b4fcfee4d2649c8efa819d4cd357e | [
"MIT"
] | null | null | null | retuo.py | Azi-Dahaka/- | 8d47d8e18a4b4fcfee4d2649c8efa819d4cd357e | [
"MIT"
] | 2 | 2021-09-06T07:41:48.000Z | 2021-11-25T09:28:07.000Z | # -*- coding:utf-8 -*-
# 1.导入拓展
from flask import Flask
from flask_restful import Api
import config
from app.api.view.auth import wx_login
from app.api.view.talk import Reply
# 2. Create the Flask application instance; __name__ lets Flask locate the
#    application's resources on disk
app = Flask(__name__)
app.config.from_object(config.DevelopmentConfig)
api = Api(app)
# 3. Define global variables
# 4. Define routes and view functions
# Register the RESTful API endpoints
app.add_url_rule('/auth/wxlogin', view_func=wx_login.as_view('wxlogin'))
app.add_url_rule('/reply', view_func=Reply.as_view('reply'))
# 5. Start the program (Flask development server, debug enabled)
if __name__ == '__main__':
    app.run(debug=True)
| 20.692308 | 72 | 0.749071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.394231 |
e2f1f8c5a89469b4bf2471953837accc59dc980e | 6,904 | py | Python | PSO.py | cece95/F21BC-Coursework | dd3586891b6eda03b208e25b00392db042a4ec5b | [
"Apache-2.0"
] | 1 | 2021-04-30T12:24:58.000Z | 2021-04-30T12:24:58.000Z | PSO.py | aahginoux/F21BC-Coursework | dd3586891b6eda03b208e25b00392db042a4ec5b | [
"Apache-2.0"
] | null | null | null | PSO.py | aahginoux/F21BC-Coursework | dd3586891b6eda03b208e25b00392db042a4ec5b | [
"Apache-2.0"
] | 1 | 2021-04-30T12:23:16.000Z | 2021-04-30T12:23:16.000Z | import numpy.random as rand
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
from Particle import Particle
# Initialization of the plots: one shared figure with three axes slots
# (function fit, MSE history, best-particle id), populated by the PSO class.
fig = plt.figure(figsize=(20,10))
axes = [None, None, None]
def generate_random_particle(_id, input_size, neurons):
    """Build a Particle with a random position and zero speed for PSO init.

    The encoded vector holds all connection weights of the ANN, followed by
    two extra values per neuron (activation-function selector and bias).

    :param _id: identifier assigned to the new particle
    :param input_size: number of inputs of the first ANN layer
    :param neurons: list with the neuron count of each layer
    :return: a freshly initialised Particle
    """
    n_neurons = sum(neurons)
    # Weights: input -> first layer, then each layer to the next.
    n_weights = input_size * neurons[0]
    for i in range(len(neurons) - 1):
        n_weights = n_weights + neurons[i]*neurons[i+1]
    total_n_values = n_weights + (2* n_neurons) # give the PSO the possibility to select the activation functions and bias, subtract one if the activation function is not needed for the last neuron
    # Positions uniform in [-1, 1); speeds start at zero. (The original also
    # pre-assigned position/speed to empty lists, which was dead code.)
    position = 2 * rand.random_sample(total_n_values) - 1
    speed = np.zeros(total_n_values)
    return Particle(_id, position, speed, n_weights, n_neurons)
class PSO:
    """Class that implements the PSO algorithm: optimises the ANN parameter
    vector held by each particle against a test set, while live-plotting
    the fitted function, the MSE curve and the fittest particle's id."""
    def __init__(self, swarm_size, n_informants, alpha_max, alpha_min, beta, gamma, delta, epsilon, ann, max_iterations, test_set_path, input_size):
        axes[1] = fig.add_subplot(132)
        axes[2] = fig.add_subplot(133)
        self.swarm_size = swarm_size
        self.alpha_max = alpha_max # inertia weight decays linearly from alpha_max to alpha_min
        self.alpha_min = alpha_min
        self.beta = beta # upper bound for the personal-best attraction weight
        self.gamma = gamma # upper bound for the informant-best attraction weight
        self.delta = delta # upper bound for the swarm-best attraction weight
        self.epsilon = epsilon # step size used when updating positions
        self.swarm = [generate_random_particle(id, input_size, ann.neurons) for id in range(swarm_size)] # init swarm
        self.best = None
        self.best_fitness = 1000 #initialise the error to an high value
        self.ann = ann
        self.max_iterations = max_iterations
        self.input_size = input_size
        self.n_informants = n_informants
        # Setup the dataset structure to expect and the function plots based on the input size
        if input_size == 1:
            columns = ['x', 'y']
            axes[0] = fig.add_subplot(131)
        else:
            columns = ['x1', 'x2', 'y']
            axes[0] = fig.add_subplot(131, projection='3d')
        self.test_set = pd.read_csv(test_set_path, sep='\s+|\t+|\s+\t+|\t+\s+', header=None, names=columns, engine='python')
        #init arrays used to plot the results during the execution
        self.error = []
        self.steps = []
        self.best_record = []
        #assign informants to each particle
        for p in self.swarm:
            p.select_informants(self.swarm, self.n_informants)
    def execute(self):
        """ Function to run the PSO algorithm (drives step() through a
        matplotlib animation until max_iterations frames have run)"""
        anim = FuncAnimation(fig, self.step, frames=self.max_iterations, repeat=False)
        plt.show()
    def step(self, i):
        """ Wrapper to execute one step of the PSO algorithm and plot the intermediate results"""
        self.pso_step(i+1)
        self.plot_result()
    def pso_step(self, i):
        """ Execution of a step of the PSO algorithm as explained in the lectures slides """
        # Evaluate every particle and keep the swarm-wide best.
        for particle in self.swarm:
            self.assess_fitness(particle)
            if self.best is None or particle.fitness < self.best_fitness:
                self.best = particle
                self.best_fitness = particle.fitness
                self.best_fitness_position = particle.best_fitness_position
        x_swarm = self.best_fitness_position
        # Velocity update: inertia + personal-best + informant-best +
        # swarm-best attraction, each with a random per-dimension weight.
        for particle in self.swarm:
            new_speed = np.zeros(particle.speed.shape)
            x_fit = particle.best_fitness_position
            x_inf = particle.get_previous_fittest_of_informants()
            for l in range(len(particle.position)):
                a = (self.alpha_max - self.alpha_min) * ((self.max_iterations - i) / self.max_iterations) + self.alpha_min
                b = random.uniform(0, self.beta)
                c = random.uniform(0, self.gamma)
                d = random.uniform(0, self.delta)
                new_speed[l] = a * particle.speed[l] + b * (x_fit[l] - particle.position[l]) + c * (x_inf[l] - particle.position[l]) + d * (x_swarm[l] - particle.position[l])
            particle.speed = new_speed
            particle.update_position(self.epsilon)
        # Bookkeeping for the live plots.
        self.steps.append(i)
        self.error.append(self.best_fitness)
        self.best_record.append(self.best.id)
        print("{} | Best fitness so far: {}".format(i, self.best_fitness))
    def assess_fitness(self, particle):
        """ Function to assess the fitness of a particle using MSE over the
        whole test set; also refreshes the particle's personal best"""
        graph = []
        old_fitness = particle.best_fitness
        # Load this particle's parameters into the network before evaluating.
        self.ann.set_values(particle.position)
        mse = 0
        n = len(self.test_set)
        for _, row in self.test_set.iterrows():
            if self.input_size == 1:
                x_i = [row[0]]
                d = row[1]
            else:
                x_i = [row[0], row[1]]
                d = row[2]
            u = self.ann.process(x_i)
            graph.append(u)
            mse_i = (d - u) ** 2
            mse = mse + mse_i
        particle.fitness = mse / n
        if (particle.fitness < old_fitness):
            # New personal best: remember fitness, position and the predicted
            # curve (used for plotting).
            particle.best_fitness_graph = graph
            particle.best_fitness = particle.fitness
            particle.best_fitness_position = particle.position
    def plot_result(self):
        "Function to plot the intermediate results of the PSO algorithm"
        #clear the figure from previous step's results
        axes[0].clear()
        axes[1].clear()
        axes[2].clear()
        #Reconstruct the cleared plots
        axes[0].title.set_text('Functions')
        axes[1].title.set_text('MSE')
        axes[1].set_xlabel('Number of iterations')
        axes[1].set_ylabel('Mean Squared Error')
        axes[2].title.set_text('Best Particle')
        axes[2].set_xlabel('Number of iterations')
        axes[2].set_ylabel('Best Particle ID')
        #plot the results in a different manner depending on the input size
        if self.input_size == 1:
            x = self.test_set['x']
            y = self.test_set['y']
            g = self.best.best_fitness_graph
            axes[0].plot(x,g, label='Approximated Function')
            axes[0].plot(x,y, label='Desidered Function')
            axes[0].legend()
        else:
            x1 = self.test_set['x1']
            x2 = self.test_set['x2']
            y = self.test_set['y']
            g = self.best.best_fitness_graph
            axes[0].scatter(x1, x2, y, label='Desidered Function')
            axes[0].scatter(x1, x2, g, label='Approximated Function')
            axes[0].legend()
        #plot error
        axes[1].set_ylim([0, 0.1])
        axes[1].plot(self.steps, self.error)
        #plot the fittest particle
        axes[2].plot(self.steps, self.best_record)
        axes[2].set_ylim([0, self.swarm_size])
| 39.678161 | 198 | 0.60617 | 5,878 | 0.85139 | 0 | 0 | 0 | 0 | 0 | 0 | 1,332 | 0.192932 |
e2f2a441790ca7a9000ae20c712a0f4467b4c1c4 | 1,334 | py | Python | Modulos/ProvasPassadas/aux_scraping.py | gabrielfava/asap | be6211190d4acfca7aacef45d7dc467e2237496d | [
"Apache-2.0"
] | 2 | 2018-03-16T19:24:35.000Z | 2018-03-20T01:15:21.000Z | Modulos/ProvasPassadas/aux_scraping.py | jvalv/asaPY | 97cdc9359d8afeb9747f4372b253b179131d2be4 | [
"Apache-2.0"
] | 1 | 2018-02-24T23:43:15.000Z | 2018-02-24T23:43:15.000Z | Modulos/ProvasPassadas/aux_scraping.py | gabrielfava/asapy | be6211190d4acfca7aacef45d7dc467e2237496d | [
"Apache-2.0"
] | 1 | 2018-02-28T14:45:52.000Z | 2018-02-28T14:45:52.000Z | #ASAPY
import requests
__URL_GLOBAL = "https://www.urionlinejudge.com.br";
def printme(pagina):
    """Fetch the URI Online Judge page for problem *pagina* and print the
    body of its statement <iframe>."""
    body = getCorpo(__URL_GLOBAL + "/judge/pt/problems/view/" + pagina)
    # Locate the <iframe ...> tag and extract its src attribute.
    tag_start = find_str(body, "<iframe")
    tail = body[tag_start:]
    tag_end = find_str(tail, ">") + 1
    tag = tail[:tag_end]
    frame_path = getAttr(tag, "src")
    # Fetch and print the iframe's own document (the problem statement).
    print(getCorpo(__URL_GLOBAL + frame_path))
def find_str(s, char):
    """Return the index of the first occurrence of the substring *char* in
    *s*, or -1 when it is absent.

    The original hand-rolled scan crashed with an IndexError for an empty
    *char* (it unconditionally took char[0]); str.find implements the same
    contract correctly and in C.
    """
    return s.find(char)
#TODO - TRATAR EQUIVALENCIA DE SINTAXE !
def getAttr(tupla, atributo):
    """Extract the value of an HTML attribute written as atributo="value"
    from the tag string *tupla*; return "ERRO" when it is not present.

    The original added the skip-offset to the search result before testing
    for -1, so the not-found branch could never trigger and a missing
    attribute silently produced garbage. Searching for the full
    'name="' marker also avoids matching the name inside another attribute.
    """
    marker = atributo + '="'
    inicio = tupla.find(marker)
    if inicio == -1:
        return "ERRO"
    inicio += len(marker)
    fim = tupla.find('"', inicio)
    return tupla[inicio:fim]
def getCorpo(req):
    """GET the URL *req* and return the raw response body coerced to str
    (i.e. the str() of the bytes content, matching the original)."""
    return str(requests.get(req).content)
# Module-level smoke test: fetches and prints problem 2166's statement
# whenever this file is imported or run.
printme("2166")
#print("titulo => URI Online Judge - Problema 2166 - Raiz Quadrada de 2")
#print("autor => M.C. Pinto, UNILA")
#print("probm => ma das formas de calcular a raiz quadrada de um n\xc3\xbamero natural") | 27.22449 | 90 | 0.586957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.274363 |
e2f2ab45cb99205459023f3975ca984038c2b9fd | 876 | py | Python | picking_numbers/picking_numbers.py | pvital/4lg0rithm5 | 9e3a458b28065e2156aef6df92fe40b35cd9881c | [
"MIT"
] | null | null | null | picking_numbers/picking_numbers.py | pvital/4lg0rithm5 | 9e3a458b28065e2156aef6df92fe40b35cd9881c | [
"MIT"
] | null | null | null | picking_numbers/picking_numbers.py | pvital/4lg0rithm5 | 9e3a458b28065e2156aef6df92fe40b35cd9881c | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Generator method to buld a list containing only the elements with diff <= 1
#
def genSucessors(pivot, array):
    """Yield each value in *array* whose absolute difference from *pivot*
    is at most 1."""
    yield from (value for value in array if abs(pivot - value) <= 1)
#
# Complete the 'pickingNumbers' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY a as parameter.
#
def pickingNumbers(a):
    """Return the size of the largest multiset of values from *a* whose
    pairwise absolute difference is at most 1 (HackerRank "Picking Numbers").

    Improvements over the original: runs in O(n) via value counts instead of
    an O(n^2) scan, no longer mutates the caller's list with an in-place
    sort, no longer shadows the builtin ``max``, and no longer floors the
    answer at 2 (which over-reported inputs such as [1, 5]); an empty input
    yields 0.
    """
    from collections import Counter  # local import keeps the script's top-level imports untouched
    if not a:
        return 0
    counts = Counter(a)
    # Any valid subset uses only the values v and v+1 for some v; a missing
    # key in a Counter simply counts as zero.
    return max(counts[v] + counts[v + 1] for v in counts)
if __name__ == '__main__':
    #fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # HackerRank-style stdin: first line is the element count (read but not
    # otherwise used), second line is the space-separated array.
    n = int(input().strip())
    a = list(map(int, input().rstrip().split()))
    result = pickingNumbers(a)
    #fptr.write(str(result) + '\n')
    #fptr.close()
    print(result)
| 19.043478 | 77 | 0.608447 | 0 | 0 | 105 | 0.119863 | 0 | 0 | 0 | 0 | 341 | 0.389269 |
e2f315499e462d747fce1af2b55052eeb6910f0b | 2,911 | py | Python | toontown/safezone/DistributedButterflyAI.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | 1 | 2021-02-25T06:22:49.000Z | 2021-02-25T06:22:49.000Z | toontown/safezone/DistributedButterflyAI.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | null | null | null | toontown/safezone/DistributedButterflyAI.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | 2 | 2020-11-08T03:38:35.000Z | 2021-09-02T07:03:47.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.distributed.ClockDelta import *
import ButterflyGlobals
import random
class DistributedButterflyAI(DistributedObjectAI):
    """AI-side distributed butterfly: alternates between FLYING and LANDED
    states along scripted points of a playground area, replicating each
    state change to clients through setState."""
    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedButterflyAI")
    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        # Which playground/area's butterfly points this butterfly uses.
        self.area = 0
        self.playground = 0
        # Replicated state: FLYING/LANDED, current and destination point
        # indexes, duration of the current leg, and its network timestamp.
        self.stateIndex = 0
        self.curIndex = 0
        self.destIndex = 0
        self.time = 0
        self.timestamp = 0
    def generate(self):
        """Pick a first route and kick off the fly/land cycle."""
        ButterflyGlobals.generateIndexes(self.doId, self.playground)
        fr = ButterflyGlobals.getFirstRoute(self.playground, self.area, self.doId)
        self.b_setState(ButterflyGlobals.FLYING, fr[1], fr[3], fr[4], globalClockDelta.getRealNetworkTime())
        # Schedule landing once the flight duration (fr[4]) has elapsed.
        taskMgr.doMethodLater(fr[4], self.__land, 'landButterfly%i' % self.doId, [])
    def __land(self):
        """Switch to LANDED for a random while, then schedule the next flight."""
        ttl = random.uniform(0, ButterflyGlobals.MAX_LANDED_TIME)
        self.b_setState(ButterflyGlobals.LANDED, self.curIndex, self.destIndex, ttl, globalClockDelta.getRealNetworkTime())
        taskMgr.doMethodLater(ttl, self.__fly, 'flyButterfly%i' % self.doId, [])
    def __fly(self):
        """Fly from the current spot toward the next scripted position."""
        # NOTE: the local name shadows the builtin next(); unchanged here.
        next = ButterflyGlobals.getNextPos(ButterflyGlobals.ButterflyPoints[self.playground][self.area][self.destIndex], self.playground, self.area, self.doId)
        self.b_setState(ButterflyGlobals.FLYING, self.destIndex, next[1], next[2], globalClockDelta.getRealNetworkTime())
        taskMgr.doMethodLater(next[2], self.__land, 'landButterfly%i' % self.doId, [])
    # The set*/d_set*/b_set* trios follow the distributed-object convention:
    # the plain setter updates local state, d_ sends the update to clients,
    # and b_ does both.
    def setArea(self, playground, area):
        self.area = area
        self.playground = playground
    def d_setArea(self, playground, area):
        self.sendUpdate('setArea', [playground, area])
    def b_setArea(self, playground, area):
        self.setArea(playground, area)
        self.d_setArea(playground, area)
    def getArea(self):
        return [self.playground, self.area]
    def setState(self, stateIndex, curIndex, destIndex, time, timestamp):
        self.stateIndex = stateIndex
        self.curIndex = curIndex
        self.destIndex = destIndex
        self.time = time
        self.timestamp = timestamp
    def d_setState(self, stateIndex, curIndex, destIndex, time, timestamp):
        self.sendUpdate('setState', [stateIndex, curIndex, destIndex, time, timestamp])
    def b_setState(self, stateIndex, curIndex, destIndex, time, timestamp):
        self.setState(stateIndex, curIndex, destIndex, time, timestamp)
        self.d_setState(stateIndex, curIndex, destIndex, time, timestamp)
    def getState(self):
        return [self.stateIndex, self.curIndex, self.destIndex, self.time, self.timestamp]
    def avatarEnter(self):
        pass
| 41 | 159 | 0.684988 | 2,704 | 0.92889 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.031948 |
e2f4dd0ee18ce021318531b4d9a81e9c00ac0a21 | 1,637 | py | Python | proj01_ifelse/proj01.py | CalvinsHyper/Vanderbilt-2018 | fa67c1f08f5d29bac4bd7747ec4a9110e5b3de00 | [
"MIT"
] | null | null | null | proj01_ifelse/proj01.py | CalvinsHyper/Vanderbilt-2018 | fa67c1f08f5d29bac4bd7747ec4a9110e5b3de00 | [
"MIT"
] | null | null | null | proj01_ifelse/proj01.py | CalvinsHyper/Vanderbilt-2018 | fa67c1f08f5d29bac4bd7747ec4a9110e5b3de00 | [
"MIT"
] | null | null | null | # Name:
# Date:
# proj01: A Simple Program
# Part I:
# This program asks the user for his/her name and grade.
#Then, it prints out a sentence that says the number of years until they graduate.
print "Hello"
Your_Name = raw_input("What's your name?")
print "Your name is "+ Your_Name
Your_Grade = raw_input("What Grade are you in?")
print "you are in"+ Your_Grade
# Years left until graduation.
# NOTE(review): assumes graduation happens after grade 16 -- confirm the
# intended school system before relying on this number.
x = 16-int(Your_Grade)
print "you wil graduate in" +str(x) + "Years"
# Part II:
# This program asks the user for his/her name and birth month.
# Then, it prints a sentence that says the number of days and months until their birthday
print "Part II"
Current_Month = int(raw_input("what is the current month NUMBER"))
Current_Day = int(raw_input("What is the current day NUMBER"))
Your_Month = int(raw_input("what is your birth month NUMBER?"))
Your_Day = int(raw_input("what day of the month is your Birthday NUMBER?"))
q = (Your_Month-Current_Month)
w = (12-Current_Month-Your_Month)
e = (Your_Day-Current_Day)
r = (30-Current_Day-Your_Day)
if Your_Month>Current_Month:
print "the number of days until your bday is " + str( q)
else:
print"the number of days until your bday is " + str( w)
if Your_Day >= Current_Day:
print "the number of months until your bday is" + str(e)
else:
print "The number of months until your birthday is" + str(r)
# If you complete extensions, describe your extensions here!
Your_Age = int(raw_input("how old are you"))
# Use a single if/elif chain so exactly one message prints. The original used
# three independent if statements: a 13-year-old matched none of them (no
# output), and anyone over 17 matched two and got two messages.
if Your_Age < 13:
    print ("you may only see G and PG movies")
elif Your_Age <= 17:
    print ("You can see any movies except for R movies")
else:
    print ("you can watch any rated movie")
| 29.232143 | 89 | 0.718998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.58766 |
e2f8cc5f79c191afa6de68ed0f7f734ad2297be2 | 1,113 | py | Python | app-django/departamentos/models.py | ilanaraujo/sistema-gerenciamento-empresarial | d9be1d0b1a7541205099bb99311611d193d3ec7d | [
"MIT"
] | null | null | null | app-django/departamentos/models.py | ilanaraujo/sistema-gerenciamento-empresarial | d9be1d0b1a7541205099bb99311611d193d3ec7d | [
"MIT"
] | null | null | null | app-django/departamentos/models.py | ilanaraujo/sistema-gerenciamento-empresarial | d9be1d0b1a7541205099bb99311611d193d3ec7d | [
"MIT"
] | null | null | null | from io import IncrementalNewlineDecoder
from django.db import models
# Department model: a named department with cached counters for how many
# projects and employees are currently attached to it.
class Departamento(models.Model):
    id = models.IntegerField(primary_key=True, editable=False)  # explicit (non-auto) integer primary key
    nome = models.CharField(max_length=255, blank=False)  # department name
    numero_projetos = models.IntegerField(default=0) # number of projects in the department
    numero_funcionarios = models.IntegerField(default=0) # number of employees in the department
    # Class constructor (kept for reference; Django models take fields as kwargs)
    #def __init__(self, nome):
    #    self.nome = nome
    def __str__(self):
        return self.nome
    def adiciona_funcionario(self, id_funcionario):
        """Increment the employee counter (in memory only; caller must save())."""
        # ToDo: add a row to the funcionario_departamento join table
        print(id_funcionario)  # debug output of the employee id being added
        self.numero_funcionarios += 1
    def remove_funcionario(self, id_funcionario):
        """Decrement the employee counter (in memory only; caller must save())."""
        # ToDo: remove the row from the funcionario_departamento join table
        print(id_funcionario)  # debug output of the employee id being removed
        self.numero_funcionarios -= 1
    def adiciona_projeto(self):
        """Increment the project counter (in memory only; caller must save())."""
        self.numero_projetos += 1
    def remove_projeto(self):
        """Decrement the project counter (in memory only; caller must save())."""
        self.numero_projetos -= 1
| 32.735294 | 101 | 0.707996 | 1,017 | 0.912926 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.265709 |
e2f8e237bea623fc9982392a84d67cc07da6f9f5 | 422 | py | Python | leetcode/wc_count_even_dig_sum.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | leetcode/wc_count_even_dig_sum.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | leetcode/wc_count_even_dig_sum.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | """
https://leetcode.com/contest/weekly-contest-281/problems/count-integers-with-even-digit-sum/
Tags: Weekly-Contest_281; Brute-Force; Easy
"""
class Solution:
    def countEven(self, num: int) -> int:
        """Count the integers in [1, num] whose digit sum is even."""
        total = 0
        for value in range(1, num + 1):
            # Extract digits arithmetically instead of via str() round-trip.
            digit_sum = 0
            while value:
                digit_sum += value % 10
                value //= 10
            if digit_sum % 2 == 0:
                total += 1
        return total
| 20.095238 | 96 | 0.504739 | 265 | 0.627962 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.388626 |
e2f9e48c836975b60f61a40a6083287d7f5083ca | 9,054 | py | Python | tools/train_dist.py | shi510/cifr-pytorch | 8e9aec5e520f0c1d89e97fd60abc7d72ff1a2854 | [
"Apache-2.0"
] | null | null | null | tools/train_dist.py | shi510/cifr-pytorch | 8e9aec5e520f0c1d89e97fd60abc7d72ff1a2854 | [
"Apache-2.0"
] | null | null | null | tools/train_dist.py | shi510/cifr-pytorch | 8e9aec5e520f0c1d89e97fd60abc7d72ff1a2854 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import torch
import matplotlib.pyplot as plt
from torch.utils.data.distributed import DistributedSampler
from torch import distributed as dist
from torch import optim
from tqdm import tqdm
from torch_ema import ExponentialMovingAverage
from cifr.core.config import Config
from cifr.models.builder import build_architecture, build_optimizer, build_dataset
from cifr.models.builder import build_discriminator
from cifr.models.losses.contextual_loss import ContextualLoss, ContextualBilateralLoss
from cifr.models.losses.gradient_norm import normalize_gradient
from cifr.models.losses.gan_loss import d_logistic_loss
from cifr.models.losses.gan_loss import g_nonsaturating_loss
from tools.utils import query_all_pixels
from tools.utils import requires_grad
from tools.utils import save_pred_img
WORK_DIR = './work_dir'
def synchronize():
    """Barrier across all distributed workers.

    A no-op when torch.distributed is unavailable, when no process group
    has been initialized, or when only a single worker is running.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() > 1:
        dist.barrier()
def get_world_size():
    """Return the number of distributed workers, or 1 outside distributed runs."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def reduce_loss_dict(loss_dict):
    """Reduce a dict of scalar loss tensors across distributed workers.

    After the call, rank 0 holds the losses averaged over all workers
    (other ranks keep the un-averaged reduced tensors). When fewer than
    two workers are running, the input dict is returned unchanged.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return loss_dict
    world_size = dist.get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        # Iterate keys in sorted order so every rank stacks identically.
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[name] for name in names], 0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            stacked /= world_size
        return dict(zip(names, stacked))
def train(args, config, device):
    """Distributed adversarial training loop.

    Builds the generator pair (feature encoder + implicit model) and the
    discriminator from *config*, wraps each in DistributedDataParallel,
    then alternates per batch:

    * a generator step: contextual (VGG) loss plus non-saturating GAN loss
      on fully reconstructed images, and an L1 loss on ground-truth
      coordinate queries, and
    * a discriminator step: logistic GAN loss, optionally with gradient
      normalization.

    Rank 0 displays progress bars with losses averaged across workers,
    saves a checkpoint each epoch, and renders validation previews with
    the EMA weights.
    """
    model = build_architecture(config.model).to(device)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    encoder = build_architecture(config.encoder).to(device)
    encoder = torch.nn.SyncBatchNorm.convert_sync_batchnorm(encoder)
    disc = build_discriminator(config.discriminator).to(device)
    disc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(disc)
    model = torch.nn.parallel.DistributedDataParallel(
        model,
        device_ids=[dist.get_rank()],
        output_device=dist.get_rank(),
        broadcast_buffers=False
    )
    encoder = torch.nn.parallel.DistributedDataParallel(
        encoder,
        device_ids=[dist.get_rank()],
        output_device=dist.get_rank(),
        broadcast_buffers=False
    )
    disc = torch.nn.parallel.DistributedDataParallel(
        disc,
        device_ids=[dist.get_rank()],
        output_device=dist.get_rank(),
        broadcast_buffers=False
    )
    # One optimizer for the whole generator (encoder + model), a second one
    # for the discriminator; both are built from the same optimizer config.
    config.optimizer.update({'params': [
        {'params': encoder.parameters()},
        {'params': model.parameters()}
    ]})
    optim_g = build_optimizer(config.optimizer)
    config.optimizer.update({'params': disc.parameters()})
    optim_d = build_optimizer(config.optimizer)
    scheduler_g = optim.lr_scheduler.StepLR(optim_g, step_size=50, gamma=0.5)
    scheduler_d = optim.lr_scheduler.StepLR(optim_d, step_size=50, gamma=0.5)
    # EMA shadows of the generator weights; used for checkpoint previews.
    model_ema = ExponentialMovingAverage(model.parameters(), decay=0.995)
    encoder_ema = ExponentialMovingAverage(encoder.parameters(), decay=0.995)
    train_set_gan = build_dataset(config.train_dataset_gan)
    train_set = build_dataset(config.train_dataset)
    test_set = build_dataset(config.test_dataset)
    train_loader_gan = torch.utils.data.DataLoader(
        train_set_gan,
        batch_size=config.batch_size,
        num_workers=6,
        drop_last=True,
        sampler=DistributedSampler(train_set_gan, shuffle=True),
    )
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=config.batch_size,
        num_workers=6,
        drop_last=True,
        sampler=DistributedSampler(train_set, shuffle=True),
    )
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=1,
        num_workers=1
    )
    contextual_loss = ContextualLoss(use_vgg=True, vgg_layer="conv5_4").to(device)
    loss_fn = torch.nn.L1Loss()
    # With gradient norm disabled, fall back to a plain discriminator call.
    grad_norm_fn = normalize_gradient if config.discriminator_gradient_norm else lambda fn, x: fn(x)
    # Run name defaults to the config file's base name unless --name is given.
    config_name = os.path.splitext(os.path.basename(args.config))[0] if args.name is None else args.name
    os.makedirs(f'{WORK_DIR}/{config_name}/images', exist_ok=True)
    os.makedirs(f'{WORK_DIR}/{config_name}/checkpoints', exist_ok=True)
    # config.dump(f'{WORK_DIR}/{config_name}/{config_name}.py')
    rows = 20
    cols = 3
    fig = plt.figure(figsize=(15, rows*6))
    total_iter = len(train_set) // config.batch_size // dist.get_world_size()
    epoch_pbar = tqdm(
        range(config.epoch),
        total=config.epoch,
        desc='Epoch',
        position=0,
        ncols=0,
        disable=dist.get_rank()!=0
    )
    for epoch in epoch_pbar:
        iter_pbar = tqdm(
            enumerate(zip(train_loader, train_loader_gan)),
            total=total_iter,
            leave=False,
            position=1,
            ncols=0,
            disable=dist.get_rank()!=0
        )
        for n, (batch, batch_gan) in iter_pbar:
            encoder.train()
            model.train()
            disc.train()
            lr = batch_gan['lr'].to(device)
            coord = batch_gan['coord'].to(device)
            cell = batch_gan['cell'].to(device)
            real = batch_gan['real'].to(device)
            #
            # Generator Step
            #
            requires_grad(disc, False)
            optim_g.zero_grad()
            fake = query_all_pixels(encoder, model, lr, coord, cell, 1024)
            fake_pred = grad_norm_fn(disc, fake)
            ctx_loss = contextual_loss(fake, real)
            loss_fake = g_nonsaturating_loss(fake_pred)
            loss_g = ctx_loss + loss_fake
            loss_g.backward()
            # Supervised branch: L1 loss on ground-truth coordinate queries.
            query_inp = batch['inp'].to(device)
            query_coord = batch['coord'].to(device)
            query_cell = batch['cell'].to(device)
            query_gt = batch['gt'].to(device)
            feature = encoder(query_inp)
            query_pred = model(query_inp, feature, query_coord, query_cell)
            query_l1_loss = loss_fn(query_pred, query_gt)
            query_l1_loss.backward()
            optim_g.step()
            encoder_ema.update()
            model_ema.update()
            #
            # Discriminator Step
            #
            requires_grad(disc, True)
            optim_d.zero_grad()
            fake_pred = grad_norm_fn(disc, fake.detach())
            real_pred = grad_norm_fn(disc, real)
            loss_d = d_logistic_loss(real_pred, fake_pred)
            loss_d.backward()
            optim_d.step()
            # Average the displayed losses across workers; rank 0 reports.
            loss_dict = {
                'd': loss_d,
                'g': loss_g,
                'g_ctx': ctx_loss,
                'query_l1': query_l1_loss
            }
            reduced_loss = reduce_loss_dict(loss_dict)
            if dist.get_rank() == 0:
                loss_d = reduced_loss['d']
                loss_g = reduced_loss['g']
                ctx_loss = reduced_loss['g_ctx']
                query_l1_loss = reduced_loss['query_l1']
                loss_str = f'd: {loss_d:.4f};'
                loss_str += f' g: {loss_g:.4f};'
                loss_str += f' g_ctx: {ctx_loss:.4f}'
                loss_str += f' query_l1: {query_l1_loss:.4f}'
                iter_pbar.set_description(loss_str)
        scheduler_g.step()
        scheduler_d.step()
        if dist.get_rank() == 0:
            # Rank 0 checkpoints all weights, then temporarily swaps in the
            # EMA weights to render validation previews before restoring.
            torch.save(
                {
                    'encoder': encoder.module.state_dict(),
                    'model': model.module.state_dict(),
                    'encoder_ema': encoder_ema.state_dict(),
                    'model_ema': model_ema.state_dict(),
                    'discriminator': disc.module.state_dict(),
                },
                f'{WORK_DIR}/{config_name}/checkpoints/{epoch+1:0>6}.pth'
            )
            encoder_ema.store(encoder.parameters())
            model_ema.store(model.parameters())
            encoder_ema.copy_to(encoder.parameters())
            model_ema.copy_to(model.parameters())
            encoder.eval()
            model.eval()
            img_path = f'{WORK_DIR}/{config_name}/images/train_{epoch+1:0>6}.jpg'
            save_pred_img(encoder, model, test_loader, img_path, fig, rows, cols)
            encoder_ema.restore(encoder.parameters())
            model_ema.restore(model.parameters())
        iter_pbar.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True)  # path to the training config file
    parser.add_argument('--name', type=str, default=None)  # optional run name (defaults to config basename)
    args = parser.parse_args()
    # The distributed launcher (torchrun / torch.distributed.launch) exports
    # LOCAL_RANK for each spawned worker process.
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = torch.device(f'cuda:{local_rank}')
    # Wait until every worker has finished process-group setup.
    synchronize()
    cfg = Config.fromfile(args.config)
    train(args, cfg, device)
| 34.295455 | 104 | 0.623481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 664 | 0.073338 |
e2fa28ba790d57d8d11a1525236c74ab08efcf00 | 14,930 | py | Python | Lib/site-packages/hackedit/app/forms/main_window_ui.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/hackedit/app/forms/main_window_ui.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/hackedit/app/forms/main_window_ui.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/Colin/Documents/hackedit/data/forms/main_window.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt Designer-generated UI definition for the HackEdit main window.

    Auto-generated by pyuic5 from main_window.ui. Do not edit by hand;
    regenerate from the .ui file instead (manual changes will be lost).
    """
    def setupUi(self, MainWindow):
        """Build all widgets, menus, toolbar and actions onto *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(778, 575)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/hackedit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        MainWindow.setDockOptions(QtWidgets.QMainWindow.AnimatedDocks)
        # Central area: a stacked widget with an editor page and an
        # "open a document" placeholder page.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
        self.stackedWidget.setObjectName("stackedWidget")
        self.page_edit = QtWidgets.QWidget()
        self.page_edit.setObjectName("page_edit")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.page_edit)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.tabs = SplittableCodeEditTabWidget(self.page_edit)
        self.tabs.setObjectName("tabs")
        self.verticalLayout.addWidget(self.tabs)
        self.stackedWidget.addWidget(self.page_edit)
        self.page_empty = QtWidgets.QWidget()
        self.page_empty.setObjectName("page_empty")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.page_empty)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        spacerItem = QtWidgets.QSpacerItem(20, 171, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_2.addItem(spacerItem, 0, 1, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(193, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem1, 1, 0, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(192, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem2, 1, 2, 1, 1)
        self.label = QtWidgets.QLabel(self.page_empty)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 1, 1, 1, 1)
        spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_2.addItem(spacerItem3, 2, 1, 1, 1)
        self.stackedWidget.addWidget(self.page_empty)
        self.gridLayout.addWidget(self.stackedWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: File / Edit / Tools / View / Help.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 778, 27))
        self.menubar.setObjectName("menubar")
        self.mnu_file = QtWidgets.QMenu(self.menubar)
        self.mnu_file.setObjectName("mnu_file")
        self.mnu_view = QtWidgets.QMenu(self.menubar)
        self.mnu_view.setObjectName("mnu_view")
        self.mnu_windows = QtWidgets.QMenu(self.mnu_view)
        self.mnu_windows.setObjectName("mnu_windows")
        self.mnu_help = QtWidgets.QMenu(self.menubar)
        self.mnu_help.setObjectName("mnu_help")
        self.mnu_edit = QtWidgets.QMenu(self.menubar)
        self.mnu_edit.setObjectName("mnu_edit")
        self.menuActive_editor = QtWidgets.QMenu(self.mnu_edit)
        icon = QtGui.QIcon.fromTheme("accessories-text-editor")
        self.menuActive_editor.setIcon(icon)
        self.menuActive_editor.setObjectName("menuActive_editor")
        self.menuTools = QtWidgets.QMenu(self.menubar)
        self.menuTools.setObjectName("menuTools")
        MainWindow.setMenuBar(self.menubar)
        self.toolBarFile = QtWidgets.QToolBar(MainWindow)
        self.toolBarFile.setObjectName("toolBarFile")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBarFile)
        # Actions (icons resolved from the desktop icon theme).
        self.action_open = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("folder")
        self.action_open.setIcon(icon)
        self.action_open.setShortcutContext(QtCore.Qt.WindowShortcut)
        self.action_open.setObjectName("action_open")
        self.action_save = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-save")
        self.action_save.setIcon(icon)
        self.action_save.setObjectName("action_save")
        self.action_save_as = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-save-as")
        self.action_save_as.setIcon(icon)
        self.action_save_as.setObjectName("action_save_as")
        self.action_close = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("window-close")
        self.action_close.setIcon(icon)
        self.action_close.setObjectName("action_close")
        self.action_quit = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("application-exit")
        self.action_quit.setIcon(icon)
        self.action_quit.setObjectName("action_quit")
        self.action_preferences = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("preferences-system")
        self.action_preferences.setIcon(icon)
        self.action_preferences.setObjectName("action_preferences")
        self.action_about = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("help-about")
        self.action_about.setIcon(icon)
        self.action_about.setObjectName("action_about")
        self.action_help = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("system-help")
        self.action_help.setIcon(icon)
        self.action_help.setObjectName("action_help")
        self.action_new = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-new")
        self.action_new.setIcon(icon)
        self.action_new.setObjectName("action_new")
        self.action_report_bug = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("tools-report-bug")
        self.action_report_bug.setIcon(icon)
        self.action_report_bug.setObjectName("action_report_bug")
        self.action_save_all = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-save-all")
        self.action_save_all.setIcon(icon)
        self.action_save_all.setObjectName("action_save_all")
        self.action_check_for_update = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("system-software-update")
        self.action_check_for_update.setIcon(icon)
        self.action_check_for_update.setObjectName("action_check_for_update")
        self.a_fullscreen = QtWidgets.QAction(MainWindow)
        self.a_fullscreen.setCheckable(True)
        self.a_fullscreen.setChecked(False)
        icon = QtGui.QIcon.fromTheme("view-fullscreen")
        self.a_fullscreen.setIcon(icon)
        self.a_fullscreen.setObjectName("a_fullscreen")
        self.a_menu = QtWidgets.QAction(MainWindow)
        self.a_menu.setCheckable(True)
        self.a_menu.setChecked(False)
        self.a_menu.setObjectName("a_menu")
        self.action_open_file = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-open")
        self.action_open_file.setIcon(icon)
        self.action_open_file.setObjectName("action_open_file")
        self.a_print = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-print")
        self.a_print.setIcon(icon)
        self.a_print.setObjectName("a_print")
        # Menu and toolbar population.
        self.mnu_file.addAction(self.action_new)
        self.mnu_file.addAction(self.action_open_file)
        self.mnu_file.addAction(self.action_open)
        self.mnu_file.addSeparator()
        self.mnu_file.addAction(self.action_save)
        self.mnu_file.addAction(self.action_save_as)
        self.mnu_file.addAction(self.action_save_all)
        self.mnu_file.addSeparator()
        self.mnu_file.addAction(self.a_print)
        self.mnu_file.addSeparator()
        self.mnu_file.addAction(self.action_close)
        self.mnu_file.addSeparator()
        self.mnu_file.addAction(self.action_quit)
        self.mnu_view.addAction(self.a_fullscreen)
        self.mnu_view.addAction(self.a_menu)
        self.mnu_view.addSeparator()
        self.mnu_view.addAction(self.mnu_windows.menuAction())
        self.mnu_help.addAction(self.action_help)
        self.mnu_help.addAction(self.action_about)
        self.mnu_help.addSeparator()
        self.mnu_help.addAction(self.action_report_bug)
        self.mnu_help.addSeparator()
        self.mnu_help.addAction(self.action_check_for_update)
        self.mnu_edit.addAction(self.menuActive_editor.menuAction())
        self.mnu_edit.addSeparator()
        self.mnu_edit.addAction(self.action_preferences)
        self.menubar.addAction(self.mnu_file.menuAction())
        self.menubar.addAction(self.mnu_edit.menuAction())
        self.menubar.addAction(self.menuTools.menuAction())
        self.menubar.addAction(self.mnu_view.menuAction())
        self.menubar.addAction(self.mnu_help.menuAction())
        self.toolBarFile.addAction(self.action_new)
        self.toolBarFile.addAction(self.action_open_file)
        self.toolBarFile.addAction(self.action_open)
        self.toolBarFile.addSeparator()
        self.toolBarFile.addAction(self.action_save)
        self.toolBarFile.addAction(self.action_save_as)
        self.retranslateUi(MainWindow)
        self.stackedWidget.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all translatable UI strings (called from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "HackEdit"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt;\">Open a document</span></p><hr/><ul style=\"margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 1;\"><li style=\" margin-top:12px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-style:italic;\">File > Open File</span> (<span style=\" font-style:italic;\">Ctrl+O</span>)</li><li style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-style:italic;\">Goto > Goto anything</span> (<span style=\" font-style:italic;\">Ctrl+P</span>) and type to open file from any open project</li><li style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Use the project tree view</li></ul><ul style=\"margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 1;\"><li style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Drag and drop files here</li></ul></body></html>"))
        self.mnu_file.setTitle(_translate("MainWindow", "Fi&le"))
        self.mnu_view.setTitle(_translate("MainWindow", "&View"))
        self.mnu_windows.setToolTip(_translate("MainWindow", "The list of windows"))
        self.mnu_windows.setTitle(_translate("MainWindow", "&Windows"))
        self.mnu_help.setTitle(_translate("MainWindow", "?"))
        self.mnu_edit.setTitle(_translate("MainWindow", "E&dit"))
        self.menuActive_editor.setToolTip(_translate("MainWindow", "Active editor actions"))
        self.menuActive_editor.setTitle(_translate("MainWindow", "&Active editor"))
        self.menuTools.setTitle(_translate("MainWindow", "&Tools"))
        self.toolBarFile.setWindowTitle(_translate("MainWindow", "FileToolBar"))
        self.action_open.setText(_translate("MainWindow", "&Open project"))
        self.action_open.setToolTip(_translate("MainWindow", "Open a project"))
        self.action_open.setShortcut(_translate("MainWindow", "Ctrl+O"))
        self.action_save.setText(_translate("MainWindow", "&Save"))
        self.action_save.setToolTip(_translate("MainWindow", "Save current editor"))
        self.action_save.setShortcut(_translate("MainWindow", "Ctrl+S"))
        self.action_save_as.setText(_translate("MainWindow", "Sa&ve as"))
        self.action_save_as.setToolTip(_translate("MainWindow", "Save current editor as"))
        self.action_save_as.setShortcut(_translate("MainWindow", "Ctrl+Alt+S"))
        self.action_close.setText(_translate("MainWindow", "&Close window"))
        self.action_close.setShortcut(_translate("MainWindow", "Ctrl+Shift+Q"))
        self.action_quit.setText(_translate("MainWindow", "&Quit"))
        self.action_quit.setToolTip(_translate("MainWindow", "Quit application"))
        self.action_quit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
        self.action_preferences.setText(_translate("MainWindow", "&Preferences"))
        self.action_preferences.setToolTip(_translate("MainWindow", "Edit preferences"))
        self.action_preferences.setShortcut(_translate("MainWindow", "Ctrl+,"))
        self.action_about.setText(_translate("MainWindow", "&About"))
        self.action_about.setToolTip(_translate("MainWindow", "About HackEdit"))
        self.action_help.setText(_translate("MainWindow", "&Help"))
        self.action_help.setToolTip(_translate("MainWindow", "Get some help"))
        self.action_new.setText(_translate("MainWindow", "&New"))
        self.action_new.setToolTip(_translate("MainWindow", "Create a new file/project"))
        self.action_new.setShortcut(_translate("MainWindow", "Ctrl+N"))
        self.action_report_bug.setText(_translate("MainWindow", "&Report an issue..."))
        self.action_report_bug.setToolTip(_translate("MainWindow", "Create an issue (report a bug/enhancement)"))
        self.action_save_all.setText(_translate("MainWindow", "Save a&ll"))
        self.action_save_all.setToolTip(_translate("MainWindow", "Save all editors"))
        self.action_save_all.setShortcut(_translate("MainWindow", "Ctrl+Shift+S"))
        self.action_check_for_update.setText(_translate("MainWindow", "&Check for update"))
        self.a_fullscreen.setText(_translate("MainWindow", "&Toggle fullscreen"))
        self.a_fullscreen.setToolTip(_translate("MainWindow", "Toggle fullscreen"))
        self.a_menu.setText(_translate("MainWindow", "Toggle &menu"))
        self.a_menu.setToolTip(_translate("MainWindow", "Show/Hide menu bar"))
        self.action_open_file.setText(_translate("MainWindow", "Open &file"))
        self.action_open_file.setToolTip(_translate("MainWindow", "Open a file"))
        self.a_print.setText(_translate("MainWindow", "&Print"))
from pyqode.core.widgets import SplittableCodeEditTabWidget
| 61.950207 | 1,233 | 0.709779 | 14,584 | 0.976825 | 0 | 0 | 0 | 0 | 0 | 0 | 3,418 | 0.228935 |
e2fd92287732d912730a455c577004d7bf52e164 | 1,314 | py | Python | order/models.py | divyesh1099/badboystyle | f4fec0858b43e14f0e1f173261f363d4262c28ea | [
"MIT"
] | null | null | null | order/models.py | divyesh1099/badboystyle | f4fec0858b43e14f0e1f173261f363d4262c28ea | [
"MIT"
] | null | null | null | order/models.py | divyesh1099/badboystyle | f4fec0858b43e14f0e1f173261f363d4262c28ea | [
"MIT"
] | null | null | null | from django.db import models
import uuid
from product.models import Product
from django.contrib.auth.models import User
# Create your models here.
class Order(models.Model):
    """A customer's order: purchased products, delivery details, payment
    method and fulfilment state (dispatched / delivered / paid / cancelled)."""
    generated_order_id = models.CharField(max_length=100, default=uuid.uuid4, unique=True)
    products = models.ManyToManyField(Product, related_name='product_of_order')
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    payment_method = models.CharField(max_length=1000)
    date = models.DateField(auto_now_add=True)  # order creation date
    time = models.TimeField(auto_now_add=True)  # order creation time
    address = models.TextField()
    city = models.CharField(max_length=1000)
    state = models.CharField(max_length=1000)
    zip = models.PositiveBigIntegerField()  # NOTE: shadows builtin zip(); kept for DB compatibility
    phonenumber = models.PositiveBigIntegerField()
    amount = models.PositiveIntegerField()
    dispatched = models.BooleanField()
    # NOTE(review): auto_now_add stamps row creation, not the actual
    # dispatch/delivery moment -- confirm whether these should be set manually.
    dispatched_timestamp = models.DateTimeField(auto_now_add=True)
    delivered = models.BooleanField()
    delivered_timestamp = models.DateTimeField(auto_now_add=True)
    paid = models.BooleanField()
    cancelled = models.BooleanField(default=False)
    active = models.BooleanField(default=True)

    class Meta:
        ordering = ['-date']  # newest orders first

    def __unicode__(self):
        # BUG FIX: previously returned self.name, but Order defines no "name"
        # field, so this raised AttributeError; mirror __str__ instead.
        return str(self.id)

    def __str__(self):
        return str(self.id)
| 32.85 | 91 | 0.739726 | 1,161 | 0.883562 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.038813 |
e2fd9b72303648dae3dfdfa2c4d0a2b7b6a25ffe | 164 | py | Python | src/ctc/protocols/uniswap_v2_utils/__init__.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z | src/ctc/protocols/uniswap_v2_utils/__init__.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z | src/ctc/protocols/uniswap_v2_utils/__init__.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z | from .uniswap_v2_deltas import *
from .uniswap_v2_events import *
from .uniswap_v2_metadata import *
from .uniswap_v2_spec import *
from .uniswap_v2_state import *
| 27.333333 | 34 | 0.817073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e2fdb731838b2d76921b4855eabdb911ea477912 | 558 | py | Python | MAX40080/src/torque_test_stand/src/torque_tester.py | MilosRasic98/Orbweaver-Rover | caa2ac0a73211b2af304f09fdc0d6be632e71ddd | [
"MIT"
] | 1 | 2022-01-25T16:38:02.000Z | 2022-01-25T16:38:02.000Z | MAX40080/src/torque_test_stand/src/torque_tester.py | MilosRasic98/Orbweaver-Rover | caa2ac0a73211b2af304f09fdc0d6be632e71ddd | [
"MIT"
] | null | null | null | MAX40080/src/torque_test_stand/src/torque_tester.py | MilosRasic98/Orbweaver-Rover | caa2ac0a73211b2af304f09fdc0d6be632e71ddd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rospy
import serial
from std_msgs.msg import Float32
tt_arduino = serial.Serial("/dev/ttyUSB0", 9600)
rospy.init_node('torque_test_stand', anonymous = False)
pub = rospy.Publisher('/test_equipment/measured_torque', Float32, queue_size=10)
r = rospy.Rate(10)
print('Torque Test Stand Node Started!')
while not rospy.is_shutdown():
raw_data = str(tt_arduino.readline())
extracted_data = raw_data[2:raw_data.find('\\r')]
converted_data = float(extracted_data)
pub.publish(converted_data)
r.sleep() | 27.9 | 80 | 0.731183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.225806 |
e2fe567efc33b25a0d91b4d7fa626e435caa6ed8 | 3,862 | py | Python | tenant_schemas_celery/tests.py | bufke/tenant-schemas-celery | 1475e3dc8112c1eb4f6df917863fb841421fa6f0 | [
"MIT"
] | null | null | null | tenant_schemas_celery/tests.py | bufke/tenant-schemas-celery | 1475e3dc8112c1eb4f6df917863fb841421fa6f0 | [
"MIT"
] | null | null | null | tenant_schemas_celery/tests.py | bufke/tenant-schemas-celery | 1475e3dc8112c1eb4f6df917863fb841421fa6f0 | [
"MIT"
] | null | null | null | from django.db import connection
from django.utils.unittest import skipIf
from tenant_schemas.tests.models import Tenant, DummyModel
from tenant_schemas.tests.testcases import BaseTestCase
from tenant_schemas.utils import get_public_schema_name
try:
from .app import CeleryApp
except ImportError:
app = None
else:
app = CeleryApp('testapp')
class CeleryConfig:
    # Run tasks synchronously in-process and re-raise their exceptions so
    # the tests below can assert on task effects directly.
    CELERY_ALWAYS_EAGER = True
    CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
app.config_from_object(CeleryConfig)
@app.task
def update_task(model_id, name):
    """Rename the DummyModel row *model_id* in the currently active schema."""
    dummy = DummyModel.objects.get(pk=model_id)
    dummy.name = name
    dummy.save()
@app.task
def update_retry_task(model_id, name):
    """Like update_task, but performs the update only on a retry attempt.

    The first invocation schedules a retry; the retried run does the actual
    update. Used to verify the tenant schema survives task retries.
    """
    if update_retry_task.request.retries:
        return update_task(model_id, name)
    # Don't throw the Retry exception.
    update_retry_task.retry(throw=False)
@skipIf(app is None, 'Celery is not available.')
class CeleryTasksTests(BaseTestCase):
    """Checks that celery tasks run against the tenant schema that was
    active on the connection when the task was dispatched (tasks execute
    eagerly in-process, see CeleryConfig)."""
    def setUp(self):
        """Create two tenants, one DummyModel row in each, then switch the
        connection back to the public schema."""
        super(CeleryTasksTests, self).setUp()
        self.tenant1 = Tenant(domain_url='test1', schema_name='test1')
        self.tenant1.save()
        self.tenant2 = Tenant(domain_url='test2', schema_name='test2')
        self.tenant2.save()
        connection.set_tenant(self.tenant1)
        self.dummy1 = DummyModel.objects.create(name='test1')
        connection.set_tenant(self.tenant2)
        self.dummy2 = DummyModel.objects.create(name='test2')
        connection.set_schema_to_public()
    def test_basic_model_update(self):
        """Tasks see the dispatching connection's schema: lookups fail from
        public, succeed from the owning tenant, and the connection's schema
        is unchanged after the task runs."""
        # We should be in public schema where dummies don't exist.
        for dummy in self.dummy1, self.dummy2:
            # Test both async and local versions.
            with self.assertRaises(DummyModel.DoesNotExist):
                update_task.apply_async(args=(dummy.pk, 'updated-name'))
            with self.assertRaises(DummyModel.DoesNotExist):
                update_task.apply(args=(dummy.pk, 'updated-name'))
        connection.set_tenant(self.tenant1)
        update_task.apply_async(args=(self.dummy1.pk, 'updated-name'))
        self.assertEqual(connection.schema_name, self.tenant1.schema_name)
        # The task restores the schema from before running the task, so we are
        # using the `tenant1` tenant now.
        model_count = DummyModel.objects.filter(name='updated-name').count()
        self.assertEqual(model_count, 1)
        connection.set_tenant(self.tenant2)
        model_count = DummyModel.objects.filter(name='updated-name').count()
        self.assertEqual(model_count, 0)
    def test_task_retry(self):
        # Schema name should persist through retry attempts.
        connection.set_tenant(self.tenant1)
        update_retry_task.apply_async(args=(self.dummy1.pk, 'updated-name'))
        model_count = DummyModel.objects.filter(name='updated-name').count()
        self.assertEqual(model_count, 1)
    def test_restoring_schema_name(self):
        """An explicit `_schema_name` kwarg selects the schema the task runs
        in, while leaving the caller's connection schema untouched."""
        update_task.apply_async(
            args=(self.dummy1.pk, 'updated-name'),
            kwargs={'_schema_name': self.tenant1.schema_name}
        )
        self.assertEqual(connection.schema_name, get_public_schema_name())
        connection.set_tenant(self.tenant1)
        update_task.apply_async(
            args=(self.dummy2.pk, 'updated-name'),
            kwargs={'_schema_name': self.tenant2.schema_name}
        )
        self.assertEqual(connection.schema_name, self.tenant1.schema_name)
        connection.set_tenant(self.tenant2)
        # The model does not exist in the public schema.
        with self.assertRaises(DummyModel.DoesNotExist):
            update_task.apply_async(
                args=(self.dummy2.pk, 'updated-name'),
                kwargs={'_schema_name': get_public_schema_name()}
            )
        self.assertEqual(connection.schema_name, self.tenant2.schema_name)
e2ff0cc3f2dad2d0415df8d97008ea00f1d1ac14 | 1,273 | py | Python | flake8_pie/tests/test_pie804_no_unnecessary_dict_kwargs.py | sbdchd/flake8-pie | 96ae441d92abe64b23e1c37b0eb15778434000cc | [
"BSD-2-Clause"
] | 23 | 2019-01-25T14:58:20.000Z | 2022-03-27T02:20:01.000Z | flake8_pie/tests/test_pie804_no_unnecessary_dict_kwargs.py | sbdchd/flake8-assign-and-return | 96ae441d92abe64b23e1c37b0eb15778434000cc | [
"BSD-2-Clause"
] | 50 | 2019-04-17T02:37:01.000Z | 2022-03-27T02:19:53.000Z | flake8_pie/tests/test_pie804_no_unnecessary_dict_kwargs.py | sbdchd/flake8-assign-and-return | 96ae441d92abe64b23e1c37b0eb15778434000cc | [
"BSD-2-Clause"
] | 5 | 2019-02-21T07:29:12.000Z | 2021-11-06T21:01:26.000Z | from __future__ import annotations
import ast
import pytest
from flake8_pie import Flake8PieCheck
from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804
from flake8_pie.tests.utils import Error, ex, to_errors
# Fixture table for PIE804 ("no unnecessary dict kwargs"): each ``ex`` pairs
# a code snippet with the PIE804 errors the checker must report for it.
EXAMPLES = [
    ex(
        code="""
foo(**{"bar": True})
""",
        errors=[PIE804(lineno=2, col_offset=6)],
    ),
    ex(
        code="""
foo(**{"r2d2": True})
""",
        errors=[PIE804(lineno=2, col_offset=6)],
    ),
    ex(
        code="""
Foo.objects.create(**{"bar": True})
""",
        errors=[PIE804(lineno=2, col_offset=21)],
    ),
    ex(
        code="""
Foo.objects.create(**{"_id": some_id})
""",
        errors=[PIE804(lineno=2, col_offset=21)],
    ),
    ex(
        code="""
Foo.objects.create(**{**bar})
""",
        errors=[PIE804(lineno=2, col_offset=21)],
    ),
    # Negative cases: keys that are not valid Python identifiers, dynamic
    # keys, f-string keys and merged mappings must NOT trigger the rule.
    ex(
        code="""
foo(**{**data, "foo": "buzz"})
foo(**buzz)
foo(**{"bar-foo": True})
foo(**{"bar foo": True})
foo(**{"1foo": True})
foo(**{buzz: True})
foo(**{"": True})
foo(**{f"buzz__{bar}": True})
""",
        errors=[],
    ),
]
@pytest.mark.parametrize("code,errors", EXAMPLES)
def test_examples(code: str, errors: list[Error]) -> None:
    """Each EXAMPLES snippet yields exactly the expected PIE804 errors."""
    tree = ast.parse(code)
    check = Flake8PieCheck(tree, filename="foo.py")
    assert to_errors(check.run()) == errors
| 20.532258 | 77 | 0.571877 | 0 | 0 | 0 | 0 | 213 | 0.167321 | 0 | 0 | 394 | 0.309505 |
e2ff3ce76141b539b88b724b3e32501df66e17ca | 6,161 | py | Python | campfire/components/models/publications/Post.py | Camper-CoolDie/campfire.py | bc90f004a6f086c18d4f46e9a7679ead96feca1f | [
"MIT"
] | null | null | null | campfire/components/models/publications/Post.py | Camper-CoolDie/campfire.py | bc90f004a6f086c18d4f46e9a7679ead96feca1f | [
"MIT"
] | null | null | null | campfire/components/models/publications/Post.py | Camper-CoolDie/campfire.py | bc90f004a6f086c18d4f46e9a7679ead96feca1f | [
"MIT"
] | null | null | null | from ...reqs import publications
from .. import main
class Post(main._all["publication"]):
    """
    Represents a Campfire post (a kind of publication).
    """
    __slots__ = (
        "pages",
        "best_comment",
        "rubric_id",
        "rubric_name"
    )
    def __init__(self, content):
        """
        Create a Post.
        content: :class:`dict`
            Dictionary the Campfire server sends to describe the post.
        """
        super(Post, self).__init__(content)
        self.pages = content["jsonDB"]["J_PAGES"]  # list of page dicts
        # Identity comparison with None (was ``!= None``).
        if content["bestComment"] is not None:
            self.best_comment = main._all["comment"](content["bestComment"])
        else:
            self.best_comment = None
        self.rubric_id = content["rubricId"]
        self.rubric_name = content["rubricName"]
    @staticmethod
    def get(post_id: int):
        """
        Create a Post from its identifier.
        post_id: :class:`int`
            Post identifier.
        Returns
        :class:`Post`
            The post object.
        """
        return Post(publications.get_post(post_id))
    @staticmethod
    def get_from_feed(offset: int = 0, languages: list = None, subscribes: bool = False, *, important: bool = False):
        """
        Fetch posts from the feed.
        offset: :class:`int`
            Creation time of the last post, in milliseconds.
        languages: :class:`list[int]`
            Languages the returned posts may have (defaults to ``[2]``).
        subscribes: :class:`bool`
            If true, fetch posts from the "Subscriptions" category.
        important: :class:`bool`
            Only important posts.
        Returns
        :class:`list[Post]`
            Posts from the feed.
        """
        # Replaces a mutable default argument (``languages=[2]``); the
        # effective default is unchanged.
        if languages is None:
            languages = [2]
        posts = publications.get_posts_from_feed(offset, languages, subscribes, important)
        return [ Post(post) for post in posts ]
    # Self-actions
    def change_fandom(self, fandom_id: int, fandom_lang: int = 2):
        """
        Change the post's fandom.
        fandom_id: :class:`int`
            Fandom identifier.
        fandom_lang: :class:`int`
            Fandom language.
        """
        return publications.post_change_fandom(self.id, "", fandom_id, fandom_lang)
    def to_drafts(self):
        """
        Move the post to drafts.
        """
        return publications.post_to_drafts(self.id)
    def close(self):
        """
        Close the post.
        """
        return publications.post_close(self.id)
    def no_close(self):
        """
        Reopen the post.
        """
        return publications.post_close_no(self.id)
    def set_multilingual(self):
        """
        Mark the post as multilingual.
        """
        return publications.post_set_multilingual(self.id)
    def unset_multilingual(self):
        """
        Mark the post as not multilingual.
        """
        return publications.post_unset_multilingual(self.id)
    def notify_followers(self):
        """
        Notify subscribers about the post.
        """
        return publications.post_notify_followers(self.id)
    def pin_to_account(self):
        """
        Pin the post to one's own profile.
        """
        return publications.post_pin_to_account(self.id)
    # Moderator
    def moderator_close(self, comment: str):
        """
        Close the post (moderator action).
        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_close(self.id, comment)
    def moderator_no_close(self, comment: str):
        """
        Reopen the post (moderator action).
        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_close_no(self.id, comment)
    def moderator_unset_multilingual(self, comment: str):
        """
        Mark the post as not multilingual (moderator action).
        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_unset_multilingual(self.id, comment)
    def moderator_set_important(self, comment: str, important: bool = True):
        """
        Set or clear the post's "important" flag.
        comment: :class:`str`
            Comment attached to the moderator action.
        important: :class:`bool`
            Whether to set or clear the flag.
        """
        return publications.moderator_post_set_important(self.id, comment, important)
    def moderator_to_drafts(self, comment: str):
        """
        Move the post to drafts (moderator action).
        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_to_drafts(self.id, comment)
    def moderator_pin_to_fandom(self, comment: str):
        """
        Pin the post in its fandom.
        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.moderator_post_pin_to_fandom(self.id, self.fandom_id, self.fandom_lang, comment)
    def admin_change_fandom(self, comment: str, fandom_id: int, fandom_lang: int = 2):
        """
        Change the post's fandom (admin action).
        comment: :class:`str`
            Comment attached to the moderator action.
        fandom_id: :class:`int`
            Fandom identifier.
        fandom_lang: :class:`int`
            Fandom language.
        """
        return publications.post_change_fandom(self.id, comment, fandom_id, fandom_lang)
    def admin_make_moderator(self, comment: str):
        """
        Make the post's author a moderator in the fandom.
        comment: :class:`str`
            Comment attached to the moderator action.
        """
        return publications.admin_post_make_moderator(self.id, comment)
main._all["post"] = Post | 27.382222 | 115 | 0.558513 | 7,144 | 0.988926 | 0 | 0 | 1,420 | 0.196567 | 0 | 0 | 4,044 | 0.559801 |
3900f3e6b7b3e7b34138de564d3504d79dc0b9a5 | 4,393 | py | Python | thyme/parsers/lammps.py | nw13slx/thyme | b2a16aa1e6b0701adcfd2bd146f85b5c46b35254 | [
"MIT"
] | null | null | null | thyme/parsers/lammps.py | nw13slx/thyme | b2a16aa1e6b0701adcfd2bd146f85b5c46b35254 | [
"MIT"
] | null | null | null | thyme/parsers/lammps.py | nw13slx/thyme | b2a16aa1e6b0701adcfd2bd146f85b5c46b35254 | [
"MIT"
] | null | null | null | import logging
import numpy as np
from glob import glob
from os.path import getmtime, isfile
from os import remove
from thyme import Trajectory
from thyme.parsers.monty import read_pattern, read_table_pattern
from thyme.routines.folders import find_folders, find_folders_matching
from thyme._key import *
from thyme.parsers.lammps_pizza_log import log as lammps_log
from thyme.parsers.lammps_pizza_dump import *
# Regex fragments for parsing numbers out of LAMMPS text output.
# NOTE(review): the '.' between the digit groups is unescaped, so it matches
# any single character rather than only a literal decimal point — confirm
# whether ``\.`` was intended.
fl_num = r"([+-]?\d+.\d+[eE]?[+-]?\d*)"  # captured float
sfl_num = r"\s+([+-]?\d+.\d+[eE]?[+-]?\d*)"  # leading whitespace + captured float
snum = r"\s+([+-]?\d+)"  # leading whitespace + captured integer
nc_fl_num = r"[+-]?\d+.\d+[eE]?[+-]?\d*"  # non-capturing float
# Header template for one frame of a LAMMPS dump file (orthorhombic box with
# origin at 0; ``type_str`` lists any extra per-atom columns).
head_str = """ITEM: TIMESTEP
{timestep}
ITEM: NUMBER OF ATOMS
{natom}
ITEM: BOX BOUNDS pp pp pp
0 {lx}
0 {ly}
0 {lz}
ITEM: ATOMS id type x y z {type_str}"""
def write(name, trj, color_key="", spe2num={}):
    """Write trajectory ``trj`` to ``name`` in LAMMPS dump format.

    name: output path; any existing file is removed first.
    trj: trajectory providing ``nframes``, ``per_frame_attrs`` and
        ``get_frame`` — assumed to be a thyme ``Trajectory``; TODO confirm.
    color_key: name of a per-frame attribute emitted as the ``q`` column.
    spe2num: mapping from species to LAMMPS type number, extended in place.
        NOTE(review): the mutable default makes the mapping accumulate
        across calls that rely on the default; the final mapping is logged,
        which suggests this is intentional — confirm before changing.
    """
    if isfile(name):
        remove(name)
    keys = [POSITION]
    type_str = ""
    # Build the list of per-atom columns from the trajectory's attributes.
    for key in trj.per_frame_attrs:
        if key == FORCE:
            type_str += " fx fy fz"
            keys += [FORCE]
        elif key == VELOCITY:
            type_str += " vx vy vz"
            keys += [VELOCITY]
        elif key == color_key:
            type_str += " q"
            keys += [color_key]
    # Context manager guarantees the handle is closed even if a frame raises;
    # the previous open()/close() pair leaked the handle on error.
    with open(name, "w+") as fout:
        for i in range(trj.nframes):
            frame = trj.get_frame(i)
            cell = frame[CELL]
            # Only orthorhombic cells are supported: reject any off-diagonal
            # term in the cell matrix.
            off_dia_sum = np.sum(np.abs(cell)) - np.trace(np.abs(cell))
            if off_dia_sum > 0:
                raise NotImplementedError()
            natom = frame[NATOMS]
            hs = head_str.format(
                lx=cell[0, 0],
                ly=cell[1, 1],
                lz=cell[2, 2],
                timestep=i,
                natom=natom,
                type_str=type_str,
            )
            # Assign type numbers to species not seen before (1-based).
            species = np.unique(frame[SPECIES])
            base = len(spe2num)
            if base == 0:
                base = 1
            spe2num.update(
                {spe: i + base for i, spe in enumerate(species) if spe not in spe2num}
            )
            string = f"{hs}"
            for j in range(natom):
                string += f"\n{j+1} {spe2num[frame[SPECIES][j]]} "
                for key in keys:
                    string += " " + " ".join([f"{value}" for value in frame[key][j]])
            print(string, file=fout)
            logging.info(f"write {name}")
    logging.info(f"spe2num {spe2num}")
def from_file(filename):
    """Parse a LAMMPS dump file into a single stacked Trajectory.

    Uses the pizza-style ``dump`` reader; per-snapshot atom rows are
    reordered by atom id so frames line up atom-for-atom.
    """
    data = dump(filename, 0)
    data.read_all(allow_overlap=True)
    # Column indices of the standard per-atom fields.
    col_id = data.names["id"]
    col_type = data.names["type"]
    x_id = data.names["x"]
    y_id = data.names["y"]
    z_id = data.names["z"]
    if "fx" in data.names:
        fx_id = data.names["fx"]
        fy_id = data.names["fy"]
        fz_id = data.names["fz"]
    # Any other dumped columns are carried through verbatim, keyed by name.
    remaining_names = [
        (i, name)
        for i, name in enumerate(data.names)
        if name not in ["id", "type", "x", "y", "z", "fx", "fy", "fz"]
    ]
    list_trj = []
    for i in range(data.nsnaps):
        if i % 1000 == 0:
            logging.info(f"{i} / {data.nsnaps}")
        snap = data.snaps[i]
        cols = np.vstack(snap.atoms)  # one row per atom
        # Permutation that sorts atoms by their id column.
        ids = np.argsort(cols[:, col_id])
        species = cols[:, col_type][ids]
        pos = np.hstack(
            (
                cols[:, x_id].reshape([-1, 1]),
                cols[:, y_id].reshape([-1, 1]),
                cols[:, z_id].reshape([-1, 1]),
            )
        )
        # Orthorhombic box edge lengths from the snapshot bounds.
        lx = snap.xhi - snap.xlo
        ly = snap.yhi - snap.ylo
        lz = snap.zhi - snap.zlo
        d = {
            CELL: np.diag([lx, ly, lz]).reshape([1, 3, 3]),
            POSITION: pos[ids].reshape([1, -1, 3]),
            SPECIES: species,
            PER_FRAME_ATTRS: [POSITION, CELL],
            FIXED_ATTRS: [SPECIES, NATOMS],
        }
        if "fx" in data.names:
            force = np.hstack(
                (
                    cols[:, fx_id].reshape([-1, 1]),
                    cols[:, fy_id].reshape([-1, 1]),
                    cols[:, fz_id].reshape([-1, 1]),
                )
            )[ids]
            d.update({FORCE: force.reshape([1, -1, 3])})
            d[PER_FRAME_ATTRS] += [FORCE]
        # NOTE(review): these extra columns are NOT reordered with ``ids``,
        # unlike position/species/force above — confirm whether they should
        # also be indexed by ``ids`` for a consistent atom ordering.
        d.update({name: cols[:, i].reshape([1, -1]) for i, name in remaining_names})
        d[PER_FRAME_ATTRS] += [name for i, name in remaining_names]
        _trj = Trajectory.from_dict(d)
        list_trj += [_trj]
    trj = Trajectory.stack(list_trj)
    return trj
def read_log(filename):
    """Parse a LAMMPS log file; return (column names, data array)."""
    logfile = lammps_log(filename, 0)
    logfile.next()
    return logfile.names, np.array(logfile.data)
| 27.45625 | 84 | 0.515365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.108354 |
3900f50cc35a91f1b2e65d295de22b272d80a5f7 | 802 | py | Python | quizzes/mixins.py | NeedsSoySauce/testme | dfc11737779809c1df475e9224e753ca7117c981 | [
"MIT"
] | 1 | 2020-11-22T22:38:02.000Z | 2020-11-22T22:38:02.000Z | quizzes/mixins.py | NeedsSoySauce/testme | dfc11737779809c1df475e9224e753ca7117c981 | [
"MIT"
] | 3 | 2021-06-04T23:59:02.000Z | 2021-09-22T19:39:14.000Z | quizzes/mixins.py | NeedsSoySauce/testme | dfc11737779809c1df475e9224e753ca7117c981 | [
"MIT"
] | null | null | null | from rest_framework.mixins import CreateModelMixin
from rest_framework.viewsets import GenericViewSet
class CreateUserLinkedModelMixin(CreateModelMixin, GenericViewSet):
    """
    Attach the requesting user to every object created through this viewset.
    Usage:
    Subclass and provide the `.queryset` and `.serializer_class` attributes.
    If you override 'perform_create', remember to call the super method.
    Point the USER_FIELD class attribute at the model's user field
    (default is 'creator').
    """
    USER_FIELD = 'creator'
    def perform_create(self, serializer):
        user = self.request.user
        if user.is_anonymous:
            # Anonymous requests save without a linked user.
            serializer.save()
        else:
            serializer.save(**{self.USER_FIELD: user})
| 34.869565 | 118 | 0.714464 | 697 | 0.869077 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.486284 |
3902e3c1d9f27314dcc2c50f2e8d39999e51a918 | 308 | py | Python | python-algorithm/leetcode/problem_191.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_191.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2020-03-22T13:53:54.000Z | 2020-03-23T08:49:35.000Z | python-algorithm/leetcode/problem_191.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """191. Number of 1 Bits
https://leetcode.com/problems/number-of-1-bits/
"""
class Solution:
    def hammingWeight(self, n: int) -> int:
        """Return the number of set bits in ``n`` (its Hamming weight)."""
        count = 0
        while n:
            # Strip the lowest set bit: n & -n isolates it.
            n -= n & -n
            count += 1
        return count
| 19.25 | 47 | 0.49026 | 228 | 0.74026 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.246753 |
39040095ba97fb25c2f48d71d61af9009cf80bac | 1,249 | py | Python | tests/test_base/test_components.py | jlichter/pyClarion | 326a9b7ac03baaaf8eba49a42954f88542c191e9 | [
"MIT"
] | 25 | 2018-09-21T17:51:09.000Z | 2022-03-08T12:24:35.000Z | tests/test_base/test_components.py | jlichter/pyClarion | 326a9b7ac03baaaf8eba49a42954f88542c191e9 | [
"MIT"
] | 9 | 2018-07-01T00:44:02.000Z | 2022-02-10T10:56:30.000Z | tests/test_base/test_components.py | jlichter/pyClarion | 326a9b7ac03baaaf8eba49a42954f88542c191e9 | [
"MIT"
] | 10 | 2018-09-21T17:51:13.000Z | 2022-03-03T07:58:37.000Z | import pyClarion.base as clb
import pyClarion.numdicts as nd
import unittest
import unittest.mock as mock
class TestProcess(unittest.TestCase):
    """Tests for clb.Process.check_inputs."""
    @mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
    def test_check_inputs_accepts_good_input_structure(self):
        proc = clb.Process(
            expected=[clb.buffer("wm"), clb.terminus("selection")]
        )
        # Extra constructs beyond the expected ones are tolerated.
        good_inputs = {
            clb.buffer("wm"): nd.NumDict(default=0),
            clb.terminus("selection"): nd.NumDict(default=0),
            clb.terminus("selection2"): nd.NumDict(default=0)
        }
        proc.check_inputs(good_inputs)
    @mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
    def test_check_inputs_rejects_incomplete_input(self):
        proc = clb.Process(
            expected=[clb.chunks("in"), clb.terminus("selection")]
        )
        with self.assertRaises(RuntimeError):
            # The expected chunks("in") entry is deliberately omitted.
            bad_inputs = {
                clb.terminus("selection"): nd.NumDict(default=0),
                clb.terminus("selection2"): nd.NumDict(default=0)
            }
            proc.check_inputs(bad_inputs)
class TestWrappedProcess(unittest.TestCase):
    # Placeholder: no WrappedProcess tests have been written yet.
    pass
| 29.738095 | 72 | 0.629303 | 1,136 | 0.909528 | 0 | 0 | 1,034 | 0.827862 | 0 | 0 | 140 | 0.11209 |
3904ee13823977d061bad199f6b94392dd9030ae | 10,499 | py | Python | service_capacity_modeling/models/org/netflix/stateless_java.py | jolynch/service-capacity-modeling | 4fa8c600fa2cf12dc75735539d3b115a9cefe93d | [
"Apache-2.0"
] | 6 | 2021-06-24T21:32:35.000Z | 2021-12-20T21:03:46.000Z | service_capacity_modeling/models/org/netflix/stateless_java.py | jolynch/service-capacity-modeling | 4fa8c600fa2cf12dc75735539d3b115a9cefe93d | [
"Apache-2.0"
] | 6 | 2021-06-30T23:05:04.000Z | 2022-03-09T16:29:29.000Z | service_capacity_modeling/models/org/netflix/stateless_java.py | jolynch/service-capacity-modeling | 4fa8c600fa2cf12dc75735539d3b115a9cefe93d | [
"Apache-2.0"
] | 4 | 2021-06-26T19:27:16.000Z | 2021-09-23T09:39:40.000Z | import math
from decimal import Decimal
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Tuple
from service_capacity_modeling.interface import AccessConsistency
from service_capacity_modeling.interface import AccessPattern
from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.interface import CapacityPlan
from service_capacity_modeling.interface import CapacityRegretParameters
from service_capacity_modeling.interface import CapacityRequirement
from service_capacity_modeling.interface import certain_float
from service_capacity_modeling.interface import certain_int
from service_capacity_modeling.interface import Clusters
from service_capacity_modeling.interface import Consistency
from service_capacity_modeling.interface import DataShape
from service_capacity_modeling.interface import Drive
from service_capacity_modeling.interface import FixedInterval
from service_capacity_modeling.interface import GlobalConsistency
from service_capacity_modeling.interface import Instance
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import QueryPattern
from service_capacity_modeling.interface import RegionClusterCapacity
from service_capacity_modeling.interface import RegionContext
from service_capacity_modeling.interface import Requirements
from service_capacity_modeling.models import CapacityModel
from service_capacity_modeling.models.common import compute_stateless_region
from service_capacity_modeling.models.common import simple_network_mbps
from service_capacity_modeling.models.common import sqrt_staffed_cores
def _estimate_java_app_requirement(
    desires: CapacityDesires,
    failover: bool = True,
    jvm_memory_overhead: float = 1.2,
) -> CapacityRequirement:
    """Estimate CPU, heap and network needs for a stateless Java app."""
    cpu_cores = sqrt_staffed_cores(desires)
    network_mbps = simple_network_mbps(desires)
    if failover:
        # Leave headroom for failover traffic: provision at 40% utilization.
        cpu_cores = int(math.ceil(cpu_cores * (1 / 0.4)))
        network_mbps = int(math.ceil(network_mbps * (1 / 0.4)))
    # Model a Java app that can allocate about 1 GiB/s to heap per 2 GiB of
    # heap, with some overhead applied to the network traffic it handles
    # (e.g. 512 MiB of network traffic carries extra allocation overhead).
    # TODO: memory bandwidth really belongs on the instance type — Intel and
    # AMD CPUs differ in per-core memory bandwidth.
    allocation_mbps = network_mbps * jvm_memory_overhead
    allocation_gibps = (allocation_mbps / 8) / 1024
    heap_gib = allocation_gibps * 2
    return CapacityRequirement(
        requirement_type="java-app",
        core_reference_ghz=desires.core_reference_ghz,
        cpu_cores=certain_int(cpu_cores),
        mem_gib=certain_float(heap_gib),
        network_mbps=certain_float(network_mbps),
        context={
            "network_heap_gib": heap_gib,
            "reserved_mem": desires.data_shape.reserved_instance_app_mem_gib,
        },
    )
def _estimate_java_app_region(
    instance: Instance,
    drive: Drive,
    desires: CapacityDesires,
    root_disk_gib: int = 10,
    failover: bool = True,
    jvm_memory_overhead: float = 2,
    zones_per_region: int = 3,
) -> Optional[CapacityPlan]:
    """Build a regional plan for a stateless Java app, or None if this
    drive type is unsupported or the resulting cluster would be too big."""
    # Only gp2 root volumes are modeled.
    if drive.name != "gp2":
        return None
    requirement = _estimate_java_app_requirement(desires, failover, jvm_memory_overhead)
    root_drive = drive.copy()
    root_drive.size_gib = root_disk_gib
    cluster: RegionClusterCapacity = compute_stateless_region(
        instance=instance,
        needed_cores=int(requirement.cpu_cores.mid),
        needed_memory_gib=requirement.mem_gib.mid,
        needed_network_mbps=requirement.network_mbps.mid,
        core_reference_ghz=requirement.core_reference_ghz,
        num_zones=zones_per_region,
    )
    cluster.cluster_type = "nflx-java-app"
    cluster.attached_drives = (root_drive,)
    # Generally don't want giant clusters — especially not above 1000 nodes,
    # because some load balancers struggle with such large clusters.
    if cluster.count > 256:
        return None
    return CapacityPlan(
        requirements=Requirements(regional=[requirement]),
        candidate_clusters=Clusters(
            total_annual_cost=round(Decimal(cluster.annual_cost), 2),
            regional=[cluster],
            zonal=[],
        ),
    )
class NflxJavaAppCapacityModel(CapacityModel):
    """Capacity model for stateless Netflix streaming Java applications."""
    @staticmethod
    def capacity_plan(
        instance: Instance,
        drive: Drive,
        context: RegionContext,
        desires: CapacityDesires,
        extra_model_arguments: Dict[str, Any],
    ) -> Optional[CapacityPlan]:
        """Return a regional plan for this instance/drive, or None."""
        failover: bool = extra_model_arguments.get("failover", True)
        jvm_memory_overhead: float = extra_model_arguments.get(
            "jvm_memory_overhead", 1.2
        )
        root_disk_gib: int = extra_model_arguments.get("root_disk_gib", 10)
        return _estimate_java_app_region(
            instance=instance,
            drive=drive,
            desires=desires,
            failover=failover,
            root_disk_gib=root_disk_gib,
            jvm_memory_overhead=jvm_memory_overhead,
            zones_per_region=context.zones_in_region,
        )
    @staticmethod
    def description():
        """Human-readable name of this model."""
        return "Netflix Streaming Java App Model"
    @staticmethod
    def extra_model_arguments() -> Sequence[Tuple[str, str, str]]:
        """(name, type-and-default, help) tuples for the extra arguments."""
        return (
            ("failover", "bool = 1", "If this app participates in failover"),
            (
                "jvm_memory_overhead",
                "float = 1.2",
                "How much overhead does the heap have per read byte",
            ),
            ("root_disk_gib", "int = 10", "How many GiB of root volume to attach"),
        )
    @staticmethod
    def regret(
        regret_params: CapacityRegretParameters,
        optimal_plan: CapacityPlan,
        proposed_plan: CapacityPlan,
    ) -> Dict[str, float]:
        """Base-class regret, except disk space never contributes regret."""
        regret = super(NflxJavaAppCapacityModel, NflxJavaAppCapacityModel).regret(
            regret_params, optimal_plan, proposed_plan
        )
        # Stateless apps only attach a fixed-size root volume, so a disk
        # mismatch should never be penalized.
        regret["disk_space"] = 0
        return regret
    @staticmethod
    def default_desires(user_desires, extra_model_arguments):
        """Fill in default desires per the user's declared access pattern."""
        if user_desires.query_pattern.access_pattern == AccessPattern.latency:
            return CapacityDesires(
                query_pattern=QueryPattern(
                    access_pattern=AccessPattern.latency,
                    access_consistency=GlobalConsistency(
                        same_region=Consistency(
                            target_consistency=AccessConsistency.read_your_writes,
                        ),
                        cross_region=Consistency(
                            target_consistency=AccessConsistency.never,
                        ),
                    ),
                    estimated_mean_read_size_bytes=Interval(
                        low=128, mid=1024, high=65536, confidence=0.95
                    ),
                    estimated_mean_write_size_bytes=Interval(
                        low=64, mid=128, high=1024, confidence=0.95
                    ),
                    estimated_mean_read_latency_ms=Interval(
                        low=0.2, mid=1, high=2, confidence=0.98
                    ),
                    estimated_mean_write_latency_ms=Interval(
                        low=0.2, mid=1, high=2, confidence=0.98
                    ),
                    # "Single digit milliseconds SLO"
                    read_latency_slo_ms=FixedInterval(
                        minimum_value=0.5,
                        maximum_value=10,
                        low=1,
                        mid=2,
                        high=5,
                        confidence=0.98,
                    ),
                    write_latency_slo_ms=FixedInterval(
                        low=1, mid=2, high=5, confidence=0.98
                    ),
                ),
                data_shape=DataShape(
                    # Assume 4 GiB heaps
                    reserved_instance_app_mem_gib=4
                ),
            )
        else:
            return CapacityDesires(
                query_pattern=QueryPattern(
                    access_pattern=AccessPattern.latency,
                    access_consistency=GlobalConsistency(
                        same_region=Consistency(
                            target_consistency=AccessConsistency.read_your_writes,
                        ),
                        cross_region=Consistency(
                            target_consistency=AccessConsistency.never,
                        ),
                    ),
                    estimated_mean_read_size_bytes=Interval(
                        low=128, mid=1024, high=65536, confidence=0.95
                    ),
                    estimated_mean_write_size_bytes=Interval(
                        low=64, mid=128, high=1024, confidence=0.95
                    ),
                    # Throughput ops can be slower
                    estimated_mean_read_latency_ms=Interval(
                        low=0.2, mid=4, high=8, confidence=0.98
                    ),
                    estimated_mean_write_latency_ms=Interval(
                        low=0.2, mid=1, high=5, confidence=0.98
                    ),
                    # "Tens of millisecond SLO"
                    read_latency_slo_ms=FixedInterval(
                        minimum_value=0.5,
                        maximum_value=100,
                        low=1,
                        mid=5,
                        high=40,
                        confidence=0.98,
                    ),
                    write_latency_slo_ms=FixedInterval(
                        minimum_value=0.5,
                        maximum_value=100,
                        low=1,
                        mid=5,
                        high=40,
                        confidence=0.98,
                    ),
                ),
                data_shape=DataShape(
                    # Assume 4 GiB heaps
                    reserved_instance_app_mem_gib=4
                ),
            )
# Shared module-level instance of the model.
nflx_java_app_capacity_model = NflxJavaAppCapacityModel()
| 39.02974 | 88 | 0.613011 | 5,778 | 0.550338 | 0 | 0 | 5,703 | 0.543195 | 0 | 0 | 1,059 | 0.100867 |
390607443fe47de4159aa9c452011b3665fffa1f | 36 | py | Python | kmmi/exposure/__init__.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | [
"MIT"
] | null | null | null | kmmi/exposure/__init__.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | [
"MIT"
] | null | null | null | kmmi/exposure/__init__.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | [
"MIT"
] | null | null | null | from kmmi.exposure.exposure import * | 36 | 36 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3906c3d350796714fe403c2d9747c8b915ea90fb | 1,251 | py | Python | proxies_list.py | Konstantinos-Papanagnou/LFITester | f08e2eb0fa52d8abcbe787d17468da6ae297c925 | [
"MIT"
] | null | null | null | proxies_list.py | Konstantinos-Papanagnou/LFITester | f08e2eb0fa52d8abcbe787d17468da6ae297c925 | [
"MIT"
] | null | null | null | proxies_list.py | Konstantinos-Papanagnou/LFITester | f08e2eb0fa52d8abcbe787d17468da6ae297c925 | [
"MIT"
] | null | null | null | import random
import requests
def clean_proxies():
    """Probe every proxy listed in the 'proxies' file and drop dead ones.

    Surviving proxies are written back to the file, one per line.
    Terminates the process when no proxy survives.
    """
    with open('proxies', 'r') as handle:
        proxies = handle.read().strip().split('\n')
    alive = []
    print(proxies)
    for proxy in proxies:
        try:
            # verify=False: anonymous proxies frequently present bad certs.
            # The response body is irrelevant; reaching the site is the test.
            requests.get('https://google.com', proxies={'https':'https://'+proxy}, timeout=8, verify=False)
            alive.append(proxy)
        except requests.exceptions.ConnectTimeout:
            print(f'[-]\tProxy: {proxy} is taking too long to respond. Removing from the list...')
        except requests.exceptions.ProxyError:
            print(f'[-]\tProxy: {proxy} is dead. Removing from the list...')
    if not alive:
        print("All proxies are dead or unavailable. We recommend you to renew the proxy list. In order to do that you need to edit the 'proxies' file.")
        print("Execution Halt!")
        exit(1)
    with open('proxies', 'w') as handle:
        for proxy in alive:
            handle.write(proxy + "\n")
def fetch_proxy():
    """Pick a random proxy from the 'proxies' file.

    Returns a requests-style proxy mapping covering both schemes.
    """
    with open('proxies', 'r') as handle:
        pool = handle.read().strip().split('\n')
    chosen = pool[random.randint(0, len(pool) - 1)]
    return {'https': 'https://' + chosen,
            'http': 'http://' + chosen}
| 32.076923 | 146 | 0.676259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 407 | 0.32534 |
3907cb2efd59d6150c96b3e7f63e5c918327e278 | 2,263 | py | Python | chromoSpirals.py | zubrik13/coding_intrv_prer | 853a7c8357ad43601313daadcc1c494d403a9aa0 | [
"MIT"
] | null | null | null | chromoSpirals.py | zubrik13/coding_intrv_prer | 853a7c8357ad43601313daadcc1c494d403a9aa0 | [
"MIT"
] | null | null | null | chromoSpirals.py | zubrik13/coding_intrv_prer | 853a7c8357ad43601313daadcc1c494d403a9aa0 | [
"MIT"
] | null | null | null | # chromoSpirals.py
# ----------------
# Code written by Peter Derlien, University of Sheffield, March 2013
# Draws spiralling patterns of circles using the Golden Angle.
# ----------------
# Import from the numpy and matplotlib packages.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.collections import PatchCollection
import matplotlib.patches as mpatches
# --- layout parameters ---------------------------------------------------
ox = 0.5
oy = 0.4 # centre of plot
# NOTE(review): this literal is dead code — it is immediately overwritten by
# the interactive prompt on the next line.
ndiscs = 300
ndiscs = int(input('No. of discs (e.g. 300)? '))
ncols = int(input('no. of colours (1 to 34)? '))
offset = 0.0
# offset = input('offset (in radians) from golden angle? ')
tau = (1 + 5 ** 0.5) / 2.0 # golden ratio approx = 1.618033989
# (2-tau)*2*np.pi is golden angle = c. 2.39996323 radians, or c. 137.5 degrees
inc = (2 - tau) * 2 * np.pi + offset
theta = 0
k = 0.1 # scale factor
drad = k * (1 + 5 ** 0.5) / 4.0 # radius of each disc
minv = maxv = 0 # minv and maxv will be used later to display inputs chosen
# now collect in list 'patches' the locations of all the discs
# (phyllotaxis layout: radius grows as sqrt(j), angle steps by the golden angle)
patches = []
for j in range(1, ndiscs + 1):
    r = k * j ** 0.5
    theta += inc
    x = ox + r * np.cos(theta)
    y = oy + r * np.sin(theta)
    # track the vertical extent so the labels can sit at the plot edges
    if y > maxv:
        maxv = y
    elif y < minv:
        minv = y
    disc = mpatches.Circle((x, y), drad)
    patches.append(disc)
# start building the plot
fig = plt.figure()
ax = plt.axes([0, 0, 1, 1])
# create text to show which inputs the user has chosen
font = "sans-serif"
maxv = maxv * 0.95
nd = 'ndiscs: ' + str(ndiscs)
plt.text(minv, maxv, nd, ha="center", family=font, size=14)
setting = 'angle offset: ' + str(offset)
plt.text(minv, minv, setting, ha="center", family=font, size=14)
nc = 'ncols: ' + str(ncols)
plt.text(maxv, maxv, nc, ha="left", family=font, size=14)
# build colour cycle, using a number between 0 and 100 for each colour
colcycle = []
s = 100 / ncols
for j in range(ndiscs):
    colcycle.append((j % ncols) * s)
# bring together the information for locations and colours of discs
collection = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=1.0)
collection.set_array(np.array(colcycle))
ax.add_collection(collection)
ax.set_xticks([])
ax.set_yticks([]) # suppress display of axes
plt.axis('equal')
plt.show() # display the plot we have built
3908362f779c22268c5829af9edecb71d16463fa | 1,045 | py | Python | TODO_LIST/TODO_APP/views.py | Amit89499/TODO-APP-DJANGO | 082a4ffb803778378c6a8077ca47cf868bc55ef8 | [
"Apache-2.0"
] | 4 | 2020-06-29T16:00:39.000Z | 2021-05-22T03:40:38.000Z | TODO_LIST/TODO_APP/views.py | Amit89499/TODO-APP-DJANGO | 082a4ffb803778378c6a8077ca47cf868bc55ef8 | [
"Apache-2.0"
] | null | null | null | TODO_LIST/TODO_APP/views.py | Amit89499/TODO-APP-DJANGO | 082a4ffb803778378c6a8077ca47cf868bc55ef8 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import *
from .forms import *
# Create your views here.
def index(request):
    """Render the task list; on POST, create a task from the submitted form."""
    tasks = Task.objects.all()
    form = TaskForm()
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
        # Redirect after POST regardless of validity (original behaviour).
        return redirect('/')
    return render(request, 'list.html', {'tasks': tasks, 'form': form})
def updateTask(request, pk):
    """Edit the task with primary key ``pk``; save on valid POST."""
    task = Task.objects.get(id=pk)
    form = TaskForm(instance=task)
    if request.method == 'POST':
        form = TaskForm(request.POST, instance=task)
        if form.is_valid():
            form.save()
        return redirect('/')
    return render(request, 'update_task.html', {'form': form})
def deleteTask(request, pk):
    """Confirm-and-delete view for the task with primary key ``pk``."""
    item = Task.objects.get(id=pk)
    if request.method == 'POST':
        item.delete()
        return redirect('/')
    return render(request, 'delete.html', {'item': item})
| 23.222222 | 52 | 0.627751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.114833 |
3909cb5cbf3d27aacd9d499216668e13a1241a5e | 3,779 | py | Python | serial/splitter.py | tf-czu/gyrorad | eb1c30a9715857a50631de170cecb443457c2752 | [
"MIT"
] | null | null | null | serial/splitter.py | tf-czu/gyrorad | eb1c30a9715857a50631de170cecb443457c2752 | [
"MIT"
] | null | null | null | serial/splitter.py | tf-czu/gyrorad | eb1c30a9715857a50631de170cecb443457c2752 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Split logged data into separate "channels"
usage:
./splitter.py <log file> <GPS|0..3|all>
"""
import sys
# CSV header that marks the start of sensor records in a raw log file.
FIRST_LINE = "id,timeMs,accX,accY,accZ,temp,gyroX,gyroY,gyroZ\n"
# ASCII STX/ETX control characters bracket embedded NMEA (GPS) data.
GPS_SEPARATOR_BEGIN = chr(0x2)
GPS_SEPARATOR_END = chr(0x3)
def checksum( s ):
    """Return the NMEA checksum of ``s``: XOR of all characters, as two
    uppercase hex digits (e.g. "4B")."""
    # Renamed from ``sum`` to avoid shadowing the builtin.
    acc = 0
    for ch in s:
        acc ^= ord(ch)
    return "%02X" % (acc)
def ddmm2ddd( s ):
    """Convert an NMEA ddmm.mmmm coordinate string to decimal degrees."""
    padded = '0000' + s
    whole, frac = padded.split('.')
    # Last two digits before the dot are minutes; the two before are degrees.
    minutes = float(whole[-2:] + '.' + frac)
    return minutes / 60.0 + float(whole[-4:-2])
def parseNMEA( data ):
    """Extract (UTC time, latitude, longitude) fixes from raw NMEA text.

    Only sentences with a matching checksum are used; GPRMC fixes must be
    valid ('A') and both sentence types are restricted to N/E hemispheres.
    """
    ret = []
    for line in data.replace('\r','\n').split('\n'):
        if '$' in line and '*' in line.split('$')[-1]:
            # Sentence shape is "$<body>*<checksum>"; take the last '$'.
            s = line.split('$')[-1].split('*')
            if len(s) > 1 and len(s[1]) >= 2:
                if checksum(s[0]) == s[1][:2]:
                    if s[0].startswith("GPRMC"):
                        # GPRMC: time, status, lat, N/S, lon, E/W ...
                        s = s[0].split(',')[:7]
                        if len(s) >= 7 and s[2] == 'A' and s[4] == 'N' and s[6] == 'E':
                            ret.append( (s[1], ddmm2ddd(s[3]), ddmm2ddd(s[5])) )
                    elif s[0].startswith("GPGGA"):
                        # GPGGA: time, lat, N/S, lon, E/W ...
                        s = s[0].split(',')[:6]
                        if len(s) >= 6 and s[3] == 'N' and s[5] == 'E':
                            ret.append( (s[1], ddmm2ddd(s[2]), ddmm2ddd(s[4])) )
    return ret
def stripHeader( data ):
    """Drop everything up to and including the first CSV header line."""
    before, header, after = data.partition(FIRST_LINE)
    if header:
        return after
    return data
def splitter( data, selected ):
    """Split a raw logger dump into a single channel.

    selected: 'GPS' -> raw NMEA text; '0'..'3' -> that sensor channel as
    CSV text; 'ALL' -> list mixing parsed GPS fixes and integer records.
    """
    assert selected in ['GPS','0','1','2','3','ALL'], selected
    gpsSection = False
    data = stripHeader( data )
    result, resultGPS = "", ""
    lastGPS = None
    records = []
    lastSeek = 0
    for line in data.split('\n'):
        # GPS data is bracketed by STX/ETX and may start and/or end mid-line.
        if GPS_SEPARATOR_BEGIN in line:
            if GPS_SEPARATOR_END in line:
                # Complete GPS fragment embedded within one line.
                resultGPS += line.split(GPS_SEPARATOR_BEGIN)[1].split(GPS_SEPARATOR_END)[0]
                line = line.split(GPS_SEPARATOR_BEGIN)[0] + line.split(GPS_SEPARATOR_END)[1]
                gpsSection = False
            else:
                # GPS section opens here and continues on following lines.
                resultGPS += line.split(GPS_SEPARATOR_BEGIN)[1]
                line = line.split(GPS_SEPARATOR_BEGIN)[0]
                gpsSection = True
        elif GPS_SEPARATOR_END in line:
            # GPS section closes on this line.
            resultGPS += line.split(GPS_SEPARATOR_END)[0]
            line = line.split(GPS_SEPARATOR_END)[1]
            gpsSection = False
        elif gpsSection:
            # Whole line belongs to the currently open GPS section.
            resultGPS += line.strip() + '\n'
            line = ""
        # Re-parse only the fresh tail of the NMEA stream for new fixes.
        arr = parseNMEA( resultGPS[lastSeek:] )
        if len(arr) > 0 and arr[-1] != lastGPS:
            lastSeek = max(0, len(resultGPS)-80) # max NMEA line is 80 characters
            lastGPS = arr[-1]
            records.append( lastGPS )
        if len(line.split(',')) >= 9:
            if line[:2] not in ['0,','1,','2,','3,']:
                # Misaligned line: resynchronise on the last 9 CSV fields.
                parts = line.split(',')
                s = parts[-9]
                if len(s) > 0:
                    line = parts[-9][-1] + ',' + ",".join( parts[-8:] )
            if line.startswith( selected ) and '*' not in line:
                result += line.strip() + '\n'
                records.append( [int(x) for x in line.split(',') if '.' not in x] ) # ignore float temperature
    if selected == 'GPS':
        return resultGPS
    if selected == 'ALL':
        return records
    return result
# Command-line entry point.  NOTE: this file is Python 2 (statement-form
# ``print``); it reads the whole log in binary and prints one channel.
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print __doc__
        sys.exit(2)
    selected = sys.argv[2].upper()
    data = splitter( open(sys.argv[1], "rb").read(), selected=selected )
    if selected == "GPS":
        print data
        print "------------------"
        print parseNMEA( data )
    elif selected == "ALL":
        # 'ALL' returns a list of records rather than a text blob.
        for row in data:
            print row
    else:
        print data
# vim: expandtab sw=4 ts=4
| 32.299145 | 106 | 0.493781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.124901 |
3909f60bd9bc4dcad09d01754d26fbed50773848 | 11,267 | py | Python | main.py | opengovt/openroads-geostore | 336bdc352252ae34a66746e632ae0b8df66c04c0 | [
"MIT"
] | 1 | 2019-10-11T14:43:53.000Z | 2019-10-11T14:43:53.000Z | main.py | opengovt/openroads-geostore | 336bdc352252ae34a66746e632ae0b8df66c04c0 | [
"MIT"
] | null | null | null | main.py | opengovt/openroads-geostore | 336bdc352252ae34a66746e632ae0b8df66c04c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import jinja2
import webapp2
import logging
import threading
from mandrill_email import *
from webapp2_extras import routes
from cookie import *
from settings import *
from decorators import *
from functions import *
from google.appengine.api import taskqueue
from google.appengine.datastore.datastore_query import Cursor
# HANDLERS
from application.handlers.pages.geoprocessing \
import GeoprocessingDashboardHandler
from application.handlers.pages.geoprocessing \
import GeoprocessingClassificationHandler
from application.handlers.pages.geoprocessing \
import GeoprocessingToolHandler
from application.handlers.pages.geoprocessing \
import GeoprocessingToolImagesHandler
from application.handlers.pages.geoprocessing \
import GeoprocessedPageHandler
from application.handlers.pages.geoprocessing \
import ForGeoprocessedPageHandler
from application.handlers.pages.statistics import StatisticsDashboard
from application.handlers.pages.statistics import StatisticsDashboard2
from application.handlers.pages.login import LoginHandler
from application.handlers.pages.loginoauth import LoginOauthHandler
from application.handlers.pages.verifylogincode import VerifyLoginCode
from application.handlers.pages.logoutapi import LogoutApiHandler
from application.handlers.pages.projectdashboard import ProjectDashboardHandler
from application.handlers.pages.logout import LogoutHandler
from application.handlers.pages.register import RegisterHandler
from application.handlers.pages.agencyadminregistration \
import AgencyAdminRegistrationHandler
from application.handlers.pages.dashboard import DashboardHandler
from application.handlers.pages.adminregister import AdminRegisterHandler
from application.handlers.pages.upload import UploadHandler
from application.handlers.pages.viewer import ViewerHandler
from application.handlers.pages.import_ import ImportHandler
from application.handlers.pages.invitedenvironment \
import InvitedEnvironmentHandler
from application.handlers.pages.scriptuploading import ScriptUploadingHandler
from application.handlers.pages.publicuserregistration \
import PublicUsersRegistrationHandler
from application.handlers.pages.passwordreset import PasswordResetHandler
from application.handlers.pages.verifyregister import VerifyRegisterHandler
from application.handlers.pages.sendverification import SendVerificationHandler
from application.handlers.pages.usergroups import UserGroupsHandler
from application.handlers.pages.classificationtokml \
import ClassificationToKMLHandler
from application.handlers.pages.environment import EnvironmentHandler
from application.handlers.pages.permission import PermissionHandler
from application.handlers.pages.taskqueueemails import TaskQueueEmailsHandler
from application.handlers.pages.taskcounter import TaskCounterHandler
from application.handlers.pages.taskimage import TaskImageHandler
from application.handlers.api.psgc import PSGCHandler
from application.handlers.api.redflags import RedFlagsHandler
from application.handlers.api.apiproxy import APIProxyHandler
from application.handlers.api.uacsapi import UACSAPIHandler
from application.handlers.api.uacsapiv2 import UACSAPIV2Handler
from application.handlers.api.usersapi import UsersApiHandler
from application.handlers.api.environmentsapi import EnvironmentsApiHandler
from application.handlers.api.usergroupsapi import UserGroupsApiHandler
from application.handlers.api.dataapi import DataApiHandler
from application.handlers.api.logs import LogsHandler
from application.handlers.api.classificationupload \
import ClassificationUploadHandler
from application.handlers.api.apikmldownloader import APIKMLDownloader
from application.handlers.api.dataapiupdate import DataApiUpdateHandler
from application.handlers.api.dataapipublish import DataApiPublishHandler
from application.handlers.api.dataapidetails import DataApiDetailsHandler
from application.handlers.api.kmllength import KMLLengthHandler
from application.handlers.api.program import ProgramAPIHandler
from application.handlers.pages.error import ErrorHandler
from application.handlers.pages.logexception import LogExceptionHandler
from application.handlers.pages.main_ import MainHandler
from application.handlers.pages.program import ProgramHandler
from application.handlers.pages.agency import AgencyHandler
from application.handlers.pages.workspace import WorkspaceHandler
from application.handlers.pages.new_statistics import NewStatisticsDashboard
from application.handlers.pages.generate_statistics import GenerateStatisticsHandler
from application.models.apidata import APIData
from google.appengine.ext import ndb
class TaskRePutHandler(webapp2.RequestHandler):
    """Task-queue handler that re-puts every APIData entity in pages of 50.

    Re-writing each entity unchanged refreshes its datastore representation.
    The handler processes one page per task invocation and chains itself
    (passing the cursor and running count) until a short page is reached.
    """

    def post(self):
        page_size = 50

        cursor_arg = self.request.get('cursor')
        start_cursor = Cursor(urlsafe=cursor_arg) if cursor_arg else None

        count_arg = self.request.get('count')
        processed = int(count_arg) if count_arg else 0

        page, next_cursor, more = (
            APIData.query()
            .order(APIData.created_time)
            .fetch_page(page_size, start_cursor=start_cursor)
        )

        if page:
            # Writing the entities back unchanged is the whole point.
            ndb.put_multi(page)
            processed += len(page)
        logging.debug('count: ' + str(processed))

        # A full page means there may be more records: chain the next task.
        if len(page) == page_size and next_cursor:
            taskqueue.add(
                url=('/api/v1/JMKr5roUu0EQyssRVv8mvkgXsmQBt3sgNDbfoBIkwoUi59dz'
                     'zQJnvmQ5jIlNtC4c'),
                params={'cursor': next_cursor.urlsafe(),
                        'count': str(processed)}
            )
# Thread-local storage slot (available for per-request state).
this_thread = threading.local()

# Jinja2 environment for templates under application/frontend/.
jinja_workspace = jinja2.Environment(
    loader=jinja2.FileSystemLoader('application/frontend/'),
    autoescape=True,
    trim_blocks=True)
# Expose the date-formatting helper as a template filter.
jinja_workspace.filters['to_date_format_only'] = to_date_format_only
# URL routing table. The catch-all DomainRoute serves every domain; routes
# are matched top-down, so more specific paths precede broader ones, and the
# final r'/<:.*>' route funnels any unmatched URL to ErrorHandler.
app = webapp2.WSGIApplication([
    routes.DomainRoute(r'<:.*>', [
        webapp2.Route('/', MainHandler),
        webapp2.Route('/dashboard', DashboardHandler),
        webapp2.Route('/dashboard/statistics', StatisticsDashboard),
        webapp2.Route('/dashboard/statistics2', StatisticsDashboard2),
        # webapp2.Route(r'/statistics/generate/<:.*>', GenerateStatisticsHandler),
        webapp2.Route('/statistics/generate', GenerateStatisticsHandler),
        webapp2.Route('/statistics', NewStatisticsDashboard),
        # Project dashboard accepts up to six wildcard path segments.
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>',
                      ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>',
                      ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>/<:.*>',
                      ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>', ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>', ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>', ProjectDashboardHandler),
        webapp2.Route(r'/upload/<:.*>/<:.*>/<:.*>/<:.*>', UploadHandler),
        webapp2.Route(r'/upload/<:.*>/<:.*>/<:.*>', UploadHandler),
        webapp2.Route(r'/upload/<:.*>/<:.*>', UploadHandler),
        webapp2.Route(r'/upload/<:.*>', UploadHandler),
        webapp2.Route('/projects', ProjectDashboardHandler),
        webapp2.Route(r'/programs/<:.*>/<:.*>', ProgramHandler),
        webapp2.Route(r'/programs/<:.*>', ProgramHandler),
        webapp2.Route('/programs', ProgramHandler),
        webapp2.Route(r'/agencies/<:.*>', AgencyHandler),
        webapp2.Route('/agencies', AgencyHandler),
        webapp2.Route('/viewer', ViewerHandler),
        webapp2.Route('/import', ImportHandler),
        webapp2.Route(r'/import/<:.*>', ImportHandler),
        webapp2.Route(r'/invite/workspace/<:.*>', InvitedEnvironmentHandler),
        webapp2.Route(r'/su/<:.*>', ScriptUploadingHandler),
        # Authentication / account management pages.
        webapp2.Route('/login', LoginHandler),
        webapp2.Route('/login/authorize', LoginOauthHandler),
        webapp2.Route(r'/login/verify/<:.*>', VerifyLoginCode),
        webapp2.Route('/logout', LogoutHandler),
        webapp2.Route('/api/logout', LogoutApiHandler),
        webapp2.Route('/register', RegisterHandler),
        webapp2.Route('/admin/register', AdminRegisterHandler),
        webapp2.Route('/register/verify', VerifyRegisterHandler),
        webapp2.Route('/register/verify/send', SendVerificationHandler),
        webapp2.Route('/agency/admins', AgencyAdminRegistrationHandler),
        webapp2.Route('/users/registration', PublicUsersRegistrationHandler),
        webapp2.Route('/password/reset', PasswordResetHandler),
        webapp2.Route('/groups', UserGroupsHandler),
        webapp2.Route(r'/groups/<:.*>', UserGroupsHandler),
        webapp2.Route('/workspace', WorkspaceHandler),
        webapp2.Route(r'/workspace/<:.*>', WorkspaceHandler),
        # Geoprocessing pages.
        webapp2.Route('/geoprocessing/dashboard',
                      GeoprocessingDashboardHandler),
        webapp2.Route('/geoprocessing/for_geoprocessing',
                      ForGeoprocessedPageHandler),
        webapp2.Route('/geoprocessing/geoprocessed', GeoprocessedPageHandler),
        webapp2.Route('/geoprocessing/classification',
                      GeoprocessingClassificationHandler),
        webapp2.Route('/geoprocessing/tool', GeoprocessingToolHandler),
        webapp2.Route('/geoprocessing/tool/images',
                      GeoprocessingToolImagesHandler),
        webapp2.Route('/geoprocessing/kml/download',
                      ClassificationToKMLHandler),
        # TASKQUEUE
        webapp2.Route('/tasks/email/send', TaskQueueEmailsHandler),
        webapp2.Route('/tasks/counter', TaskCounterHandler),
        webapp2.Route('/tasks/images', TaskImageHandler),
        # API ENDPOINTS
        webapp2.Route('/api/v1/length', KMLLengthHandler),
        webapp2.Route(r'/api/v1/programs/<:.*>', ProgramAPIHandler),
        webapp2.Route('/api/v1/programs', ProgramAPIHandler),
        webapp2.Route('/api/v1/psgc', PSGCHandler),
        webapp2.Route('/api/v1/redflags', RedFlagsHandler),
        webapp2.Route('/api/v1/proxy', APIProxyHandler),
        webapp2.Route('/api/v1/uacs', UACSAPIHandler),
        webapp2.Route('/api/v2/uacs', UACSAPIV2Handler),
        webapp2.Route('/api/v1/permissions', PermissionHandler),
        webapp2.Route('/api/v1/users', UsersApiHandler),
        webapp2.Route(r'/api/v1/users/<:.*>', UsersApiHandler),
        webapp2.Route('/api/v1/workspaces', EnvironmentsApiHandler),
        webapp2.Route(r'/api/v1/workspaces/<:.*>', EnvironmentsApiHandler),
        webapp2.Route('/api/v1/groups', UserGroupsApiHandler),
        webapp2.Route(r'/api/v1/groups/<:.*>', UserGroupsApiHandler),
        webapp2.Route('/api/v1/classification', ClassificationUploadHandler),
        webapp2.Route('/api/v1/KML', APIKMLDownloader),
        webapp2.Route('/api/v1/data', DataApiHandler),
        webapp2.Route(r'/api/v1/data/<:.*>/update', DataApiUpdateHandler),
        webapp2.Route(r'/api/v1/data/<:.*>/publish', DataApiPublishHandler),
        webapp2.Route(r'/api/v1/data/<:.*>', DataApiDetailsHandler),
        webapp2.Route(r'/api/v1/logs', LogsHandler),
        webapp2.Route(r'/<:.*>', ErrorHandler)
    ])
], debug=True)

# Route uncaught exceptions (HTTP 500) to the central exception logger.
app.error_handlers[500] = LogExceptionHandler.log_exception
| 50.075556 | 84 | 0.730097 | 903 | 0.080146 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.165084 |
3909f9fc72229c5e4f7632df5919c798d9731eae | 36,253 | py | Python | railrl/planner/forward_planner/planner.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | railrl/planner/forward_planner/planner.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | railrl/planner/forward_planner/planner.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | from railrl.data_management.simple_replay_pool import SimpleReplayPool
from railrl.predictors.dynamics_model import FullyConnectedEncoder, InverseModel, ForwardModel
import tensorflow as tf
import time
import numpy as np
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti
def planner_info(arm_loss, box_loss, forward_models_outputs):
    """Bundle planner diagnostics into the dict returned alongside an action."""
    info = {
        'arm_loss': arm_loss,
        'box_loss': box_loss,
        'forward_models_outputs': forward_models_outputs,
    }
    return info
def gather_cols(params, indices, name=None):
    """Gather columns of a 2D tensor.

    Args:
        params: A 2D tensor.
        indices: A 1D tensor. Must be one of the following types: ``int32``, ``int64``.
        name: A name for the operation (optional).

    Returns:
        A 2D Tensor. Has the same type as ``params``.

    Raises:
        ValueError: if ``params`` is not 2D or ``indices`` is not 1D.
    """
    with tf.op_scope([params, indices], name, "gather_cols") as scope:
        # Check input
        params = tf.convert_to_tensor(params, name="params")
        indices = tf.convert_to_tensor(indices, name="indices")
        try:
            params.get_shape().assert_has_rank(2)
        except ValueError:
            raise ValueError('\'params\' must be 2D.')
        try:
            indices.get_shape().assert_has_rank(1)
        except ValueError:
            # BUGFIX: this branch validates `indices`, but the original
            # message blamed 'params', masking the real culprit.
            raise ValueError('\'indices\' must be 1D.')

        # Define op: flatten params, compute the flat index of each requested
        # column for every row, gather, then reshape to (num_rows, -1).
        p_shape = tf.shape(params)
        p_flat = tf.reshape(params, [-1])
        i_flat = tf.reshape(tf.reshape(tf.range(0, p_shape[0]) * p_shape[1],
                                       [-1, 1]) + indices, [-1])
        return tf.reshape(tf.gather(p_flat, i_flat),
                          [p_shape[0], -1])
"""
Planner takes two states (S_init and S_goal) and output an action.
Fine Tune is out of the scope of Planner
"""
class Planner(object):
    """Base class for planners that map (S_init, S_goal) to an action.

    Subclasses are expected to override :meth:`get_action`; the base
    implementation is a no-op that returns ``None``.
    """

    def __init__(
            self,
            dynamic_model,
            encoder,
            sess
    ):
        self.encoder = encoder
        self.dynamic_model = dynamic_model
        self.sess = sess
        ##initialize the model.....

    def get_action(self, S_init, S_goal):
        """Return the action moving S_init toward S_goal (base: None).

        BUGFIX: ``self`` was missing from the signature, so calling
        ``planner.get_action(S_init, S_goal)`` raised a TypeError.
        """
        return None
"""
Inverde_model planner should be easy, just return the action
"""
class InverseModelPlanner(object):
    """Planner that queries a learned inverse dynamics model directly.

    The inverse model predicts the action that transitions the system from
    ``S_init`` to ``S_goal``, so no iterative optimization is required.
    """

    def __init__(
            self,
            dynamic_model,
            env,
            encoder,
            sess=None,
    ):
        if sess is None:  # idiom fix: compare to None with `is`, not `==`
            sess = tf.get_default_session()
        self.sess = sess
        # Re-construct the model graph: encode both observations with
        # weight-tied encoder copies and feed the feature pair into a
        # weight-tied copy of the inverse model.
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
        encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
        self.inverse_model = dynamic_model.get_weight_tied_copy(
            feature_input1=encoder1.output,
            feature_input2=encoder2.output)

    def get_action(self, S_init, S_goal):
        """Predict the action that maps S_init to S_goal in one step."""
        action = self.sess.run(
            self.inverse_model.output,
            feed_dict={self.S_init_ph: S_init, self.S_goal_ph: S_goal})
        return action
"""
ForwardModel planner, optimize action according to this objective:
min_{a} (S_next - S_goal)^2
"""
class CEMPlanner_arm_coord():
    """Cross-entropy-method (CEM) planner scoring box error in global coords.

    The forward model is unrolled ``max_length`` steps; the whole flattened
    action sequence is optimized by repeatedly sampling candidates, keeping
    the ``top_k`` elites, and refitting a multivariate Gaussian to them.
    """

    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            sess = None,
            max_length = 15,
            sample_batch_size = 2000,
            top_k = 200,
            action_penalty=False,
            accumulated_loss = False):
        # sample_batch_size: candidates drawn per CEM refit iteration.
        # top_k: elite samples used to refit the sampling Gaussian.
        self.sample_batch_size = sample_batch_size
        self.top_k = top_k
        self.env = env
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        self.max_length = max_length
        # Action sequence placeholder: (time, batch, action_dim=4).
        self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
        self.forward_model_list = []
        #build the recurrent model w.t. the max length
        # NOTE(review): observations are assumed 24-dimensional here -- confirm.
        self.S_init_ph = tf.placeholder(tf.float32, [None, 24])
        self.S_goal_ph = tf.placeholder(tf.float32, [None, 24])
        #only two feature encoders
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
        forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                           action_input=self.action_ph[0])
        self.forward_model_list.append(forward_model)
        self.forward_model_output_list = [forward_model.output] #for debug purpose only
        # Chain weight-tied forward-model copies: step i consumes step i-1's output.
        for i in range(1,max_length):
            forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
                                                               action_input = self.action_ph[i])
            self.forward_model_list.append(forward_model)
            self.forward_model_output_list.append(forward_model.output)
        ## objective
        def transfer_box_global_tf(obs):
            # Global box position = arm position (cols 21,22) plus the scaled
            # arm-to-box offset (cols 4,5).
            # NOTE(review): the /10.0 scale and column indices are assumed to
            # match the encoder's feature layout -- confirm.
            arm2box = gather_cols(obs, [4,5])/10.0
            return gather_cols(obs, [21,22]) + arm2box
        self.objective_list = []
        self.arm_loss_list = []
        self.box_loss_list = []
        self.objective_topk_index_list = []
        current_objective = 0
        #objective
        for forward_model in self.forward_model_list:
            if accumulated_loss:
                # Accumulate box-position error across all unrolled steps.
                current_objective += tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
                    transfer_box_global_tf(self.encoder2.output)), axis = 1)
            else:
                # Box-position error of this step only.
                current_objective = tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
                    transfer_box_global_tf(self.encoder2.output)), axis = 1)
            self.objective_list.append(current_objective)
            self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
            self.box_loss_list.append(tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
                transfer_box_global_tf(self.encoder2.output)))*100)
        if action_penalty:
            # Add an L2 penalty over the whole action sequence to every objective.
            for i in range(len(self.objective_list)):
                self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.5

    def get_action(self, S_init, S_goal, steps = 1, plot_loss = False, debug = False, stop_variance = 0.2, stop_itr = 3, init_batch_size = 50000):
        """Run CEM for `stop_itr` rounds; return (first action, diagnostics).

        The objective evaluated is ``self.objective_list[steps-1]``, i.e. the
        error after `steps` forward-model steps.
        """
        assert(steps <= self.max_length)
        #fit a multivariable Gaussian
        mean_list = None
        cov_matrix = None
        # Broadcast the two states across the (large) initial sample batch.
        batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1,-1))
        batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1,-1))
        #CEM
        # Round 0: uniform samples in [-1, 1).
        actions = np.random.rand(self.max_length, init_batch_size, 4)*2 - 1
        objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
            self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
        sorted_index = np.argsort(objective_list)[:self.top_k]
        # debug
        # action_pen, objective_debug = self.sess.run([tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.3, self.objective_list[14]], feed_dict = {self.action_ph:actions, \
        #          self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
        # import pdb; pdb.set_trace()
        best_actions = actions[:,sorted_index, :]
        # Flatten each elite's (time, 4) sequence and fit the Gaussian over it.
        trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
        cov_matrix = np.cov(trans_best_actions.T)
        mean_list = np.mean(trans_best_actions.T, axis = 1)
        batch_S_init = np.dot(np.ones([self.sample_batch_size, 1]), S_init.reshape(1,-1))
        batch_S_goal = np.dot(np.ones([self.sample_batch_size, 1]), S_goal.reshape(1,-1))
        # Remaining rounds: sample from the refit Gaussian, keep elites, refit.
        for i in range(stop_itr-1):
            actions = np.random.multivariate_normal(mean_list, cov_matrix, self.sample_batch_size).reshape(self.sample_batch_size, self.max_length, 4)
            actions = np.moveaxis(actions, 0,1)
            objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
                self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
            sorted_index = np.argsort(objective_list)[:self.top_k]
            best_actions = actions[:,sorted_index, :]
            trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
            cov_matrix = np.cov(trans_best_actions.T)
            mean_list = np.mean(trans_best_actions.T, axis = 1)
        # import pdb; pdb.set_trace()
        #if debug, visualize all forward model's output
        best_action = best_actions[:,0,:]
        # NOTE(review): reshape(15,1,4) hard-codes max_length == 15 -- confirm.
        arm_loss, box_loss,forward_models_outputs, final_objective = self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
            self.forward_model_output_list, self.objective_list[steps-1]], \
            {self.action_ph: best_action.reshape(15,1,4), \
            self.S_init_ph:[S_init], self.S_goal_ph:[S_goal]})
        print("final objective")
        print(final_objective)
        # import pdb; pdb.set_trace()
        return best_actions[0,0], {'arm_loss':arm_loss, 'box_loss':box_loss, 'forward_models_outputs':forward_models_outputs[:steps]}
class CEMPlanner():
    """Cross-entropy-method planner scoring error directly in feature space.

    Same CEM machinery as ``CEMPlanner_arm_coord``, but the objective compares
    raw encoder features (optionally position columns only) instead of
    transforming the box position to global coordinates.
    """

    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            sess = None,
            pos_only = True,
            max_length = 15,
            sample_batch_size = 2000,
            top_k = 200,
            action_penalty=False,
            accumulated_loss = False):
        # sample_batch_size: candidates drawn per CEM refit iteration.
        # top_k: elite samples used to refit the sampling Gaussian.
        self.sample_batch_size = sample_batch_size
        self.top_k = top_k
        self.env = env
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        self.max_length = max_length
        # Action sequence placeholder: (time, batch, action_dim=4).
        self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
        self.forward_model_list = []
        #build the recurrent model w.t. the max length
        self.S_init_ph = tf.placeholder(tf.float32, [None]+list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, [None]+list(env.observation_space.shape))
        #only two feature encoders
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
        forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                           action_input=self.action_ph[0])
        self.forward_model_list.append(forward_model)
        self.forward_model_output_list = [forward_model.output] #for debug purpose only
        # Chain weight-tied forward-model copies: step i consumes step i-1's output.
        for i in range(1,max_length):
            forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
                                                               action_input = self.action_ph[i])
            self.forward_model_list.append(forward_model)
            self.forward_model_output_list.append(forward_model.output)
        ## objective
        self.objective_list = []
        self.arm_loss_list = []
        self.box_loss_list = []
        self.objective_topk_index_list = []
        current_objective = 0
        if pos_only:
            # Score only the position columns of the feature vector.
            # NOTE(review): the two branches use [4,5,6] vs range(4,7) -- the
            # same columns, just written inconsistently.
            for forward_model in self.forward_model_list:
                if accumulated_loss:
                    current_objective += tf.reduce_sum(tf.square(gather_cols(forward_model.output, [4,5,6])\
                        - gather_cols(self.encoder2.output, [4,5,6])), axis = 1)
                else:
                    current_objective = tf.reduce_sum(tf.square(gather_cols(forward_model.output, list(range(4,7)))\
                        - gather_cols(self.encoder2.output, list(range(4,7)))), axis = 1)
                self.objective_list.append(current_objective)
                self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
                self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
        else:
            # Score the full feature vector of the first batch element.
            for forward_model in self.forward_model_list:
                self.objective_list.append(tf.reduce_sum(tf.square(forward_model.output[0] - self.encoder2.output[0])))
                self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
                self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
        if action_penalty:
            # Add an L2 penalty over the whole action sequence to every objective.
            for i in range(len(self.objective_list)):
                self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.5

    def get_action(self, S_init, S_goal, steps = 1, plot_loss = False, debug = False, stop_variance = 0.2, stop_itr = 3, init_batch_size = 50000):
        """Run CEM for `stop_itr` rounds; return (first action, diagnostics)."""
        assert(steps <= self.max_length)
        #fit a multivariable Gaussian
        mean_list = None
        cov_matrix = None
        # Broadcast the two states across the (large) initial sample batch.
        batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1,-1))
        batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1,-1))
        #CEM
        # Round 0: uniform samples in [-1, 1).
        actions = np.random.rand(self.max_length, init_batch_size, 4)*2 - 1
        objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
            self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
        sorted_index = np.argsort(objective_list)[:self.top_k]
        #debug
        # action_pen, objective_debug = self.sess.run([tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.3, self.objective_list[14]], feed_dict = {self.action_ph:actions, \
        #          self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
        # import pdb; pdb.set_trace()
        best_actions = actions[:,sorted_index, :]
        # Flatten each elite's (time, 4) sequence and fit the Gaussian over it.
        trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
        cov_matrix = np.cov(trans_best_actions.T)
        mean_list = np.mean(trans_best_actions.T, axis = 1)
        batch_S_init = np.dot(np.ones([self.sample_batch_size, 1]), S_init.reshape(1,-1))
        batch_S_goal = np.dot(np.ones([self.sample_batch_size, 1]), S_goal.reshape(1,-1))
        # Remaining rounds: sample from the refit Gaussian, keep elites, refit.
        for i in range(stop_itr-1):
            actions = np.random.multivariate_normal(mean_list, cov_matrix, self.sample_batch_size).reshape(self.sample_batch_size, self.max_length, 4)
            actions = np.moveaxis(actions, 0,1)
            objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
                self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
            sorted_index = np.argsort(objective_list)[:self.top_k]
            best_actions = actions[:,sorted_index, :]
            trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
            cov_matrix = np.cov(trans_best_actions.T)
            mean_list = np.mean(trans_best_actions.T, axis = 1)
        # import pdb; pdb.set_trace()
        #if debug, visualize all forward model's output
        best_action = best_actions[:,0,:]
        # NOTE(review): reshape(15,1,4) hard-codes max_length == 15 -- confirm.
        arm_loss, box_loss,forward_models_outputs, final_objective = self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
            self.forward_model_output_list, self.objective_list[steps-1]], \
            {self.action_ph: best_action.reshape(15,1,4), \
            self.S_init_ph:[S_init], self.S_goal_ph:[S_goal]})
        print("final objective")
        print(final_objective)
        arm_obj = np.sum(np.square(forward_models_outputs[steps-1][0][:4] - S_goal[:4]))
        box_obj = np.sum(np.square(forward_models_outputs[steps-1][0][4:7] - S_goal[4:7]))
        print('arm objective is {}, box objective is {}'.format(arm_obj, box_obj))
        # import pdb; pdb.set_trace()
        return best_actions[0,0], {'arm_loss':arm_loss, 'box_loss':box_loss, 'forward_models_outputs':forward_models_outputs[:steps]}
class FastClippedSgdShootingForwardModelPlanner_cumulated_obj(object):
    """Shooting planner: clipped SGD on the action sequence, cumulative loss.

    Unrolls the forward model ``max_length`` steps and minimizes a
    discount-weighted (factor 0.4 per step) sum of per-step state errors by
    gradient descent on the action placeholder, clipping both the gradient
    norm and the actions to [-1, 1].
    """

    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            init_lr = 0.5,
            sess = None,
            pos_only = False,
            max_length = 15,
    ):
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        self.init_lr = init_lr
        self.max_length = max_length
        # Single-trajectory action sequence: (time, batch=1, action_dim=4).
        self.action_ph = tf.placeholder(tf.float32, [max_length, 1, 4])
        self.forward_model_list = []
        #build the recurrent model w.t. the max length
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        #only two feature encoders
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
        forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                           action_input=self.action_ph[0])
        self.forward_model_list.append(forward_model)
        # Chain weight-tied forward-model copies: step i consumes step i-1's output.
        for i in range(1,max_length):
            forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
                                                               action_input = self.action_ph[i])
            self.forward_model_list.append(forward_model)
        ## objective
        self.objective_list = []
        self.forward_model_loss_list = []
        self.arm_loss_list = []
        self.box_loss_list = []
        objective = 0
        factor = 1
        if pos_only:
            # Discounted cumulative loss over position features (first 6 dims).
            # NOTE(review): dims [:4] treated as arm, [4:6] as box -- confirm
            # against the encoder's feature layout.
            for forward_model in self.forward_model_list:
                factor=factor*0.4
                self.forward_model_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6])))
                objective += factor*tf.reduce_sum(tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6]))
                self.objective_list.append(objective)
                self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
                self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
        else:
            # Un-discounted cumulative loss over the full feature vector.
            for forward_model in self.forward_model_list:
                objective += tf.reduce_sum(tf.square(forward_model.output[0] - self.encoder2.output[0]))
                self.objective_list.append(objective)
        self.action_grad_list = []
        for obj in self.objective_list:
            #those tail term in action_ph will receive 0 gradient
            self.action_grad_list.append(tf.gradients(obj, self.action_ph))
        # Live plot of first-step vs cumulative loss during optimization.
        self.vis_tool = MyAnimationMulti(None, numPlots=2, isIm=[0,0], axTitles=['(S1-S_goal)^2', 'sum(S_i-S_goal)^2'])

    def get_action(self, S_init, S_goal, steps = None, plot_loss = False):
        """Optimize the action sequence by clipped SGD; return first action.

        Returns (action, [arm_loss, box_loss, predicted_next_state]).
        """
        if steps == None:
            steps = 1 #greedy planner
        else:
            assert(steps <= self.max_length)
        action = np.zeros([self.max_length, 1, 4])
        action_grad = self.action_grad_list[steps - 1]
        # TODO: Find a good stop criteria
        now = time.time()
        S1_loss_list = []
        Sn_loss_list = []
        for i in range(0,101):
            feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal, self.action_ph : action}
            S1_loss, Sn_loss = self.sess.run([self.objective_list[0], self.objective_list[steps-1]], feed_dict=feed_dict)
            S1_loss_list.append(S1_loss)
            Sn_loss_list.append(Sn_loss)
            if plot_loss and i%20 ==0:
                self.vis_tool._display([[range(i+1), S1_loss_list],[range(i+1), Sn_loss_list]])
            gradient = np.array(self.sess.run(action_grad, feed_dict = feed_dict)[0])
            if np.isnan(gradient).any():
                # NaN gradient: restart from a random action sequence.
                action = np.random.rand(self.max_length, 1, 4)-0.5
                print('nan gradient step{}'.format(i))
                import pdb; pdb.set_trace()
            else:
                # Clip gradient norm to 4*steps, take an SGD step, then clip
                # the actions back into the valid [-1, 1] range.
                if np.linalg.norm(gradient) > steps*4:
                    gradient = gradient/np.linalg.norm(gradient)*4*steps
                action -= gradient/1.0*self.init_lr
                action = np.clip(action, -1, 1)
        # if i %200 == 0:
        #   print("#########Optimizing action#########")
        #   action_loss, predicted_next_state = self.sess.run([self.objective_list[steps-1], self.forward_model_list[steps-1].output], feed_dict = feed_dict)
        #   box_loss = np.sum(np.square(predicted_next_state[0][4:6] - S_goal[4:6]))
        #   arm_loss = np.sum(np.square(predicted_next_state[0][0:4] - S_goal[0:4]))
        #   print("action_loss(sum_square_error(S_goal, S_next)) is {}, box_loss is {}, arm_loss is {}".format(action_loss, box_loss, arm_loss))
        #   print("current_action is {}".format(action[0][0]))
        #   # print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
        #   print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
        #   now = time.time()
        return action[0][0], self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], self.forward_model_list[0].output], feed_dict)
class FastClippedSgdShootingForwardModelPlanner(object):
    """Shooting planner: clipped SGD on the action sequence, final-step loss.

    Like the ``_cumulated_obj`` variant, but each objective scores only the
    error at its own unrolled step (no discounted accumulation), and the SGD
    step size decays as 1/(1 + 0.05*i).
    """

    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            init_lr = 0.5,
            sess = None,
            pos_only = False,
            max_length = 15,
    ):
        self.env = env
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        self.init_lr = init_lr
        self.max_length = max_length
        # Single-trajectory action sequence: (time, batch=1, action_dim=4).
        self.action_ph = tf.placeholder(tf.float32, [max_length, 1, 4])
        self.forward_model_list = []
        #build the recurrent model w.t. the max length
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        #only two feature encoders
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
        forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                           action_input=self.action_ph[0])
        self.forward_model_list.append(forward_model)
        self.forward_model_output_list = [forward_model.output]
        # Chain weight-tied forward-model copies: step i consumes step i-1's output.
        for i in range(1,max_length):
            forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
                                                               action_input = self.action_ph[i])
            self.forward_model_list.append(forward_model)
            self.forward_model_output_list.append(forward_model.output)
        ## objective
        self.objective_list = []
        self.arm_loss_list = []
        self.box_loss_list = []
        if pos_only:
            # Per-step loss over position features (first 6 dims) only.
            # NOTE(review): dims [:4] treated as arm, [4:6] as box -- confirm
            # against the encoder's feature layout.
            for forward_model in self.forward_model_list:
                self.objective_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6])))
                self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
                self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
        else:
            # Per-step loss over the full feature vector.
            for forward_model in self.forward_model_list:
                self.objective_list.append(tf.reduce_sum(tf.square(forward_model.output[0] - self.encoder2.output[0])))
        self.action_grad_list = []
        for obj in self.objective_list:
            #those tail term in action_ph will receive 0 gradient
            self.action_grad_list.append(tf.gradients(obj, self.action_ph))
        # Live plot of first-step vs n-th-step loss during optimization.
        self.vis_tool = MyAnimationMulti(None, numPlots=2, isIm=[0,0], axTitles=['(S1-S_goal)^2', '(S_n-S_goal)^2'])

    def get_action(self, S_init, S_goal, steps = None, plot_loss = False):
        """Optimize the action sequence by clipped SGD; return first action.

        Returns (action, planner_info(...)) with the unrolled model outputs
        for the first `steps` steps.
        """
        if steps == None:
            steps = 1 #greedy planner
        else:
            assert(steps <= self.max_length)
        action = np.zeros([self.max_length, 1, 4])
        action_grad = self.action_grad_list[steps - 1]
        # TODO: Find a good stop criteria
        now = time.time()
        S1_loss_list = []
        Sn_loss_list = []
        for i in range(0,51):
            feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal, self.action_ph : action}
            S1_loss, Sn_loss = self.sess.run([self.box_loss_list[0], self.box_loss_list[steps-1]], feed_dict=feed_dict)
            S1_loss_list.append(S1_loss)
            Sn_loss_list.append(Sn_loss)
            if plot_loss and i %1 == 0:
                self.vis_tool._display([[range(i+1), S1_loss_list],[range(i+1), Sn_loss_list]])
            gradient = np.array(self.sess.run(action_grad, feed_dict = feed_dict)[0])
            if np.isnan(gradient).any():
                # NaN gradient: restart from a random action sequence.
                action = np.random.rand(self.max_length, 1, 4)-0.5
                print('nan gradient step{}'.format(i))
                import pdb; pdb.set_trace()
            else:
                # Clip gradient norm to 4*steps, take a decayed SGD step, then
                # clip the actions back into the valid [-1, 1] range.
                if np.linalg.norm(gradient) > steps*4:
                    gradient = gradient/np.linalg.norm(gradient)*4*steps
                action -= gradient/(1.+i*0.05)*self.init_lr
                action = np.clip(action, -1, 1)
        arm_loss, box_loss, forward_models_outputs = \
            self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
                self.forward_model_output_list], feed_dict)
        return action[0][0], planner_info(arm_loss, box_loss, forward_models_outputs[:steps])
class FastClippedSgdForwardModelPlanner(object):
    """Greedy one-step planner: optimizes a raw numpy action with hand-rolled
    SGD on the forward-model objective, clipping into [-1, 1] after each step.
    """
    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            action_initializer = None,
            init_lr = 1,
            sess = None,
            pos_only = False,
    ):
        """
        :param dynamic_model: forward model; a weight-tied copy is built here
        :param encoder: state encoder; weight-tied copies embed init/goal states
        :param env: environment providing observation_space.shape
        :param action_initializer: unused in this class (kept for interface parity)
        :param init_lr: unused in this class (the step size is hard-coded below)
        :param sess: tf session; defaults to the current default session
        :param pos_only: if True, the objective matches only latent dims [:6]
        """
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        # with tf.variable_scope('action_optimizer'):
        #     self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
        # the action is fed as a placeholder (not a tf variable), shape [batch, 4]
        self.action_ph = tf.placeholder(tf.float32, [None, 4])
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
        self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                                action_input=self.action_ph)
        ## objective
        if pos_only:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
        else:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
        # diagnostic losses: latent dims [:4] treated as arm, [4:6] as box
        self.arm_loss = tf.reduce_sum(tf.square(self.forward_model.output[0][:4] - self.encoder2.output[0][:4]))
        self.box_loss = tf.reduce_sum(tf.square(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6]))
        #Adam optimizer has its own variables. Wrap it by a namescope
        self.action_grad = tf.gradients(self.objective, self.action_ph)
        # with tf.variable_scope('action_optimizer'):
        #     self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(self.objective, var_list = [self.clipped_action])
        # self.action_gradient = tf.train.AdamOptimizer(init_lr).compute_gradients(self.objective, var_list = [self.action])
    def get_action(self, S_init, S_goal):
        """Optimize one action for 151 SGD steps with a decaying step size.

        :return: (optimized action, [arm_loss, box_loss] at the final action)
        """
        #first re-initialize everyvariables in "action_optimizer"
        # variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
        # self.sess.run(tf.initialize_variables(variables))
        action = np.random.rand(4)-0.5
        # TODO: Find a good stop criteria
        now = time.time()
        for i in range(0,151):
            feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal, self.action_ph : [action]}
            gradient = self.sess.run([self.action_grad], feed_dict = feed_dict)[0][0][0]
            #raises NotImplementedError: ('Trying to optimize unsupported type ', <tf.Tensor 'clip_by_value:0' shape=(1, 4) dtype=float32>)
            #this code does not work....
            # import pdb; pdb.set_trace()
            # decaying step size: 0.5 / (1 + 0.2 * i)
            action -= gradient/(1.+i*0.2)*0.5
            action = np.clip(action, -1, 1)
            if i %50 == 0:
                print("#########Optimizing action#########")
                action_loss = self.sess.run(self.objective, feed_dict = feed_dict)
                print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
                print("current_action is {}".format(action))
                # print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
                print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
                now = time.time()
        return action, self.sess.run([ self.arm_loss, self.box_loss], feed_dict = feed_dict)
class SgdForwardModelPlanner(object):
    """Planner that optimizes a single tf action variable with Adam so the
    predicted next latent state matches the goal latent state.

    The action lives under the 'action_optimizer' variable scope together
    with Adam's slot variables, so both are re-initialized on every call
    to get_action.
    """
    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            action_initializer = None,
            init_lr = 1e-1,
            sess = None,
            pos_only = False,
    ):
        """
        :param dynamic_model: forward model; a weight-tied copy is built here
        :param encoder: state encoder; weight-tied copies embed init/goal states
        :param env: environment providing observation/action space shapes
        :param action_initializer: initializer for the action variable
            (defaults to uniform in [-0.1, 0.1])
        :param init_lr: Adam learning rate
        :param sess: tf session; defaults to the current default session
        :param pos_only: if True, the objective matches only latent dims [:6]
        """
        if sess == None:
            sess = tf.get_default_session()
        self.sess = sess
        ##re-construct the model
        if action_initializer is None:
            action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
        with tf.variable_scope('action_optimizer'):
            self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
        self.clipped_action = tf.clip_by_value(self.action, -1, 1)
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
        self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                                action_input=self.action)
        ## objective
        if pos_only:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
        else:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
        #Adam optimizer has its own variables. Wrap it by a namescope
        with tf.variable_scope('action_optimizer'):
            # BUGFIX: minimize() requires tf.Variable objects in var_list;
            # passing the clipped *tensor* raised
            # NotImplementedError("Trying to optimize unsupported type ...")
            # at graph construction time.  Optimize the variable itself, as
            # ClippedSgdForwardModelPlanner does.
            self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(self.objective, var_list = [self.action])
    def get_action(self, S_init, S_goal):
        """Re-initialize and optimize the action for 150 Adam steps.

        :return: [optimized action value, final objective value]
        """
        #first re-initialize everyvariables in "action_optimizer"
        variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
        self.sess.run(tf.initialize_variables(variables))
        feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal}
        # TODO: Find a good stop criteria
        now = time.time()
        for i in range(0,150):
            self.sess.run([self.action_opt], feed_dict = feed_dict)
            if i %50 == 0:
                print("#########Optimizing action#########")
                action_loss = self.sess.run(self.objective, feed_dict = feed_dict)
                print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
                print("current_action is {}".format(self.sess.run(self.action)))
                # print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
                print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
                now = time.time()
        return self.sess.run([self.action, self.objective], feed_dict = feed_dict)
    #debug API
    def predict_next_state(self, current_state, action, goal_state):
        """Run the forward model with `action` temporarily assigned.

        The previous action value is restored afterwards.
        :return: (predicted next latent state, encoded init, encoded goal, loss)
        """
        feed_dict = {self.S_init_ph:current_state, self.S_goal_ph: goal_state}
        old_action = self.sess.run(self.action)
        #assign new action
        self.sess.run(self.action.assign([action]))
        next_state, S_init, S_goal, loss = self.sess.run([self.forward_model.output,\
                                                          self.encoder1.output,\
                                                          self.encoder2.output,\
                                                          self.objective], feed_dict = feed_dict)
        #assign back the old action
        self.sess.run(self.action.assign(old_action))
        return next_state, S_init, S_goal, loss
class ClippedSgdForwardModelPlanner(object):
    """Planner that optimizes a tf action variable with Adam; the action is
    read back through a clip_by_value tensor to keep reports in [-1, 1].
    """
    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            action_initializer = None,
            init_lr = 1e-1,
            sess = None,
            pos_only = False,
    ):
        """
        :param dynamic_model: forward model; a weight-tied copy is built here
        :param encoder: state encoder; weight-tied copies embed init/goal states
        :param env: environment providing observation/action space shapes
        :param action_initializer: initializer for the action variable
            (defaults to uniform in [-0.1, 0.1])
        :param init_lr: Adam learning rate
        :param sess: tf session; defaults to the current default session
        :param pos_only: if True, the objective matches only latent dims [:6]
        """
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        ##re-construct the model
        if action_initializer is None:
            action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
        with tf.variable_scope('action_optimizer'):
            self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
        # clipped view of the variable used for reporting the current action
        self.clipped_action = tf.clip_by_value(self.action, -1, 1)
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
        self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                                action_input=self.action)
        ## objective
        if pos_only:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
        else:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
        #Adam optimizer has its own variables. Wrap it by a namescope
        with tf.variable_scope('action_optimizer'):
            self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(self.objective, var_list = [self.action])
            self.action_gradient = tf.train.AdamOptimizer(init_lr).compute_gradients(self.objective, var_list = [self.action])
    def get_action(self, S_init, S_goal):
        """Re-initialize and optimize the action for 150 Adam steps.

        :return: [optimized (unclipped) action value, final objective value]
        """
        #first re-initialize everyvariables in "action_optimizer"
        variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
        self.sess.run(tf.initialize_variables(variables))
        feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal}
        # TODO: Find a good stop criteria
        now = time.time()
        for i in range(0,150):
            #normal speed
            self.sess.run([self.action_opt], feed_dict = feed_dict)
            #slow and will be slower and slower
            # self.sess.run([self.clipped_action, self.action.assign(self.clipped_action), self.action_opt], \
            #               feed_dict = feed_dict)
            if i %50 == 0:
                print("#########Optimizing action#########")
                action_loss = self.sess.run(self.objective, feed_dict = feed_dict)
                print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
                print("current_action is {}".format(self.sess.run(self.clipped_action)))
                # print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
                print("{} sec elapsed for 100 gradient steps".format(time.time() - now))
                now = time.time()
        return self.sess.run([self.action, self.objective], feed_dict = feed_dict)
    #debug API
    def predict_next_state(self, current_state, action, goal_state):
        """Run the forward model with `action` temporarily assigned.

        The previous action value is restored afterwards.
        :return: (predicted next latent state, encoded init, encoded goal, loss)
        """
        feed_dict = {self.S_init_ph:current_state, self.S_goal_ph: goal_state}
        old_action = self.sess.run(self.action)
        #assign new action
        self.sess.run(self.action.assign([action]))
        next_state, S_init, S_goal, loss = self.sess.run([self.forward_model.output,\
                                                          self.encoder1.output,\
                                                          self.encoder2.output,\
                                                          self.objective], feed_dict = feed_dict)
        #assign back the old action
        self.sess.run(self.action.assign(old_action))
        return next_state, S_init, S_goal, loss
from sandbox.rocky.tf.core.parameterized import Parameterized
class ParameterizedAction(Parameterized):
    """Wraps a single trainable action variable as a Parameterized object.

    The variable lives in the 'action_optimizer' scope so it can be
    re-initialized alongside the planner optimizers.
    """
    def __init__(self, env, sess, action_initializer = None):
        """
        :param env: environment providing action_space.shape
        :param sess: tf session used for reads and re-initialization
        :param action_initializer: variable initializer
            (defaults to uniform in [-0.1, 0.1])
        """
        Parameterized.__init__(self)
        if action_initializer is None:
            action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
        with tf.variable_scope('action_optimizer'):
            self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
        self.sess = sess
        self.env = env
    def get_action(self):
        """Return the current value of the action variable."""
        return self.sess.run(self.action)
    def initalize_action(self):  # (sic) method name kept for backward compatibility
        """Re-initialize the action variable to a fresh random value."""
        # BUGFIX: tf.initialize_variables expects a *list* of variables;
        # passing the variable directly raised a TypeError at runtime.
        self.sess.run(tf.initialize_variables([self.action]))
        return
class ConstrainedForwardModelPlanner(object):
    """Planner that optimizes the action with SciPy SLSQP under the
    inequality constraints 1 - action_i^2 >= 0 (i.e. action in [-1, 1]).
    """
    def __init__(
            self,
            dynamic_model,
            encoder,
            env,
            sess = None,
            pos_only = False,
            action_initializer = None,
            optimizer = tf.contrib.opt.ScipyOptimizerInterface,
    ):
        """
        :param dynamic_model: forward model; a weight-tied copy is built here
        :param encoder: state encoder; weight-tied copies embed init/goal states
        :param env: environment providing observation_space.shape
        :param sess: tf session; defaults to the current default session
        :param pos_only: if True, the objective matches only latent dims [:6]
        :param action_initializer: initializer for the action variable
        :param optimizer: unused here; a fresh ScipyOptimizerInterface is
            constructed inside get_action on every call
        """
        if sess == None:
            sess =tf.get_default_session()
        self.sess = sess
        if action_initializer is None:
            action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
        with tf.variable_scope('action_optimizer'):
            self.action = tf.get_variable('planner_action', [1,4], initializer=action_initializer)
        ## rebuild the dynamic model
        self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
        self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
        self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
        self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
                                                                action_input=self.action)
        ## objective
        if pos_only:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
        else:
            self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
        self.loss = self.objective
        # inequality constraints: 1 - a_i^2 >= 0 keeps each action dim in [-1, 1]
        self.inequalities = []
        for i in range(4):
            self.inequalities.append(1-tf.square(self.action[0][i]))
        # Our default SciPy optimization algorithm, L-BFGS-B, does not support
        # general constraints. Thus we use SLSQP instead.
    def get_action(self, S_init, S_goal):
        """Re-initialize the action and optimize it with constrained SLSQP.

        :return: [optimized action value, final loss value]
        """
        #first re-initialize everyvariables in "action_optimizer"
        self.sess.run(tf.initialize_variables([self.action]))
        feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal}
        # need to re-initialize optimizer every time want to use it or it will optimize action without enforcing constrains.
        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.loss, var_list = [self.action], inequalities=self.inequalities, method='SLSQP')
        now = time.time()
        optimizer.minimize(self.sess, feed_dict = feed_dict)
        print("it takes {} to optimize the action".format(time.time() - now))
        return self.sess.run([self.action, self.loss], feed_dict = feed_dict)
390b330abd091d1c3e50678abd28d0cfcb440fe8 | 620 | py | Python | RobinhoodTrader/config.py | jaxbulsara/RobinhoodTrader | a22b4056b8786c1405e3cb06519f17e9ef685dac | [
"MIT"
] | 1 | 2021-04-17T16:24:11.000Z | 2021-04-17T16:24:11.000Z | RobinhoodTrader/config.py | jaxbulsara/RobinhoodTrader | a22b4056b8786c1405e3cb06519f17e9ef685dac | [
"MIT"
] | null | null | null | RobinhoodTrader/config.py | jaxbulsara/RobinhoodTrader | a22b4056b8786c1405e3cb06519f17e9ef685dac | [
"MIT"
] | 1 | 2021-04-17T16:24:16.000Z | 2021-04-17T16:24:16.000Z | from configparser import ConfigParser
import re
def getConfiguration():
    """Load config.ini from the working directory and return the parser.

    A missing file is not an error: the parser is simply returned empty.
    """
    parser = ConfigParser()
    parser.read("config.ini")
    return parser
def getQrCode():
    """Return the validated qrCode from the [login] section, or None."""
    config = getConfiguration()
    raw_code = config.get("login", "qrCode", fallback=None)
    return _checkQrCode(config, raw_code)
def _checkQrCode(config, qrCode):
if qrCode:
qrCodePattern = config.get("login", "qrCodePattern")
qrCodeIsValid = re.match(qrCodePattern, qrCode)
if qrCodeIsValid:
qrCode = qrCode
else:
qrCode = None
return qrCode
| 20.666667 | 60 | 0.659677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.079032 |
390cc4f320ef176a0561110f2931fe7231063d55 | 1,969 | py | Python | tb_api_client/test/test_auth_controller_api.py | MOSAIC-LoPoW/oss7-thingsboard-backend-example | 9b289dd7fdbb6e932ca338ad497a7bb1fc84d010 | [
"Apache-2.0"
] | 5 | 2017-11-27T15:48:16.000Z | 2020-09-21T04:18:47.000Z | tb_api_client/test/test_auth_controller_api.py | MOSAIC-LoPoW/oss7-thingsboard-backend-example | 9b289dd7fdbb6e932ca338ad497a7bb1fc84d010 | [
"Apache-2.0"
] | null | null | null | tb_api_client/test/test_auth_controller_api.py | MOSAIC-LoPoW/oss7-thingsboard-backend-example | 9b289dd7fdbb6e932ca338ad497a7bb1fc84d010 | [
"Apache-2.0"
] | 6 | 2018-01-14T17:23:46.000Z | 2019-06-24T13:38:54.000Z | # coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>.
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.auth_controller_api import AuthControllerApi
class TestAuthControllerApi(unittest.TestCase):
    """ AuthControllerApi unit test stubs """
    # NOTE(review): these are swagger-codegen generated stubs; each test body
    # is an empty `pass` and presumably awaits real assertions.
    def setUp(self):
        # fresh API client for every test case
        self.api = swagger_client.apis.auth_controller_api.AuthControllerApi()
    def tearDown(self):
        pass
    def test_activate_user_using_post(self):
        """
        Test case for activate_user_using_post

        activateUser
        """
        pass
    def test_change_password_using_post(self):
        """
        Test case for change_password_using_post

        changePassword
        """
        pass
    def test_check_activate_token_using_get(self):
        """
        Test case for check_activate_token_using_get

        checkActivateToken
        """
        pass
    def test_check_reset_token_using_get(self):
        """
        Test case for check_reset_token_using_get

        checkResetToken
        """
        pass
    def test_get_user_using_get(self):
        """
        Test case for get_user_using_get

        getUser
        """
        pass
    def test_request_reset_password_by_email_using_post(self):
        """
        Test case for request_reset_password_by_email_using_post

        requestResetPasswordByEmail
        """
        pass
    def test_reset_password_using_post(self):
        """
        Test case for reset_password_using_post

        resetPassword
        """
        pass
if __name__ == '__main__':
    # allow running the stubs directly: python test_auth_controller_api.py
    unittest.main()
| 21.172043 | 149 | 0.661249 | 1,366 | 0.693753 | 0 | 0 | 0 | 0 | 0 | 0 | 1,016 | 0.515998 |
390e452dfc5d623666ee9a6aa2a605d724a0f630 | 585 | py | Python | pylib/mps/util/push_util.py | xkmato/py77 | 9c44d8f8924f47a7331c29fd0287a4bb9416d316 | [
"MIT"
] | null | null | null | pylib/mps/util/push_util.py | xkmato/py77 | 9c44d8f8924f47a7331c29fd0287a4bb9416d316 | [
"MIT"
] | null | null | null | pylib/mps/util/push_util.py | xkmato/py77 | 9c44d8f8924f47a7331c29fd0287a4bb9416d316 | [
"MIT"
] | 2 | 2018-07-16T19:14:11.000Z | 2020-10-15T08:48:32.000Z | #!/usr/bin/env python
"""
A variety of push utility functions
"""
from pylib.util.git_util import GitUtil
__author__ = 'edelman@room77.com (Nicholas Edelman)'
__copyright__ = 'Copyright 2013 Room77, Inc.'
class PushUtil(object):
  """Helpers for naming push/deploy artifacts."""

  @classmethod
  def get_deployspec_name(cls, cluster_name):
    """Return the deployspec name for `cluster_name` on the current branch.

    The naming convention is $cluster-$current_branch.
    Args:
      cluster_name - the cluster name
    Returns:
      the deployspec name for the current branch and cluster
    """
    branch = GitUtil.get_current_branch()
    return '%s-%s' % (cluster_name, branch)
| 24.375 | 65 | 0.716239 | 374 | 0.639316 | 0 | 0 | 348 | 0.594872 | 0 | 0 | 358 | 0.611966 |
390ea59316bebdcf2ee6aecf82c4ccdade1f6444 | 1,899 | py | Python | shardingpy/parsing/lexer/dialect/mysql.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | [
"Apache-2.0"
] | 1 | 2021-01-29T13:29:29.000Z | 2021-01-29T13:29:29.000Z | shardingpy/parsing/lexer/dialect/mysql.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | [
"Apache-2.0"
] | null | null | null | shardingpy/parsing/lexer/dialect/mysql.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | [
"Apache-2.0"
] | null | null | null | import enum
from shardingpy.parsing.lexer import lexer
from shardingpy.parsing.lexer import token
class MySQLKeyword(enum.IntEnum):
    """MySQL-dialect keywords recognized by the lexer.

    The integer values are arbitrary unique ids used by the shared
    token.Dictionary; do not renumber them.
    """
    SHOW = 1
    DUAL = 2
    LIMIT = 3
    OFFSET = 4
    VALUE = 5
    BEGIN = 6
    FORCE = 7
    PARTITION = 8
    DISTINCTROW = 9
    KILL = 10
    QUICK = 11
    BINARY = 12
    CACHE = 13
    SQL_CACHE = 14
    SQL_NO_CACHE = 15
    SQL_SMALL_RESULT = 16
    SQL_BIG_RESULT = 17
    SQL_BUFFER_RESULT = 18
    SQL_CALC_FOUND_ROWS = 19
    LOW_PRIORITY = 20
    HIGH_PRIORITY = 21
    OPTIMIZE = 22
    ANALYZE = 23
    IGNORE = 24
    CHANGE = 25
    FIRST = 26
    SPATIAL = 27
    ALGORITHM = 28
    COLLATE = 29
    DISCARD = 30
    IMPORT = 31
    VALIDATION = 32
    REORGANIZE = 33
    EXCHANGE = 34
    REBUILD = 35
    REPAIR = 36
    REMOVE = 37
    UPGRADE = 38
    KEY_BLOCK_SIZE = 39
    AUTO_INCREMENT = 40
    AVG_ROW_LENGTH = 41
    CHECKSUM = 42
    COMPRESSION = 43
    CONNECTION = 44
    DIRECTORY = 45
    DELAY_KEY_WRITE = 46
    ENCRYPTION = 47
    ENGINE = 48
    INSERT_METHOD = 49
    MAX_ROWS = 50
    MIN_ROWS = 51
    PACK_KEYS = 52
    ROW_FORMAT = 53
    DYNAMIC = 54
    FIXED = 55
    COMPRESSED = 56
    REDUNDANT = 57
    COMPACT = 58
    STATS_AUTO_RECALC = 59
    STATS_PERSISTENT = 60
    STATS_SAMPLE_PAGES = 61
    DISK = 62
    MEMORY = 63
    ROLLUP = 64
    RESTRICT = 65
    STRAIGHT_JOIN = 66
    REGEXP = 67
class MySQLLexer(lexer.Lexer):
    """Lexer for the MySQL SQL dialect."""

    # token dictionary extended with the MySQL-specific keywords
    dictionary = token.Dictionary(MySQLKeyword)

    def __init__(self, sql):
        super().__init__(sql, MySQLLexer.dictionary)

    def is_hint_begin(self):
        """MySQL optimizer hints open with '/*!'."""
        if self.get_current_char(0) != '/':
            return False
        if self.get_current_char(1) != '*':
            return False
        return self.get_current_char(2) == '!'

    def is_comment_begin(self):
        """MySQL additionally allows '#' line comments."""
        if self.get_current_char(0) == '#':
            return True
        return super().is_comment_begin()

    def is_variable_begin(self):
        """User/system variables open with '@'."""
        return '@' == self.get_current_char(0)
| 20.868132 | 118 | 0.61664 | 1,794 | 0.944708 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.007899 |
390eef8ac9197704b04a6094a11431c7d2503cdc | 1,937 | py | Python | Day6/6.py | thatguyandy27/AdventOfCode2021 | 90c4c27a7a9ec91844c8bf7d17d62586d3ec1913 | [
"Apache-2.0"
] | null | null | null | Day6/6.py | thatguyandy27/AdventOfCode2021 | 90c4c27a7a9ec91844c8bf7d17d62586d3ec1913 | [
"Apache-2.0"
] | null | null | null | Day6/6.py | thatguyandy27/AdventOfCode2021 | 90c4c27a7a9ec91844c8bf7d17d62586d3ec1913 | [
"Apache-2.0"
] | null | null | null | input = [1, 1, 1, 1, 1, 1, 1, 4, 1, 2, 1, 1, 4, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 3, 1, 1, 2, 1, 2, 1, 3, 3, 4, 1, 4, 1, 1, 3, 1, 1, 5, 1, 1, 1, 1, 4, 1, 1, 5, 1, 1, 1, 4, 1, 5, 1, 1, 1, 3, 1, 1, 5, 3, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 2, 4, 1, 1, 1, 1, 4, 1, 2, 2, 1, 1, 1, 3, 1, 2, 5, 1, 4, 1, 1, 1, 3, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 5, 1, 1, 1, 4, 1, 1, 5, 1, 1, 5, 3, 3, 5, 3, 1, 1,
1, 4, 1, 1, 1, 1, 1, 1, 5, 3, 1, 2, 1, 1, 1, 4, 1, 3, 1, 5, 1, 1, 2, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 4, 3, 2, 1, 2, 4, 1, 3, 1, 5, 1, 2, 1, 4, 1, 1, 1, 1, 1, 3, 1, 4, 1, 1, 1, 1, 3, 1, 3, 3, 1, 4, 3, 4, 1, 1, 1, 1, 5, 1, 3, 3, 2, 5, 3, 1, 1, 3, 1, 3, 1, 1, 1, 1, 4, 1, 1, 1, 1, 3, 1, 5, 1, 1, 1, 4, 4, 1, 1, 5, 5, 2, 4, 5, 1, 1, 1, 1, 5, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 3, 1, 1, 2, 1, 1]
new_num = 8  # timer value given to a newly spawned fish
reset_num = 6  # timer value a fish resets to after spawning
def getFishCounts(input):
    """Bucket the fish by internal timer value.

    :param input: iterable of timer values, each in 0..8
    :return: list of 9 counts; index i holds the number of fish with timer i
    """
    # timers range 0..8 (a newborn starts at 8); fixed size makes the
    # function self-contained instead of depending on the module global
    fishes = [0] * 9
    for timer in input:
        fishes[timer] += 1
    return fishes
def simDay(fishes):
    """Advance the lanternfish population by one day.

    :param fishes: list of 9 counts indexed by timer value
    :return: new list of 9 counts after one day
    """
    spawners = fishes[0]
    # every timer ticks down by one; each spawner produces a newborn at timer 8
    newFishes = fishes[1:] + [spawners]
    # spawning fish restart their own timer at 6
    # (the original comment said "back to 7", but the reset value is 6)
    newFishes[6] += spawners
    return newFishes
def runSim(input, days):
    """Simulate the population for `days` days and return the total count."""
    population = getFishCounts(input)
    for _ in range(days):
        population = simDay(population)
    return sum(population)
if __name__ == '__main__':
    # Part 1 asks for 80 days of simulation; part 2 asks for 256 days.
    isPart1 = False
    if isPart1:
        total = runSim(input, 80)
    else:
        total = runSim(input, 256)
    print('The answer is:', total)
| 33.396552 | 461 | 0.453278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.141456 |
3910105eb3747b233c26c9482e9ecf9e10eab188 | 6,067 | py | Python | xgds_core/util.py | xgds/xgds_core | 7c2f8d56ca56321f6a9331cda38b40b503fade04 | [
"Apache-2.0"
] | 1 | 2019-02-13T21:02:18.000Z | 2019-02-13T21:02:18.000Z | xgds_core/util.py | xgds/xgds_core | 7c2f8d56ca56321f6a9331cda38b40b503fade04 | [
"Apache-2.0"
] | 2 | 2020-07-16T02:51:17.000Z | 2021-05-06T23:34:15.000Z | xgds_core/util.py | xgds/xgds_core | 7c2f8d56ca56321f6a9331cda38b40b503fade04 | [
"Apache-2.0"
] | 1 | 2017-10-04T18:15:16.000Z | 2017-10-04T18:15:16.000Z | # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import os
import sys
import pytz
import datetime
import time
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from django.conf import settings
from django.core.cache import caches
import urlparse
import json
import requests
if settings.XGDS_CORE_REDIS:
from redisUtil import queueRedisData
from geocamUtil.datetimeJsonEncoder import DatetimeJsonEncoder
def get100Years():
    """Return the current time plus 100 years (a far-future timestamp)."""
    return timezone.now() + relativedelta(years=100)
def addPort(url, port, http=True):
    ''' Return `url` with `port` inserted into the netloc.

    When `http` is True the scheme is also forced to plain 'http'.
    A falsy port leaves the url untouched.
    '''
    if not port:
        return url
    parsed = urlparse.urlparse(url)
    updates = {'netloc': '{}:{}'.format(parsed.hostname, port)}
    if http:
        updates['scheme'] = 'http'
    return parsed._replace(**updates).geturl()
def callUrl(url, username, password, method='GET', data=None, shareSession=False):
    ''' WARNING If you are calling this a lot of times then you will be opening a new connection and instead it should
    be run outside with a pycroraptor service.'''
    if shareSession and settings.XGDS_CORE_REDIS:
        # POST THIS ON SHARED SESSION FOR REDIS
        payload = json.dumps({'url': url,
                              'username': username,
                              'password': password,
                              'method': method,
                              'data': data}, cls=DatetimeJsonEncoder)
        queueRedisData(settings.XGDS_CORE_REDIS_SESSION_MANAGER, payload)
        return
    # direct call: one fresh requests session per invocation
    session = requests.Session()
    if username:
        session.auth = (username, password)
    if method == 'POST':
        return session.post(url, data=data)
    if method == 'GET':
        return session.get(url)
    # any other method falls through and returns None, as before
def deletePostKey(post, theKey):
    """Best-effort removal of `theKey` from a Django QueryDict-like `post`.

    QueryDicts are immutable by default, so the `_mutable` flag is raised
    while the key is deleted and then restored.  Failures are swallowed so
    callers can treat this as optional cleanup; `post` is returned either way.
    """
    try:
        if theKey in post:
            mutable = post._mutable
            post._mutable = True
            del post[theKey]
            post._mutable = mutable
    except (AttributeError, KeyError, TypeError):
        # narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; the intent is only to tolerate
        # objects that do not behave like a QueryDict
        pass
    return post
def insertIntoPath(original, insertion='rest'):
    """Insert `insertion` as a new segment after the first path segment.

    insertIntoPath('/my/original/path', 'rest') -> '/my/rest/original/path'
    Raises ValueError when `original` has no second '/'.
    """
    cut = original.index('/', 1)
    return '%s/%s%s' % (original[:cut], insertion, original[cut:])
def get_all_subclasses(the_class, check_meta_abstract=True, top=True):
    """
    Return all transitive subclasses of the given class.
    :param the_class: parent class
    :param check_meta_abstract: False to skip filtering out abstract Django models
    :param top: True for the outermost call; recursive calls pass False
    :return: list of subclasses sorted by module and class name
    """
    kids = the_class.__subclasses__()
    result = set(kids).union(
        [s for c in kids for s in get_all_subclasses(c, check_meta_abstract, False)])
    if top and check_meta_abstract:
        # only the outermost call filters out abstract Django models
        result = [k for k in result if not k._meta.abstract]
    # BUGFIX: classes are not orderable in Python 3, so sorted(result)
    # raised TypeError; sort by a deterministic string key instead
    return sorted(result, key=lambda k: (k.__module__, k.__name__))
def build_relative_path(full_path, prefix='/', split_on='/data/'):
    """
    Convert a full filesystem path into a path relative to the data directory,
    e.g. '/full/path/to/data/my/file' -> '/data/my/file'.
    :param full_path: the original full path to a file
    :param prefix: the prefix of the result
    :param split_on: the separator to split on, included in the result
    :return: the relative path
    """
    tail = full_path.split(split_on)[-1]
    return os.path.join(prefix, split_on, tail)
def get_file_size(input_file):
    """
    Return the size in bytes of an open, seekable file.

    The caller's read position is saved and restored — now in a
    try/finally so it is restored even if the end-seek fails.
    :param input_file: open file-like object supporting seek/tell
    :return: size in bytes
    """
    old_file_position = input_file.tell()
    try:
        input_file.seek(0, os.SEEK_END)
        return input_file.tell()
    finally:
        # always put the cursor back where the caller left it
        input_file.seek(old_file_position, os.SEEK_SET)
def persist_error(error, stacktrace=None):
    """Persist an error record in the default cache, keyed by process name.

    The record stores a unix timestamp, a human-readable error string and
    the (stringified) stacktrace.  The key is also registered in the
    cached 'error_keys' list so all errors can be enumerated later.
    """
    cache = caches['default']
    # current list of error keys, defaulting to an empty list
    error_keys = cache.get('error_keys', [])
    # the current process name identifies this error
    key = os.path.basename(sys.argv[0])
    if isinstance(error, Exception):
        description = '%s (%s)' % (type(error).__name__, error)
    else:
        description = str(error)
    cache.set(key, {
        'timestamp': int(time.time()),
        'error': str(description),
        'stacktrace': str(stacktrace),
    })
    # register the key if it is new
    if key not in error_keys:
        error_keys.append(key)
        cache.set('error_keys', error_keys)
def get_persisted_error_keys():
    """Return the list of process names that currently have a persisted error."""
    cache = caches['default']
    return cache.get('error_keys', [])
def get_persisted_error(key):
    """Return the persisted error record for `key`, or None if absent."""
    cache = caches['default']
    return cache.get(key)
def get_persisted_errors():
    """Return a dict mapping each persisted key to its error record."""
    return {key: get_persisted_error(key) for key in get_persisted_error_keys()}
def delete_persisted_error(key):
    """Remove a persisted error and de-register its key from 'error_keys'."""
    cache = caches['default']
    cache.delete(key)
    keys = cache.get('error_keys', [])
    if key in keys:
        keys.remove(key)
        cache.set('error_keys', keys)
| 31.273196 | 118 | 0.670513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,294 | 0.378111 |
391022300e688e6c5ab4b77b3a3105685361a314 | 732 | py | Python | src/pycrunchbase/__init__.py | ngzhian/pycrunchbase | 4dbe65d6fc07ce89334b7bf142342b90f29df64b | [
"MIT"
] | 67 | 2015-02-15T03:02:00.000Z | 2021-07-04T02:12:29.000Z | src/pycrunchbase/__init__.py | ngzhian/pycrunchbase | 4dbe65d6fc07ce89334b7bf142342b90f29df64b | [
"MIT"
] | 29 | 2015-02-16T02:04:50.000Z | 2020-12-02T18:06:17.000Z | src/pycrunchbase/__init__.py | ngzhian/pycrunchbase | 4dbe65d6fc07ce89334b7bf142342b90f29df64b | [
"MIT"
] | 44 | 2015-02-26T05:43:10.000Z | 2020-12-02T02:11:39.000Z | from .pycrunchbase import (
CrunchBase,
)
from .resource import (
Acquisition,
Address,
Category,
Degree,
FundingRound,
Fund,
Image,
Investment,
IPO,
Job,
Location,
News,
Organization,
Page,
PageItem,
Person,
Product,
Relationship,
StockExchange,
Video,
Website,
)
__version__ = "0.3.9"
# Public API of the pycrunchbase package, re-exported from the submodules above.
__all__ = [
    'Acquisition',
    'Address',
    'Category',
    'Degree',
    'FundingRound',
    'Fund',
    'Image',
    'Investment',
    'IPO',
    'Job',
    'Location',
    'News',
    'Organization',
    'Page',
    'PageItem',
    'Person',
    'Product',
    'Relationship',
    'StockExchange',
    'Video',
    'Website',
    'CrunchBase'
]
| 13.309091 | 27 | 0.534153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.295082 |
39108aaa5f16646a3ed5c0e0afaec3e5dff388ad | 107 | py | Python | dropconnect_tensorflow/__init__.py | AryaAftab/dropconnect-tensorflow | 648db31e8d60b4de4bf6e37e5a18e2b220ac1616 | [
"MIT"
] | 2 | 2021-08-31T15:51:55.000Z | 2021-10-18T07:19:19.000Z | dropconnect_tensorflow/__init__.py | AryaAftab/dropconnect-tensorflow | 648db31e8d60b4de4bf6e37e5a18e2b220ac1616 | [
"MIT"
] | null | null | null | dropconnect_tensorflow/__init__.py | AryaAftab/dropconnect-tensorflow | 648db31e8d60b4de4bf6e37e5a18e2b220ac1616 | [
"MIT"
] | null | null | null | from dropconnect_tensorflow.dropconnect_tensorflow import DropConnectDense, DropConnectConv2D, DropConnect
| 53.5 | 106 | 0.915888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
391330adf74bdbec8290fd413925031e57ebc29a | 202 | py | Python | venv/Lib/site-packages/Database/es/es_utils.py | jhonniel/Queuing-python | 1b117dc7e4b3274b2f8fe72cce4beea363f563ef | [
"MIT"
] | null | null | null | venv/Lib/site-packages/Database/es/es_utils.py | jhonniel/Queuing-python | 1b117dc7e4b3274b2f8fe72cce4beea363f563ef | [
"MIT"
] | null | null | null | venv/Lib/site-packages/Database/es/es_utils.py | jhonniel/Queuing-python | 1b117dc7e4b3274b2f8fe72cce4beea363f563ef | [
"MIT"
] | null | null | null | def none_check(value):
if value is None:
return False
else:
return True
def is_empty(any_type_value):
    """Return True when the value is falsy (empty container/string, 0, None)."""
    return not any_type_value
| 15.538462 | 29 | 0.60396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3915f2af8263e59a99c7a8c508220c1dd51ea0ed | 995 | py | Python | hstrat/test/test_helpers/test_is_nonincreasing.py | mmore500/hstrat | 7fedcf3a7203e1e6c99ac16f4ec43ad160da3e6c | [
"MIT"
] | null | null | null | hstrat/test/test_helpers/test_is_nonincreasing.py | mmore500/hstrat | 7fedcf3a7203e1e6c99ac16f4ec43ad160da3e6c | [
"MIT"
] | 3 | 2022-02-28T17:33:57.000Z | 2022-02-28T21:41:33.000Z | hstrat/test/test_helpers/test_is_nonincreasing.py | mmore500/hstrat | 7fedcf3a7203e1e6c99ac16f4ec43ad160da3e6c | [
"MIT"
] | null | null | null | import unittest
from hstrat.helpers import is_nonincreasing
class TestIsNondecreasing(unittest.TestCase):
    """Unit tests for the is_nonincreasing helper."""
    # NOTE(review): the class name says "Nondecreasing" but every assertion
    # exercises is_nonincreasing — presumably copied from a sibling test
    # module; confirm and consider renaming.
    # tests can run independently
    _multiprocess_can_split_ = True
    def test_empty(self):
        # an empty sequence is trivially non-increasing
        assert is_nonincreasing([])
    def test_singleton(self):
        # a single element is trivially non-increasing
        assert is_nonincreasing(['a'])
        assert is_nonincreasing([0])
        assert is_nonincreasing([1])
    def test_nondecreasing(self):
        # reversing a non-decreasing sequence yields a non-increasing one
        assert is_nonincreasing(reversed([
            *range(10),
        ]))
        assert is_nonincreasing(reversed([
            0,
            *range(10),
        ]))
        assert is_nonincreasing(reversed([
            0,
            0,
            *range(10),
            *range(9,18),
        ]))
    def test_decreasing(self):
        # sequences that increase anywhere must be rejected
        assert not is_nonincreasing([
            -1,
            0,
        ])
        assert not is_nonincreasing(reversed([
            *range(10),
            *range(2),
        ]))
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
| 22.111111 | 46 | 0.548744 | 884 | 0.888442 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.042211 |
391607816e23b937bc5c359e015ede93c028afbe | 8,984 | py | Python | HW1/HW1.py | hsuan81/2020spring_NTNU_IR | 72203bffd4595a6f435934bd80224d6726721223 | [
"MIT"
] | null | null | null | HW1/HW1.py | hsuan81/2020spring_NTNU_IR | 72203bffd4595a6f435934bd80224d6726721223 | [
"MIT"
] | null | null | null | HW1/HW1.py | hsuan81/2020spring_NTNU_IR | 72203bffd4595a6f435934bd80224d6726721223 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from docx import Document
from docx.shared import Cm
import math
def split_file(file):
    """Split the file by different queries into separate list elements and return one list as a whole.

    Blank lines delimit queries; only lines starting with 'VOM' are kept.
    """
    queries = [[]]
    blank_count = 0
    for line in file:
        if line == "\n":
            blank_count += 1
            # the answer list only needs 15 additional empty sublists
            if blank_count != 16:
                queries.append([])
        elif line.startswith("VOM"):
            queries[blank_count].append(line)
    return queries
def calculate(retrieve, order, relevant_number):
    """Return (recall, precision), each rounded to 4 decimals, for the
    current number of retrieved relevant items."""
    return (
        round(retrieve / relevant_number, 4),
        round(retrieve / order, 4),
    )
def recall_interval(recall):
    """Map a recall value (0-1) onto one of 11 equal buckets: 0, 10, ..., 100."""
    return 10 * ((recall * 10) // 1)
def compare_interpolate(recall_area):
    """
    Interpolate precision across the 11 recall buckets, from 100 down to 0.

    Each bucket of ``recall_area`` must hold (precision, recall) tuples
    sorted in descending order.  Empty buckets -- and buckets whose best
    precision is below the previous (higher-recall) value -- inherit that
    previous precision.  Returns (precisions, recalls), both in percent.
    """
    precisions = []
    recalls = []
    for bucket in range(100, -1, -10):
        recalls.append(bucket)
        entries = recall_area[bucket]
        if not entries:
            # no precision measured in this bucket: reuse the higher bucket's value
            precisions.append(precisions[-1])
            continue
        best = entries[0][0] * 100
        if bucket != 100 and best < precisions[-1]:
            # interpolate: never let precision drop as recall decreases
            precisions.append(precisions[-1])
        else:
            precisions.append(best)
    return precisions, recalls
def mean_average_precision(answer_set, relevant_set):
    """Average the precision measured at each relevant hit in the ranking.

    Documents match on the first 21 characters of their identifiers; the
    sum of per-hit precisions is divided by the total number of relevant
    documents.
    """
    relevant_number = len(relevant_set)
    hits = 0
    precision_sum = 0
    for rank, answer in enumerate(answer_set, start=1):
        for relevant in relevant_set:
            if answer[:21] == relevant[:21]:
                hits += 1
                # precision at this rank, rounded as in calculate()
                precision_sum += round(hits / rank, 4)
        if hits > relevant_number:
            break
    return precision_sum / relevant_number
def interpolate(answer_set, relevant_set):
    """Compute the interpolated recall-precision curve for one query.

    Walks the ranked ``answer_set``, records (precision, recall) at every
    relevant hit into 11 recall buckets, then interpolates precision per
    bucket.  Returns (recalls, precisions), both in percent.

    Fix: the original called compare_interpolate(), threw the result away,
    and re-ran an identical inline copy of its loop; it also accumulated
    unused ``r``/``p`` lists.  The dead duplication is removed -- the
    returned values are unchanged.
    """
    order = 0
    retrieve = 0
    recall_area = {0:[], 10:[], 20:[], 30:[], 40:[], 50:[], 60:[], 70:[], 80:[], 90:[], 100:[]}
    relevant_number = len(relevant_set)
    for i in range(len(answer_set)):
        order += 1
        for j in relevant_set:
            # documents match on the first 21 characters of their identifiers
            if answer_set[i][:21] == j[:21]:
                retrieve += 1
                recall, precision = calculate(retrieve, order, relevant_number)
                recall_area[recall_interval(recall)].append((precision, recall))
        if retrieve > len(relevant_set):
            break
    # sort each bucket descending so the best precision is first
    for bucket in recall_area.values():
        bucket.sort(reverse=True)
    final_p, final_r = compare_interpolate(recall_area)
    return final_r, final_p
with open('HW1_ResultsTrainSet.txt', 'r') as answer_set:
answer = split_file(answer_set)
with open('HW1_AssessmentTrainSet.txt', 'r') as relevant_set:
relevance = split_file(relevant_set)
total_precision = {x:[] for x in range(100, -1, -10)}
for i in range(16):
r, p = interpolate(answer[i], relevance[i])
for i in r:
total_precision[i].append(p[r.index(i)])
final_precision = []
final_recall = [x for x in range(100, -1, -10)]
for i in total_precision.values():
sum = 0
for j in i:
sum += j
result = sum/16
if final_precision != [] and result < final_precision[-1]:
# interpolate if the max precision is smaller than the larger interval
final_precision.append(final_precision[-1])
else:
final_precision.append(result)
plt.plot(final_recall, final_precision, marker = ".")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Interpolated Recall-Precision Curve", loc = 'center')
#set x, y axis to fixed range
plt.axis([0,100,0,100])
plt.savefig("interpolate.png")
plt.close("interpolate.png")
plt.clf()
plt.cla()
# calculate map
total_ap = 0
for i in range(16):
mAP = mean_average_precision(answer[i], relevance[i])
total_ap += mAP
total_map = round(total_ap/16, 8)
map_answer = "mAP = " + str(total_map)
print(map_answer)
# calculate dcg
# score range is 0-3 and equally distributed among the relevant set
def assign_score(relevant_set):
    """Assign a relevance score (3, 2 or 1) to each relevant element in
    descending order and return the score list.

    The list is split into three roughly equal sections; the last section
    absorbs the remainder.

    Fix: the original read the module-level global ``relevance[0]`` instead
    of its ``relevant_set`` parameter, so the argument was silently ignored.
    """
    section = len(relevant_set) // 3
    score = []
    for s in (3, 2, 1):
        if s == 1:
            # last section takes whatever elements remain
            score.extend([s] * (len(relevant_set) - len(score)))
        else:
            score.extend([s] * section)
    return score
def gain(answer_set, relevant_set, score_list):
    """Build the gain list for a ranked answer set.

    Each answer contributes the score of its matching relevant document
    (matched on the first 21 identifier characters, newline-terminated),
    or 0 when it is not relevant.

    Fix: removed the unused ``order_list`` accumulator and the unused
    ``c = rank_list.count(0)`` local from the original.
    """
    rank_list = []
    for answer in answer_set:
        item = answer[:21] + "\n"
        if item in relevant_set:
            rank_list.append(score_list[relevant_set.index(item)])
        else:
            rank_list.append(0)
    return rank_list
def cumulative_gain(rank_list):
    """Return the running (cumulative) sum of the gain list."""
    running = rank_list[0]
    gains = [running]
    for score in rank_list[1:]:
        running += score
        gains.append(running)
    return gains
def discounted_cumulative_gain(rank_list):
    """Return the DCG list: the first gain is undiscounted, each later
    gain at 0-based position i is divided by log2(i + 1) and accumulated."""
    dcg = [rank_list[0]]
    for position in range(1, len(rank_list)):
        dcg.append(dcg[-1] + rank_list[position] / math.log2(position + 1))
    return dcg
def ideal_dcg(score, answer_set_number):
    """Calculate the ideal DCG of a descending score list padded with zeros
    up to ``answer_set_number`` entries.

    Fix: the original aliased the caller's list (``ideal_set = score``) and
    extended it in place, so repeated calls (e.g. from normalized_dcg's
    per-query loop) kept growing the shared ``score`` list.  A copy is
    padded instead; the caller's list is no longer mutated.
    """
    ideal_set = list(score)
    # negative padding counts produce an empty extension, as before
    ideal_set.extend([0] * (answer_set_number - len(ideal_set)))
    return discounted_cumulative_gain(ideal_set)
def normalized_dcg(query_number, answer_set, relevant_set, score, rank_list):
    """Calculate normalized discounted cumulative gain of various queries and return a list.

    NOTE(review): the same ``rank_list`` is reused for every query, so each
    query's DCG is identical, and ``relevant_set`` is never read -- confirm
    whether per-query rank lists were intended.  Also note ideal_dcg extends
    ``score`` in place, so later iterations see a longer list.
    """
    total_dcg = []
    total_idcg = []
    for i in range(query_number):
        dcg = discounted_cumulative_gain(rank_list)
        total_dcg.append(dcg)
        # ideal DCG padded to this query's answer-set length
        idcg = ideal_dcg(score, len(answer_set[i]))
        total_idcg.append(idcg)
    final_idcg = 0
    final_dcg = 0
    total_ndcg = []
    # accumulate across queries at each rank position, then normalize
    for i in range(len(answer_set[0])):
        for j in range(query_number):
            final_idcg += total_idcg[j][i]
            final_dcg += total_dcg[j][i]
        ndcg = final_dcg / final_idcg
        total_ndcg.append(ndcg)
    return total_ndcg
score = assign_score(relevance[0])
rank = gain(answer[0], relevance[0], score)
cg = cumulative_gain(rank)
discounted_cumulative_gain(rank)
ndcg = normalized_dcg(16, answer, relevance, score, rank)
plt.plot(ndcg, 'g')
plt.xlabel("Answer Set")
plt.title("Normalized Discounted Cumulated Gain", loc = 'center')
plt.axis([0, 2500, 0 , 1])
plt.savefig("NDCG.png")
# combine graph and answer into one document
document = Document()
document.add_heading('Information Retrieval HW1', 0)
p1 = document.add_paragraph('Interpolated precision recall curve', style = 'List Number')
document.add_picture('interpolate.png', width=Cm(12))
p2 = document.add_paragraph('Mean average precision\n', style = 'List Number')
p2.add_run(map_answer)
p3 = document.add_paragraph('Normalized discounted cumulated gain', style = 'List Number')
document.add_picture('NDCG.png', width=Cm(12))
document.save('90899703Y_HW1.docx')
| 31.412587 | 107 | 0.628896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,241 | 0.249443 |
3916b6e22cde8396683b52bfab98fb85a404ecf8 | 2,745 | py | Python | flowder/utils.py | amir-khakshour/flowder | d4560b2faea300bb2a4815f13815085215ef7019 | [
"MIT"
] | 3 | 2016-06-20T18:33:18.000Z | 2019-11-10T11:52:27.000Z | flowder/utils.py | amir-khakshour/flowder | d4560b2faea300bb2a4815f13815085215ef7019 | [
"MIT"
] | null | null | null | flowder/utils.py | amir-khakshour/flowder | d4560b2faea300bb2a4815f13815085215ef7019 | [
"MIT"
] | null | null | null | import csv
import re
import netifaces as ni
from twisted.internet import defer
from twisted.names import client
from pygear.logging import log
from pygear.core.six.moves.urllib.parse import urlparse, urljoin
from .interfaces import ITaskStorage
csv.register_dialect('pipes', delimiter='|')
client_callback_schemes = ['http', 'https']
default_scheme = 'http'
client_scheme_re = re.compile(r'^(%s)' % '|'.join(client_callback_schemes))
ip_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
ip_scheme_re = re.compile(r"^(%s)://(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" % '|'.join(client_callback_schemes))
def get_tasks_db(config, app):
    """Return the ITaskStorage component registered on the Twisted app.

    ``config`` is accepted for call-site symmetry but is currently unused.
    """
    return app.getComponent(ITaskStorage)
def prepare_url(url):
    """Return *url* with the default scheme prepended when it has none.

    Fix: the original concatenated the scheme without the '://' separator,
    turning 'example.com' into 'httpexample.com', which then fails to parse
    as a URL with a netloc.
    """
    if not client_scheme_re.match(url):
        url = default_scheme + '://' + url
    return url
def get_interface_ip(eth):
    """Return the first IPv4 address assigned to network interface *eth*."""
    eth = ni.ifaddresses(eth)
    # 2 == netifaces.AF_INET; take the first address entry on the interface
    return eth[2][0]['addr']
@defer.inlineCallbacks
def parse_clients_list(file_path):
    """Read the pipe-delimited trusted-clients file and resolve hosts to IPs.

    Each line is ``host|user|pass``.  Yields (via Deferred) a dict mapping
    resolved IP -> {'host', 'user', 'pass'}; a missing file yields an empty
    dict.  NOTE(review): the file handle opened below is never closed.
    """
    trusted_clients = None
    # @TODO create a service to read trusted clients from DB
    try:
        trusted_clients = open(file_path, 'r').readlines()
        trusted_clients = map(lambda c: c.replace('\n', ''), trusted_clients)
    except IOError:
        _clients = []
        log.warn("Trusted clinets list not found.")
    clients_list = {}
    if trusted_clients:
        for row in csv.reader(trusted_clients, dialect='pipes', quotechar='!'):
            _host, _user, _pass = row
            if ip_re.match(_host):
                # already a bare IPv4 address
                _ip = _host
            else:
                _host = prepare_url(_host)
                parsed_url = urlparse(_host)
                # asynchronous DNS lookup via Twisted; yields the IP string
                _ip = yield client.getHostByName(parsed_url.netloc)
            clients_list[_ip] = {'host': _host, 'user': _user, 'pass': _pass}
    defer.returnValue(clients_list)
def get_callback_auth_details(url, trusted_clients):
    """Look up the (user, pass) pair for *url* in the trusted-clients map.

    Returns None when the URL's IP is not a trusted client.
    """
    match = ip_scheme_re.match(url)
    if not match or len(match.groups()) < 2:
        # NOTE(review): twisted.names client.getHostByName returns a
        # Deferred, not a string, so this branch can never equal a raw
        # client IP below -- confirm intended behavior.
        ip = client.getHostByName(url)
    else:
        scheme, ip = match.groups()
    # NOTE(review): dict.iteritems() is Python 2 only; use .items() on Python 3.
    for client_ip, details in trusted_clients.iteritems():
        if ip == client_ip:
            return details['user'], details['pass']
    return None
def get_serve_uri(config):
    """Build the base URL under which static files are served.

    Combines the private-interface IP, the REST port and the configured
    static-serve path into e.g. 'http://10.0.0.1:4000/files/'.
    """
    port = config.getint('rest_port', 4000)
    interface = config.get('eth', 'eth1')  # private IP interface
    host = 'http://%s' % (get_interface_ip(interface))
    static_path = config.get('static_serve_path', 'files')
    if host.endswith('/'):
        host = host[:-1]
    base_url = '{0}:{1}/'.format(host, port)
    if not static_path.endswith('/'):
        static_path += '/'
    return urljoin(base_url, static_path)
def get_file_path(filename, base_path):
    """Placeholder: resolve *filename* under *base_path*.  Not implemented yet."""
    pass
3916c5cd7ecc9f09a6d7a100bb034bad85ff33d8 | 6,437 | py | Python | neuralparticles/scripts/hyper_search.py | senliontec/NeuralParticles | 8ede22bfb43e60be175b9cef19045c1c7b1ffb73 | [
"MIT"
] | null | null | null | neuralparticles/scripts/hyper_search.py | senliontec/NeuralParticles | 8ede22bfb43e60be175b9cef19045c1c7b1ffb73 | [
"MIT"
] | null | null | null | neuralparticles/scripts/hyper_search.py | senliontec/NeuralParticles | 8ede22bfb43e60be175b9cef19045c1c7b1ffb73 | [
"MIT"
] | null | null | null | import os
import json
import math
from neuralparticles.tensorflow.tools.hyper_parameter import HyperParameter, ValueType, SearchType
from neuralparticles.tensorflow.tools.hyper_search import HyperSearch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import keras
from neuralparticles.tensorflow.models.PUNet import PUNet
from neuralparticles.tools.param_helpers import *
from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles
from neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback
import numpy as np
data_path = getParam("data", "data/")
config_path = getParam("config", "config/version_00.txt")
verbose = int(getParam("verbose", 0)) != 0
gpu = getParam("gpu", "")
epochs = int(getParam("epochs", 0))
eval_cnt = int(getParam("eval_cnt", 5))
eval_dataset = getParam("eval_d", []) #'18,18,18,19,19'
eval_t = getParam("eval_t", []) #'5,5,6,6,7'
eval_var = getParam("eval_v", []) #'0,0,0,0,0'
eval_patch_idx = getParam("eval_i", []) #'11,77,16,21,45'
if len(eval_dataset) > 0:
eval_dataset = list(map(int, eval_dataset.split(',')))
if len(eval_t) > 0:
eval_t = list(map(int, eval_t.split(',')))
if len(eval_var) > 0:
eval_var = list(map(int, eval_var.split(',')))
if len(eval_patch_idx) > 0:
eval_patch_idx = list(map(float, eval_patch_idx.split(',')))
i=0
hyper_teams = []
while(True):
hyper_par = getParam("hyper%d"%i, None)
i += 1
if hyper_par is None:
break
else:
hyper_teams.append(HyperParameter.parse(hyper_par))
checkUnusedParams()
src_path = data_path + "patches/source/"
ref_path = data_path + "patches/reference/"
model_path = data_path + "models/"
if not os.path.exists(model_path):
os.mkdir(model_path)
tmp_folder = backupSources(data_path)
tmp_model_path = tmp_folder + "models/"
os.mkdir(tmp_model_path)
tmp_eval_path = tmp_folder + "eval/"
os.mkdir(tmp_eval_path)
# Only restrict CUDA devices when a GPU id was given on the command line.
# Fix: the original used `not gpu is ""` -- an identity comparison with a
# string literal (a SyntaxWarning since Python 3.8, and unreliable because
# equal strings need not be the same object); equality is intended.
if gpu != "":
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
with open(config_path, 'r') as f:
config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['data'], 'r') as f:
data_config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['preprocess'], 'r') as f:
pre_config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['train'], 'r') as f:
train_config = json.loads(f.read())
if verbose:
print("Config Loaded:")
print(config)
print(data_config)
print(pre_config)
print(train_config)
# copy config files into tmp
np.random.seed(data_config['seed'])
#tf.set_random_seed(data_config['seed'])
if epochs == 0:
epochs = train_config['epochs']
config_dict = {**data_config, **pre_config, **train_config}
punet = PUNet(**config_dict)
if len(eval_dataset) < eval_cnt:
eval_dataset.extend(np.random.randint(int(data_config['data_count'] * train_config['train_split']), data_config['data_count'], eval_cnt-len(eval_dataset)))
if len(eval_t) < eval_cnt:
t_start = min(train_config['t_start'], data_config['frame_count']-1)
t_end = min(train_config['t_end'], data_config['frame_count'])
eval_t.extend(np.random.randint(t_start, t_end, eval_cnt-len(eval_t)))
if len(eval_var) < eval_cnt:
eval_var.extend([0]*(eval_cnt-len(eval_var)))
if len(eval_patch_idx) < eval_cnt:
eval_patch_idx.extend(np.random.random(eval_cnt-len(eval_patch_idx)))
tmp_model_path = '%s%s_%s' % (tmp_model_path, data_config['prefix'], config['id'])
fig_path = '%s_loss' % tmp_model_path
src_path = "%s%s_%s-%s" % (src_path, data_config['prefix'], data_config['id'], pre_config['id']) + "_d%03d_var%02d_pvar%02d_%03d"
ref_path = "%s%s_%s-%s" % (ref_path, data_config['prefix'], data_config['id'], pre_config['id']) + "_d%03d_var%02d_pvar%02d_%03d"
print(src_path)
print(ref_path)
print("Load Training Data")
src_data, ref_data = load_patches_from_file(data_path, config_path)
idx = np.arange(src_data[0].shape[0])
np.random.shuffle(idx)
src_data = [s[idx] for s in src_data]
ref_data = ref_data[idx]
print("Load Eval Data")
factor_d = math.pow(pre_config['factor'], 1/data_config['dim'])
patch_size = pre_config['patch_size'] * data_config['res'] / factor_d
patch_size_ref = pre_config['patch_size_ref'] * data_config['res']
eval_patch_extractors = []
eval_ref_datas = []
eval_src_patches = []
eval_ref_patches = []
for i in range(len(eval_dataset)):
(eval_src_data, eval_sdf_data, eval_par_aux), (eval_ref_data, eval_ref_sdf_data) = get_data_pair(data_path, config_path, eval_dataset[i], eval_t[i], eval_var[i])
eval_ref_datas.append(eval_ref_data)
np.random.seed(100)
eval_patch_extractors.append(PatchExtractor(eval_src_data, eval_sdf_data, patch_size, pre_config['par_cnt'], pre_config['surf'], pre_config['stride'], aux_data=eval_par_aux, features=train_config['features'], pad_val=pre_config['pad_val'], bnd=data_config['bnd']/factor_d))
p_idx = int(eval_patch_idx[i] * len(eval_patch_extractors[i].positions))
eval_src_patches.append(eval_patch_extractors[i].get_patch(p_idx,False))
eval_ref_patches.append(extract_particles(eval_ref_data, eval_patch_extractors[i].positions[p_idx] * factor_d, pre_config['par_cnt_ref'], patch_size_ref/2, pre_config['pad_val'])[0])
print("Eval with dataset %d, timestep %d, var %d, patch idx %d" % (eval_dataset[i], eval_t[i], eval_var[i], p_idx))
print("Eval trunc src: %d" % (np.count_nonzero(eval_src_patches[i][0][:,:,:1] != pre_config['pad_val'])))
print("Eval trunc ref: %d" % (np.count_nonzero(eval_ref_patches[i][:,:1] != pre_config['pad_val'])))
config_dict['src'] = src_data
config_dict['ref'] = ref_data
config_dict['callbacks'] = [(EvalCallback(tmp_eval_path + "eval_patch", eval_src_patches, eval_ref_patches,
train_config['features'], multiple_runs=True, z=None if data_config['dim'] == 2 else 0, verbose=1)),
(EvalCompleteCallback(tmp_eval_path + "eval", eval_patch_extractors, eval_ref_datas,
factor_d, data_config['res'], multiple_runs=True, z=None if data_config['dim'] == 2 else data_config['res']//2, verbose=1))]
hs = HyperSearch(punet, hyper_teams, output_folder=tmp_folder)
del config_dict['epochs']
history = hs.search(epochs, **config_dict)
keras.utils.plot_model(punet.model, tmp_model_path + '.pdf')
| 38.088757 | 277 | 0.71291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,003 | 0.155818 |
3919d5f72e3810fe88a92ba0a040bb6448088b25 | 413 | py | Python | app.py | ranjankoirala1998/sms-with-twilio-api | c3fd8b002b47eafc4e6092078a74e294941e87e8 | [
"MIT"
] | null | null | null | app.py | ranjankoirala1998/sms-with-twilio-api | c3fd8b002b47eafc4e6092078a74e294941e87e8 | [
"MIT"
] | null | null | null | app.py | ranjankoirala1998/sms-with-twilio-api | c3fd8b002b47eafc4e6092078a74e294941e87e8 | [
"MIT"
] | null | null | null | from twilio.rest import Client
from scrapper import get_body
from os import environ
account_sid = environ['ACCOUNT_SID']
auth_token = environ['AUTH_TOKEN']
phone_num = '+9779862074364'
def send_sms():
    """Send the scraped message body to ``phone_num`` via the Twilio API.

    Uses the module-level ``account_sid`` / ``auth_token`` read from the
    environment and a hard-coded Twilio sender number.
    """
    client = Client(account_sid, auth_token)
    sms = client.messages.create(
        from_= '+17725772292',
        body = get_body(),
        to = phone_num
    )
print("Executed Successfully!!") | 18.772727 | 44 | 0.677966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.193705 |
391a5ba570f3e424763ecfe5d6242c832be51961 | 575 | py | Python | app/jwt.py | smolveau/Simple-Flask-Web-App-CI-CD | 99d0459cbcbbc8726968d7d191226fbdab46445e | [
"MIT"
] | null | null | null | app/jwt.py | smolveau/Simple-Flask-Web-App-CI-CD | 99d0459cbcbbc8726968d7d191226fbdab46445e | [
"MIT"
] | null | null | null | app/jwt.py | smolveau/Simple-Flask-Web-App-CI-CD | 99d0459cbcbbc8726968d7d191226fbdab46445e | [
"MIT"
] | null | null | null | # app/jwt.py
from os import environ as env
from itsdangerous import (
TimedJSONWebSignatureSerializer as Serializer,
BadSignature,
SignatureExpired,
)
def generate_jwt(claims, expiration=172800):
    """Serialize *claims* into a signed, expiring JWT string.

    Signs with SECRET_KEY from the environment; the default expiration is
    172800 seconds (48 hours).
    """
    s = Serializer(env.get("SECRET_KEY"), expires_in=expiration)
    return s.dumps(claims).decode("utf-8")
def load_jwt(token):
    """Decode and verify *token*, returning its claims.

    Raises a generic ``Exception`` wrapping the itsdangerous error when the
    token is expired or its signature is invalid (unchanged contract).

    Fix: the two except clauses were byte-identical, and SignatureExpired
    is a subclass of BadSignature, so a single handler covers both; the
    original error is chained as the cause for better tracebacks.
    """
    s = Serializer(env.get("SECRET_KEY"))
    try:
        data = s.loads(token)
    except (SignatureExpired, BadSignature) as err:
        raise Exception(str(err)) from err
    return data
| 23 | 64 | 0.692174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.074783 |
391b5631270f881e44d8df135add12b1dee89004 | 1,918 | py | Python | src/mblib/httpclient.py | odra/mbctl | 39f40842c9e62bc59162a14ea13e17b212967067 | [
"MIT"
] | null | null | null | src/mblib/httpclient.py | odra/mbctl | 39f40842c9e62bc59162a14ea13e17b212967067 | [
"MIT"
] | null | null | null | src/mblib/httpclient.py | odra/mbctl | 39f40842c9e62bc59162a14ea13e17b212967067 | [
"MIT"
] | null | null | null | """
HTTP client module.
"""
import requests
from requests.auth import HTTPBasicAuth
from requests_kerberos import HTTPKerberosAuth
from . import errors
class KRBAuth:
"""
Kerberos authenticaton type.
"""
principal = None
hostname_override = None
def __init__(self, principal=None, hostname_override=None):
self.principal = principal
self.hostname_override = hostname_override
def auth(self):
params = {}
if self.principal:
params['principal'] = self.principal
if self.hostname_override:
params['hostname_override'] = self.hostname_override
return HTTPKerberosAuth(**params)
class BasicAuth:
username = ''
password = ''
def __init__(self, usernae, password):
self.username = username
self.password = password
def auth(self):
return HTTPBasicAuth(self.username, self.password)
class NoAuth:
"""
No authentication type.
"""
def auth(self):
"""
This method does nothing, just a place holder for the
"authentication interface".
"""
return None
class Client:
"""
A simple HTTP client to be used within the CLI code.
"""
base_url = None
auth = None
def __init__(self, base_url, auth=NoAuth()):
"""
Initializes the object with a base url and authentication type.
Auth type can be 'basic' or 'krb' and defaults to None
if no value is provided.
"""
self.auth = auth
if base_url:
self.base_url = base_url
def request(self, path, method='GET', data=None, headers=None):
"""
Execute a request based on method parameters.
Return a tuple containing the status_code and text output.
"""
url = f'{self.base_url}{path}'
try:
res = requests.request(method, url, data=data, headers=headers, auth=self.auth.auth())
except requests.exceptions.RequestException as e:
raise errors.MBError(str(e))
return (res.status_code, res.text)
| 21.795455 | 92 | 0.679353 | 1,753 | 0.913973 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.324818 |
391b9e2db43316337b1ceff240764f306d2bb877 | 532 | py | Python | students/K33422/Izmaylova_Anna/web_lab2/tours2/tours_app/view_create_user.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 1a361329eabccefa5bd9f3d22e1b5dbdb950c85e | [
"MIT"
] | null | null | null | students/K33422/Izmaylova_Anna/web_lab2/tours2/tours_app/view_create_user.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 1a361329eabccefa5bd9f3d22e1b5dbdb950c85e | [
"MIT"
] | null | null | null | students/K33422/Izmaylova_Anna/web_lab2/tours2/tours_app/view_create_user.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 1a361329eabccefa5bd9f3d22e1b5dbdb950c85e | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic.edit import CreateView
from .models import Users
from .forms import UserForm
from django.contrib.auth.views import LoginView
# view for creating a user
class UserCreateView(CreateView):
    """Registration view backed by UserForm."""
    form_class = UserForm
    success_url = '/users_create/' # where the site redirects after successful registration
    template_name = 'users_create_form.html'
# view for logging in to the site
class UserLogin(LoginView):
    """Login view using Django's stock authentication flow."""
    template_name = "registration/login.html"
391d44965c14b96eb455acf08b6102f375e6e9ba | 5,105 | py | Python | mtp_api/apps/prison/tests/test_utils.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 5 | 2016-01-05T12:21:35.000Z | 2020-10-28T17:06:02.000Z | mtp_api/apps/prison/tests/test_utils.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 209 | 2015-06-12T09:39:41.000Z | 2022-03-21T16:01:19.000Z | mtp_api/apps/prison/tests/test_utils.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 1 | 2021-04-11T06:19:23.000Z | 2021-04-11T06:19:23.000Z | from collections import defaultdict
from copy import copy
import re
import json
from django.conf import settings
from django.test import override_settings, TestCase
import responses
from prison.models import PrisonerLocation
from prison.tests.utils import (
load_prisoner_locations_from_dev_prison_api,
random_prisoner_number,
random_prisoner_name,
random_prisoner_dob,
)
class LoadPrisonerLocationsFromDevPrisonAPITestCase(TestCase):
fixtures = [
'initial_types.json',
'initial_groups.json',
'dev_prison_api_prisons.json',
]
def setUp(self):
self.prison_ids = [
'BWI', # HMP Berwyn
'NMI', # HMP Nottingham
'WLI', # HMP Wayland
]
prisoners_per_prison = {
'BWI': 1,
'NMI': 5,
'WLI': 2,
}
# Dictionaries with data returned by mocked API
self.api_live_roll = defaultdict(list)
self.api_offenders_info = {}
# Location of each test prisoner
self.prisoner_location = {}
for prison_id, n_prisoners in prisoners_per_prison.items():
for _ in range(n_prisoners):
prisoner_id = random_prisoner_number()
prisoner_info = self.random_prisoner()
self.api_live_roll[prison_id].append(prisoner_id)
self.api_offenders_info[prisoner_id] = prisoner_info
self.prisoner_location[prisoner_id] = prison_id
self.n_prisoners_desired = 5
# 1 prisoner from BWI
self.expected_prisoner_ids = self.api_live_roll['BWI']
# first 2 prisoners from NMI
prisoners = copy(self.api_live_roll['NMI'])
prisoners.sort()
self.expected_prisoner_ids = self.expected_prisoner_ids + prisoners[:2]
# another 2 prisoners from WLI
self.expected_prisoner_ids = self.expected_prisoner_ids + self.api_live_roll['WLI']
def random_prisoner(self):
full_name = random_prisoner_name()
first_name = full_name.split(' ')[0]
last_name = full_name.split(' ')[1]
return {
'given_name': first_name,
'middle_names': '',
'surname': last_name,
'date_of_birth': str(random_prisoner_dob()),
# HMPPS Prison API returns more information not included here
}
def get_live_roll_callback(self, request):
# Mock for `GET prison/PRISON_ID/live_roll`
prison_id = request.path_url.split('/')[-2]
live_roll = {
'noms_ids': self.api_live_roll[prison_id],
}
return (200, {}, json.dumps(live_roll))
def get_offender_info_callback(self, request):
# Mock for `GET offenders/PRISONER_ID`
prisoner_id = request.path_url.split('/')[-1]
prisoner_info = self.api_offenders_info[prisoner_id]
return (200, {}, json.dumps(prisoner_info))
@override_settings(
HMPPS_CLIENT_SECRET='test-secret',
HMPPS_AUTH_BASE_URL='https://sign-in-dev.hmpps.local/auth/',
HMPPS_PRISON_API_BASE_URL='https://api-dev.prison.local/',
)
def test_load_prisoner_locations_from_dev_prison_api(self):
n_prisoner_locations_before = PrisonerLocation.objects.count()
with responses.RequestsMock() as rsps:
rsps.add(
responses.POST,
f'{settings.HMPPS_AUTH_BASE_URL}oauth/token',
json={
'access_token': 'amanaccesstoken',
'expires_in': 3600,
},
)
rsps.add_callback(
responses.GET,
re.compile(f'{settings.HMPPS_PRISON_API_BASE_URL}api/v1/prison/[A-Z]+/live_roll'),
callback=self.get_live_roll_callback,
)
rsps.add_callback(
responses.GET,
re.compile(f'{settings.HMPPS_PRISON_API_BASE_URL}api/v1/offenders/*'),
callback=self.get_offender_info_callback,
)
load_prisoner_locations_from_dev_prison_api(self.n_prisoners_desired)
n_prisoner_locations_after = PrisonerLocation.objects.count()
n_prisoner_locations_created = n_prisoner_locations_after - n_prisoner_locations_before
self.assertEqual(self.n_prisoners_desired, n_prisoner_locations_created)
for prisoner_id in self.expected_prisoner_ids:
prisoner_info = self.api_offenders_info[prisoner_id]
prison_id = self.prisoner_location[prisoner_id]
location = PrisonerLocation.objects.filter(
prisoner_number=prisoner_id,
prison_id=prison_id,
)
self.assertTrue(location.exists())
location = location.first()
self.assertEqual(location.prisoner_number, prisoner_id)
expected_name = prisoner_info['given_name'] + ' ' + prisoner_info['surname']
self.assertEqual(location.prisoner_name, expected_name)
self.assertEqual(str(location.prisoner_dob), prisoner_info['date_of_birth'])
| 35.950704 | 98 | 0.632713 | 4,712 | 0.923017 | 0 | 0 | 2,173 | 0.425661 | 0 | 0 | 865 | 0.169442 |
391e24e69f46da70a4ab835a6e27d5809e258db2 | 7,885 | py | Python | metrics/__init__.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | metrics/__init__.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | metrics/__init__.py | MauTrib/gnn-en-folie | 3ca639919a2b285a41641717f4131107c015b510 | [
"Apache-2.0"
] | null | null | null | from metrics.preprocess import edgefeat_converter, fulledge_converter, node_converter
from metrics.common import fulledge_compute_f1, edgefeat_compute_f1, node_compute_f1, node_total
from metrics.mcp import fulledge_total as mcp_fulledge_total, edgefeat_total as mcp_edgefeat_total
from metrics.tsp import tsp_edgefeat_converter_sparsify, tsp_fulledge_compute_f1
from models.base_model import GNN_Abstract_Base_Class
EMBED_TYPES = {
'rsnode': 'node',
'rsedge': 'edge'
}
MCP_VARIANTS = ('mcp', 'mcphard', 'mcptrue', 'mcptruehard')
def get_trainval_fulledge_metric(problem):
    """Return the train/val metric function for a fulledge problem."""
    if problem == 'tsp':
        return tsp_fulledge_compute_f1
    if problem in ('mcp', 'sbm', 'hhc'):
        return fulledge_compute_f1
    raise NotImplementedError(f"Train/val metric for fulledge problem {problem} has not been implemented.")
def get_test_fulledge_metric(problem):
    """Return the test metric function for a fulledge problem (MCP variants only)."""
    if problem not in MCP_VARIANTS:
        raise NotImplementedError(f"Test metric for fulledge problem {problem} has not been implemented.")
    return fulledge_compute_f1
def get_trainval_edgefeat_metric(problem):
    """Return the train/val metric function for an edge-feature problem."""
    # all supported problems currently share the same F1 metric
    if problem in ('tsp', 'mcp', 'hhc', 'sbm'):
        return edgefeat_compute_f1
    raise NotImplementedError(f"Train/val metric for edge problem {problem} has not been implemented.")
def get_test_edgefeat_metric(problem):
    """Return the test metric function for an edge-feature problem (MCP variants only)."""
    if problem not in MCP_VARIANTS:
        raise NotImplementedError(f"Test metric for edge problem {problem} has not been implemented.")
    return edgefeat_compute_f1
def get_trainval_node_metric(problem):
    """Return the train/val metric function for a node-level problem."""
    if problem in MCP_VARIANTS or problem == 'sbm':
        return node_compute_f1
    raise NotImplementedError(f"Train/val metric for node problem {problem} has not been implemented.")
def get_test_node_metric(problem):
    """Return the test metric function for a node-level problem."""
    if problem in MCP_VARIANTS or problem == 'sbm':
        return node_compute_f1
    raise NotImplementedError(f"Test metric for node problem {problem} has not been implemented.")
def get_trainval_metric(eval, problem):
    """Dispatch to the train/val metric getter for the given eval mode."""
    if eval == 'edge':
        return get_trainval_edgefeat_metric(problem)
    if eval == 'fulledge':
        return get_trainval_fulledge_metric(problem)
    if eval == 'node':
        return get_trainval_node_metric(problem)
    raise NotImplementedError(f"Eval method {eval} not implemented")
def get_test_metric(eval, problem):
    """Dispatch to the test metric getter for the given eval mode."""
    if eval == 'edge':
        return get_test_edgefeat_metric(problem)
    if eval == 'fulledge':
        return get_test_fulledge_metric(problem)
    if eval == 'node':
        return get_test_node_metric(problem)
    raise NotImplementedError(f"Eval method {eval} not implemented")
def get_preprocessing(embed, eval, problem):
    """Return the converter mapping raw model output to (inferred, target, adjacency).

    Args:
        embed: embedding type produced by the architecture ('edge' or 'node').
        eval: evaluation mode ('edge', 'fulledge' or 'node').
        problem: problem name (e.g. 'tsp', 'sbm', 'hhc', or an MCP variant).

    Raises:
        NotImplementedError: for unsupported (embed, eval, problem) combinations.

    Fix: the unknown-eval message in the 'node' branch previously said
    "embedding type 'edge'" (copy-paste from the edge branch).
    """
    if embed == 'edge':
        if eval == 'edge':
            if problem == 'tsp':
                # TSP needs the sparsification-aware converter
                return tsp_edgefeat_converter_sparsify
            elif problem in (MCP_VARIANTS + ('sbm', 'hhc')):
                return edgefeat_converter
            else:
                raise NotImplementedError(f"Preprocessing for {embed=}, {eval=}, {problem=} not implemented")
        elif eval == 'fulledge':
            if problem in (MCP_VARIANTS + ('sbm', 'tsp')):
                return fulledge_converter
            else:
                raise NotImplementedError(f"Preprocessing for {embed=}, {eval=}, {problem=} not implemented")
        else:
            raise NotImplementedError(f"Unknown eval '{eval}' for embedding type 'edge'.")
    elif embed == 'node':
        if eval == 'node':
            if problem in (MCP_VARIANTS + ('sbm', 'hhc')):
                return node_converter
            else:
                raise NotImplementedError(f"Preprocessing for {embed=}, {eval=}, {problem=} not implemented")
        else:
            raise NotImplementedError(f"Unknown eval '{eval}' for embedding type 'node'.")
    else:
        raise NotImplementedError(f"Embed {embed} not implemented.")
def get_preprocess_additional_args(problem: str, config: dict):
    """Extra keyword arguments the preprocessing function needs for *problem*."""
    if problem != 'tsp':
        return {}
    # TSP's converter needs the training sparsification setting
    return {'sparsify': config['data']['train']['sparsify']}
def assemble_metric_function(preprocess_function, eval_function, preprocess_additional_args=None):
    """Compose a preprocessing step and an eval function into one metric callable.

    The returned closure converts (raw_scores, target) into lists of
    inferred/target/adjacency values and feeds them to *eval_function*,
    falling back to the legacy 2-argument signature when needed.
    """
    if preprocess_additional_args is None:
        preprocess_additional_args = {}
    def final_function(raw_scores, target, **kwargs):
        l_inferred, l_targets, l_adjacency = preprocess_function(raw_scores, target, **kwargs, **preprocess_additional_args)
        try: #We try to add the list of adjacencies to the eval function
            result = eval_function(l_inferred, l_targets, l_adjacency)
        except TypeError as type_error:
            # Fragile arity probe: the TypeError *message text* is matched to
            # detect legacy 2-argument eval functions.  NOTE(review): a
            # genuine TypeError raised inside a 3-arg eval function with the
            # same message would be misinterpreted -- confirm before refactoring.
            str_error = " ".join(str(type_error).split(' ')[1:])
            if str_error=="takes 2 positional arguments but 3 were given": #The eval function doesn't handle the adjacencies (OLD functions)
                result = eval_function(l_inferred, l_targets)
            else: #In case it's another error, raise it
                raise type_error
        return result
    return final_function
def setup_trainval_metric(pl_model: GNN_Abstract_Base_Class, config: dict, soft=True)-> None:
    """Attach the train/val metric described by *config* to *pl_model*.

    With soft=True (default), a NotImplementedError raised during metric
    setup is reported and swallowed; with soft=False it propagates.
    """
    problem = config['problem']
    raw_embed = config['arch']['embedding']
    embed = EMBED_TYPES.get(raw_embed, raw_embed)
    eval = config['arch']['eval']
    try:
        metric_fn = assemble_metric_function(
            preprocess_function=get_preprocessing(embed, eval, problem),
            eval_function=get_trainval_metric(eval, problem),
            preprocess_additional_args=get_preprocess_additional_args(problem, config))
        pl_model.attach_metric_function(metric_fn, start_using_metric=True)
    except NotImplementedError as ne:
        if not soft:
            raise ne
        print(f"There was a problem with the train_val setup metric. I'll let it go anyways, but additional metrics won't be saved. Error stated is: {ne}")
def setup_test_metric(pl_model: GNN_Abstract_Base_Class, config: dict)-> None:
    """Attach the test-time metric described by *config* to *pl_model*.

    Unlike setup_trainval_metric, any NotImplementedError propagates to
    the caller.
    """
    problem = config['problem']
    raw_embed = config['arch']['embedding']
    embed = EMBED_TYPES.get(raw_embed, raw_embed)
    eval = config['arch']['eval']
    preprocess_fn = get_preprocessing(embed, eval, problem)
    test_eval_fn = get_test_metric(eval, problem)
    extra_args = get_preprocess_additional_args(problem, config)
    metric_fn = assemble_metric_function(
        preprocess_function=preprocess_fn,
        eval_function=test_eval_fn,
        preprocess_additional_args=extra_args)
    pl_model.attach_metric_function(metric_fn, start_using_metric=True)
def setup_metric(pl_model: GNN_Abstract_Base_Class, config: dict, soft=False, istest=False) -> None:
    """Attach a metric function to the Pytorch Lightning model.

    The metric can differ between train/val and test. When istest is True
    the test metric is tried first; if it has not been implemented, the
    train/val metric is used as a fallback.
    - pl_model : Pytorch Lightning model, child of GNN_Abstract_Base_Class
    - config : config dict with the layout of 'default_config.yaml'
    - soft : if False, raise when the train/val metric is missing; if True,
      let it pass with a warning
    - istest : if True, prefer the test-time metric
    """
    if not istest:
        return setup_trainval_metric(pl_model, config, soft)
    try:
        setup_test_metric(pl_model, config)
    except NotImplementedError:
        print('Test metric not found, using train_val metric...')
        setup_trainval_metric(pl_model, config, soft)
| 44.548023 | 163 | 0.701966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,107 | 0.267216 |
391e4057449370d286751ffea0205e08d3058736 | 1,538 | py | Python | route_builder/graph.py | vadimkholodilo/routes-generator | 0bf19bce55ecfc2974020fbb8bc51d55bb73fd1a | [
"MIT"
] | null | null | null | route_builder/graph.py | vadimkholodilo/routes-generator | 0bf19bce55ecfc2974020fbb8bc51d55bb73fd1a | [
"MIT"
] | null | null | null | route_builder/graph.py | vadimkholodilo/routes-generator | 0bf19bce55ecfc2974020fbb8bc51d55bb73fd1a | [
"MIT"
] | null | null | null | """
Copyright 2020 Vadim Kholodilo <vadimkholodilo@gmail.com>, Iulia Durova <yulianna199820@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Class to represent a graph.
# This class must have the following interface:
#   Constructor: creates an empty graph; takes no parameters.
#   addVertex(value): adds a vertex to the graph.
#   addEdge(v1, v2, weight): adds an edge from v1 to v2 with the given weight.
#   getWeight(v1, v2): returns the weight of the edge between v1 and v2;
#     if the edge does not exist, returns -1.
# Dijkstra's algorithm will be added in the future.
391f566a4819260dcb8114df4800170f63917127 | 128 | py | Python | grpc/clients/python/vegaapiclient/generated/wallet/v1/__init__.py | legg/api | a818834f8a935b802af3b01b4237e64ed41ab3f2 | [
"MIT"
] | 6 | 2021-05-20T15:30:46.000Z | 2022-02-22T12:06:39.000Z | grpc/clients/python/vegaapiclient/generated/wallet/v1/__init__.py | legg/api | a818834f8a935b802af3b01b4237e64ed41ab3f2 | [
"MIT"
] | 29 | 2021-03-16T11:58:05.000Z | 2021-10-05T14:04:45.000Z | vegaapiclient/generated/vega/wallet/v1/__init__.py | vegaprotocol/sdk-python | 2491f62704afd806a47cb8467a7edf0dd65bbf1b | [
"MIT"
] | 6 | 2021-05-07T06:43:02.000Z | 2022-03-29T07:18:01.000Z | from . import wallet_pb2_grpc as wallet_grpc
from . import wallet_pb2 as wallet
__all__ = [
"wallet_grpc",
"wallet",
]
| 16 | 44 | 0.703125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.164063 |
391fd257874f9d7d38c999096e0d1db1bb5907fe | 678 | py | Python | noisysystem_temp/CutoffPhi.py | Tom271/InteractingParticleSystems | 1cfc8b228077c2465e71d82cc288d713d3755392 | [
"MIT"
] | 1 | 2019-10-22T19:48:22.000Z | 2019-10-22T19:48:22.000Z | noisysystem_temp/CutoffPhi.py | Tom271/InteractingParticleSystems | 1cfc8b228077c2465e71d82cc288d713d3755392 | [
"MIT"
] | 1 | 2019-10-22T21:32:19.000Z | 2019-10-22T21:32:19.000Z | noisysystem_temp/CutoffPhi.py | Tom271/InteractingParticleSystems | 1cfc8b228077c2465e71d82cc288d713d3755392 | [
"MIT"
] | 1 | 2019-10-22T19:49:38.000Z | 2019-10-22T19:49:38.000Z | import particle.processing as processing
particles = 480
test_params = {
"particle_count": 2 * [particles], # (3 * np.arange(8, 150, 16)).tolist(),
"gamma": [0.05],
"G": ["Smooth"],
"scaling": ["Local"],
"D": [1.0],
"phi": ["Gamma"],
"initial_dist_x": [
"one_cluster",
"two_clusters",
"three_clusters",
"four_clusters",
],
"initial_dist_v": ["pos_normal_dn", "pos_const_near_0"],
"T_end": [20.0],
"dt": [0.01],
"option": ["numba"],
}
history = processing.get_master_yaml(yaml_path="experiments_ran")
fn = "cutoff_phi_test"
processing.run_experiment(test_params, history, experiment_name=fn)
| 22.6 | 79 | 0.597345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.423304 |
3922a73eca65de4ee19b6e71f0f70623869553df | 238 | py | Python | examples/client-context/client.py | barberj/bridge-python | 1c33df5fa1d92ac6c54bbb6d868c71e1f883e8fe | [
"MIT"
] | null | null | null | examples/client-context/client.py | barberj/bridge-python | 1c33df5fa1d92ac6c54bbb6d868c71e1f883e8fe | [
"MIT"
] | null | null | null | examples/client-context/client.py | barberj/bridge-python | 1c33df5fa1d92ac6c54bbb6d868c71e1f883e8fe | [
"MIT"
] | null | null | null | from BridgePython import Bridge
bridge = Bridge(api_key='myapikey')
class PongHandler(object):
def pong(self):
print ("PONG!")
bridge.store_service("pong", PongHandler())
bridge.get_service("ping").ping()
bridge.connect()
| 18.307692 | 43 | 0.710084 | 70 | 0.294118 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.121849 |
3922c5e1b8bbf49e5010cddfa330544ec3762fa2 | 239 | py | Python | lidi/home/views.py | campovski/lidi | 9699e62e70e679970816e29ca7618c9ed0146c7e | [
"Apache-2.0"
] | null | null | null | lidi/home/views.py | campovski/lidi | 9699e62e70e679970816e29ca7618c9ed0146c7e | [
"Apache-2.0"
] | 21 | 2017-06-03T14:16:14.000Z | 2018-05-29T07:28:27.000Z | lidi/home/views.py | campovski/lidi | 9699e62e70e679970816e29ca7618c9ed0146c7e | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
def index(request):
try:
return render(request, 'home/index.html', {'user': request.session['user']})
except KeyError:
return render(request, 'home/index.html', {'user': None})
| 26.555556 | 84 | 0.656904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.217573 |
3922d1c4bdc9f843e699052bf6a3c54eb1d262ef | 1,918 | py | Python | src/models/fcnet.py | ArlindKadra/DeepLearning | 4e9ffe39bbb8722ca658522e6b6d26c6f2291ef6 | [
"Apache-2.0"
] | 4 | 2018-09-07T13:30:16.000Z | 2021-05-04T18:01:53.000Z | src/models/fcnet.py | ArlindKadra/DeepLearning | 4e9ffe39bbb8722ca658522e6b6d26c6f2291ef6 | [
"Apache-2.0"
] | null | null | null | src/models/fcnet.py | ArlindKadra/DeepLearning | 4e9ffe39bbb8722ca658522e6b6d26c6f2291ef6 | [
"Apache-2.0"
] | 1 | 2018-09-06T15:30:36.000Z | 2018-09-06T15:30:36.000Z | import torch.nn as nn
class FcNet(nn.Module):
def __init__(self, config, input_features, nr_labels):
super(FcNet, self).__init__()
self.config = config
# create the blocks
self.layers = self._make_block(self.config["num_layers"], input_features)
self.fc_layer = nn.Linear(self.config["num_units_%i" % self.config["num_layers"]], int(nr_labels))
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
out = self.layers(x)
out = self.fc_layer(out)
return out
def _make_block(self, nr_layers, input_features):
blocks = list()
blocks.append(BasicBlock(input_features, self.config, 1))
for i in range(2, nr_layers + 1):
blocks.append(BasicBlock(self.config["num_units_%i" % (i-1)], self.config, i))
return nn.Sequential(*blocks)
class BasicBlock(nn.Module):
def __init__(self, in_features, config, block_nr):
super(BasicBlock, self).__init__()
self.dropout_activated = True if config['activate_dropout'] == 'Yes' else False
self.batch_norm_activated = True if config['activate_batch_norm'] == 'Yes' else False
self.training = True
self.linear = nn.Linear(in_features, config['num_units_%i' % block_nr])
self.relu = nn.ReLU(inplace=True)
if self.dropout_activated:
self.dropout = nn.Dropout(p=config['dropout_%i' % block_nr])
if self.batch_norm_activated:
self.batch_norm = nn.BatchNorm1d(config['num_units_%i' % block_nr])
def forward(self, x):
out = self.linear(x)
out = self.relu(out)
if self.dropout_activated:
out = self.dropout(out)
if self.batch_norm_activated:
out = self.batch_norm(out)
return out
| 32.508475 | 106 | 0.620438 | 1,890 | 0.985401 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.08342 |
392319a9342f295f87a01e4b65c39850a3a7bba7 | 24 | py | Python | lib/galaxy/version.py | natefoo/galaxy-beta2 | 3af3bf5742fbf0f7d301a2a8c548a3e153544448 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/version.py | natefoo/galaxy-beta2 | 3af3bf5742fbf0f7d301a2a8c548a3e153544448 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | lib/galaxy/version.py | natefoo/galaxy-beta2 | 3af3bf5742fbf0f7d301a2a8c548a3e153544448 | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | VERSION_MAJOR = "15.03"
| 12 | 23 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.291667 |
3923dfa242a117dd52d99cfb05d8706c69aadef0 | 16,438 | py | Python | vspreview/toolbars/comp/toolbar.py | wwww-wwww/vs-preview | 3aaad4f2242ca1d490dccb895efe27e823162d2a | [
"Apache-2.0"
] | null | null | null | vspreview/toolbars/comp/toolbar.py | wwww-wwww/vs-preview | 3aaad4f2242ca1d490dccb895efe27e823162d2a | [
"Apache-2.0"
] | null | null | null | vspreview/toolbars/comp/toolbar.py | wwww-wwww/vs-preview | 3aaad4f2242ca1d490dccb895efe27e823162d2a | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import os
import string
import random
import logging
import vapoursynth as vs
from pathlib import Path
from requests import Session
from functools import partial
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from typing import Any, Mapping, Callable, Dict, Final, List, NamedTuple, Optional, Set, cast
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QObject, QThread, pyqtSignal
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar
from ...utils import set_qobject_names
from ...widgets import ComboBox, FrameEdit
from ...models import PictureTypes, VideoOutputs
from ...core import AbstractMainWindow, AbstractToolbar, PictureType, try_load, main_window
from .settings import CompSettings
# Maximum random draws per requested sample when filtering frames by picture type.
_MAX_ATTEMPTS_PER_PICTURE_TYPE: Final[int] = 50
def select_frames(clip: vs.VideoNode, indices: List[int]) -> vs.VideoNode:
    """Return a clip whose n-th frame is clip[indices[n]] (frame selection/reordering)."""
    return clip.std.BlankClip(length=len(indices)).std.FrameEval(lambda n: clip[indices[n]])
class WorkerConfiguration(NamedTuple):
    """Immutable bundle of settings for one slow.pics upload job."""
    outputs: VideoOutputs          # video outputs to extract frames from
    collection_name: str           # name of the slow.pics collection
    public: bool                   # whether the collection is publicly listed
    nsfw: bool                     # sent as the 'hentai' flag to slow.pics
    optimise: bool                 # ask slow.pics to optimise the uploaded images
    remove_after: Optional[int]    # auto-removal delay; None keeps the collection
    frames: List[int]              # frame numbers to extract from every output
    compression: int               # PNG quality passed to QImage.save
    path: Path                     # scratch directory for the extracted PNGs
finished = pyqtSignal()
progress_bar = pyqtSignal(int)
progress_status = pyqtSignal(str, int, int)
outputs: VideoOutputs
is_finished = False
def _progress_update_func(self, value: int, endvalue: int) -> None:
if value == 0:
self.progress_bar.emit(0)
else:
self.progress_bar.emit(int(100 * value / endvalue))
def run(self, conf: WorkerConfiguration) -> None:
self.conf = conf
all_images: List[List[Path]] = []
try:
for i, output in enumerate(conf.outputs):
if self.is_finished:
raise StopIteration
self.progress_status.emit('extract', i + 1, len(conf.outputs))
path_name = conf.path / output.name
path_name.mkdir(parents=True)
max_num = max(conf.frames)
path_images = [
path_name / (f'{output.name}_' + f'{f}'.zfill(len("%i" % max_num)) + '.png')
for f in conf.frames
]
def _save(n: int, f: vs.VideoFrame) -> vs.VideoFrame:
if self.is_finished:
raise StopIteration
QImage(cast(bytes, f[0]), f.width, f.height, QImage.Format_RGB32).save(
str(path_images[n]), 'PNG', conf.compression
)
return f
decimated = select_frames(output.prepared.clip, conf.frames)
clip = decimated.std.ModifyFrame(decimated, _save)
with open(os.devnull, 'wb') as devnull:
clip.output(devnull, y4m=False, progress_update=self._progress_update_func)
if self.is_finished:
raise StopIteration
all_images.append(sorted(path_images))
except StopIteration:
return self.finished.emit('')
fields: Dict[str, Any] = {}
for i, (output, images) in enumerate(zip(conf.outputs, all_images)):
if self.is_finished:
return self.finished.emit('')
for j, (image, frame) in enumerate(zip(images, conf.frames)):
if self.is_finished:
return self.finished.emit('') # type: ignore
fields[f'comparisons[{j}].name'] = str(frame)
fields[f'comparisons[{j}].images[{i}].name'] = output.name
fields[f'comparisons[{j}].images[{i}].file'] = (image.name, image.read_bytes(), 'image/png')
self.progress_status.emit('upload', 0, 0)
with Session() as sess:
sess.get('https://slow.pics/api/comparison')
if self.is_finished:
return self.finished.emit('')
head_conf = {
'collectionName': conf.collection_name,
'public': str(conf.public).lower(),
'optimizeImages': str(conf.optimise).lower(),
'hentai': str(conf.nsfw).lower(),
}
if conf.remove_after is not None:
head_conf |= {'removeAfter': str(conf.remove_after)}
def _monitor_cb(monitor: MultipartEncoderMonitor) -> None:
self._progress_update_func(monitor.bytes_read, monitor.len)
files = MultipartEncoder(head_conf | fields)
monitor = MultipartEncoderMonitor(files, _monitor_cb)
response = sess.post(
'https://slow.pics/api/comparison',
monitor.to_string(), # type: ignore
headers={
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.5",
"Content-Length": str(files.len),
"Content-Type": files.content_type,
"Origin": "https://slow.pics/",
"Referer": "https://slow.pics/comparison",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
),
"X-XSRF-TOKEN": sess.cookies.get_dict()["XSRF-TOKEN"] # noqa
}
)
self.progress_status.emit(f'https://slow.pics/c/{response.text}', 0, 0)
self.finished.emit()
class CompToolbar(AbstractToolbar):
    """Toolbar for uploading frame comparisons of all outputs to slow.pics.

    Builds the GUI controls, gathers a WorkerConfiguration from them, and
    runs a Worker on a background QThread, relaying its progress signals
    back into the toolbar widgets.
    """
    # Attributes persisted via __getstate__/__setstate__.
    _storable_attrs = ('settings', 'visibility')
    _thread_running = False
    __slots__ = (
        *_storable_attrs, 'random_frames_control', 'manual_frames_lineedit',
        'current_frame_checkbox', 'is_public_checkbox', 'is_nsfw_checkbox',
        'output_url_lineedit', 'output_url_copy_button', 'start_upload_button', 'stop_upload_button',
        'upload_progressbar', 'upload_status_label', 'upload_status_elements'
    )
    def __init__(self, main: AbstractMainWindow) -> None:
        super().__init__(main, CompSettings())
        self.setup_ui()
        set_qobject_names(self)
    def setup_ui(self) -> None:
        """Create and wire all toolbar widgets (left to right in one row)."""
        layout = QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        # How many random frames to sample for the comparison.
        random_frames_label = QLabel('Num Random Frames:', self)
        layout.addWidget(random_frames_label)
        self.random_frames_control = FrameEdit(self)
        layout.addWidget(self.random_frames_control)
        # Comma-separated list of explicitly requested frame numbers.
        manual_frames_label = QLabel('Additional Frames:', self)
        layout.addWidget(manual_frames_label)
        self.manual_frames_lineedit = QLineEdit(self)
        self.manual_frames_lineedit.setPlaceholderText('frame,frame,frame')
        layout.addWidget(self.manual_frames_lineedit)
        self.current_frame_checkbox = QCheckBox('Current Frame', self)
        self.current_frame_checkbox.setChecked(True)
        layout.addWidget(self.current_frame_checkbox)
        layout.addWidget(self.get_separator())
        # Optional restriction of random samples to one picture type (I/P/B).
        picture_type_label = QLabel('Filter per Picture Type:', self)
        layout.addWidget(picture_type_label)
        self.pic_type_combox = ComboBox[PictureType](self)
        self.pic_type_combox.setModel(PictureTypes())
        self.pic_type_combox.setEditable(True)
        self.pic_type_combox.setInsertPolicy(QComboBox.InsertAtCurrent)
        self.pic_type_combox.setDuplicatesEnabled(True)
        self.pic_type_combox.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.pic_type_combox.view().setMinimumWidth(self.pic_type_combox.minimumSizeHint().width())
        # Widen the combobox slightly beyond its minimum hint.
        temp_width = self.pic_type_combox.minimumSizeHint().width()
        self.pic_type_combox.setMinimumWidth(temp_width + temp_width // 10)
        self.pic_type_combox.setCurrentIndex(0)
        layout.addWidget(self.pic_type_combox)
        layout.addWidget(self.get_separator())
        self.is_public_checkbox = QCheckBox('Public', self)
        self.is_public_checkbox.setChecked(True)
        layout.addWidget(self.is_public_checkbox)
        self.is_nsfw_checkbox = QCheckBox('NSFW', self)
        self.is_nsfw_checkbox.setChecked(False)
        layout.addWidget(self.is_nsfw_checkbox)
        layout.addWidget(self.get_separator())
        # Read-only field showing the resulting slow.pics URL.
        self.output_url_lineedit = QLineEdit('https://slow.pics/c/', self)
        self.output_url_lineedit.setEnabled(False)
        layout.addWidget(self.output_url_lineedit)
        self.output_url_copy_button = QPushButton(self)
        self.output_url_copy_button.clicked.connect(self.on_copy_output_url_clicked)
        self.output_url_copy_button.setText('⎘')
        layout.addWidget(self.output_url_copy_button)
        self.start_upload_button = QPushButton('Upload to slow.pics', self)
        self.start_upload_button.clicked.connect(self.on_start_upload)
        layout.addWidget(self.start_upload_button)
        # Start/stop buttons swap visibility while an upload is running.
        self.stop_upload_button = QPushButton('Stop Uploading', self)
        self.stop_upload_button.clicked.connect(self.on_stop_upload)
        self.stop_upload_button.setVisible(False)
        layout.addWidget(self.stop_upload_button)
        upload_separator = self.get_separator()
        layout.addWidget(upload_separator)
        self.upload_progressbar = QProgressBar(self)
        self.upload_progressbar.setGeometry(200, 80, 250, 20)
        self.upload_progressbar.setValue(0)
        layout.addWidget(self.upload_progressbar)
        self.upload_status_label = QLabel(self)
        layout.addWidget(self.upload_status_label)
        self.update_status_label('extract')
        # Widgets hidden together while no upload is in progress.
        self.upload_status_elements = (
            upload_separator, self.upload_progressbar,
            self.upload_status_label
        )
        self.update_upload_status_visibility(False)
        layout.addStretch()
        layout.addStretch()
    def on_copy_output_url_clicked(self, checked: bool | None = None) -> None:
        """Copy the slow.pics result URL to the system clipboard."""
        self.main.clipboard.setText(self.output_url_lineedit.text())
        self.main.show_message('Slow.pics URL copied to clipboard')
    def update_upload_status_visibility(self, visible: bool) -> None:
        """Show or hide the progress bar, status label and their separator."""
        for element in self.upload_status_elements:
            element.setVisible(visible)
    def on_start_upload(self) -> None:
        """Kick off an upload unless one is already running."""
        if self._thread_running:
            return
        self.start_upload_button.setVisible(False)
        self.stop_upload_button.setVisible(True)
        self.upload_to_slowpics()
    def on_end_upload(self, forced: bool = False) -> None:
        """Reset the buttons/thread state after the upload finished or was stopped."""
        self.start_upload_button.setVisible(True)
        self.stop_upload_button.setVisible(False)
        self._thread_running = False
        self.upload_thread.deleteLater()
        if forced:
            self.upload_status_label.setText("Stopped!")
        else:
            self.upload_status_label.setText("Finished!")
    def on_stop_upload(self) -> None:
        """Request cooperative cancellation of the running worker."""
        self.upload_worker.is_finished = True
        self.on_end_upload(forced=True)
    def update_status_label(self, kind: str, curr: int | None = None, total: int | None = None) -> None:
        """Render a progress phrase for a known phase; any other *kind* is
        treated as the final result URL and written to the URL field."""
        message = ''
        moreinfo = f" {curr or '?'}/{total or '?'} " if curr or total else ''
        if kind == 'extract':
            message = 'Extracting'
        elif kind == 'upload':
            message = 'Uploading'
        elif kind == 'search':
            message = 'Searching'
        else:
            return self.output_url_lineedit.setText(kind)
        self.upload_status_label.setText(f'{message}{moreinfo}...')
    def _rand_num_frames(self, checked: Set[int], rand_func: Callable[[], int]) -> int:
        """Draw from *rand_func* until the value is not already in *checked*."""
        rnum = rand_func()
        while rnum in checked:
            rnum = rand_func()
        return rnum
    def _select_samples_ptypes(self, num_frames: int, k: int, picture_type: PictureType) -> List[int]:
        """Randomly pick *k* frame numbers whose picture type matches
        *picture_type* in every output; raises when the clips cannot supply
        enough matches within the attempt budget."""
        samples: Set[int] = set()
        _max_attempts = 0
        _rnum_checked: Set[int] = set()
        while len(samples) < k:
            _attempts = 0
            while True:
                self.update_status_label('search', _attempts, _MAX_ATTEMPTS_PER_PICTURE_TYPE)
                if len(_rnum_checked) >= num_frames:
                    raise ValueError(f'There aren\'t enough of {picture_type} in these clips')
                rnum = self._rand_num_frames(_rnum_checked, partial(random.randrange, start=0, stop=num_frames))
                _rnum_checked.add(rnum)
                # Accept the frame only if all outputs agree on its picture type.
                if all(
                    f.props['_PictType'].decode('utf-8') == str(picture_type)[0]
                    for f in vs.core.std.Splice(
                        [select_frames(out.prepared.clip, [rnum]) for out in self.main.outputs], True
                    ).frames()
                ):
                    break
                _attempts += 1
                _max_attempts += 1
                if _attempts > _MAX_ATTEMPTS_PER_PICTURE_TYPE:
                    logging.warning(
                        f'{_MAX_ATTEMPTS_PER_PICTURE_TYPE} attempts were made for sample {len(samples)} '
                        f'and no match found for {picture_type}; stopping iteration...')
                    break
            if _max_attempts > (curr_max_att := _MAX_ATTEMPTS_PER_PICTURE_TYPE * k):
                raise RecursionError(f'Comp: attempts max of {curr_max_att} has been reached!')
            if _attempts < _MAX_ATTEMPTS_PER_PICTURE_TYPE:
                samples.add(rnum)
                self.upload_progressbar.setValue(int())
                self.upload_progressbar.setValue(int(100 * len(samples) / k))
        return list(samples)
    def get_slowpics_conf(self) -> WorkerConfiguration:
        """Collect the current widget state into a WorkerConfiguration."""
        self.update_upload_status_visibility(True)
        # NOTE(review): this annotation-only statement binds nothing and the
        # name is never used below — likely a leftover.
        clips: Dict[str, vs.VideoNode]
        num = int(self.random_frames_control.value())
        frames: List[int] = list(
            map(int, filter(None, [x.strip() for x in self.manual_frames_lineedit.text().split(',')]))
        )
        picture_type = self.pic_type_combox.currentData()
        lens = set(out.prepared.clip.num_frames for out in self.main.outputs)
        if len(lens) != 1:
            logging.warning('Outputted clips don\'t all have the same length!')
        lens_n = min(lens)
        # Random scratch directory under the config dir for the extracted PNGs.
        path = Path(main_window().config_dir) / ''.join(random.choices(string.ascii_uppercase + string.digits, k=16))
        path.mkdir(parents=True)
        if num:
            if picture_type is PictureType.UNSET:
                samples = random.sample(range(lens_n), num)
            else:
                logging.info('Making samples according to specified picture types...')
                samples = self._select_samples_ptypes(lens_n, num, picture_type)
        else:
            samples = []
        if len(frames):
            samples.extend(frames)
        if self.current_frame_checkbox.isChecked():
            samples.append(int(self.main.current_frame))
        return WorkerConfiguration(
            self.main.outputs, 'Function Test',
            self.is_public_checkbox.isChecked(), self.is_nsfw_checkbox.isChecked(),
            True, None, sorted(set(samples)), -1, path
        )
    def upload_to_slowpics(self) -> None:
        """Create a Worker on a fresh QThread and start the upload job."""
        self.upload_thread = QThread()
        self.upload_worker = Worker()
        self.upload_worker.moveToThread(self.upload_thread)
        # Note: get_slowpics_conf() runs now (GUI thread), the worker later.
        self.upload_thread.started.connect(
            partial(self.upload_worker.run, conf=self.get_slowpics_conf())
        )
        self.upload_worker.finished.connect(self.upload_thread.quit)
        self.upload_worker.finished.connect(self.upload_worker.deleteLater)
        self.upload_thread.finished.connect(self.on_end_upload)
        self.upload_worker.progress_bar.connect(self.upload_progressbar.setValue)
        self.upload_worker.progress_status.connect(self.update_status_label)
        self.upload_thread.start()
        self._thread_running = True
    def __getstate__(self) -> Mapping[str, Any]:
        # Persist only the declared storable attributes.
        return {
            attr_name: getattr(self, attr_name)
            for attr_name in self._storable_attrs
        }
    def __setstate__(self, state: Mapping[str, Any]) -> None:
        # Restore persisted state, tolerating missing/invalid entries.
        try_load(state, 'visibility', bool, self.on_toggle)
        try_load(state, 'settings', CompSettings, self.__setattr__)
| 38.227907 | 117 | 0.625989 | 15,393 | 0.936314 | 0 | 0 | 0 | 0 | 0 | 0 | 1,839 | 0.111861 |
39241952bc6bc92767ff1742cea6d0733b20db90 | 3,737 | py | Python | tweets.py | s-broda/capstoneproject | 816fe144db6dc7eb430e5e1cc14937c63a8fc4b0 | [
"MIT"
] | null | null | null | tweets.py | s-broda/capstoneproject | 816fe144db6dc7eb430e5e1cc14937c63a8fc4b0 | [
"MIT"
] | 7 | 2020-03-24T18:13:33.000Z | 2022-02-10T01:12:31.000Z | tweets.py | s-broda/capstoneproject | 816fe144db6dc7eb430e5e1cc14937c63a8fc4b0 | [
"MIT"
] | null | null | null | # see https://www.spinningbytes.com/resources/germansentiment/ and https://github.com/aritter/twitter_download for obtaining the data.
import os
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from conversion import convert_examples_to_features, convert_text_to_examples
def load_datasets(data_dir, num_categories, test_size):
    """Load the downloaded tweet-sentiment TSV and split it into train/test.

    - data_dir: directory containing 'downloaded.tsv'.
    - num_categories: 2 keeps only negative/positive tweets; any other value
      also keeps neutral tweets as a third class.
    - test_size: fraction of the data reserved for the test split.
    Returns (X_train, y_train, X_test, y_test) where X holds tweet texts and
    y holds integer labels (0=negative, 1=positive, 2=neutral).
    """
    data = pd.read_csv(os.path.join(data_dir, "downloaded.tsv"), sep="\t", na_values="Not Available",
                       names=["id", "sentiment", "tweet_id", "?", "text"], index_col='id')
    data = data.dropna(how='any')[['sentiment', 'text']]
    # Bug fix: the previous chained assignment (data['sentiment'][mask] = v)
    # writes to a temporary copy under pandas copy-on-write semantics and is
    # not guaranteed to modify `data`; .loc assigns in place reliably.
    data.loc[data['sentiment'] == 'neutral', 'sentiment'] = 2
    data.loc[data['sentiment'] == 'negative', 'sentiment'] = 0
    data.loc[data['sentiment'] == 'positive', 'sentiment'] = 1
    if num_categories == 2:
        # Binary task: drop the neutral class.
        data = data[data['sentiment'] != 2]
    X = data['text']
    y = data['sentiment']
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=0)
    return (X_train, y_train, X_test, y_test)
def get_tweets_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size):
    """Return BERT-style input features for the tweet dataset, with an .npz
    disk cache keyed on subtask, category count and sequence length.

    Returns the 8-tuple (train_input_ids, train_input_masks,
    train_segment_ids, train_labels, test_input_ids, test_input_masks,
    test_segment_ids, test_labels).
    """
    # Cache file name encodes every parameter that changes the features.
    fn = os.path.join(data_dir, "data_"+subtask+"_"+str(num_categories)+"cat_"+str(max_seq_length)+".npz")
    if Path(fn).is_file():
        # Cache hit: load all eight arrays straight from disk.
        f= np.load(fn)
        train_input_ids = f['train_input_ids']
        train_input_masks = f['train_input_masks']
        train_segment_ids = f['train_segment_ids']
        train_labels = f['train_labels']
        test_input_ids = f['test_input_ids']
        test_input_masks = f['test_input_masks']
        test_segment_ids = f['test_segment_ids']
        test_labels = f['test_labels']
        f.close()
    else:
        X_train, y_train, X_test, y_test = load_datasets(data_dir, num_categories, test_size)
        # Create datasets (Only take up to max_seq_length words for memory)
        train_text = X_train.to_list()
        train_text = [" ".join(t.split()[0:max_seq_length]) for t in train_text]
        train_text = np.array(train_text, dtype=object)[:, np.newaxis]
        train_label = y_train.tolist()
        test_text = X_test.tolist()
        test_text = [" ".join(t.split()[0:max_seq_length]) for t in test_text]
        test_text = np.array(test_text, dtype=object)[:, np.newaxis]
        test_label = y_test.tolist()
        # Convert data to InputExample format
        train_examples = convert_text_to_examples(train_text, train_label)
        test_examples = convert_text_to_examples(test_text, test_label)
        # Convert to features
        (
            train_input_ids,
            train_input_masks,
            train_segment_ids,
            train_labels,
        ) = convert_examples_to_features(
            tokenizer, train_examples, max_seq_length=max_seq_length
        )
        (
            test_input_ids,
            test_input_masks,
            test_segment_ids,
            test_labels,
        ) = convert_examples_to_features(
            tokenizer, test_examples, max_seq_length=max_seq_length
        )
        # Persist all eight arrays so subsequent calls hit the cache.
        np.savez(fn,
                 train_input_ids=train_input_ids,
                 train_input_masks=train_input_masks,
                 train_segment_ids=train_segment_ids,
                 train_labels=train_labels,
                 test_input_ids=test_input_ids,
                 test_input_masks=test_input_masks,
                 test_segment_ids=test_segment_ids,
                 test_labels=test_labels
                 )
    return (
        train_input_ids,
        train_input_masks,
        train_segment_ids,
        train_labels,
        test_input_ids,
        test_input_masks,
        test_segment_ids,
        test_labels
    )
| 38.525773 | 134 | 0.644902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.168049 |
39291dcb4bbcc2d0f6388acdff47bf0a4970b898 | 1,642 | py | Python | guiHandling/webHandler.py | Mstpyt/Faceit-Overlay | e76165817bc121fbc570edd874a2c117817e7a12 | [
"MIT"
] | 6 | 2021-04-10T13:36:46.000Z | 2021-08-08T17:35:38.000Z | guiHandling/webHandler.py | kvanja19/Faceit-Overlay | e76165817bc121fbc570edd874a2c117817e7a12 | [
"MIT"
] | 1 | 2021-05-16T17:23:38.000Z | 2021-05-17T19:49:56.000Z | guiHandling/webHandler.py | kvanja19/Faceit-Overlay | e76165817bc121fbc570edd874a2c117817e7a12 | [
"MIT"
] | 2 | 2021-05-13T06:59:20.000Z | 2021-11-02T10:25:50.000Z | """ -------------------------------------------------------------------------------------------------------------------
WEB HANDLING
---------------------------------------------------------------------------------------------------------------------"""
import logging
from dearpygui import core
from config import DBNAME
from database import sqlite3db
from functions import config_functions
def save_web():
    """Read the two browser-open checkboxes from the GUI and persist them
    into the CFG_WEB table (WEB_ONLY, WEB_APP)."""
    web_only = core.get_value("Open in Browser Only##Browser")
    web_app = core.get_value("Open in Browser and App##Browser")
    web_update = web_only, web_app
    sqlite3db.TExecSql(DBNAME, """
                        UPDATE CFG_WEB SET WEB_ONLY = ?,
                                           WEB_APP = ?
                        """, web_update)
def save_font():
    """Persist the font family, font size and background image chosen in the
    GUI into the single-row WEB_PARAMETERS table (insert or update)."""
    logging.info("Start save_font")
    font_family = core.get_value("Font Family##Web")
    font_size = core.get_value("##BrowserTextSize")
    bg_image = core.get_value("##BgImage")
    upd = font_size, font_family, bg_image
    # Upsert: update the existing row if one exists, otherwise insert.
    iRv = config_functions.check_if_config_entry_exists("""
                SELECT COUNT(*) FROM WEB_PARAMETERS
            """)
    if iRv > 0:
        sqlite3db.TExecSql(DBNAME, """
                            UPDATE WEB_PARAMETERS SET FONT_SIZE = ?,
                                                      FONT_FAMILY = ?,
                                                      BG_IMAGE = ?
                            """, upd)
    else:
        sqlite3db.TExecSql(DBNAME, """
                    INSERT INTO WEB_PARAMETERS
                    VALUES (?,?,?)""", upd)
| 37.318182 | 121 | 0.433009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 957 | 0.582826 |
392a607723b67c251d4511eda973648c6b07d74b | 3,959 | py | Python | examples/tensorflow/image_recognition/slim/main.py | daisyden/lpot | d8709bb73ce13cfc0fd760845e0be40af22f5a45 | [
"Apache-2.0"
] | null | null | null | examples/tensorflow/image_recognition/slim/main.py | daisyden/lpot | d8709bb73ce13cfc0fd760845e0be40af22f5a45 | [
"Apache-2.0"
] | null | null | null | examples/tensorflow/image_recognition/slim/main.py | daisyden/lpot | d8709bb73ce13cfc0fd760845e0be40af22f5a45 | [
"Apache-2.0"
] | null | null | null | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import numpy as np
from argparse import ArgumentParser
import tensorflow as tf
# from lpot.adaptor.tf_utils.util import write_graph
from nets_factory import TFSlimNetsFactory
import copy
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.disable_eager_execution()
from inception_v4 import inception_v4, inception_v4_arg_scope
def save(model, path):
    """Serialize a TF graph to *path* as a binary GraphDef.

    model: object exposing as_graph_def() (a tf.Graph); when tuning failed
        the caller may pass a non-graph object, which lands in the
        AttributeError branch below instead of raising.
    path: destination file, opened through TensorFlow's gfile layer so
        GCS/HDFS-style paths also work.
    Errors are reported via print rather than propagated.
    """
    from tensorflow.python.platform import gfile
    f = gfile.GFile(path, 'wb')
    try:
        f.write(model.as_graph_def().SerializeToString())
    except AttributeError as no_model:
        # Quantization produced nothing usable; report which object we got.
        print("None of the quantized models fits the \
accuracy criteria: {0}".format(no_model))
    except Exception as exc:
        print("Unexpected error while saving the model: {0}".format(exc))
def main(_):
    """CLI entry point: quantize (tune) and/or benchmark a TF-Slim model.

    Accepts either a .ckpt checkpoint (topology name is derived from the
    file name and resolved via TFSlimNetsFactory) or a frozen graph path,
    then runs LPOT quantization and/or benchmarking per the flags.
    """
    arg_parser = ArgumentParser(description='Parse args')
    arg_parser.add_argument("--input-graph",
                            help='Specify the slim model',
                            dest='input_graph')
    arg_parser.add_argument("--output-graph",
                            help='Specify tune result model save dir',
                            dest='output_graph')
    arg_parser.add_argument("--config", default=None, help="tuning config")
    arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark')
    arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.')
    args = arg_parser.parse_args()

    factory = TFSlimNetsFactory()
    # user specific model can register to slim net factory
    input_shape = [None, 299, 299, 3]
    factory.register('inception_v4', inception_v4, input_shape, inception_v4_arg_scope)

    if args.input_graph.endswith('.ckpt'):
        # directly get the topology name from input_graph
        # e.g. ".../inception_v4.ckpt" -> "inception_v4"
        topology = args.input_graph.rsplit('/', 1)[-1].split('.', 1)[0]
        # get the model func from net factory
        assert topology in factory.default_slim_models, \
            'only support topology {}'.format(factory.default_slim_models)
        net = copy.deepcopy(factory.networks_map[topology])
        model_func = net.pop('model')
        arg_scope = net.pop('arg_scope')()
        inputs_shape = net.pop('input_shape')
        kwargs = net  # whatever remains is forwarded to the model function
        images = tf.compat.v1.placeholder(name='input', dtype=tf.float32, \
            shape=inputs_shape)
        from lpot.adaptor.tf_utils.util import get_slim_graph
        model = get_slim_graph(args.input_graph, model_func, arg_scope, images, **kwargs)
    else:
        # Non-checkpoint input is passed to LPOT as a plain path.
        model = args.input_graph

    if args.tune:
        from lpot import Quantization
        quantizer = Quantization(args.config)
        q_model = quantizer(model)
        save(q_model, args.output_graph)

    if args.benchmark:
        from lpot import Benchmark
        evaluator = Benchmark(args.config)
        results = evaluator(model=model)
        for mode, result in results.items():
            acc, batch_size, result_list = result
            # result_list holds per-batch latencies; normalize to per-image.
            latency = np.array(result_list).mean() / batch_size

            print('\n{} mode benchmark result:'.format(mode))
            print('Accuracy is {:.3f}'.format(acc))
            print('Batch size = {}'.format(batch_size))
            print('Latency: {:.3f} ms'.format(latency * 1000))
            print('Throughput: {:.3f} images/sec'.format(1./ latency))
if __name__ == '__main__':
    # TF's app wrapper parses absl flags and invokes main(argv).
    tf.compat.v1.app.run()
| 37.349057 | 101 | 0.678202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,390 | 0.351099 |
392cf46d90da25ce8a11ce807453e2474844bf87 | 867 | py | Python | visitors/models.py | maxhamz/prieds_test_hospital_queue_be | 44529f65dcd167caa48c84926e118d86a7d38b92 | [
"MIT"
] | null | null | null | visitors/models.py | maxhamz/prieds_test_hospital_queue_be | 44529f65dcd167caa48c84926e118d86a7d38b92 | [
"MIT"
] | null | null | null | visitors/models.py | maxhamz/prieds_test_hospital_queue_be | 44529f65dcd167caa48c84926e118d86a7d38b92 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Visitor(models.Model):
    """Hospital visitor record captured at registration time."""

    # Single-letter gender codes stored in the database.
    MALE = 'M'
    FEMALE = 'F'
    OTHER = 'X'
    GENDER_OPTIONS = [
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (OTHER, 'Other')
    ]

    # Set automatically on first save; used for queue ordering (see Meta).
    dtRegistered = models.DateTimeField(auto_now_add=True)
    strFullName = models.CharField(max_length=256, blank=False)
    eGender = models.CharField(max_length=2,
                               choices=GENDER_OPTIONS,
                               default=OTHER)  # SELECT M, F, OR X
    dtBirth = models.DateField(max_length=8,
                               auto_now=False,
                               auto_now_add=False)  # YYYY-MM-DD FORMAT
    # Government-issued ID number (e.g. national ID), required.
    strGovtIdNo = models.CharField(max_length=16, blank=False)
    strAddress = models.TextField(default='Indonesia')

    class Meta:
        # Oldest registrations first (first-come-first-served queue).
        ordering = ['dtRegistered']
392daab33fb8dcb9b0b5a07380a98a73e1208a33 | 1,751 | py | Python | muchbettermoments.py | mirca/muchbettermoments | 8cc2bf18ff52abf86151a12358434691bea0857d | [
"MIT"
] | 1 | 2019-07-01T18:25:35.000Z | 2019-07-01T18:25:35.000Z | muchbettermoments.py | mirca/muchbettermoments | 8cc2bf18ff52abf86151a12358434691bea0857d | [
"MIT"
] | null | null | null | muchbettermoments.py | mirca/muchbettermoments | 8cc2bf18ff52abf86151a12358434691bea0857d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["quadratic_2d"]
import numpy as np
def quadratic_2d(data):
    """Sub-pixel centroid of the brightest peak in a 2-D array.

    Fits the quadratic surface
        f(x, y) = a + b*x + c*y + d*x^2 + e*x*y + f*y^2
    to the 3x3 neighbourhood around the array maximum and returns the
    surface's stationary point as (row, column). When the maximum sits on
    the array border (so no full 3x3 patch exists) the integer peak
    position is returned unchanged.

    Args:
        data (2darray): two-dimensional data array.

    Returns:
        tuple: centroid estimate along the row and column axes.
    """
    peak_index = np.argmax(data)
    row, col = np.unravel_index(peak_index, data.shape)
    patch = data[row-1:row+2, col-1:col+2]
    # Closed-form least-squares coefficients for the 3x3 stencil; see
    # Vakili & Hogg (2016) and Teague & Foreman-Mackey (2018) for the
    # derivation (uncertainties could be computed the same way).
    try:
        top, mid, bot = patch[0], patch[1], patch[2]
        a = (-top[0] + 2*top[1] - top[2] + 2*mid[0] + 5*mid[1] + 2*mid[2] -
             bot[0] + 2*bot[1] - bot[2]) / 9
        b = (-top[0] - top[1] - top[2] + bot[0] + bot[1] + bot[2]) / 6
        c = (-top[0] + top[2] - mid[0] + mid[2] - bot[0] + bot[2]) / 6
        d = (top[0] + top[1] + top[2] - mid[0]*2 - mid[1]*2 - mid[2]*2 +
             bot[0] + bot[1] + bot[2]) / 6
        e = (top[0] - top[2] - bot[0] + bot[2]) * .25
        f = (top[0] - 2 * top[1] + top[2] + mid[0] - 2 * mid[1] + mid[2] +
             bot[0] - 2 * bot[1] + bot[2]) / 6
    except IndexError:
        # Peak on the border: the sliced patch is too small to index.
        return (row, col)
    # Stationary point of the quadratic surface:
    # https://en.wikipedia.org/wiki/Quadratic_function
    det = 4 * d * f - e ** 2
    xm = - (2 * f * b - c * e) / det
    ym = - (2 * d * c - b * e) / det
    return (row + xm, col + ym)
| 37.255319 | 82 | 0.503141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.401485 |
392ddea8f0c6de5f8409bf9bfb268b8f61a56946 | 416 | py | Python | script/run_WOA.py | cyy111/metaheuristics | 9d885e4c9e9f39ad22baa9ea5d263d5daa276f88 | [
"Apache-2.0"
] | 104 | 2020-09-07T01:24:19.000Z | 2022-03-30T13:11:21.000Z | script/run_WOA.py | luanedge/metaheuristics | 9d885e4c9e9f39ad22baa9ea5d263d5daa276f88 | [
"Apache-2.0"
] | 3 | 2020-05-12T03:54:16.000Z | 2020-06-06T01:12:31.000Z | script/run_WOA.py | luanedge/metaheuristics | 9d885e4c9e9f39ad22baa9ea5d263d5daa276f88 | [
"Apache-2.0"
] | 40 | 2020-08-30T14:29:37.000Z | 2022-03-30T17:33:26.000Z | from models.multiple_solution.swarm_based.WOA import BaseWOA, BaoWOA
from utils.FunctionUtil import square_function
## Setting parameters
root_paras = {
"problem_size": 30,
"domain_range": [-1, 1],
"print_train": True,
"objective_func": square_function
}
woa_paras = {
"epoch": 100,
"pop_size": 250
}
## Run model
md = BaoWOA(root_algo_paras=root_paras, woa_paras=woa_paras)
md._train__()
| 20.8 | 68 | 0.716346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.257212 |
392e0fa49127c7455cbbc085060a98c4b07b4219 | 600 | py | Python | mmdetection_pipeline/tests/mmdet_test.py | KonstantinSviridov/mmdetection_pipeline | 7e17c4bb48af28713d018e087907f7295ef68d7e | [
"MIT"
] | null | null | null | mmdetection_pipeline/tests/mmdet_test.py | KonstantinSviridov/mmdetection_pipeline | 7e17c4bb48af28713d018e087907f7295ef68d7e | [
"MIT"
] | 2 | 2019-12-13T04:40:34.000Z | 2019-12-13T04:41:19.000Z | mmdetection_pipeline/tests/mmdet_test.py | musket-ml/instance_segmentation_pipeline | 7e17c4bb48af28713d018e087907f7295ef68d7e | [
"MIT"
] | null | null | null | import unittest
from musket_core import projects
from musket_core import parralel
import os
fl=__file__
fl=os.path.dirname(fl)
class TestCoders(unittest.TestCase):
    """End-to-end smoke test: fit the bundled "exp01" sample experiment."""

    def test_basic_network(self):
        # Load the sample project shipped next to this test file.
        pr = projects.Project(os.path.join(fl, "project"))
        exp = pr.byName("exp01")
        tasks = exp.fit()
        # Execute the generated fit tasks on one local worker with one GPU slot.
        executor = parralel.get_executor(1, 1)
        executor.execute(tasks)
        r = exp.result()
        self.assertGreaterEqual(r, 0, "Result should be greater then zero")
        self.assertTrue(isinstance(r, float), "result should be float")
        print(r)
        pass
392e7ae4d68761df65c87509c9deacee55a05db5 | 470 | py | Python | setup.py | CentryPlan/dataclassframe | 2135c35f7a6495e8b482055fd74a2a4ba8c8a90b | [
"MIT"
] | 321 | 2020-11-02T18:02:07.000Z | 2022-03-19T21:40:15.000Z | setup.py | CentryPlan/dataclassframe | 2135c35f7a6495e8b482055fd74a2a4ba8c8a90b | [
"MIT"
] | 3 | 2020-11-04T03:43:10.000Z | 2020-12-30T03:01:24.000Z | setup.py | CentryPlan/dataclassframe | 2135c35f7a6495e8b482055fd74a2a4ba8c8a90b | [
"MIT"
] | 5 | 2020-11-03T08:50:33.000Z | 2020-12-24T21:05:41.000Z | #!/usr/bin/env python3
"""
Based on template: https://github.com/FedericoStra/cython-package-example
"""
from setuptools import setup
# Runtime dependencies are read verbatim from requirements.txt; dev-only
# dependencies come from requirements_dev.txt and are exposed as extras.
with open("requirements.txt") as fp:
    install_requires = fp.read().strip().split("\n")
with open("requirements_dev.txt") as fp:
    dev_requires = fp.read().strip().split("\n")

setup(
    install_requires=install_requires,
    extras_require={
        # pip install pkg[dev] / pkg[docs]
        "dev": dev_requires,
        "docs": ["sphinx", "sphinx-rtd-theme"]
    }
)
392e81963b0ccd94345db1a8d6229e0ef20fd753 | 3,038 | py | Python | Paleo_DB_Rip.py | matt-oak/DinoFinder | 8a66c6da77dae01b6083155d724479e02abb8440 | [
"MIT"
] | null | null | null | Paleo_DB_Rip.py | matt-oak/DinoFinder | 8a66c6da77dae01b6083155d724479e02abb8440 | [
"MIT"
] | null | null | null | Paleo_DB_Rip.py | matt-oak/DinoFinder | 8a66c6da77dae01b6083155d724479e02abb8440 | [
"MIT"
] | null | null | null | #Paleo_DB_Rip.py
#Python script to programmatically web-scrape from paleobiodb.org
#Author: Matt Oakley
#Date: 08/15/2016
# Imports #
from bs4 import BeautifulSoup
from time import sleep
from geopy.geocoders import Nominatim
import urllib2
import pycountry
import wget
import sys
import os.path
import codecs
# Globals #
listed_dinos = ["Tyrannosaurus", "Stegosaurus", "Velociraptor", "Triceratops", "Spinosaurus"]
def retrieve_webpage(dino_name):
    """Download the paleobiodb.org occurrence CSV for *dino_name*.

    Returns the response as a list of lines (header row first).
    Python 2 only (urllib2). NOTE(review): feeding CSV text through
    BeautifulSoup's lxml parser and str() presumably just round-trips the
    text; confirm before simplifying.
    """
    #Retrieve the HTML for the specific dinosaur and return the page in string format
    URL = "https://paleobiodb.org/data1.2/occs/list.txt?base_name=" + dino_name + "&show=loc"
    page = urllib2.urlopen(URL)
    page_str = str(BeautifulSoup(page, "lxml")).splitlines()
    return page_str
def extract_webpage_header(web_page):
    """Return the column names from the CSV header row of *web_page*.

    The first entry of *web_page* is split on double quotes; the bare
    comma tokens that separate quoted fields are dropped. The empty edge
    tokens produced by the split are kept, matching the row layout the
    index-based callers expect.
    """
    header_row = web_page[0]
    fields = []
    for token in header_row.split("\""):
        if token != ",":
            fields.append(token)
    return fields
def construct_location_string(county, state, cc):
    """Build a "County, State, Country" string usable by geopy.

    cc is an ISO-3166 alpha-2 country code resolved via pycountry; returns
    None when it cannot be resolved. The county segment is omitted when
    empty.
    """
    #Convert country-code to full-name of country
    try:
        country = pycountry.countries.get(alpha2 = cc)
        country = str(country.name)
    except KeyError:
        return None

    #Construct location string usable by geopy
    if county != "":
        location = county + ", " + state + ", " + country
        return location
    else:
        location = state + ", " + country
        return location
def construct_GPS_coords(location):
    """Geocode a location string to (latitude, longitude) via Nominatim.

    Returns None (implicitly) when the geocoder finds nothing. Sleeps one
    second per call to respect Nominatim's rate limit.
    """
    #Construct the lat/lon of different locations
    geolocator = Nominatim()
    coords = geolocator.geocode(location)
    sleep(1)  # Nominatim usage policy: at most ~1 request per second
    if coords == None:
        pass
    else:
        return (coords.latitude, coords.longitude)
def parse_locations(web_page):
    """Geocode every occurrence row of the CSV page (Python 2 script).

    Locates the cc/state/county columns via the header, builds a location
    string per data row and returns a list of (lat, lon) tuples; entries
    are None where geocoding failed.
    """
    #Get the indexes of country code, state, and county
    header = extract_webpage_header(web_page)
    index_of_country = header.index("cc")
    index_of_state = header.index("state")
    index_of_county = header.index("county")
    coords_list = []

    #For all locations, get the lat/lon coordinates and output to list
    # Row 0 is the header; the last line is skipped (presumably a trailing
    # blank from splitlines -- TODO confirm against a real response).
    for i in range(1, len(web_page) - 1):
        entry = web_page[i].split("\"")
        entry[:] = [x for x in entry if x != ","]
        country = entry[index_of_country]
        state = entry[index_of_state]
        county = entry[index_of_county]
        location = construct_location_string(county, state, country)
        print location
        #Coords Format: (Lat, Lon)
        coords = construct_GPS_coords(location)
        coords_list.append(coords)
    return coords_list
def output_locations(locations, dino):
    """Write one str()-formatted entry per line to dinosaur_locs/<dino>.txt.

    Each element of *locations* is typically a (lat, lon) tuple, but may be
    None when geocoding failed upstream; both are written via str().
    Fix: the original opened the file without ever closing it (leaked
    handle, data only flushed at interpreter exit) -- a context manager now
    guarantees close/flush.
    """
    filename = "dinosaur_locs/" + dino + ".txt"
    with open(filename, "w") as output_file:
        for location in locations:
            output_file.write(str(location) + "\n")
def check_if_file_exists(dino):
    """Return 1 if dinosaur_locs/<dino>.txt already exists, else 0.

    Used by the driver loop to skip dinosaurs whose location file was
    produced by a previous run.
    """
    target = "dinosaur_locs/" + dino + ".txt"
    return 1 if os.path.isfile(target) else 0
# Main driver: for each listed dinosaur, fetch its occurrence page and
# geocode it, unless a cached output file already exists from a prior run.
# NOTE(review): the page is downloaded even when the cached file makes it
# unnecessary -- the retrieve call could move inside the if-branch.
for i in range(0, len(listed_dinos)):
    file_bool = check_if_file_exists(listed_dinos[i])
    web_page = retrieve_webpage(listed_dinos[i])
    if file_bool == 0:
        locations = parse_locations(web_page)
        output_locations(locations, listed_dinos[i])
    else:
        print "kek"
        continue
continue | 28.392523 | 93 | 0.737986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.26761 |
392fd4d7d4ab0ef07297911fa104492a550448f4 | 10,015 | py | Python | body/body_textEditor.py | XiantaoCheng/Structure | 5a12452dbe03fd37baf3059578cd1dd10f25e161 | [
"MIT"
] | 1 | 2020-01-15T02:02:59.000Z | 2020-01-15T02:02:59.000Z | body/body_textEditor.py | XiantaoCheng/Structure | 5a12452dbe03fd37baf3059578cd1dd10f25e161 | [
"MIT"
] | null | null | null | body/body_textEditor.py | XiantaoCheng/Structure | 5a12452dbe03fd37baf3059578cd1dd10f25e161 | [
"MIT"
] | null | null | null | import sys, re
if __name__=='__main__':
sys.path.append(sys.path[0]+'\\..')
from body.bone import NetP
from body.soul import Karma
from body.body_motor import Motor
from body.body_pool import Pool
from body.body_brain import Brain
from body.body_debugger import Debugger
from tools import tools_sl, tools_basic
from PyQt5.QtWidgets import QTextEdit, QApplication, QMessageBox, QFontDialog
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QTextCursor, QFont
# import matlab.engine
class Editor(QTextEdit):
    """Text-editor widget bound to a NetP point graph.

    The widget displays and edits the text held by a "point" (NetP node)
    and wires up the companion devices: Pool, Motor (compiler), Debugger
    and Brain (screen). Keyboard shortcuts: Ctrl+S save, Ctrl+R run the
    selected code, Ctrl+T debug it, Ctrl+Q return to the main text point.
    """

    def __init__(self,name):
        super().__init__()
        # Point-graph handles; all populated later by initialize().
        self.m_self=None
        self.m_pool=None
        self.m_motor=None
        self.m_debugger=None
        self.m_screen=None
        self.m_plainText=None
        self.m_readPtr=None        # point whose text is currently shown
        self.m_currentFile=''
        self.m_changed=False       # dirty flag reflected in the title bar
        self.textChanged.connect(self.changed)
        # Header marker separating system data in saved files ("system").
        self.m_systemMark='\n-----------系统-----------\n'

    def initialize(self,point):
        """Attach the editor to *point* (creating one when None) and build
        the pool/motor/debugger/screen devices from its sub-points."""
        if point==None:
            point=NetP('editor')
        self.m_self=point
        point.m_dev=self
        point.m_permission=0
        pt_text=tools_basic.getPoint(point,'m_plainText','text')
        pt_pool=tools_basic.getPoint(point,'m_pool','pool')
        pt_motor=tools_basic.getPoint(point,'m_motor','compiler')
        pt_debugger=tools_basic.getPoint(point,'m_debugger','debugger')
        pt_screen=tools_basic.getPoint(point,'m_screen','screen')
        self.modifyPtStruct(pt_debugger,pt_motor,pt_pool)
        self.m_plainText=pt_text
        self.setReadPtr(pt_text)
        self.m_pool=Pool(pt_pool)
        self.m_motor=Motor(pt_motor)
        self.m_debugger=Debugger(pt_debugger)
        self.m_screen=Brain(pt_screen)
        # Screen and debugger receive pool output notifications.
        self.m_pool.register(self.m_screen.m_self)
        self.m_pool.register(self.m_debugger.m_self)
        self.updateByPts()
        self.setFont(QFont('宋体'))
        self.setStyleSheet('font: 20px;')
        self.show()

    def modifyPtStruct(self,pt_debugger,pt_motor,pt_pool):
        """Wire the debugger->motor and motor->pool links in the graph."""
        tools_basic.setPoint(pt_debugger,'m_motor',pt_motor)
        tools_basic.setPoint(pt_motor,'m_source',pt_pool)
        pt_lib=tools_basic.getPoint(pt_pool,'m_lib')
        tools_basic.setPoint(pt_lib,'m_motor',pt_motor)

    def resizeEvent(self, QResizeEvent):
        # Mirror the new geometry into the system points before Qt proceeds.
        self.updateSysPts()
        return super().resizeEvent(QResizeEvent)

    def keyPressEvent(self, QKeyEvent):
        """Handle Ctrl shortcuts (S/R/T/Q) and fall through to QTextEdit."""
        modifier=QApplication.keyboardModifiers()
        if modifier==Qt.ControlModifier:
            if QKeyEvent.key()==Qt.Key_S:
                self.saveAsFile()
            elif QKeyEvent.key()==Qt.Key_R:
                self.runCode()
            elif QKeyEvent.key()==Qt.Key_T:
                self.debugCode()
            elif QKeyEvent.key()==Qt.Key_Q:
                self.setReadPtr(self.m_plainText)
        return super().keyPressEvent(QKeyEvent)

    def openFile(self,fileName):
        """Load *fileName* into the editor; returns False when unreadable."""
        [text1,text2]=self.readFile(fileName)
        if text1==None and text2==None:
            return False
        self.m_currentFile=fileName
        self.loadText(text1,text2)
        self.m_changed=False
        self.updateState()
        return True

    def readFile(self,fileName):
        """Read the file under both gbk and utf-8; a failed decode leaves
        None in that slot. Returns [textGbk, textUtf]."""
        try:
            f=open(fileName,encoding='gbk')
        except:
            print("The file, "+fileName+", doesn't exist.")
            return [None,None]
        try:
            textGbk=f.read()
        except:
            textGbk=None
        f.close()
        f=open(fileName,encoding='utf-8')
        try:
            textUtf=f.read()
        except:
            textUtf=None
        f.close()
        return [textGbk,textUtf]

    def loadText(self,text1,text2):
        """Pick the usable decoding, tokenize it into points, and
        re-initialize the editor from the first point."""
        head=None
        if text1==None:
            code,ni=self.fixFormat(text2)
        elif text2==None:
            code,ni=self.fixFormat(text1)
        else:
            # Both decodings succeeded: prefer the gbk variant unless its
            # system mark lookup failed entirely (n1 == -1).
            code1,n1=self.fixFormat(text1)
            code2,n2=self.fixFormat(text2)
            if n1==-1:
                code=code2
            else:
                code=code1
        list_pt=tools_basic.buildPoints_tokener(code)
        # for point in list_pt:
        #     point.m_permission=0
        #     if point.m_db[0]!=None or point.m_db[1]!=None:
        #         continue
        #     for con in point.m_con:
        #         if con.m_db[1]==point:
        #             break
        #     head=point
        head=list_pt[0]
        self.initialize(head)
        # for point in list_pt:
        #     if point.m_name=='in':
        #         print(point.info(),point.m_permission)

    def fixFormat(self,text):
        """Return (code, ni) where ni is the system-mark position: 0 means
        the file is already new-format; anything else triggers conversion."""
        ni=text.find(self.m_systemMark)
        # old fashion
        if ni!=0:
            # code='editor(,);m_plainText(editor,text);text\"'+code+'\"(,);'
            code=self.transferCode(text)
        # new fashion
        else:
            code=text[len(self.m_systemMark):]
        return code,ni

    def transferCode(self,text):
        """Convert an old-format document into point-definition code."""
        plainText,sysPt,nrmPt=self.takeParts_oldFasion(text)
        code='editor(,);m_plainText(editor,text);text\"'+plainText\
            +'\"(,);m_pool(editor,pool);pool(,);m_contain(pool,points);'+\
            'points\"'+nrmPt+'\"(,);'
        return code

    def takeParts_oldFasion(self,wholeText):
        """Split an old-format file on its "普通" (normal) and "系统"
        (system) markers into [plain text, system part, normal part]."""
        normalMark='\n----------普通----------\n'
        systemMark='\n----------系统----------\n'
        n=wholeText.rfind(normalMark)
        if n==-1:
            return [wholeText,'','']
        s=wholeText.rfind(systemMark,0,n)
        if s==-1:
            return [wholeText,'','']
        return [wholeText[0:s],wholeText[s+len(systemMark):n],wholeText[n+len(normalMark):]]

    def saveAsFile(self,fileName=None):
        """Serialize the system points to *fileName* (default current file).

        NOTE(review): QMessageBox.Warning is an icon constant, not a dialog
        call -- QMessageBox.warning(...) was likely intended -- and execution
        continues into open('') afterwards; confirm and fix separately.
        """
        if fileName==None:
            fileName=self.m_currentFile
        if fileName=='':
            QMessageBox.Warning(self,"Save failed!","Warning: the file name can't be empty")
        text=self.m_systemMark+self.saveText()
        f=open(fileName,'+w')
        f.write(text)
        f.close()
        self.m_currentFile=fileName
        self.m_changed=False
        self.updateState()

    def saveText(self):
        """Render every system point into the standard code text."""
        list_pt=tools_basic.getAllSystemPt(self.m_self)
        return tools_basic.writeStdCode([],list_pt)

    def updateState(self):
        """Refresh the window title: '*' when dirty, then the file basename,
        then the current read point when it is not the main text."""
        title=''
        if self.m_changed==True:
            title='*'
        i=self.m_currentFile.rfind('\\')
        # rfind on a path ending in '\\' would yield an empty basename.
        if i+1==len(self.m_currentFile):
            i=-1
        title+=self.m_currentFile[i+1:]
        if self.m_readPtr!=self.m_plainText:
            title+=': '+self.m_readPtr.info(1)
        self.setWindowTitle(title)

    def changed(self):
        # Slot for textChanged: mark dirty and sync the text into the point.
        self.m_changed=True
        self.updateState()
        if self.m_self!=None:
            # pt_text=tools_basic.getPoint(self.m_self,'m_plainText')
            # pt_text.m_text=self.toPlainText()
            self.m_readPtr.m_text=self.toPlainText()

    def runCode(self):
        """Run the selected lines through the motor and feed the outputs
        into the pool."""
        # complete the selection area
        # Expand the selection to whole lines before extracting the code.
        text=self.toPlainText()
        cursor=self.textCursor()
        s=cursor.selectionStart()
        e=cursor.selectionEnd()
        ns=text.rfind('\n',0,s)+1
        ne=text.find('\n',e,-1)
        cursor=self.selectText(ns,ne)
        # QTextEdit uses U+2029 as paragraph separator; restore newlines.
        code=cursor.selectedText().replace("\u2029",'\n')
        # operate code
        operation_pool=self.m_motor.m_inputs
        if self.m_self not in operation_pool:
            operation_pool.append(self.m_self)
        outputs=self.m_motor.runCode(code)
        operation_pool.remove(self.m_self)
        self.m_pool.input(outputs)

    def debugCode(self):
        """Load the selected lines into the debugger window and show it."""
        # complete the selection area
        text=self.toPlainText()
        cursor=self.textCursor()
        s=cursor.selectionStart()
        e=cursor.selectionEnd()
        ns=text.rfind('\n',0,s)+1
        ne=text.find('\n',e,-1)
        cursor=self.selectText(ns,ne)
        code=cursor.selectedText().replace("\u2029",'\n')
        #debug
        if self.m_debugger.isVisible()==False:
            self.m_debugger.setVisible(True)
        self.m_debugger.reset(code)

    def setReadPtr(self,pt_text):
        # Display *pt_text*'s text and remember it as the edit target.
        self.m_readPtr=pt_text
        self.setPlainText(pt_text.m_text)

    def selectText(self,start,end):
        """Select the character range [start, end) in the widget (end == -1
        selects to the end of the document) and return the cursor."""
        cursor=self.textCursor()
        cursor.movePosition(QTextCursor.Start)
        cursor.movePosition(QTextCursor.Right,QTextCursor.MoveAnchor,start)
        if end==-1:
            cursor.movePosition(QTextCursor.End,QTextCursor.KeepAnchor)
        else:
            cursor.movePosition(QTextCursor.Right,QTextCursor.KeepAnchor,end-start)
        self.setTextCursor(cursor)
        return cursor

    ######## functions interact with points
    def updateSysPts(self):
        # Push the current widget geometry into the x/y/width/height points.
        pt_x=tools_basic.getPoint(self.m_self,'m_x')
        pt_y=tools_basic.getPoint(self.m_self,'m_y')
        pt_height=tools_basic.getPoint(self.m_self,'m_height')
        pt_width=tools_basic.getPoint(self.m_self,'m_width')
        pt_x.m_name=str(self.geometry().x())
        pt_y.m_name=str(self.geometry().y())
        pt_width.m_name=str(self.geometry().width())
        pt_height.m_name=str(self.geometry().height())

    def updateByPts(self):
        # Apply the geometry stored in the system points (with defaults).
        pt_x=tools_basic.getPoint(self.m_self,'m_x','300')
        pt_y=tools_basic.getPoint(self.m_self,'m_y','300')
        pt_height=tools_basic.getPoint(self.m_self,'m_height','600')
        pt_width=tools_basic.getPoint(self.m_self,'m_width','300')
        x=int(pt_x.m_name)
        y=int(pt_y.m_name)
        width=int(pt_width.m_name)
        height=int(pt_height.m_name)
        self.setGeometry(x,y,width,height)
if __name__=="__main__":
    # Stand-alone launch: open the file named on the command line.
    app=QApplication(sys.argv)
    editor=Editor("editor")
    if len(sys.argv)<2:
        print("Invalid file name!")
    else:
        print(sys.argv[1])
        editor.openFile(sys.argv[1])
    sys.exit(app.exec_())
3930fe21eb5cd0ba519b6ebaa5b9946853840b7a | 246 | py | Python | test/test_add_group.py | eugene1smith/homeworks | 47edd36feddf4965b6a9c3d665406ebdf928af85 | [
"Apache-2.0"
] | null | null | null | test/test_add_group.py | eugene1smith/homeworks | 47edd36feddf4965b6a9c3d665406ebdf928af85 | [
"Apache-2.0"
] | null | null | null | test/test_add_group.py | eugene1smith/homeworks | 47edd36feddf4965b6a9c3d665406ebdf928af85 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.group import group
def test_add_group(app):
    # A fully populated group should be created without errors.
    app.group.create(group(name="Name", header="Head", footer="Footer"))
def test_add_empty_group(app):
    # A group with all-empty fields must also be accepted.
    app.group.create(group(name="", header="", footer=""))
| 22.363636 | 72 | 0.670732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.199187 |
3932406768d964a4c7968afb2b7511f4c0d4b671 | 2,569 | py | Python | apps/events/views.py | seanlefevre/openduty | 34ab21117f114ccc808d8b0aa2cb801c819bdb86 | [
"MIT"
] | 145 | 2016-04-11T06:53:13.000Z | 2022-03-22T05:15:49.000Z | apps/events/views.py | seanlefevre/openduty | 34ab21117f114ccc808d8b0aa2cb801c819bdb86 | [
"MIT"
] | 78 | 2017-09-24T10:59:49.000Z | 2022-02-12T07:36:27.000Z | apps/events/views.py | seanlefevre/openduty | 34ab21117f114ccc808d8b0aa2cb801c819bdb86 | [
"MIT"
] | 30 | 2016-04-11T06:53:16.000Z | 2021-12-29T11:39:26.000Z | from django.views.generic import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from schedule.models import Calendar
from schedule.views import CreateEventView, EditEventView, EventMixin
from apps.events.forms import CustomEventForm
class CustomCreateEventView(CreateEventView):
    """Create-event view that exposes the calendar to the template and
    redirects back to the calendar detail page on success."""

    form_class = CustomEventForm
    template_name = 'event/edit.html'

    def get_context_data(self, **kwargs):
        """Add the calendar resolved from the URL slug (404 when missing)."""
        context = super(CustomCreateEventView, self).get_context_data(**kwargs)
        calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
        extra_context = {
            "calendar": calendar,
        }
        context.update(extra_context)
        return context

    def form_valid(self, form):
        """Save the event, flash a confirmation and redirect to the calendar."""
        super(CustomCreateEventView, self).form_valid(form)
        # Bug fix: the confirmation was emitted via messages.error, which
        # renders a success message with error styling in the UI.
        messages.success(self.request, 'Event created successfully.')
        return HttpResponseRedirect(
            reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
        )
class CustomUpdateEventView(EditEventView):
    """Edit-event view that exposes the calendar to the template and
    redirects back to the calendar detail page on success."""

    form_class = CustomEventForm
    template_name = 'event/edit.html'

    def get_context_data(self, **kwargs):
        """Add the calendar resolved from the URL slug (404 when missing)."""
        context = super(CustomUpdateEventView, self).get_context_data(**kwargs)
        calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
        extra_context = {
            "calendar": calendar,
        }
        context.update(extra_context)
        return context

    def form_valid(self, form):
        """Save the edit, flash a confirmation and redirect to the calendar."""
        super(CustomUpdateEventView, self).form_valid(form)
        # Bug fix: the confirmation was emitted via messages.error, which
        # renders a success message with error styling in the UI.
        messages.success(self.request, 'Event edited successfully.')
        return HttpResponseRedirect(
            reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
        )
class CustomDeleteEventView(LoginRequiredMixin, EventMixin, DeleteView):
    """Delete Event"""
    template_name = 'event/delete.html'

    def get_context_data(self, **kwargs):
        # Extend the default context with the event being removed and the
        # calendar it belongs to (404 when the slug does not resolve).
        slug = self.kwargs.get('calendar_slug')
        calendar = get_object_or_404(Calendar, slug=slug)
        context = super(CustomDeleteEventView, self).get_context_data(**kwargs)
        context.update({'event': self.object, 'calendar': calendar})
        return context

    def get_success_url(self):
        # After deletion, return to the owning calendar's detail page.
        return reverse('calendar_details', args=[self.kwargs.get('calendar_slug')])
| 36.183099 | 99 | 0.695601 | 2,145 | 0.834955 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.131958 |
39374b6e345f46dad950a9f9736af1abd9167fd0 | 2,055 | py | Python | examples/experiments_code/amazon_reviews/sentiment_subsampling.py | fossabot/textlytics | d172211316d688604bcd18d3581c3aac26dcc404 | [
"MIT"
] | 26 | 2016-12-05T19:37:27.000Z | 2021-01-03T21:48:23.000Z | examples/experiments_code/amazon_reviews/sentiment_subsampling.py | fossabot/textlytics | d172211316d688604bcd18d3581c3aac26dcc404 | [
"MIT"
] | 3 | 2017-07-15T13:33:18.000Z | 2020-09-21T11:39:37.000Z | examples/experiments_code/amazon_reviews/sentiment_subsampling.py | fossabot/textlytics | d172211316d688604bcd18d3581c3aac26dcc404 | [
"MIT"
] | 14 | 2017-05-29T22:19:35.000Z | 2021-01-03T21:48:24.000Z | import dill
import glob
import csv
import os
from os.path import basename, join
from joblib import Parallel, delayed
# Input: per-domain review CSVs; output: balanced subsamples of stars 1/3/5.
domain_path = '/datasets/amazon-data/new-julian/domains'
domain_subdirectory = 'only-overall-lemma-and-label-sampling-1-3-5'
domain_files = glob.glob(join(domain_path,
                              'only-overall-lemma-and-label/*.csv'))
all_stars_count = {}
output_csv = join(domain_path, domain_subdirectory)
# Create the output directory, tolerating a pre-existing one.
try:
    os.makedirs(output_csv)
except OSError:
    if not os.path.isdir(output_csv):
        raise
def stars(domain_file):
    """Balance one domain CSV by star rating (Python 2 script).

    First pass counts reviews per star (the label is the first CSV field,
    1-5). Second pass writes, for stars 1/3/5 only, at most min(count)
    rows per star into the output directory, so the kept classes are
    equally sized. Returns a stats dict keyed by the output file name.
    """
    stars_count = [0, 0, 0, 0, 0]
    stars_used = [1, 3, 5]
    with open(domain_file, 'r') as f:
        for line in f:
            l = line.replace('\r\n', '').split(',')
            stars_count[int(l[0]) - 1] += 1
    f_name = '{}.csv'.format(basename(domain_file).split('.')[0])
    # The cap comes from the rarest star class overall (all 5 classes).
    min_count = min(stars_count)
    print '\nDomain: {}\nStars count: {}\nMin star count: {}\n'.format(f_name,
                                                                      stars_count,
                                                                      min_count)
    stars_count = [0, 0, 0, 0, 0]
    with open(domain_file, 'r') as f:
        with open(join(output_csv, f_name), 'w') as csv_file:
            sent_writer = csv.writer(csv_file, delimiter=',', quotechar=' ',
                                     quoting=csv.QUOTE_MINIMAL)
            for line in f:
                l = line.replace('\r\n', '').split(',')
                star_label = int(l[0])
                idx = star_label - 1
                stars_count[idx] += 1
                # Keep the row only while under the cap and in a used class.
                if stars_count[idx] <= min_count and star_label in stars_used:
                    sent_writer.writerow(l)
    return {f_name: {'distribution': stars_count,
                     'star_threshold': min_count,
                     'skip_stars': stars_used}
            }
# Subsample every domain in parallel across all cores, then persist the
# per-domain stats with dill.
results = Parallel(n_jobs=-1)(delayed(stars)(i) for i in domain_files)
with open(join(domain_path, domain_subdirectory, 'results.pkl'), 'w') as f:
    dill.dump(results, f)
393790c6c74505311873a4d3f3d4433885613ee4 | 289 | py | Python | image_augmentation/preprocessing/__init__.py | tanzhenyu/image_augmentation | d1f8cc35cf25438556e7934e8e6c78827819ea9d | [
"Apache-2.0"
] | 6 | 2020-08-26T18:54:42.000Z | 2020-11-22T02:45:37.000Z | image_augmentation/preprocessing/__init__.py | tanzhenyu/image_augmentation | d1f8cc35cf25438556e7934e8e6c78827819ea9d | [
"Apache-2.0"
] | 3 | 2020-07-13T13:44:09.000Z | 2022-02-10T02:12:46.000Z | image_augmentation/preprocessing/__init__.py | tanzhenyu/image_augmentation | d1f8cc35cf25438556e7934e8e6c78827819ea9d | [
"Apache-2.0"
] | 1 | 2021-03-24T09:51:22.000Z | 2021-03-24T09:51:22.000Z | from image_augmentation.preprocessing.preprocess import cifar_baseline_augmentation, cifar_standardization
from image_augmentation.preprocessing.preprocess import imagenet_baseline_augmentation, imagenet_standardization
from image_augmentation.preprocessing import efficientnet_preprocess
| 72.25 | 112 | 0.927336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3938f66ebcb4941fe419b94358a44fc04fa3be72 | 10,406 | py | Python | __init__.py | mechanicalnull/sourcery_pane | dd133ebf553d6e05383acfd53f3ef5a1e5ae3a72 | [
"MIT"
] | 3 | 2019-07-15T13:30:43.000Z | 2020-02-02T16:45:41.000Z | __init__.py | mechanicalnull/sourcery_pane | dd133ebf553d6e05383acfd53f3ef5a1e5ae3a72 | [
"MIT"
] | null | null | null | __init__.py | mechanicalnull/sourcery_pane | dd133ebf553d6e05383acfd53f3ef5a1e5ae3a72 | [
"MIT"
] | 4 | 2020-02-02T16:45:48.000Z | 2021-11-01T19:02:28.000Z | from binaryninjaui import DockHandler, DockContextHandler, UIActionHandler, getMonospaceFont
from PySide2 import QtCore
from PySide2.QtCore import Qt
from PySide2.QtWidgets import (QApplication, QHBoxLayout, QVBoxLayout, QLabel, QWidget,
QPlainTextEdit, QSizePolicy, QFormLayout, QPushButton, QLineEdit)
from PySide2.QtGui import (QFont, QFontMetrics, QTextCursor)
from binaryninja import log_warn, log_info
import subprocess
import os
def addr2line(executable, offset):
    """Returns the line of source like "<file>:<line #>:<function_name>"
    Returns "ERROR: str(exception)" or "?" on failure."""
    # addr2line must be on PATH (binutils); -a echoes the address, -f adds
    # the function name, so stdout is: address / function / file:line.
    addr2line_invocation = "addr2line -e %s -a 0x%x -f" % (executable, offset)
    child = subprocess.Popen(addr2line_invocation.split(),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    out, err = child.communicate()
    try:
        if not isinstance(out, str):
            out = out.decode()  # Python 3: pipe output is bytes
        output_lines = out.split("\n")
        #output_address = output_lines[0] # "0x00025ff4"
        function_name = output_lines[1].strip() # e.g. "png_get_current_pass_number"
        source_line = output_lines[2].strip() # e.g. "/home/wintermute/targets/libpng-1.6.36/pngtrans.c:861"
    except Exception as e:
        # Short or malformed output (e.g. addr2line error) lands here.
        log_warn("[!] Exception encountered in addr2line: %s" % str(e))
        log_info(" stdout: %s" % out)
        log_info(" stderr: %s" % err)
        return "ERROR: %s" % str(e)
    # addr2line prints "??" / "?" when the address has no debug info.
    if source_line.startswith("??") or source_line.endswith("?"):
        return "?"
    return ":".join((source_line, function_name))
# Module global in case scripting is needed
panes = []
class SourceryPane(QWidget, DockContextHandler):
def __init__(self, parent, name):
global panes
panes.append(self)
QWidget.__init__(self, parent)
DockContextHandler.__init__(self, self, name)
self.actionHandler = UIActionHandler()
self.actionHandler.setupActionHandler(self)
# Top: Headers with line info
header_layout = QFormLayout()
self.function_info = QLabel("")
self.line_info = QLabel("")
header_layout.addRow(self.tr("Function:"), self.function_info)
header_layout.addRow(self.tr("Line:"), self.line_info)
# Middle: main source display pane
textbox_layout = QVBoxLayout()
self.textbox = QPlainTextEdit()
self.textbox.setLineWrapMode(QPlainTextEdit.LineWrapMode.NoWrap)
self.textbox.setReadOnly(True)
font = getMonospaceFont(self)
self.textbox.setFont(font)
font = QFontMetrics(font)
self.textbox.setMinimumWidth(40 * font.averageCharWidth())
self.textbox.setMinimumHeight(30 * font.lineSpacing())
textbox_layout.addWidget(self.textbox)
# Bottom: buttons for stop/start, and substitution paths
footer_layout = QVBoxLayout()
sync_button_layout = QHBoxLayout()
self.sync_button = QPushButton("Turn Source Sync Off")
sync_button_layout.addWidget(self.sync_button)
path_layout = QFormLayout()
self.original_path = QLineEdit()
self.substitute_path = QLineEdit()
self.substitute_path_button = QPushButton("Do Path Substitution")
path_layout.addRow(self.tr("Original Path:"), self.original_path)
path_layout.addRow(self.substitute_path_button, self.substitute_path)
footer_layout.addLayout(sync_button_layout)
footer_layout.addLayout(path_layout)
# Putting all the child layouts together
layout = QVBoxLayout()
layout.addLayout(header_layout)
layout.addLayout(textbox_layout)
layout.addLayout(footer_layout)
self.setLayout(layout)
# Set up button signals
self.substitute_path_button.clicked.connect(self.do_path_substitution)
self.sync_button.clicked.connect(self.toggle_sync)
# Actual storage variables
self.bv = None
self.filename = None
self.do_sync = True
self.path_substitutions = {}
self.failed_substitutions = []
def do_path_substitution(self):
original_path = self.original_path.text()
new_path = self.substitute_path.text()
if isinstance(original_path, bytes):
original_path = original_path.decode()
new_path = new_path()
if original_path == "":
log_warn("Path substitution error: Original path can't be blank")
elif new_path == "":
if original_path in self.path_substitutions:
old_sub = self.path_substitutions.pop(original_path)
log_info("Removed path substitution: %s -> %s" % (original_path, old_sub))
else:
log_warn("Path substitution error: New substitute path can't be blank")
else:
self.path_substitutions[original_path] = new_path
log_info("Added path substitution: %s -> %s" % (original_path, new_path))
self.failed_substitutions = [] # clear failures when new path added
def toggle_sync(self):
if self.do_sync is True:
self.do_sync = False
self.sync_button.setText("Turn Source Sync On")
else: # self.do_sync is False:
self.do_sync = True
self.sync_button.setText("Turn Source Sync Off")
    def set_text(self, text):
        """Replace the source text box contents with `text`."""
        self.textbox.setPlainText(text)
    def set_line(self, text):
        """Show `text` in the line-info label (e.g. "path:lineno")."""
        self.line_info.setText(text)
    def set_function(self, text):
        """Show the current function name in the function-info label."""
        self.function_info.setText(text)
def check_path_substitution(self, path):
"""Checks for files using path substitutions, going from longest to shortest original path"""
sorted_original_paths = sorted(self.path_substitutions.keys(),
key=lambda k: len(k), reverse=True)
candidate_matches = []
for candidate_path in sorted_original_paths:
if candidate_path in path:
substitute_pattern = self.path_substitutions[candidate_path]
substitute_path = path.replace(candidate_path, substitute_pattern)
substitute_path = os.path.expanduser(substitute_path)
candidate_matches.append(substitute_path)
if os.path.exists(substitute_path):
return substitute_path
# Only log_warn once per file, and only if the user has tried to add translations
if path not in self.failed_substitutions:
if len(self.path_substitutions) > 0:
log_warn("Failed to find substitution for %s" % path)
log_info("Current substitution paths:")
for orig_path, sub_path in self.path_substitutions.items():
log_info(" %s => %s" % (orig_path, sub_path))
log_info("Matching patterns' failed substitute paths:")
for candidate in candidate_matches:
log_info(" %s" % candidate)
self.failed_substitutions.append(path)
return ""
    def update_source(self, current_location):
        """Resolve `current_location` to a source line and refresh the pane.

        Uses addr2line to map the address to "file:line:function", loads the
        file (applying path substitutions when the original path is missing),
        and positions the cursor on the mapped line. On any failure the text
        box shows a diagnostic message instead of source.
        """
        source_line = addr2line(self.filename, current_location)
        line_number_int = -1
        text = ""
        function_name = ""
        if source_line.startswith("?"):
            # addr2line had no mapping for this address.
            line_text = "No source mapping for address 0x%x" % current_location
        elif source_line.startswith("ERROR:"):
            line_text = "%s" % source_line
        else:
            filepath, line_number_str, function_name = source_line.split(":")
            # handle lines like: "16 (discriminator 1)"
            line_number_int = int(line_number_str.split(' ')[0])
            line_text = "%s:%s" % (filepath, line_number_str)
            # Check for the file, then for substitutions
            if not os.path.exists(filepath):
                new_path = self.check_path_substitution(filepath)
                if new_path == "":
                    # No usable substitution: wrap long diagnostic text.
                    self.textbox.setLineWrapMode(QPlainTextEdit.LineWrapMode.WidgetWidth)
                    text = '[!] Source file "%s" not found\n' % filepath
                    text += '[*] Associated line info: "%s"' % source_line
                else:
                    filepath = new_path
            # If we still don't have a good path, the text is set to the correct error
            if os.path.exists(filepath):
                self.textbox.setLineWrapMode(QPlainTextEdit.LineWrapMode.NoWrap)
                with open(filepath, "r") as f:
                    text = f.read()
        self.set_text(text)
        self.set_line(line_text)
        self.set_function(function_name)
        if line_number_int != -1:
            self.set_cursor(line_number_int)
        else:
            self.reset_cursor()
def reset_cursor(self):
doc = self.textbox.document()
cursor = QTextCursor(doc)
cursor.movePosition(QTextCursor.Start)
self.textbox.setTextCursor(cursor)
def set_cursor(self, line_number):
doc = self.textbox.document()
cursor = QTextCursor(doc)
cursor.movePosition(QTextCursor.Start)
for _ in range(line_number - 1):
cursor.movePosition(QTextCursor.Down)
cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
self.textbox.setTextCursor(cursor)
self.textbox.centerCursor()
def notifyOffsetChanged(self, offset):
if self.filename:
if self.do_sync:
self.update_source(offset)
def shouldBeVisible(self, view_frame):
if view_frame is None:
return False
else:
return True
def notifyViewChanged(self, view_frame):
if view_frame is None:
pass
else:
self.bv = view_frame.actionContext().binaryView
self.filename = self.bv.file.original_filename
    def contextMenuEvent(self, event):
        """Show this pane's context menu via the registered action handler."""
        self.m_contextMenuManager.show(self.m_menu, self.actionHandler)
    @staticmethod
    def create_widget(name, parent, data = None):
        """Dock-widget factory: builds the pane for the dock handler.

        `data` is accepted (and ignored) to match the expected factory
        signature.
        """
        return SourceryPane(parent, name)
def addDynamicDockWidget():
    """Register the Sourcery Pane as a dynamic dock widget."""
    main_window = QApplication.allWidgets()[0].window()
    dock_handler = main_window.findChild(DockHandler, '__DockHandler')
    dock_handler.addDockWidget("Sourcery Pane", SourceryPane.create_widget,
                               Qt.RightDockWidgetArea, Qt.Vertical, True)
addDynamicDockWidget()
| 41.624 | 109 | 0.635307 | 8,441 | 0.811167 | 0 | 0 | 105 | 0.01009 | 0 | 0 | 1,667 | 0.160196 |
393910313cbbed71c6e0e72f44096de0f1b773f4 | 291 | py | Python | goodrich/python_primer/c119.py | saurabhkhattry/data-structure-algorithm-design | b56e5a049a1ef326b9214b6cc39d115001351176 | [
"Apache-2.0"
] | null | null | null | goodrich/python_primer/c119.py | saurabhkhattry/data-structure-algorithm-design | b56e5a049a1ef326b9214b6cc39d115001351176 | [
"Apache-2.0"
] | null | null | null | goodrich/python_primer/c119.py | saurabhkhattry/data-structure-algorithm-design | b56e5a049a1ef326b9214b6cc39d115001351176 | [
"Apache-2.0"
] | null | null | null | """
C 1.19
---------------------------------
Problem Statement : Demonstrate how to use Python’s list comprehension syntax to produce
the list [ a , b , c , ..., z ], but without having to type all 26 such
characters literally.
Author : Saurabh
"""
print([chr(x + 97) for x in range(26)])
| 24.25 | 88 | 0.604811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.853242 |
3939aa1d856a574c19f27e62b940f244af52e4da | 596 | py | Python | Lib/compiler/readonly/util.py | isabella232/cinder-1 | 428669a9a925287f192ab361226e5a8ca3fb74d9 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/compiler/readonly/util.py | isabella232/cinder-1 | 428669a9a925287f192ab361226e5a8ca3fb74d9 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/compiler/readonly/util.py | isabella232/cinder-1 | 428669a9a925287f192ab361226e5a8ca3fb74d9 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | from __future__ import annotations
from ast import AST, Subscript, Name, Call
# Marker names recognized in source when detecting readonly constructs.
READONLY_ANNOTATION: str = "Readonly"
READONLY_CALL: str = "readonly"
READONLY_FUNC: str = "readonly_func"


def is_readonly_annotation(node: AST) -> bool:
    """Return True for a `Readonly[...]` subscript annotation node."""
    if not isinstance(node, Subscript):
        return False
    base = node.value
    return isinstance(base, Name) and base.id == READONLY_ANNOTATION


def is_readonly_wrapped(node: AST) -> bool:
    """Return True for a bare `readonly` name node."""
    return isinstance(node, Name) and node.id == READONLY_CALL


def is_readonly_func(node: AST) -> bool:
    """Return True for a bare `readonly_func` name node."""
    return isinstance(node, Name) and node.id == READONLY_FUNC
| 24.833333 | 62 | 0.714765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.058725 |
393a060942d745ceccf01b98ea1e8e8466b9fafc | 11,360 | py | Python | test/Velvet_server_test.py | kbaseapps/Velvet | 00afa308d7b0f1b87b53446b9b3a96dfddaca7ac | [
"MIT"
] | 1 | 2020-01-13T18:53:30.000Z | 2020-01-13T18:53:30.000Z | test/Velvet_server_test.py | kbaseapps/Velvet | 00afa308d7b0f1b87b53446b9b3a96dfddaca7ac | [
"MIT"
] | 3 | 2017-08-08T23:36:26.000Z | 2019-12-06T22:40:09.000Z | test/Velvet_server_test.py | kbaseapps/Velvet | 00afa308d7b0f1b87b53446b9b3a96dfddaca7ac | [
"MIT"
] | 4 | 2017-08-08T20:39:39.000Z | 2019-02-18T14:45:13.000Z | # -*- coding: utf-8 -*-
import os # noqa: F401
import os.path
import shutil
import time
import unittest
from configparser import ConfigParser
from os import environ
from pprint import pformat
from pprint import pprint # noqa: F401
from Velvet.VelvetImpl import Velvet
from Velvet.VelvetServer import MethodContext
from Velvet.authclient import KBaseAuth as _KBaseAuth
from installed_clients.ReadsUtilsClient import ReadsUtils
from installed_clients.WorkspaceClient import Workspace as workspaceService
from installed_clients.baseclient import ServerError
class VelvetTest(unittest.TestCase):
    """Integration tests for the Velvet assembler KBase SDK module.

    setUpClass builds the workspace/auth/reads-utils service clients from the
    deployment config; the tests stage an example paired-end read library and
    exercise the velveth/velvetg/velvet apps against it.
    """

    @classmethod
    def setUpClass(cls):
        """Build service clients and an authenticated call context once."""
        token = environ.get('KB_AUTH_TOKEN', None)
        cls.token = token
        config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('Velvet'):
            print(nameval[0] + '=' + nameval[1])
            cls.cfg[nameval[0]] = nameval[1]
        # Getting username from Auth profile for token
        authServiceUrl = cls.cfg['auth-service-url']
        auth_client = _KBaseAuth(authServiceUrl)
        user_id = auth_client.get_user(token)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'Velvet',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = workspaceService(cls.wsURL, token=token)
        cls.serviceImpl = Velvet(cls.cfg)
        cls.scratch = cls.cfg['scratch']
        cls.callback_url = os.environ['SDK_CALLBACK_URL']
        cls.shockURL = cls.cfg['shock-url']
        cls.handleURL = cls.cfg['handle-service-url']
        cls.readUtilsImpl = ReadsUtils(cls.callback_url, token=cls.token)
        cls.staged = {}
        cls.nodes_to_delete = []
        cls.handles_to_delete = []
        #cls.setupTestData()

    @classmethod
    def tearDownClass(cls):
        """Delete the test workspace if one was created."""
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')

    def getWsClient(self):
        """Return the shared workspace service client."""
        return self.__class__.wsClient

    def getWsName(self):
        """Return the test workspace name, creating the workspace on first use."""
        if hasattr(self.__class__, 'wsName'):
            return self.__class__.wsName
        suffix = int(time.time() * 1000)
        wsName = "test_Velvet_" + str(suffix)
        ret = self.getWsClient().create_workspace({'workspace': wsName})  # noqa
        self.__class__.wsName = wsName
        return wsName

    def getImpl(self):
        """Return the Velvet service implementation under test."""
        return self.__class__.serviceImpl

    def getContext(self):
        """Return the authenticated method-call context."""
        return self.__class__.ctx

    # borrowed from Megahit - call this method to get the WS object info of a Paired End Library (will
    # upload the example data if this is the first time the method is called during tests)
    def getPairedEndLibInfo(self):
        """Upload (once per test run) and return object info for the example PE library."""
        if hasattr(self.__class__, 'pairedEndLibInfo'):
            return self.__class__.pairedEndLibInfo
        # 1) upload files to shock
        shared_dir = "/kb/module/work/tmp"
        #forward_data_file = '../work/small.forward.fq'
        forward_data_file = 'data/small.forward.fq'
        #forward_data_file = '../work/GW456A_trim_reads_unpaired_rev.single.fastq'
        forward_file = os.path.join(shared_dir, os.path.basename(forward_data_file))
        shutil.copy(forward_data_file, forward_file)
        #reverse_data_file = '../work/small.reverse.fq'
        reverse_data_file = 'data/small.reverse.fq'
        reverse_file = os.path.join(shared_dir, os.path.basename(reverse_data_file))
        shutil.copy(reverse_data_file, reverse_file)
        ru = ReadsUtils(os.environ['SDK_CALLBACK_URL'])
        paired_end_ref = ru.upload_reads({'fwd_file': forward_file,
                                          #'rev_file': reverse_file,
                                          'sequencing_tech': 'artificial reads',
                                          'interleaved': 0, 'wsname': self.getWsName(),
                                          'name': 'test.pe.reads'})['obj_ref']
        new_obj_info = self.wsClient.get_object_info_new({'objects': [{'ref': paired_end_ref}]})
        self.__class__.pairedEndLibInfo = new_obj_info[0]
        print('paired reads uploaded:\n')
        pprint(pformat(new_obj_info))
        return new_obj_info[0]

    @classmethod
    def make_ref(cls, object_info):
        """Build a 'wsid/objid/version' reference string from an object_info tuple.

        Fixed: this classmethod's first parameter was misleadingly named
        `self`; it receives the class, so it is now `cls`.
        """
        return str(object_info[6]) + '/' + str(object_info[0]) + \
               '/' + str(object_info[4])

    # Uncomment to skip this test
    @unittest.skip("skipped test_run_velveth")
    def test_velveth(self):
        """Run exec_velveth on three read channels and check its output files."""
        # get the test data
        out_folder = os.path.join(self.scratch, 'velvet_output_dir')
        if not os.path.exists(out_folder):
            os.makedirs(out_folder)
        rc1 = {
            'read_type': 'long',
            'file_format': 'fastq.gz',
            'file_layout': 'interleaved',
            'read_file_info': {
                'read_file_name': 'ecoli_ref-5m-trim.fastq.gz'
            }
        }
        rc2 = {
            'read_type': 'longPaired',
            'file_format': 'fasta.gz',
            'file_layout': 'interleaved',
            'read_file_info': {
                'read_file_name': 'ecoli-reads-5m-dn-paired.fa.gz'
            }
        }
        rc3 = {
            'read_type': 'shortPaired',
            'file_format': 'fastq',
            'file_layout': 'separate',
            'read_file_info': {
                'read_file_name': 'small.reverse.fq',
                'left_file': 'small.forward.fq',
                'right_file': 'small.reverse.fq',
            }
        }
        pe_lib_info = self.getPairedEndLibInfo()
        print(pe_lib_info)
        obj_ids = [{'ref': pe_lib_info[7] + '/' + pe_lib_info[1]}]
        ws_info = self.wsClient.get_object_info_new({'objects': obj_ids})
        reads_params = []
        reftoname = {}
        for wsi, oid in zip(ws_info, obj_ids):
            ref = oid['ref']
            reads_params.append(ref)
            obj_name = wsi[1]
            reftoname[ref] = wsi[7] + '/' + obj_name
        readcli = ReadsUtils(self.callback_url, token=self.token)
        typeerr = ('Supported types: KBaseFile.SingleEndLibrary ' +
                   'KBaseFile.PairedEndLibrary ' +
                   'KBaseAssembly.SingleEndLibrary ' +
                   'KBaseAssembly.PairedEndLibrary')
        try:
            reads = readcli.download_reads({'read_libraries': reads_params,
                                            'interleaved': 'false',
                                            'gzipped': None
                                            })['files']
        except ServerError as se:
            print('logging stacktrace from dynamic client error')
            print(se.data)
            if typeerr in se.message:
                prefix = se.message.split('.')[0]
                raise ValueError(
                    prefix + '. Only the types ' +
                    'KBaseAssembly.PairedEndLibrary ' +
                    'and KBaseFile.PairedEndLibrary are supported')
            else:
                raise
        print('Got reads data from converter:\n' + pformat(reads))
        reads_data = []
        for ref in reads:
            reads_name = reftoname[ref]
            f = reads[ref]['files']
            seq_tech = reads[ref]["sequencing_tech"]
            if f['type'] == 'interleaved':
                reads_data.append({'fwd_file': f['fwd'], 'type': 'interleaved',
                                   'seq_tech': seq_tech})
            elif f['type'] == 'paired':
                reads_data.append({'fwd_file': f['fwd'], 'rev_file': f['rev'],
                                   'type': 'separated', 'seq_tech': seq_tech})
            elif f['type'] == 'single':
                reads_data.append({'fwd_file': f['fwd'], 'type': 'single',
                                   'seq_tech': seq_tech})
            else:
                raise ValueError('Something is very wrong with read lib' + reads_name)
        params = {
            'workspace_name': pe_lib_info[7],
            'out_folder': out_folder,
            'hash_length': 21,
            'reads_channels': [rc1, rc2, rc3]  # tests passed
            #'reads_files': reads_data
        }
        result = self.getImpl().exec_velveth(params)
        self.assertTrue(os.path.isfile(os.path.join(self.scratch, params['out_folder'] + '/Roadmaps')))
        self.assertTrue(os.path.isfile(os.path.join(self.scratch, params['out_folder'] + '/Sequences')))
        print('RESULT from velveth is saved in:\n' + os.path.join(self.scratch, params['out_folder']))
        pprint('Returned value by Velveth is: ' + str(result))
        return result

    # Uncomment to skip this test
    @unittest.skip("skipped test_run_velvetg")
    def test_velvetg(self):
        """Run run_velvetg and check that its graph/log files were produced."""
        # run velvetg
        #work_folder = self.velveth()[0]
        #print "Returned work folder from velveth call: " + work_folder
        params = {
            'workspace_name': self.getWsName(),
            'output_contigset_name': 'test_contigset',
            'min_contig_length': 500,
            'cov_cutoff': 5.2
        }
        result = self.getImpl().run_velvetg(self.getContext(), params)
        self.assertTrue(os.path.isfile(os.path.join(self.scratch, params['wk_folder'] + '/LastGraph')))
        self.assertTrue(os.path.isfile(os.path.join(self.scratch, params['wk_folder'] + '/Log')))
        print('RESULT from velvetg is saved in:\n' + os.path.join(self.scratch, params['wk_folder']))
        pprint('Returned value by Velvetg is: ' + str(result))
        return result

    # Uncomment to skip this test
    # HIDE @unittest.skip("skipped test_run_velvet")
    def test_run_velvet(self):
        """Run the full velvet app and validate the generated report object."""
        # get the test data
        pe_lib_info = self.getPairedEndLibInfo()
        pprint(pe_lib_info)
        # velvet parameters
        params = {
            'workspace_name': self.getWsName(),
            'output_contigset_name': 'Velvet_test_contigset',
            'hash_length': 21,
            'read_libraries': [self.make_ref(pe_lib_info)],
            'min_contig_length': 300,
            'cov_cutoff': 5.2,
            'read_trkg': '',
            'amos_file': 'yes',
            'exp_cov': 21.3,
            'ins_length': 400
        }
        result = self.getImpl().run_velvet(self.getContext(), params)
        # Fixed idiom: `x is not None` instead of `not x is None`.
        if result[0]['report_ref'] is not None:
            rep = self.wsClient.get_objects2({'objects': [{'ref': result[0]['report_ref']}]})['data'][0]
            print('REPORT object:')
            pprint(rep)
            self.assertEqual(rep['info'][1].rsplit('_', 1)[0], 'kb_velvet_report')
            self.assertEqual(rep['info'][2].split('-', 1)[0], 'KBaseReport.Report')
        else:
            print('Velvet failed!')
| 40 | 108 | 0.56118 | 10,799 | 0.950616 | 0 | 0 | 7,229 | 0.636356 | 0 | 0 | 3,392 | 0.298592 |
393bca90bdc4ef1bbf86856f262735858fce5471 | 681 | py | Python | scale/product/apps.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 121 | 2015-11-18T18:15:33.000Z | 2022-03-10T01:55:00.000Z | scale/product/apps.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 1,415 | 2015-12-23T23:36:04.000Z | 2022-01-07T14:10:09.000Z | scale/product/apps.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 66 | 2015-12-03T20:38:56.000Z | 2020-07-27T15:28:11.000Z | """Defines the application configuration for the product application"""
from __future__ import unicode_literals
from django.apps import AppConfig
class ProductConfig(AppConfig):
    """Configuration for the product application"""

    # Django app identity: import path, unique label, and display name.
    name = 'product'
    label = 'product'
    verbose_name = 'Product'

    def ready(self):
        """Registers the product implementations with other applications."""
        # Imported here (not at module level) so the Django app registry is
        # fully populated before the registration side effect runs.
        from job.configuration.data.data_file import DATA_FILE_STORE
        from product.configuration.product_data_file import ProductDataFileStore
        # Register product files for the data file store
        DATA_FILE_STORE['DATA_FILE_STORE'] = ProductDataFileStore()
| 32.428571 | 80 | 0.743025 | 531 | 0.779736 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.408223 |
393bd4af7328cb9c5923034c08fd8225175f6552 | 1,248 | py | Python | lattly_tests/converter_tests.py | yfarrugia/lattly | 9c8d02ece253d9f61b09d66bc87b097a15970619 | [
"BSD-2-Clause"
] | null | null | null | lattly_tests/converter_tests.py | yfarrugia/lattly | 9c8d02ece253d9f61b09d66bc87b097a15970619 | [
"BSD-2-Clause"
] | null | null | null | lattly_tests/converter_tests.py | yfarrugia/lattly | 9c8d02ece253d9f61b09d66bc87b097a15970619 | [
"BSD-2-Clause"
] | null | null | null | __author__ = 'yanikafarrugia'
import unittest
import lattly_service.converter
class ConverterTests(unittest.TestCase):
    """Unit tests for the lattly Converter's angle/coordinate conversions."""

    def test_degrees_to_radians(self):
        converter = lattly_service.converter.Converter
        rad = converter.degrees_to_radians(120)
        self.assertEqual(rad, 2.0943951023931953)
        self.assertIsNotNone(rad)
        self.assertTrue(rad > 2)

    def test_radians_to_degrees(self):
        converter = lattly_service.converter.Converter
        deg = converter.radians_to_degrees(1.57)
        self.assertIsNotNone(deg)
        self.assertTrue(deg < 90.0)
        self.assertTrue(deg > 89.9)
        self.assertEqual(deg, 89.954373835539243)

    def test_radians_to_cartesian(self):
        converter = lattly_service.converter.Converter
        car = converter.radians_to_cartesian(0.73091096, -1.5294285)
        self.assertIsNotNone(car)
        self.assertTrue(car[0] > 0.03079231)
        self.assertTrue(car[1] < -0.74392960)
        self.assertTrue(car[2] > 0.66754818)

    def test_cartesian_to_radians(self):
        converter = lattly_service.converter.Converter
        carty = [0.12824063, -0.75020731, 0.64125282]
        rad = converter.cartesian_to_radians(carty)
        self.assertIsNotNone(rad)
        self.assertTrue(rad[0] > 0.70015084)
        self.assertTrue(rad[1] < -1.40149245)
if __name__ == '__main__':
    # Run this module's tests verbosely when executed directly.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(ConverterTests))
| 32 | 87 | 0.776442 | 1,017 | 0.814904 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.020833 |
393cbe1ff632625ba7a044d6e1e2447604c538f6 | 527 | py | Python | setup.py | halfstrik/vindinium-client | 41d4a25e0c72f80eb08335d29dfd38c7dc9f17b8 | [
"BSD-2-Clause"
] | null | null | null | setup.py | halfstrik/vindinium-client | 41d4a25e0c72f80eb08335d29dfd38c7dc9f17b8 | [
"BSD-2-Clause"
] | null | null | null | setup.py | halfstrik/vindinium-client | 41d4a25e0c72f80eb08335d29dfd38c7dc9f17b8 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Read the long description and license text from the repository so the
# package metadata stays in sync with the checked-in files.
with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    # Renamed from `license`: that name shadowed the `license` builtin.
    license_text = f.read()

setup(
    name='vindinium-client',
    version='0.1.0',
    description='Client for Vindinium.org',
    long_description=readme,
    author='Sergey Petrunin',
    author_email='halfstrik@gmail.com',
    url='https://github.com/halfstrik/vendinium-client',
    license=license_text,
    packages=find_packages(),
    install_requires=['requests==2.18.4'],
)
| 21.958333 | 56 | 0.662239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 197 | 0.373814 |
393d67b0059464a2e4620d4516cb5c2070a663d8 | 2,012 | py | Python | forte/processors/tests/machine_translation_processor_test.py | tcl326/forte | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | [
"Apache-2.0"
] | null | null | null | forte/processors/tests/machine_translation_processor_test.py | tcl326/forte | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | [
"Apache-2.0"
] | null | null | null | forte/processors/tests/machine_translation_processor_test.py | tcl326/forte | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | [
"Apache-2.0"
] | null | null | null | """This module tests Machine Translation processor."""
import unittest
import os
import tempfile
import shutil
from ddt import ddt, data, unpack
from texar.torch import HParams
from forte.pipeline import Pipeline
from forte.data.readers import MultiPackSentenceReader
from forte.processors import MicrosoftBingTranslator
from ft.onto.base_ontology import Token, Sentence
@unittest.skip("BingTranslator will be moved into examples. A texar model will "
               "be used to write NMT processor.")
@ddt
class TestMachineTranslationProcessor(unittest.TestCase):
    """End-to-end test of the Microsoft Bing translation processor.

    Skipped at class level (see decorator); kept as a reference pipeline
    configuration until an NMT-based processor replaces it.
    """
    def setUp(self):
        # Fresh scratch directory per test; removed in tearDown.
        self.test_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    # ddt: @data supplies the German input sentences; @unpack splits the
    # tuple into the `texts` argument.
    @data((["Hallo, Guten Morgen",
            "Das ist Forte. Ein tool für NLP"],))
    @unpack
    def test_pipeline(self, texts):
        """Translate German text files to English through the forte pipeline."""
        # Write each input sentence to its own numbered file in the scratch dir.
        for idx, text in enumerate(texts):
            file_path = os.path.join(self.test_dir, f"{idx+1}.txt")
            with open(file_path, 'w') as f:
                f.write(text)
        nlp = Pipeline()
        reader_config = HParams({"input_pack_name": "input",
                                 "output_pack_name": "output"},
                                MultiPackSentenceReader.default_hparams())
        nlp.set_reader(reader=MultiPackSentenceReader(), config=reader_config)
        # Translate de -> en, reading the "input" pack and writing the
        # translation into a new "result" pack.
        translator_config = HParams(
            {"src_language": "de", "target_language": "en",
             "in_pack_name": "input", "out_pack_name": "result"}, None)
        nlp.add_processor(MicrosoftBingTranslator(),
                          config=translator_config)
        nlp.initialize()
        english_results = ["Hey good morning", "This is Forte. A tool for NLP"]
        # Each processed multipack must carry all three packs and the expected
        # English translation (reader appends a trailing newline).
        for idx, m_pack in enumerate(nlp.process_dataset(self.test_dir)):
            self.assertEqual(set(m_pack._pack_names),
                             set(["input", "output", "result"]))
            self.assertEqual(m_pack.get_pack("result").text,
                             english_results[idx] + "\n")
| 37.259259 | 80 | 0.628728 | 1,502 | 0.74615 | 0 | 0 | 1,638 | 0.813711 | 0 | 0 | 441 | 0.219076 |
393e4214a661900f13fdc9d5d96fe6733a98f9a7 | 1,715 | py | Python | autodriver/src/autodriver/image_capture.py | rel1c/robocar | 6e83391b84873781c839cfc57a9fc1a49f641dbb | [
"MIT"
] | null | null | null | autodriver/src/autodriver/image_capture.py | rel1c/robocar | 6e83391b84873781c839cfc57a9fc1a49f641dbb | [
"MIT"
] | null | null | null | autodriver/src/autodriver/image_capture.py | rel1c/robocar | 6e83391b84873781c839cfc57a9fc1a49f641dbb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from models.ros_publisher import ROSPublisher
class ImageCapture(ROSPublisher):
    """Captures and converts openCV image data to ROS Image messages."""

    def __init__(self, name, topic, width, height, frame_rate=32, rotation=180):
        super(ImageCapture, self).__init__(name, topic, Image, frame_rate)
        # Configure the Pi camera and the OpenCV <-> ROS message converter.
        self.lens = PiCamera(resolution=(width, height), framerate=frame_rate)
        self.lens.rotation = rotation
        self.converter = CvBridge()

    def start(self):
        super(ImageCapture, self).start()

    def publish(self):
        """Continuously capture frames and publish them as ROS Image messages."""
        buffer = PiRGBArray(self.lens, size=self.lens.resolution)
        while not rospy.is_shutdown():
            for frame in self.lens.capture_continuous(buffer, format='bgr', use_video_port=True):
                # Convert the raw numpy frame into a ROS 'bgr8' Image message.
                msg = self.converter.cv2_to_imgmsg(frame.array, 'bgr8')
                try:
                    self.sender.publish(msg)
                except CvBridgeError as e:
                    rospy.logerr('Image Capture error: %s', e)
                    # figure out how to exit from node (can't just call return)
                # Reset the capture buffer so the next frame starts at offset 0.
                buffer.truncate(0)

    def __del__(self):
        self.lens.close()
if __name__ == '__main__':
    # Stand-alone entry point: stream 320x240 frames from the Pi camera.
    node = ImageCapture('image_capture', 'image_data', 320, 240)
    rospy.loginfo('Image capture started')
    node.start()
    node.publish()
| 35 | 107 | 0.64898 | 1,293 | 0.753936 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.212245 |
39406eb207149313aba4ec89a02427f69ca6fbfe | 612 | py | Python | lesson6/solution_simple_functions.py | vinaymayar/python-game-workshop | e990f51815c2080a0d702c9d90dac8e8c2a35d45 | [
"MIT"
] | 1 | 2016-10-11T19:27:08.000Z | 2016-10-11T19:27:08.000Z | lesson6/solution_simple_functions.py | vinaymayar/python-game-workshop | e990f51815c2080a0d702c9d90dac8e8c2a35d45 | [
"MIT"
] | null | null | null | lesson6/solution_simple_functions.py | vinaymayar/python-game-workshop | e990f51815c2080a0d702c9d90dac8e8c2a35d45 | [
"MIT"
] | null | null | null | """lesson6/solution_simple_functions.py
Contains solutions for simple functions.
"""
# Exercise 1: Write a function that prints your name and try calling it.
# Work in this file and not in the Python shell. Defining functions in
# a Python shell is difficult. Remember to name your function something
# that indicates its purpose.
def print_my_name():
    """Print the author's name once (Exercise 1)."""
    print("Vinay Mayar")
print_my_name()
# Exercise 2: Write a function that uses your function from Exercise 1
# to print your name 10 times.
def print_my_name_ten_times():
    """Print the author's name ten times by reusing print_my_name (Exercise 2)."""
    for _ in range(10):
        print_my_name()
print_my_name_ten_times()
| 24.48 | 72 | 0.751634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 443 | 0.723856 |
39407878cc72837d87538645a4bdabbbbb973ff4 | 21,829 | py | Python | pydl/tests/test_rnn.py | nash911/PyDL | b0b6f599184c0046f503b9ee1703dc3dfe9a89f2 | [
"MIT"
] | null | null | null | pydl/tests/test_rnn.py | nash911/PyDL | b0b6f599184c0046f503b9ee1703dc3dfe9a89f2 | [
"MIT"
] | null | null | null | pydl/tests/test_rnn.py | nash911/PyDL | b0b6f599184c0046f503b9ee1703dc3dfe9a89f2 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [Avinash Ranganath]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import unittest
import numpy as np
import numpy.testing as npt
import itertools
from collections import OrderedDict
import copy
from pydl.nn.rnn import RNN
from pydl import conf
class TestRNN(unittest.TestCase):
    def test_score_fn(self):
        """RNN.score_fn: check the pre-activation score of a recurrent step.

        With the hidden weights all ones and the state seeded at zero, two
        steps on a constant all-ones input have a closed form:
        step 1 gives h1 = x.wx + b (the zero state kills the recurrent term),
        and step 2 adds sum(h1) broadcast across neurons, i.e.
        (sum(wx) + sum(b)) + x.wx + b.
        """
        def test(inp, w, seq_len, true_out, bias=False):
            # Drive `seq_len` score-function steps, feeding each output back
            # in as the hidden state 'h'.
            num_neur = w['hidden'].shape[0]
            rnn = RNN(inp, num_neur, w, bias, seq_len)
            out_rnn = np.zeros((1, num_neur), dtype=conf.dtype)
            for _ in range(seq_len):
                out_rnn = rnn.score_fn({'h': out_rnn, 'inp': inp})
            npt.assert_almost_equal(out_rnn, true_out, decimal=5)
        # Manually calculated
        # -------------------
        X = np.ones((1, 3), dtype=conf.dtype)
        wh = np.ones((7, 7), dtype=conf.dtype)
        wx = np.random.rand(3, 7)
        w = {'hidden': wh, 'inp': wx}
        bias = np.random.rand(7)
        # Closed-form two-step expectation described in the docstring.
        true_out = np.array([np.sum(wx) + np.sum(bias)] * 7).reshape(1, -1) + \
            np.sum(wx, axis=0, keepdims=True) + bias
        test(X, w, seq_len=2, true_out=true_out, bias=bias)
        # Combinatorial Test Cases
        # ------------------------
        feature_size = [1, 2, 3, 5, 6, 11]
        num_neurons = [1, 2, 3, 5, 6, 11]
        scale = [1e-6, 1e-3, 1e-1, 1e-0, 2, 3, 10]
        batch = 1
        for feat, neur, scl in list(itertools.product(feature_size, num_neurons, scale)):
            X = np.ones((batch, feat), dtype=conf.dtype)
            wh = np.ones((neur, neur), dtype=conf.dtype)
            wx = np.random.rand(feat, neur) * scl
            w = {'hidden': wh, 'inp': wx}
            bias = np.random.rand(neur) * scl
            true_out = np.array([np.sum(wx) + np.sum(bias)] * neur).reshape(1, -1) + \
                np.sum(wx, axis=0, keepdims=True) + bias
            test(X, w, seq_len=2, true_out=true_out, bias=bias)
    def test_forward(self):
        """RNN.forward: compare full-sequence outputs against a NumPy replay.

        For every combination of sequence length, batch reduction, feature
        size, neuron count, input encoding (one-hot vs uniform), weight scale,
        dropout, architecture ('many_to_many' vs 'many_to_one') and tuned
        initial hidden state, the expected per-step outputs are recomputed by
        hand for each activation (Linear, Sigmoid, Tanh, ReLU, Softmax) and
        checked against the layer.

        NOTE(review): the Linear and ReLU replays scale the dropout mask by
        1/p (inverted dropout) while Sigmoid/Tanh/Softmax do not — presumably
        mirroring the layer's own behavior; confirm against pydl.nn.rnn.
        """
        def test(inp, w, seq_len, true_out, bias=False, init_h_state=None, actv_fn='Sigmoid',
                 p=None, mask=None, architecture_type='many_to_many'):
            # Build the layer, optionally seed a tunable initial hidden state,
            # run one forward pass, and compare every keyed time-step output.
            num_neur = w['hidden'].shape[0]
            rnn = RNN(inp, num_neur, w, bias, seq_len=seq_len, activation_fn=actv_fn,
                      architecture_type=architecture_type, dropout=p,
                      tune_internal_states=(False if init_h_state is None else True))
            if init_h_state is not None:
                rnn.init_hidden_state = init_h_state
            rnn.reset_internal_states()
            out_rnn = rnn.forward(inp, mask=mask)
            # Check if the output has the right keys
            npt.assert_equal(out_rnn.keys(), true_out.keys())
            for k, v in out_rnn.items():
                npt.assert_almost_equal(v, true_out[k], decimal=5)
        # Combinatorial Test Cases
        # ------------------------
        sequence_length = [1, 2, 3, 5, 6, 11]
        reduce_size = [0, 1]
        feature_size = [1, 2, 3, 5, 6, 11]
        num_neurons = [1, 2, 3, 5, 6, 11]
        one_hot = [True, False]
        scale = [1e-6, 1e-3, 1e-1, 1e-0, 2]
        dropout = [True, False]
        architecture_type = ['many_to_many', 'many_to_one']
        tune_internal_states = [True, False]
        for seq_len, r_size, feat, neur, oh, scl, dout, a_type, tune in list(itertools.product(
                sequence_length, reduce_size, feature_size, num_neurons, one_hot, scale, dropout,
                architecture_type, tune_internal_states)):
            # Optionally run the sequence one step short of seq_len.
            batch_size = seq_len - (r_size if seq_len > 1 else 0)
            if oh:
                # One-hot inputs: a single random 1 per row.
                X = np.zeros((batch_size, feat), dtype=conf.dtype)
                rnd_idx = np.random.randint(feat, size=batch_size)
                X[range(batch_size), rnd_idx] = 1
            else:
                X = np.random.uniform(-scl, scl, (batch_size, feat))
            wh = np.random.rand(neur, neur) * scl
            wx = np.random.rand(feat, neur) * scl
            w = {'hidden': wh, 'inp': wx}
            bias = np.random.rand(neur) * scl
            # Linear
            if tune:
                h_init = np.array(np.random.rand(1, neur), dtype=conf.dtype) * scl
                h = np.copy(h_init)
            else:
                h = np.zeros((1, neur), dtype=conf.dtype)
                h_init = None
            true_out_linear = OrderedDict()
            p = None
            mask = None
            for i, x in enumerate(X):
                h = np.matmul(h, wh) + np.matmul(x.reshape(1, -1), wx) + bias
                if dout:
                    if p is None:
                        p = np.random.rand()
                        mask = list()
                    mask.append(np.array(np.random.rand(*h.shape) < p, dtype=conf.dtype) / p)
                    layer_out = h * mask[-1]
                else:
                    layer_out = h
                # many_to_one keeps only the final step's output.
                if a_type == 'many_to_one':
                    if i == batch_size - 1:
                        true_out_linear = OrderedDict()
                        true_out_linear[i + 1] = layer_out
                else:
                    true_out_linear[i + 1] = layer_out
            test(X, w, seq_len, true_out_linear, bias, h_init, actv_fn='Linear', p=p, mask=mask,
                 architecture_type=a_type)
            # Sigmoid
            if tune:
                h_init = np.array(np.random.rand(1, neur), dtype=conf.dtype) * scl
                h = np.copy(h_init)
                # Tuned initial state is passed through the activation first.
                h = 1.0 / (1.0 + np.exp(-h))
            else:
                h = np.zeros((1, neur), dtype=conf.dtype)
                h_init = None
            true_out_sigmoid = OrderedDict()
            p = None
            mask = None
            for i, x in enumerate(X):
                score = np.matmul(h, wh) + np.matmul(x.reshape(1, -1), wx) + bias
                h = 1.0 / (1.0 + np.exp(-score))
                if dout:
                    if p is None:
                        p = np.random.rand()
                        mask = list()
                    mask.append(np.array(np.random.rand(*h.shape) < p, dtype=conf.dtype))
                    layer_out = h * mask[-1]
                else:
                    layer_out = h
                if a_type == 'many_to_one':
                    if i == batch_size - 1:
                        true_out_sigmoid = OrderedDict()
                        true_out_sigmoid[i + 1] = layer_out
                else:
                    true_out_sigmoid[i + 1] = layer_out
            test(X, w, seq_len, true_out_sigmoid, bias, h_init, actv_fn='Sigmoid', p=p, mask=mask,
                 architecture_type=a_type)
            # Tanh
            if tune:
                h_init = np.array(np.random.rand(1, neur), dtype=conf.dtype) * scl
                h = np.copy(h_init)
                h = (2.0 / (1.0 + np.exp(-2.0 * h))) - 1.0
            else:
                h = np.zeros((1, neur), dtype=conf.dtype)
                h_init = None
            true_out_tanh = OrderedDict()
            p = None
            mask = None
            for i, x in enumerate(X):
                score = np.matmul(h, wh) + np.matmul(x.reshape(1, -1), wx) + bias
                # tanh written via its logistic identity.
                h = (2.0 / (1.0 + np.exp(-2.0 * score))) - 1.0
                if dout:
                    if p is None:
                        p = np.random.rand()
                        mask = list()
                    mask.append(np.array(np.random.rand(*h.shape) < p, dtype=conf.dtype))
                    layer_out = h * mask[-1]
                else:
                    layer_out = h
                if a_type == 'many_to_one':
                    if i == batch_size - 1:
                        true_out_tanh = OrderedDict()
                        true_out_tanh[i + 1] = layer_out
                else:
                    true_out_tanh[i + 1] = layer_out
            test(X, w, seq_len, true_out_tanh, bias, h_init, actv_fn='Tanh', p=p, mask=mask,
                 architecture_type=a_type)
            # ReLU
            if tune:
                h_init = np.array(np.random.rand(1, neur), dtype=conf.dtype) * scl
                h = np.copy(h_init)
                h = np.maximum(0, h)
            else:
                h = np.zeros((1, neur), dtype=conf.dtype)
                h_init = None
            true_out_relu = OrderedDict()
            p = None
            mask = None
            for i, x in enumerate(X):
                score = np.matmul(h, wh) + np.matmul(x.reshape(1, -1), wx) + bias
                h = np.maximum(0, score)
                if dout:
                    if p is None:
                        p = np.random.rand()
                        mask = list()
                    mask.append(np.array(np.random.rand(*h.shape) < p, dtype=conf.dtype) / p)
                    layer_out = h * mask[-1]
                else:
                    layer_out = h
                if a_type == 'many_to_one':
                    if i == batch_size - 1:
                        true_out_relu = OrderedDict()
                        true_out_relu[i + 1] = layer_out
                else:
                    true_out_relu[i + 1] = layer_out
            test(X, w, seq_len, true_out_relu, bias, h_init, actv_fn='ReLU', p=p, mask=mask,
                 architecture_type=a_type)
            # SoftMax
            if tune:
                h_init = np.array(np.random.rand(1, neur), dtype=conf.dtype) * scl
                h = np.copy(h_init)
                h = np.exp(h) / np.sum(np.exp(h), axis=-1, keepdims=True)
            else:
                h = np.zeros((1, neur), dtype=conf.dtype)
                h_init = None
            true_out_softmax = OrderedDict()
            p = None
            mask = None
            for i, x in enumerate(X):
                score = np.matmul(h, wh) + np.matmul(x.reshape(1, -1), wx) + bias
                unnorm_prob = np.exp(score)
                # Normalize to probabilities along the neuron axis.
                h = unnorm_prob / np.sum(unnorm_prob, axis=-1, keepdims=True)
                if dout:
                    if p is None:
                        p = np.random.rand()
                        mask = list()
                    mask.append(np.array(np.random.rand(*h.shape) < p, dtype=conf.dtype))
                    layer_out = h * mask[-1]
                else:
                    layer_out = h
                if a_type == 'many_to_one':
                    if i == batch_size - 1:
                        true_out_softmax = OrderedDict()
                        true_out_softmax[i + 1] = layer_out
                else:
                    true_out_softmax[i + 1] = layer_out
            test(X, w, seq_len, true_out_softmax, bias, h_init, actv_fn='Softmax', p=p, mask=mask,
                 architecture_type=a_type)
    def test_backward_gradients_finite_difference(self):
        """Validate RNN.backward() gradients against central finite differences.

        For a combinatorial sweep of sequence lengths, feature/hidden sizes,
        activations, dropout settings, architecture types and tunable initial
        states, the analytic gradients (hidden weights, input weights, bias,
        inputs and initial hidden state) are compared against
        (f(x + d) - f(x - d)) / (2 d).
        """
        self.delta = 1e-6  # finite-difference perturbation step
        tol = 8  # decimal places required by npt.assert_almost_equal

        def test(inp, w, seq_len, inp_grad, bias=False, init_hidden_state=None, actv_fn='Sigmoid',
                 p=None, mask=None, architecture_type='many_to_many'):
            # Build the RNN under test and obtain the analytic gradients from
            # one forward/backward pass.
            num_neur = w['hidden'].shape[0]
            wh = w['hidden']
            wx = w['inp']
            rnn = RNN(inp, num_neur, w, bias, seq_len=seq_len, activation_fn=actv_fn,
                      architecture_type=architecture_type, dropout=p,
                      tune_internal_states=(False if init_hidden_state is None else True))
            if init_hidden_state is not None:
                rnn.init_hidden_state = init_hidden_state
                rnn.reset_internal_states()
            _ = rnn.forward(inp, mask=mask)
            inputs_grad = rnn.backward(inp_grad)
            hidden_weights_grad = rnn.hidden_weights_grad
            input_weights_grad = rnn.input_weights_grad
            bias_grad = rnn.bias_grad
            hidden_grad = rnn.hidden_state_grad

            # Hidden weights finite difference gradients.  Each weight is
            # perturbed by +/- delta; the per-timestep outputs are weighted by
            # the upstream gradient (chain rule) before differencing.
            hidden_weights_finite_diff = np.empty(hidden_weights_grad.shape)
            for i in range(hidden_weights_grad.shape[0]):
                for j in range(hidden_weights_grad.shape[1]):
                    w_delta = np.zeros_like(wh)
                    w_delta[i, j] = self.delta
                    rnn.hidden_weights = wh + w_delta
                    lhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                    rnn.hidden_weights = wh - w_delta
                    rhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                    lhs_sum = np.zeros_like(list(lhs.values())[0])
                    rhs_sum = np.zeros_like(list(rhs.values())[0])
                    # Key 0 (initial state) is skipped.
                    for k in list(lhs.keys()):
                        if k > 0:
                            lhs_sum += lhs[k] * inp_grad[k]
                            rhs_sum += rhs[k] * inp_grad[k]
                    hidden_weights_finite_diff[i, j] = \
                        np.sum(((lhs_sum - rhs_sum) / (2 * self.delta)))
            rnn.hidden_weights = wh  # restore unperturbed weights

            # Input weights finite difference gradients (same scheme).
            input_weights_finite_diff = np.empty(input_weights_grad.shape)
            for i in range(input_weights_grad.shape[0]):
                for j in range(input_weights_grad.shape[1]):
                    w_delta = np.zeros_like(wx)
                    w_delta[i, j] = self.delta
                    rnn.input_weights = wx + w_delta
                    lhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                    rnn.input_weights = wx - w_delta
                    rhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                    lhs_sum = np.zeros_like(list(lhs.values())[0])
                    rhs_sum = np.zeros_like(list(rhs.values())[0])
                    for k in list(lhs.keys()):
                        if k > 0:
                            lhs_sum += lhs[k] * inp_grad[k]
                            rhs_sum += rhs[k] * inp_grad[k]
                    input_weights_finite_diff[i, j] = \
                        np.sum(((lhs_sum - rhs_sum) / (2 * self.delta)))
            rnn.input_weights = wx  # restore unperturbed weights

            # Bias finite difference gradients.
            # NOTE(review): `bias` is used as an ndarray here although its
            # default is False -- the caller always passes an array; confirm.
            bias_finite_diff = np.empty(bias_grad.shape)
            for i in range(bias_grad.shape[0]):
                bias_delta = np.zeros(bias.shape, dtype=conf.dtype)
                bias_delta[i] = self.delta
                rnn.bias = bias + bias_delta
                lhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                rnn.bias = bias - bias_delta
                rhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                lhs_sum = np.zeros_like(list(lhs.values())[0])
                rhs_sum = np.zeros_like(list(rhs.values())[0])
                for k in list(lhs.keys()):
                    if k > 0:
                        lhs_sum += lhs[k] * inp_grad[k]
                        rhs_sum += rhs[k] * inp_grad[k]
                bias_finite_diff[i] = \
                    np.sum(((lhs_sum - rhs_sum) / (2 * self.delta)))
            rnn.bias = bias  # restore unperturbed bias

            # Inputs finite difference gradients.  The analytic per-timestep
            # input gradients (an OrderedDict, newest first) are stacked into
            # one array so they can be compared element-wise.
            inputs_grad = np.vstack(reversed(list(inputs_grad.values())))
            inputs_finite_diff = np.empty(inputs_grad.shape)
            for i in range(inp.shape[0]):
                for j in range(inp.shape[1]):
                    i_delta = np.zeros(inp.shape, dtype=conf.dtype)
                    i_delta[i, j] = self.delta
                    lhs = copy.deepcopy(rnn.forward(inp + i_delta, mask=mask))
                    rhs = copy.deepcopy(rnn.forward(inp - i_delta, mask=mask))
                    lhs_sum = np.zeros_like(list(lhs.values())[0])
                    rhs_sum = np.zeros_like(list(rhs.values())[0])
                    for k in list(lhs.keys()):
                        if k > 0:
                            lhs_sum += lhs[k] * inp_grad[k]
                            rhs_sum += rhs[k] * inp_grad[k]
                    inputs_finite_diff[i, j] = \
                        np.sum(((lhs_sum - rhs_sum) / (2 * self.delta)), keepdims=False)

            if init_hidden_state is not None:
                # Initial hidden state finite difference gradients; the state
                # must be reset after every perturbation so the next forward
                # pass starts from the perturbed value.
                hidden_finite_diff = np.empty(hidden_grad.shape)
                for i in range(init_hidden_state.shape[0]):
                    for j in range(init_hidden_state.shape[1]):
                        h_delta = np.zeros(init_hidden_state.shape, dtype=conf.dtype)
                        h_delta[i, j] = self.delta
                        rnn.init_hidden_state = init_hidden_state + h_delta
                        rnn.reset_internal_states()
                        lhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                        rnn.init_hidden_state = init_hidden_state - h_delta
                        rnn.reset_internal_states()
                        rhs = copy.deepcopy(rnn.forward(inp, mask=mask))
                        lhs_sum = np.zeros_like(list(lhs.values())[0])
                        rhs_sum = np.zeros_like(list(rhs.values())[0])
                        for k in list(lhs.keys()):
                            if k > 0:
                                lhs_sum += lhs[k] * inp_grad[k]
                                rhs_sum += rhs[k] * inp_grad[k]
                        hidden_finite_diff[i, j] = \
                            np.sum(((lhs_sum - rhs_sum) / (2 * self.delta)), keepdims=False)
                rnn.init_hidden_state = init_hidden_state
                rnn.reset_internal_states()

            npt.assert_almost_equal(hidden_weights_grad, hidden_weights_finite_diff, decimal=tol)
            npt.assert_almost_equal(input_weights_grad, input_weights_finite_diff, decimal=tol)
            npt.assert_almost_equal(inputs_grad, inputs_finite_diff, decimal=tol)
            if init_hidden_state is not None:
                npt.assert_almost_equal(hidden_grad, hidden_finite_diff, decimal=tol)
            # The bias check is skipped for ReLU (its kink makes the finite
            # difference unreliable at the activation boundary).
            if not actv_fn == 'ReLU':
                npt.assert_almost_equal(bias_grad, bias_finite_diff, decimal=tol)

            # if not actv_fn == 'Softmax':
            #     # Hidden weights gradient check
            #     grad_diff = (abs(hidden_weights_grad - hidden_weights_finite_diff) /
            #                  (abs(hidden_weights_grad + hidden_weights_finite_diff) + 1e-64))
            #     error_threshold = np.ones_like(grad_diff) * 1e-5
            #     npt.assert_array_less(grad_diff, error_threshold)
            #
            #     # Input weights gradient check
            #     grad_diff = (abs(input_weights_grad - input_weights_finite_diff) /
            #                  (abs(input_weights_grad + input_weights_finite_diff) + 1e-64))
            #     error_threshold = np.ones_like(grad_diff) * 1e-5
            #     npt.assert_array_less(grad_diff, error_threshold)
            #
            #     # Inputs gradient check
            #     grad_diff = (abs(inputs_grad - inputs_finite_diff) /
            #                  (abs(inputs_grad + inputs_finite_diff) + 1e-64))
            #     error_threshold = np.ones_like(grad_diff) * 1e-5
            #     npt.assert_array_less(grad_diff, error_threshold)

        # Combinatorial Test Cases
        # ------------------------
        sequence_length = [1, 2, 3, 11]
        reduce_size = [0, 1]
        feature_size = [1, 2, 3, 11]
        num_neurons = [1, 2, 3, 11]
        one_hot = [True, False]
        scale = [1e-2]  # small weight scale keeps activations well-behaved
        unit_inp_grad = [True, False]
        activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'ReLU', 'Softmax']
        dropout = [True, False]
        architecture_type = ['many_to_many', 'many_to_one']
        tune_internal_states = [True, False]
        repeat = list(range(1))
        for seq_len, r_size, feat, neur, oh, scl, unit, actv, dout, a_type, tune, r in \
                list(itertools.product(sequence_length, reduce_size, feature_size, num_neurons, one_hot,
                                       scale, unit_inp_grad, activation_fn, dropout, architecture_type,
                                       tune_internal_states, repeat)):
            batch_size = seq_len - (r_size if seq_len > 1 else 0)
            # Initialize inputs (one-hot rows or small uniform noise)
            if oh:
                X = np.zeros((batch_size, feat), dtype=conf.dtype)
                rnd_idx = np.random.randint(feat, size=batch_size)
                X[range(batch_size), rnd_idx] = 1
            else:
                X = np.random.uniform(-scl, scl, (batch_size, feat))
            # Initialize weights and bias
            wh = np.random.rand(neur, neur) * scl
            wx = np.random.rand(feat, neur) * scl
            w = {'hidden': wh, 'inp': wx}
            bias = np.random.rand(neur) * scl
            init_h_state = np.random.rand(1, neur) if tune else None
            # Initialize input gradients: one per timestep for many_to_many,
            # only the final timestep for many_to_one.
            inp_grad = OrderedDict()
            if a_type == 'many_to_many':
                for s in range(1, batch_size + 1):
                    inp_grad[s] = np.ones((1, neur), dtype=conf.dtype) if unit else \
                        np.random.uniform(-1, 1, (1, neur))
            else:
                inp_grad[batch_size] = np.ones((1, neur), dtype=conf.dtype) if unit else \
                    np.random.uniform(-1, 1, (1, neur))
            # Set dropout mask (inverted dropout scaling only for activations
            # that pass the scale through linearly)
            if dout:
                p = np.random.rand()
                mask = np.array(np.random.rand(batch_size, neur) < p, dtype=conf.dtype)
                if actv in ['Linear', 'ReLU']:
                    mask /= p
            else:
                p = None
                mask = None
            test(X, w, seq_len, inp_grad, bias, init_h_state, actv, p, mask, a_type)
# Run every test case in this module when executed directly.
if __name__ == '__main__':
    unittest.main()
| 46.247881 | 100 | 0.492922 | 21,240 | 0.973018 | 0 | 0 | 0 | 0 | 0 | 0 | 2,204 | 0.100967 |
3940c721245b515751e5bd8b6c8ca3c7c862fc8d | 416 | py | Python | hcloud/helpers/descriptors.py | rebost/hcloud-python | 148184eec3d76b6d51cb76855dff6ae305a0e165 | [
"MIT"
] | 1 | 2019-10-23T01:00:08.000Z | 2019-10-23T01:00:08.000Z | hcloud/helpers/descriptors.py | rebost/hcloud-python | 148184eec3d76b6d51cb76855dff6ae305a0e165 | [
"MIT"
] | null | null | null | hcloud/helpers/descriptors.py | rebost/hcloud-python | 148184eec3d76b6d51cb76855dff6ae305a0e165 | [
"MIT"
] | 1 | 2019-06-19T17:53:10.000Z | 2019-06-19T17:53:10.000Z | # -*- coding: utf-8 -*-
from dateutil.parser import isoparse
class ISODateTime(object):
    """Data descriptor that parses ISO-8601 strings into datetimes on assignment.

    Fix: the parsed value is now stored on the *owning instance* (under a
    private attribute name learned via ``__set_name__``) instead of on the
    descriptor itself, where it was silently shared by every instance of the
    owner class.  When ``__set_name__`` never ran (descriptor attached outside
    a class body, or pre-3.6 interpreters), the old descriptor-local storage
    is kept as a fallback.
    """

    def __init__(self, initval=None):
        # Default value, returned before any assignment and for class-level
        # access; also the fallback storage for the legacy behaviour.
        self.val = initval
        # Private per-instance attribute name; set by __set_name__.
        self.name = None

    def __set_name__(self, owner, name):
        self.name = '_' + name

    def __get__(self, obj, obj_type):
        if obj is None or self.name is None:
            return self.val
        return obj.__dict__.get(self.name, self.val)

    def __set__(self, obj, string_date):
        if string_date is None:
            value = None
        else:
            # e.g. 2016-01-30T23:50+00:00
            value = isoparse(string_date)
        if self.name is None:
            # Legacy fallback: store on the descriptor (shared) rather than fail.
            self.val = value
        else:
            obj.__dict__[self.name] = value
| 23.111111 | 44 | 0.59375 | 352 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.112981 |
3941703025fe2beb80c23167be7d3cc82ac99a96 | 3,317 | py | Python | src/classification_report.py | cognibit/Text-Normalization-Demo | 36355f4a2c5187948fe786b7318259151f9a9db6 | [
"Apache-2.0"
] | 66 | 2018-06-04T05:19:49.000Z | 2022-01-08T23:15:13.000Z | src/classification_report.py | cognibit/Text-Normalization-Demo | 36355f4a2c5187948fe786b7318259151f9a9db6 | [
"Apache-2.0"
] | 1 | 2019-07-02T14:44:44.000Z | 2019-07-03T14:54:24.000Z | src/classification_report.py | cognibit/Text-Normalization-Demo | 36355f4a2c5187948fe786b7318259151f9a9db6 | [
"Apache-2.0"
] | 7 | 2018-06-12T14:22:00.000Z | 2022-02-22T01:18:12.000Z | # Copyright 2018 Cognibit Solutions LLP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Generates classification report for the trained XGBoost models
"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix as cm
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report as report
def preprocessing(results, truth):
    """Label each row as 'RemainSelf' (before == after) or 'ToBeNormalized'.

    Adds a 'truth' column to *results* and a 'class' column to *truth*,
    mutating both frames in place, and returns them.
    """
    unchanged = truth['before'] == truth['after']
    results.loc[unchanged, 'truth'] = 'RemainSelf'
    results.loc[~unchanged, 'truth'] = 'ToBeNormalized'
    truth['class'] = ''
    truth.loc[~unchanged, 'class'] = 'ToBeNormalized'
    truth.loc[unchanged, 'class'] = 'RemainSelf'
    return results, truth
def f1_scores(results, truth):
    """Print sklearn's per-class precision/recall/F1 classification report."""
    y_true = truth['class'].tolist()
    y_pred = results['class'].tolist()
    print(report(y_true, y_pred))
def confusion_matrix(results, truth, lang):
    """Compute and plot the confusion matrix for the given language tag."""
    y_true = truth['class'].tolist()
    y_pred = results['class'].tolist()
    plot_confusion_matrix(cm(y_true, y_pred),
                          classes=['ToBeNormalized', 'RemainSelf'],
                          title='XGBoost Confusion Matrix [{}]'.format(lang))
def pr_curve(results, truth, lang):
    """Plot the precision-recall curve (mutates the 'class' columns to 0/1)."""
    # Binarize in place: ToBeNormalized -> 1 (positive), RemainSelf -> 0.
    for frame in (truth, results):
        frame.loc[frame['class'] == 'ToBeNormalized', 'class'] = 1
        frame.loc[frame['class'] == 'RemainSelf', 'class'] = 0
    y_true = truth['class'].tolist()
    y_pred = results['class'].tolist()
    average_precision = average_precision_score(y_true, y_pred)
    precision, recall, threshold = precision_recall_curve(y_true, y_pred)

    plt.step(recall, precision, color='b', alpha=0.2, where='post')
    plt.fill_between(recall, precision, alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall Curve: AP={0:0.2f} [{1}]'.format(average_precision, lang))
    plt.show()
def plot_confusion_matrix(cm, classes,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Render *cm* as an annotated matplotlib image with class tick labels.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    fmt = 'd'
    thresh = cm.max() / 2.
    # Annotate every cell, switching to white text on dark backgrounds.
    for (i, j), count in np.ndenumerate(cm):
        plt.text(j, i, format(count, fmt),
                 horizontalalignment="center",
                 color="white" if count > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
3945dcb9dd48db259f43d38679d34c16f4543743 | 913 | py | Python | Joints/Pelvis.py | lcremer/Maya_Rigging | 8fe07e5f8d021a6828608bca4bf74e04f023b1cd | [
"Unlicense"
] | null | null | null | Joints/Pelvis.py | lcremer/Maya_Rigging | 8fe07e5f8d021a6828608bca4bf74e04f023b1cd | [
"Unlicense"
] | null | null | null | Joints/Pelvis.py | lcremer/Maya_Rigging | 8fe07e5f8d021a6828608bca4bf74e04f023b1cd | [
"Unlicense"
] | null | null | null | """
Creates Pelvis
"""
import maya.cmds as mc
from ..Utils import String as String
class Pelvis():
    """Creates a single pelvis joint for a character rig in Maya."""

    def __init__(self,
                 characterName='',
                 suffix='',
                 name='Pelvis',
                 parent=''):
        """Build the pelvis joint.

        The joint is named from (characterName, name, suffix) via
        String.combineWith_ and created at (0, 3, 0).  When *parent* is given,
        the joint is snapped to the parent's position and orientation and then
        parented under it.  The created joint is exposed as both
        ``self.topJoint`` and ``self.endJoint``.
        """
        self.characterName = characterName
        self.suffix = suffix
        self.name = name

        mc.select(cl=True)  # avoid accidental parenting to the current selection
        self.topJoint = mc.joint(n=String.combineWith_((characterName, name, suffix)), p=(0, 3, 0))
        self.endJoint = self.topJoint

        if parent:
            # Snap to the parent's translation/orientation via throwaway
            # constraints, then parent the joint.
            mc.delete(mc.pointConstraint(parent, self.topJoint))
            mc.delete(mc.orientConstraint(parent, self.topJoint))
            mc.parent(self.topJoint, parent)

        mc.select(cl=True)
        # return {'topJoint':topJoint, 'endJoint':topJoint}
3946a4dc9131883ea55e7662c3cdeaaec6befe76 | 2,311 | py | Python | src/get_weather.py | Sphinxxx1984/Welcome_system | 7e5e4d0232ef9d57afc952414fa0a4c63cd7583f | [
"MIT"
] | null | null | null | src/get_weather.py | Sphinxxx1984/Welcome_system | 7e5e4d0232ef9d57afc952414fa0a4c63cd7583f | [
"MIT"
] | 6 | 2020-01-28T22:14:36.000Z | 2022-02-09T23:33:25.000Z | src/get_weather.py | Sphinxxx1984/Welcome_system | 7e5e4d0232ef9d57afc952414fa0a4c63cd7583f | [
"MIT"
] | 2 | 2019-09-06T00:57:57.000Z | 2019-09-06T05:05:23.000Z |
from multiprocessing import Pipe
import requests
import json
import time
# Output locations for the most recently fetched weather payloads.
cur_file = "../data/cur_weather.json"
today_file = "../data/today_weather.json"
def write2file(data, json_file):
    """Serialize *data* as JSON and write it to *json_file*.

    Fix: the original called ``f.close()`` inside the ``with`` block, which is
    redundant (the context manager already closes the file); ``json.dump``
    also streams directly instead of building an intermediate string.
    """
    with open(json_file, 'w') as f:
        json.dump(data, f)
# class Weather(object):
# def __init__(self, time, week, weath, temper, now_weather):
# self.time = time
# self.week = week
# self.weath = weath
# self.temper = temper
# self.now_weather = now_weather
#
#
# def show(self):
# print("日期:" + self.time + " " + self.week)
# print("气候:" + self.weath)
# print("气温:" + self.temper)
#
# def write2file(self, filename):
# objectdumps2file(self, filename)
def get_weather_info(mode):
    """Query the tianqiapi.com weather API.

    mode 1 selects API version 'v1', mode 2 selects 'v6'.

    Returns the decoded JSON payload, or None when the mode is unknown or
    the request/decoding fails.  (Callers already treat None as failure;
    previously an unknown mode sent a request with an empty version string
    and any network error propagated as an exception, defeating the callers'
    None checks.)
    """
    url = 'https://www.tianqiapi.com/api/'
    headers = {'User-Agent': 'Mozilla/5.0'}
    if mode == 1:
        version = 'v1'
    elif mode == 2:
        version = 'v6'
    else:
        return None  # unsupported mode
    params = {
        'version': version,
        'appid': '18833238',
        'appsecret': 'PM9hiniT',
    }
    try:
        # A timeout keeps the polling worker from hanging forever on a dead link.
        res = requests.get(url, params=params, headers=headers, timeout=10)
        res.raise_for_status()
        res.encoding = 'utf-8'
        return json.loads(res.text)
    except (requests.RequestException, ValueError):
        # Network/HTTP failure or malformed JSON payload.
        return None
def get_weather(argspipe):
    """Worker loop: serve weather-fetch requests arriving over a pipe.

    Protocol: receive 1 (current weather) or 2 (today's weather), fetch and
    persist the payload, then reply 1 on success or -1 on failure/unknown
    request.  Polls once per second, forever.
    """
    while True:
        request = argspipe.recv()
        if request == 1:
            weather = get_weather_info(1)
            target = cur_file
        elif request == 2:
            weather = get_weather_info(2)
            target = today_file
        else:
            weather = None
            target = None
        if weather is None:
            argspipe.send(-1)
        else:
            write2file(weather, target)
            argspipe.send(1)
        time.sleep(1)
# def parse_arguments(argv):
# parser = argparse.ArgumentParser()
# parser.add_argument('model', type=int, help='1 for get now weather, 2 for get today weather')
# return parser.parse_args(argv)
if __name__ == '__main__':
    # Smoke test: fetch the current weather once and persist it;
    # prints "0" on success, "-1" on failure.
    cur_weather = get_weather_info(1)
    if cur_weather is None:
        print("-1")
    else:
        write2file(cur_weather, cur_file)
        print("0")
39472851d8582ebdbbe1dc8fbca3e5ec7f61d4f4 | 14,640 | py | Python | spts/gui/options.py | FilipeMaia/spts | de4eb2920b675537da611c7301d4d5a9565a1ab1 | [
"BSD-2-Clause"
] | null | null | null | spts/gui/options.py | FilipeMaia/spts | de4eb2920b675537da611c7301d4d5a9565a1ab1 | [
"BSD-2-Clause"
] | 5 | 2021-03-26T11:37:40.000Z | 2021-03-31T09:20:40.000Z | spts/gui/options.py | FilipeMaia/spts | de4eb2920b675537da611c7301d4d5a9565a1ab1 | [
"BSD-2-Clause"
] | 1 | 2021-03-24T11:07:41.000Z | 2021-03-24T11:07:41.000Z | import os.path
import logging
logger = logging.getLogger("MSI_GUI")
from PyQt5 import QtCore, QtGui
class Options:
    """Aggregates every option section/tab and fans out bulk operations.

    Improvement: load_all()/connect_all() previously repeated one call per
    section; the sections are now kept in a list and iterated, so adding a
    new tab requires touching only __init__.
    """

    def __init__(self, mainWindow):
        self.general_box = GeneralBox(mainWindow)
        self.raw_tab = RawTab(mainWindow)
        self.process_tab = ProcessTab(mainWindow)
        self.denoise_tab = DenoiseTab(mainWindow)
        self.threshold_tab = ThresholdTab(mainWindow)
        self.detect_tab = DetectTab(mainWindow)
        self.analyse_tab = AnalyseTab(mainWindow)
        # Kept in pipeline order; used to fan out the bulk operations below.
        self._sections = [self.general_box, self.raw_tab, self.process_tab,
                          self.denoise_tab, self.threshold_tab,
                          self.detect_tab, self.analyse_tab]

    def load_all(self):
        """Load configuration values into every section's widgets."""
        for section in self._sections:
            section.load_all()

    def connect_all(self):
        """Connect widget signals to handlers for every section."""
        for section in self._sections:
            section.connect_all()
class GeneralBox:
    """Wires the 'general' configuration section (file, frame range, output
    level) to its Qt widgets."""

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.dataFilenameLineEdit = self.w.ui.dataFilenameLineEdit
        self.i0SpinBox = self.w.ui.i0SpinBox
        self.nFramesSpinBox = self.w.ui.nFramesSpinBox
        self.outputLevelComboBox = self.w.ui.outputLevelComboBox

    def connect_all(self):
        """Connect widget signals to the configuration update handlers."""
        self.dataFilenameLineEdit.editingFinished.connect(self._set_filename)
        self.i0SpinBox.valueChanged.connect(self._on_i0_changed)
        self.nFramesSpinBox.valueChanged.connect(self._on_n_frames_changed)
        self.outputLevelComboBox.currentIndexChanged.connect(self._on_output_level_changed)

    def load_all(self):
        """Populate the widgets from conf['general']."""
        c = self.w.conf["general"]
        self.dataFilenameLineEdit.setText(c["filename"])
        self.i0SpinBox.setValue(c["i0"])
        self.nFramesSpinBox.setValue(c["n_images"])
        self.outputLevelComboBox.setCurrentIndex(c["output_level"])

    def _set_filename(self):
        """Store the filename and (re)start the worker if the file exists."""
        filename = str(self.dataFilenameLineEdit.text())
        self.w.conf["general"]["filename"] = filename
        filename_full = self.w.preferences.data_mount_prefix + filename
        if os.path.isfile(filename_full):
            self.w._init_worker()
        else:
            logger.warning("File %s cannot be found. You might want to check data mount prefix under preferences." % filename_full)

    def _on_i0_changed(self):
        self.w.conf["general"]["i0"] = self.i0SpinBox.value()

    def _on_n_frames_changed(self):
        n_images = self.nFramesSpinBox.value()
        self.w.conf["general"]["n_images"] = n_images

    def _on_output_level_changed(self):
        self.w.conf["general"]["output_level"] = self.outputLevelComboBox.currentIndex()

    def _on_open_data(self):
        """Show a file dialog and adopt the chosen CXI file.

        Fix: in PyQt5, QFileDialog lives in QtWidgets, not QtGui -- the
        previous ``QtGui.QFileDialog`` reference raised AttributeError.
        """
        from PyQt5 import QtWidgets
        filename = QtWidgets.QFileDialog.getOpenFileName(self.w, "Open CXI data file", "", "CXI Files (*.cxi)")
        # PyQt5 returns (filename, selected_filter); unwrap it.
        if isinstance(filename, tuple):
            if filename[0]:
                filename = filename[0]
        if filename:
            self.dataFilenameLineEdit.setText(filename)
            self._set_filename()
# Ordered processing-stage identifiers; slices of this list select which
# cached stages to invalidate when an upstream option changes.
names_data_types = ["1_raw", "2_process", "3_denoise", "4_threshold", "5_detect", "6_analyse"]
class RawTab:
    """Wires the 'raw' configuration section (dataset name, saturation,
    crop/ROI limits, constant offset) to its Qt widgets.

    Any change here invalidates the entire cached processing pipeline.
    """

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.dataSetNameRawLineEdit = self.w.ui.dataSetNameRawLineEdit
        self.saturationLevelSpinBox = self.w.ui.saturationLevelSpinBox
        self.xMinSpinBox = self.w.ui.xMinSpinBox
        self.xMaxSpinBox = self.w.ui.xMaxSpinBox
        self.yMinSpinBox = self.w.ui.yMinSpinBox
        self.yMaxSpinBox = self.w.ui.yMaxSpinBox
        self.skipSaturatedCheckBox = self.w.ui.skipSaturatedCheckBox
        self.subtractConstRawSpinBox = self.w.ui.subtractConstRawSpinBox

    def connect_all(self):
        """Connect widget signals to the configuration update handlers."""
        self.dataSetNameRawLineEdit.editingFinished.connect(self._on_data_set_name_changed)
        self.saturationLevelSpinBox.valueChanged.connect(self._on_saturation_level_changed)
        self.xMinSpinBox.valueChanged.connect(self._on_xmin_changed)
        self.xMaxSpinBox.valueChanged.connect(self._on_xmax_changed)
        self.yMinSpinBox.valueChanged.connect(self._on_ymin_changed)
        self.yMaxSpinBox.valueChanged.connect(self._on_ymax_changed)
        self.skipSaturatedCheckBox.stateChanged.connect(self._on_skip_saturated_changed)
        self.subtractConstRawSpinBox.valueChanged.connect(self._on_subtract_const_changed)

    def load_all(self):
        """Populate the widgets from conf['raw']."""
        c = self.w.conf["raw"]
        self.dataSetNameRawLineEdit.setText(c["dataset_name"])
        self.saturationLevelSpinBox.setValue(c["saturation_level"])
        self.xMinSpinBox.setValue(c["xmin"])
        self.xMaxSpinBox.setValue(c["xmax"])
        self.yMinSpinBox.setValue(c["ymin"])
        self.yMaxSpinBox.setValue(c["ymax"])
        self.skipSaturatedCheckBox.setChecked(c["skip_saturated_frames"])
        self.subtractConstRawSpinBox.setValue(c["subtract_constant"])

    def _clear_dt_cache(self):
        # Raw-level changes invalidate every cached stage.
        self.w.clear_cache_types(names_data_types)

    def _on_data_set_name_changed(self):
        self.w.conf["raw"]["dataset_name"] = str(self.dataSetNameRawLineEdit.text())
        self._clear_dt_cache()

    def _on_saturation_level_changed(self):
        self.w.conf["raw"]["saturation_level"] = self.saturationLevelSpinBox.value()
        self._clear_dt_cache()

    def _on_xmin_changed(self):
        # Clamp so xmin stays strictly below xmax.
        # NOTE(review): only the conf value is clamped; the spin box keeps
        # showing the out-of-range number -- confirm this is intended.
        xmin = self.xMinSpinBox.value()
        xmax = self.w.conf["raw"]["xmax"]
        if xmin >= xmax:
            self.w.conf["raw"]["xmin"] = xmax - 1
        else:
            self.w.conf["raw"]["xmin"] = xmin
        self._clear_dt_cache()

    def _on_xmax_changed(self):
        # Clamp so xmax stays strictly above xmin.
        xmin = self.w.conf["raw"]["xmin"]
        xmax = self.xMaxSpinBox.value()
        if xmax <= xmin:
            self.w.conf["raw"]["xmax"] = xmin + 1
        else:
            self.w.conf["raw"]["xmax"] = xmax
        self._clear_dt_cache()

    def _on_ymin_changed(self):
        # Clamp so ymin stays strictly below ymax.
        ymin = self.yMinSpinBox.value()
        ymax = self.w.conf["raw"]["ymax"]
        if ymin >= ymax:
            self.w.conf["raw"]["ymin"] = ymax - 1
        else:
            self.w.conf["raw"]["ymin"] = ymin
        self._clear_dt_cache()

    def _on_ymax_changed(self):
        # Clamp so ymax stays strictly above ymin.
        ymin = self.w.conf["raw"]["ymin"]
        ymax = self.yMaxSpinBox.value()
        if ymax <= ymin:
            self.w.conf["raw"]["ymax"] = ymin + 1
        else:
            self.w.conf["raw"]["ymax"] = ymax
        self._clear_dt_cache()

    def _on_skip_saturated_changed(self):
        self.w.conf["raw"]["skip_saturated_frames"] = self.skipSaturatedCheckBox.isChecked()
        self._clear_dt_cache()

    def _on_subtract_const_changed(self):
        self.w.conf["raw"]["subtract_constant"] = self.subtractConstRawSpinBox.value()
        self._clear_dt_cache()
class ProcessTab:
    """Wires the 'process' configuration section (offset subtraction, floor
    cut, common-mode correction) to its Qt widgets.

    Changes here invalidate every cached stage from 'process' onward.
    """

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.dataSetNameLineEdit = self.w.ui.dataSetNameLineEdit
        self.subtractConstSpinBox = self.w.ui.subtractConstSpinBox
        self.floorCutCheckBox = self.w.ui.floorCutCheckBox
        self.floorCutSpinBox = self.w.ui.floorCutSpinBox
        self.cmcXCheckBox = self.w.ui.cmcXCheckBox
        self.cmcYCheckBox = self.w.ui.cmcYCheckBox

    def connect_all(self):
        """Connect widget signals to the configuration update handlers."""
        self.dataSetNameLineEdit.editingFinished.connect(self._on_data_set_name_changed)
        self.subtractConstSpinBox.valueChanged.connect(self._on_subtract_const_changed)
        self.floorCutCheckBox.stateChanged.connect(self._on_floor_cut_toggled)
        self.floorCutSpinBox.valueChanged.connect(self._on_floor_cut_changed)
        self.cmcXCheckBox.stateChanged.connect(self._on_cmcx_changed)
        self.cmcYCheckBox.stateChanged.connect(self._on_cmcy_changed)

    def load_all(self):
        """Populate the widgets from conf['process'].

        A floor_cut_level of None means 'disabled': the checkbox is cleared
        and the spin box is made read-only.
        """
        c = self.w.conf["process"]
        self.dataSetNameLineEdit.setText(c["dataset_name"])
        self.subtractConstSpinBox.setValue(c["subtract_constant"])
        if c["floor_cut_level"] is None:
            self.floorCutCheckBox.setChecked(False)
            self.floorCutSpinBox.setReadOnly(True)
            self.floorCutSpinBox.setValue(0.)
        else:
            self.floorCutCheckBox.setChecked(True)
            self.floorCutSpinBox.setReadOnly(False)
            self.floorCutSpinBox.setValue(c["floor_cut_level"])
        self.cmcXCheckBox.setChecked(c["cmcx"])
        self.cmcYCheckBox.setChecked(c["cmcy"])

    def _clear_dt_cache(self):
        # Invalidate the 'process' stage and everything downstream.
        self.w.clear_cache_types(names_data_types[1:])

    def _on_data_set_name_changed(self):
        self.w.conf["process"]["dataset_name"] = str(self.dataSetNameLineEdit.text())
        self._clear_dt_cache()

    def _on_subtract_const_changed(self):
        self.w.conf["process"]["subtract_constant"] = self.subtractConstSpinBox.value()
        self._clear_dt_cache()

    def _on_floor_cut_toggled(self):
        # Enabling adopts the spin-box value; disabling stores None.
        checked = self.floorCutCheckBox.isChecked()
        self.floorCutSpinBox.setReadOnly(not checked)
        if checked:
            self._on_floor_cut_changed()
        else:
            self.w.conf["process"]["floor_cut_level"] = None
            self._clear_dt_cache()

    def _on_floor_cut_changed(self):
        self.w.conf["process"]["floor_cut_level"] = self.floorCutSpinBox.value()
        self._clear_dt_cache()

    def _on_cmcx_changed(self):
        self.w.conf["process"]["cmcx"] = self.cmcXCheckBox.isChecked()
        self._clear_dt_cache()

    def _on_cmcy_changed(self):
        self.w.conf["process"]["cmcy"] = self.cmcYCheckBox.isChecked()
        self._clear_dt_cache()
# Selectable denoising methods, in combo-box index order ("hist" is
# currently disabled).
denoise_methods = ["gauss", "gauss2"]#, "hist"]
class DenoiseTab:
    """Wires the 'denoise' configuration section (method, sigma) to its
    Qt widgets.  Changes invalidate the 'denoise' stage and downstream."""

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.methodComboBox = self.w.ui.methodComboBox
        self.sigmaDoubleSpinBox = self.w.ui.sigmaDoubleSpinBox

    def connect_all(self):
        """Connect widget signals to the configuration update handlers."""
        self.methodComboBox.currentIndexChanged.connect(self._on_method_changed)
        self.sigmaDoubleSpinBox.valueChanged.connect(self._on_sigma_changed)

    def load_all(self):
        """Populate the widgets from conf['denoise']."""
        c = self.w.conf["denoise"]
        self.methodComboBox.setCurrentIndex(denoise_methods.index(c["method"]))
        self.sigmaDoubleSpinBox.setValue(c["sigma"])

    def _clear_dt_cache(self):
        # Invalidate the 'denoise' stage and everything downstream.
        self.w.clear_cache_types(names_data_types[2:])

    def _on_method_changed(self):
        self.w.conf["denoise"]["method"] = denoise_methods[self.methodComboBox.currentIndex()]
        self._clear_dt_cache()

    def _on_sigma_changed(self):
        self.w.conf["denoise"]["sigma"] = self.sigmaDoubleSpinBox.value()
        self._clear_dt_cache()
class ThresholdTab:
    """Wires the 'threshold' configuration section (level, hole filling) to
    its Qt widgets.  Changes invalidate the 'threshold' stage and downstream."""

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.thresholdDoubleSpinBox = self.w.ui.thresholdDoubleSpinBox
        self.fillHolesCheckBox = self.w.ui.fillHolesCheckBox

    def connect_all(self):
        """Connect widget signals; both widgets share one handler."""
        self.thresholdDoubleSpinBox.valueChanged.connect(self._on_threshold_changed)
        self.fillHolesCheckBox.stateChanged.connect(self._on_threshold_changed)

    def load_all(self):
        """Populate the widgets from conf['threshold']."""
        c = self.w.conf["threshold"]
        self.thresholdDoubleSpinBox.setValue(c["threshold"])
        self.fillHolesCheckBox.setChecked(c["fill_holes"])

    def _clear_dt_cache(self):
        # Invalidate the 'threshold' stage and everything downstream.
        self.w.clear_cache_types(names_data_types[3:])

    def _on_threshold_changed(self):
        # Shared handler: sync both settings regardless of which widget fired.
        self.w.conf["threshold"]["threshold"] = self.thresholdDoubleSpinBox.value()
        self.w.conf["threshold"]["fill_holes"] = self.fillHolesCheckBox.isChecked()
        self._clear_dt_cache()
# Peak-centering strategies, in combo-box index order.
peak_centering_methods = ["center_to_max", "center_of_mass"]
class DetectTab:
    """Wires the 'detect' configuration section (min distance, centering
    method, particle cap) to its Qt widgets.  Changes invalidate the
    'detect' stage and downstream."""

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.minDistDoubleSpinBox = self.w.ui.minDistDoubleSpinBox
        self.methodComboBox = self.w.ui.methodComboBox_2
        self.nParticlesMaxSpinBox = self.w.ui.nParticlesMaxSpinBox

    def connect_all(self):
        """Connect widget signals to the configuration update handlers."""
        self.minDistDoubleSpinBox.valueChanged.connect(self._on_min_dist_changed)
        self.methodComboBox.currentIndexChanged.connect(self._on_method_changed)
        self.nParticlesMaxSpinBox.valueChanged.connect(self._on_n_particles_max_changed)

    def _clear_dt_cache(self):
        # Invalidate the 'detect' stage and everything downstream.
        self.w.clear_cache_types(names_data_types[4:])

    def load_all(self):
        """Populate the widgets from conf['detect']."""
        c = self.w.conf["detect"]
        self.minDistDoubleSpinBox.setValue(c["min_dist"])
        self.methodComboBox.setCurrentIndex(peak_centering_methods.index(c["peak_centering"]))
        self.nParticlesMaxSpinBox.setValue(c["n_particles_max"])

    def _on_min_dist_changed(self):
        self.w.conf["detect"]["min_dist"] = self.minDistDoubleSpinBox.value()
        self._clear_dt_cache()

    def _on_method_changed(self):
        self.w.conf["detect"]["peak_centering"] = peak_centering_methods[self.methodComboBox.currentIndex()]
        self._clear_dt_cache()

    def _on_n_particles_max_changed(self):
        self.w.conf["detect"]["n_particles_max"] = self.nParticlesMaxSpinBox.value()
        self._clear_dt_cache()
# Intensity-integration strategies, in combo-box index order.
integration_modes = ["windows", "labels"]
class AnalyseTab:
    """Wires the 'analyse' configuration section (integration mode, window
    size/shape) to its Qt widgets.  Changes invalidate the 'analyse' stage."""

    def __init__(self, mainWindow):
        self.w = mainWindow
        self.modeComboBox = self.w.ui.modeComboBox
        self.windowSizeSpinBox = self.w.ui.windowSizeSpinBox
        self.circleWindowCheckBox = self.w.ui.circleWindowCheckBox

    def connect_all(self):
        """Connect widget signals to the configuration update handlers."""
        self.modeComboBox.currentIndexChanged.connect(self._on_mode_changed)
        self.windowSizeSpinBox.valueChanged.connect(self._on_window_size_changed)
        self.circleWindowCheckBox.stateChanged.connect(self._on_circle_window_changed)

    def load_all(self):
        """Populate the widgets from conf['analyse']."""
        c = self.w.conf["analyse"]
        self.modeComboBox.setCurrentIndex(integration_modes.index(c["integration_mode"]))
        self.windowSizeSpinBox.setValue(c["window_size"])
        self.circleWindowCheckBox.setChecked(c["circle_window"])

    def _clear_dt_cache(self):
        # Invalidate only the final 'analyse' stage.
        self.w.clear_cache_types(names_data_types[5:])

    def _on_mode_changed(self):
        self.w.conf["analyse"]["integration_mode"] = integration_modes[self.modeComboBox.currentIndex()]
        self._clear_dt_cache()

    def _on_window_size_changed(self):
        self.w.conf["analyse"]["window_size"] = self.windowSizeSpinBox.value()
        self._clear_dt_cache()

    def _on_circle_window_changed(self):
        self.w.conf["analyse"]["circle_window"] = self.circleWindowCheckBox.isChecked()
        self._clear_dt_cache()
| 38.730159 | 131 | 0.676571 | 14,204 | 0.970219 | 0 | 0 | 0 | 0 | 0 | 0 | 1,326 | 0.090574 |
3947cf0eb2a75dba28fc123b089a7c041e8b5a2d | 5,800 | py | Python | narx_double_descent.py | antonior92/narx-double-descent | a3aaed7b974a43bd37af64e7db7cd81fc59087d3 | [
"MIT"
] | 6 | 2021-06-20T01:58:26.000Z | 2022-02-16T10:13:57.000Z | narx_double_descent.py | antonior92/narx-double-descent | a3aaed7b974a43bd37af64e7db7cd81fc59087d3 | [
"MIT"
] | null | null | null | narx_double_descent.py | antonior92/narx-double-descent | a3aaed7b974a43bd37af64e7db7cd81fc59087d3 | [
"MIT"
] | null | null | null | import numpy as np
from models import *
from datasets import *
from util import parse_funct_arguments
import pickle
import itertools
def mse(y_true, y_mdl):
    """Mean squared error between *y_true* and *y_mdl*."""
    residual = y_true - y_mdl
    return np.mean(residual ** 2)
def train(mdl, dset):
    """Fit *mdl* as a one-step-ahead predictor on the training split of *dset*."""
    u_train, y_train = dset.get_train()
    # Assemble the linear regression problem from lagged inputs/outputs.
    X, z = construct_linear_system(u_train, y_train, dset.nus, dset.nys)
    return mdl.fit(X, z)
def evaluate(mdl, dset):
    """Score a fitted model on both splits of *dset*.

    Returns a tuple ``(d, pred_train, pred_test)`` where *d* holds the
    one-step-ahead and free-run-simulation MSEs (plus the parameter norm when
    the model exposes one) and the other two dicts hold the raw predictions.
    """
    # Get test
    u_train, y_train = dset.get_train()
    u_test, y_test = dset.get_test()
    X_train, z_train = construct_linear_system(u_train, y_train, dset.nus, dset.nys)
    X_test, z_test = construct_linear_system(u_test, y_test, dset.nus, dset.nys)

    # One-step-ahead prediction
    y_pred_train = back_to_original_shape(mdl.predict(X_train), n_seq=y_train.shape[1], n_out=y_train.shape[2])
    y_pred_test = back_to_original_shape(mdl.predict(X_test), n_seq=y_test.shape[1], n_out=y_test.shape[2])

    # Free run simulation (noise-free: sd_v = sd_w = 0); the first
    # `simulate.order` samples are dropped so predictions align with targets.
    simulate = DynamicalSystem(dset.nys, dset.nus, mdl.predict, sd_v=0, sd_w=0)
    y_sim_train = simulate(u_train)[simulate.order:, ...]
    y_sim_test = simulate(u_test)[simulate.order:, ...]
    d = {'mdl': repr(mdl), 'dset': repr(dset),
         'mse_pred_train': mse(y_train[simulate.order:, ...], y_pred_train),
         'mse_pred_test': mse(y_test[simulate.order:, ...], y_pred_test),
         'mse_sim_train': mse(y_train[simulate.order:, ...], y_sim_train),
         'mse_sim_test': mse(y_test[simulate.order:, ...], y_sim_test)
         }
    if hasattr(mdl, 'param_norm'):
        d['param_norm'] = mdl.param_norm
    pred_train = {'z_pred_train': y_pred_train, 'z_sim_train': y_sim_train}
    pred_test = {'z_pred_test': y_pred_test, 'z_sim_test': y_sim_test}
    return d, pred_train, pred_test
# ---- Main script ----
# Sweeps the ratio (n features / n samples) over a log grid, fits one model
# per (seed, proportion) pair, and appends prediction/simulation MSEs to a
# CSV after every fit (so partial results survive interruption).
if __name__ == "__main__":
    from tqdm import tqdm
    import pandas as pd
    import argparse
    import os

    parser = argparse.ArgumentParser(description='Estimate NARX model for different n features / n samples rate.')
    parser.add_argument('-r', '--repetitions', default=1, type=int,
                        help='number of repetitions')
    parser.add_argument('-o', '--output', default='./performance.csv',
                        help='output csv file.')
    parser.add_argument('-d', '--dset', type=str, default='ChenDSet',
                        help='number of repetitions')
    parser.add_argument('-m', '--nonlinear_model', default='RBFSampler',
                        help='number of repetitions')
    parser.add_argument('-n', '--num_points', default=60, type=int,
                        help='number of points')
    parser.add_argument('-l', '--lower_proportion', default=-1, type=float,
                        help='the lowest value for the proportion (n features / n samples) is 10^l.')
    parser.add_argument('-u', '--upper_proportion', default=2, type=float,
                        help='the upper value for the proportion (n features / n samples) is 10^u.')
    parser.add_argument('-s', '--save_models', nargs='?', default='', const='./models',
                        help='save intermediary models.')
    parser.add_argument('-w', '--reuse_weights', action='store_true',
                        help='use weights from previous model (with less features) when estimate the next one.')
    args, unk = parser.parse_known_args()

    # Saving models (when needed): save_mdl is a pickle-dump when a directory
    # was requested, otherwise a no-op.
    if args.save_models:
        if not os.path.isdir(args.save_models):
            os.mkdir(args.save_models)

        def save_mdl(mdl):
            fname = os.path.join(args.save_models, repr(mdl)+'.pkl')
            with open(fname, 'wb') as f:
                pickle.dump(mdl, f)
    else:
        def save_mdl(_mdl):
            pass

    # Get model (from command line).
    # NOTE(review): eval() on a CLI string executes arbitrary code; acceptable
    # for a local research script, but never expose this to untrusted input.
    ModelTmp = eval(args.nonlinear_model)
    Model, _, unk = parse_funct_arguments(ModelTmp, unk, free_arguments=['n_features', 'random_state'])
    # Get dataset (from the command line)
    DatasetTmp = eval(args.dset)
    Dataset, _, unk = parse_funct_arguments(DatasetTmp, unk)
    dset = Dataset()

    # Baseline: a plain linear model, once per seed.
    tqdm.write("Estimating baseline performance...")
    baseline_mdl = Linear()
    baseline_list = []
    for seed in tqdm(range(args.repetitions)):
        np.random.seed(seed)
        d, pred_train, pred_test = evaluate(train(baseline_mdl, dset), dset)
        d['seed'] = seed
        d['proportion'] = 0  # To signal it is the baseline (n features being a constant)
        baseline_list.append(d)
        # Save model
        save_mdl(baseline_mdl)
    df = pd.DataFrame(baseline_list)
    df.to_csv(args.output, index=False)
    tqdm.write("Done")

    tqdm.write("Estimating performance as a function of proportion...")
    list_dict = []
    # Log grid split at proportion 1 (the interpolation threshold) so both the
    # under- and over-parameterized regimes are sampled.
    underp = np.logspace(args.lower_proportion, 0, args.num_points // 2)
    overp = np.logspace(0.00001, args.upper_proportion, args.num_points - args.num_points // 2)
    proportions = np.concatenate((underp, overp))
    run_instances = list(itertools.product(range(args.repetitions), proportions))
    prev_mdl = None  # used only if reuse_weights is True
    num_samples = dset.effective_num_train_samples
    for seed, proportion in tqdm(run_instances):
        n_features = int(proportion * num_samples)
        mdl = Model(n_features=n_features, random_state=seed)
        # Optionally warm-start from the previously fitted (smaller) model.
        if args.reuse_weights and hasattr(mdl, 'reuse_weights_from_mdl'):
            if prev_mdl is not None:
                mdl.reuse_weights_from_mdl(prev_mdl)
            prev_mdl = mdl
        d, pred_train, pred_test = evaluate(train(mdl, dset), dset)
        d['proportion'] = proportion
        d['seed'] = seed
        # NOTE(review): DataFrame.append is removed in pandas >= 2.0; this
        # script requires an older pandas (or a migration to pd.concat).
        df = df.append(d, ignore_index=True)
        df.to_csv(args.output, index=False)
        # Save model
        save_mdl(mdl)
    tqdm.write("Done")
| 42.335766 | 114 | 0.643793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,331 | 0.229483 |
394975f50a2ee885d791abad087804c62ac53785 | 6,000 | py | Python | tests/artifactcache/push.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | tests/artifactcache/push.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | tests/artifactcache/push.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | # Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream import _yaml
from buildstream._project import Project
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
from buildstream._testing import cli # pylint: disable=unused-import
from tests.testutils import create_artifact_share, create_split_share, dummy_context
# Project directory
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
# Push the given element and return its artifact key for assertions.
def _push(cli, cache_dir, project_dir, config_file, target):
with dummy_context(config=config_file) as context:
# Load the project manually
project = Project(project_dir, context)
project.ensure_fully_loaded()
# Assert that the element's artifact is cached
element = project.load_elements(["target.bst"])[0]
element_key = cli.get_element_key(project_dir, "target.bst")
assert cli.artifact.is_cached(cache_dir, element, element_key)
# Create a local artifact cache handle
artifactcache = context.artifactcache
# Initialize remotes
context.initialize_remotes(True, True, None, None)
# Query local cache
element._load_artifact(pull=False)
assert artifactcache.has_push_remotes(plugin=element), "No remote configured for element target.bst"
assert element._push(), "Push operation failed"
return element_key
@pytest.mark.datafiles(DATA_DIR)
def test_push(cli, tmpdir, datafiles):
project_dir = str(datafiles)
# First build the project without the artifact cache configured
result = cli.run(project=project_dir, args=["build", "target.bst"])
result.assert_success()
# Assert that we are now cached locally
assert cli.get_element_state(project_dir, "target.bst") == "cached"
# Set up an artifact cache.
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
# Configure artifact share
rootcache_dir = os.path.join(str(tmpdir), "cache")
user_config_file = str(tmpdir.join("buildstream.conf"))
user_config = {
"scheduler": {"pushers": 1},
"artifacts": {"servers": [{"url": share.repo, "push": True,}]},
"cachedir": rootcache_dir,
}
# Write down the user configuration file
_yaml.roundtrip_dump(user_config, file=user_config_file)
element_key = _push(cli, rootcache_dir, project_dir, user_config_file, "target.bst")
assert share.get_artifact(cli.get_artifact_name(project_dir, "test", "target.bst", cache_key=element_key))
@pytest.mark.datafiles(DATA_DIR)
def test_push_split(cli, tmpdir, datafiles):
project_dir = str(datafiles)
# First build the project without the artifact cache configured
result = cli.run(project=project_dir, args=["build", "target.bst"])
result.assert_success()
# Assert that we are now cached locally
assert cli.get_element_state(project_dir, "target.bst") == "cached"
indexshare = os.path.join(str(tmpdir), "indexshare")
storageshare = os.path.join(str(tmpdir), "storageshare")
# Set up an artifact cache.
with create_split_share(indexshare, storageshare) as (index, storage):
rootcache_dir = os.path.join(str(tmpdir), "cache")
user_config = {
"scheduler": {"pushers": 1},
"artifacts": {
"servers": [
{"url": index.repo, "push": True, "type": "index"},
{"url": storage.repo, "push": True, "type": "storage"},
],
},
"cachedir": rootcache_dir,
}
config_path = str(tmpdir.join("buildstream.conf"))
_yaml.roundtrip_dump(user_config, file=config_path)
element_key = _push(cli, rootcache_dir, project_dir, config_path, "target.bst")
proto = index.get_artifact_proto(
cli.get_artifact_name(project_dir, "test", "target.bst", cache_key=element_key)
)
assert storage.get_cas_files(proto) is not None
@pytest.mark.datafiles(DATA_DIR)
def test_push_message(tmpdir, datafiles):
project_dir = str(datafiles)
# Set up an artifact cache.
artifactshare = os.path.join(str(tmpdir), "artifactshare")
with create_artifact_share(artifactshare) as share:
# Configure artifact share
rootcache_dir = os.path.join(str(tmpdir), "cache")
user_config_file = str(tmpdir.join("buildstream.conf"))
user_config = {
"scheduler": {"pushers": 1},
"artifacts": {"servers": [{"url": share.repo, "push": True,}]},
"cachedir": rootcache_dir,
}
# Write down the user configuration file
_yaml.roundtrip_dump(user_config, file=user_config_file)
with dummy_context(config=user_config_file) as context:
# Load the project manually
project = Project(project_dir, context)
project.ensure_fully_loaded()
# Create a local artifact cache handle
artifactcache = context.artifactcache
# Initialize remotes
context.initialize_remotes(True, True, None, None)
assert artifactcache.has_push_remotes()
command = remote_execution_pb2.Command(
arguments=["/usr/bin/gcc", "--help"],
working_directory="/buildstream-build",
output_directories=["/buildstream-install"],
)
# Push the message object
command_digest = artifactcache.push_message(project, command)
message_hash, message_size = command_digest.hash, command_digest.size_bytes
assert message_hash and message_size
message_digest = remote_execution_pb2.Digest(hash=message_hash, size_bytes=message_size)
assert share.has_object(message_digest)
| 38.461538 | 114 | 0.6695 | 0 | 0 | 0 | 0 | 4,423 | 0.737167 | 0 | 0 | 1,566 | 0.261 |
394c9655ba556f0a547343acb0be6e59aaec3220 | 3,006 | py | Python | app/app.py | HAKSOAT/Basafa | f110ec3b175deb3486525e17795e832de5baa108 | [
"Apache-2.0"
] | 22 | 2020-01-12T18:48:51.000Z | 2021-03-31T22:20:50.000Z | app/app.py | HAKSOAT/Basafa | f110ec3b175deb3486525e17795e832de5baa108 | [
"Apache-2.0"
] | 1 | 2020-12-08T16:44:30.000Z | 2020-12-08T16:44:30.000Z | app/app.py | HAKSOAT/Basafa | f110ec3b175deb3486525e17795e832de5baa108 | [
"Apache-2.0"
] | 1 | 2020-04-08T20:57:52.000Z | 2020-04-08T20:57:52.000Z | import logging
from app.config import oapi, app_api, redis, LAST_MENTION_ID
from app.fetcher import fetch
from app.utils import compile_tweet_link, process_tweet_text, get_most_similar_tweets, send_tweet, \
get_action, ActionType, send_no_reference_tweet
import tweepy
logging.basicConfig(filename='app.log', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def run():
logging.info('Starting session')
last_mention_id = redis.get('id') or LAST_MENTION_ID
logging.info(f'Last mention id: {last_mention_id}')
if last_mention_id:
mentions = oapi.mentions_timeline(int(last_mention_id))[::-1]
else:
mentions = oapi.mentions_timeline()[::-1]
for mention in mentions:
# Get mention details
mentioner = mention.author.screen_name
mention_id = mention.id
mention_text = mention.text
# Check if mention is a call to action
action = get_action(mention_text)
if not action:
redis.set('id', mention_id)
logging.info(f'{mention_text} by {mentioner} is not valid')
continue
# Get tweet details
redis.set('id', mention_id)
logging.info(f'{mention_text} by {mentioner} is valid')
try:
tweet = app_api.get_status(mention.in_reply_to_status_id, tweet_mode='extended')
except tweepy.error.TweepError as e:
logging.error(e, exc_info=True)
if e.args[0][0]['code'] == 144:
send_no_reference_tweet(mentioner, mention_id)
continue
tweet_id = tweet.id
tweet_datetime = tweet.created_at
tweet_date = tweet_datetime.strftime('%Y-%m-%d')
# Check if clone has been requested of tweet before
if redis.get(tweet_id):
sent_tweet = redis.get(f'{action}: {tweet_id}')
send_tweet(mentioner, mention_id, None, sent_tweet)
continue
tweet_text = process_tweet_text(tweet.full_text)
tweet_details = {'text': tweet_text, 'id': tweet_id}
if action == ActionType.old.value:
fetched_tweets = fetch(tweet_text, action, tweet_date, tweet_datetime)
elif action == ActionType.new.value:
fetched_tweets = fetch(tweet_text, action, tweet_date, tweet_datetime)
else:
fetched_tweets = fetch(tweet_text, action)
similar_tweets = get_most_similar_tweets(fetched_tweets, tweet_details, 3)
links = []
for similar_tweet in similar_tweets:
link = compile_tweet_link(similar_tweet)
links.append(link)
try:
sent_tweet = send_tweet(mentioner, mention_id, links)
cached_tweet_timeout = 60 * 60
redis.set(f'{action}: {tweet_id}', sent_tweet, ex=cached_tweet_timeout)
logging.info(f'Tweet Sent to @{mentioner}')
except tweepy.error.TweepError as e:
logging.error(e, exc_info=True)
| 35.785714 | 102 | 0.641051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.151697 |
394e239ac65530d286a58a397f1543181a576923 | 3,649 | py | Python | attachments/models.py | javango/django-attachments | f50152229056728778a0a1db1de34646f66efa24 | [
"BSD-3-Clause"
] | null | null | null | attachments/models.py | javango/django-attachments | f50152229056728778a0a1db1de34646f66efa24 | [
"BSD-3-Clause"
] | null | null | null | attachments/models.py | javango/django-attachments | f50152229056728778a0a1db1de34646f66efa24 | [
"BSD-3-Clause"
] | null | null | null | import os
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.core.files.storage import DefaultStorage
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
# attachment storage can be customised using ATTACHMENTS_STORAGE.
storage = getattr(settings, 'ATTACHMENTS_STORAGE', None) or DefaultStorage()
class AttachmentManager(models.Manager):
def attachments_for_object(self, obj):
object_type = ContentType.objects.get_for_model(obj)
return self.filter(content_type__pk=object_type.id,
object_id=obj.id)
def attachment_upload(instance, filename):
"""
Stores the attachment in a "per module/appname/primary key" folder
"""
co = instance.content_object
try:
object_id = co.slug() if callable(co.slug) else co.slug
except AttributeError:
object_id = co.pk
if object_id == '':
object_id = co.pk
extras = [
'%s_%s' % (co._meta.app_label,
co._meta.object_name.lower()
),
object_id]
# slugify filename before returning its path.
base, ext = os.path.splitext(filename.lower())
filename = slugify(base) + ext
fullname = 'attachments/%s/%s/%s' % (
tuple(extras) + (filename,))
templates = '/%s/%s/%s'
while len(fullname) > 100:
try:
extras.pop()
templates = templates[:-3]
except IndexError:
break
fullname = 'attachments' + (templates % (
tuple(extras) + (filename,)))
if len(fullname) > 100:
base, ext = os.path.splitext(fullname)
fullname = 'attachments/%s%s' % (base[:30], ext)
return fullname
class Attachment(models.Model):
objects = AttachmentManager()
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
creator = models.ForeignKey(User, related_name="created_attachments", verbose_name=_('creator'))
attachment_file = models.FileField(_('attachment'),
upload_to=attachment_upload, storage=storage)
created = models.DateTimeField(_('created'), auto_now_add=True)
modified = models.DateTimeField(_('modified'), auto_now=True)
safe = models.BooleanField (_('safe'), default=False)
mime_type = models.CharField(_('mime type'), max_length=50, null=True, blank=True,
help_text=_('leave empty to handle by file extension'))
display_name = models.CharField(_('display name'), max_length=256,
null=True, blank=True,
help_text=_('displayed as link text for attachment.'))
class Meta:
ordering = ['-created']
permissions = (
('delete_foreign_attachments', 'Can delete foreign attachments'),
)
def __unicode__(self):
return ('%s attached %s' %
(self.creator.username,
self.display_name or self.attachment_file.name))
@property
def link_name(self):
return self.display_name or self.filename
@property
def filename(self):
return os.path.split(self.attachment_file.name)[1]
@models.permalink
def get_absolute_url(self):
return ('attachments.views.retrieve_attachment', [str(self.id)])
url = property(get_absolute_url)
| 34.102804 | 100 | 0.648945 | 1,969 | 0.5396 | 0 | 0 | 298 | 0.081666 | 0 | 0 | 609 | 0.166895 |
394eab59f9ddad5f0a7d648d203df7753ee2aeca | 1,769 | py | Python | src/models/dive.py | Skorp7/backend | 0fe06cbe6146f4c863983d7059387b05d9df5894 | [
"MIT"
] | null | null | null | src/models/dive.py | Skorp7/backend | 0fe06cbe6146f4c863983d7059387b05d9df5894 | [
"MIT"
] | null | null | null | src/models/dive.py | Skorp7/backend | 0fe06cbe6146f4c863983d7059387b05d9df5894 | [
"MIT"
] | null | null | null | from pymodm import MongoModel, fields
from models.target import Target
from models.user import User
class Dive(MongoModel):
diver = fields.ReferenceField(User)
target = fields.ReferenceField(Target)
created_at = fields.DateTimeField()
location_correct = fields.BooleanField()
new_x_coordinate = fields.CharField(blank=True)
new_y_coordinate = fields.CharField(blank=True)
new_location_explanation = fields.CharField(blank=True)
change_text = fields.CharField(blank=True)
miscellaneous = fields.CharField(blank=True)
class Meta:
connection_alias = 'app'
final = True
@staticmethod
def create(
diver,
target,
location_correct,
created_at,
new_x_coordinate=None,
new_y_coordinate=None,
new_location_explanation=None,
change_text=None,
miscellaneous=None
):
dive = Dive(
diver,
target,
created_at,
location_correct,
new_x_coordinate,
new_y_coordinate,
new_location_explanation,
change_text,
miscellaneous
)
dive.save()
return dive
def to_json(self):
return {
'id': str(self._id) or None,
'diver': self.diver.to_json(),
'target': self.target.to_json(),
'location_correct': self.location_correct,
'created_at': str(self.created_at),
'miscellanious': self.miscellaneous,
'change_text': self.change_text,
'new_x_coordinate': self.new_x_coordinate,
'new_y_coordinate': self.new_y_coordinate,
'new_location_explanation': self.new_location_explanation,
}
| 29.483333 | 70 | 0.616167 | 1,666 | 0.941775 | 0 | 0 | 580 | 0.327869 | 0 | 0 | 144 | 0.081402 |
394feb4118d6e9b0b87ec29bc0be9b581f2100ed | 3,193 | py | Python | evaluate.py | uw-biomedical-ml/oct-irf-train | ebf8631f96883ec5ed91574201b05818f95c0f7d | [
"BSD-3-Clause"
] | 1 | 2021-07-24T06:44:06.000Z | 2021-07-24T06:44:06.000Z | evaluate.py | uw-biomedical-ml/oct-irf-train | ebf8631f96883ec5ed91574201b05818f95c0f7d | [
"BSD-3-Clause"
] | 5 | 2020-09-25T22:35:32.000Z | 2022-02-09T23:37:02.000Z | evaluate.py | uw-biomedical-ml/oct-irf-train | ebf8631f96883ec5ed91574201b05818f95c0f7d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from PIL import Image
import sys, glob, tqdm, os
import numpy as np
from colour import Color
def usage():
print("./evaluate.py <imgdir> <outdir> <mode>")
print("")
print("\timgdir = folder of OCT B scans")
print("\toutdir = EMPTY folder to output segmentation masks")
print("\tmode = mask, mask_blend, blend")
sys.exit(-1)
if len(sys.argv) != 4:
usage()
import deeplearning.unet
(_, indir, outdir, mode) = sys.argv
if not os.path.isdir(indir):
print("ERROR: %s is not a directory" % indir)
sys.exit(-1)
if not os.path.isdir(outdir):
print("ERROR: %s is not a directory" % outdir)
sys.exit(-1)
if len(glob.glob("%s/*" % outdir)) != 0:
print("ERROR: %s is not empty" % outdir)
sys.exit(-1)
imgs = []
for f in glob.glob("%s/*" % indir):
(_, ext) = os.path.splitext(f)
if ext in [".jpg", ".png", ".jpeg"]:
imgs.append(f)
if len(imgs) == 0:
print("ERROR: %s has no images!" % indir)
sys.exit(-1)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
model = deeplearning.unet.get_unet()
model.load_weights("runs/weights.hdf5", by_name=True)
image_rows = 432
image_cols = 32
my_cm = []
colors = list(Color("yellow").range_to(Color("red"),1001))
for c in colors:
my_cm.append((255 * np.array(c.rgb)).astype(np.uint8))
my_cm = np.array(my_cm)
for f in tqdm.tqdm(imgs):
ji = Image.open(f)
img = np.array(ji)
img = img.astype(np.float)
img -= 28.991758347
img /= 46.875888824
totaloutput = np.zeros((img.shape[0], img.shape[1], 32))
ym = np.argmax(np.sum(img, axis=1))
y0 = int(ym - image_rows / 2)
y1 = int(ym + image_rows / 2)
if y0 < 0:
y0 = 0
if y1 >= img.shape[0]:
y1 = img.shape[0] - 1
for dx in tqdm.tqdm(range(0, img.shape[1] - 32)):
sliori = np.zeros((image_rows, image_cols), dtype=np.float)
sliori[0:y1-y0, :] = img[y0:y1, dx:dx+image_cols]
imgsbatch = sliori.reshape((1, 1, image_rows,image_cols))
output = model.predict(imgsbatch, batch_size=1)
totaloutput[y0:y1,dx:dx+image_cols,dx % 32] = output[0,0,0:y1-y0,:]
totaloutput = np.mean(totaloutput, 2)
if (mode == "mask"):
# for binary masks
mask = (totaloutput > 0.5)
mask = np.uint8(mask)
mask *= 255
mask = Image.fromarray(mask)
mask.save(f.replace(indir,outdir))
elif (mode == "mask_blend"):
# for masked heatmap overlay
mask = (totaloutput < 0.5)
mask = np.uint8(mask)
mask *= 255
mask = Image.fromarray(mask)
mapped_data = np.zeros((totaloutput.shape[0], totaloutput.shape[1],3), dtype="uint8")
totalint = (1000 * totaloutput).astype(np.uint16)
mapped_data = my_cm[totalint]
j = Image.fromarray(mapped_data).convert('RGBA')
ji = ji.convert("RGBA")
Image.composite(ji, j,mask).save(f.replace(indir,outdir))
elif (mode == "blend"):
# for blend overlay
totalint = (1000 * totaloutput).astype(np.uint16)
mapped_data = my_cm[totalint]
j = Image.fromarray(mapped_data).convert('RGBA')
ji = ji.convert("RGBA")
Image.blend(ji, j,0.5).save(f.replace(indir,outdir))
print("\n\nFinished.")
| 29.293578 | 86 | 0.61259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.162543 |
3950e84c703f64f3f48f3a37d0f1cd0486c9f552 | 29,604 | py | Python | interfaz.py | ifigueroa065/Voluntariado | 375eab96adc7a95f8204244f942840bdce47c8b5 | [
"MIT"
] | null | null | null | interfaz.py | ifigueroa065/Voluntariado | 375eab96adc7a95f8204244f942840bdce47c8b5 | [
"MIT"
] | null | null | null | interfaz.py | ifigueroa065/Voluntariado | 375eab96adc7a95f8204244f942840bdce47c8b5 | [
"MIT"
] | null | null | null | from tkinter import *
import os
from datetime import datetime
import webbrowser
from tkinter import messagebox
from tkinter import ttk
import tkinter.filedialog
import tkinter as tk
import openpyxl
from REPORTE import *
# Almacenes globales en memoria, llenados por los diálogos de carga de la GUI:
datos = [] #reporte: filas del Excel de entregas (objetos Reporte)
precios = [] #precios de medicamentos (MED) (objetos Datos)
preciosmq=[] #precios mq: médico-quirúrgicos (objetos Datos)
subtotales = []  # subtotales calculados por DIALOGO_REPORTE (objetos Subtotal)
def CREAR_INTERFAZ():
def DIALOGO():
fd= tkinter.Tk()
fd.withdraw()
ruta=tkinter.filedialog.askopenfilename(
initialdir="C:",
filetypes=(
("Libro de Excel", "*.xlsx"),
("Libro de Excel 97 a Excel 2003", "*.xls"),
("Todos los Archivos de Excel","*.*")
),
title = "ABRIR ARCHIVO"
)
if ruta=="":
messagebox.showinfo(message="Debe cargar un archivo", title="ERROR")
else:
try:
print("------> "+ ruta)
rut.set("CARGA EXITOSA")
book2 = openpyxl.load_workbook(ruta, data_only=True)
celdas2 = book2.active
for row in range(2,celdas2.max_row +1):
if(celdas2.cell(row,1).value is not None):
precios.append(Datos(celdas2.cell(row,1).value,celdas2.cell(row,2).value, celdas2.cell(row,3).value))
finally:
print(" ************************** ")
print(" SUCCESSFULLY ")
print(" ************************** ")
def DIALOGO2():
fd= tkinter.Tk()
fd.withdraw()
ruta=tkinter.filedialog.askopenfilename(
initialdir="C:",
filetypes=(
("Libro de Excel", "*.xlsx"),
("Libro de Excel 97 a Excel 2003", "*.xls"),
("Todos los Archivos de Excel","*.*")
),
title = "ABRIR ARCHIVO"
)
if ruta=="":
messagebox.showinfo(message="Debe cargar un archivo", title="ERROR")
else:
try:
print("------> "+ ruta)
zm1.set("CARGA EXITOSA")
book2 = openpyxl.load_workbook(ruta, data_only=True)
celdas2 = book2.active
for row in range(2,celdas2.max_row +1):
if(celdas2.cell(row,1).value is not None):
preciosmq.append(Datos(celdas2.cell(row,1).value,celdas2.cell(row,2).value, celdas2.cell(row,3).value))
finally:
print(" ************************** ")
print(" SUCCESSFULLY ")
print(" ************************** ")
def DIALOGO_REPORTE():
TP=TIPO.get()
fd= tkinter.Tk()
fd.withdraw()
ruta=tkinter.filedialog.askopenfilename(
initialdir="C:",
filetypes=(
("Libro de Excel", "*.xlsx"),
("Libro de Excel 97 a Excel 2003", "*.xls"),
("Todos los Archivos de Excel","*.*")
),
title = "ABRIR ARCHIVO"
)
if ruta=="":
messagebox.showinfo(message="Debe cargar un archivo", title="ERROR")
else:
try:
print("------> "+ ruta)
rut.set("CARGA EXITOSA")
book = openpyxl.load_workbook(ruta, data_only=True)
celdas = book.active
for row in range(2,celdas.max_row):
if(celdas.cell(row,1).value is not None):
datos.append(Reporte(celdas.cell(row,1).value, celdas.cell(row,2).value, celdas.cell(row,3).value))
if TP=="MQ":
print("--------------IMPRIMIENDO SUBTOTALES-------------")
x=0
contador=0
while x<len(datos):
for i in preciosmq:
if datos[x].nombre.upper().replace(" ", "")==i.nombre.upper().replace(" ", ""):
contador+=1
subtotal=datos[x].entregado_usuario*i.precio
print(str(contador)+ ")" +datos[x].nombre +"="+ str(subtotal))
subtotales.append(Subtotal(contador,datos[x].codigo,datos[x].nombre,datos[x].entregado_usuario,subtotal))
break
x+=1
print("----------------------------------------")
TOTAL=0
for i in subtotales:
TOTAL+=i.subtotal
print("TOTAL = Q"+ str(TOTAL))
else:
print("--------------IMPRIMIENDO SUBTOTALES-------------")
x=0
contador=0
while x<len(datos):
for i in precios:
if datos[x].nombre.upper().replace(" ", "")==i.nombre.upper().replace(" ", ""):
contador+=1
subtotal=datos[x].entregado_usuario*i.precio
print(str(contador)+ ")" +datos[x].nombre +"="+ str(subtotal))
subtotales.append(Subtotal(contador,datos[x].codigo,datos[x].nombre,datos[x].entregado_usuario,subtotal))
break
x+=1
print("----------------------------------------")
TOTAL=0
for i in subtotales:
TOTAL+=i.subtotal
print("TOTAL = Q"+ str(TOTAL))
finally:
print(" ************************** ")
print(" SUCCESSFULLY ")
print(" ************************** ")
    def VER_REPORTE():
        """Genera REPORTE.html y lo abre en el navegador predeterminado.

        Lee los valores de los widgets de la pestaña de reportes (cerrados
        por closure desde CREAR_INTERFAZ), los interpola en una plantilla
        HTML fija (departamento, distrito, meses, municipio, tipo de
        servicio) y vuelca una fila de tabla por cada elemento de la lista
        global `subtotales` calculada por DIALOGO_REPORTE.

        Efectos secundarios: sobrescribe REPORTE.html en el directorio de
        trabajo y abre una pestaña nueva del navegador.
        """
        #obteniendo datos de inputs
        # NOTE(review): A, AR y SERV se leen pero no se escriben en el HTML
        # más abajo — confirmar si deberían interpolarse o eliminarse.
        A=año.get()
        MO=Mes_inicial.get()
        M=Mes_final.get()
        DEPA=dpto.get()
        AR=area.get()
        MUN=municipio.get()
        TIPS=t_servicio.get()
        SERV=servicio.get()
        DIST=distrito.get()
        f = open('REPORTE.html','w', encoding="utf-8")
        # Cabecera y barra lateral de la plantilla (SB Admin 2); los assets
        # css/js/vendor se resuelven relativos al directorio de trabajo.
        f.write("""
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">
    <title>ÁREA DE SALUD</title>
    <link href="img/icono.ico" rel="icon">
    <!-- Custom fonts for this template-->
    <link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
    <link
        href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
        rel="stylesheet">
    <!-- Custom styles for this template-->
    <link href="css/sb-admin-2.min.css" rel="stylesheet">
    <link href="vendor/datatables/dataTables.bootstrap4.min.css" rel="stylesheet">
</head>
<body id="page-top">
    <!-- Page Wrapper -->
    <div id="wrapper">
        <!-- Sidebar -->
        <ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
            <!-- Sidebar - Brand -->
            <a class="sidebar-brand d-flex align-items-center justify-content-center" href="REPORTE.html">
                <div class="sidebar-brand-icon rotate-n-15">
                    <i class="fas fa-laugh-wink"></i>
                </div>
                <div class="sidebar-brand-text mx-3">ANALISIS</div>
            </a>
            <!-- Divider -->
            <hr class="sidebar-divider my-0">
            <!-- Nav Item - Dashboard -->
            <li class="nav-item active">
                <a class="nav-link" href="REPORTE.html">
                    <i class="fas fa-bars"></i>
                    <span>REPORTE</span></a>
            </li>
            <!-- Divider -->
            <hr class="sidebar-divider">
            <!-- Heading -->
            <div class="sidebar-heading">
                OTROS
            </div>
            <!-- Nav Item - Utilities Collapse Menu -->
            <li class="nav-item">
                <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities"
                    aria-expanded="true" aria-controls="collapseUtilities">
                    <i class="fas fa-fw fa-2x"></i>
                    <span>BRESS</span>
                </a>
            </li>
            <!-- Divider -->
            <hr class="sidebar-divider d-none d-md-block">
            <!-- Sidebar Toggler (Sidebar) -->
            <div class="text-center d-none d-md-inline">
                <button class="rounded-circle border-0" id="sidebarToggle"></button>
            </div>
        </ul>
        <!-- End of Sidebar -->
        <!-- Content Wrapper -->
        <div id="content-wrapper" class="d-flex flex-column">
            <!-- Main Content -->
            <div id="content">
                <!-- Topbar -->
                <nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow">
                    <!-- Sidebar Toggle (Topbar) -->
                    <button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3">
                        <i class="fa fa-bars"></i>
                    </button>
                    <!-- Topbar Navbar -->
                    <ul class="navbar-nav ml-auto">
                        <!-- Nav Item - Search Dropdown (Visible Only XS) -->
                        <li class="nav-item dropdown no-arrow d-sm-none">
                            <a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button"
                                data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                                <i class="fas fa-search fa-fw"></i>
                            </a>
                        </li>
                        <div class="topbar-divider d-none d-sm-block"></div>
                        <!-- Nav Item - User Information -->
                        <li class="nav-item dropdown no-arrow">
                            <a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button"
                                data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                                <span class="mr-2 d-none d-lg-inline text-gray-600 small">Administrador</span>
                                <img class="img-profile rounded-circle"
                                    src="img/undraw_profile.svg">
                            </a>
                        </li>
                    </ul>
                </nav>
                <!-- End of Topbar -->
                <!-- Begin Page Content -->
                <div class="container-fluid">
                    <!-- Page Heading -->
                    <div class="d-sm-flex align-items-center justify-content-between mb-4">
                        <h1 class="h3 mb-0 text-gray-800">ÁREA DE SALUD DE CHIMALTENANGO</h1>
                        <a href="#" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm"><i
                                class="fas fa-download fa-sm text-white-50"></i> Descargar Reporte</a>
                    </div>
                    <!-- Content Row -->
                    <div class="row">
                        <!-- Earnings (Monthly) Card Example -->
                        <div class="col-xl-3 col-md-6 mb-4">
                            <div class="card border-left-primary shadow h-100 py-2">
                                <div class="card-body">
                                    <div class="row no-gutters align-items-center">
                                        <div class="col mr-2">
                                            <div class="text-xs font-weight-bold text-primary text-uppercase mb-1">
                                                Departamento</div>
                                            <div class="h5 mb-0 font-weight-bold text-gray-800">
        """)
        # Tarjetas de resumen: se interpolan los valores leídos de la GUI.
        f.write(DEPA) #DEPARTAMENTO
        f.write("""
                                            </div>
                                        </div>
                                        <div class="col-auto">
                                            <i class="fas fa-fw"></i>
                                        </div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        <!-- Earnings (Monthly) Card Example -->
                        <div class="col-xl-3 col-md-6 mb-4">
                            <div class="card border-left-success shadow h-100 py-2">
                                <div class="card-body">
                                    <div class="row no-gutters align-items-center">
                                        <div class="col mr-2">
                                            <div class="text-xs font-weight-bold text-success text-uppercase mb-1">
                                                Distrito</div>
                                            <div class="h5 mb-0 font-weight-bold text-gray-800">
        """)
        f.write(DIST) #DISTRITO
        f.write("""
                                            </div>
                                        </div>
                                        <div class="col-auto">
                                            <i class="fas fa-fw"></i>
                                        </div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        <!-- Earnings (Monthly) Card Example -->
                        <div class="col-xl-3 col-md-6 mb-4">
                            <div class="card border-left-info shadow h-100 py-2">
                                <div class="card-body">
                                    <div class="row no-gutters align-items-center">
                                        <div class="col mr-2">
                                            <div class="text-xs font-weight-bold text-info text-uppercase mb-1">Del Mes
                                            </div>
                                            <div class="row no-gutters align-items-center">
                                                <div class="col-auto">
                                                    <div class="h5 mb-0 mr-3 font-weight-bold text-gray-800">
        """)
        f.write(MO) #MES INICIAL
        f.write("""
                                                    </div>
                                                </div>
                                            </div>
                                        </div>
                                        <div class="col-auto">
                                            <i class="fas fa-calendar fa-2x text-gray-300"></i>
                                        </div>
                                    </div>
                                </div>
                            </div>
                        </div>
                        <!-- Pending Requests Card Example -->
                        <div class="col-xl-3 col-md-6 mb-4">
                            <div class="card border-left-warning shadow h-100 py-2">
                                <div class="card-body">
                                    <div class="row no-gutters align-items-center">
                                        <div class="col mr-2">
                                            <div class="text-xs font-weight-bold text-warning text-uppercase mb-1">
                                                Al mes</div>
                                            <div class="h5 mb-0 font-weight-bold text-gray-800">
        """)
        f.write(M) #MES FINAL
        f.write("""
                                            </div>
                                        </div>
                                        <div class="col-auto">
                                            <i class="fas fa-calendar fa-2x text-gray-300"></i>
                                        </div>
                                    </div>
                                </div>
                            </div>
                        </div>
                    </div>
                    <!-- Content Row -->
                    <div class="row">
                        <!-- TABLA RESUMEN-->
                        <h1 class="h3 mb-2 text-gray-800">
        """)
        f.write(MUN) # MUNICIPIO
        f.write("""
                        </h1>
                        <p class="mb-4">Reporte de Balance, Requisición y Envío de Suministros</p>
                        <!-- TABLA DE MEDICAMENTOS Y MÉDIDO QUIRURGICO -->
                        <div class="card shadow mb-4">
                            <div class="card-header py-3">
                                <h6 class="m-0 font-weight-bold text-primary">
        """)
        f.write(TIPS) #TIPO DE SERVICIO
        f.write("""
                                </h6>
                            </div>
                            <div class="card-body">
                                <div class="table-responsive">
                                    <table class="table table-bordered" id="dataTable" width="100%" cellspacing="0">
                                        <thead>
                                            <tr>
                                                <th>Número de orden</th>
                                                <th>Código</th>
                                                <th>Descripción de Articulo/Producto</th>
                                                <th>Unidad de Medida</th>
                                                <th>Cantidad Autorizada</th>
                                                <th>Cantidad despachada</th>
                                                <th>Subtotal</th>
                                            </tr>
                                        </thead>
                                        <tfoot>
                                            <th>Número de orden</th>
                                            <th>Código</th>
                                            <th>Descripción de Articulo/Producto</th>
                                            <th>Unidad de Medida</th>
                                            <th>Cantidad Autorizada</th>
                                            <th>Cantidad despachada</th>
                                            <th>Subtotal </th>
                                        </tfoot>
                                        <tbody>
        """)
        # Una fila <tr> por cada Subtotal calculado por DIALOGO_REPORTE.
        # NOTE(review): la unidad de medida se emite como "x" (marcador) y
        # "Cantidad Autorizada" repite la cantidad despachada — confirmar.
        for i in subtotales:
            # Subtotal formateado con dos decimales para la columna de moneda.
            p="{0:.2f}".format(float(i.subtotal))
            f.write("<tr>")
            f.write("   <td><center>"+str(i.id)+"</center></td>"
            +"<td><center>"+str(i.codigo)+"</center></td>"
            +"<td><center>"+str(i.nombre)+"</center></td>"
            +"<td><center>"+"x"+"</center></td>"
            +"<td><center>"+str(i.entregado)+"</center></td>"
            +"<td><center>"+str(i.entregado)+"</center></td>"
            +"<td><center>"+ "Q"+str(p)+"</center></td>"
            )
            # NOTE(review): "<t/r>" parece un error tipográfico de "</tr>";
            # los navegadores lo toleran, pero conviene corregirlo aparte.
            f.write("<t/r>")
        # Cierre de la tabla, pie de página y scripts de la plantilla.
        f.write("""
                                        </tbody>
                                    </table>
                                </div>
                            </div>
                        </div>
                        <!-- Content Row -->
                        <div class="row">
                            <!-- Content Column -->
                            <div class="col-auto">
                            </div>
                        </div>
                    </div>
                    <!-- /.container-fluid -->
                </div>
                <!-- End of Main Content -->
                <!-- Footer -->
                <footer class="sticky-footer bg-white">
                    <div class="container my-auto">
                        <div class="copyright text-center my-auto">
                            <span>© Facultad de Ingeniería 2021</span>
                        </div>
                    </div>
                </footer>
                <!-- End of Footer -->
            </div>
            <!-- End of Content Wrapper -->
        </div>
        <!-- End of Page Wrapper -->
        <!-- Scroll to Top Button-->
        <a class="scroll-to-top rounded" href="#page-top">
            <i class="fas fa-angle-up"></i>
        </a>
        <!-- Logout Modal-->
        <div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
            aria-hidden="true">
            <div class="modal-dialog" role="document">
                <div class="modal-content">
                    <div class="modal-header">
                        <h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
                        <button class="close" type="button" data-dismiss="modal" aria-label="Close">
                            <span aria-hidden="true">×</span>
                        </button>
                    </div>
                    <div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
                    <div class="modal-footer">
                        <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
                        <a class="btn btn-primary" href="login.html">Logout</a>
                    </div>
                </div>
            </div>
        </div>
        <!-- Bootstrap core JavaScript-->
        <script src="vendor/jquery/jquery.min.js"></script>
        <script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
        <!-- Core plugin JavaScript-->
        <script src="vendor/jquery-easing/jquery.easing.min.js"></script>
        <!-- Custom scripts for all pages-->
        <script src="js/sb-admin-2.min.js"></script>
        <!-- Page level plugins -->
        <script src="vendor/chart.js/Chart.min.js"></script>
        <!-- Page level custom scripts -->
        <script src="js/demo/chart-area-demo.js"></script>
        <script src="js/demo/chart-pie-demo.js"></script>
        <!-- Page level plugins -->
        <script src="vendor/datatables/jquery.dataTables.min.js"></script>
        <script src="vendor/datatables/dataTables.bootstrap4.min.js"></script>
        <!-- Page level custom scripts -->
        <script src="js/demo/datatables-demo.js"></script>
</body>
</html>
        """)
        f.close()
        # Abre el HTML recién escrito en el navegador predeterminado.
        webbrowser.open_new_tab('REPORTE.html')
#--------------CREANDO VENTANA PRINCIPAL--------------
root=Tk()
root.title("VOLUNTARIADO")
root.iconbitmap('img\icono.ico')
rut=StringVar()
zm1=StringVar()
nt=ttk.Notebook(root)
nt.pack(fill="both",expand="yes")
s = ttk.Style()
# Create style used by default for all Frames
s.configure('TFrame', background='#1F618D')
#--------------FRAME INICIO--------------
s.configure('Frame1.TFrame', background='#1F618D')
V1 = ttk.Frame(nt, style='Frame1.TFrame')
nt.add(V1, text="INICIO")
#--------------FRAME CARGAR ARCHIVOS--------------
s.configure('Frame2.TFrame', background='#1F618D')
V2 = ttk.Frame(nt, style='Frame2.TFrame')
nt.add(V2, text="PRECIOS")
Label(V2,textvariable=rut,font="Helvetica 16",bg="#1F618D").place(x=100,y=280)
rut.set("NO SE HA CARGADO NADA")
Button(V2,text="SELECCIONAR ARCHIVO",command=DIALOGO,font="Helvetica 12",height=5,width=25).place(x=120, y=110)
Label(V2,textvariable=zm1,font="Helvetica 16",bg="#1F618D").place(x=560,y=280)
zm1.set("NO SE HA CARGADO NADA")
Button(V2,text="SELECCIONAR ARCHIVO",command=DIALOGO2,font="Helvetica 12",height=5,width=25).place(x=520, y=110)
L1=StringVar()
l2=StringVar()
l3=StringVar()
xo=IntVar()
yo=IntVar()
Label(V2,textvariable=L1,font="Helvetica 16",bg="#1F618D").place(x=30,y=30)
L1.set("CARGAR ARCHIVO DE PRECIOS (MED)")
Label(V2,textvariable=l2,font="Helvetica 16",bg="#1F618D").place(x=500,y=30)
l2.set("CARGAR ARCHIVO DE PRECIOS (MQ)")
#--------------FRAME REPORTES--------------
s.configure('Frame3.TFrame', background='#1F618D')
V3 = ttk.Frame(nt, style='Frame3.TFrame')
nt.add(V3, text=" VISUALIZAR REPORTE")
icodoct=PhotoImage(file="img\doct.png")
icodoct.subsample(1,1)
#Button(V3,image=icodoct,font="Helvetica 14",width=300,height=300).place(x=100, y=130)
Label(V3,textvariable=rut,font="Helvetica 16",bg="#1F618D").place(x=150,y=400)
rut.set("NO SE HA CARGADO NADA")
Button(V3,text="SELECCIONAR ARCHIVO",command=DIALOGO_REPORTE,font="Helvetica 12").place(x=250, y=350)
Button(V3,text="VER REPORTE",command=VER_REPORTE,height=5,width=25,font="Helvetica 12").place(x=650, y=350)
L6=StringVar()
año=StringVar()
dpto=StringVar()
area=StringVar()
distrito=StringVar()
municipio=StringVar()
t_servicio=StringVar()
servicio=StringVar()
l9=StringVar()
l8=StringVar()
l7=StringVar()
l6=StringVar()
l5=StringVar()
l4=StringVar()
a=StringVar()
b=StringVar()
c=StringVar()
Label(V3,textvariable=L6,font="Helvetica 16",bg="#1F618D").place(x=70,y=30)
L6.set("DATOS PARA EL REPORTE")
Label(V3,textvariable=l9,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=140)
l9.set("Departamento")
"""Label(V3,textvariable=l8,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=180)
l8.set("Area")
Label(V3,textvariable=l7,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=220)
l7.set("Distrito")"""
Label(V3,textvariable=l6,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=180)
l6.set("Municipio")
Label(V3,textvariable=l5,font="Helvetica 12",bg="#1F618D",fg="white").place(x=475,y=180)
"""l5.set("Tipo de Servicio")
Label(V3,textvariable=l4,font="Helvetica 12",bg="#1F618D",fg="white").place(x=475,y=220)
l4.set("Servicio")"""
Label(V3,textvariable=a,font="Helvetica 12",bg="#1F618D",fg="white").place(x=450,y=40)
a.set("Año")
Label(V3,textvariable=b,font="Helvetica 12",bg="#1F618D",fg="white").place(x=570,y=40)
b.set("Del Mes")
Label(V3,textvariable=c,font="Helvetica 12",bg="#1F618D",fg="white").place(x=760,y=40)
c.set("Al mes")
Entry(V3,textvariable=año,font="Helvetica 11",width=5).place(x=500,y=40)
#Entry(V3,textvariable=Mes_inicial,font="Helvetica 11",width=10).place(x=650,y=40)
#Entry(V3,textvariable=Mes_final,font="Helvetica 11",width=10).place(x=820,y=40)
Mes_inicial=ttk.Combobox(V3,width=10,font="Helvetica 11",state="readonly")
Mes_inicial.place(x=650,y=40)
Mes_inicial['values']=('Enero','Febrero','Marzo ','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre')
Mes_final=ttk.Combobox(V3,width=10,font="Helvetica 11",state="readonly")
Mes_final.place(x=820,y=40)
Mes_final['values']=('Enero','Febrero','Marzo ','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre')
TIPO=ttk.Combobox(V3,width=10,font="Helvetica 14",state="readonly")
TIPO.place(x=100,y=350)
TIPO['values']=('MED','MQ')
Entry(V3,textvariable=dpto,font="Helvetica 12").place(x=200,y=140)
#Entry(V3,textvariable=distrito,font="Helvetica 12").place(x=200,y=180)
#Entry(V3,textvariable=t_servicio,font="Helvetica 12").place(x=200,y=220)
Entry(V3,textvariable=distrito,font="Helvetica 12").place(x=200,y=180)
#Entry(V3,textvariable=municipio,font="Helvetica 12").place(x=600,y=180)
#Entry(V3,textvariable=servicio,font="Helvetica 12").place(x=600,y=220)
root.geometry("950x550")
root.mainloop()
CREAR_INTERFAZ() | 41.520337 | 141 | 0.420011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,928 | 0.706526 |
3954ca2aed7ee1c085bbfe2f96be79e42677eab1 | 16,156 | py | Python | deep_hipsc_tracking/plotting/compartment_plot.py | JackToppen/deep-hipsc-tracking | 9d7e07814f26e3f76603bba1a945ae05e88733db | [
"BSD-3-Clause"
] | 2 | 2021-05-19T02:10:04.000Z | 2021-05-27T01:26:54.000Z | deep_hipsc_tracking/plotting/compartment_plot.py | JackToppen/deep-hipsc-tracking | 9d7e07814f26e3f76603bba1a945ae05e88733db | [
"BSD-3-Clause"
] | 1 | 2021-05-17T23:17:32.000Z | 2021-05-19T15:17:21.000Z | deep_hipsc_tracking/plotting/compartment_plot.py | JackToppen/deep-hipsc-tracking | 9d7e07814f26e3f76603bba1a945ae05e88733db | [
"BSD-3-Clause"
] | 1 | 2021-05-17T22:51:17.000Z | 2021-05-17T22:51:17.000Z | """ Plot data split by compartments
Classes:
* :py:class:`CompartmentPlot`: compartment plotting tool
"""
# Standard lib
from typing import Tuple, Optional, Dict
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Our own imports
from .styling import set_plot_style
from .utils import bootstrap_ci, get_histogram
# Classes
class CompartmentPlot(object):
    """ Plot data split by multiple compartments

    Samples are ranked by a scalar statistic (:py:meth:`calc_indices`), split
    into ``n_compartments`` bins of ``topk`` samples each, then per-bin summary
    statistics over time are calculated (:py:meth:`split_comparison`) and
    rendered by the various ``plot_*`` methods.

    :param int n_compartments:
        How many compartments to split the data into
    :param int topk:
        How many samples to take from each compartment. If None, an even
        split of the available samples is chosen in :py:meth:`calc_indices`
    :param tuple[int] figsize:
        The (width, height) of the generated figures, in inches
    :param str plot_style:
        The plot style name, as understood by :py:func:`set_plot_style`
    :param str suffix:
        The default file suffix for saved plot images
    """

    def __init__(self,
                 n_compartments: int,
                 topk: Optional[int] = None,
                 figsize: Tuple[int] = (8, 8),
                 plot_style: str = 'dark',
                 suffix: str = '.png'):
        self.n_compartments = n_compartments
        self.topk = topk
        self.figsize = figsize
        self.plot_style = plot_style
        self.suffix = suffix

        # Color palettes for the different compartments (one base color each)
        self.colors = (['blue', 'orange', 'green', 'red', 'purple', 'grey'])[:n_compartments]
        self.palletes = [sns.color_palette(c.capitalize()+'s', n_colors=10)
                         for c in self.colors]

        # Calculated values, filled in by calc_indices/split_comparison/etc
        self._bin_indices = None
        self._bin_values = None
        self._xdata = None
        self._xcolumn = None
        self._ycolumn = None
        self._plotdata = None
        self._distdata = None
        self._total_count = None

    def calc_indices(self, values: np.ndarray):
        """ Calculate the indices for each bin

        Sorts ``values`` ascending and stores ``n_compartments`` index blocks
        of ``topk`` samples each in ``self._bin_indices``. Block start points
        are spread evenly over the sorted order, so blocks may overlap when
        ``topk * n_compartments`` exceeds the number of samples.

        :param ndarray values:
            The 1D array of per-sample values used to rank the samples
        :raises ValueError:
            If there are fewer samples than ``topk * n_compartments``
        """
        if self.topk is None:
            self.topk = values.shape[0] // self.n_compartments
        if values.shape[0] < self.topk * self.n_compartments:
            err = 'Got too few values for {} samples of {} compartments: {}'
            err = err.format(self.topk, self.n_compartments, values.shape[0])
            raise ValueError(err)
        print(f'Spliting into {self.n_compartments} compartments of {self.topk} samples each')

        # Sort all the indices
        indices = np.argsort(values)

        # Split into even bins of size topk
        bin_start = np.floor(np.linspace(0, indices.shape[0]-self.topk, self.n_compartments))
        bin_start[bin_start < 0] = 0
        bin_end = bin_start + self.topk
        bin_end[bin_end > indices.shape[0]] = indices.shape[0]

        # Extract the sorted bins for each compartment
        self._bin_indices = [indices[int(s):int(e)] for s, e in zip(bin_start, bin_end)]

    def calc_bin(self,
                 bin_value: np.ndarray,
                 label: str,
                 total_count: int) -> Dict[str, float]:
        """ Calculate all the stats for a single bin

        :param ndarray bin_value:
            The 2D array of n timepoints x k samples
        :param str label:
            The label for this category (appended to each stat key)
        :param int total_count:
            The total number of samples in this bin
        :returns:
            A dictionary of bin stats for plotting (mean/median with
            bootstrap CIs, std, percentiles, counts, and support fraction)
        """
        bin_mean = np.nanmean(bin_value, axis=1)
        bin_std = np.nanstd(bin_value, axis=1)

        bin5, bin25, bin50, bin75, bin95 = np.nanpercentile(bin_value, [5, 25, 50, 75, 95], axis=1)

        bin_mean_ci0, bin_mean_ci1 = bootstrap_ci(bin_value, func=np.nanmean, axis=1)
        assert bin_mean_ci0.shape == bin_mean.shape
        assert bin_mean_ci1.shape == bin_mean.shape

        bin_median_ci0, bin_median_ci1 = bootstrap_ci(bin_value, func=np.nanmedian, axis=1)
        assert bin_median_ci0.shape == bin50.shape
        # BUGFIX: previously this re-checked ci0, leaving the high CI bound
        # unvalidated
        assert bin_median_ci1.shape == bin50.shape

        # Work out how many samples/bin we have in each timepoint
        bin_count = np.sum(~np.isnan(bin_value), axis=1)
        bin_support = bin_count / total_count
        bin_support[~np.isfinite(bin_support)] = 0

        # Stash all the values for later
        return {
            'mean' + label: bin_mean,
            'mean ci low' + label: bin_mean_ci0,
            'mean ci high' + label: bin_mean_ci1,
            'std' + label: bin_std,
            'p5' + label: bin5,
            'p25' + label: bin25,
            'p50' + label: bin50,
            'p50 ci low' + label: bin_median_ci0,
            'p50 ci high' + label: bin_median_ci1,
            'p75' + label: bin75,
            'p95' + label: bin95,
            'count' + label: bin_count,
            'support' + label: bin_support,
        }

    def split_comparison(self,
                         data: Dict[str, np.ndarray],
                         xcolumn: str,
                         ycolumn: str,
                         integrate_values: bool = False):
        """ Split the comparison by the bins

        Computes per-bin stats (via :py:meth:`calc_bin`) for all samples and
        for each compartment from :py:meth:`calc_indices`, storing the result
        in ``self._plotdata`` for the plotting methods.

        :param dict[str, Any] data:
            A dictionary containing the xcolumn and ycolumn data
        :param str xcolumn:
            The column containing the shared time vector to plot along
        :param str ycolumn:
            The column containing the values to bin along
        :param bool integrate_values:
            If True, integrate the resulting statistics over the xdata range
        """
        xdata = data[xcolumn]
        plotdata = {
            xcolumn: xdata,
        }
        # Stack per-sample traces into an (n timepoints x k samples) array
        values = np.stack(data[ycolumn], axis=1)
        total_count = np.sum(~np.isnan(values), axis=1)

        if values.shape[0] != xdata.shape[0]:
            raise ValueError('Expected {} with shape {}, got {}'.format(ycolumn, xdata.shape[0], values.shape[0]))
        bin_values = []

        # Add a set for all the values
        plotdata.update(self.calc_bin(values, f' {ycolumn} all', total_count))

        for i, indices in enumerate(self._bin_indices):
            bin_value = values[:, indices]
            bin_values.append(bin_value)

            label = f' {ycolumn} bin{i+1}'
            plotdata.update(self.calc_bin(bin_value, label, total_count))

        self._plotdata = plotdata
        self._xdata = xdata
        self._xcolumn = xcolumn
        self._ycolumn = ycolumn
        self._bin_values = bin_values
        self._total_count = total_count

    def calc_envelope(self, label: str, envelope: str = 'std') -> Tuple[float]:
        """ Calculate the envelope (high/low) stats for a label

        :param str label:
            The label to calculate the envelope for (key suffix in plotdata)
        :param str envelope:
            Which stats to calculate the envelope with: 'std' (mean +/- std),
            'mean ci', 'median ci' (bootstrap CIs), or 'iqr' (25th-75th pct)
        :returns:
            A tuple of low, high values
        :raises ValueError:
            If ``envelope`` is not one of the supported names
        """
        plotdata = self._plotdata
        if envelope == 'std':
            value_mean = plotdata['mean' + label]
            value_std = plotdata['std' + label]
            value_st = value_mean - value_std
            value_ed = value_mean + value_std
        elif envelope == 'mean ci':
            value_st = plotdata['mean ci low' + label]
            value_ed = plotdata['mean ci high' + label]
        elif envelope == 'median ci':
            value_st = plotdata['p50 ci low' + label]
            value_ed = plotdata['p50 ci high' + label]
        elif envelope == 'iqr':
            value_st = plotdata['p25' + label]
            value_ed = plotdata['p75' + label]
        else:
            raise ValueError('Unknown envelope function "{}"'.format(envelope))
        return value_st, value_ed

    def plot_raw_tracks(self, outfile=None, xlabel=None, ylabel=None):
        """ Plot individual raw tracks, colored by compartment

        :param Path outfile:
            If not None, the file to write the plot to
        :param str xlabel:
            Label for the x-axis (time)
        :param str ylabel:
            Label for the y-axis (value)
        """
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            for i, bin_value in enumerate(self._bin_values):
                ax.set_prop_cycle(color=self.palletes[i])
                ax.plot(self._xdata, bin_value, '-')
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)

    def plot_mean_tracks(self, outfile=None, xlabel=None, ylabel=None, envelope='std', mode='split'):
        """ Mean and deviation envelope

        :param Path outfile:
            If not None, the file to write out
        :param str xlabel:
            Label for the x-axis (time)
        :param str ylabel:
            Label for the y-axis (category)
        :param str envelope:
            Envelope statistic, as accepted by :py:meth:`calc_envelope`
        :param str mode:
            'split' to draw one curve per compartment, 'all' for one overall
            curve
        :raises ValueError:
            If ``mode`` is neither 'split' nor 'all'
        """
        plotdata = self._plotdata
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            if mode == 'split':
                for i in range(self.n_compartments):
                    label = ' {} bin{}'.format(self._ycolumn, i+1)
                    value_mean = plotdata['mean' + label]
                    value_st, value_ed = self.calc_envelope(label, envelope)
                    ax.fill_between(self._xdata, value_st, value_ed,
                                    facecolor=self.colors[i], alpha=0.5)
                    ax.plot(self._xdata, value_mean, '-', color=self.colors[i], linewidth=2)
            elif mode == 'all':
                label = ' {} all'.format(self._ycolumn)
                value_mean = plotdata['mean' + label]
                value_st, value_ed = self.calc_envelope(label, envelope)
                ax.fill_between(self._xdata, value_st, value_ed,
                                facecolor='b', alpha=0.5)
                ax.plot(self._xdata, value_mean, '-', color='b', linewidth=2)
            else:
                raise ValueError('Unknown mode {}'.format(mode))
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)

    def plot_median_tracks(self, outfile=None, xlabel=None, ylabel=None, envelope='iqr', mode='split'):
        """ Median and 25/75% envelope

        Same interface as :py:meth:`plot_mean_tracks`, but centered on the
        median (p50) rather than the mean.
        """
        plotdata = self._plotdata
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            if mode == 'split':
                for i in range(self.n_compartments):
                    label = ' {} bin{}'.format(self._ycolumn, i+1)
                    value_mid = plotdata['p50' + label]
                    value_st, value_ed = self.calc_envelope(label, envelope)
                    ax.fill_between(self._xdata, value_st, value_ed,
                                    facecolor=self.colors[i], alpha=0.5)
                    ax.plot(self._xdata, value_mid, '-', color=self.colors[i], linewidth=2)
            elif mode == 'all':
                label = ' {} all'.format(self._ycolumn)
                value_mean = plotdata['p50' + label]
                value_st, value_ed = self.calc_envelope(label, envelope)
                ax.fill_between(self._xdata, value_st, value_ed,
                                facecolor='b', alpha=0.5)
                ax.plot(self._xdata, value_mean, '-', color='b', linewidth=2)
            else:
                raise ValueError('Unknown mode {}'.format(mode))
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)

    def plot_track_support(self, outfile=None, xlabel=None, ylabel=None):
        """ Plot how many tracks are in a given bin at a given time

        Draws two panels: absolute track counts per bin, and the percentage
        of all tracks represented by each bin.

        :param Path outfile:
            If not None, the file to write the plot to
        :param str xlabel:
            Label for the x-axis (time)
        :param str ylabel:
            Unused; the y-axes have fixed labels
        """
        plotdata = self._plotdata
        with set_plot_style(self.plot_style) as style:
            fig_x, fig_y = self.figsize
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(fig_x*2, fig_y))
            ax1.plot(self._xdata, self._total_count, '-k', linewidth=2)
            ax2.hlines([100], np.min(self._xdata), np.max(self._xdata), colors=['k'], linewidth=2)
            for i in range(self.n_compartments):
                label = ' {} bin{}'.format(self._ycolumn, i+1)
                count = plotdata['count' + label]
                support = plotdata['support' + label]
                ax1.plot(self._xdata, count, '-', color=self.colors[i], linewidth=2)
                ax2.plot(self._xdata, support*100, '-', color=self.colors[i], linewidth=2)
            if xlabel is not None:
                ax1.set_xlabel(xlabel)
                ax2.set_xlabel(xlabel)
            ax1.set_ylabel('Num Tracks')
            ax2.set_ylabel('Percent Total Tracks')
            ax1.set_ylim([0, np.max(self._total_count)*1.02])
            ax2.set_ylim([0, 102])
            style.show(outfile=outfile, fig=fig)

    def plot_dist_histogram(self, values, outfile=None, xlabel=None, ylabel=None):
        """ Plot where on the histogram each compartment falls

        Draws the kernel-smoothed density of all ``values`` and shades the
        range covered by each compartment. Also populates ``self._distdata``
        for :py:meth:`save_distdata` (compartment 0 is the overall density).

        :param ndarray values:
            The values to generate a histogram for
        :param Path outfile:
            If not None, the path to save the plot to
        :param str xlabel:
            Label for the x-axis (value)
        :param str ylabel:
            Label for the y-axis (density)
        """
        # Histogram the distribution and which compartments are being labeled
        _, _, kernel_x, kernel_y = get_histogram(values, bins=10, kernel_smoothing=True)
        compartment_values = [values[indices] for indices in self._bin_indices]

        distdata = {
            'compartment': [],
            'value': [],
            'density': [],
        }

        # Now, plot each compartment on the total histogram
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            ax.plot(kernel_x, kernel_y, '-', color='gray')

            distdata['compartment'].extend(0 for _ in kernel_x)
            distdata['value'].extend(kernel_x)
            distdata['density'].extend(kernel_y)

            for i, compartment_value in enumerate(compartment_values):
                compartment_min = np.min(compartment_value)
                compartment_max = np.max(compartment_value)

                # Shade the slice of the density curve this compartment spans
                kernel_mask = np.logical_and(kernel_x >= compartment_min,
                                             kernel_x <= compartment_max)
                compartment_x = kernel_x[kernel_mask]
                compartment_y = kernel_y[kernel_mask]

                distdata['compartment'].extend(i+1 for _ in compartment_x)
                distdata['value'].extend(compartment_x)
                distdata['density'].extend(compartment_y)

                ax.fill_between(compartment_x, 0, compartment_y,
                                facecolor=self.colors[i], alpha=0.5)
                ax.plot(compartment_x, compartment_y, '-',
                        color=self.colors[i], linewidth=2)
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)
        self._distdata = distdata

    def save_plotdata(self, outfile, suffix='.csv'):
        """ Save the plot data

        :param Path outfile:
            Output path; its suffix is replaced with ``suffix``
        :param str suffix:
            Either '.csv' or '.xlsx'
        :raises ValueError:
            If :py:meth:`split_comparison` has not been called yet
        :raises KeyError:
            If ``suffix`` is not a supported file type
        """
        if self._plotdata is None:
            raise ValueError('No distribution data, call split_comparison first')
        outfile = outfile.parent / (outfile.stem + suffix)
        print('Writing distribution data to {}'.format(outfile))

        plotdata = pd.DataFrame(self._plotdata)
        if suffix == '.csv':
            plotdata.to_csv(str(outfile), header=True, index=False)
        elif suffix == '.xlsx':
            plotdata.to_excel(str(outfile), header=True, index=False)
        else:
            raise KeyError('Unknown plot data output file type: {}'.format(outfile))

    def save_distdata(self, outfile, suffix='.csv'):
        """ Save the distribution data

        :param Path outfile:
            Output path; its suffix is replaced with ``suffix``
        :param str suffix:
            Either '.csv' or '.xlsx'
        :raises ValueError:
            If :py:meth:`plot_dist_histogram` has not been called yet
        :raises KeyError:
            If ``suffix`` is not a supported file type
        """
        if self._distdata is None:
            raise ValueError('No distribution data, call plot_dist_histogram first')
        outfile = outfile.parent / (outfile.stem + suffix)
        print('Writing distribution data to {}'.format(outfile))

        distdata = pd.DataFrame(self._distdata)
        if suffix == '.csv':
            distdata.to_csv(str(outfile), header=True, index=False)
        elif suffix == '.xlsx':
            distdata.to_excel(str(outfile), header=True, index=False)
        else:
            raise KeyError('Unknown dist data output file type: {}'.format(outfile))
| 40.189055 | 114 | 0.573533 | 15,757 | 0.975303 | 0 | 0 | 0 | 0 | 0 | 0 | 3,841 | 0.237744 |
1a2ec0438436e70f3f58ef493bca2cccbd7f42d3 | 10,874 | py | Python | utils/perm_utils.py | IBM/NeuronAlignment | 5b82b60666db1fac72e53db07529a3328ee549c4 | [
"Apache-2.0"
] | 3 | 2020-09-09T01:23:34.000Z | 2021-12-23T16:56:00.000Z | utils/perm_utils.py | IBM/NeuronAlignment | 5b82b60666db1fac72e53db07529a3328ee549c4 | [
"Apache-2.0"
] | null | null | null | utils/perm_utils.py | IBM/NeuronAlignment | 5b82b60666db1fac72e53db07529a3328ee549c4 | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
def train_perm_orth(train_loader, model, optimizer, scheduler, criterion, regularizer=None, rho=1E-4, delta=0.5,
                    nu=1E-2, eps=1E-3, tau=1E-2, lagrange_pen=1E-2, perm_flag=True, t_step=40):
    """Run one epoch of training for the permutation/alignment matrices.

    When ``perm_flag`` is True, the matrices in
    ``optimizer.param_groups[0]['params']`` are optimized along a curvilinear
    search on the orthogonal manifold (the Cayley-transform curve from
    ``compute_ytau``, which appears to follow the Wen-Yin feasible method --
    TODO confirm), with a Barzilai-Borwein step size ``tau``, a nonmonotone
    acceptance threshold ``c``, and an augmented-Lagrangian penalty
    (``integer_penalty`` / ``integer_penalty_update``) pushing entries toward
    non-negativity.

    :param train_loader: iterable of (input, target) batches
    :param model: network; called as ``model(input, perm_train=True, t=t)``
    :param optimizer: optimizer whose first param group holds the matrices
    :param scheduler: LR scheduler; only ``get_lr()[0]`` is read
    :param criterion: loss function on (output, target)
    :param regularizer: optional extra loss term on the model
    :param float rho: sufficient-decrease constant for the line search
    :param float delta: backtracking shrink factor for tau
    :param float nu: averaging weight for the nonmonotone threshold, and
        proximal mixing weight in the final parameter update
    :param float eps: NOTE(review): currently unused
    :param float tau: initial curvilinear step size
    :param float lagrange_pen: initial augmented-Lagrangian penalty weight
    :param bool perm_flag: if False, skip the manifold optimization entirely
    :param int t_step: number of accumulated batches per optimization step
    :returns: dict with epoch 'loss', 'accuracy', and 'change_perm'
        (NOTE(review): 'change_perm' is always NaN -- never computed)
    """
    if perm_flag:
        # Bounds on the Barzilai-Borwein step size
        tau_min = 1E-24
        tau_max = 1E-1
        # c is the nonmonotone line-search reference value (initialized lazily)
        c = None
        # Lagrange multipliers for the non-negativity constraint, one per matrix
        lam_lm = []
        for p in optimizer.param_groups[0]['params']:
            lam_lm.append(torch.zeros_like(p))
        k_iter = 0
        # One random interpolation time t per batch
        ts = torch.empty(len(train_loader), device=model.device).uniform_(0.0, 1.0)
        # Re-initialize each matrix to a random orthogonal matrix (U from SVD)
        with torch.no_grad():
            for p in optimizer.param_groups[0]['params']:
                p.data = torch.rand_like(p.data)
                p.data, _, _ = torch.svd(p.data)
        input_cml = []
        target_cml = []
        t_cml = []
        inner_iter = 0
        loss = 0.0
        loss_obj = 0.0
        # NOTE(review): 'iter' and 'input' shadow Python builtins
        for iter, (input, target) in enumerate(train_loader):
            t = ts[iter]
            input = input.to(model.device, non_blocking=False)
            target = target.to(model.device, non_blocking=False)
            output = model(input, perm_train=True, t=t)
            input_all = input
            target_all = target
            new_loss = criterion(output, target_all)
            loss_obj += new_loss
            # This part is for the augmented Lagrangian method
            int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
            loss += new_loss + int_pen
            inner_iter += 1
            # Accumulate the batches so the line search below can re-evaluate
            # the same objective at trial points
            input_cml.append(input.clone())
            target_cml.append(target.clone())
            t_cml.append(t.clone())
            if inner_iter % t_step == 0:
                # One curvilinear optimization step over the accumulated batches
                optimizer.zero_grad()
                loss.backward()
                grad_norm = 0.0
                violator = 0.0
                # violator measures deviation from orthogonality ||P^T P - I||^2
                for p in optimizer.param_groups[0]['params']:
                    param_norm = p.grad.data.norm(2)
                    grad_norm += param_norm.item() ** 2
                    violator += torch.sum((torch.matmul(p.data.t(), p.data) - torch.eye(p.data.shape[0],
                                                                                       device=p.device)) ** 2)
                grad_norm = grad_norm ** (1. / 2)
                if c is None:
                    c = loss.clone().item()
                    q_opt = 1
                loss_inner = loss.clone()
                print('Iteration: %03d, Loss %.2E, Objective %.2E, Negative Penalty: %.2E,'
                      'Grad Norm: %.2E, Ortho Violation: %.2E, tau: %.2E' %
                      (k_iter, loss_inner.item(), loss_obj.item(), int_pen.item(), grad_norm, violator.item(), tau))
                # Compute F for defining Y function
                # F = G P^T - P G^T is skew-symmetric (G = gradient)
                F_list = []
                with torch.no_grad():
                    for p in optimizer.param_groups[0]['params']:
                        f = torch.matmul(p.grad.data, p.t().data) - torch.matmul(p.data, p.grad.t().data)
                        F_list.append(f)
                # Store old parameters
                params_old = [None] * len(optimizer.param_groups[0]['params'])
                for idx, param in enumerate(optimizer.param_groups[0]['params']):
                    params_old[idx] = param.clone()
                grads_old = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
                # Compute the values of Y(tau) and Y'(tau), store them into the model
                Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])
                for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):
                    p.data = y_t.clone()
                # Re-evaluate the accumulated objective at the trial point Y(tau)
                loss_inner = 0.0
                for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
                    output = model(input_2, perm_train=True, t=t_2)
                    loss_inner += criterion(output, target_2)
                int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
                loss_inner += int_pen
                optimizer.zero_grad()
                loss_inner.backward()
                grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
                # Directional derivative of the objective along the curve at tau
                with torch.no_grad():
                    dF_dt = 0.0
                    for g_new, y_ft_p in zip(grads_new, Y_ft_prime):
                        df = g_new * (y_ft_p / torch.norm(y_ft_p.data))
                        df = torch.sum(df)
                        dF_dt += df.item()
                # Backtracking (nonmonotone Armijo-style) line search on tau
                threshold_flag = True
                k_inner = 0
                while threshold_flag:
                    with torch.no_grad():
                        threshold = c + rho * tau * dF_dt
                    if loss_inner.item() >= threshold:
                        # Compute Y for smaller value of tau
                        with torch.no_grad():
                            tau *= delta
                            Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])
                            for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):
                                p.data = y_t.clone()
                        loss_old = loss_inner.clone()
                        loss_inner = 0.0
                        for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
                            output = model(input_2, perm_train=True, t=t_2)
                            loss_inner += criterion(output, target_2)
                        int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
                        loss_inner += int_pen
                        optimizer.zero_grad()
                        loss_inner.backward()
                        grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
                        k_inner += 1
                        # Stop shrinking once the relative improvement stalls
                        if (loss_inner.item() - loss_old.item()) / (1 + loss_old.item()) < 1E-5:
                            threshold_flag = False
                    else:
                        threshold_flag = False
                with torch.no_grad():
                    # Update the nonmonotone reference value c (running average)
                    c = (nu * q_opt * c + loss_inner.item())
                    q_opt = nu * q_opt + 1
                    c = c / q_opt
                    # Barzilai-Borwein step size from parameter/gradient deltas
                    bb_num = 0.0
                    bb_denom = 0.0
                    yy_sum = 0.0
                    for p_old, g_old, p_new, g_new in zip(params_old, grads_old, optimizer.param_groups[0]['params'],
                                                          grads_new):
                        s_bb = p_new - p_old
                        y_bb = g_new - g_old
                        bb_num += torch.sum(s_bb ** 2)
                        bb_denom += torch.sum(s_bb * y_bb)
                        yy_sum += torch.sum(y_bb ** 2)
                    tau_bb = bb_num / torch.abs(bb_denom)
                    tau_bb = tau_bb.item()
                    tau_bb2 = torch.abs(bb_denom) / yy_sum
                    tau_bb2 = tau_bb2.item()
                    # Take the smaller of the two BB estimates, clipped to bounds
                    tau_bb = np.minimum(tau_bb, tau_bb2)
                    tau = np.minimum(tau_bb, tau_max)
                    tau = np.maximum(tau, tau_min)
                # Dual ascent on the multipliers, and grow the penalty weight
                lam_lm, lagrange_pen = integer_penalty_update(optimizer.param_groups[0]['params'], lam_lm,
                                                              lagrange_pen)
                # Recompute gradients at the accepted point with updated multipliers
                loss_inner = 0.0
                for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
                    output = model(input_2, perm_train=True, t=t_2)
                    loss_obj = criterion(output, target_2)
                    int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
                    loss_inner += loss_obj + int_pen
                optimizer.zero_grad()
                loss_inner.backward()
                grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
                grad_norm = 0.0
                for g_new in grads_new:
                    gn = g_new.norm(2)
                    grad_norm += gn.item() ** 2
                grad_norm = grad_norm ** (1. / 2)
                k_iter += 1
                # Reset the accumulation buffers for the next t_step batches
                input_cml = []
                target_cml = []
                t_cml = []
                loss = 0.0
                loss_obj = 0.0
    model.train()
    loss_sum = 0.0
    correct = 0.0
    # NOTE(review): change_P is never updated, so 'change_perm' is always NaN
    change_P = np.nan
    params_before = [None] * len(optimizer.param_groups[0]['params'])
    if nu is not None:
        for idx, param in enumerate(optimizer.param_groups[0]['params']):
            params_before[idx] = param.clone().detach()
    optimizer.step()
    lr = scheduler.get_lr()[0]
    # Proximal mixing of the stepped parameters with the pre-step ones.
    # NOTE(review): params_old is only defined once the t_step branch above
    # has run at least once; a loader shorter than t_step would raise here.
    with torch.no_grad():
        for param, param_o in zip(optimizer.param_groups[0]['params'], params_old):
            param.data = 1 / (1 + lr / nu) * (param + lr / nu * param_o)
    # Final evaluation on the last batch seen
    output = model(input_all, perm_train=True)
    loss = criterion(output, target_all)
    if regularizer is not None:
        loss += regularizer(model)
    loss_sum += loss.item() * input.size(0)
    pred = output.data.argmax(1, keepdim=True)
    correct += pred.eq(target_all.data.view_as(pred)).sum().item()
    return {
        'loss': loss_sum / len(train_loader.dataset),
        'accuracy': correct * 100.0 / len(train_loader.dataset),
        'change_perm': change_P
    }
def hard_int_penalty(p_list, pen=1E1):
    """Quadratic penalty on the negative entries of each tensor in ``p_list``.

    Accumulates ``pen * sum(min(p, 0) ** 2)`` over every tensor; entries that
    are already non-negative contribute nothing.
    """
    total = 0.0
    for param in p_list:
        # Zero out the non-negative entries, keep the negative ones
        negative_part = param.data * (param.data <= 0)
        total = total + pen * (negative_part ** 2).sum()
    return total
def integer_penalty(p_list, lam_list, mu):
    """Augmented-Lagrangian penalty for the constraint ``p >= 0``.

    For each tensor, entries with ``p - lam/mu <= 0`` contribute the quadratic
    term ``-lam*p + mu/2 * p**2``; the remaining entries contribute the
    constant ``-lam**2 / (2*mu)``. Returns the scalar sum over all tensors.
    """
    total = 0.0
    for param, lam in zip(p_list, lam_list):
        active = (param - lam / mu) <= 0
        inactive = (param - lam / mu) > 0
        quad_term = (- lam * param + 0.5 * mu * (param ** 2)) * active
        flat_term = (-1 / (2 * mu) * lam ** 2) * inactive
        total = total + quad_term.sum() + flat_term.sum()
    return total
def integer_penalty_update(p_list, lam_list, mu):
    """Dual-ascent update for the augmented-Lagrangian multipliers.

    Each multiplier becomes ``max(lam - mu * p, 0)`` (projected ascent), and
    the penalty weight grows by 1% per call. Returns the new multiplier list
    and the new penalty weight.
    """
    updated = []
    with torch.no_grad():
        for param, lam in zip(p_list, lam_list):
            candidate = lam - mu * param
            # Project onto the non-negative orthant
            updated.append(torch.clamp(candidate, min=0.0))
    return updated, mu * 1.01
def compute_ytau(tau, f_list, p_list):
    """Evaluate the Cayley-transform curve Y(tau) and its derivative.

    For each pair (P, F), with I the identity:

        Y(tau)  = (I + tau/2 * F)^(-1) (I - tau/2 * F) P
        Y'(tau) = -(I + tau/2 * F)^(-1) F (P + Y(tau)) / 2

    When F is skew-symmetric and P orthogonal, Y(tau) stays orthogonal.
    Returns two lists (curve points, curve derivatives), cloned.
    """
    curve_points = []
    curve_derivs = []
    for point, direction in zip(p_list, f_list):
        identity = torch.eye(direction.shape[0], device=direction.device)
        half_step = direction * (tau / 2)
        inv_factor = torch.inverse(identity + half_step)
        y_val = torch.matmul(torch.matmul(inv_factor, identity - half_step), point)
        y_der = torch.matmul(-torch.matmul(inv_factor, direction), (point + y_val) / 2)
        curve_points.append(y_val.clone())
        curve_derivs.append(y_der.clone())
    return curve_points, curve_derivs
1a30a95b720fecbddf3339b56bb201d9cdb0ad1d | 374 | py | Python | src/test_main.py | kkworden/python-pipenv-bootstrap | 64f846f44240a093a2a707a8c3252b913799e177 | [
"MIT"
] | null | null | null | src/test_main.py | kkworden/python-pipenv-bootstrap | 64f846f44240a093a2a707a8c3252b913799e177 | [
"MIT"
] | null | null | null | src/test_main.py | kkworden/python-pipenv-bootstrap | 64f846f44240a093a2a707a8c3252b913799e177 | [
"MIT"
] | null | null | null | from unittest import mock
import unittest
import pytest
from .main import some_func
class TestMain(unittest.TestCase):
    """Unit tests for :func:`main.some_func`."""

    @pytest.fixture(autouse=True)
    def _setup_service(self):
        # Autouse pytest fixture: give every test a fresh mock service object.
        self.mock_object = mock.MagicMock()

    def test_some_func(self):
        result = some_func()
        assert result == 3

    # TODO: exercise the mocked service once main depends on it, e.g.
    #   assert self.mock_object.some_method.called
1a31249dd4025a966d8f9e01d3235e3a9810453b | 566 | py | Python | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/densenet/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
] | 1 | 2021-05-24T10:08:51.000Z | 2021-05-24T10:08:51.000Z | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/densenet/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/densenet/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
] | null | null | null | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.applications.densenet namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from keras.applications.densenet import DenseNet121
from keras.applications.densenet import DenseNet169
from keras.applications.densenet import DenseNet201
from keras.applications.densenet import decode_predictions
from keras.applications.densenet import preprocess_input
del _print_function
| 33.294118 | 82 | 0.844523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.337456 |