| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
qnap_locate_parser.py
|
killruana/snippets
| 0
|
12500
|
<reponame>killruana/snippets
#!/usr/bin/env python3
import json
import sys
import clize
def get_file_handler(input_file):
if input_file is None:
return sys.stdin
return open(input_file, 'r')
@clize.clize
def main(input_file=None):
result = {'datas': []}
with get_file_handler(input_file) as input_handler:
result = json.load(input_handler)
for data in result['datas']:
print(data['filename'])
if __name__ == '__main__':
clize.run(main)
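# Illustrative usage (editor's sketch; the exact QNAP locate export format is an
# assumption): the script expects JSON with a top-level "datas" list whose
# entries carry a "filename" key, read either from a file argument or stdin.
#
#   python3 qnap_locate_parser.py results.json
#   cat results.json | python3 qnap_locate_parser.py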
| 2.5625
| 3
|
evennia/contrib/tutorial_examples/mirror.py
|
lootcrawl/evennia
| 0
|
12501
|
"""
TutorialMirror
A simple mirror object to experiment with.
"""
from evennia import DefaultObject
from evennia.utils import make_iter, is_iter
from evennia import logger
class TutorialMirror(DefaultObject):
"""
A simple mirror object that
- echoes back the description of the object looking at it
- echoes back whatever is being sent to its .msg - to the
sender, if given, otherwise to the location of the mirror.
"""
def return_appearance(self, looker, **kwargs):
"""
This formats the description of this object. Called by the 'look' command.
Args:
looker (Object): Object doing the looking.
**kwargs (dict): Arbitrary, optional arguments for users
overriding the call (unused by default).
"""
if isinstance(looker, self.__class__):
# avoid infinite recursion by having two mirrors look at each other
return "The image of yourself stretches into infinity."
return f"{self.key} shows your reflection:\n{looker.db.desc}"
def msg(self, text=None, from_obj=None, **kwargs):
"""
Simply override .msg to echo back to the messenger or to the current
location.
Args:
text (str or tuple, optional): The message to send. This
is treated internally like any send-command, so its
value can be a tuple if sending multiple arguments to
the `text` oob command.
            from_obj (obj or iterable, optional): The object sending the message. If
given, at_msg_send will be called. This value will be
passed on to the protocol. If iterable, will execute hook
on all entities in it.
"""
if not text:
text = "<silence>"
text = text[0] if is_iter(text) else text
if from_obj:
for obj in make_iter(from_obj):
obj.msg(f'{self.key} echoes back to you:\n"{text}".')
elif self.location:
self.location.msg_contents(f'{self.key} echoes back:\n"{text}".', exclude=[self])
else:
# no from_obj and no location, just log
logger.log_msg(f"{self.key}.msg was called without from_obj and .location is None.")
| 3.4375
| 3
|
python/patternlock.py
|
Floozutter/silly
| 0
|
12502
|
<gh_stars>0
from tkinter import Tk
from turtle import ScrolledCanvas, TurtleScreen, RawTurtle
DIGIT2POS = dict(zip(
"123456789",
((100 * (j - 1), 100 * (-i + 1)) for i in range(3) for j in range(3))
))
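# The digits land on a 3x3 grid of points 100 units apart, like a phone
# pattern-lock pad (editor's note; coordinates follow from the generator above):
#
#   1 (-100,  100)   2 (0,  100)   3 (100,  100)
#   4 (-100,    0)   5 (0,    0)   6 (100,    0)
#   7 (-100, -100)   8 (0, -100)   9 (100, -100)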
def draw_dots(turt: RawTurtle) -> None:
penstate = turt.pen()
turt.penup()
for x, y in DIGIT2POS.values():
turt.setheading(turt.towards(x, y))
turt.goto(x, y)
turt.dot()
turt.pen(pen = penstate)
def draw_pattern(turt: RawTurtle, pattern: str) -> None:
penstate = turt.pen()
turt.penup()
for x, y in map(lambda digit: DIGIT2POS[digit], pattern):
turt.setheading(turt.towards(x, y))
turt.goto(x, y)
turt.pendown()
turt.dot()
turt.pen(pen = penstate)
def main(pattern: str) -> None:
master = Tk()
canvas = ScrolledCanvas(master)
canvas.pack()
screen = TurtleScreen(canvas)
screen.colormode(255)
turt = RawTurtle(screen)
draw_dots(turt)
turt.pencolor((178, 34, 34))
draw_pattern(turt, pattern)
screen.mainloop()
if __name__ == "__main__":
main("61834927")
| 3.359375
| 3
|
synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py
|
Chiel92/MusicTheory
| 0
|
12503
|
<reponame>Chiel92/MusicTheory
#!/usr/bin/env python
# test code for PyPortMidi
# a port of a subset of test.c provided with PortMidi
# <NAME>
# harrison [at] media [dot] mit [dot] edu
# March 15, 2005: accommodate for SysEx messages and preferred list formats
# SysEx test code contributed by <NAME>
# February 27, 2005: initial release
import pypm
import array
import time
NUM_MSGS = 100 # number of MIDI messages for input before closing
INPUT=0
OUTPUT=1
def PrintDevices(InOrOut):
for loop in range(pypm.CountDevices()):
interf,name,inp,outp,opened = pypm.GetDeviceInfo(loop)
if ((InOrOut == INPUT) & (inp == 1) |
(InOrOut == OUTPUT) & (outp ==1)):
print loop, name," ",
if (inp == 1): print "(input) ",
else: print "(output) ",
if (opened == 1): print "(opened)"
else: print "(unopened)"
print
def TestInput():
PrintDevices(INPUT)
dev = int(raw_input("Type input number: "))
MidiIn = pypm.Input(dev)
print "Midi Input opened. Reading ",NUM_MSGS," Midi messages..."
# MidiIn.SetFilter(pypm.FILT_ACTIVE | pypm.FILT_CLOCK)
for cntr in range(1,NUM_MSGS+1):
while not MidiIn.Poll(): pass
MidiData = MidiIn.Read(1) # read only 1 message at a time
print "Got message ",cntr,": time ",MidiData[0][1],", ",
print MidiData[0][0][0]," ",MidiData[0][0][1]," ",MidiData[0][0][2], MidiData[0][0][3]
# NOTE: most Midi messages are 1-3 bytes, but the 4 byte is returned for use with SysEx messages.
del MidiIn
def TestOutput():
latency = int(raw_input("Type latency: "))
print
PrintDevices(OUTPUT)
dev = int(raw_input("Type output number: "))
MidiOut = pypm.Output(dev, latency)
print "Midi Output opened with ",latency," latency"
dummy = raw_input("ready to send program 1 change... (type RETURN):")
MidiOut.Write([[[0xc0,0,0],pypm.Time()]])
dummy = raw_input("ready to note-on... (type RETURN):")
MidiOut.Write([[[0x90,60,100],pypm.Time()]])
dummy = raw_input("read to note-off... (type RETURN):")
MidiOut.Write([[[0x90,60,0],pypm.Time()]])
dummy = raw_input("ready to note-on (short form)... (type RETURN):")
MidiOut.WriteShort(0x90,60,100)
dummy = raw_input("ready to note-off (short form)... (type RETURN):")
MidiOut.WriteShort(0x90,60,0)
print
print "chord will arpeggiate if latency > 0"
dummy = raw_input("ready to chord-on/chord-off... (type RETURN):")
chord = [60, 67, 76, 83, 90]
ChordList = []
MidiTime = pypm.Time()
for i in range(len(chord)):
ChordList.append([[0x90,chord[i],100], MidiTime + 1000 * i])
MidiOut.Write(ChordList)
while pypm.Time() < MidiTime + 1000 + len(chord) * 1000 : pass
ChordList = []
# seems a little odd that they don't update MidiTime here...
for i in range(len(chord)):
ChordList.append([[0x90,chord[i],0], MidiTime + 1000 * i])
MidiOut.Write(ChordList)
print("Sending SysEx messages...")
# sending with timestamp = 0 should be the same as sending with
# timestamp = pypm.Time()
dummy = raw_input("ready to send a SysEx string with timestamp = 0 ... (type RETURN):")
MidiOut.WriteSysEx(0,'\xF0\x7D\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\xF7')
dummy = raw_input("ready to send a SysEx list with timestamp = pypm.Time() ... (type RETURN):")
MidiOut.WriteSysEx(pypm.Time(), [0xF0, 0x7D, 0x10, 0x11, 0x12, 0x13, 0xF7])
dummy = raw_input("ready to close and terminate... (type RETURN):")
del MidiOut
# main code begins here
pypm.Initialize() # always call this first, or OS may crash when you try to open a stream
x=0
while (x<1) | (x>2):
print """
enter your choice...
1: test input
2: test output
"""
x=int(raw_input())
if x==1: TestInput()
else: TestOutput()
pypm.Terminate()
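# Editor's note on the message layout used above (inferred from the calls in
# this file rather than from PortMidi documentation): Output.Write() takes a
# list of [[status, data1, data2, ...], timestamp] entries, e.g.
#
#   MidiOut.Write([[[0x90, 60, 100], pypm.Time()]])  # note-on, middle C, velocity 100
#
# and Input.Read(n) returns entries shaped [[status, data1, data2, data3], timestamp].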
| 2.5
| 2
|
cnn_model/Compute_accuarcy.py
|
csJd/dg_text_contest_2018
| 0
|
12504
|
<gh_stars>0
#coding=utf-8
import pandas as pd
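# Expected inputs (editor's note, inferred from the parsing below): the reference
# file is '^'-separated with the true label in its first column, and the
# prediction file is comma-separated with the predicted label in its first column.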
def get_labels(init_file,predict_file):
init_label = []
predict_label = []
pd_init = pd.read_csv(init_file,sep="^",header=None)
for index,row in pd_init.iterrows():
init_label.append(row[0])
pd_predict = pd.read_csv(predict_file,sep=",",header=None)
for index,row in pd_predict.iterrows():
predict_label.append(row[0])
print(predict_label)
print(init_label)
correct_count = 0
error_index = []
for i in range(len(init_label)):
if init_label[i] == predict_label[i]:
correct_count += 1
else:
error_index.append(i)
print("correct_count : "+str(correct_count))
correct_rate = correct_count / len(pd_predict)
return correct_rate,error_index
if __name__ == "__main__":
correct_rate,error_index = get_labels("../processed_data/dev_processed_data_split.csv","./result/result_predict.txt")
print("correct_rate : "+str(correct_rate))
print("error_email : "+str(error_index))
| 3.0625
| 3
|
ex01/arquivo/__init__.py
|
duartele/exerc-python
| 0
|
12505
|
from ex01.funcoes import *
def arqExiste(nome):
try:
a = open(nome, 'rt') #rt = read text
a.close()
except FileNotFoundError:
return False
else:
return True
def criarArq(nome):
try:
        a = open(nome, 'wt+') # wt = write text; '+' creates the file if it does not exist
a.close()
except:
        print('Houve um erro na criacao do arquivo')
else:
print('Arquivo criado com sucesso')
def lerArq(nome):
try:
a = open(nome, 'rt')
except:
        print('Houve um erro na leitura do arquivo')
else:
cabecalho('PESSOAS CADASTRADAS')
for linha in a:
dado = linha.split(';')
dado[1] = dado[1].replace('\n', '')
print(f'{dado[0]:<30}{dado[1]:>3} anos')
finally:
a.close()
def cadastrar(arquivo, nome='desconhecido', idade=0):
try:
        a = open(arquivo, 'at') # at = append to the txt file
except:
print('Houve um erro ao cadastrar')
else:
try:
a.write(f'{nome};{idade}\n')
except:
print('Houve erro ao executar a.write')
else:
print('Novo registro adicionado com sucesso')
a.close()
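# Illustrative flow (editor's sketch; 'cabecalho' comes from ex01.funcoes and the
# file name below is hypothetical; records are stored one "name;age" per line):
#
#   arquivo = 'pessoas.txt'
#   if not arqExiste(arquivo):
#       criarArq(arquivo)
#   cadastrar(arquivo, 'Maria', 30)
#   lerArq(arquivo)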
| 3.609375
| 4
|
ck_airport.py
|
58565856/checkinpanel
| 3
|
12506
|
<reponame>58565856/checkinpanel<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
:author @Icrons
cron: 20 10 * * *
new Env('机场签到');
"""
import json
import re
import traceback
import requests
import urllib3
from notify_mtr import send
from utils import get_data
urllib3.disable_warnings()
class SspanelQd(object):
def __init__(self, check_items):
self.check_items = check_items
@staticmethod
def checkin(url, email, password):
url = url.rstrip("/")
email = email.split("@")
if len(email) > 1:
email = email[0] + "%40" + email[1]
else:
email = email[0]
session = requests.session()
"""
        The except blocks below catch exceptions raised by the requests calls,
        so the script can wait out changes in network conditions and keep running.
"""
try:
session.get(url, verify=False)
except requests.exceptions.ConnectionError:
msg = url + "\n" + "网络不通"
return msg
except requests.exceptions.ChunkedEncodingError:
msg = url + "\n" + "分块编码错误"
return msg
except Exception:
msg = url + "\n" + "未知错误,请查看日志"
print(f"未知错误,错误信息:\n{traceback.format_exc()}")
return msg
login_url = url + "/auth/login"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
post_data = "email=" + email + "&passwd=" + password + "&code="
post_data = post_data.encode()
try:
res = session.post(login_url, post_data, headers=headers, verify=False)
res_str = res.text.encode("utf-8").decode("unicode_escape")
print(f"{url} 接口登录返回信息:{res_str}")
res_dict = json.loads(res_str)
if res_dict.get("ret") == 0:
msg = url + "\n" + str(res_dict.get("msg"))
return msg
except Exception:
msg = url + "\n" + "登录失败,请查看日志"
print(f"登录失败,错误信息:\n{traceback.format_exc()}")
return msg
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Referer": url + "/user",
}
try:
response = session.post(
url + "/user/checkin", headers=headers, verify=False
)
res_str = response.text.encode("utf-8").decode("unicode_escape")
print(f"{url} 接口签到返回信息:{res_str}")
res_dict = json.loads(res_str)
check_msg = res_dict.get("msg")
if check_msg:
msg = url + "\n" + str(check_msg)
else:
msg = url + "\n" + str(res_dict)
except Exception:
msg = url + "\n" + "签到失败,请查看日志"
print(f"签到失败,错误信息:\n{traceback.format_exc()}")
info_url = url + "/user"
response = session.get(info_url, verify=False)
"""
        The parsing below only supports the editXY theme.
"""
try:
level = re.findall(r'\["Class", "(.*?)"],', response.text)[0]
day = re.findall(r'\["Class_Expire", "(.*)"],', response.text)[0]
rest = re.findall(r'\["Unused_Traffic", "(.*?)"]', response.text)[0]
msg = (
url
+ "\n- 今日签到信息:"
+ str(msg)
+ "\n- 用户等级:"
+ str(level)
+ "\n- 到期时间:"
+ str(day)
+ "\n- 剩余流量:"
+ str(rest)
)
except Exception:
pass
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
            # Airport panel URL
url = str(check_item.get("url"))
            # Login credentials
email = str(check_item.get("email"))
password = str(check_item.get("password"))
if url and email and password:
msg = self.checkin(url=url, email=email, password=password)
else:
msg = "配置错误"
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("AIRPORT", [])
res = SspanelQd(check_items=_check_items).main()
send("机场签到", res)
| 2.359375
| 2
|
src/04_exploration/03_determine_fire_season.py
|
ranarango/fuegos-orinoquia
| 0
|
12507
|
# -----------------------------------------------------------------------
# Author: <NAME>
#
# Purpose: Determines the fire season for each window. The fire season is
# defined as the minimum number of consecutive months that contain more
# than 80% of the burned area (Archibald et al. 2013; Abatzoglou et al.
# 2018).
#
# References:
# * <NAME>., <NAME>., <NAME>., & Bradstock,
# <NAME>. (2013). Defining pyromes and global syndromes of fire regimes.
# Proceedings of the National Academy of Sciences of the United States
# of America, 110(16), 6442–6447.
#
# * <NAME>., <NAME>., <NAME>., <NAME>., &
# <NAME>. (2018). Global patterns of interannual climate–fire
# relationships. Global Change Biology, 24(11), 5164–5175.
# -----------------------------------------------------------------------
import os
from calendar import month_abbr
import pandas as pd
from src.utils.constants import REGIONS, BURNED_AREA_THRESHOLD
if __name__ == "__main__":
# Project's root
os.chdir("../..")
output_folder = "results/csv"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
df = pd.DataFrame(columns=["window", "months"])
for region in REGIONS:
month_groups = pd.read_excel(
f"results/xlsx/{region['name']}/fire_groups.xlsx", sheet_name="Month"
)
# Compute 80% threshold.
threshold = month_groups["area"].sum() * BURNED_AREA_THRESHOLD
        # Sort months from largest to smallest burned area and compute the
# cumulative sum.
sorted_groups = month_groups.sort_values(by="area", ascending=False)
sorted_groups = sorted_groups.reset_index(drop=True)
sorted_groups["cumulative_area"] = sorted_groups["area"].cumsum()
# Get the months with the largest burned area that compose more
# than 80% of the total burned area and change from month int to
# month abbreviation.
above_threshold = sorted_groups["cumulative_area"] >= threshold
fire_season_months = sorted_groups["month"].loc[:above_threshold.idxmax()]
fire_season_months = fire_season_months.sort_values()
fire_season_months = fire_season_months.apply(lambda x: month_abbr[x])
months = fire_season_months.str.cat(sep="-")
df = df.append({"window": region["name"], "months": months}, ignore_index=True)
save_to = os.path.join(output_folder, "fire_season_months.csv")
df.to_csv(save_to, index=False)
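    # Worked example (editor's illustration with made-up numbers): with monthly
    # burned areas Jan=50, Feb=30, Mar=15, Apr=5 (total 100, 80% threshold = 80),
    # sorting descending gives cumulative sums 50, 80, 95, 100; the first month at
    # or above the threshold is Feb, so the fire season string is "Jan-Feb".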
| 2.75
| 3
|
holo/modules/blender.py
|
chinarjoshi/holo
| 1
|
12508
|
<reponame>chinarjoshi/holo
import bpy
import json
from bpy.types import SpaceView3D
from bpy.app.handlers import persistent
from mathutils import Quaternion, Matrix, Vector
from holo.gestures import prediction_from_camera
def duplicate_window(window_type: str = 'INVOKE_DEFAULT') -> None:
"""Duplicates a new window into bpy.data.screens from current active window."""
context_window = bpy.context.copy()
context_window['area'] = [area for area in bpy.context.screen.areas if area.type == 'VIEW_3D'][0]
bpy.ops.screen.area_dupli(context_window, window_type)
def convert_quadview(area: SpaceView3D) -> None:
"""Converts a given window into quad-view."""
    region = [region for region in area.regions if region.type == 'WINDOW'][0]
    override = {'area': area, 'region': region, 'edit_object': bpy.context.edit_object}
bpy.ops.screen.region_quadview(override)
def configure_scene(screen_data: SpaceView3D) -> None:
"""Removes all overlay elements from the 3D viewport."""
screen_data.shading.background_type = 'VIEWPORT'
screen_data.shading.background_color = (0, 0, 0)
screen_data.overlay.show_overlays = False
for attribute in 'show_gizmo', 'show_region_toolbar', 'show_region_tool_header':
setattr(screen_data, attribute, False)
def initial_config(values: list) -> None:
"""Sets the camera position and rotation values during initialization of new frame."""
for index, window in enumerate(values):
for key, attribute in window.items():
if key not in {'perspective_matrix', 'window_matrix'}: # BUG These values are read only and need a setter
setattr(QUAD_VIEWS[index], key, attribute)
def transform_rotate(direction: str, confidence: int) -> None:
"""Given a direction and confidence value (Out of 100%), rotate the object by its corresponding vector."""
magnitude = confidence / 100
if direction not in {'retract', 'expand'}:
bpy.ops.transform.rotate(
value=magnitude,
orient_axis='Z',
orient_type='VIEW',
orient_matrix=((0.85153, 0.277963, -0.44456),
(0.15535, 0.676067, 0.720278),
(0.500763, -0.6824, 0.53251)),
orient_matrix_type='VIEW',
mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH',
proportional_size=1,
use_proportional_connected=False,
use_proportional_projected=False)
else:
for window in QUAD_VIEWS:
window.view_distance += magnitude if direction == 'expand' else magnitude * -1
def get_gestures() -> None:
"""Retrieves gestures from camera and applies the corresponding tranformation to the object."""
rotation_mapping = {
'Fist' : 'X',
'L' : 'Y',
'Okay' : 'Z',
}
for gesture in prediction_from_camera():
        # rotation_mapping is a dict, so index it; transform_rotate's second
        # parameter is named 'confidence'
        transform_rotate(direction=rotation_mapping.get(gesture.gesture, gesture.gesture),
                         confidence=gesture.confidence)
def initial_config_values() -> list:
"""Returns initial config values as a convenience utility."""
return [
{
"view_distance": 4.183098793029785,
"view_location": Vector((-0.8385156989097595, 0.05902576446533203, 0.48941677808761597)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.6414357423782349, -0.6326250433921814, 0.3170725703239441, 0.2963286340236664))
},
{
"view_distance": 4.183099269866943,
"view_location": Vector((-0.4491613209247589, 1.5609432458877563, 0.014791678637266159)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.4915403723716736, 0.6154682636260986, -0.25714513659477234, -0.559877872467041)),
},
{
"view_distance": 5.019718647003174,
"view_location": Vector((-0.9179283380508423, -0.46830159425735474, 0.334771990776062)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((-0.22622741758823395, 0.6814441084861755, -0.1789524108171463, 0.6726300716400146))
},
{
"view_distance": 5.019718647003174,
"view_location": Vector((0.797123372554779, 0.7804675102233887, 0.635741114616394)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.687656581401825, 0.6367506384849548, -0.2974682152271271, 0.1821804791688919))
}
]
if __name__ == '__main__':
duplicate_window()
RENDER_AREA = bpy.data.window_managers[0].windows[-1].screen.areas[0]
MAIN_VIEW = [area for area in bpy.data.window_managers[0].windows[0].screen.areas if area.type == 'VIEW_3D'][0].spaces[0].region_3d
QUAD_VIEWS = RENDER_AREA.spaces[0].region_quadviews
convert_quadview(area=RENDER_AREA)
configure_scene(screen_data=RENDER_AREA.spaces[0])
initial_config(initial_config_values())
get_gestures()
# bpy.data.window_managers[0].windows[1].screen.areas[0].spaces[0].region_3d.view_rotation.rotate(Euler((1, 10, .1)))
for window in bpy.data.window_managers[0].windows: # let's find what's what
for area in window.screen.areas:
if area.type == 'VIEW_3D':
if len(area.spaces[0].region_quadviews) > 0: #if quadviews are active
quad_views = area.spaces[0].region_quadviews
else:
main_view = area.spaces[0].region_3d
@persistent # This makes it stay if another file is opened
def update_handler(dummy):
for every_view in QUAD_VIEWS:
every_view.view_location = MAIN_VIEW.view_location
every_view.view_distance = MAIN_VIEW.view_distance
bpy.app.handlers.frame_change_post.append(update_handler)
| 2.0625
| 2
|
ncp/models/det_mix_ncp.py
|
JoeMWatson/ncp
| 2
|
12509
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow_probability import distributions as tfd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from ncp import tools
def network(inputs, config):
hidden = inputs
for size in config.layer_sizes:
hidden = tf.layers.dense(hidden, size, tf.nn.leaky_relu)
mean = tf.layers.dense(hidden, 1)
noise = tf.layers.dense(hidden, 1, tf.nn.softplus) + 1e-6
uncertainty = tf.layers.dense(hidden, 1, None)
return mean, noise, uncertainty
def define_graph(config):
network_tpl = tf.make_template('network', network, config=config)
inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
targets = tf.placeholder(tf.float32, [None, 1])
num_visible = tf.placeholder(tf.int32, [])
batch_size = tf.to_float(tf.shape(inputs)[0])
data_mean, data_noise, data_uncertainty = network_tpl(inputs)
ood_inputs = inputs + tf.random_normal(
tf.shape(inputs), 0.0, config.noise_std)
ood_mean, ood_noise, ood_uncertainty = network_tpl(ood_inputs)
losses = [
-tfd.Normal(data_mean, data_noise).log_prob(targets),
-tfd.Bernoulli(data_uncertainty).log_prob(0),
-tfd.Bernoulli(ood_uncertainty).log_prob(1),
]
if config.center_at_target:
losses.append(-tfd.Normal(ood_mean, ood_noise).log_prob(targets))
loss = sum(tf.reduce_sum(loss) for loss in losses) / batch_size
optimizer = tf.train.AdamOptimizer(config.learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(
loss, colocate_gradients_with_ops=True))
if config.clip_gradient:
gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
optimize = optimizer.apply_gradients(zip(gradients, variables))
data_uncertainty = tf.sigmoid(data_uncertainty)
if not config.center_at_target:
data_mean = (1 - data_uncertainty) * data_mean + data_uncertainty * 0
data_noise = (1 - data_uncertainty) * data_noise + data_uncertainty * 0.1
return tools.AttrDict(locals())
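# Minimal usage sketch (editor's illustration; the field names mirror those read
# in define_graph above, and tools.AttrDict is assumed to accept keyword
# arguments):
#
#   config = tools.AttrDict(
#       num_inputs=1, layer_sizes=[64, 64], noise_std=0.1,
#       center_at_target=True, learning_rate=1e-3, clip_gradient=5.0)
#   graph = define_graph(config)
#   # graph.inputs, graph.targets, graph.optimize, graph.loss, ... are available
#   # because define_graph returns tools.AttrDict(locals()).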
| 1.992188
| 2
|
api/models/target.py
|
zanachka/proxy-service
| 1
|
12510
|
"""
DB operations for Targets
"""
from api.models.base import DBModel
class TargetDB(DBModel):
'''DBModel for the targets table'''
tablename = 'targets'
| 1.625
| 2
|
myTeam.py
|
alexrichardson21/PacmanDQNAgent
| 0
|
12511
|
<filename>myTeam.py
# myTeam.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
# TO DISCUSS:
# Walkthru
# Replay Func
# Agent state vs position
# Normalizing state values
# Actions vs. Legal Actions
# Reward Func
import random
import time
import math
import json
import os
from util import nearestPoint
from collections import deque
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from game import Directions
from captureAgents import CaptureAgent
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
first='OffDQNAgent', second='DefDQNAgent'):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
index numbers. isRed is True if the red team is being created, and
will be False if the blue team is being created.
As a potentially helpful development aid, this function can take
additional string-valued keyword arguments ("first" and "second" are
such arguments in the case of this function), which will come from
the --redOpts and --blueOpts command-line arguments to capture.py.
For the nightly contest, however, your team will be created without
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""
# The following line is an example only; feel free to change it.
return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
class DQNAgent(CaptureAgent):
def registerInitialState(self, gs):
"""
This method handles the initial setup of the
agent to populate useful fields (such as what team
we're on).
A distanceCalculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.getDistance(p1, p2)
IMPORTANT: This method may run for at most 15 seconds.
"""
'''
Make sure you do not delete the following line. If you would like to
use Manhattan distances instead of maze distances in order to save
on initialization time, please take a look at
CaptureAgent.registerInitialState in captureAgents.py.
'''
'''
Your initialization code goes here, if you need any.
'''
print("REGISTERING INITIAL STATE... \n\n")
train = True
self.EPISODES = 10000
self.memory = deque(maxlen=2000)
self.alpha = 0.05
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.05
self.epsilon_decay = 0.999
self.learning_rate = 0.002
self.epsilon = self.epsilon_min
self.start = gs.getAgentPosition(self.index)
CaptureAgent.registerInitialState(self, gs)
self.actions = ['Stop', 'North', 'South', 'East', 'West']
cols = len(gs.data.layout.layoutText[0])
rows = len(gs.data.layout.layoutText)
self.input_shape = rows*cols
self.output_shape = len(self.actions)
        # Build the network, then restore saved weights if a checkpoint exists.
        self.model = self._build_model()
        if os.path.exists('DQNAgent%d.h5' % self.index):
            self.model.load_weights('DQNAgent%d.h5' % self.index)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(64, input_dim=self.input_shape))
model.add(Dense(32))
model.add(Dense(self.output_shape, activation='linear'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
# DEPRECATED
def train(self, gs):
batch_size = 32
print("Beginning training ...")
for e in range(self.EPISODES):
state = gs.getAgentState(self.index)
legal_actions = gs.getLegalActions(self.index)
best_index = self.act(gs)
best_action = self.chooseAction(gs)
next_gs = self.getSuccessor(gs, best_action)
next_state = next_gs.getAgentState(self.index)
reward = self.getReward(next_gs, gs)
self.remember(gs, best_index, reward, next_gs)
with open("memory.json", "w") as write_file:
json.dump((self.index,
gs.getAgentPosition(self.index),
best_action, reward,
next_gs.getAgentPosition(self.index)
), write_file)
gs = next_gs
if len(self.memory) > batch_size:
self.replay(batch_size)
if (e % 100 == 0):
print("Episode: %d" % e)
self.model.save_weights("agent%d.h5" % self.index)
print('Finished Training!')
def remember(self, state, action, reward, next_state):
self.memory.append((state, action, reward, next_state))
def replay(self, batch_size):
# Samples random memories of batch_size
minibatch = random.sample(self.memory, batch_size)
# For each memory
avg_loss = []
for gs, action, reward, next_gs in minibatch:
state = gs.getAgentState(self.index)
next_state = next_gs.getAgentState(self.index)
# Update to q value
gs_q_vals = self.model.predict(self.preprocessGS(gs))
best_q_val = np.amax(gs_q_vals[0])
next_best_q_val = np.amax(
self.model.predict(self.preprocessGS(next_gs))[0])
diff = (reward + self.gamma * next_best_q_val) - best_q_val
gs_q_vals[0][self.actions.index(action)] = diff
loss = self.model.fit(self.preprocessGS(gs),
gs_q_vals, epochs=1, verbose=0)
avg_loss += loss.history['loss']
# print("Replay Avg Loss: " + str(np.average(avg_loss)))
# Decrease epsilon
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def getSuccessor(self, gs, action):
"""
Finds the next successor which is a grid position (location tuple).
"""
successor = gs.generateSuccessor(self.index, action)
pos = successor.getAgentState(self.index).getPosition()
if pos != nearestPoint(pos):
# Only half a grid position was covered
return successor.generateSuccessor(self.index, action)
else:
return successor
def chooseAction(self, gs):
"""
Picks among actions randomly.
"""
# state = gs.getAgentPosition(self.index)
# actions = gs.getLegalActions(self.index)
'''
You should change this in your own agent.
'''
"*** YOUR CODE HERE ***"
batch_size = 16
# Update memory if possible
last_gs = self.getPreviousObservation()
if last_gs:
next_gs = self.getCurrentObservation()
if next_gs.data.timeleft <= 5:
self.model.save('DQNAgent%d.h5' % self.index)
reward = self.getReward(gs, last_gs)
action = self.getDirection(last_gs.getAgentPosition(
self.index), gs.getAgentPosition(self.index))
self.memory.append((last_gs, action, reward, gs))
with open("memory.json", "w") as write_file:
json.dump((self.index,
last_gs.getAgentPosition(self.index),
action, reward,
gs.getAgentPosition(self.index)
), write_file)
# Replay
if len(self.memory) > batch_size:
self.replay(batch_size)
legal_actions = gs.getLegalActions(self.index)
# Random Action
if np.random.rand() <= self.epsilon:
best_action = random.choice(legal_actions)
# Best Action
else:
act_values = self.model.predict(self.preprocessGS(gs))
legal_actions_i = [self.actions.index(a) for a in legal_actions]
best_action = np.argmax(act_values[0][legal_actions_i])
best_action = self.actions[legal_actions_i[best_action]]
return best_action # returns action
def preprocessGS(self, gs):
data = []
layout = gs.data.layout.layoutText
# new_layout = np.zeros(((16,)))
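        # Encode the ASCII layout: blanks -> 0, walls '%' -> 5, food '.' -> 6,
        # capsules 'o' -> 7, then scale every cell into [0, 1] by dividing by 7.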
for i, row in enumerate(layout):
new_row = row.replace(" ", "0") \
.replace("%", "5") \
.replace(".", "6") \
.replace("o", "7")
data += [float(x) / float(7) for x in list(new_row)]
# + [str(self.actions.index(action))]
return np.reshape(np.asarray(data, dtype=float).flatten(), (1, self.input_shape))
def min_dist_to_food(self, gs, agent_pos):
food_pos = []
for i, r in enumerate(self.getFood(gs).data):
for j, c in enumerate(r):
if self.getFood(gs).data[i][j]:
food_pos += [(i, j)]
return np.min([self.getMazeDistance(agent_pos, f)
for f in food_pos])
def min_dist_to_op(self, gs, agent_pos):
op_pos = [gs.getAgentPosition(i)
for i in self.getOpponents(gs)]
return np.min([self.getMazeDistance(agent_pos, f)
for f in op_pos])
def isAgentDead(self, new_gs, old_gs):
new_loc = new_gs.getAgentPosition(self.index)
old_loc = old_gs.getAgentPosition(self.index)
op_pos = [new_gs.getAgentPosition(i)
for i in self.getOpponents(new_gs)]
if old_loc in op_pos and new_loc == self.start:
return True
return False
def isOpDead(self, new_gs, old_gs):
op_i = self.getOpponents(new_gs)
new_op_locs = [new_gs.getAgentPosition(i) for i in op_i]
old_op_locs = [old_gs.getAgentPosition(i) for i in op_i]
old_loc = old_gs.getAgentPosition(self.index)
        # An opponent is treated as eaten if it occupied our previous square and
        # its position has now jumped elsewhere (respawn). Opponent start
        # positions are not tracked here, so this is only an approximation.
        for old_op, new_op in zip(old_op_locs, new_op_locs):
            if old_op == old_loc and new_op != old_op:
                return True
        return False
def getDirection(self, prev_pos, curr_pos):
if prev_pos[0] < curr_pos[0]:
return 'West'
elif prev_pos[0] > curr_pos[0]:
return 'East'
else:
if prev_pos[1] < curr_pos[1]:
return 'North'
elif prev_pos[1] > curr_pos[1]:
return 'South'
else:
return 'Stop'
class OffDQNAgent(DQNAgent):
def getReward(self, new_gs, old_gs):
# init
new_agent = new_gs.getAgentState(self.index)
old_agent = old_gs.getAgentState(self.index)
new_loc = new_gs.getAgentPosition(self.index)
old_loc = old_gs.getAgentPosition(self.index)
# op_pos = [new_gs.getAgentPosition(i)
# for i in self.getOpponents(new_gs)]
food_pos = []
for i, r in enumerate(self.getFood(old_gs).data):
for j, c in enumerate(r):
if self.getFood(old_gs).data[i][j]:
food_pos += [(i, j)]
reward = 0
# Move closer to food
reward += 20.0 * (self.min_dist_to_food(old_gs, old_loc) -
self.min_dist_to_food(old_gs, new_loc)) / float(old_agent.numCarrying + 1) - 3.0
# No movement
if old_loc == new_loc:
reward -= 4.0
# Close to Food
reward += (50.0 - self.min_dist_to_food(old_gs, new_loc)) / 10.0
# Holding too many
reward -= new_agent.numCarrying * 1.5
# pick up dot
r, c = new_loc
if self.getFood(old_gs).data[r][c]:
reward += 50.0
# return dots to side
reward += 200.0 * (new_agent.numReturned - old_agent.numReturned)
# died
if self.isAgentDead(new_gs, old_gs):
reward -= 500.0
# close to op
if new_agent.isPacman:
            old_distances = min(old_gs.agentDistances[i]
                                for i in self.getOpponents(old_gs))
            new_distances = min(new_gs.agentDistances[i]
                                for i in self.getOpponents(new_gs))
if new_distances < 4:
reward -= (5 - new_distances) * 20.0
with open("off_rewards.json", "w") as write_file:
json.dump(reward, write_file)
return reward
class DefDQNAgent(DQNAgent):
def getReward(self, new_gs, old_gs):
# init
new_agent = new_gs.getAgentState(self.index)
old_agent = old_gs.getAgentState(self.index)
new_loc = new_gs.getAgentPosition(self.index)
old_loc = old_gs.getAgentPosition(self.index)
# op_pos = [old_gs.getAgentPosition(i)
# for i in self.getOpponents(old_gs)]
op_indices = self.getOpponents(old_gs)
reward = 0
# if not (new_agent.isPacman):
# min_dist_to_op = self.min_dist_to_op(old_gs, new_loc)
# reward = float(50) - min_dist_to_op
# if(min_dist_to_op == 0):
# reward += 200
if new_agent.isPacman:
reward -= 50
# living penalty while on defensive side -> reward = -.03
# if not (new_agent.isPacman):
# reward -= .03
# # capture opponent -> 20
# min_dist_to_op = self.min_dist_to_op(old_gs, new_loc)
# if(min_dist_to_op == 0):
# reward += 20
# # Opponent far -> -1 Opponent close -> 1
# else:
# reward += math.abs(min_dist_to_op / float(50) - 1)
# living penalty while on offensive side -> reward = -.05
# else:
# reward -= .05
# # died -> -50
# if self.isDead(new_gs, old_gs):
# reward -= 50
# # if opponent returns dots -> reward = -3 * num returned
# old_num_returned = [old_gs.getAgentState(i).numReturned
# for i in op_indices]
# new_num_returned = [new_gs.getAgentState(i).numReturned
# for i in op_indices]
# reward -= 3 * (sum(new_num_returned) - sum(old_num_returned))
with open("def_rewards.json", "w") as write_file:
json.dump(reward, write_file)
return reward
| 2.296875
| 2
|
wordBreak2.py
|
saai/LeetcodePythonSolutions
| 0
|
12512
|
<gh_stars>0
class Solution:
# @param s, a string
# @param wordDict, a set<string>
# @return a string[]
def wordBreak(self, s, wordDict):
n = len(s)
res = []
chars = ''.join(wordDict)
for i in xrange(n):
if s[i] not in chars:
return res
lw = s[-1]
lw_end = False
for word in wordDict:
if word[-1] == lw:
lw_end = True
if not lw_end:
return res
self.dfs(s,[],wordDict,res)
return res
def dfs(self, s, path,wordDict,res):
if not s:
res.append(' '.join(path[:]))
return
for i in range(1,len(s)+1):
c = s[:i]
if c in wordDict:
path.append(c)
self.dfs(s[i:],path,wordDict,res)
path.pop()
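# Example (editor's note; the classic LeetCode 140 case): s = "catsanddog" with
# wordDict = {"cat", "cats", "and", "sand", "dog"} yields
# ["cat sand dog", "cats and dog"] (order depends on the DFS prefix order).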
| 3.1875
| 3
|
setup.py
|
astrodeepnet/sbi_experiments
| 3
|
12513
|
from setuptools import setup, find_packages
setup(
name='SBIExperiments',
version='0.0.1',
url='https://github.com/astrodeepnet/sbi_experiments',
author='<NAME> and friends',
description='Package for numerical experiments of SBI tools',
packages=find_packages(),
install_requires=[
'numpy>=1.19.2',
'jax>=0.2.0',
'tensorflow_probability>=0.14.1',
'scikit-learn>=0.21',
'jaxopt>=0.2'
],
)
| 1.085938
| 1
|
deploy/deploy_asterisk_provider2.py
|
orpolaczek/astricon-2017-demos
| 0
|
12514
|
<reponame>orpolaczek/astricon-2017-demos<gh_stars>0
import datetime
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from pprint import pprint
Engine = get_driver(Provider.ELASTICHOSTS)
driver = Engine("733b7dc7-7498-4db4-9dc4-74d3fee8abed",
secret="<KEY>",
secure=False)
images = driver.list_images()
sizes = driver.list_sizes()
IMAGE_ID = '38df09864d854b76b5023878ffc80161'
image = [i for i in images if i.id == IMAGE_ID][0]
pprint(images)
pprint(sizes)
node = driver.deploy_node(
name="astricon-{}".format(datetime.datetime.now().strftime('%Y-%m-%dt%H%M%S')),
image=image,
size=sizes[3],
script='deploy-script.sh',
enable_root=True,
vnc_password="<PASSWORD>")
print("Waiting for Node")
driver.wait_until_running([node], 10, 1000)
print("Node is now running")
| 2.125
| 2
|
todo/admin.py
|
haidoro/TODO_lesson
| 0
|
12515
|
<gh_stars>0
from django.contrib import admin
from .models import TodoModel
admin.site.register(TodoModel)
| 1.210938
| 1
|
assignments/06-python-first-lines/first_lines.py
|
antoniog1995/biosys-analytics
| 0
|
12516
|
#!/usr/bin/env python3
"""
Author : antoniog1
Date : 2019-02-21
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('positional', metavar='DIR', type = str, help='A positional argument', nargs="+")
parser.add_argument('-w', '--width', help='A named integer argument', metavar='int', type=int, default=50)
return parser.parse_args()
# --------------------------------------------------
def warn(msg):
"""Print a message to STDERR"""
print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
"""warn() and exit with error"""
warn(msg)
sys.exit(1)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
width = args.width
directory = args.positional
for dir_name in directory:
dir_dict = {}
if not os.path.isdir(dir_name):
warn('"{}" is not a directory'.format(dir_name))
continue
print(dir_name)
for filename in os.listdir(dir_name):
path = os.path.join(dir_name,filename)
with open(path) as f:
first_line = f.readline().rstrip()
dir_dict[first_line] = filename
for line, file in sorted(dir_dict.items()):
num_per = width - len(line) - len(file)
ellipses = "." * num_per
print('{} {} {}'.format(line,ellipses,file))
# --------------------------------------------------
if __name__ == '__main__':
main()
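# Output format (editor's note): for each DIR argument the directory name is
# printed, followed by one row per file containing the file's first line, a run
# of dots, and the filename, padded so the parts line up at roughly --width
# characters; rows are sorted by first line.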
| 3.4375
| 3
|
lvmsurveysim/target/target.py
|
albireox/lvmsurveysim
| 0
|
12517
|
<filename>lvmsurveysim/target/target.py
#!/usr/bin/env python
# encoding: utf-8
#
# @Author: <NAME>
# @Date: Oct 10, 2017
# @Filename: target.py
# @License: BSD 3-Clause
# @Copyright: <NAME>
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import pathlib
import yaml
from . import regions
from .. import config
class Target(object):
"""A representation of an astronomical target.
Defines a target, including target centre, area on the sky, surface
    brightness, etc. See the section :ref:`target-defining` for more
information.
Parameters:
name (str):
The identifier of this target, e.g., ``'M81'``.
coords (tuple or `~astropy.coordinates.SkyCoord`):
A tuple of ``(ra, dec)`` in degrees or a
`~astropy.coordinates.SkyCoord` describing the centre of the
target. If the region is of type ``polygon``, ``coords`` must
be a list of vertices as indicated in `~.regions.PolygonalRegion`.
region_type (str):
One of the valid region types for `~.regions.Region`.
region_params (dict):
A dictionary of parameters to be passed to `~.regions.Region`.
Example:
>>> target = Target('MyTarget', coords=(169, 65), region_type='circle',
>>> region_params={'r': 0.1})
>>> target
<Region 'MyTarget'>
>>> target.region
<CircularRegion (coords=<SkyCoord (ICRS): (ra, dec) in deg
( 169., 65.)>, r=0.100 deg)>
"""
def __init__(self, name, coords, region_type, region_params={}):
self.name = name
self.coords = coords
self.region = self._create_region(coords, region_type, region_params)
def __repr__(self):
return f'<Region {self.name!r}>'
@staticmethod
def _create_region(coords, region_type, region_params):
"""Returns a `.regions.Region` with the target on the sky."""
return regions.Region(region_type, coords, **region_params)
@classmethod
def from_target_list(cls, name, target_list=None):
"""Returns an instance of `.Target` from a target list.
Initialises a new target whose parameters have been previously defined
in a target list. Target lists must be YAML files in which each
        target has attributes ``coords``, ``region_type``, and
``region_params``, defined as in :ref:`target-defining`. For example:
.. code-block:: yaml
M81:
coords: [148.888333, 69.0652778]
region_type: 'ellipse'
region_params:
a: 0.209722
b: 0.106958333
pa: 149
Parameters:
name (str):
The identifier for the target. Must be defined in the target
list file.
target_list (str, `~pathlib.Path`, or None):
The path to the YAML file containing the target list. If
``None``, default to the target list contained in ``lvmcore``.
Example:
>>> from lvmsurveysim.target import Target
>>> m81 = Target.from_target_list('M81')
"""
if target_list is None:
target_list = pathlib.Path(
os.path.expanduser(os.path.expandvars(config['target_list'])))
else:
target_list = pathlib.Path(target_list)
assert target_list.exists()
targets = yaml.load(open(str(target_list)))
assert name in targets, 'target not found in target list.'
target = targets[name]
return cls(name, target['coords'], region_type=target['region_type'],
region_params=target['region_params'])
def plot(self, **kwargs):
"""Plots the target.
Parameters:
kwargs (dict):
Keyword arguments to be pased to `.regions.Region.plot`.
"""
return self.region.plot(**kwargs)
| 2.53125
| 3
|
samples/snippets/test_export_to_bigquery.py
|
renovate-bot/python-contact-center-insights
| 4
|
12518
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import google.auth
from google.cloud import bigquery
import pytest
import export_to_bigquery
GCLOUD_TESTS_PREFIX = "python_samples_tests"
@pytest.fixture
def project_id():
_, project_id = google.auth.default()
return project_id
@pytest.fixture
def unique_id():
uuid_hex = uuid.uuid4().hex[:8]
return f"{GCLOUD_TESTS_PREFIX}_{uuid_hex}"
@pytest.fixture
def bigquery_resources(project_id, unique_id):
# Create a BigQuery dataset.
bigquery_client = bigquery.Client()
dataset_id = unique_id
table_id = unique_id
dataset = bigquery.Dataset(f"{project_id}.{dataset_id}")
dataset.location = "US"
bigquery_client.create_dataset(dataset, timeout=30)
# Create a BigQuery table under the created dataset.
table = bigquery.Table(f"{project_id}.{dataset_id}.{table_id}")
bigquery_client.create_table(table)
yield dataset_id, table_id
# Delete the BigQuery dataset and table.
bigquery_client.delete_dataset(dataset_id, delete_contents=True)
def test_export_data_to_bigquery(capsys, project_id, bigquery_resources):
dataset_id, table_id = bigquery_resources
export_to_bigquery.export_to_bigquery(project_id, project_id, dataset_id, table_id)
out, err = capsys.readouterr()
assert "Exported data to BigQuery" in out
| 1.984375
| 2
|
Author/admin.py
|
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
| 3
|
12519
|
<gh_stars>1-10
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
# Register your models here.
def set_active(modeladmin, request, queryset):
for user in queryset:
user.is_active = True
user.save()
set_active.short_description = 'Set Account Status: Active'
def deactivate(modeladmin, request, queryset):
for user in queryset:
user.is_active = False
user.save()
deactivate.short_description = 'Set Account Status: Inactive'
class AuthorAdmin(UserAdmin):
# display fields
fieldsets = (
(None, {'fields': ('email', 'displayName','github')}),
(('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
)
ordering = ('email',)
search_fields = ('email', 'displayName')
# list_display = ('email', 'displayName', 'is_staff', 'url')
list_display = ('email', 'auth_pk', 'displayName', 'github', 'is_active', 'is_staff', 'url')
actions = [set_active, deactivate,]
# admin.site.unregister(User)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Inbox)
admin.site.register(Like)
admin.site.register(Liked)
admin.site.register(FriendRequest)
admin.site.register(Followers)
| 2.203125
| 2
|
TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py
|
MariusBallot/09-2021-Robotics-EFREI-Files
| 0
|
12520
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""task module."""
from __future__ import unicode_literals
import sys
import os
import pydoc
import time
import json
import logging
import collections
import pkg_resources
import subprocess
import webbrowser
import websocket
from pyloco.parse import TaskArgParser, PylocoArgParser
from pyloco.proxy import ParentProxy
from pyloco.util import (load_pymod, type_check, pyloco_print, OS, urlparse, teval,
split_assert_expr, get_port, pack_websocket_message,
is_ipv6, pyloco_import, PylocoPickle, import_modulepath)
from pyloco.error import TestError, InternalError, UsageError
from pyloco.base import Object, Global, pyloco_builtins
def load_taskclass(taskpath, argv, subargv):
if not taskpath:
return None, None, None, None
# TODO: handle aliased task
if isinstance(taskpath, type):
if issubclass(taskpath, Task):
return taskpath, argv, subargv, None
raise UsageError("Not compatible task type: %s" % type(taskpath))
# TODO: move to callsite to load_taskclass
objs = {}
while "--import" in argv:
idx = argv.index("--import")
mpath = argv.pop(idx+1)
argv.pop(idx)
key, obj = import_modulepath(mpath)
objs[key] = obj
task_class = None
_p = taskpath.split("#", 1)
if len(_p) == 2:
taskpath, fragment = [x.strip() for x in _p]
else:
fragment = ""
if os.path.exists(taskpath):
mods = []
if os.path.isfile(taskpath):
head, base = os.path.split(taskpath)
if base.endswith(".py"):
mods.append(load_pymod(head, base[:-3]))
elif base.endswith(".plx"):
from pyloco.plxtask import PlXTask
task_class = PlXTask
argv.insert(0, taskpath)
elif base.endswith(".plz"):
from pyloco.plztask import PlZTask
task_class = PlZTask
argv.insert(0, taskpath)
elif os.path.isdir(taskpath):
# TODO: support Python package
pass
import pdb; pdb.set_trace()
candidates = {}
for mod in mods:
for name in dir(mod):
if not name.startswith("_"):
obj = getattr(mod, name)
if (type(obj) == type(Task) and issubclass(obj, Task) and
(obj.__module__ is None or
not obj.__module__.startswith("pyloco."))):
candidates[name] = obj
if candidates:
if fragment:
                if fragment in candidates:
                    task_class = candidates[fragment]
else:
raise UsageError("No task is found with a fragment of "
"'%s'." % fragment)
elif len(candidates) == 1:
task_class = candidates.popitem()[1]
else:
raise UsageError(
"More than one frame are found."
"Please add fragment to select one: %s" %
list(candidates.keys())
)
if task_class:
setattr(task_class, "_path_", os.path.abspath(taskpath))
#else:
# raise UsageError("Task class is not found. Please check path: %s" % taskpath)
if task_class is None:
from pyloco.manage import _ManagerBase
if taskpath in _ManagerBase._default_tasks_:
task_class = _ManagerBase._default_tasks_[taskpath]
if task_class is None:
for ep in pkg_resources.iter_entry_points(group='pyloco.task'):
if taskpath == ep.name:
task_class = ep.load()
from pyloco.plxtask import PlXTask
if task_class is PlXTask:
task_mod = pyloco_import(taskpath)
task_dir = os.path.dirname(task_mod.__file__)
argv.insert(0, os.path.join(task_dir, getattr(task_mod, "plx")))
break
if not task_class:
from pyloco.mgmttask import mgmt_tasks
from pyloco.stdtask import standard_tasks
if taskpath in mgmt_tasks:
task_class = mgmt_tasks[taskpath]
elif taskpath in standard_tasks:
task_class = standard_tasks[taskpath]
# TODO support remote task
# if not task_class:
#
# url = urlparse(taskpath)
#
# if url.netloc or url.scheme:
# argv.insert(0, taskpath)
# task_class = RemoteTask
if not task_class:
raise UsageError("Task '%s' is not found. Please check path." % taskpath)
return task_class, argv, subargv, objs
def taskclass(taskpath):
cls, _, _, _ = load_taskclass(taskpath, [], [])
return cls
class Task(Object):
"""Base class for pyloco Tasks
"""
_version_ = "0.1.0"
_argparser_ = TaskArgParser
def __new__(cls, parent, *vargs, **kwargs):
obj = super(Task, cls).__new__(cls)
obj.parent = parent
obj.subargv = None
obj.taskattr = {}
if not hasattr(obj, "_name_"):
obj._name_ = kwargs.pop("name", cls.__name__)
#obj._parser = TaskArgParser(obj)
obj._parser = cls._argparser_(obj)
obj._env = {"__builtins__": pyloco_builtins,
"__arguments__": {}}
obj._fwddefs = {}
obj._fwds = {}
obj._shrdefs = {}
#obj._rdcdefs = {}
obj._rdcs = {}
obj._logger = None
obj._verbose = False
obj._websocket_server = None
obj._websocket_client = None
obj._webserver = None
obj.tglobal = Global()
obj.parse_known_args = False
obj.unknown_args = []
return obj
def clone(self):
Task(self.parent)
def add_logger(self, logpath):
root, ext = os.path.splitext(logpath)
if ext == ".log":
self.parent.log_setup(filename=logpath)
else:
self.parent.log_setup(filename=logpath+".log")
self._logger = logging.getLogger(self.get_name())
def _log_level(self, level, *vargs, **kwargs):
logger = self._logger if self._logger else self.parent._logger
if logger:
getattr(logger, level)(*vargs, **kwargs)
def log_debug(self, *vargs, **kwargs):
self._log_level("debug", *vargs, **kwargs)
def log_info(self, *vargs, **kwargs):
self._log_level("info", *vargs, **kwargs)
def log_warn(self, *vargs, **kwargs):
self._log_level("warn", *vargs, **kwargs)
def log_warning(self, *vargs, **kwargs):
self._log_level("warning", *vargs, **kwargs)
def log_error(self, *vargs, **kwargs):
self._log_level("error", *vargs, **kwargs)
def log_critical(self, *vargs, **kwargs):
self._log_level("critical", *vargs, **kwargs)
def log_exception(self, *vargs, **kwargs):
self._log_level("exception", *vargs, **kwargs)
def get_name(self):
return self.parent.shared["parent_name"] + "." + self._name_
def get_mgrname(self):
return self.get_name().split(".")[0]
def get_proxy(self, proxycls=None, inherit_shared=False):
if proxycls is None:
proxycls = ParentProxy
proxy = proxycls(self)
if inherit_shared:
proxy.shared.update(self.parent.shared)
return proxy
def _register_check(self, dest):
if not dest:
raise UsageError("Incorrect name: %s" % dest)
if dest.startswith("_"):
raise UsageError("'Forward-name' should not start with an "
"underscore ('_'): %s" % dest)
if dest in self._fwddefs:
raise UsageError("'%s' is already registered for forwarding" %
dest)
if dest in self._shrdefs:
raise UsageError("'%s' is already registered for sharing" % dest)
#if dest in self._rdcdefs:
# raise UsageError("'%s' is already registered for reducing" % dest)
def register_forward(self, dest, type=None, help=None):
self._register_check(dest)
self._fwddefs[dest] = (type, help)
def register_shared(self, dest, type=None, help=None):
self._register_check(dest)
self._shrdefs[dest] = (type, help)
#def register_reduce(self, dest, type=None, help=None):
# self._register_check(dest)
# self._rdcdefs[dest] = (type, help)
def _add_transfer(self, defs, cont, **kwargs):
for dest, value in kwargs.items():
if dest not in defs:
raise UsageError("'%s' is not registered for data transfer." %
dest)
if type_check(value, defs[dest][0]):
cont[dest] = value
else:
if isinstance(value, str) and os.path.isfile(value):
import pdb; pdb.set_trace() # noqa: E702
else:
raise TestError("Data transfer type check failure: %s" % dest)
def add_forward(self, **kwargs):
self._add_transfer(self._fwddefs, self._fwds, **kwargs)
def add_shared(self, **kwargs):
self._add_transfer(self._shrdefs, self.parent.shared, **kwargs)
#def add_reduce(self, **kwargs):
# self._add_transfer(self._rdcdefs, self._rcds, **kwargs)
def write_pickle(self, pickler, data):
return data
def pre_perform(self, targs):
if targs.log:
self.add_logger(targs.log)
if targs.verbose:
self._verbose = True
if hasattr(targs, "assert_input") and targs.assert_input:
env = {"__builtins__": pyloco_builtins}
for k, v in self._env.items():
if not k.startswith("_"):
env[k] = v
for key, value in targs.__dict__.items():
if key == "assert_input":
continue
env[key] = value
for boolexpr in targs.assert_input:
for varg in boolexpr.vargs:
assert_result = eval(varg, env)
if assert_result:
if self._verbose:
pyloco_print('\nINPUT TEST PASSED with "%s"' %
varg)
else:
pairs = split_assert_expr(varg)
if not pairs:
raise TestError(
"\nINPUT TEST FAILED with '%s' =>"
" not True" % varg
)
elif len(pairs) == 1:
sep, (lexpr, rexpr) = pairs.popitem()
msg = (
"\nINPUT TEST(%s) is FAILED.\n "
"Left expr(%s) of '%s' is evaluated to '%s'"
" and\n right expr(%s) of '%s' "
"is evaluated to '%s'.\n"
) % (varg, lexpr, sep, eval(lexpr, env), rexpr,
sep, eval(rexpr, env))
raise TestError(msg)
else:
msg = (
"\nINPUT TEST(%s) FAILED: detected multiple"
" possibilities of this test failure\n") % varg
idx = 0
for sep, (lexpr, rexpr) in pairs.items():
idx += 1
try:
msg += (
"CASE%d:\n Left expr(%s)" " of"
" '%s' is evaluated to '%s' and\n"
" right expr(%s) of '%s' is "
"evaluated to '%s'.\n"
) % (idx, lexpr, sep, eval(lexpr, env),
rexpr, sep, eval(rexpr, env))
except Exception:
pass
raise TestError(msg)
# if targs.import_module:
# modpath = targs.import_module
# head, base = os.path.split(modpath)
# mod = None
#
# if os.path.isfile(modpath) and modpath.endswith(".py"):
# modname = base[:-3]
# mod = load_pymod(head, modname)
#
# elif (os.path.isdir(modpath) and
# os.path.isfile(os.path.join(modpath, "__init__.py"))):
# if base[-1] == os.sep:
# modname = base[:-1]
#
# else:
# modname = base
#
# mod = load_pymod(head, modname)
#
# else:
# try:
# modname = modpath
# mod = pyloco_import(modname)
#
# except ModuleNotFoundError as err:
# raise UsageError("'%s' module is not found." % modname)
# if mod:
# self._env[modname] = mod
if targs.calculate:
for calc in targs.calculate:
for expr in calc.vargs:
self._env["_"] = teval(expr, self._env)
for lhs, rhs in calc.kwargs.items():
self._env[lhs.strip()] = teval(rhs, self._env)
if targs.webapp:
appath = targs.webapp
# TODO: reuse webserver and websocket
# TODO: user-provided js can control if reuse or not through
# websocket init msg
if appath.endswith(".js"):
webapp = os.path.abspath(appath)[:-3]
elif appath.endswith(".plw"):
import pdb; pdb.set_trace() # noqa: E702
else:
webapp = os.path.abspath(appath)
here = os.path.dirname(__file__)
websocket_port = get_port()
websocket_path = os.path.join(here, "websocket.py")
webserver_port = get_port()
webserver_path = os.path.join(here, "webserver.py")
self._websocket_server = subprocess.Popen(
[sys.executable, websocket_path, str(websocket_port),
str(webserver_port)], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._webserver = subprocess.Popen(
[sys.executable, webserver_path, str(webserver_port),
str(websocket_port)] + [webapp], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
webbrowser.open("http://127.0.0.1:%d" % webserver_port)
if OS == "windows" and is_ipv6():
self._websocket_client = websocket.create_connection(
"ws://[::1]:%d/" % websocket_port)
else:
self._websocket_client = websocket.create_connection(
"ws://127.0.0.1:%d/" % websocket_port)
self._websocket_client.send("pyloco")
maxiter = 100
count = 0
while count < maxiter:
self._websocket_client.send("check_browser")
out = self._websocket_client.recv()
if out == "True":
break
time.sleep(0.1)
count += 1
def send_websocket_message(self, sender, msgtype, msg):
if self._websocket_client:
self._websocket_client.send(
json.dumps(pack_websocket_message(sender, msgtype, msg))
)
self._websocket_client.recv()
def post_perform(self, targs):
if targs.webapp:
appath = targs.webapp
wait2close = self.taskattr.get("webapp.wait2close", True)
if wait2close:
if self._websocket_server:
pyloco_print("Waiting for '%s' to be completed..." %
appath, end="")
sys.stdout.flush()
self._websocket_server.communicate(input=None)
if self._websocket_client:
self._websocket_client.close()
if self._webserver:
self._webserver.communicate(input=None)
pyloco_print("DONE.")
sys.stdout.flush()
env = dict(self._env)
env.update(self.parent.shared)
env.update(self._fwds)
lenv = {}
if targs.forward:
try:
for fwd in targs.forward:
for varg in fwd.vargs:
self._fwds[varg] = env[varg]
for dest, value in fwd.kwargs.items():
self._fwds[dest] = eval(value, env, lenv)
except Exception as err:
raise UsageError("failed on forwarding: %s" % str(err))
if targs.shared:
self._handle_sharedarg(targs.shared)
if hasattr(targs, "assert_output") and targs.assert_output:
aenv = {"__builtins__": pyloco_builtins}
for k, v in self._env.items():
if not k.startswith("_"):
aenv[k] = v
aenv.update(self.parent.shared)
aenv.update(self._fwds)
for boolexpr in targs.assert_output:
for varg in boolexpr.vargs:
assert_result = eval(varg, aenv)
if assert_result:
if self._verbose:
pyloco_print(
'\nOUTPUT TEST PASSED with "%s"' % varg
)
else:
pairs = split_assert_expr(varg)
if not pairs:
raise TestError(
"\nOUTPUT TEST FAILED with '%s' =>"
" not True" % varg
)
elif len(pairs) == 1:
sep, (lexpr, rexpr) = pairs.popitem()
msg = (
"\nOUTPUT TEST(%s) is FAILED.\n "
"Left expr(%s) of '%s' is evaluated to '%s'"
" and\n right expr(%s) of '%s' "
"is evaluated to '%s'.\n"
) % (varg, lexpr, sep, eval(lexpr, aenv), rexpr,
sep, eval(rexpr, aenv))
raise TestError(msg)
else:
msg = (
"\nOUTPUT TEST(%s) FAILED: detected multiple"
" possibilities of this test failure\n"
) % varg
idx = 0
for sep, (lexpr, rexpr) in pairs.items():
idx += 1
try:
msg += (
"CASE%d:\n Left expr(%s)" " of"
" '%s' is evaluated to '%s' and\n"
" right expr(%s) of '%s' is "
"evaluated to '%s'.\n"
) % (idx, lexpr, sep, eval(lexpr, aenv),
rexpr, sep, eval(rexpr, aenv))
except Exception:
pass
raise TestError(msg)
if targs.write_pickle:
ppf = PylocoPickle()
data = dict(self.parent.shared)
data.update(self._fwds)
data.pop("parent_name", None)
pdata = self.write_pickle(ppf, data)
ppf.dump(pdata, targs.write_pickle)
def write_pickle(self, pickler, data):
return data
def read_pickle(self, path):
import pdb; pdb.set_trace()
def _handle_sharedarg(self, shared, forwards):
try:
env = dict(self._env)
env.update(forwards)
for shr in shared:
for varg in shr.vargs:
self.parent.shared[varg] = env[varg]
for dest, value in shr.kwargs.items():
self.parent.shared[dest] = eval(value, env, {})
except Exception as err:
raise UsageError("failed on sharing variable: %s" % str(err))
def run(self, argv, subargv=None, forward=None):
"""task run function
"""
self.subargv = subargv
# attribute setting
if forward is None:
forward = {}
elif not isinstance(forward, dict):
raise InternalError("forward is not a dict type: %s" %
str(forward))
fpenv = {}
fpenv.update(forward)
fpenv.update(self.parent.shared)
if "--read-pickle" in argv:
idx = argv.index("--read-pickle")
ppath = argv.pop(idx+1)
argv.pop(idx)
ppickle = PylocoPickle()
penv = ppickle.load(ppath)
fpenv.update(penv)
# argument parsing
targs, self.unknown_args = self._parser.parse_args(argv, fpenv, parse_known_args=self.parse_known_args)
# pre perform
self.pre_perform(targs)
self.send_websocket_message("pyloco", "task", "Task '%s' is started."
% self._name_)
# perform
if hasattr(self, "_group_perform"):
retval = self._group_perform(targs)
else:
if "_pathid_" in fpenv and isinstance(fpenv["_pathid_"], int):
self._env["_pathid_"] = fpenv["_pathid_"]
retval = self.perform(targs)
if retval is None:
retval = 0
self.send_websocket_message("pyloco", "task", "Task '%s' is finished."
% self._name_)
# post perform
self.post_perform(targs)
_fwds = self._fwds
self._fwds = {}
return retval, _fwds
def perform(self, targs):
"""task perform functiion
Task should implement this function.
"""
raise NotImplementedError("'perform' method is not implemented in %s." % str(self.__class__))
def add_data_argument(self, *vargs, **kwargs):
self._parser.add_data_argument(*vargs, **kwargs)
def del_data_argument(self, name):
self._parser.del_data_argument(name)
def set_data_argument(self, *vargs, **kwargs):
self._parser.set_data_argument(*vargs, **kwargs)
def add_option_argument(self, *vargs, **kwargs):
self._parser.add_option_argument(*vargs, **kwargs)
class OptionTask(Task):
def _lines(name, title, tasks):
lines = [title]
lines.append("-"*len(title))
for task in sorted(tasks):
docs = tasks[task].__doc__
if docs:
lines.append("{0:10} : {1}".format(task,
pydoc.splitdoc(docs)[0]))
else:
lines.append("{0:10} : {0}".format(task))
return lines
def show_installed_tasks(self, tasks):
#installed_tasks = dict((n, t) for n, t in tasks.items)
return self._lines("installed tasks", tasks)
def show_standard_tasks(self):
from pyloco.stdtask import standard_tasks
return self._lines("standard tasks", standard_tasks)
def show_mgmt_tasks(self):
from pyloco.mgmttask import mgmt_tasks
return self._lines("management tasks", mgmt_tasks)
def run(self, argv, subargv=None, forward=None):
mgrname = self.parent.get_managerattr("name")
mgrver = self.parent.get_managerattr("version")
if not argv:
print(self.parent.get_managerattr("usage").format(manager=mgrname))
return 0, None
usage = self.parent.get_managerattr("usage").format(manager=mgrname)
if "--verbose" in argv:
long_desc = self.parent.get_managerattr("long_description")
list_help = self.parent.get_managerattr("list_help").format(
manager=mgrname)
epilog = self.parent.get_managerattr("epilog")
desc = long_desc + " " + list_help
parser = PylocoArgParser(mgrname, mgrver, description=desc,
usage=usage, epilog=epilog)
else:
desc = self.parent.get_managerattr("description")
parser = PylocoArgParser(mgrname, mgrver, description=desc,
usage=usage)
targs = parser.parse_args(argv)
if targs.list:
pyloco_print("")
pyloco_print("Please run '%s <task> -h' for task-specific "
"information." % mgrname)
# installed
installed_tasks = collections.OrderedDict()
default_tasks = self.parent.get_managerattr("default_tasks")
if default_tasks is not None:
for name, cls in default_tasks.items():
installed_tasks[name] = cls
for ep in pkg_resources.iter_entry_points(group='pyloco.task'):
if ep.name not in installed_tasks:
task_class = ep.load()
installed_tasks[ep.name] = task_class
pyloco_print("")
for line in self.show_installed_tasks(installed_tasks):
pyloco_print(line)
pyloco_print("")
for line in self.show_standard_tasks():
pyloco_print(line)
pyloco_print("")
for line in self.show_mgmt_tasks():
pyloco_print(line)
elif targs.verbose:
parser.print_help()
return 0, None
class RemoteTask(Task):
"""Remote task
RemoteTask downloads a remote task and runs it locally.
"""
_version_ = "0.1.0"
def run(self, argv, subargv=None, forward=None):
raise Exception("REMOTETASK")
import pdb; pdb.set_trace() # noqa: E702
class StandardTask(Task):
_installation_ = """'{name}' task is one of pyloco standard tasks.
Standard tasks are already installed when pyloco was installed."""
class ManagementTask(Task):
_installation_ = """'{name}' task is one of pyloco management tasks.
Management tasks are always available once pyloco is installed."""
| 2.015625
| 2
|
azure-iot-device/azure/iot/device/aio/__init__.py
|
olivakar/azure-iot-sdk-python
| 0
|
12521
|
<filename>azure-iot-device/azure/iot/device/aio/__init__.py<gh_stars>0
"""Azure IoT Device Library - Asynchronous
This library provides asynchronous clients for communicating with Azure IoT services
from an IoT device.
"""
from azure.iot.device.iothub.aio import *
from azure.iot.device.provisioning.aio import *
from . import patch_documentation
# Dynamically patch the clients to add shim implementations for all the inherited methods.
# This is necessary to generate accurate online docs.
# It SHOULD not impact the functionality of the methods themselves in any way.
# NOTE In the event of addition of new methods and generation of accurate documentation
# for those methods we have to append content to "patch_documentation.py" file.
# In order to do so please uncomment the "patch.add_shims" lines below,
# enable logging with level "DEBUG" in a python terminal and do
# "import azure.iot.device". The delta between the newly generated output
# and the existing content of "patch_documentation.py" should be appended to
# the function "execute_patch_for_sync" in "patch_documentation.py".
# Once done please again omment out the "patch.add_shims" lines below.
# patch.add_shims_for_inherited_methods(IoTHubDeviceClient) # noqa: F405
# patch.add_shims_for_inherited_methods(IoTHubModuleClient) # noqa: F405
# patch.add_shims_for_inherited_methods(ProvisioningDeviceClient) # noqa: F405
patch_documentation.execute_patch_for_async()
| 1.632813
| 2
|
lino_xl/lib/reception/__init__.py
|
khchine5/xl
| 1
|
12522
|
<reponame>khchine5/xl<gh_stars>1-10
# -*- coding: UTF-8 -*-
# Copyright 2013-2016 <NAME>
#
# License: BSD (see file COPYING for details)
"""This module is for managing a reception desk and a waiting queue:
register clients into a waiting queue as they present themselves at a
reception desk (Empfangsschalter), and unregister them when they leave
again.
It depends on :mod:`lino_xl.lib.cal`. It does not add any model, but
adds some workflow states, actions and tables.
Extended by :mod:`lino_welfare.modlib.reception`.
.. autosummary::
:toctree:
models
workflows
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
"See :class:`lino.core.Plugin`."
verbose_name = _("Reception")
needs_plugins = ['lino.modlib.system', 'lino_xl.lib.cal']
required_user_groups = 'reception'
"""The required user groups for viewing actors of this plugin.
This is overridden by Lino Welfare to include "coaching".
This way of configuring permissions is an example for why it would
be useful to replace user groups by a UserType class (and to
populate UserTypes with subclasses of it).
"""
def setup_main_menu(config, site, user_type, m):
app = site.plugins.reception
m = m.add_menu(app.app_name, app.verbose_name)
m.add_action('cal.EntriesByDay')
m.add_action('reception.WaitingVisitors')
m.add_action('reception.BusyVisitors')
m.add_action('reception.GoneVisitors')
# MyWaitingVisitors is maybe not needed as a menu entry since it
# is also a get_dashboard_items. if i remove it then i must edit
# `pcsw_tests.py`. Waiting for user feedback before doing this.
m.add_action('reception.MyWaitingVisitors')
| 2.21875
| 2
|
old/accent_analyser/rules/RuleRemoveThe.py
|
stefantaubert/eng2ipa-accent-transformer
| 0
|
12523
|
from accent_analyser.rules.EngRule import EngRule
class RuleRemoveThe(EngRule):
def __init__(self, likelihood=1.0):
super().__init__(likelihood)
def _convert_core(self, words: list, current_index: int):
word = words[current_index].content
if word == "the":
return ""
else:
return word
| 2.75
| 3
|
ssrl/providers/base.py
|
AspirinGeyer/PySSRL
| 6
|
12524
|
<reponame>AspirinGeyer/PySSRL
# -*- coding:utf-8 -*-
class BaseProvider(object):
@staticmethod
def loads(link_url):
raise NotImplementedError("Implemetion required.")
@staticmethod
def dumps(conf):
raise NotImplementedError("Implemetion required.")
| 1.96875
| 2
|
tornado_demo/web2py/applications/examples/controllers/global.py
|
ls-2018/tips
| 2
|
12525
|
session.forget()
def get(args):
if args[0].startswith('__'):
return None
try:
obj = globals(), get(args[0])
for k in range(1, len(args)):
obj = getattr(obj, args[k])
return obj
except:
return None
def vars():
"""the running controller function!"""
title = '.'.join(request.args)
attributes = {}
if not request.args:
(doc, keys, t, c, d, value) = ('Global variables', globals(), None, None, [], None)
elif len(request.args) < 3:
obj = get(request.args)
if obj:
doc = getattr(obj, '__doc__', 'no documentation')
keys = dir(obj)
t = type(obj)
c = getattr(obj, '__class__', None)
d = getattr(obj, '__bases__', None)
for key in keys:
a = getattr(obj, key, None)
if a and not isinstance(a, DAL):
doc1 = getattr(a, '__doc__', '')
t1 = type(a)
c1 = getattr(a, '__class__', None)
d1 = getattr(a, '__bases__', None)
key = '.'.join(request.args) + '.' + key
attributes[key] = (doc1, t1, c1, d1)
else:
doc = 'Unkown'
keys = []
t = c = d = None
else:
raise HTTP(400)
return dict(
title=title,
args=request.args,
t=t,
c=c,
d=d,
doc=doc,
attributes=attributes,
)
| 2.171875
| 2
|
medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py
|
drunkwater/leetcode
| 0
|
12526
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#223. Rectangle Area
#Find the total area covered by two rectilinear rectangles in a 2D plane.
#Each rectangle is defined by its bottom left corner and top right corner as shown in the figure.
#Assume that the total area is never beyond the maximum possible value of int.
#Credits:
#Special thanks to @mithmatt for adding this problem, creating the above image and all test cases.
#class Solution:
# def computeArea(self, A, B, C, D, E, F, G, H):
# """
# :type A: int
# :type B: int
# :type C: int
# :type D: int
# :type E: int
# :type F: int
# :type G: int
# :type H: int
# :rtype: int
# """
# Time Is Money
| 3.40625
| 3
|
serialTest.py
|
fmuno003/SeniorDesign
| 1
|
12527
|
import serial
import RPi.GPIO as GPIO
import time
ser=serial.Serial("/dev/ttyACM0",9600)
start_time = time.time()
imu = open("IMU.txt","w")
while time.time() - start_time <= 1:
ser.readline()
while time.time() - start_time <= 8:
read_ser=ser.readline()
if float(read_ser) == 0.00:
pass
else:
read = read_ser.strip('\n')
imu.write(read)
imu.write('\n')
imu.close()
| 2.796875
| 3
|
src/pyg_base/_zip.py
|
nclarey/pyg-base
| 0
|
12528
|
from pyg_base._types import is_iterable
from pyg_base._loop import len0
__all__ = ['zipper', 'lens']
def lens(*values):
"""
measures (and enforces) a common length across all values
:Parameters:
----------------
*values : lists
Raises
------
ValueError
if you have values with multi lengths.
:Returns:
-------
int
common length.
:Example:
--------------
>>> assert lens() == 0
>>> assert lens([1,2,3], [2,4,5]) == 3
>>> assert lens([1,2,3], [2,4,5], [6]) == 3
"""
if len0(values) == 0:
return 0
all_lens = [len0(value) for value in values]
lens = set(all_lens) - {1}
if len(lens)>1:
raise ValueError('found multiple lengths %s '%lens)
return list(lens)[0] if lens else 1
def zipper(*values):
"""
a safer version of zip
:Examples: zipper works with single values as well as full list:
---------------
>>> assert list(zipper([1,2,3], 4)) == [(1, 4), (2, 4), (3, 4)]
>>> assert list(zipper([1,2,3], [4,5,6])) == [(1, 4), (2, 5), (3, 6)]
>>> assert list(zipper([1,2,3], [4,5,6], [7])) == [(1, 4, 7), (2, 5, 7), (3, 6, 7)]
>>> assert list(zipper([1,2,3], [4,5,6], None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
>>> assert list(zipper((1,2,3), np.array([4,5,6]), None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
:Examples: zipper rejects multi-length lists
---------------
>>> import pytest
>>> with pytest.raises(ValueError):
>>> zipper([1,2,3], [4,5])
:Parameters:
----------------
*values : lists
values to be zipped
:Returns:
-------
zipped values
"""
values = [list(value) if isinstance(value, zip) else value if is_iterable(value) else [value] for value in values]
n = lens(*values)
values = [value * n if len(value) == 1 else value for value in values]
return zip(*values)
| 3.765625
| 4
|
cli/pawls/preprocessors/grobid.py
|
vtcaregorodtcev/pawls-1
| 0
|
12529
|
<reponame>vtcaregorodtcev/pawls-1
import json
from typing import List
import requests
from pawls.preprocessors.model import Page
def fetch_grobid_structure(pdf_file: str, grobid_host: str = "http://localhost:8070"):
files = {
"input": (pdf_file, open(pdf_file, "rb"), "application/pdf", {"Expires": "0"})
}
url = "{}/api/processPdfStructure".format(grobid_host)
resp = requests.post(url, files=files)
if resp.status_code == 200:
return json.loads(resp.text)
else:
raise Exception("Grobid returned status code {}".format(resp.status_code))
def parse_annotations(grobid_structure) -> List[Page]:
pages = []
for grobid_page in grobid_structure["tokens"]["pages"]:
tokens = []
for token in grobid_page["tokens"]:
tokens.append(
dict(
text=token["text"],
x=token["x"],
y=token["y"],
width=token["width"],
height=token["height"],
)
)
page = dict(
page=dict(
width=grobid_page["page"]["width"],
height=grobid_page["page"]["height"],
index=grobid_page["page"]["pageNumber"] - 1,
),
tokens=tokens,
)
pages.append(page)
return pages
def process_grobid(
pdf_file: str,
grobid_host: str = "http://localhost:8070"
):
"""
Integration for importing annotations from grobid.
Depends on a grobid API built from our fork https://github.com/allenai/grobid.
Fetches a PDF by sha, sends it to the Grobid API and returns them.
pdf_file: str
The path to the pdf file to process.
grobid_host: str (optional, default="http://localhost:8070")
The forked grobid API which we use to produce the annotations.
"""
grobid_structure = fetch_grobid_structure(pdf_file, grobid_host)
annotations = parse_annotations(grobid_structure)
return annotations
| 2.65625
| 3
|
scripts/run_custom_eslint_tests.py
|
lheureuxe13/oppia
| 4
|
12530
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tests for custom eslint checks."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import subprocess
import sys
from core import python_utils
from scripts import common
def main():
"""Run the tests."""
node_path = os.path.join(common.NODE_PATH, 'bin', 'node')
nyc_path = os.path.join('node_modules', 'nyc', 'bin', 'nyc.js')
mocha_path = os.path.join('node_modules', 'mocha', 'bin', 'mocha')
filepath = 'scripts/linters/custom_eslint_checks/rules/'
proc_args = [node_path, nyc_path, mocha_path, filepath]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
encoded_tests_stdout, encoded_tests_stderr = proc.communicate()
# Standard and error output is in bytes, we need to decode the line to
# print it.
tests_stdout = encoded_tests_stdout.decode('utf-8')
tests_stderr = encoded_tests_stderr.decode('utf-8')
if tests_stderr:
python_utils.PRINT(tests_stderr)
sys.exit(1)
python_utils.PRINT(tests_stdout)
if 'failing' in tests_stdout:
python_utils.PRINT('---------------------------')
python_utils.PRINT('Tests not passed')
python_utils.PRINT('---------------------------')
sys.exit(1)
else:
python_utils.PRINT('---------------------------')
python_utils.PRINT('All tests passed')
python_utils.PRINT('---------------------------')
coverage_result = re.search = re.search(
r'All files\s*\|\s*(?P<stmts>\S+)\s*\|\s*(?P<branch>\S+)\s*\|\s*'
r'(?P<funcs>\S+)\s*\|\s*(?P<lines>\S+)\s*\|\s*', tests_stdout)
if (coverage_result.group('stmts') != '100' or
coverage_result.group('branch') != '100' or
coverage_result.group('funcs') != '100' or
coverage_result.group('lines') != '100'):
raise Exception('Eslint test coverage is not 100%')
if __name__ == '__main__':
main()
| 2.359375
| 2
|
nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py
|
joycezyu/cappresse
| 0
|
12531
|
from __future__ import print_function
from pyomo.environ import *
from pyomo.core.base import Constraint, Objective, Suffix, minimize
from pyomo.opt import ProblemFormat, SolverFactory
from nmpc_mhe.dync.NMPCGenv2 import NmpcGen
from nmpc_mhe.mods.bfb.nob5_hi_t import bfb_dae
from snap_shot import snap
import sys, os
import itertools, sys
from numpy.random import normal as npm
# SWITCH TO JUST ONE COLLOCATION POINT AND FINITE ELEMENT
states = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
# x_noisy = ["Ngb", "Hgb", "Ngc", "Hgc", "Nsc", "Hsc", "Nge", "Hge", "Nse", "Hse", "mom"]
# x_noisy = ["Hse"]
x_noisy = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
u = ["u1"]
u_bounds = {"u1":(162.183495794 * 0.0005, 162.183495794 * 10000)}
ref_state = {("c_capture", ((),)): 0.63}
# Known targets 0.38, 0.4, 0.5
nfe_mhe = 10
y = ["Tgb", "vg"]
nfet = 10
ncpx = 3
nfex = 5
tfe = [i for i in range(1, nfe_mhe + 1)]
lfe = [i for i in range(1, nfex + 1)]
lcp = [i for i in range(1, ncpx + 1)]
lc = ['c', 'h', 'n']
y_vars = {
"Tgb": [i for i in itertools.product(lfe, lcp)],
"vg": [i for i in itertools.product(lfe, lcp)]
}
# x_vars = dict()
x_vars = {
# "Nge": [i for i in itertools.product(lfe, lcp, lc)],
# "Hge": [i for i in itertools.product(lfe, lcp)],
"Nsc": [i for i in itertools.product(lfe, lcp, lc)],
"Hsc": [i for i in itertools.product(lfe, lcp)],
"Nse": [i for i in itertools.product(lfe, lcp, lc)],
"Hse": [i for i in itertools.product(lfe, lcp)],
"Hgc": [i for i in itertools.product(lfe, lcp)],
"Hge": [i for i in itertools.product(lfe, lcp)],
# "mom": [i for i in itertools.product(lfe, lcp)]
}
# States -- (5 * 3 + 6) * fe_x * cp_x.
# For fe_x = 5 and cp_x = 3 we will have 315 differential-states.
e = NmpcGen(bfb_dae, 400/nfe_mhe, states, u,
ref_state=ref_state, u_bounds=u_bounds,
nfe_tnmpc=nfe_mhe, ncp_tnmpc=1,
nfe_t=5, ncp_t=1)
# 10 fe & _t=1000 definitely degenerate
# 10 fe & _t=900 definitely degenerate
# 10 fe & _t=120 sort-of degenerate
# 10 fe & _t=50 sort-of degenerate
# 10 fe & _t=50 eventually sort-of degenerate
# 10 fe & _t=1 eventually sort-of degenerate
e.SteadyRef.dref = snap
e.load_iguess_steady()
e.SteadyRef.create_bounds()
e.solve_steady_ref()
e.SteadyRef.report_zL(filename="mult_ss")
e.load_d_s(e.PlantSample)
e.PlantSample.create_bounds()
e.solve_dyn(e.PlantSample)
q_cov = {}
for i in tfe:
for j in itertools.product(lfe, lcp, lc):
q_cov[("Nse", j), ("Nse", j), i] = 7525.81478168 * 0.005
q_cov[("Nsc", j), ("Nsc", j), i] = 117.650089456 * 0.005
# q_cov[("Nse", j), ("Nse", j), i] = 735.706082714 * 0.005
for i in tfe:
for j in itertools.product(lfe, lcp):
# q_cov[("Hge", j), ("Hge", j), i] = 2194.25390583 * 0.005
q_cov[("Hse", j), ("Hse", j), i] = 731143.716603 * 0.005
q_cov[("Hsc", j), ("Hsc", j), i] = 16668.3312216 * 0.005
q_cov[("Hge", j), ("Hge", j), i] = 2166.86838591 * 0.005
q_cov[("Hgc", j), ("Hgc", j), i] = 47.7911012193 * 0.005
# q_cov[("mom", j), ("mom", j), i] = 1.14042251669 * 0.005
# for i in lfe:
# for j in [(1,1, 'c'), (5,3, 'c')]:
# m_cov[("yb", j), ("yb", j), i] = 1e-04
u_cov = {}
for i in [i for i in range(1, nfe_mhe+1)]:
u_cov["u1", i] = 162.183495794 * 0.005
m_cov = {}
for i in tfe:
for j in itertools.product(lfe, lcp):
m_cov[("Tgb", j), ("Tgb", j), i] = 40 * 0.005
m_cov[("vg", j), ("vg", j), i] = 0.902649386907 * 0.005
e.find_target_ss() #: Compute target-steady state (beforehand)
#: Create NMPC
e.create_nmpc()
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
ipsr = SolverFactory('ipopt', executable="/home/dav0/Apps/IpoptSR/Ipopt/build/bin/ipoptSR")
ref_state = {("c_capture", ((),)): 0.50}
e.find_target_ss(ref_state=ref_state) #: Compute target-steady state (beforehand)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
for i in range(1, 1000):
ps = e.solve_dyn(e.PlantSample, stop_if_nopt=False)
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
if ps != 0:
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
e.update_state_real() # update the current state
e.update_soi_sp_nmpc()
#
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
# if stat_nmpc != 0:
# stat_nmpc = e.solve_dyn(e.olnmpc,
# stop_if_nopt=True,
# skip_update=False,
# iter_max=300, ma57_pivtol=1e-12)
if stat_nmpc != 0:
strategy = 1
if strategy == 1:
if e.nfe_tnmpc == 1:
pass
else:
e.create_nmpc(newnfe=e.ncp_tnmpc-1, newncp=1)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+02)
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
else:
e.olnmpc.write_nl(name="bad.nl")
# e.olnmpc.pprint(filename="bad_" + str(i))
with open("ipopt.opt", "w") as f:
f.write("linear_solver ma57\n"
"ma57_dep_tol 1e-8\nbig_M 1e30\n")
f.close()
ipsr.solve(e.olnmpc, tee=True)
e.update_u(e.olnmpc)
e.print_r_nmpc()
e.cycleSamPlant(plant_step=True)
e.plant_uinject(e.PlantSample, src_kind="dict", nsteps=10, skip_homotopy=True)
# e.plant_input_gen(e.PlantSample, "mod", src=e.ss2)
| 1.8125
| 2
|
csvapi/security.py
|
quaxsze/csvapi
| 15
|
12532
|
from urllib.parse import urlparse
from quart import current_app as app, request, jsonify
def filter_referrers():
filters = app.config.get('REFERRERS_FILTER')
if not filters:
return None
referrer = request.referrer
if referrer:
parsed = urlparse(referrer)
for filter in filters:
if parsed.hostname.endswith(filter):
return None
return jsonify({
'ok': False,
'error': 'Unauthorized',
}), 403
| 2.515625
| 3
|
linkit/models.py
|
what-digital/linkit
| 8
|
12533
|
from django.db import models
from filer.fields.file import FilerFileField
class FakeLink(models.Model):
"""
In our widget we need to manually render a AdminFileFormField. Basically for every other Field type this is not
a problem at all, but Failer needs a rel attribute which consists of a reverse relationship. We fake it
with this model.
"""
fake_file = FilerFileField(blank=True, null=True, on_delete=models.CASCADE)
| 2.171875
| 2
|
tests/helper.py
|
nirs/python-manhole
| 0
|
12534
|
from __future__ import print_function
import atexit
import errno
import logging
import os
import select
import signal
import sys
import time
from process_tests import setup_coverage
TIMEOUT = int(os.getenv('MANHOLE_TEST_TIMEOUT', 10))
SOCKET_PATH = '/tmp/manhole-socket'
OUTPUT = sys.__stdout__
def handle_sigterm(signo, _frame):
# Simulate real termination
print("Terminated", file=OUTPUT)
sys.exit(128 + signo)
# Handling sigterm ensure that atexit functions are called, and we do not leave
# leftover /tmp/manhole-pid sockets.
signal.signal(signal.SIGTERM, handle_sigterm)
@atexit.register
def log_exit():
print("In atexit handler.", file=OUTPUT)
def setup_greenthreads(patch_threads=False):
try:
from gevent import monkey
monkey.patch_all(thread=False)
except (ImportError, SyntaxError):
pass
try:
import eventlet
eventlet.monkey_patch(thread=False)
except (ImportError, SyntaxError):
pass
def do_fork():
pid = os.fork()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
os.waitpid(pid, 0)
else:
time.sleep(TIMEOUT * 10)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
)
test_name = sys.argv[1]
try:
setup_coverage()
if os.getenv('PATCH_THREAD', False):
import manhole
setup_greenthreads(True)
else:
setup_greenthreads(True)
import manhole
if test_name == 'test_activate_on_usr2':
manhole.install(activate_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_install_once':
manhole.install()
try:
manhole.install()
except manhole.AlreadyInstalled:
print('ALREADY_INSTALLED')
else:
raise AssertionError("Did not raise AlreadyInstalled")
elif test_name == 'test_stderr_doesnt_deadlock':
import subprocess
manhole.install()
for i in range(50):
print('running iteration', i)
p = subprocess.Popen(['true'])
print('waiting for process', p.pid)
p.wait()
print('process ended')
path = '/tmp/manhole-%d' % p.pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
print('SUCCESS')
elif test_name == 'test_fork_exec':
manhole.install(reinstall_delay=5)
print("Installed.")
time.sleep(0.2)
pid = os.fork()
print("Forked, pid =", pid)
if pid:
os.waitpid(pid, 0)
path = '/tmp/manhole-%d' % pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
else:
try:
time.sleep(1)
print("Exec-ing `true`")
os.execvp('true', ['true'])
finally:
os._exit(1)
print('SUCCESS')
elif test_name == 'test_activate_on_with_oneshot_on':
manhole.install(activate_on='USR2', oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_interrupt_on_accept':
def handle_usr2(_sig, _frame):
print('Got USR2')
signal.signal(signal.SIGUSR2, handle_usr2)
import ctypes
import ctypes.util
libpthread_path = ctypes.util.find_library("pthread")
if not libpthread_path:
raise ImportError
libpthread = ctypes.CDLL(libpthread_path)
if not hasattr(libpthread, "pthread_setname_np"):
raise ImportError
pthread_kill = libpthread.pthread_kill
pthread_kill.argtypes = [ctypes.c_void_p, ctypes.c_int]
pthread_kill.restype = ctypes.c_int
manhole.install(sigmask=None)
for i in range(15):
time.sleep(0.1)
print("Sending signal to manhole thread ...")
pthread_kill(manhole._INST.ident, signal.SIGUSR2)
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_oneshot_on_usr2':
manhole.install(oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name.startswith('test_signalfd_weirdness'):
if 'negative' in test_name:
manhole.install(sigmask=None)
else:
manhole.install(sigmask=[signal.SIGCHLD])
time.sleep(0.3) # give the manhole a bit enough time to start
print('Starting ...')
import signalfd
signalfd.sigprocmask(signalfd.SIG_BLOCK, [signal.SIGCHLD])
fd = signalfd.signalfd(0, [signal.SIGCHLD], signalfd.SFD_NONBLOCK|signalfd.SFD_CLOEXEC)
for i in range(200):
print('Forking %s:' % i)
pid = os.fork()
print(' - [%s/%s] forked' % (i, pid))
if pid:
while 1:
print(' - [%s/%s] selecting on: %s' % (i, pid, [fd]))
read_ready, _, errors = select.select([fd], [], [fd], 1)
if read_ready:
try:
print(' - [%s/%s] reading from signalfd ...' % (i, pid))
print(' - [%s] read from signalfd: %r ' % (i, os.read(fd, 128)))
break
except OSError as exc:
print(' - [%s/%s] reading from signalfd failed with errno %s' % (i, pid, exc.errno))
else:
print(' - [%s/%s] reading from signalfd failed - not ready !' % (i, pid))
if 'negative' in test_name:
time.sleep(1)
if errors:
raise RuntimeError("fd has error")
else:
print(' - [%s/%s] exiting' % (i, pid))
os._exit(0)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_auth_fail':
manhole.get_peercred = lambda _: (-1, -1, -1)
manhole.install()
time.sleep(TIMEOUT * 10)
elif test_name == 'test_socket_path':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_daemon_connection':
manhole.install(daemon_connection=True)
time.sleep(TIMEOUT)
elif test_name == 'test_socket_path_with_fork':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
do_fork()
elif test_name == 'test_locals':
manhole.install(socket_path=SOCKET_PATH,
locals={'k1': 'v1', 'k2': 'v2'})
time.sleep(TIMEOUT)
elif test_name == 'test_locals_after_fork':
manhole.install(locals={'k1': 'v1', 'k2': 'v2'})
do_fork()
elif test_name == 'test_redirect_stderr_default':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
elif test_name == 'test_redirect_stderr_disabled':
manhole.install(socket_path=SOCKET_PATH, redirect_stderr=False)
time.sleep(TIMEOUT)
elif test_name == 'test_sigmask':
manhole.install(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])
time.sleep(TIMEOUT)
else:
manhole.install()
time.sleep(0.3) # give the manhole a bit enough time to start
if test_name == 'test_simple':
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_forkpty':
time.sleep(1)
pid, masterfd = os.forkpty()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
while not os.waitpid(pid, os.WNOHANG)[0]:
try:
os.write(2, os.read(masterfd, 1024))
except OSError as e:
print("Error while reading from masterfd:", e)
else:
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_fork':
time.sleep(1)
do_fork()
else:
raise RuntimeError('Invalid test spec.')
except: # pylint: disable=W0702
print('Died with %s.' % sys.exc_info()[0].__name__, file=OUTPUT)
import traceback
traceback.print_exc(file=OUTPUT)
print('DIED.', file=OUTPUT)
| 1.953125
| 2
|
ecommerce/User/admin.py
|
AwaleRohin/commerce-fm
| 18
|
12535
|
from django.contrib import admin
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from . import models
if settings.HAS_ADDITIONAL_USER_DATA:
try:
class UserProfileInline(admin.TabularInline):
model = models.UserProfile
extra = 0
except (Exception, KeyError) as e:
raise ImproperlyConfigured("User/admin.py:: Multi Vendor is turned on.")
class UserAdmin(admin.ModelAdmin):
list_display = ['get_full_name', 'email', 'is_verified']
search_fields = ['get_full_name', 'email', 'date_joined', 'username']
list_filter = ('groups',)
if settings.HAS_ADDITIONAL_USER_DATA:
inlines = [ UserProfileInline, ]
def save_model(self, request, obj, form, change):
if 'password' in form.changed_data:
obj.set_password(request.POST['password'])
obj.save()
admin.site.register(models.User, UserAdmin)
admin.site.register(models.IpAddress)
admin.site.register(models.CityFromIpAddress)
admin.site.register(models.Marketing)
| 2.21875
| 2
|
client/external/xp_tracker.py
|
Suirdna/OR-Origin
| 0
|
12536
|
from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer
from client.config import config as c, language as l
from discord.ext import commands, tasks
from client.external.hiscores import hiscores_xp
from PIL import Image, ImageDraw, ImageFont
import discord, locale
class xp_tracker(commands.Cog):
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
name = 'xp_tracker'
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@staticmethod
async def fun_xptracker(ctx):
try:
path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
if ctx.message.content == '.xptracker':
path3 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path3)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['tracker'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
else:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, c.CLIENT_ICON, l.xp_tracker[guild_l]['embed_1'].format(ctx.guild.name), new_json_string['data'], False)
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
async def fun_addxpevent(self, ctx):
try:
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
guild_l = await origin.get_language(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 9:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = self.PRE
LIST2 = self.NAME
LIST3 = self.ICON
DATA1 = await json_manager.get_data(path2)
ID = await origin.randomize()
STATUS = True
STATUS2 = False
while STATUS:
for data in DATA1:
if data['id'] == ID:
STATUS2 = True
if not STATUS2:
STATUS = False
else:
ID = await origin.randomize()
EXTRA = ''
NAME = ''
for value in LIST2:
if str(value).lower() == STRING[2].lower():
NAME = str(value)
for index, event in enumerate(LIST1):
if STRING[2] == event:
RUSH = None
if STRING[1].isdigit() and int(STRING[1]) > 1:
RUSH = l.xp_tracker[guild_l]['configuration']['rush_point'].format(locale.format_string('%d', int(STRING[1]), grouping=True))
path4 = c.ORIGIN_PATH['embed.tracker.json']
DESCRIPTION = l.xp_tracker[guild_l]['description_1'].format(
ctx.author.mention,
STRING[4], STRING[6], NAME, STRING[5] if not RUSH else l.xp_tracker[guild_l]['extra_4'], STRING[7] if not RUSH else l.xp_tracker[guild_l]['extra_4'], RUSH if RUSH else ''
)
if len(STRING) >= 8:
for value in STRING[8:]:
EXTRA += '{} '.format(value)
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
for key, value in json_string[guild_l]['xp_tracker']['addevent'].items():
if int(key) == 1:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(EXTRA)
})
if int(key) == 2:
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): value['value']
})
if STRING[1].isdigit():
mode_type = 0
if int(STRING[1]) == c.EVENT_MODE[0]:
mode_type = 1
elif int(STRING[1]) >= c.EVENT_MODE[1]:
mode_type = 2
EVENT_CHANNEL = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
embed = await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, LIST3[index], l.xp_tracker[guild_l]['embed_2'].format(ctx.guild.name), new_json_string['data'], False, False, EVENT_CHANNEL, DESCRIPTION)
json_string = {'id': ID, 'user_id': ctx.author.id, 'message_id': embed.id, 'event_name': STRING[2], 'xp_target': int(STRING[1]), 'prize_count': int(STRING[3]), 'date_start': STRING[4], 'date_end': STRING[5], 'time_start': int(STRING[6]), 'time_end': int(STRING[7]), 'participants': 0, 'status': 0, 'type': mode_type, 'win_message': 0}
await json_manager.create(path2, json_string)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_1'])
CHANNEL1 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat0'])
CHANNEL2 = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['chat1'])
if CHANNEL1:
await CHANNEL1.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
if CHANNEL2:
await CHANNEL2.send(l.xp_tracker[guild_l]['msg_post_1'].format(NAME, server_config['events']))
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.xp_tracker[guild_l]['msg_badformat_1'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
except Exception as error:
await exception.error(error)
@staticmethod
async def fun_removeallxp(ctx, system=None):
try:
guild_l = await origin.get_language(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path1 = c.GUILD_PATH['special_member.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path2 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
path3 = c.GUILD_PATH['event.json'].format(ctx.guild.id if hasattr(ctx, 'guild') else ctx)
LIST1 = await json_manager.get_data(path3)
NEW_LIST1 = {'data': []}
NEW_LIST2 = {'data': []}
if hasattr(ctx, 'guild'):
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['organizer']]
if await permissions.get_user_permission(path1, target_keys, target_values) or ctx.author.id == ctx.guild.owner.id or ctx.author.id == c.CLIENT_ADMINISTRATION_ID:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
await ctx.author.send(l.xp_tracker[guild_l]['msg_success_2'])
else:
await ctx.author.send(l.user_permissions[guild_l]['msg_restricted_1'])
elif system == 1:
if LIST1:
for data in LIST1:
if data['type'] == 0 and data['status'] == 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 3 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
elif data['type'] == 4 and data['status'] >= 0:
NEW_LIST2['data'].append(data)
await json_manager.clear_and_update(path2, NEW_LIST1)
await json_manager.clear_and_update(path3, NEW_LIST2)
except Exception as error:
await exception.error(error)
async def fun_axp(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 2:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
server_config = await json_manager.get_json(path)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
EVENT_LIST_DATA = []
SAFE_CHECK = 0
userName = ''
for name in STRING[1:]:
userName += '{} '.format(name)
userName = userName.replace('_', ' ')
userName = userName.rstrip()
for value in LIST1:
if value['user_id'] == ctx.author.id or value['user_rsn'] == userName:
STATUS1 = True
if not STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
USERNAME = USERNAME.replace('%20', ' ')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
json_string = {'user_id': ctx.author.id, 'user_username': ctx.author.mention, 'user_rsn': userName}
for value in EVENT_NAME:
json_string.update({value: USER.stats[value]['experience']})
json_string.update({'{}_current'.format(value): USER.stats[value]['experience']})
await json_manager.create(path1, json_string)
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
path4 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
role_id = await ini_manager.get_data('SECTION1', 'EVENT_ROLE', path4)
role = await discord_manager.get_role(self.client, ctx.guild.id, int(role_id))
if role:
user = await discord_manager.get_member(self.client, ctx.guild.id, ctx.author.id)
await user.add_roles(role, reason='{}'.format(c.DISCORD_MESSAGES['event_role_added']), atomic=True)
await ctx.send(l.xp_tracker[guild_l]['msg_1'].format(USERNAME, server_config['events']))
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
EVENT_STATUS = False
MEMBER_DATA = None
for MEMBER in LIST1:
if ctx.author.id == MEMBER['user_id']:
MEMBER_DATA = MEMBER
for EVENT in LIST2:
for key, value in MEMBER_DATA.items():
if (EVENT['type'] == 1 or EVENT['type'] == 2) and key == EVENT['event_name']:
EVENT_STATUS = True
if not EVENT_STATUS and (EVENT['type'] == 1 or EVENT['type'] == 2):
EVENT_STATUS = False
CHECK = True
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
target_keys = ['{}'.format(EVENT['event_name']), '{}_current'.format(EVENT['event_name'])]
target_values = [USER.stats[EVENT['event_name']]['experience'], USER.stats[EVENT['event_name']]['experience']]
await json_manager.update(path1, 'user_id', ctx.author.id, target_keys, target_values)
await user.send(l.xp_tracker[guild_l]['msg_6'].format(str(EVENT['event_name']).capitalize()))
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
EVENT_NAME.append(value2['event_name'])
SUM = value2['participants'] + 1
EVENT_LIST_DATA.append({'id': value2['id'], 'type': value2['type'], 'sum': SUM})
for event_data in EVENT_LIST_DATA:
await json_manager.update(path2, 'id', event_data['id'], 'participants', event_data['sum'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_1'])
else:
EVENT_STATUS = False
await user.send(l.xp_tracker[guild_l]['msg_7'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_badformat_2'].format(ctx.author.mention))
except Exception as error:
await exception.error(error)
async def fun_xpupdate(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
guild_t = await origin.get_region(ctx.guild.id)
path1 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
LIST1 = await json_manager.get_data(path1)
LIST2 = await json_manager.get_data(path2)
CHECK = True
user = self.client.get_user(ctx.author.id)
guild_current = await server_timer.get_current_time(guild_t)
STATUS1 = False
STATUS2 = False
EVENT_NAME = []
SAFE_CHECK = 0
MEMBER = None
userName = ''
for value in LIST1:
if value['user_id'] == ctx.author.id:
STATUS1 = True
userName = value['user_rsn']
MEMBER = value
if STATUS1:
for value2 in LIST2:
if value2['type'] == 1 or value2['type'] == 2:
STATUS2 = True
EVENT_NAME.append(value2['event_name'])
if STATUS2:
while CHECK:
USERNAME = userName.replace(' ', '%20')
USER = hiscores_xp.Hiscores(USERNAME, 'N')
if USER.status != 404:
if hasattr(USER, 'stats'):
CHECK = False
for value in EVENT_NAME:
await json_manager.update(path1, 'user_id', ctx.author.id, '{}_current'.format(value), USER.stats[value]['experience'])
client_message = 'Guild id: {} | Event: {} | RSN: {} | Registration XP: {} | Current XP: {} | Guild time: {} | Status: {}'.format(ctx.guild.id, value, userName, MEMBER[value], USER.stats[value]['experience'], guild_current.strftime('%H:%M'), 'XP self update')
await console_interface.console_message('XP self update', client_message)
await user.send(l.xp_tracker[guild_l]['msg_success_4'])
else:
SAFE_CHECK += 1
if SAFE_CHECK >= 10:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_3'])
else:
CHECK = False
await user.send(l.xp_tracker[guild_l]['msg_error_4'].format(userName))
else:
await user.send(l.xp_tracker[guild_l]['msg_2'])
else:
await user.send(l.xp_tracker[guild_l]['msg_error_5'])
except Exception as error:
await exception.error(error)
async def fun_xprank(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
LIST1 = self.PNG
LIST2 = self.PRE
INFO_PANEL_IMAGE = self.INFO_PANEL_IMAGE
INFO_PANEL_FIRST_IMAGE = self.INFO_PANEL_FIRST_IMAGE
INFO_PANEL_SECOND_IMAGE = self.INFO_PANEL_SECOND_IMAGE
INFO_PANEL_THIRD_IMAGE = self.INFO_PANEL_THIRD_IMAGE
COLOR_PLACE_FIRST = (255, 30, 215)
COLOR_PLACE_SECOND = (0, 174, 255)
COLOR_PLACE_THIRD = (255, 31, 31)
COLOR_PLACE_DEFAULT = (0, 239, 0)
FONT_PATH = self.FONT_PATH
INFO_PANEL_OBJECT = None
RANK = 0
sum = None
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
server_config = await json_manager.get_json(path)
CHANNEL_STATUS = True
if CHANNEL_PERMISSIONS == 1:
pass
else:
if ctx.message.channel.id == server_config['chat0']:
CHANNEL_STATUS = False
if CHANNEL_STATUS:
STRING = str(ctx.message.content).split(' ')
def get_id(data_value):
return int(data_value.get('sum'))
if len(STRING) == 1:
user = self.client.get_user(ctx.author.id)
else:
DCID = await origin.find_and_replace(STRING[1])
user = self.client.get_user(DCID)
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
STATUS = None
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
if DATA1:
for index, data in enumerate(DATA1):
if DATA2:
for index2, data2 in enumerate(DATA2):
for key, value in data2.items():
if str(data['event_name']) == str(key):
sum = data2['{}_current'.format(key)] - data2[key]
DATA3.append({'user_rsn': data2['user_rsn'], 'user_id': data2['user_id'], 'sum': sum})
for index3, value3 in enumerate(LIST2):
if str(value3) == str(key):
INFO_PANEL_OBJECT = LIST1[index3]
DATA3.sort(key=get_id, reverse=True)
for index3, data3 in enumerate(DATA3):
RANK += 1
if RANK == 1:
PLACE_IMAGE = INFO_PANEL_FIRST_IMAGE
PLACE_COLOR = COLOR_PLACE_FIRST
elif RANK == 2:
PLACE_IMAGE = INFO_PANEL_SECOND_IMAGE
PLACE_COLOR = COLOR_PLACE_SECOND
elif RANK == 3:
PLACE_IMAGE = INFO_PANEL_THIRD_IMAGE
PLACE_COLOR = COLOR_PLACE_THIRD
else:
PLACE_IMAGE = INFO_PANEL_IMAGE
PLACE_COLOR = COLOR_PLACE_DEFAULT
if hasattr(user, 'id'):
if user.id == data3['user_id']:
with Image.open(PLACE_IMAGE).convert('RGBA') as im:
with Image.open(INFO_PANEL_OBJECT).convert('RGBA') as im2:
size1 = im.size
size2 = im2.size
y = int(size1[1] / 2) - int(size2[1] / 2)
im.paste(im2, (18, y), im2)
draw = ImageDraw.Draw(im)
font = ImageFont.truetype(FONT_PATH, 16)
draw.text((50, y - 12), l.xp_tracker[guild_l]['configuration']['rsn'], PLACE_COLOR, font=font)
draw.text((50, y + 2), l.xp_tracker[guild_l]['configuration']['rank'], PLACE_COLOR, font=font)
draw.text((50, y + 18), l.xp_tracker[guild_l]['configuration']['xp'], PLACE_COLOR, font=font)
draw.text((110 if guild_l == 'LT' else 95, y - 12), '{}'.format(data3['user_rsn']), (255, 255, 255), font=font)
draw.text((130 if guild_l == 'LT' else 100, y + 2), '{}'.format(RANK), (255, 255, 255), font=font)
draw.text((98 if guild_l == 'LT' else 70, y + 18), '{} XP'.format(locale.format_string('%d', data3['sum'], grouping=True)), (255, 255, 255), font=font)
TEMP_FILE = '{}_{}_{}.png'.format(data3['user_rsn'], data['event_name'], sum)
im.save(TEMP_FILE, 'PNG')
rank = open(TEMP_FILE, 'rb')
await ctx.send(file=discord.File(rank))
rank.close()
await file_manager.delete_file(TEMP_FILE)
STATUS = True
if not STATUS:
await ctx.send(l.xp_tracker[guild_l]['msg_error_6'].format(ctx.author.mention))
RANK = 0
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
async def fun_xpstats(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
server_config = await json_manager.get_json(path)
path1 = c.GUILD_PATH['{}.ini'.format(self.name)].format(ctx.guild.id)
ini = await ini_manager.get_ini(path1)
CHANNEL_PERMISSIONS = int(ini['CHANNEL_PERMISSIONS']['STATUS'])
CHANNEL_STATUS = True
if CHANNEL_PERMISSIONS == 1:
pass
else:
if ctx.message.channel.id == server_config['chat0']:
CHANNEL_STATUS = False
if CHANNEL_STATUS:
path2 = c.GUILD_PATH['event.json'].format(ctx.guild.id)
path3 = c.GUILD_PATH['tracker.json'].format(ctx.guild.id)
LIST1 = self.ICON
LIST2 = self.PRE
IMAGE = None
EVENT_NAME = None
await origin.get_locale()
TEMP_DATA = await json_manager.get_data(path2)
DATA1 = []
DATA2 = await json_manager.get_data(path3)
DATA3 = []
for value in TEMP_DATA:
if value['type'] == 1 or value['type'] == 2:
DATA1.append(value)
def get_id(INFO):
return int(INFO.get('sum'))
if DATA1:
for data1 in DATA1:
if DATA2:
for data2 in DATA2:
for key, value in data2.items():
if str(key) == str(data1['event_name']):
sum = data2['{}_current'.format(key)]-data2[key]
DATA3.append({'user_username': data2['user_username'], 'user_rsn': data2['user_rsn'], 'sum': sum})
if data1['type'] == 1:
EVENT_NAME = '{} [ S ]'.format(str(data1['event_name']).capitalize())
if data1['type'] == 2:
EVENT_NAME = '{} [ R ]'.format(str(data1['event_name']).capitalize())
for index, value3 in enumerate(LIST2):
if str(value3) == str(key):
IMAGE = LIST1[index]
DATA3.sort(key=get_id, reverse=True)
path4 = c.ORIGIN_PATH['embed.tracker.json']
json_string = await json_manager.get_json(path4)
new_json_string = {'data': []}
STRING = ''
SUM = 0
for key, value in json_string[guild_l]['xp_tracker']['stats'].items():
if DATA3:
if int(key) == 1:
for index, data in enumerate(DATA3):
index = index + 1
if index <= 10:
if index == 1:
title = ':first_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 2:
title = ':second_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
elif index == 3:
title = ':third_place: {}'.format(l.DISCORD_TOP[guild_l][index - 1])
else:
title = '{}'.format(l.DISCORD_TOP[guild_l][index - 1])
STRING += l.xp_tracker[guild_l]['configuration']['current_xp'].format(title, data['user_username'], data['user_rsn'], locale.format_string('%d', data['sum'], grouping=True))
SUM += data['sum']
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'].format('\u200D'),
'value{}'.format(key): str(value['value']).format(STRING)
})
else:
STRING += l.xp_tracker[guild_l]['configuration']['total_xp'].format(locale.format_string('%d', SUM, grouping=True))
new_json_string['data'].append({
'name{}'.format(key): value['name'],
'value{}'.format(key): str(value['value']).format(ctx.guild.name)
})
await embed_creator.create_embed(ctx, discord.Color.dark_green(), False, ctx.guild.icon_url, IMAGE, l.xp_tracker[guild_l]['embed_3'].format(ctx.guild.name, EVENT_NAME), new_json_string['data'], False)
DATA3.clear()
else:
await ctx.send(l.xp_tracker[guild_l]['msg_4'])
else:
await ctx.send(l.xp_tracker[guild_l]['msg_5'])
else:
await ctx.send(l.module_permissions[guild_l]['msg_restricted'])
except Exception as error:
await exception.error(error)
# ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
@tasks.loop(count=1)
async def variable_init(self):
try:
path_global = c.GUILD_PATH['{}_g.ini'.format(self.name)]
ini = await ini_manager.get_ini(path_global)
self.PRE = await json_manager.get_ini_list(path_global, 'CONSTANT2', 'PRE')
self.NAME = await json_manager.get_ini_list(path_global, 'CONSTANT1', 'NAME')
self.ICON = await json_manager.get_ini_list(path_global, 'CONSTANT3', 'ICON')
self.PNG = await json_manager.get_ini_list(path_global, 'CONSTANT5', 'PNG')
self.INFO_PANEL_IMAGE = ini['CONSTANT5']['INFO_PANEL']
self.INFO_PANEL_FIRST_IMAGE = ini['CONSTANT5']['INFO_PANEL_FIRST']
self.INFO_PANEL_SECOND_IMAGE = ini['CONSTANT5']['INFO_PANEL_SECOND']
self.INFO_PANEL_THIRD_IMAGE = ini['CONSTANT5']['INFO_PANEL_THIRD']
self.FONT_PATH = ini['CONSTANT5']['FONT']
await console_interface.console_message(c.CLIENT_MESSAGES['variable_init'].format(self.name))
except Exception as error:
await exception.error(error)
def __init__(self, client):
self.PRE = None
self.NAME = None
self.ICON = None
self.PNG = None
self.INFO_PANEL_IMAGE = None
self.INFO_PANEL_FIRST_IMAGE = None
self.INFO_PANEL_SECOND_IMAGE = None
self.INFO_PANEL_THIRD_IMAGE = None
self.FONT_PATH = None
self.variable_init.start()
self.client = client
@commands.command()
async def xptracker(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xptracker)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def addxpevent(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_addxpevent)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def removeallxp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_removeallxp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def axp(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_axp)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpupdate(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpupdate)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xprank(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xprank)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
@commands.command()
async def xpstats(self, ctx):
try:
if str(ctx.message.channel.type) == 'text':
await permissions.module_status_check(ctx, self.name, c.GUILD_PATH['{}.ini'.format(self.name)], self.fun_xpstats)
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
def setup(client):
client.add_cog(xp_tracker(client))
| 2
| 2
|
days/01/part1.py
|
gr3yknigh1/aoc2021
| 0
|
12537
|
from __future__ import annotations
import os
import collections
BASE_PATH = os.path.dirname(__file__)
INPUT_PATH = os.path.join(BASE_PATH, "input.txt")
OUTPUT_PATH = os.path.join(BASE_PATH, "output.txt")
def proceed_buffer(buffer: str) -> list[int]:
return [int(line) for line in buffer.splitlines()]
def main() -> int:
buffer: str = ""
with open(INPUT_PATH, mode='r', encoding="utf-8") as f:
buffer = f.read()
measurements: list[int] = proceed_buffer(buffer)
measurements_counter = collections.Counter()
output_buffer: str = ""
    prev: int | None = None
for i in measurements:
if prev is None:
output_buffer += f"{i} (N/A - no previous measurement)\n"
measurements_counter["None"] += 1
elif prev > i:
output_buffer += f"{i} (decrease)\n"
measurements_counter["Decreased"] += 1
elif prev < i:
output_buffer += f"{i} (increase)\n"
measurements_counter["Increased"] += 1
elif prev == i:
output_buffer += f"{i} (not changed)\n"
measurements_counter["Equal"] += 1
prev = i
output_buffer += "\n====\n"
output_buffer += "Total:\n"
output_buffer += f": {measurements_counter}"
with open(OUTPUT_PATH, mode='w', encoding="utf-8") as f:
f.write(output_buffer)
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 2.984375
| 3
|
src/ensemble_nn/agent_nn.py
|
AbhinavGopal/ts_tutorial
| 290
|
12538
|
"""Agents for neural net bandit problems.
We implement three main types of agent:
- epsilon-greedy (fixed epsilon, annealing epsilon)
- dropout (arXiv:1506.02142)
- ensemble sampling
All code is specialized to the setting of 2-layer fully connected MLPs.
"""
import numpy as np
import numpy.random as rd
from base.agent import Agent
from ensemble_nn.env_nn import TwoLayerNNBandit
class TwoLayerNNEpsilonGreedy(Agent):
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
epsilon_param=0.0,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Epsilon-greedy agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
epsilon_param: fixed epsilon choice.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim) # initialize weights
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.epsilon_param = epsilon_param
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps # number of gradient steps we
# take during each time period
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, t):
"""Update the model by taking a few gradient steps."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.reward_hist[batch_ind]
out, cache = self._model_forward(action_batch)
dW1, dW2 = self._model_backward(out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * self.W1
dW2 += 2 / (self.prior_var * (t + 1)) * self.W2
self.W1 -= self.lr * dW1
self.W2 -= self.lr * dW2
def update_observation(self, observation, action, reward):
"""Learn from observations."""
t = observation
self.action_hist[t] = self.actions[action]
self.reward_hist[t] = reward
self._update_model(t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Fixed epsilon-greedy action selection."""
u = rd.rand()
if u < self.epsilon_param:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEpsilonGreedyAnnealing(TwoLayerNNEpsilonGreedy):
"""Epsilon-greedy with an annealing epsilon:
epsilon = self.epsilon_param / (self.epsilon_param + t)
"""
def pick_action(self, observation):
"""Overload pick_action to dynamically recalculate epsilon-greedy."""
t = observation
epsilon = self.epsilon_param / (self.epsilon_param + t)
u = rd.rand()
if u < epsilon:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNDropout(TwoLayerNNEpsilonGreedy):
"""Dropout is used to represent model uncertainty.
ICML paper suggests this is Bayesian uncertainty: arXiv:1506.02142.
Follow up work suggests that this is flawed: TODO(iosband) add link.
"""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
drop_prob=0.5,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Dropout agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
drop_prob: probability of randomly zero-ing out weight component.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim)
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.p = drop_prob
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Note that dropout remains "on" so that forward pass is stochastic.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
dropout_mask = rd.rand(*relu_out.shape) > self.p
dropout_out = relu_out * dropout_mask
out = np.sum(dropout_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out, dropout_mask, dropout_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out, dropout_mask, dropout_out = cache
dout = -(2 / self.noise_var) * (y - out)
    dW2 = np.sum(dout[:, np.newaxis] * dropout_out, axis=0)  # gradient must flow through the dropout mask
ddropout_out = dout[:, np.newaxis] * self.W2
drelu_out = ddropout_out * dropout_mask
relu_mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = relu_mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def pick_action(self, observation):
"""Select the greedy action according to the output of a stochastic
forward pass."""
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEnsembleSampling(Agent):
"""An ensemble sampling agent maintains an ensemble of neural nets, each
fitted to a perturbed prior and perturbed observations."""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
num_models=10,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Ensemble sampling agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
num_models: Number of ensemble models to train.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.M = num_models
# initialize models by sampling perturbed prior means
self.W1_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim,
input_dim)
self.W2_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim)
self.W1 = np.copy(self.W1_model_prior)
self.W2 = np.copy(self.W2_model_prior)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.model_reward_hist = np.zeros((self.M, self.T))
def _model_forward(self, m, input_actions):
"""Neural network forward pass for single model of ensemble.
Args:
m: index of which network to evaluate.
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1[m], axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2[m], axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, m, out, cache, y):
"""Neural network backward pass (for backpropagation) for single network.
Args:
m: index of which network to evaluate.
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2[m]
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, m, t):
"""Apply SGD to model m."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.model_reward_hist[m][batch_ind]
out, cache = self._model_forward(m, action_batch)
dW1, dW2 = self._model_backward(m, out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * (
self.W1[m] - self.W1_model_prior[m])
dW2 += 2 / (self.prior_var * (t + 1)) * (
self.W2[m] - self.W2_model_prior[m])
self.W1[m] -= self.lr * dW1
self.W2[m] -= self.lr * dW2
return
def update_observation(self, observation, action, reward):
"""Learn from observations, shared across all models.
However, perturb the reward independently for each model and then update.
"""
t = observation
self.action_hist[t] = self.actions[action]
for m in range(self.M):
m_noise = np.sqrt(self.noise_var) * rd.randn()
self.model_reward_hist[m, t] = reward + m_noise
self._update_model(m, t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Select action via ensemble sampling.
Choose active network uniformly at random, then act greedily wrt that model.
"""
m = rd.randint(self.M)
model_out, _ = self._model_forward(m, self.actions)
action = np.argmax(model_out)
return action
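# Note on the data flow above (a sketch of the update, not original commentary):
# for a single observed reward r, each ensemble member m is trained on its own
# perturbed copy
#   r_m = r + sqrt(noise_var) * randn()
# and regularized toward its own perturbed prior (W1_model_prior[m],
# W2_model_prior[m]), which keeps the M networks statistically diverse.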
| 3.375
| 3
|
leaflet_storage/management/commands/storagei18n.py
|
Biondilbiondo/django-leaflet-storage-concurrent-editing
| 0
|
12539
|
import io
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.staticfiles import finders
from django.template.loader import render_to_string
from django.utils.translation import to_locale
class Command(BaseCommand):
def handle(self, *args, **options):
for code, name in settings.LANGUAGES:
code = to_locale(code)
print("Processing", name)
path = finders.find('storage/src/locale/{code}.json'.format(
code=code))
if not path:
print("No file for", code, "Skipping")
else:
with io.open(path, "r", encoding="utf-8") as f:
print("Found file", path)
self.render(code, f.read())
def render(self, code, json):
path = os.path.join(
settings.STATIC_ROOT,
"storage/src/locale/",
"{code}.js".format(code=code)
)
with io.open(path, "w", encoding="utf-8") as f:
content = render_to_string('leaflet_storage/locale.js', {
"locale": json,
"locale_code": code
})
print("Exporting to", path)
f.write(content)
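# Usage sketch (assuming this app is in INSTALLED_APPS so Django picks up the
# command; locale JSON files are looked up via the staticfiles finders at
# "storage/src/locale/<code>.json" and rendered into STATIC_ROOT):
#
#   python manage.py storagei18n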
| 2.3125
| 2
|
spikemetrics/metrics.py
|
MarineChap/spikemetrics
| 0
|
12540
|
# Copyright © 2019. <NAME>. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
import warnings
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_positions
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params,
duration, channel_locations=None, cluster_ids=None, epochs=None, seed=None, verbose=True):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
duration : length of recording (seconds)
channel_locations : numpy.ndarray (num_channels x 2)
Channel locations (if None, a linear geometry is assumed)
params : dict of parameters
        'isi_threshold' : minimum time for isi violations
        'min_isi'
        'num_channels_to_compare'
        'max_spikes_for_unit'
        'max_spikes_for_nn'
        'n_neighbors'
        'n_silhouette'
        'drift_metrics_interval_s'
        'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times >= epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
isi_threshold=params['isi_threshold'],
min_isi=params['min_isi'],
duration=duration,
verbose=verbose)
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
duration=duration, verbose=verbose)
print("Calculating firing rate")
firing_rate = calculate_firing_rates(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units, duration=duration, verbose=verbose)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters=spike_clusters[in_epoch],
amplitudes=amplitudes[in_epoch],
total_units=total_units,
verbose=verbose)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = \
calculate_pc_metrics(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
num_channels_to_compare=params['num_channels_to_compare'],
max_spikes_for_cluster=params['max_spikes_for_unit'],
spikes_for_nn=spikes_for_nn,
n_neighbors=params['n_neighbors'],
                                 channel_locations=channel_locations,
seed=seed,
verbose=verbose)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
spikes_for_silhouette=spikes_for_silhouette,
seed=seed, verbose=verbose)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
interval_length=params['drift_metrics_interval_s'],
                                                              min_spikes_per_interval=params['drift_metrics_min_spikes_per_interval'],
                                                              channel_locations=channel_locations,
verbose=verbose)
if cluster_ids is None:
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids_out),
('firing_rate', firing_rate),
('presence_ratio', presence_ratio),
('isi_violation', isi_viol),
('amplitude_cutoff', amplitude_cutoff),
('isolation_distance', isolation_distance),
('l_ratio', l_ratio),
('d_prime', d_prime),
('nn_hit_rate', nn_hit_rate),
('nn_miss_rate', nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name', epoch_name),
)))))
return metrics
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi, duration,
spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
duration=duration,
isi_threshold=isi_threshold,
min_isi=min_isi)
return viol_rates
def calculate_presence_ratio(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None,
verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
duration=duration)
return ratios
def calculate_num_spikes(spike_times, spike_clusters, total_units, spike_cluster_subset=None, verbose=True):
num_spikes = np.zeros((total_units,))
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
num_spikes[cluster_id] = len(spike_times[for_this_cluster])
return num_spikes
def calculate_firing_rates(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
firing_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
firing_rates[cluster_id] = firing_rate(spike_times[for_this_cluster],
duration=duration)
return firing_rates
def calculate_amplitude_cutoff(spike_clusters, amplitudes, total_units, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs[cluster_id] = amplitude_cutoff(amplitudes[for_this_cluster])
return amplitude_cutoffs
def calculate_pc_metrics(spike_clusters, total_units, pc_features, pc_feature_ind,
num_channels_to_compare, max_spikes_for_cluster, spikes_for_nn,
n_neighbors, channel_locations, min_num_pcs=10, metric_names=None,
seed=None, spike_cluster_subset=None, verbose=True):
"""
Computes metrics from projection of waveforms to principal components
including: isolation distance, l ratio, d prime, nn hit rate, nn miss rate
Parameters
----------
spike_clusters: numpy.ndarray (num_spikes,)
Unit ID for each spike time
total_units: int
Total number of units
pc_features: numpy.ndarray (num_spikes, num_pcs, num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units, num_channels)
Channel indices of PCs for each unit
num_channels_to_compare: int
Number of channels around the max channel over which to compute the
metrics (e.g. only units from these channels will be considered for the
nearest neighbor metrics)
max_spikes_for_cluster: int
Total number of spikes to use for computing the metrics
spikes_for_nn: int
Number of spikes in a unit to use for computing nearest neighbor metrics
(nn_hit_rate, nn_miss_rate)
n_neighbors: int
Number of nearest neighbor spikes to compare membership
channel_locations: array, (channels, 2)
(x,y) location of channels; used to identify neighboring channels
min_num_pcs: int, default=10
Minimum number of spikes a unit must have to compute these metrics
metric_names: list of str, default=None
List of metrics to compute
seed: int, default=None
Random seed for subsampling spikes from the unit
spike_cluster_subset: numpy.array (units,), default=None
If specified compute metrics for only these units
verbose: bool, default=True
Prints out progress bar if True
Returns (all 1d numpy.arrays)
-------
isolation_distances
l_ratios
d_primes
nn_hit_rates
nn_miss_rates
"""
if metric_names is None:
metric_names = ['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor']
if num_channels_to_compare > channel_locations.shape[0]:
num_channels_to_compare = channel_locations.shape[0]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
peak_channels = np.zeros((total_units,), dtype='uint16')
neighboring_channels = np.zeros((total_units, num_channels_to_compare))
isolation_distances = np.zeros((total_units,))
l_ratios = np.zeros((total_units,))
d_primes = np.zeros((total_units,))
nn_hit_rates = np.zeros((total_units,))
nn_miss_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(all_cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :], 0))
peak_channels[idx] = pc_feature_ind[idx, pc_max]
# find neighboring channels
neighboring_channels[idx] = find_neighboring_channels(pc_feature_ind[idx, pc_max],
pc_feature_ind[idx, :],
num_channels_to_compare,
channel_locations)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(idx + 1, total_units)
peak_channel = peak_channels[idx]
# units_for_channel: index (not ID) of units defined at the target unit's peak channel
units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0],
pc_feature_ind.shape)
# units_in_range: list of bool, True for units whose peak channels are in the neighborhood of target unit
units_in_range = [channel in neighboring_channels[idx] for channel in peak_channels[units_for_channel]]
channels_to_use = neighboring_channels[idx]
# only get index of units who are in the neighborhood of target unit
units_for_channel = units_for_channel[units_in_range]
spike_counts = np.zeros(units_for_channel.shape)
for idx2, cluster_id2 in enumerate(units_for_channel):
spike_counts[idx2] = np.sum(spike_clusters == all_cluster_ids[cluster_id2])
# index of target unit within the subset of units in its neighborhood (including itself)
this_unit_idx = np.where(units_for_channel == idx)[0]
if spike_counts[this_unit_idx] > max_spikes_for_cluster:
relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_for_channel):
try:
channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
except IndexError:
# Occurs when pc_feature_ind does not contain all channels of interest
# In that case, we will exclude this unit for the calculation
                print('Unit outside the range set by channels_to_use, skipping...')
pass
else:
subsample = int(relative_counts[idx2])
index_mask = make_index_mask(spike_clusters, all_cluster_ids[cluster_id2], min_num=0, max_num=subsample,
seed=seed)
pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
labels = np.ones((pcs.shape[0],)) * all_cluster_ids[cluster_id2]
all_pcs = np.concatenate((all_pcs, pcs), 0)
all_labels = np.concatenate((all_labels, labels), 0)
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
if all_pcs.shape[0] > min_num_pcs:
if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
isolation_distances[idx], l_ratios[idx] = mahalanobis_metrics(all_pcs, all_labels,
cluster_id)
else:
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
if 'd_prime' in metric_names:
d_primes[idx] = lda_metrics(all_pcs, all_labels, cluster_id)
else:
d_primes[idx] = np.nan
if 'nearest_neighbor' in metric_names:
nn_hit_rates[idx], nn_miss_rates[idx] = nearest_neighbors_metrics(all_pcs, all_labels,
cluster_id,
spikes_for_nn,
n_neighbors)
else:
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
else:
            print(f'Unit {cluster_id} only has {all_pcs.shape[0]} spikes, '
                  f'which is not enough to compute metrics; assigning nan...')
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
d_primes[idx] = np.nan
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
def calculate_silhouette_score(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
spikes_for_silhouette,
seed=None,
spike_cluster_subset=None,
verbose=True):
random_spike_inds = np.random.RandomState(seed=seed).permutation(spike_clusters.size)
random_spike_inds = random_spike_inds[:spikes_for_silhouette]
num_pc_features = pc_features.shape[1]
num_channels = np.max(pc_feature_ind) + 1
all_pcs = np.zeros((spikes_for_silhouette, num_channels * num_pc_features))
for idx, i in enumerate(random_spike_inds):
unit_id = spike_clusters[i]
channels = pc_feature_ind[unit_id, :]
for j in range(0, num_pc_features):
all_pcs[idx, channels + num_channels * j] = pc_features[i, j, :]
cluster_labels = spike_clusters[random_spike_inds]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
SS = np.empty((total_units, total_units))
SS[:] = np.nan
seen_unit_pairs = set()
for idx1, i in enumerate(cluster_ids):
if verbose:
printProgressBar(idx1 + 1, len(cluster_ids))
for idx2, j in enumerate(all_cluster_ids):
if (i, j) not in seen_unit_pairs and (j, i) not in seen_unit_pairs and i != j:
inds = np.in1d(cluster_labels, np.array([i, j]))
X = all_pcs[inds, :]
labels = cluster_labels[inds]
if len(labels) > 2:
SS[i, j] = silhouette_score(X, labels, random_state=seed)
seen_unit_pairs.add((i, j))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a = np.nanmin(SS, 0)
b = np.nanmin(SS, 1)
return np.array([np.nanmin([a, b]) for a, b in zip(a, b)])
def calculate_drift_metrics(spike_times,
spike_clusters,
total_units,
pc_features,
pc_feature_ind,
interval_length,
min_spikes_per_interval,
vertical_channel_spacing=10,
channel_locations=None,
spike_cluster_subset=None,
verbose=True):
max_drift = np.zeros((total_units,))
cumulative_drift = np.zeros((total_units,))
positions = get_spike_positions(spike_clusters, pc_features, pc_feature_ind, channel_locations,
vertical_channel_spacing)
interval_starts = np.arange(np.min(spike_times), np.max(spike_times), interval_length)
interval_ends = interval_starts + interval_length
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, len(cluster_ids))
in_cluster = spike_clusters == cluster_id
times_for_cluster = spike_times[in_cluster]
positions_for_cluster = positions[in_cluster]
median_positions = []
for t1, t2 in zip(interval_starts, interval_ends):
in_range = (times_for_cluster > t1) * (times_for_cluster < t2)
if np.sum(in_range) >= min_spikes_per_interval:
median_positions.append(np.median(positions_for_cluster[in_range], 0))
else:
median_positions.append([np.nan, np.nan])
median_positions = np.array(median_positions)
        # Extract the upper-triangular (semi-)matrix of position shifts (used to compute max_drift and cumulative_drift)
position_diffs = np.zeros((len(median_positions), len(median_positions)))
for i, pos_i in enumerate(median_positions):
for j, pos_j in enumerate(median_positions):
if j > i:
if not np.isnan(pos_i[0]) and not np.isnan(pos_j[0]):
position_diffs[i, j] = np.linalg.norm(pos_i - pos_j)
else:
position_diffs[i, j] = 0
# Maximum drift among all periods
if np.any(position_diffs > 0):
max_drift[cluster_id] = np.around(np.max(position_diffs[position_diffs > 0]), 2)
# The +1 diagonal contains the step-by-step drifts between intervals.
# Summing them up we obtain cumulative drift
cumulative_drift[cluster_id] = np.around(np.sum(np.diag(position_diffs, 1)), 2)
else:
# not enough spikes
max_drift[cluster_id] = 0
cumulative_drift[cluster_id] = 0
return max_drift, cumulative_drift
# ==========================================================
# IMPLEMENTATION OF ACTUAL METRICS:
# ==========================================================
def isi_violations(spike_train, duration, isi_threshold, min_isi=0):
"""Calculate Inter-Spike Interval (ISI) violations for a spike train.
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Originally written in Matlab by <NAME> (https://github.com/cortex-lab/sortingQuality)
Converted to Python by <NAME>
Inputs:
-------
spike_train : array of monotonically increasing spike times (in seconds) [t1, t2, t3, ...]
duration : length of recording (seconds)
isi_threshold : threshold for classifying adjacent spikes as an ISI violation
- this is the biophysical refractory period
min_isi : minimum possible inter-spike interval (default = 0)
- this is the artificial refractory period enforced by the data acquisition system
or post-processing algorithms
Outputs:
--------
fpRate : rate of contaminating spikes as a fraction of overall rate
- higher values indicate more contamination
num_violations : total number of violations detected
"""
isis_initial = np.diff(spike_train)
if min_isi > 0:
duplicate_spikes = np.where(isis_initial <= min_isi)[0]
spike_train = np.delete(spike_train, duplicate_spikes + 1)
isis = np.diff(spike_train)
num_spikes = len(spike_train)
num_violations = sum(isis < isi_threshold)
violation_time = 2 * num_spikes * (isi_threshold - min_isi)
total_rate = firing_rate(spike_train, duration)
violation_rate = num_violations / violation_time
fpRate = violation_rate / total_rate
return fpRate, num_violations
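# Worked example with hypothetical numbers: a 1000 s recording with 10000 spikes,
# 20 violations, isi_threshold = 0.0015 s and min_isi = 0 gives
#   violation_time = 2 * 10000 * 0.0015 = 30 s
#   total_rate     = 10000 / 1000       = 10 Hz
#   fpRate         = (20 / 30) / 10     ≈ 0.067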
def presence_ratio(spike_train, duration, num_bin_edges=101):
"""Calculate fraction of time the unit is present within an epoch.
Inputs:
-------
spike_train : array of spike times
duration : length of recording (seconds)
num_bin_edges : number of bin edges for histogram
- total bins = num_bin_edges - 1
Outputs:
--------
presence_ratio : fraction of time bins in which this unit is spiking
"""
h, b = np.histogram(spike_train, np.linspace(0, duration, num_bin_edges))
return np.sum(h > 0) / (num_bin_edges - 1)
def firing_rate(spike_train, duration):
"""Calculate firing rate for a spike train.
    The rate is computed over the full recording duration.
Inputs:
-------
spike_train : array of spike times (in seconds)
duration : length of recording (in seconds)
Outputs:
--------
fr : float
Firing rate in Hz
"""
fr = spike_train.size / duration
return fr
def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
""" Calculate approximate fraction of spikes missing from a distribution of amplitudes
Assumes the amplitude histogram is symmetric (not valid in the presence of drift)
Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Input:
------
amplitudes : numpy.ndarray
Array of amplitudes (don't need to be in physical units)
num_histogram_bins : int
Number of bins for calculating amplitude histogram
histogram_smoothing_value : float
Gaussian filter window for smoothing amplitude histogram
Output:
-------
fraction_missing : float
Fraction of missing spikes (ranges between 0 and 0.5)
If more than 50% of spikes are missing, an accurate estimate isn't possible
"""
h, b = np.histogram(amplitudes, num_histogram_bins, density=True)
pdf = gaussian_filter1d(h, histogram_smoothing_value)
support = b[:-1]
peak_index = np.argmax(pdf)
G = np.argmin(np.abs(pdf[peak_index:] - pdf[0])) + peak_index
bin_size = np.mean(np.diff(support))
fraction_missing = np.sum(pdf[G:]) * bin_size
fraction_missing = np.min([fraction_missing, 0.5])
return fraction_missing
def mahalanobis_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates isolation distance and L-ratio (metrics computed from Mahalanobis distance)
Based on metrics described in Schmitzer-Torbert et al. (2005) Neurosci 131: 1-11
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
isolation_distance : float
Isolation distance of this unit
l_ratio : float
L-ratio for this unit
"""
pcs_for_this_unit = all_pcs[all_labels == this_unit_id, :]
pcs_for_other_units = all_pcs[all_labels != this_unit_id, :]
mean_value = np.expand_dims(np.mean(pcs_for_this_unit, 0), 0)
try:
VI = np.linalg.inv(np.cov(pcs_for_this_unit.T))
except np.linalg.linalg.LinAlgError: # case of singular matrix
return np.nan, np.nan
mahalanobis_other = np.sort(cdist(mean_value,
pcs_for_other_units,
'mahalanobis', VI=VI)[0])
mahalanobis_self = np.sort(cdist(mean_value,
pcs_for_this_unit,
'mahalanobis', VI=VI)[0])
n = np.min([pcs_for_this_unit.shape[0], pcs_for_other_units.shape[0]]) # number of spikes
if n >= 2:
dof = pcs_for_this_unit.shape[1] # number of features
l_ratio = np.sum(1 - chi2.cdf(pow(mahalanobis_other, 2), dof)) / mahalanobis_self.shape[0]
isolation_distance = pow(mahalanobis_other[n - 1], 2)
# if math.isnan(l_ratio):
# print("NaN detected", mahalanobis_other, VI)
else:
l_ratio = np.nan
isolation_distance = np.nan
return isolation_distance, l_ratio
def lda_metrics(all_pcs, all_labels, this_unit_id):
""" Calculates d-prime based on Linear Discriminant Analysis
Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
Outputs:
--------
    d_prime : float
        d-prime (discriminability) between this unit and all other spikes
"""
X = all_pcs
y = np.zeros((X.shape[0],), dtype='bool')
y[all_labels == this_unit_id] = True
lda = LDA(n_components=1)
X_flda = lda.fit_transform(X, y)
flda_this_cluster = X_flda[np.where(y)[0]]
flda_other_cluster = X_flda[np.where(np.invert(y))[0]]
d_prime = (np.mean(flda_this_cluster) - np.mean(flda_other_cluster)) / np.sqrt(
0.5 * (np.std(flda_this_cluster) ** 2 + np.std(flda_other_cluster) ** 2))
return d_prime
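# Worked example of the d-prime formula above (hypothetical projections):
# if the LDA projections have mean 2.0 / std 1.0 for this unit and
# mean 0.0 / std 1.0 for the remaining spikes, then
#   d_prime = (2.0 - 0.0) / sqrt(0.5 * (1.0**2 + 1.0**2)) = 2.0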
def nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id, spikes_for_nn, n_neighbors):
""" Calculates unit contamination based on NearestNeighbors search in PCA space
Based on metrics described in Chung, Magland et al. (2017) Neuron 95: 1381-1394
A is a (hopefully) representative subset of cluster X
NN_hit(X) = 1/k \sum_i=1^k |{x in A such that ith closest neighbor is in X}| / |A|
Inputs:
-------
all_pcs : numpy.ndarray (num_spikes x PCs)
2D array of PCs for all spikes
all_labels : numpy.ndarray (num_spikes x 0)
1D array of cluster labels for all spikes
this_unit_id : Int
number corresponding to unit for which these metrics will be calculated
spikes_for_nn : Int
number of spikes to use (calculation can be very slow when this number is >20000)
n_neighbors : Int
number of neighbors to use
Outputs:
--------
hit_rate : float
Fraction of neighbors for target cluster that are also in target cluster
miss_rate : float
Fraction of neighbors outside target cluster that are in target cluster
"""
total_spikes = all_pcs.shape[0]
ratio = spikes_for_nn / total_spikes
this_unit = all_labels == this_unit_id
X = np.concatenate((all_pcs[this_unit, :], all_pcs[np.invert(this_unit), :]), 0)
n = np.sum(this_unit)
if ratio < 1:
inds = np.arange(0, X.shape[0] - 1, 1 / ratio).astype('int')
X = X[inds, :]
n = int(n * ratio)
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
this_cluster_inds = np.arange(n)
this_cluster_nearest = indices[:n, 1:].flatten()
other_cluster_nearest = indices[n:, 1:].flatten()
hit_rate = np.mean(this_cluster_nearest < n)
miss_rate = np.mean(other_cluster_nearest < n)
return hit_rate, miss_rate
# ==========================================================
# HELPER FUNCTIONS:
# ==========================================================
def make_index_mask(spike_clusters, unit_id, min_num, max_num, seed=None):
""" Create a mask for the spike index dimensions of the pc_features array
Inputs:
-------
spike_clusters : numpy.ndarray (num_spikes x 0)
Contains cluster IDs for all spikes in pc_features array
unit_id : Int
ID for this unit
min_num : Int
Minimum number of spikes to return; if there are not enough spikes for this unit, return all False
max_num : Int
Maximum number of spikes to return; if too many spikes for this unit, return a random subsample
seed: int
Random seed for reproducibility
Output:
-------
index_mask : numpy.ndarray (boolean)
Mask of spike indices for pc_features array
"""
index_mask = spike_clusters == unit_id
inds = np.where(index_mask)[0]
if len(inds) < min_num:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
else:
index_mask = np.zeros((spike_clusters.size,), dtype='bool')
order = np.random.RandomState(seed=seed).permutation(inds.size)
index_mask[inds[order[:max_num]]] = True
return index_mask
def make_channel_mask(unit_id, pc_feature_ind, channels_to_use):
""" Create a mask for the channel dimension of the pc_features array
Inputs:
-------
unit_id : Int
ID for this unit
pc_feature_ind : np.ndarray
Channels used for PC calculation for each unit
channels_to_use : np.ndarray
Channels to use for calculating metrics
Output:
-------
channel_mask : numpy.ndarray
Channel indices to extract from pc_features array
"""
these_inds = pc_feature_ind[unit_id, :]
channel_mask = [np.argwhere(these_inds == i)[0][0] for i in channels_to_use]
return np.array(channel_mask)
def get_unit_pcs(these_pc_features, index_mask, channel_mask):
""" Use the index_mask and channel_mask to return PC features for one unit
Inputs:
-------
these_pc_features : numpy.ndarray (float)
Array of pre-computed PC features (num_spikes x num_PCs x num_channels)
index_mask : numpy.ndarray (boolean)
Mask for spike index dimension of pc_features array
channel_mask : numpy.ndarray (boolean)
Mask for channel index dimension of pc_features array
Output:
-------
unit_PCs : numpy.ndarray (float)
PCs for one unit (num_spikes x num_PCs x num_channels)
"""
unit_PCs = these_pc_features[index_mask, :, :]
unit_PCs = unit_PCs[:, :, channel_mask]
return unit_PCs
def find_neighboring_channels(peak_channel, channel_list, num_channels_to_compare, channel_locations):
"""
Finds k nearest channels to the peak channel of a unit
Parameters
----------
peak_channel: int
ID of channel with largest waveform amplitude
channel_list: numpy.ndarray
IDs of channels being considered
num_channels_to_compare: int
Number of nearest channels to return
channel_locations: numpy.ndarray, (n_channels, 2)
x,y coordinates of the channels in channel_list
Returns
-------
neighboring_channels: array_like
id of k channels that neighbor peak channel (including the peak channel itself)
"""
# get peak channel location
channel_idx = list(channel_list).index(peak_channel)
peak_channel_location = channel_locations[channel_idx]
# compute pairwise distance
distances = [np.linalg.norm(peak_channel_location - loc) for loc in channel_locations]
    # get k closest channels (the peak channel itself, at distance 0, is included)
neighboring_channels_inds = np.argsort(distances)[:num_channels_to_compare]
neighboring_channels = channel_list[neighboring_channels_inds]
return neighboring_channels
| 2.515625
| 3
|
pdpbox/pdp_plot_utils.py
|
flinder/PDPbox
| 0
|
12541
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import copy
from .pdp_calc_utils import _sample_data, _find_onehot_actual, _find_closest
from sklearn.cluster import MiniBatchKMeans, KMeans
def _pdp_plot_title(n_grids, feature_name, ax, multi_flag, which_class, plot_params):
"""
Draw pdp plot title
:param n_grids: number of grids
:param feature_name: name of the feature
:param ax: axes to plot on
:param multi_flag: whether it is a subplot of a multi-classes plot
:param which_class: which class to plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'PDP for %s' % feature_name
subtitle = "Number of unique grid points: %d" % n_grids
title_fontsize = 15
subtitle_fontsize = 12
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if multi_flag:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.45, "For Class %d" % which_class, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family)
ax.text(0, 0.25, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
else:
ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
ax.text(0, 0.4, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
ax.axis('off')
def _axes_modify(font_family, ax, top=False, right=False, legend=False):
# modify the axes
for tick in ax.get_xticklabels():
tick.set_fontname(font_family)
for tick in ax.get_yticklabels():
tick.set_fontname(font_family)
ax.set_facecolor('white')
ax.tick_params(axis='both', which='major', labelsize=10, labelcolor='#424242', colors='#9E9E9E')
for d in ['top', 'bottom', 'right', 'left']:
ax.spines[d].set_visible(False)
if not legend:
if top:
ax.get_xaxis().tick_top()
elif right:
ax.get_yaxis().tick_right()
else:
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.grid(True, 'major', 'x', ls='--', lw=.5, c='k', alpha=.3)
ax.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
else:
ax.set_xticks([])
ax.set_yticks([])
def _pdp_plot(pdp_isolate_out, feature_name, center, plot_org_pts, plot_lines, frac_to_plot,
cluster, n_cluster_centers, cluster_method, x_quantile, ax, plot_params):
"""
Plot partial dependent plot
:param pdp_isolate_out: instance of pdp_isolate_obj
a calculated pdp_isolate_obj instance
:param feature_name: string
        name of the feature, not necessarily the same as the column name
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
        how many points or lines to plot; can be an integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
        cluster method to use; default is KMeans, and MiniBatchKMeans is used if 'approx' is passed
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
xticks_rotation = 0
if plot_params is not None:
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
# modify axes
_axes_modify(font_family, ax)
ax.set_xlabel(feature_name, fontsize=10)
feature_type = pdp_isolate_out.feature_type
feature_grids = pdp_isolate_out.feature_grids
display_columns = pdp_isolate_out.display_columns
actual_columns = pdp_isolate_out.actual_columns
if feature_type == 'binary' or feature_type == 'onehot' or x_quantile:
x = range(len(feature_grids))
ax.set_xticks(x)
ax.set_xticklabels(display_columns, rotation=xticks_rotation)
else:
# for numeric feature
x = feature_grids
ice_lines = copy.deepcopy(pdp_isolate_out.ice_lines)
pdp_y = copy.deepcopy(pdp_isolate_out.pdp)
# whether to fill between std upper and lower
# whether to highlight pdp line
std_fill = True
pdp_hl = False
# whether to center the plot
if center:
pdp_y -= pdp_y[0]
for col in feature_grids[1:]:
ice_lines[col] -= ice_lines[feature_grids[0]]
ice_lines['actual_preds'] -= ice_lines[feature_grids[0]]
ice_lines[feature_grids[0]] = 0
if cluster or plot_lines:
std_fill = False
pdp_hl = True
if cluster:
_ice_cluster_plot(x=x, ice_lines=ice_lines, feature_grids=feature_grids, n_cluster_centers=n_cluster_centers,
cluster_method=cluster_method, ax=ax, plot_params=plot_params)
else:
ice_plot_data = _sample_data(ice_lines=ice_lines, frac_to_plot=frac_to_plot)
_ice_line_plot(x=x, ice_plot_data=ice_plot_data, feature_grids=feature_grids, ax=ax, plot_params=plot_params)
if plot_org_pts:
ice_lines_temp = ice_lines.copy()
if feature_type == 'onehot':
ice_lines_temp['x'] = ice_lines_temp[actual_columns].apply(lambda x: _find_onehot_actual(x), axis=1)
ice_lines_temp = ice_lines_temp[~ice_lines_temp['x'].isnull()].reset_index(drop=True)
elif feature_type == 'numeric':
feature_grids = pdp_isolate_out.feature_grids
ice_lines_temp = ice_lines_temp[(ice_lines_temp[actual_columns[0]] >= feature_grids[0])
& (ice_lines_temp[actual_columns[0]] <= feature_grids[-1])]
if x_quantile:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]].apply(lambda x: _find_closest(x, feature_grids))
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
else:
ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
ice_plot_data_pts = _sample_data(ice_lines=ice_lines_temp, frac_to_plot=frac_to_plot)
_ice_plot_pts(ice_plot_data_pts=ice_plot_data_pts, ax=ax, plot_params=plot_params)
std = ice_lines[feature_grids].std().values
_pdp_std_plot(x=x, y=pdp_y, std=std, std_fill=std_fill, pdp_hl=pdp_hl, ax=ax, plot_params=plot_params)
def _pdp_std_plot(x, y, std, std_fill, pdp_hl, ax, plot_params):
"""
PDP basic plot
:param x: x axis values
:param y: pdp values
:param std: std values
:param std_fill: whether to fill between std upper and lower
:param pdp_hl: whether to highlight pdp line
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
upper = y + std
lower = y - std
pdp_color = '#1A4E5D'
pdp_hl_color = '#FEDC00'
pdp_linewidth = 2
zero_color = '#E75438'
zero_linewidth = 1.5
fill_color = '#66C2D7'
fill_alpha = 0.2
markersize = 5
if plot_params is not None:
if 'pdp_color' in plot_params.keys():
pdp_color = plot_params['pdp_color']
if 'pdp_hl_color' in plot_params.keys():
pdp_hl_color = plot_params['pdp_hl_color']
if 'pdp_linewidth' in plot_params.keys():
pdp_linewidth = plot_params['pdp_linewidth']
if 'zero_color' in plot_params.keys():
zero_color = plot_params['zero_color']
if 'zero_linewidth' in plot_params.keys():
zero_linewidth = plot_params['zero_linewidth']
if 'fill_color' in plot_params.keys():
fill_color = plot_params['fill_color']
if 'fill_alpha' in plot_params.keys():
fill_alpha = plot_params['fill_alpha']
if 'markersize' in plot_params.keys():
markersize = plot_params['markersize']
if pdp_hl:
ax.plot(x, y, color=pdp_hl_color, linewidth=pdp_linewidth * 3, alpha=0.8)
ax.plot(x, y, color=pdp_color, linewidth=pdp_linewidth, marker='o', markersize=markersize)
ax.plot(x, [0] * y, linestyle='--', linewidth=zero_linewidth, color=zero_color)
if std_fill:
ax.fill_between(x, upper, lower, alpha=fill_alpha, color=fill_color)
ax.set_ylim(np.min([np.min(lower) * 2, 0]), np.max([np.max(upper) * 2, 0]))
def _ice_plot_pts(ice_plot_data_pts, ax, plot_params):
"""
Plot the real data points
:param ice_plot_data_pts: data points to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
point_size = 50
point_pos_color = '#5BB573'
point_neg_color = '#E75438'
if plot_params is not None:
if 'point_size' in plot_params.keys():
point_size = plot_params['point_size']
if 'point_pos_color' in plot_params.keys():
point_pos_color = plot_params['point_pos_color']
if 'point_neg_color' in plot_params.keys():
point_neg_color = plot_params['point_neg_color']
ice_plot_data_pts['color'] = ice_plot_data_pts['actual_preds'].apply(lambda x: point_pos_color if x >= 0 else point_neg_color)
ax.scatter(ice_plot_data_pts['x'], ice_plot_data_pts['actual_preds'], s=point_size, marker="+", linewidth=1,
color=ice_plot_data_pts['color'])
def _ice_line_plot(x, ice_plot_data, feature_grids, ax, plot_params):
"""
Plot the ice lines
:param x: x axis values
:param ice_plot_data: ice lines to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
linewidth = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
linealpha = np.max([1.0 / np.log10(ice_plot_data.shape[0]), 0.3])
line_cmap = 'Blues'
if plot_params is not None:
if 'line_cmap' in plot_params.keys():
line_cmap = plot_params['line_cmap']
colors = plt.get_cmap(line_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(ice_plot_data)):
y = list(ice_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=linewidth, c=colors[i % 10], alpha=linealpha)
def _ice_cluster_plot(x, ice_lines, feature_grids, n_cluster_centers, cluster_method, ax, plot_params):
"""
Cluster the ice lines and plot out the cluster centers
:param x: x axis values
:param ice_lines: ice lines
:param n_cluster_centers: number of cluster centers
:param cluster_method: cluster method
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
if cluster_method == 'approx':
kmeans = MiniBatchKMeans(n_clusters=n_cluster_centers, random_state=0, verbose=0)
else:
kmeans = KMeans(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)
kmeans.fit(ice_lines[feature_grids])
cluster_plot_data = pd.DataFrame(kmeans.cluster_centers_, columns=feature_grids)
cluster_cmap = 'Blues'
if plot_params is not None:
if 'cluster_cmap' in plot_params.keys():
cluster_cmap = plot_params['cluster_cmap']
colors = plt.get_cmap(cluster_cmap)(np.linspace(0, 1, 20))[5:15]
for i in range(len(cluster_plot_data)):
y = list(cluster_plot_data[feature_grids].iloc[i].values)
ax.plot(x, y, linewidth=1, c=colors[i % 10])
def _pdp_interact_plot_title(pdp_interact_out, feature_names, ax,
multi_flag, which_class, only_inter, plot_params):
"""
Draw pdp interaction plot title
:param pdp_interact_out: instance of pdp_interact_obj
    :param feature_names: names of the two features
    :param ax: axes to plot on
:param multi_flag: whether it is a subplot of a multi-classes plot
:param which_class: which class to plot
:param only_inter: whether only draw interaction plot
:param plot_params: values of plot parameters
"""
font_family = 'Arial'
title = 'Interaction PDP between %s and %s' % (feature_names[0], feature_names[1])
title_fontsize = 14
subtitle_fontsize = 12
if type(pdp_interact_out) == dict:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out['class_0'].feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out['class_0'].feature_grids[1]))
else:
subtitle1 = 'Number of unique grid points of %s: %d' % (
feature_names[0], len(pdp_interact_out.feature_grids[0]))
subtitle2 = 'Number of unique grid points of %s: %d' % (
feature_names[1], len(pdp_interact_out.feature_grids[1]))
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'font_family' in plot_params.keys():
font_family = plot_params['font_family']
if 'title' in plot_params.keys():
title = plot_params['title']
if 'title_fontsize' in plot_params.keys():
title_fontsize = plot_params['title_fontsize']
if 'subtitle_fontsize' in plot_params.keys():
subtitle_fontsize = plot_params['subtitle_fontsize']
ax.set_facecolor('white')
if only_inter:
ax.text(0, 0.8, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.62, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.45, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.3, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.55, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.4, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.6, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
if multi_flag:
ax.text(0, 0.53, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
fontname=font_family)
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
else:
ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
color='grey')
ax.axis('off')
def _pdp_interact_plot(pdp_interact_out, feature_names, center, plot_org_pts, plot_lines, frac_to_plot, cluster,
n_cluster_centers, cluster_method, x_quantile, figsize, plot_params, multi_flag, which_class):
"""
Plot interaction plot
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param center: boolean, default=True
whether to center the plot
:param plot_org_pts: boolean, default=False
whether to plot out the original points
:param plot_lines: boolean, default=False
whether to plot out the individual lines
:param frac_to_plot: float or integer, default=1
how many points or lines to plot, can be a integer or a float
:param cluster: boolean, default=False
whether to cluster the individual lines and only plot out the cluster centers
:param n_cluster_centers: integer, default=None
number of cluster centers
:param cluster_method: string, default=None
cluster method to use, default is KMeans, if 'approx' is passed, MiniBatchKMeans is used
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param figsize: figure size
:param plot_params: dict, default=None
values of plot parameters
:param multi_flag: boolean, default=False
whether it is a subplot of a multi-class plot
:param which_class: integer, default=None
must not be None under multi-class mode
"""
if figsize is None:
fig = plt.figure(figsize=(15, 15))
else:
fig = plt.figure(figsize=figsize)
pdp_plot_params = None
if plot_params is not None:
if 'pdp' in plot_params.keys():
pdp_plot_params = plot_params['pdp']
gs = GridSpec(2, 2)
ax0 = plt.subplot(gs[0, 0])
_pdp_interact_plot_title(pdp_interact_out=pdp_interact_out, feature_names=feature_names, ax=ax0,
multi_flag=multi_flag, which_class=which_class, only_inter=False, plot_params=plot_params)
ax1 = plt.subplot(gs[0, 1])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out1, feature_name=feature_names[0], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile,
ax=ax1, plot_params=pdp_plot_params)
ax2 = plt.subplot(gs[1, 0])
_pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out2, feature_name=feature_names[1], center=center,
plot_org_pts=plot_org_pts, plot_lines=plot_lines, frac_to_plot=frac_to_plot, cluster=cluster,
n_cluster_centers=n_cluster_centers, cluster_method=cluster_method, x_quantile=x_quantile, ax=ax2,
plot_params=pdp_plot_params)
ax3 = plt.subplot(gs[1, 1])
_pdp_contour_plot(pdp_interact_out=pdp_interact_out, feature_names=feature_names, x_quantile=x_quantile,
ax=ax3, fig=fig, plot_params=plot_params)
class ColorBarLocator(object):
def __init__(self, pax, pad=60, width=20):
self.pax = pax
self.pad = pad
self.width = width
def __call__(self, ax, renderer):
x, y, w, h = self.pax.get_position().bounds
fig = self.pax.get_figure()
inv_trans = fig.transFigure.inverted()
pad, _ = inv_trans.transform([self.pad, 0])
width, _ = inv_trans.transform([self.width, 0])
return [x, y - pad, w, width]
def _pdp_contour_plot(pdp_interact_out, feature_names, x_quantile, ax, fig, plot_params):
"""
Plot PDP contour
:param pdp_interact_out: instance of pdp_interact_obj
a calculated pdp_interact_obj instance
:param feature_names: list of feature names
:param x_quantile: boolean, default=False
whether to construct x axis ticks using quantiles
:param ax: axes to plot on
:param fig: plt figure
:param plot_params: dict, default=None
values of plot parameters
"""
font_family = 'Arial'
contour_color = 'white'
contour_cmap = 'viridis'
xticks_rotation = 0
if plot_params is not None:
if 'pdp_inter' in plot_params.keys():
if 'contour_color' in plot_params['pdp_inter'].keys():
contour_color = plot_params['pdp_inter']['contour_color']
if 'contour_cmap' in plot_params['pdp_inter'].keys():
contour_cmap = plot_params['pdp_inter']['contour_cmap']
if 'font_family' in plot_params['pdp_inter'].keys():
font_family = plot_params['pdp_inter']['font_family']
if 'xticks_rotation' in plot_params.keys():
xticks_rotation = plot_params['xticks_rotation']
_axes_modify(font_family, ax)
feature_types = pdp_interact_out.feature_types
pdp = copy.deepcopy(pdp_interact_out.pdp)
new_feature_names = []
for i, feature_type in enumerate(feature_types):
if feature_type == 'onehot':
new_col = 'onehot_%d' % (i)
pdp[new_col] = pdp.apply(lambda x: list(x[pdp_interact_out.features[i]]).index(1), axis=1)
new_feature_names.append(new_col)
else:
new_feature_names.append(pdp_interact_out.features[i])
if (feature_types[0] == 'numeric') and x_quantile:
pdp[new_feature_names[0]] = pdp[new_feature_names[0]].apply(
lambda x: list(pdp_interact_out.feature_grids[0]).index(x))
if (feature_types[1] == 'numeric') and x_quantile:
pdp[new_feature_names[1]] = pdp[new_feature_names[1]].apply(
lambda x: list(pdp_interact_out.feature_grids[1]).index(x))
X, Y = np.meshgrid(pdp[new_feature_names[0]].unique(), pdp[new_feature_names[1]].unique())
Z = []
for i in range(X.shape[0]):
zs = []
for j in range(X.shape[1]):
x = X[i, j]
y = Y[i, j]
z = pdp[(pdp[new_feature_names[0]] == x) & (pdp[new_feature_names[1]] == y)]['preds'].values[0]
zs.append(z)
Z.append(zs)
Z = np.array(Z)
if feature_types[0] == 'onehot':
ax.set_xticks(range(X.shape[1]))
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
elif feature_types[0] == 'binary':
ax.set_xticks([0, 1])
ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
else:
if x_quantile:
ax.set_xticks(range(len(pdp_interact_out.feature_grids[0])))
ax.set_xticklabels(pdp_interact_out.feature_grids[0], rotation=xticks_rotation)
if feature_types[1] == 'onehot':
ax.set_yticks(range(Y.shape[0]))
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
elif feature_types[1] == 'binary':
ax.set_yticks([0, 1])
ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
else:
if x_quantile:
ax.set_yticks(range(len(pdp_interact_out.feature_grids[1])))
ax.set_yticklabels(pdp_interact_out.feature_grids[1])
level = np.min([X.shape[0], X.shape[1]])
    c1 = ax.contourf(X, Y, Z, level, origin='lower', cmap=contour_cmap)
c2 = ax.contour(c1, levels=c1.levels, colors=contour_color, origin='lower')
    ax.clabel(c2, fontsize=9, inline=1)
ax.set_xlabel(feature_names[0], fontsize=10)
ax.set_ylabel(feature_names[1], fontsize=10)
ax.get_yaxis().tick_right()
if fig is not None:
cax = fig.add_axes([0, 0, 0, 0], axes_locator=ColorBarLocator(ax))
fig.colorbar(c1, cax=cax, orientation='horizontal')
| 2.671875
| 3
|
tests/basic_step_tests.py
|
kodexa-ai/kodexa
| 1
|
12542
|
import os
import pytest
from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer
def get_test_directory():
return os.path.dirname(os.path.abspath(__file__)) + "/../test_documents/"
@pytest.mark.skip
def test_html_rollup():
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news.kdxa'), 'rb').read())
# before rollup
assert document.select('//a')[0].content == 'HSBC'
assert document.select('//a')[1].content == 'Hang Seng Index'
assert len(document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_content_parts()) == 1
# Collapse out all the <a> tags
step = RollupTransformer(collapse_type_res=["a"])
step.process(document)
# after rollup
assert len(document.select('//a')) == 0
# see where the href rolled up
assert document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_all_content() == 'The London-headquartered bank is a heavyweight component of the Hang Seng Index . HSBC shares in Hong Kong closed 2.78% lower.'
assert len(document.select('//*[contentRegex(".*Hang Seng Index.*")]')[0].get_content_parts()) == 3
def test_tag_key_value():
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store')
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 45
assert context.get_store('test_store').rows[14][0] == 'LOC'
assert context.get_store('test_store').rows[14][1] == 'Europe'
def test_tag_key_value_include_exclude():
# Testing include parameter
include_tags = ['DATE', 'LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 11
# Testing exclude parameter
exclude_tags = ['DATE', 'LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', exclude=exclude_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 34
# Testing both include and exclude parameters
include_tags = ['LOC']
exclude_tags = ['DATE']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags, exclude=exclude_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 5
    # Testing 'include' only - this should match the previous result, since 'exclude' shouldn't really have done anything
include_tags = ['LOC']
document = Document.from_msgpack(open(os.path.join(get_test_directory(), 'news-tagged.kdxa'), 'rb').read())
step = TagsToKeyValuePairExtractor(store_name='test_store', include=include_tags)
context = PipelineContext()
step.process(document, context)
assert context.get_store('test_store').count() == 5
@pytest.mark.skip
def test_rollup_of_pdf():
# first test - collapsing words and lines up to their common parent
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
# how many pre-rollup lines?
assert len(test_doc.select('//line')) == 3824
# how many pre-rollup words?
assert len(test_doc.select('//word')) == 52903
# how many pre-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the pre-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(RollupTransformer(collapse_type_res=["word", "line"], separator_character=' '))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# how many post-rollup lines?
assert len(test_doc.select('//line')) == 0
# how many post-rollup words?
assert len(test_doc.select('//word')) == 0
# how many post-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
# second test - just collapse the line up to its parent (content-area) - roll up the line's children
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(
RollupTransformer(collapse_type_res=["line"], separator_character=' ', get_all_content=True))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# how many post-rollup lines?
assert len(test_doc.select('//line')) == 0
# how many post-rollup words?
assert len(test_doc.select('//word')) == 0
# how many post-rollup content-areas?
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
# verify that we can collapse line nodes AND include their children
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
# third test - select specific nodes in which we'll do the roll ups
test_doc = Document.from_kdxa(get_test_directory() + '20200709.kdxa')
node_selector = "//content-area[contentRegex('.*LOAN AGREEMENT.*', true)]"
# verify we have 3 nodes match this selector
node_matches = test_doc.select(node_selector)
assert len(node_matches) == 3
# before we rollup, let's make sure the matching nodes conform to known expectations
assert len(node_matches[0].select('//word')) == 2
assert len(node_matches[0].select('//line')) == 1
assert len(node_matches[0].select('//content-area')) == 1
assert len(node_matches[0].get_all_content()) == 14
assert len(node_matches[1].select('//word')) == 2
assert len(node_matches[1].select('//line')) == 1
assert len(node_matches[1].select('//content-area')) == 1
assert len(node_matches[1].get_all_content()) == 14
assert len(node_matches[2].select('//word')) == 71
assert len(node_matches[2].select('//line')) == 6
assert len(node_matches[2].select('//content-area')) == 1
assert len(node_matches[2].get_all_content()) == 500
rollup_pipeline = Pipeline(test_doc)
rollup_pipeline.add_step(RollupTransformer(selector="//content-area[contentRegex('.*LOAN AGREEMENT.*', true)]",
collapse_type_res=["line"], separator_character=' ',
get_all_content=True))
rollup_pipeline.run()
collapsed_doc = rollup_pipeline.context.output_document
# check those matching nodes - we shouldn't have any words or lines, but
# all other node_types should exist and the content should stay the same.
assert len(node_matches[0].select('//word')) == 0
assert len(node_matches[0].select('//line')) == 0
assert len(node_matches[0].select('//content-area')) == 1
assert len(node_matches[0].get_all_content()) == 14
assert len(node_matches[1].select('//word')) == 0
assert len(node_matches[1].select('//line')) == 0
assert len(node_matches[1].select('//content-area')) == 1
assert len(node_matches[1].get_all_content()) == 14
assert len(node_matches[2].select('//word')) == 0
assert len(node_matches[2].select('//line')) == 0
assert len(node_matches[2].select('//content-area')) == 1
assert len(node_matches[2].get_all_content()) == 500
# how many post-rollup lines? (still have some lines, but fewer than we started with)
assert len(test_doc.select('//line')) == 3816
# how many post-rollup words? (still have some words, but fewer than we started with)
assert len(test_doc.select('//word')) == 52828
# how many post-rollup content-areas? (same number of content-areas)
assert len(test_doc.select('//content-area')) == 817
# what is the post-rollup length of ALL the content in the document?
assert len(test_doc.get_root().get_all_content()) == 329792
# verify that we can collapse line nodes AND include their children
assert len(collapsed_doc.select("//content-area")[12].get_all_content()) == 235
| 2.171875
| 2
|
dftimewolf/lib/containers/__init__.py
|
fooris/dftimewolf
| 1
|
12543
|
<reponame>fooris/dftimewolf<filename>dftimewolf/lib/containers/__init__.py
"""Make containers available here."""
from .report import Report
from .threat_intelligence import ThreatIntelligence
from .stackdriver import StackdriverLogs
| 1.210938
| 1
|
egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py
|
COMEA-TUAS/mcx-public
| 0
|
12544
|
<reponame>COMEA-TUAS/mcx-public
#!/usr/bin/env python3
"""Script for simulating IOT measurement stream to ModelConductor experiment."""
import pandas as pd
import numpy as np
import sqlalchemy as sqla
from datetime import datetime as dt
from time import sleep, time
import logging
import sys, os, asyncio
from hbmqtt.client import MQTTClient, ConnectException
from hbmqtt.version import get_version
from docopt import docopt
from hbmqtt.utils import read_yaml_config
from hbmqtt.mqtt.constants import QOS_0, QOS_1, QOS_2
logger = logging.getLogger(__name__)
formatter = "[%(asctime)s] :: %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=formatter)
csv_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'experiment_2019-10-03_20-37-36.csv')
data = np.random.rand(100, 4)
data = np.insert(data, 0, np.arange(100), axis=1)
data = pd.DataFrame(data, columns =['time', 'A', 'B', 'C', 'D'])
BROKER_URL = "mqtt://localhost:1883"
def main():
if sys.version_info[:2] < (3, 4):
logger.fatal("Error: Python 3.4+ is required")
sys.exit(-1)
config = None
config = read_yaml_config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_client.yaml'))
logger.debug("Using default configuration")
loop = asyncio.get_event_loop()
client_id = "mqtt_publisher_exp"
client = MQTTClient(client_id=client_id, config=config, loop=loop)
try:
logger.info("%s Connecting to broker" % client.client_id)
loop.run_until_complete(client.connect(uri=BROKER_URL))
qos = QOS_1
topic = "topic_1"
for _, row in data.iterrows():
row['TIMING_client_request_timestamp'] = time()
message = row.to_json().encode(encoding='utf-8')
logger.info("%s Publishing to '%s'" % (client.client_id, topic))
loop.run_until_complete(client.publish(topic, message, qos))
sleep(0.1)
except KeyboardInterrupt:
loop.run_until_complete(client.disconnect())
logger.info("%s Disconnected from broker" % client.client_id)
except ConnectException as ce:
logger.fatal("connection to '%s' failed: %r" % (BROKER_URL, ce))
except asyncio.CancelledError as cae:
logger.fatal("Publish canceled due to prvious error")
if __name__ == "__main__":
main()
| 2.078125
| 2
|
5/challenge2.py
|
roryeiffe/Adent-of-Code
| 0
|
12545
|
<reponame>roryeiffe/Adent-of-Code
import sys
import math
L = []
f = open(sys.argv[1],"r")
for item in f:
L.append(item.strip())
def find_id(sequence):
rows = sequence[:7]
seats = sequence[7:]
upper = 127
lower = 0
for letter in rows:
half = math.ceil((upper-lower)/2)
if letter == "F":
upper -= half
if letter == "B":
lower += half
row = upper
lower = 0
upper = 7
for letter in seats:
half = math.ceil((upper-lower)/2)
if letter == "L":
upper -= half
if letter == "R":
lower += half
seat = lower
return 8*row+seat
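# Illustrative check (the well-known example for this puzzle):
# find_id("FBFBBFFRLR") decodes to row 44, seat 5, so it returns 8 * 44 + 5 == 357.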
ids = []
max_id = 0
for sequence in L:
id = find_id(sequence)
ids.append(id)
if id > max_id:
max_id = id
ids.sort()
old = 35
for id in ids:
print(id)
old = id
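# One way the `old` tracker could be used to locate the missing seat id
# (illustrative sketch, assuming exactly one gap in the sorted ids):
#   for id in ids:
#       if id - old == 2:
#           print("my seat:", id - 1)
#       old = id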
| 3.09375
| 3
|
Injector/injector.py
|
MateusGabi/Binary-Hacking-on-Super-Mario
| 1
|
12546
|
# -*- coding: utf-8 -*-
"""
Injector.
Given a binary file, a binary table generated with the Finder,
and a replacement file, the Injector can inject text
into the binary, replacing the in-game text.
The Injector automatically fits the text to the size of the text box,
truncating it when it is too long and placing the line breaks correctly.
@author <NAME>
"""
from __future__ import print_function
import os
import sys
import binascii
import pickle
class Injector:
def __init__(self, sfc, tbl, substituto):
self.sfc = sfc
self.tbl = tbl
self.substituto = substituto
self.bytefile = None
self.dictionary = None
self.inv_dictionary = None
self.offset = 0
"""
    Reads the file and returns its bytes as an array of bytes
"""
def fileToByteArray(self):
with open(self.sfc, 'rb') as f:
hexdata = binascii.hexlify(f.read())
self.bytefile = map(''.join, zip(hexdata[::2], hexdata[1::2]))
"""
    Reads the binary conversion table
"""
def readBinaryTbl(self):
with open(self.tbl, 'rb') as btblobj:
self.dictionary = pickle.Unpickler(btblobj).load()
self.offset = self.dictionary["offset"]
del self.dictionary["offset"]
self.inv_dictionary = {v: k for k, v in self.dictionary.items()}
def inject(self):
_txt = []
char_count = 0
with open(self.substituto, "r") as _txtfile:
_txt = _txtfile.read().replace('\n', '')
for numero_linha in xrange(1, 9):
for numero_coluna in xrange(1, 18):
try:
self.bytefile[self.offset] = self.inv_dictionary[_txt[char_count]]
                    if numero_coluna == 18:
self.bytefile[self.offset] = self.inv_dictionary[_txt[char_count]+"\n"]
except IndexError:
pass
char_count = char_count + 1
self.offset = self.offset + 1
# with open(self.sfc.replace(".sfc", ".modified.sfc"), "wb") as sfc_file:
sfc_file = open(self.sfc.replace(".sfc", ".modified.sfc"), "wb")
for byte in self.bytefile:
sfc_file.write(
binascii.unhexlify(byte)
)
"""
    Entry point of the class
"""
def run(self):
self.fileToByteArray()
self.readBinaryTbl()
self.inject()
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Use: python extractor.py [sfc] [tbl] [substituto]")
sys.exit(1)
sfc = sys.argv[1]
tbl = sys.argv[2]
substituto = sys.argv[3]
if os.path.exists(sfc) and os.path.isfile(tbl):
inj = Injector(sfc, tbl, substituto)
inj.run()
| 3.046875
| 3
|
var/spack/repos/scs_io/packages/cudnn/package.py
|
scs-lab/spack
| 0
|
12547
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
# Latest versions available at:
# https://developer.nvidia.com/rdp/cudnn-download
# Archived versions available at:
# https://developer.nvidia.com/rdp/cudnn-archive
# Note that download links don't work from command line,
# need to use modified URLs like in url_for_version.
maintainers = ['adamjstewart']
# cuDNN 8.0.2
version('8.0.2.39-11.0-linux-x64',
sha256='672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345')
version('8.0.2.39-11.0-linux-ppc64le',
sha256='b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057')
version('8.0.2.39-10.2-linux-x64',
sha256='c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f')
version('8.0.2.39-10.2-linux-ppc64le',
sha256='c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262')
version('8.0.2.39-10.1-linux-x64',
sha256='82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d')
version('8.0.2.39-10.1-linux-ppc64le',
sha256='8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9')
# cuDNN 8.0
version('8.0.0.180-11.0-linux-x64',
sha256='9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e')
version('8.0.0.180-11.0-linux-ppc64le',
sha256='1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060')
version('8.0.0.180-10.2-linux-x64',
sha256='0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d')
version('8.0.0.180-10.2-linux-ppc64le',
sha256='59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f')
# cuDNN 7.6.5
version('7.6.5.32-10.2-linux-x64',
sha256='600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20',
preferred=True)
version('7.6.5.32-10.2-linux-ppc64le',
sha256='7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b')
version('7.6.5.32-10.1-linux-x64',
sha256='7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3')
version('7.6.5.32-10.1-osx-x64',
sha256='8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff')
version('7.6.5.32-10.1-linux-ppc64le',
sha256='97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56')
version('7.6.5.32-10.0-linux-x64',
sha256='28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba')
version('7.6.5.32-10.0-osx-x64',
sha256='6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47')
version('7.6.5.32-10.0-linux-ppc64le',
sha256='b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b')
version('7.6.5.32-9.2-linux-x64',
sha256='a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4')
version('7.6.5.32-9.2-linux-ppc64le',
sha256='a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76')
version('7.6.5.32-9.0-linux-x64',
sha256='bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8')
# cuDNN 7.6.4
version('7.6.4.38-10.1-linux-x64',
sha256='32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099')
version('7.6.4.38-10.1-osx-x64',
sha256='bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236')
version('7.6.4.38-10.1-linux-ppc64le',
sha256='f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce')
version('7.6.4.38-10.0-linux-x64',
sha256='417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf')
version('7.6.4.38-10.0-osx-x64',
sha256='af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1')
version('7.6.4.38-10.0-linux-ppc64le',
sha256='c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb')
version('7.6.4.38-9.2-linux-x64',
sha256='c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27')
version('7.6.4.38-9.2-linux-ppc64le',
sha256='98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884')
version('7.6.4.38-9.0-linux-x64',
sha256='8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d')
# cuDNN 7.6.3
version('7.6.3.30-10.1-linux-x64',
sha256='352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961')
version('7.6.3.30-10.1-linux-ppc64le',
sha256='f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee')
# cuDNN 7.5.1
version('7.5.1.10-10.1-linux-x64',
sha256='2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0')
version('7.5.1.10-10.1-linux-ppc64le',
sha256='a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663')
version('7.5.1.10-10.0-linux-x64',
sha256='c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985')
version('7.5.1.10-10.0-linux-ppc64le',
sha256='d9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70')
# cuDNN 7.5.0
version('7.5.0.56-10.1-linux-x64',
sha256='c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8')
version('7.5.0.56-10.1-linux-ppc64le',
sha256='15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638')
version('7.5.0.56-10.0-linux-x64',
sha256='701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781')
version('7.5.0.56-10.0-linux-ppc64le',
sha256='f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a')
# cuDNN 7.3.0
version('7.3.0.29-9.0-linux-x64',
sha256='403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b')
# cuDNN 7.2.1
version('7.2.1.38-9.0-linux-x64',
sha256='cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37')
# cuDNN 7.1.3
version('7.1.3-9.1-linux-x64',
sha256='dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228')
version('7.1.3-9.1-linux-ppc64le',
sha256='e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931')
# cuDNN 6.0
version('6.0-8.0-linux-x64',
sha256='9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c')
# cuDNN 5.1
version('5.1-8.0-linux-x64',
sha256='c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce')
# CUDA 10.2
depends_on('cuda@10.2.0:11.0.2', when='@7.6.5.32-10.2-linux-x64')
# CUDA 10.1
depends_on('cuda@10.1.0:11.0.2', when='@7.6.5.32-10.1-osx-x64')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.5.32-10.1-linux-x64')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.5.32-10.1-linux-ppc64le')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.4.38-10.1-osx-x64')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.4.38-10.1-linux-x64')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.4.38-10.1-linux-ppc64le')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.3.30-10.1-linux-x64')
depends_on('cuda@10.1.0:11.0.2', when='@7.6.3.30-10.1-linux-ppc64le')
depends_on('cuda@10.1.0:10.1.999', when='@7.5.0.56-10.1-linux-x64')
depends_on('cuda@10.1.0:10.1.999', when='@7.5.0.56-10.1-linux-ppc64le')
# CUDA 10.0
depends_on('cuda@10.0.0:11.0.2', when='@7.6.5.32-10.0-osx-x64')
depends_on('cuda@10.0.0:11.0.2', when='@7.6.5.32-10.0-linux-x64')
depends_on('cuda@10.0.0:11.0.2', when='@7.6.5.32-10.0-linux-ppc64le')
depends_on('cuda@10.0.0:11.0.2', when='@7.6.4.38-10.0-osx-x64')
depends_on('cuda@10.0.0:11.0.2', when='@7.6.4.38-10.0-linux-x64')
depends_on('cuda@10.0.0:11.0.2', when='@7.6.4.38-10.0-linux-ppc64le')
depends_on('cuda@10.0.0:11.0.2', when='@7.5.1.10-10.0-linux-x64')
depends_on('cuda@10.0.0:11.0.2', when='@7.5.1.10-10.0-linux-ppc64le')
depends_on('cuda@10.0.0:11.0.2', when='@7.5.0.56-10.0-linux-x64')
depends_on('cuda@10.0.0:11.0.2', when='@7.5.0.56-10.0-linux-ppc64le')
# CUDA 9.2
depends_on('cuda@9.2.0:9.2.999', when='@7.6.5.32-9.2-linux-x64')
depends_on('cuda@9.2.0:9.2.999', when='@7.6.5.32-9.2-linux-ppc64le')
depends_on('cuda@9.2.0:9.2.999', when='@7.6.4.38-9.2-linux-x64')
depends_on('cuda@9.2.0:9.2.999', when='@7.6.4.38-9.2-linux-ppc64le')
# CUDA 9.1
depends_on('cuda@9.1.0:9.1.999', when='@7.1.3-9.1-linux-x64')
depends_on('cuda@9.1.0:9.1.999', when='@7.1.3-9.1-linux-ppc64le')
# CUDA 9.0
depends_on('cuda@9.0.0:9.0.999', when='@7.6.5.32-9.0-linux-x64')
depends_on('cuda@9.0.0:9.0.999', when='@7.6.4.38-9.0-linux-x64')
depends_on('cuda@9.0.0:9.0.999', when='@7.3.0.29-9.0-linux-x64')
depends_on('cuda@9.0.0:9.0.999', when='@7.2.1.38-9.0-linux-x64')
# CUDA 8.0
depends_on('cuda@8.0.0:8.0.999', when='@6.0-8.0-linux-x64')
depends_on('cuda@8.0.0:8.0.999', when='@5.1-8.0-linux-x64')
def url_for_version(self, version):
url = 'https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-v{2}.tgz'
if version >= Version('7.2'):
directory = version[:3]
ver = version[:4]
cuda = version[4:]
elif version >= Version('7.1'):
directory = version[:3]
ver = version[:2]
cuda = version[3:]
elif version >= Version('7.0'):
directory = version[:3]
ver = version[0]
cuda = version[3:]
else:
directory = version[:2]
ver = version[:2]
cuda = version[2:]
return url.format(directory, cuda, ver)
def setup_run_environment(self, env):
if 'target=ppc64le: platform=linux' in self.spec:
env.set('cuDNN_ROOT', os.path.join(
self.prefix, 'targets', 'ppc64le-linux'))
def install(self, spec, prefix):
install_tree('.', prefix)
if 'target=ppc64le: platform=linux' in spec:
symlink(os.path.join(prefix, 'targets', 'ppc64le-linux', 'lib'),
prefix.lib)
symlink(
os.path.join(prefix, 'targets', 'ppc64le-linux', 'include'),
prefix.include)
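    # Illustrative usage (assuming a matching CUDA spec is available in the environment):
    #   spack install cudnn@7.6.5.32-10.2-linux-x64 ^cuda@10.2.89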
| 1.703125
| 2
|
python/svm.py
|
mwalton/em-machineLearning
| 0
|
12548
|
import numpy as np
import argparse
import os.path
import plots as plot
from sklearn.preprocessing import StandardScaler
from sklearn.grid_search import GridSearchCV
import time
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import StratifiedKFold
def loadData(XPath, yPath):
X = np.genfromtxt(XPath, delimiter=",", dtype="float32")
y = np.genfromtxt(yPath, delimiter=",", dtype="float32")
return (X, y)
def convertToClasses(targetVector):
return np.argmax(targetVector[:,1:5], axis=1)
def standardize(featureVector):
scaler = StandardScaler()
return scaler.fit_transform(featureVector)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-x", "--xTrain", required = True,
help = "path to training feature set")
ap.add_argument("-y", "--yTrain", required = True,
help = "path to training target set")
ap.add_argument("-X", "--xTest", required = True,
help = "path to testing feature set")
ap.add_argument("-Y", "--yTest", required = True,
help = "path to testing target set")
ap.add_argument("-o", "--optimize", type = int, default = 0,
help = "optomization mode: 0 use default, 1 optomize, 2 use pkl model if possible")
ap.add_argument("-m", "--multiClass", type = int, default=1,
help = "exclusive multi class or regression")
ap.add_argument("-p", "--pickle", default="models/svmModel.pkl",
help = "pickle dump of model (output if optomize = 1, input if optomize = 0)")
ap.add_argument("-v", "--visualize", type=int, default=0,
help = "whether or not to show visualizations after a run")
args = vars(ap.parse_args())
(trainX, trainY) = loadData(args["xTrain"], args["yTrain"])
(testX, testY) = loadData(args["xTest"], args["yTest"])
# required scaling for SVM
trainX = standardize(trainX)
testX = standardize(testX)
if (args["multiClass"] == 1):
trainY = convertToClasses(trainY)
testY = convertToClasses(testY)
# check to see if a grid search should be done
if args["optimize"] == 1:
#configure stratified k-fold cross validation
cv = StratifiedKFold(y=trainY, n_folds=4, shuffle=True)
# perform a grid search on the 'C' and 'gamma' parameter
# of SVM
print "SEARCHING SVM"
C_range = 2. ** np.arange(-15, 15, step=1)
gamma_range = 2. ** np.arange(-15, 15, step=1)
param_grid = dict(gamma=gamma_range, C=C_range)
start = time.time()
gs = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv, n_jobs = -1, verbose = 2)
gs.fit(trainX, trainY)
# print diagnostic information to the user and grab the
# best model
print "done in %0.3fs" % (time.time() - start)
print "best score: %0.3f" % (gs.best_score_)
print "SVM PARAMETERS"
bestParams = gs.best_estimator_.get_params()
# loop over the parameters and print each of them out
# so they can be manually set
print("Best Estimator: %s" % gs.best_estimator_)
#for p in sorted(params.keys()):
# print "\t %s: %f" % (p, bestParams[p])
print("Accuracy Score On Validation Set: %s\n" % accuracy_score(testY, gs.predict(testX)))
# show a reminder message
print "\nIMPORTANT"
print "Now that your parameters have been searched, manually set"
print "them and re-run this script with --optomize 0"
joblib.dump(gs.best_estimator_, args["pickle"])
# otherwise, use the manually specified parameters
else:
# evaluate using SVM
if (os.path.isfile(args["pickle"]) and args["optimize"] == 2):
clf = joblib.load(args["pickle"])
else:
clf = svm.SVC()
clf.fit(trainX, trainY)
print "SVM PERFORMANCE"
pred = clf.predict(testX)
print classification_report(testY, pred)
print("Accuracy Score: %s\n" % accuracy_score(testY, pred))
if (args["visualize"] == 1):
plot.accuracy(testY, pred, "SVM")
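# Illustrative invocation (the csv file names below are placeholders):
#   python svm.py -x xTrain.csv -y yTrain.csv -X xTest.csv -Y yTest.csv -o 1 -v 1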
| 2.453125
| 2
|
aio_logstash/formatter.py
|
SinaKhorami/aio-logstash
| 4
|
12549
|
<gh_stars>1-10
import abc
import json
import logging
import socket
import sys
import time
import aio_logstash
import traceback
from aio_logstash import constants
from datetime import datetime, date
class BaseFormatter(logging.Formatter):
def __init__(self, message_type='aio_logstash', fqdn=False):
super().__init__()
self._message_type = message_type
self._host = socket.getfqdn() if fqdn else socket.gethostname()
self._interpreter = sys.executable
        self._interpreter_version = '{major}.{minor}.{micro}'.format(
major=sys.version_info.major,
minor=sys.version_info.minor,
micro=sys.version_info.micro
)
self._program_name = sys.argv[0]
@staticmethod
def _format_timestamp(_time):
tstamp = datetime.utcfromtimestamp(_time)
return tstamp.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (tstamp.microsecond / 1000) + "Z"
@staticmethod
def _format_stack_trace(exc_info):
if exc_info:
return ''.join(traceback.format_exception(*exc_info))
return None
@staticmethod
def _serialize(message):
return bytes(json.dumps(message), encoding='utf-8')
@abc.abstractmethod
def format(self, record):
pass
def _get_base_fields(self):
base_fields = {
'host': self._host,
'type': self._message_type,
'interpreter': self._interpreter,
            'interpreter_version': self._interpreter_version,
'program': self._program_name,
'aio_logstash_version': aio_logstash.__version__,
}
return base_fields
def _get_record_fields(self, record):
record_fields = {
'message': record.getMessage(),
'pid': record.process,
'func_name': record.funcName,
'line': record.lineno,
'logger_name': record.name,
'path': record.pathname,
'thread_name': record.threadName,
'level': record.levelname,
'process_name': record.processName,
'stack_trace': self._format_stack_trace(record.exc_info)
}
return record_fields
def _get_extra_fields(self, record):
extra_fields = dict()
for k, v in record.__dict__.items():
if k not in constants.LOG_RECORD_DEFAULT_ATTRIBUTES:
extra_fields[k] = self._get_value_repr(v)
return extra_fields
def _get_value_repr(self, value):
easy_types = (bool, float, type(None), str, int)
if isinstance(value, dict):
return {k: self._get_value_repr(v) for k, v in value.items()}
elif isinstance(value, (tuple, list)):
return [self._get_value_repr(v) for v in value]
elif isinstance(value, (datetime, date)):
return self._format_timestamp(time.mktime(value.timetuple()))
elif isinstance(value, easy_types):
return value
else:
return repr(value)
class V1Formatter(BaseFormatter):
def format(self, record):
message = {
'@timestamp': self._format_timestamp(record.created),
'@version': '1'
}
base_fields = self._get_base_fields()
message.update(base_fields)
record_fields = self._get_record_fields(record)
message.update(record_fields)
extra_fields = self._get_extra_fields(record)
message.update({
'extra': extra_fields
})
return self._serialize(message)
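# Hypothetical wiring sketch: the formatter is meant to be attached to a
# logstash-oriented logging handler, roughly handler.setFormatter(V1Formatter()).
# Note that format() returns utf-8 encoded bytes, so the handler must write bytes.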
| 2.109375
| 2
|
.venv/lib/python2.7/site-packages/celery/events/cursesmon.py
|
MansoorHanif/FYP-web-app
| 4
|
12550
|
# -*- coding: utf-8 -*-
"""Graphical monitor of Celery events using curses."""
from __future__ import absolute_import, print_function, unicode_literals
import curses
import sys
import threading
from datetime import datetime
from itertools import count
from textwrap import wrap
from time import time
from math import ceil
from celery import VERSION_BANNER
from celery import states
from celery.app import app_or_default
from celery.five import items, values
from celery.utils.text import abbr, abbrtask
__all__ = ['CursesMonitor', 'evtop']
BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16
# this module is considered experimental
# we don't care about coverage.
STATUS_SCREEN = """\
events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
"""
class CursesMonitor(object): # pragma: no cover
"""A curses based Celery task monitor."""
keymap = {}
win = None
screen_delay = 10
selected_task = None
selected_position = 0
selected_str = 'Selected: '
foreground = curses.COLOR_BLACK
background = curses.COLOR_WHITE
online_str = 'Workers online: '
help_title = 'Keys: '
help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit')
greet = 'celery events {0}'.format(VERSION_BANNER)
info_str = 'Info: '
def __init__(self, state, app, keymap=None):
self.app = app
self.keymap = keymap or self.keymap
self.state = state
default_keymap = {
'J': self.move_selection_down,
'K': self.move_selection_up,
'C': self.revoke_selection,
'T': self.selection_traceback,
'R': self.selection_result,
'I': self.selection_info,
'L': self.selection_rate_limit,
}
self.keymap = dict(default_keymap, **self.keymap)
self.lock = threading.RLock()
def format_row(self, uuid, task, worker, timestamp, state):
mx = self.display_width
# include spacing
detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH
if uuid_space < UUID_WIDTH:
uuid_width = uuid_space
else:
uuid_width = UUID_WIDTH
detail_width = detail_width - uuid_width - 1
task_width = int(ceil(detail_width / 2.0))
worker_width = detail_width - task_width - 1
uuid = abbr(uuid, uuid_width).ljust(uuid_width)
worker = abbr(worker, worker_width).ljust(worker_width)
task = abbrtask(task, task_width).ljust(task_width)
state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task,
timestamp, state)
if self.screen_width is None:
self.screen_width = len(row[:mx])
return row[:mx]
@property
def screen_width(self):
_, mx = self.win.getmaxyx()
return mx
@property
def screen_height(self):
my, _ = self.win.getmaxyx()
return my
@property
def display_width(self):
_, mx = self.win.getmaxyx()
return mx - BORDER_SPACING
@property
def display_height(self):
my, _ = self.win.getmaxyx()
return my - 10
@property
def limit(self):
return self.display_height
def find_position(self):
if not self.tasks:
return 0
for i, e in enumerate(self.tasks):
if self.selected_task == e[0]:
return i
return 0
def move_selection_up(self):
self.move_selection(-1)
def move_selection_down(self):
self.move_selection(1)
def move_selection(self, direction=1):
if not self.tasks:
return
pos = self.find_position()
try:
self.selected_task = self.tasks[pos + direction][0]
except IndexError:
self.selected_task = self.tasks[0][0]
keyalias = {curses.KEY_DOWN: 'J',
curses.KEY_UP: 'K',
curses.KEY_ENTER: 'I'}
def handle_keypress(self):
try:
key = self.win.getkey().upper()
except Exception: # pylint: disable=broad-except
return
key = self.keyalias.get(key) or key
handler = self.keymap.get(key)
if handler is not None:
handler()
def alert(self, callback, title=None):
self.win.erase()
my, mx = self.win.getmaxyx()
y = blank_line = count(2)
if title:
self.win.addstr(next(y), 3, title,
curses.A_BOLD | curses.A_UNDERLINE)
next(blank_line)
callback(my, mx, next(y))
self.win.addstr(my - 1, 0, 'Press any key to continue...',
curses.A_BOLD)
self.win.refresh()
while 1:
try:
return self.win.getkey().upper()
except Exception: # pylint: disable=broad-except
pass
def selection_rate_limit(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if not task.name:
return curses.beep()
my, mx = self.win.getmaxyx()
r = 'New rate limit: '
self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
rlimit = self.readline(my - 2, 3 + len(r))
if rlimit:
reply = self.app.control.rate_limit(task.name,
rlimit.strip(), reply=True)
self.alert_remote_control_reply(reply)
def alert_remote_control_reply(self, reply):
def callback(my, mx, xs):
y = count(xs)
if not reply:
self.win.addstr(
next(y), 3, 'No replies received in 1s deadline.',
curses.A_BOLD + curses.color_pair(2),
)
return
for subreply in reply:
curline = next(y)
host, response = next(items(subreply))
host = '{0}: '.format(host)
self.win.addstr(curline, 3, host, curses.A_BOLD)
attr = curses.A_NORMAL
text = ''
if 'error' in response:
text = response['error']
attr |= curses.color_pair(2)
elif 'ok' in response:
text = response['ok']
attr |= curses.color_pair(3)
self.win.addstr(curline, 3 + len(host), text, attr)
return self.alert(callback, 'Remote Control Command Replies')
def readline(self, x, y):
buffer = str()
curses.echo()
try:
i = 0
while 1:
ch = self.win.getch(x, y + i)
if ch != -1:
if ch in (10, curses.KEY_ENTER): # enter
break
if ch in (27,):
buffer = str()
break
buffer += chr(ch)
i += 1
finally:
curses.noecho()
return buffer
def revoke_selection(self):
if not self.selected_task:
return curses.beep()
reply = self.app.control.revoke(self.selected_task, reply=True)
self.alert_remote_control_reply(reply)
def selection_info(self):
if not self.selected_task:
return
def alert_callback(mx, my, xs):
my, mx = self.win.getmaxyx()
y = count(xs)
task = self.state.tasks[self.selected_task]
info = task.info(extra=['state'])
infoitems = [
('args', info.pop('args', None)),
('kwargs', info.pop('kwargs', None))
] + list(info.items())
for key, value in infoitems:
if key is None:
continue
value = str(value)
curline = next(y)
keys = key + ': '
self.win.addstr(curline, 3, keys, curses.A_BOLD)
wrapped = wrap(value, mx - 2)
if len(wrapped) == 1:
self.win.addstr(
curline, len(keys) + 3,
abbr(wrapped[0],
self.screen_width - (len(keys) + 3)))
else:
for subline in wrapped:
nexty = next(y)
if nexty >= my - 1:
subline = ' ' * 4 + '[...]'
elif nexty >= my:
break
self.win.addstr(
nexty, 3,
abbr(' ' * 4 + subline, self.screen_width - 4),
curses.A_NORMAL,
)
return self.alert(
alert_callback, 'Task details for {0.selected_task}'.format(self),
)
def selection_traceback(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if task.state not in states.EXCEPTION_STATES:
return curses.beep()
def alert_callback(my, mx, xs):
y = count(xs)
for line in task.traceback.split('\n'):
self.win.addstr(next(y), 3, line)
return self.alert(
alert_callback,
'Task Exception Traceback for {0.selected_task}'.format(self),
)
def selection_result(self):
if not self.selected_task:
return
def alert_callback(my, mx, xs):
y = count(xs)
task = self.state.tasks[self.selected_task]
result = (getattr(task, 'result', None) or
getattr(task, 'exception', None))
for line in wrap(result or '', mx - 2):
self.win.addstr(next(y), 3, line)
return self.alert(
alert_callback,
'Task Result for {0.selected_task}'.format(self),
)
def display_task_row(self, lineno, task):
state_color = self.state_colors.get(task.state)
attr = curses.A_NORMAL
if task.uuid == self.selected_task:
attr = curses.A_STANDOUT
timestamp = datetime.utcfromtimestamp(
task.timestamp or time(),
)
timef = timestamp.strftime('%H:%M:%S')
hostname = task.worker.hostname if task.worker else '*NONE*'
line = self.format_row(task.uuid, task.name,
hostname,
timef, task.state)
self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)
if state_color:
self.win.addstr(lineno,
len(line) - STATE_WIDTH + BORDER_SPACING - 1,
task.state, state_color | attr)
def draw(self):
with self.lock:
win = self.win
self.handle_keypress()
x = LEFT_BORDER_OFFSET
y = blank_line = count(2)
my, mx = win.getmaxyx()
win.erase()
win.bkgd(' ', curses.color_pair(1))
win.border()
win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
next(blank_line)
win.addstr(next(y), x, self.format_row('UUID', 'TASK',
'WORKER', 'TIME', 'STATE'),
curses.A_BOLD | curses.A_UNDERLINE)
tasks = self.tasks
if tasks:
for row, (uuid, task) in enumerate(tasks):
if row > self.display_height:
break
if task.uuid:
lineno = next(y)
self.display_task_row(lineno, task)
# -- Footer
next(blank_line)
win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)
# Selected Task Info
if self.selected_task:
win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
info = 'Missing extended info'
detail = ''
try:
selection = self.state.tasks[self.selected_task]
except KeyError:
pass
else:
info = selection.info()
if 'runtime' in info:
info['runtime'] = '{0:.2f}'.format(info['runtime'])
if 'result' in info:
info['result'] = abbr(info['result'], 16)
info = ' '.join(
'{0}={1}'.format(key, value)
for key, value in items(info)
)
detail = '... -> key i'
infowin = abbr(info,
self.screen_width - len(self.selected_str) - 2,
detail)
win.addstr(my - 5, x + len(self.selected_str), infowin)
# Make ellipsis bold
if detail in infowin:
detailpos = len(infowin) - len(detail)
win.addstr(my - 5, x + len(self.selected_str) + detailpos,
detail, curses.A_BOLD)
else:
win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)
# Workers
if self.workers:
win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
win.addstr(my - 4, x + len(self.online_str),
', '.join(sorted(self.workers)), curses.A_NORMAL)
else:
win.addstr(my - 4, x, 'No workers discovered.')
# Info
win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
win.addstr(
my - 3, x + len(self.info_str),
STATUS_SCREEN.format(
s=self.state,
w_alive=len([w for w in values(self.state.workers)
if w.alive]),
w_all=len(self.state.workers),
),
curses.A_DIM,
)
# Help
self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
self.safe_add_str(my - 2, x + len(self.help_title), self.help,
curses.A_DIM)
win.refresh()
def safe_add_str(self, y, x, string, *args, **kwargs):
if x + len(string) > self.screen_width:
string = string[:self.screen_width - x]
self.win.addstr(y, x, string, *args, **kwargs)
def init_screen(self):
with self.lock:
self.win = curses.initscr()
self.win.nodelay(True)
self.win.keypad(True)
curses.start_color()
curses.init_pair(1, self.foreground, self.background)
# exception states
curses.init_pair(2, curses.COLOR_RED, self.background)
# successful state
curses.init_pair(3, curses.COLOR_GREEN, self.background)
# revoked state
curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
# greeting
curses.init_pair(5, curses.COLOR_BLUE, self.background)
# started state
curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)
self.state_colors = {states.SUCCESS: curses.color_pair(3),
states.REVOKED: curses.color_pair(4),
states.STARTED: curses.color_pair(6)}
for state in states.EXCEPTION_STATES:
self.state_colors[state] = curses.color_pair(2)
curses.cbreak()
def resetscreen(self):
with self.lock:
curses.nocbreak()
self.win.keypad(False)
curses.echo()
curses.endwin()
def nap(self):
curses.napms(self.screen_delay)
@property
def tasks(self):
return list(self.state.tasks_by_time(limit=self.limit))
@property
def workers(self):
return [hostname for hostname, w in items(self.state.workers)
if w.alive]
class DisplayThread(threading.Thread): # pragma: no cover
def __init__(self, display):
self.display = display
self.shutdown = False
threading.Thread.__init__(self)
def run(self):
while not self.shutdown:
self.display.draw()
self.display.nap()
def capture_events(app, state, display): # pragma: no cover
def on_connection_error(exc, interval):
print('Connection Error: {0!r}. Retry in {1}s.'.format(
exc, interval), file=sys.stderr)
while 1:
print('-> evtop: starting capture...', file=sys.stderr)
with app.connection_for_read() as conn:
try:
conn.ensure_connection(on_connection_error,
app.conf.broker_connection_max_retries)
recv = app.events.Receiver(conn, handlers={'*': state.event})
display.resetscreen()
display.init_screen()
recv.capture()
except conn.connection_errors + conn.channel_errors as exc:
print('Connection lost: {0!r}'.format(exc), file=sys.stderr)
def evtop(app=None): # pragma: no cover
"""Start curses monitor."""
app = app_or_default(app)
state = app.events.State()
display = CursesMonitor(state, app)
display.init_screen()
refresher = DisplayThread(display)
refresher.start()
try:
capture_events(app, state, display)
except Exception:
refresher.shutdown = True
refresher.join()
display.resetscreen()
raise
except (KeyboardInterrupt, SystemExit):
refresher.shutdown = True
refresher.join()
display.resetscreen()
if __name__ == '__main__': # pragma: no cover
evtop()
| 2.421875
| 2
|
features/extraction/3_extraction/feature_extractors/utilization.py
|
bayesimpact/readmission-risk
| 19
|
12551
|
"""A feature extractor for patients' utilization."""
from __future__ import absolute_import
import logging
import pandas as pd
from sutter.lib import postgres
from sutter.lib.feature_extractor import FeatureExtractor
log = logging.getLogger('feature_extraction')
class UtilizationExtractor(FeatureExtractor):
"""
Generates features related to the number of previous ER visits.
Features:
    `pre_[n]_month_[adm_type]` - Number of [adm_type] (emergency, inpatient, outpatient) visits
                                 during the [n] (3, 6, 12) months before admission
    `er_visits_lace` - LACE score associated with the number of ER visits:
                       the number of emergency visits during the 6 months
                       before admission, capped at 4.
"""
def extract(self):
query = """
SELECT
*
FROM {}.bayes_vw_feature_utilization
""".format(self._schema)
engine = postgres.get_connection()
res = pd.read_sql(query, engine)
log.info('The pre-pivot table has %d rows.' % len(res))
pivoted = pd.pivot_table(data=res, index='hsp_acct_study_id', columns='pre_adm_type',
aggfunc=sum, dropna=True, fill_value=0,
values=['pre_3_month', 'pre_6_month', 'pre_12_month'])
df_columns = [top + "_" + bottom.lower() for top, bottom in pivoted.columns.values]
df = pd.DataFrame(index=res.hsp_acct_study_id.unique())
df[df_columns] = pivoted
df.fillna(0, inplace=True)
df['er_visits_lace'] = df['pre_6_month_emergency'].apply(lambda cnt: min(cnt, 4))
return self.emit_df(df)
| 2.953125
| 3
|
safe_control_gym/math_and_models/normalization.py
|
catgloss/safe-control-gym
| 120
|
12552
|
"""Perform normalization on inputs or rewards.
"""
import numpy as np
import torch
from gym.spaces import Box
def normalize_angle(x):
"""Wraps input angle to [-pi, pi].
"""
return ((x + np.pi) % (2 * np.pi)) - np.pi
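# Quick sanity check (illustrative): normalize_angle(3 * np.pi / 2) returns -np.pi / 2,
# and normalize_angle(np.pi) maps to -np.pi (the wrap is half-open on the right).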
class RunningMeanStd():
"""Calulates the running mean and std of a data stream.
Attributes:
mean (np.array): mean of data stream.
var (np.array): variance of data stream.
count (float): total count of data steam.
"""
def __init__(self, epsilon=1e-4, shape=()):
"""Initializes containers for data mean and variance.
Args:
epsilon (float): helps with arithmetic issues.
shape (tuple): the shape of the data stream's output.
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update(self, arr):
"""Update current stats with a new stream of data.
Args:
arr (np.array): 1D array of data, (batch_size, *shape).
"""
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
"""Util function for `update` method.
"""
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
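    # Illustrative sanity check of the merge (the standard parallel mean/variance update):
    #   rms = RunningMeanStd(shape=(2,))
    #   a, b = np.random.randn(64, 2), np.random.randn(32, 2)
    #   rms.update(a); rms.update(b)
    #   rms.mean and rms.var then approximate np.vstack([a, b]).mean(0) and .var(0)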
class BaseNormalizer(object):
"""Template/default normalizer.
Attributes:
read_only (bool): if to freeze the current stats being tracked.
"""
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def __call__(self, x, *args, **kwargs):
"""Invokes normalization on the given input.
"""
return x
def state_dict(self):
"""Returns snapshot of current stats.
"""
return {}
def load_state_dict(self, _):
"""Restores the stats from a snapshot.
"""
pass
class MeanStdNormalizer(BaseNormalizer):
"""Normalize by the running average.
"""
def __init__(self, shape=(), read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
shape (tuple): shape of data being tracked.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to prevent divide-by-zero.
"""
super().__init__(read_only)
self.read_only = read_only
self.rms = RunningMeanStd(shape=shape)
self.clip = clip
self.epsilon = epsilon
def __call__(self, x):
"""Update tracker given data, optionally normalize the data.
"""
x = np.asarray(x)
if not self.read_only:
self.rms.update(x)
return np.clip(
(x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon),
-self.clip, self.clip)
def state_dict(self):
return {'mean': self.rms.mean, 'var': self.rms.var}
def load_state_dict(self, saved):
self.rms.mean = saved['mean']
self.rms.var = saved['var']
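    # Minimal usage sketch (illustrative):
    #   norm = MeanStdNormalizer(shape=(3,))
    #   normed = norm(np.random.randn(8, 3))   # updates the running stats, then standardizes
    #   snapshot = norm.state_dict()           # {'mean': ..., 'var': ...}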
class RewardStdNormalizer(MeanStdNormalizer):
"""Reward normalization by running average of returns.
Papers:
* arxiv.org/pdf/1808.04355.pdf
* arxiv.org/pdf/1810.12894.pdf
Also see:
* github.com/openai/baselines/issues/538
"""
def __init__(self, gamma=0.99, read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
gamma (float): discount factor for rewards.
read_only (bool): if to freeze the tracker.
clip (float): bounds on the data.
            epsilon (float): offset to prevent divide-by-zero.
"""
# Reward has default shape (1,) or just ().
super().__init__((), read_only, clip, epsilon)
self.gamma = gamma
self.ret = None
def __call__(self, x, dones):
"""Update tracker given reward, optionally normalize the reward (only scaling).
"""
x = np.asarray(x)
if not self.read_only:
# Track running average of forward discounted returns.
if self.ret is None:
self.ret = np.zeros(x.shape[0])
self.ret = self.ret * self.gamma + x
self.rms.update(self.ret)
# Prevent information leak from previous episodes.
            self.ret[dones.astype(bool)] = 0
return np.clip(x / np.sqrt(self.rms.var + self.epsilon), -self.clip, self.clip)
class RescaleNormalizer(BaseNormalizer):
"""Apply constant scaling.
"""
def __init__(self, coef=1.0):
"""Initializes with fixed scaling constant.
Args:
coef (float): scaling coefficient.
"""
        super().__init__()
self.coef = coef
def __call__(self, x):
"""Scale the input.
"""
if not isinstance(x, torch.Tensor):
x = np.asarray(x)
return self.coef * x
class ImageNormalizer(RescaleNormalizer):
"""Scale image pixles from [0,255] to [0,1].
"""
def __init__(self):
        super().__init__(1.0 / 255)
class ActionUnnormalizer(BaseNormalizer):
"""Assumes policy output action is in [-1,1], unnormalize it for gym env.
"""
def __init__(self, action_space):
"""Defines the mean and std for the bounded action space.
"""
super().__init__()
assert isinstance(action_space, Box), "action space must be gym.spaces.Box"
low, high = action_space.low, action_space.high
self.mean = (low + high) / 2.0
self.std = (high - low) / 2.0
def __call__(self, action):
"""Unnormalizes given input action.
"""
x = np.asarray(action)
return self.mean + x * self.std
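# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of how the normalizers above could be driven from a
# training loop; the observation shape, batch size and the all-zero `dones`
# mask below are illustrative assumptions only.
if __name__ == '__main__':
    obs_normalizer = MeanStdNormalizer(shape=(4,))
    reward_normalizer = RewardStdNormalizer(gamma=0.99)
    for _ in range(3):
        obs_batch = np.random.randn(8, 4)             # assumed batch of 8 observations
        rewards = np.random.randn(8)                  # assumed per-env rewards
        dones = np.zeros(8)                           # assumed episode-termination flags
        norm_obs = obs_normalizer(obs_batch)          # updates running stats, returns clipped z-scores
        norm_rew = reward_normalizer(rewards, dones)  # scales rewards by the running std of returns
    print(obs_normalizer.state_dict())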
| 3.328125
| 3
|
networking/pycat.py
|
itsbriany/PythonSec
| 1
|
12553
|
<reponame>itsbriany/PythonSec
#!/usr/bin/python
import socket
import threading
import sys # Support command line args
import getopt # Support command line option parsing
import os # Kill the application
import signal # Catch an interrupt
import time # Thread sleeping
# Global variables definitions
target = ""
port = False
listen = False
command = ""
upload = False
# This tool should be able to replace netcat
# The tool should be able to act as a server and as a client depending on the arguments
###############################################################################
# Start menu
def menu():
print "pycat, a python implementation of netcat"
print ""
print "Usage:"
print ""
print "-h, --help: Display this menu"
print "-t, --target: The IP to bind to"
print "-l, --listen: Listen mode (act as a server)"
print "-p, --port: The port number to bind to"
print "-c, --command: The command you wish to execute via pycat"
print "-u --upload: Set this flag to upload a file"
print ""
print ""
print "By default, pycat will act as a client unless the -p flag is specified"
print ""
print "Examples will happen later..."
print ""
sys.exit(0)
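###############################################################################
# Hypothetical usage examples (the "Examples" section of the menu above is
# still empty; these only use flags that main() below actually parses).
# Listen on all interfaces on port 9999 (server mode):
#     ./pycat.py -l -p 9999
# Bind the listener to a specific address:
#     ./pycat.py -l -t 192.168.0.10 -p 9999
# Note: passing -t/-p without -l currently only prints "Client mode"; the
# client-only path is not implemented yet in main().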
###############################################################################
# Connect as a client
def connectMode(client_socket, address):
global kill_thread
# Get raw input which is terminated with \n
try:
while True:
buffer = raw_input()
buffer += "\n"
if buffer == "quit\n" or buffer == "q\n":
client_socket.close()
sys.exit(0)
if not client_socket:
print "[!!] No connection on the other end!"
client_socket.close()
break
client_socket.send(buffer)
except Exception as err:
print "[!!] Caught exception in client thread: %s!" % err
client_socket.close()
###############################################################################
# Handle the connection from the client.
def handle_client(client_socket, address):
print "[*] Got a connection from %s:%d" % (address[0], address[1])
try:
while True:
# Wait for a response
request = client_socket.recv(4096)
# If the client disconnects, request is 0
if not request:
break
# Output what the client has given us
print request
client_socket.close()
except Exception as err:
print "[!!] Caught exception in server thread: %s" % err
client_socket.close()
sys.exit(0)
###############################################################################
# This is the listening functionality of the program
def serverMode():
global target
global port
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not len(target):
target = "0.0.0.0"
try:
server.bind((target, port))
except socket.error as err:
print err
sys.exit(0)
server.listen(5)
print "[*] Listening on %s:%d" % (target, port)
while True:
try:
# This will wait until we get a connection
client, address = server.accept()
# Create a thread to handle incoming responses
# Daemonic threads will die as soon as the main thread dies
listen_thread = threading.Thread(target = handle_client, args = (client, address))
listen_thread.daemon = True
listen_thread.start()
# Create a thread to handle outgoing requests
client_thread = threading.Thread(target = connectMode, args = (client, address))
client_thread.daemon = True
client_thread.start()
time.sleep(1)
'''
            # The problem is that python does NOT pass by reference!
This means that the sockets are simply copies and the actual socket that gets closed
does not do anything!
'''
except (KeyboardInterrupt, SystemExit):
print "Cleaning up sockets..."
client.close()
sys.stdout.write("Exiting form main thread...\n")
sys.exit(0)
###############################################################################
# main definition
def main():
global target
global listen
global port
global command
global upload
# Set the option
# If the options are not parsing properly, then try gnu_getopt
if not len(sys.argv[1:]):
menu()
try:
        options, remainder = getopt.getopt(sys.argv[1:], 'ht:lp:c:u', ['help', 'target=', 'listen', 'port=', 'command=', 'upload'])  # -t/-p/-c (and their long forms) take a value
except getopt.GetoptError as err:
print str(err)
menu()
for opt, arg in options:
if opt in ('-h', '--help'):
menu()
elif opt in ('-t', '--target'):
target = arg
elif opt in ('-l', '--listen'):
listen = True
elif opt in ('-p', '--port'):
port = int(arg)
elif opt in ('-c', '--command'):
command = arg
elif opt in ('-u', '--upload'):
upload = True
else:
assert False, "Invalid option" # This throws an error
print "Target: %s" % target
print "Listen: %s" % listen
print "Port: %d" % port
if port > 0:
if not listen and len(target):
print "Client mode"
elif listen:
serverMode()
else: # This could probably be cleaned up a little since the functions will have looping
menu()
else:
menu()
###############################################################################
# Program execution
try:
main()
except KeyboardInterrupt:
print ""
sys.exit(0)
| 3
| 3
|
code/striatal_model/neuron_model_tuning.py
|
weidel-p/go-robot-nogo-robot
| 1
|
12554
|
<gh_stars>1-10
import nest
import pylab as pl
import pickle
from nest import voltage_trace
from nest import raster_plot as rplt
import numpy as np
from params import *
seed = [np.random.randint(0, 9999999)] * num_threads
def calcFI():
#amplitudesList = np.arange(3.5,4.5,0.1)
amplitudesList = np.arange(100, 500, 50.)
listD1 = []
listD2 = []
for amp in amplitudesList:
nest.ResetKernel()
nest.SetKernelStatus({"resolution": timestep, "overwrite_files": True, "rng_seeds": seed,
"print_time": True, "local_num_threads": num_threads})
nest.CopyModel("iaf_cond_alpha", "d1", d1_params)
#nest.CopyModel("izhikevich", "d1", d1_params_iz)
nest.CopyModel("iaf_cond_alpha", "d2", d2_params)
#nest.CopyModel("izhikevich", "d2", d2_params_iz)
d1 = nest.Create("d1", 1)
d2 = nest.Create("d2", 1)
dc = nest.Create("dc_generator", 1)
sd = nest.Create("spike_detector", 2)
mult = nest.Create("multimeter", 1, params={
"withgid": True, "withtime": True, "record_from": ["V_m"]})
nest.Connect(d1, [sd[0]])
nest.Connect(d2, [sd[1]])
nest.Connect(dc, d1)
nest.Connect(dc, d2)
nest.Connect(mult, d1)
nest.Connect(mult, d2)
nest.SetStatus(dc, params={"amplitude": amp})
nest.Simulate(10000.)
evs_d1 = nest.GetStatus([sd[0]])[0]["events"]["senders"]
ts_d1 = nest.GetStatus([sd[0]])[0]["events"]["times"]
evs_d2 = nest.GetStatus([sd[1]])[0]["events"]["senders"]
ts_d2 = nest.GetStatus([sd[1]])[0]["events"]["times"]
listD1.append(len(ts_d1) / 10.0)
listD2.append(len(ts_d2) / 10.0)
# voltage_trace.from_device(mult)
# pl.show()
FI = dict()
FI["d1"] = listD1
FI["d2"] = listD2
pickle.dump(FI, open("../../data/FI.pickle", "w"))
pl.figure()
pl.text(70, 62, "A", fontweight='bold', fontsize=15)
pl.plot(amplitudesList, listD1, 'bo-', label='D1', linewidth=1.5)
pl.plot(amplitudesList, listD2, 'go-', label='D2', linewidth=1.5)
pl.legend(loc='best')
pl.xlabel("Amplitude(pA)", fontweight='bold', fontsize=14)
pl.ylabel("Firing rate (sps)", fontweight='bold', fontsize=14)
for x in pl.gca().get_xticklabels():
x.set_fontweight('bold')
x.set_fontsize(10)
for x in pl.gca().get_yticklabels():
x.set_fontweight('bold')
x.set_fontsize(10)
pl.savefig("../../data/FI.pdf")
print "d1", FI["d1"], "d2", FI["d2"], amplitudesList
pl.figure()
voltage_trace.from_device(mult)
pl.show()
def checkConninMV():
nest.ResetKernel()
nest.SetKernelStatus({"resolution": timestep, "overwrite_files": True, "rng_seeds": seed,
"print_time": True, "local_num_threads": num_threads})
nest.CopyModel("iaf_cond_alpha", "d21", d2_params)
#nest.CopyModel("izhikevich", "d1", d1_params_iz)
nest.CopyModel("iaf_cond_alpha", "d22", d2_params)
#nest.CopyModel("izhikevich", "d2", d2_params_iz)
d21 = nest.Create("d21", 1)
d22 = nest.Create("d22", 1)
nest.SetStatus(d22, {'I_e': 27.}) # Has to be tuned so that d2 is at -80
# nest.SetStatus(d1,{'I_e':69.}) # Has to be tuned so that d1 is at -80
dc = nest.Create("dc_generator", 1)
sd = nest.Create("spike_detector", 2)
mult = nest.Create("multimeter", 1, params={
"withgid": True, "withtime": True, "record_from": ["V_m"]})
nest.Connect(d21, [sd[0]])
nest.Connect(d22, [sd[1]])
nest.Connect(dc, d21)
# nest.Connect(dc,d2)
# nest.Connect(mult,d1)
nest.Connect(mult, d22)
nest.Connect(d21, d22, syn_spec={'weight': jd2d2})
nest.SetStatus(dc, params={"amplitude": 250.})
nest.Simulate(1000.)
evs_d1 = nest.GetStatus([sd[0]])[0]["events"]["senders"]
ts_d1 = nest.GetStatus([sd[0]])[0]["events"]["times"]
V_m = nest.GetStatus(mult)[0]["events"]["V_m"]
ts = nest.GetStatus(mult)[0]["events"]["times"]
inds = np.where(ts > 400.)
Vmmin = np.min(V_m[inds])
print "conn_strength", Vmmin + 80.
# pl.figure(1)
# rplt.from_device(sd)
pl.figure(2)
voltage_trace.from_device(mult)
pl.plot(ts_d1, np.ones(len(ts_d1)) * -80., 'r|', markersize=10)
pl.show()
calcFI()
# checkConninMV()
| 2.046875
| 2
|
fastf1/tests/test_livetiming.py
|
JellybeanAsh/Fast-F1
| 690
|
12555
|
<filename>fastf1/tests/test_livetiming.py
import os
from fastf1.core import Session, Weekend
from fastf1.livetiming.data import LiveTimingData
def test_file_loading_w_errors():
# load file with many errors and invalid data without crashing
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/with_errors.txt')
livedata.load()
def test_file_loading():
# load a valid file
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/2021_1_FP3.txt')
livedata.load()
weekend = Weekend(2021, 1)
session = Session(weekend=weekend, session_name='test_session')
session.load_laps(with_telemetry=True, livedata=livedata)
assert session.laps.shape == (274, 26)
assert session.car_data['44'].shape == (17362, 10)
def test_duplicate_removal(tmpdir):
# create a temporary file with two identical lines of data
tmpfile = os.path.join(tmpdir, 'tmpfile.txt')
data = "['TimingAppData', {'Lines': {'22': {'Stints': {'0': {" \
"'LapFlags': 0, 'Compound': 'UNKNOWN', 'New': 'false'," \
"'TyresNotChanged': '0', 'TotalLaps': 0, 'StartLaps':" \
"0}}}}}, '2021-03-27T12:00:32.086Z']\n"
with open(tmpfile, 'w') as fobj:
fobj.write(data)
fobj.write(data)
livedata = LiveTimingData(tmpfile)
assert len(livedata.get('TimingAppData')) == 1
livedata = LiveTimingData(tmpfile, remove_duplicates=False)
assert len(livedata.get('TimingAppData')) == 2
| 2.65625
| 3
|
src/plot/S0_read_jld2.py
|
OUCyf/NoiseCC
| 4
|
12556
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 20:09:08 2021
######################
##### read h5 ########
######################
# 1.read h5-file
h5_file = h5py.File(files[1],'r')
# 2.show all keys in h5-file
h5_file.keys()
# 3. loop over all keys in the h5-file
for key in h5_file.keys():
    onekey = key
    onekey_name = h5_file[key].name
# 4. access a group by a known key "NN"
h5_file["NN"]
h5_file["NN"].keys()
f_dict = dict(h5_file["NN"])
f_dict.keys() # all keywords
# 5. read a group's datasets
data = f_dict["data"][()] # recommended
data = f_dict["data"].value # data is a numpy ndarray (multi-dimensional array)
trace = data[0] # a single trace
# 6. read a group's Int / Float values
baz = f_dict["baz"].value
baz = h5_file["NN"]["baz"].value
# 7. read a group's strings
# encode converts a unicode string into another encoding, e.g. str2.encode('utf8') turns the unicode string str2 into utf-8 bytes
comp = h5_file["NN"]["comp"].value[0].decode('utf-8')
# 8. close the file
h5_file.close()
######################
##### write h5 ########
######################
@author: yf
"""
#%%
import numpy as np
import h5py
import os
import glob
#%% 1. set parameter
file = "../../data/BJ.081_BJ.084__2020_04_11_00_00_00T2021_04_13_00_00_00__all.jld2"
chan = "NN"
dt = 0.005
#%% 2. read h5
# open file
f = h5py.File(file,'r')
# read data
data = f[chan]["data"][0]
# read parameters
azi = f[chan]["azi"][()]
baz = f[chan]["baz"][()]
maxlag = f[chan]["maxlag"][()]
cc_len = f[chan]["cc_len"][()]
cc_step = f[chan]["cc_step"][()]
corr_type = f[chan]["corr_type"][()]
comp = f[chan]["comp"][()]
dist = f[chan]["dist"][()] # dist = f[chan]["dist"].value
lat = f[chan]["lat"][()]
lon = f[chan]["lon"][()]
N_glob = f[chan]["N_glob"][()]
N_read = f[chan]["N_read"][()]
N_good = f[chan]["N_good"][()]
name = f[chan]["name"][()][0].decode('utf-8')
# close h5-file
f.close()
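#%% 3. (sketch) write h5
# The docstring above announces a "write h5" section but leaves it empty; this
# is a minimal, hypothetical h5py write example mirroring the fields read
# above. The output filename is an assumption for illustration only.
with h5py.File("example_out.h5", "w") as fout:
    grp = fout.create_group(chan)
    grp.create_dataset("data", data=data)   # cross-correlation trace
    grp.create_dataset("dist", data=dist)   # station separation
    grp.create_dataset("name", data=name)   # station-pair name (stored as a string)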
| 2.484375
| 2
|
setup.py
|
MrJakeSir/theming
| 3
|
12557
|
<filename>setup.py
from distutils.core import setup
setup(
name = 'colormate',
packages = ['colormate'],
version = '0.2214',
license='MIT',
description = 'A package to theme terminal scripts with custom colors and text formatting',
author = 'Rodrigo',
author_email = '<EMAIL>',
url = 'https://github.com/mrjakesir/themify',
download_url = 'https://github.com/MrJakeSir/themify/archive/refs/tags/v_0.3.1.tar.gz',
keywords = ['Colors', 'Scripting', 'Theme', 'Theming'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
| 1.335938
| 1
|
tests/test_compound_where.py
|
WinVector/data_algebra
| 37
|
12558
|
import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
import data_algebra.util
import data_algebra.SQLite
def test_compount_where_and():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" and b == "c" and x > 0 and y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
def test_compount_where_amp():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" & b == "c" & x > 0 & y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
| 2.515625
| 3
|
students/admin.py
|
eustone/sms
| 0
|
12559
|
<gh_stars>0
from django.contrib import admin
from .models import Student
# Register your models here.
class StudentAdmin(admin.ModelAdmin):
list_display = ('first_name','middle_name',
'last_name','identification_number')
search_fields = ('first_name','middle_name',
'last_name','identification_number')
admin.site.register(Student,StudentAdmin)
| 1.984375
| 2
|
lshmm/viterbi/vit_diploid_variants_samples.py
|
jeromekelleher/lshmm
| 0
|
12560
|
<filename>lshmm/viterbi/vit_diploid_variants_samples.py
"""Collection of functions to run Viterbi algorithms on dipoid genotype data, where the data is structured as variants x samples."""
import numba as nb
import numpy as np
# https://github.com/numba/numba/issues/1269
@nb.njit
def np_apply_along_axis(func1d, axis, arr):
"""Create numpy-like functions for max, sum etc."""
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@nb.njit
def np_amax(array, axis):
"""Numba implementation of numpy vectorised maximum."""
return np_apply_along_axis(np.amax, axis, array)
@nb.njit
def np_sum(array, axis):
"""Numba implementation of numpy vectorised sum."""
return np_apply_along_axis(np.sum, axis, array)
@nb.njit
def np_argmax(array, axis):
"""Numba implementation of numpy vectorised argmax."""
return np_apply_along_axis(np.argmax, axis, array)
# def forwards_viterbi_dip_naive(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m, n, n))
# P = np.zeros((m, n, n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V[l-1,k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[l,j1,j2] = np.amax(v) * e[l, index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V[l - 1, k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
# def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# r_n = r/n
# # Take a look at Haploid Viterbi implementation in Jeromes code and see if we can pinch some ideas.
# # Diploid Viterbi, with smaller memory footprint.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V_previous[k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V)
# V_previous = np.copy(V) / c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
"""Naive implementation of LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
    # Take a look at the haploid Viterbi implementation in Jerome's code and see if we can pinch some ideas.
# Diploid Viterbi, with smaller memory footprint.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
# Get the vector to maximise over
v = np.zeros((n, n))
for k1 in range(n):
for k2 in range(n):
v[k1, k2] = V_previous[k1, k2]
if (k1 == j1) and (k2 == j2):
v[k1, k2] *= (
(1 - r[l]) ** 2 + 2 * (1 - r[l]) * r_n[l] + r_n[l] ** 2
)
elif (k1 == j1) or (k2 == j2):
v[k1, k2] *= r_n[l] * (1 - r[l]) + r_n[l] ** 2
else:
v[k1, k2] *= r_n[l] ** 2
V[j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V)
V_previous = np.copy(V) / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
# def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n, n))
# P = np.zeros((m,n,n)).astype(np.int64)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# c = np.ones(m)
# r_n = r/n
# # Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# c[l] = np.amax(V_previous)
# argmax = np.argmax(V_previous)
# V_previous *= 1/c[l]
# V_rowcol_max = np_amax(V_previous, 0)
# arg_rowcol_max = np_argmax(V_previous, 0)
# no_switch = (1 - r[l])**2 + 2*(r_n[l]*(1 - r[l])) + r_n[l]**2
# single_switch = r_n[l]*(1 - r[l]) + r_n[l]**2
# double_switch = r_n[l]**2
# j1_j2 = 0
# for j1 in range(n):
# for j2 in range(n):
# V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
# P_single_switch = np.argmax(np.array([V_rowcol_max[j1], V_rowcol_max[j2]]))
# if P_single_switch == 0:
# template_single_switch = j1*n + arg_rowcol_max[j1]
# else:
# template_single_switch = arg_rowcol_max[j2]*n + j2
# V[j1,j2] = V_previous[j1,j2] * no_switch # No switch in either
# P[l, j1, j2] = j1_j2
# # Single or double switch?
# single_switch_tmp = single_switch * V_single_switch
# if (single_switch_tmp > double_switch):
# # Then single switch is the alternative
# if (V[j1,j2] < single_switch * V_single_switch):
# V[j1,j2] = single_switch * V_single_switch
# P[l, j1, j2] = template_single_switch
# else:
# # Double switch is the alternative
# if V[j1, j2] < double_switch:
# V[j1, j2] = double_switch
# P[l, j1, j2] = argmax
# V[j1,j2] *= e[l, index[j1, j2]]
# j1_j2 += 1
# V_previous = np.copy(V)
# ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
# return V, P, ll
@nb.njit
def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
"""LS diploid Viterbi algorithm, with reduced memory."""
# Initialise
V = np.zeros((n, n))
V_previous = np.zeros((n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V_previous[j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
c[l] = np.amax(V_previous)
argmax = np.argmax(V_previous)
V_previous *= 1 / c[l]
V_rowcol_max = np_amax(V_previous, 0)
arg_rowcol_max = np_argmax(V_previous, 0)
no_switch = (1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
single_switch = r_n[l] * (1 - r[l]) + r_n[l] ** 2
double_switch = r_n[l] ** 2
j1_j2 = 0
for j1 in range(n):
for j2 in range(n):
V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
P_single_switch = np.argmax(
np.array([V_rowcol_max[j1], V_rowcol_max[j2]])
)
if P_single_switch == 0:
template_single_switch = j1 * n + arg_rowcol_max[j1]
else:
template_single_switch = arg_rowcol_max[j2] * n + j2
V[j1, j2] = V_previous[j1, j2] * no_switch # No switch in either
P[l, j1, j2] = j1_j2
# Single or double switch?
single_switch_tmp = single_switch * V_single_switch
if single_switch_tmp > double_switch:
# Then single switch is the alternative
if V[j1, j2] < single_switch * V_single_switch:
V[j1, j2] = single_switch * V_single_switch
P[l, j1, j2] = template_single_switch
else:
# Double switch is the alternative
if V[j1, j2] < double_switch:
V[j1, j2] = double_switch
P[l, j1, j2] = argmax
V[j1, j2] *= e[l, index[j1, j2]]
j1_j2 += 1
V_previous = np.copy(V)
ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
return V, P, ll
# def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m,n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# # Jumped the gun - vectorising.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# v = (r_n[l]**2) * np.ones((n,n))
# v[j1,j2] += (1-r[l])**2
# v[j1, :] += (r_n[l] * (1 - r[l]))
# v[:, j2] += (r_n[l] * (1 - r[l]))
# v *= V[l-1,:,:]
# V[l,j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
@nb.jit
def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
"""Vectorised LS diploid Viterbi algorithm using numpy."""
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
r_n = r / n
for j1 in range(n):
for j2 in range(n):
index_tmp = (
4 * np.int64(np.equal(G[0, j1, j2], s[0, 0]))
+ 2 * np.int64((G[0, j1, j2] == 1))
+ np.int64(s[0, 0] == 1)
)
V[0, j1, j2] = 1 / (n ** 2) * e[0, index_tmp]
# Jumped the gun - vectorising.
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
for j1 in range(n):
for j2 in range(n):
v = (r_n[l] ** 2) * np.ones((n, n))
v[j1, j2] += (1 - r[l]) ** 2
v[j1, :] += r_n[l] * (1 - r[l])
v[:, j2] += r_n[l] * (1 - r[l])
v *= V[l - 1, :, :]
V[l, j1, j2] = np.amax(v) * e[l, index[j1, j2]]
P[l, j1, j2] = np.argmax(v)
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
def forwards_viterbi_dip_naive_full_vec(n, m, G, s, e, r):
"""Fully vectorised naive LS diploid Viterbi algorithm using numpy."""
char_both = np.eye(n * n).ravel().reshape((n, n, n, n))
char_col = np.tile(np.sum(np.eye(n * n).reshape((n, n, n, n)), 3), (n, 1, 1, 1))
char_row = np.copy(char_col).T
rows, cols = np.ogrid[:n, :n]
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
index = (
4 * np.equal(G[0, :, :], s[0, 0]).astype(np.int64)
+ 2 * (G[0, :, :] == 1).astype(np.int64)
+ np.int64(s[0, 0] == 1)
)
V[0, :, :] = 1 / (n ** 2) * e[0, index]
r_n = r / n
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
v = (
(r_n[l] ** 2)
+ (1 - r[l]) ** 2 * char_both
+ (r_n[l] * (1 - r[l])) * (char_col + char_row)
)
v *= V[l - 1, :, :]
P[l, :, :] = np.argmax(v.reshape(n, n, -1), 2) # Have to flatten to use argmax
V[l, :, :] = v.reshape(n, n, -1)[rows, cols, P[l, :, :]] * e[l, index]
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
@nb.jit
def backwards_viterbi_dip(m, V_last, P):
"""Run a backwards pass to determine the most likely path."""
assert V_last.ndim == 2
assert V_last.shape[0] == V_last.shape[1]
# Initialisation
path = np.zeros(m).astype(np.int64)
path[m - 1] = np.argmax(V_last)
# Backtrace
for j in range(m - 2, -1, -1):
path[j] = P[j + 1, :, :].ravel()[path[j + 1]]
return path
def get_phased_path(n, path):
"""Obtain the phased path."""
return np.unravel_index(path, (n, n))
@nb.jit
def path_ll_dip(n, m, G, phased_path, s, e, r):
"""Evaluate log-likelihood path through a reference panel which results in sequence s."""
index = (
4 * np.int64(np.equal(G[0, phased_path[0][0], phased_path[1][0]], s[0, 0]))
+ 2 * np.int64(G[0, phased_path[0][0], phased_path[1][0]] == 1)
+ np.int64(s[0, 0] == 1)
)
log_prob_path = np.log10(1 / (n ** 2) * e[0, index])
old_phase = np.array([phased_path[0][0], phased_path[1][0]])
r_n = r / n
for l in range(1, m):
index = (
4 * np.int64(np.equal(G[l, phased_path[0][l], phased_path[1][l]], s[0, l]))
+ 2 * np.int64(G[l, phased_path[0][l], phased_path[1][l]] == 1)
+ np.int64(s[0, l] == 1)
)
current_phase = np.array([phased_path[0][l], phased_path[1][l]])
phase_diff = np.sum(~np.equal(current_phase, old_phase))
if phase_diff == 0:
log_prob_path += np.log10(
(1 - r[l]) ** 2 + 2 * (r_n[l] * (1 - r[l])) + r_n[l] ** 2
)
elif phase_diff == 1:
log_prob_path += np.log10(r_n[l] * (1 - r[l]) + r_n[l] ** 2)
else:
log_prob_path += np.log10(r_n[l] ** 2)
log_prob_path += np.log10(e[l, index])
old_phase = current_phase
return log_prob_path
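# --- Hypothetical end-to-end sketch (not part of the original module) ---
# Chains the low-memory forward pass, the backtrace and the path likelihood
# defined above. The genotype panel G, the query s, and the emission /
# recombination arrays e and r are random placeholders with the shapes these
# functions expect, not meaningful data.
if __name__ == "__main__":
    n, m = 4, 6                                    # reference samples, variant sites
    G = np.random.randint(0, 3, size=(m, n, n))    # unphased genotype dosages
    s = np.random.randint(0, 3, size=(1, m))       # query genotypes
    e = np.random.uniform(0.01, 1.0, size=(m, 8))  # emission probabilities per index
    r = np.random.uniform(0.01, 0.1, size=m)       # recombination probabilities
    V, P, ll = forwards_viterbi_dip_low_mem(n, m, G, s, e, r)
    path = backwards_viterbi_dip(m, V, P)
    phased_path = get_phased_path(n, path)
    print(ll, path_ll_dip(n, m, G, phased_path, s, e, r))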
| 2.625
| 3
|
src/test/test_pg_function.py
|
gyana/alembic_utils
| 0
|
12561
|
<filename>src/test/test_pg_function.py
from alembic_utils.pg_function import PGFunction
from alembic_utils.replaceable_entity import register_entities
from alembic_utils.testbase import TEST_VERSIONS_ROOT, run_alembic_command
TO_UPPER = PGFunction(
schema="public",
signature="toUpper(some_text text default 'my text!')",
definition="""
returns text
as
$$ begin return upper(some_text) || 'abc'; end; $$ language PLPGSQL;
""",
)
def test_create_revision(engine) -> None:
register_entities([TO_UPPER])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "create"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_create.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.create_entity" in migration_contents
assert "op.drop_entity" in migration_contents
assert "op.replace_entity" not in migration_contents
assert "from alembic_utils.pg_function import PGFunction" in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_update_revision(engine) -> None:
engine.execute(TO_UPPER.to_sql_statement_create())
# Update definition of TO_UPPER
UPDATED_TO_UPPER = PGFunction(
TO_UPPER.schema,
TO_UPPER.signature,
r'''returns text as
$$
select upper(some_text) || 'def' -- """ \n \\
$$ language SQL immutable strict;''',
)
register_entities([UPDATED_TO_UPPER])
# Autogenerate a new migration
# It should detect the change we made and produce a "replace_function" statement
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "2", "message": "replace"},
)
migration_replace_path = TEST_VERSIONS_ROOT / "2_replace.py"
with migration_replace_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.replace_entity" in migration_contents
assert "op.create_entity" not in migration_contents
assert "op.drop_entity" not in migration_contents
assert "from alembic_utils.pg_function import PGFunction" in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_noop_revision(engine) -> None:
engine.execute(TO_UPPER.to_sql_statement_create())
register_entities([TO_UPPER])
output = run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "3", "message": "do_nothing"},
)
migration_do_nothing_path = TEST_VERSIONS_ROOT / "3_do_nothing.py"
with migration_do_nothing_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.create_entity" not in migration_contents
assert "op.drop_entity" not in migration_contents
assert "op.replace_entity" not in migration_contents
assert "from alembic_utils" not in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_drop(engine) -> None:
# Manually create a SQL function
engine.execute(TO_UPPER.to_sql_statement_create())
# Register no functions locally
register_entities([], schemas=["public"])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "drop"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_drop.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.drop_entity" in migration_contents
assert "op.create_entity" in migration_contents
assert "from alembic_utils" in migration_contents
assert migration_contents.index("op.drop_entity") < migration_contents.index("op.create_entity")
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_has_no_parameters(engine) -> None:
    # Error was occurring in drop statement when function had no parameters
# related to parameter parsing to drop default statements
SIDE_EFFECT = PGFunction(
schema="public",
signature="side_effect()",
definition="""
returns integer
as
$$ select 1; $$ language SQL;
""",
)
# Register no functions locally
register_entities([SIDE_EFFECT], schemas=["public"])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "no_arguments"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_no_arguments.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.drop_entity" in migration_contents
# Execute upgrade
run_alembic_command(engine=engine, command="upgrade", command_kwargs={"revision": "head"})
# Execute Downgrade
run_alembic_command(engine=engine, command="downgrade", command_kwargs={"revision": "base"})
def test_ignores_extension_functions(engine) -> None:
# Extensions contain functions and don't have local representations
# Unless they are excluded, every autogenerate migration will produce
# drop statements for those functions
try:
engine.execute("create extension if not exists unaccent;")
register_entities([], schemas=["public"])
run_alembic_command(
engine=engine,
command="revision",
command_kwargs={"autogenerate": True, "rev_id": "1", "message": "no_drops"},
)
migration_create_path = TEST_VERSIONS_ROOT / "1_no_drops.py"
with migration_create_path.open() as migration_file:
migration_contents = migration_file.read()
assert "op.drop_entity" not in migration_contents
finally:
engine.execute("drop extension if exists unaccent;")
| 2.109375
| 2
|
proof_of_work/multiagent/turn_based/v6/environmentv6.py
|
michaelneuder/parkes_lab_fa19
| 0
|
12562
|
<filename>proof_of_work/multiagent/turn_based/v6/environmentv6.py
import numpy as np
np.random.seed(0)
class Environment(object):
def __init__(self, alpha, T, mining_cost=0.5):
self.alpha = alpha
self.T = T
self.current_state = None
self.mining_cost = mining_cost
def reset(self):
self.current_state = (0, 0)
return self.current_state
def getNextStateAdopt(self, rand_val):
self.current_state = (0, 0)
return np.asarray(self.current_state), 0
def getNextStateOverride(self, rand_val):
a, h = self.current_state
if a <= h:
self.current_state = (0, 0)
return np.asarray(self.current_state), -100
self.current_state = (a - h - 1, 0)
return np.asarray(self.current_state), h + 1
def getNextStateMine(self, rand_val):
a, h = self.current_state
if (a == self.T) or (h == self.T):
return self.getNextStateAdopt(rand_val)
if rand_val < self.alpha:
self.current_state = (a + 1, h)
else:
self.current_state = (a, h + 1)
return np.asarray(self.current_state), -1*self.alpha*self.mining_cost
def takeAction(self, action, rand_val=None):
assert(action in [0, 1, 2])
        if rand_val is None:  # only draw a new random number when none was supplied (0.0 is a valid value)
rand_val = np.random.uniform()
if action == 0:
return self.getNextStateAdopt(rand_val)
elif action == 1:
return self.getNextStateOverride(rand_val)
else:
return self.getNextStateMine(rand_val)
def main():
env = Environment(alpha=0.35, T=9)
    print(env.reset())
print(env.takeAction(2, 0.01))
print(env.takeAction(1, 0.01))
if __name__ == "__main__":
main()
| 2.640625
| 3
|
passgen-py/setup.py
|
hassanselim0/PassGen
| 0
|
12563
|
from setuptools import setup, find_packages
setup(
name='passgen-py',
packages=find_packages(),
version='1.1',
description='Generate Passwords Deterministically based on a Master Password.',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
python_requires='>=3.6, <4',
entry_points={
'console_scripts': [
'passgen=src:cli',
],
},
install_requires=['click', 'pyperclip'],
)
| 1.296875
| 1
|
python-peculiarities/source/MultiplicationComplication.py
|
noamt/presentations
| 0
|
12564
|
<gh_stars>0
# https://codegolf.stackexchange.com/a/11480
multiplication = []
for i in range(10):
multiplication.append(i * (i + 1))
for x in multiplication:
print(x)
| 3.765625
| 4
|
bin/mkSampleInfo.py
|
icbi-lab/nextNEOpi
| 24
|
12565
|
<reponame>icbi-lab/nextNEOpi
#!/usr/bin/env python
"""
Requirements:
* Python >= 3.7
* Pysam
Copyright (c) 2021 <NAME> <<EMAIL>>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"1",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import os
import sys
import argparse
def parse_csin(csin_fh, csin_info):
for line in csin_fh:
if line.find("MHC I CSiN") != -1:
_, csin_v = line.split(" = ")
csin_info["MHCI"] = round(float(csin_v.strip()), 3)
if line.find("MHC II CSiN") != -1:
_, csin_v = line.split(" = ")
csin_info["MHCII"] = round(float(csin_v.strip()), 3)
if line.find("Total CSiN") != -1:
_, csin_v = line.split(" = ")
csin_info["combined"] = round(float(csin_v.strip()), 3)
return csin_info
def parse_tmb(tmb_fh, tmb_info, tmb_type):
for line in tmb_fh:
if line.find("Coverage") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["cov_genome"] = v.strip()
if tmb_type == "coding":
tmb_info["cov_coding"] = v.strip()
if line.find("Variants") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["variants_tot"] = v.strip()
if tmb_type == "coding":
tmb_info["variants_coding"] = v.strip()
if line.find("Mutational load (") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["TMB"] = round(float(v.strip()), 3)
if tmb_type == "coding":
tmb_info["TMB_coding"] = round(float(v.strip()), 3)
if line.find("Mutational load clonal") != -1:
_, v = line.split("\t")
if tmb_type == "all":
tmb_info["TMB_clonal"] = round(float(v.strip()), 3)
if tmb_type == "coding":
tmb_info["TMB_clonal_coding"] = round(float(v.strip()), 3)
return tmb_info
def write_output(out_fh, tmb_info, csin_info, sample_name):
header_fields = [
"SampleID",
"TMB",
"TMB_clonal",
"TMB_coding",
"TMB_clonal_coding",
"variants_total",
"variants_coding",
"coverage_genome",
"coverage_coding",
"CSiN_MHC_I",
"CSiN_MHC_II",
"CSiN_combined",
]
data_fields = [
sample_name,
tmb_info["TMB"],
tmb_info["TMB_clonal"],
tmb_info["TMB_coding"],
tmb_info["TMB_clonal_coding"],
tmb_info["variants_tot"],
tmb_info["variants_coding"],
tmb_info["cov_genome"],
tmb_info["cov_coding"],
csin_info["MHCI"],
csin_info["MHCII"],
csin_info["combined"],
]
out_fh.write("\t".join(header_fields) + "\n")
out_fh.write("\t".join(map(str, data_fields)) + "\n")
def _file_write(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "w")
def _file_read(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "r")
if __name__ == "__main__":
usage = __doc__.split("\n\n\n")
parser = argparse.ArgumentParser(description="Compile sample info sheet")
parser.add_argument(
"--tmb",
required=True,
type=_file_read,
help="TMB file",
)
parser.add_argument(
"--tmb_coding",
required=True,
type=_file_read,
help="TMB coding file",
)
parser.add_argument(
"--csin",
required=True,
type=_file_read,
help="CSiN file",
)
parser.add_argument(
"--out",
required=True,
type=_file_write,
help="Output file",
)
parser.add_argument(
"--sample_name",
required=True,
type=str,
help="Sample name",
)
parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__
)
args = parser.parse_args()
tmb = args.tmb
tmb_coding = args.tmb_coding
csin = args.csin
out = args.out
sample_name = args.sample_name
tmb_info = {
"cov_genome": 0,
"cov_coding": 0,
"variants_tot": 0,
"variants_coding": 0,
"TMB": 0,
"TMB_clonal": 0,
"TMB_coding": 0,
"TMB_clonal_coding": 0,
}
csin_info = {"MHCI": 0, "MHCII": 0, "combined": 0}
tmb_info = parse_tmb(tmb, tmb_info, "all")
tmb_info = parse_tmb(tmb_coding, tmb_info, "coding")
csin_info = parse_csin(csin, csin_info)
write_output(out, tmb_info, csin_info, sample_name)
out.close()
| 2.0625
| 2
|
garrick.py
|
SebNickel/garrick
| 0
|
12566
|
#!/usr/bin/env python
import sys
import colorama
from pick_db_file import pick_db_file
import db_connection
import card_repository
from review_cards import review_cards
from new_card import new_card
from new_cards import new_cards
import review
from user_colors import print_info, print_instruction, print_error
from usage_info import print_usage_info
def main():
# Initialise colorama
colorama.init()
valid_args = ['-n', '-n2', '-s', '-s2', '-e', '-e2', '-b', '-bf', '-bb', '-bs', '-bl', '-br']
if len(sys.argv) > 1 and sys.argv[1] not in valid_args:
print_usage_info(sys.argv)
if sys.argv[1] not in ['-h', '--help']:
sys.exit(1)
sys.exit()
db_file = pick_db_file()
conn, cursor = db_connection.connect(db_file)
card_repository.create_table_if_not_exists(conn, cursor)
if len(sys.argv) == 1:
table_is_empty = card_repository.check_if_empty(cursor)
if table_is_empty:
print_error("You don't have any cards yet.")
print_instruction(
'Create some cards by launching garrick with one of the following options first:'
)
print_instruction('\t-n\tCreate cards starting in one-way mode.')
print_instruction('\t-n2\tCreate cards starting in two-way mode.')
print_instruction('\t-s\tCreate cards starting in single-line and one-way mode.')
print_instruction('\t-s2\tCreate cards starting in single-line and two-way mode.')
print_instruction('\t-e\tCreate cards starting in editor mode and in one-way mode.')
            print_instruction('\t-e2\tCreate cards starting in editor mode and in two-way mode.')
else:
review.review(conn, cursor)
elif sys.argv[1] == '-n':
new_cards(conn, cursor, two_way_card=False, single_line_mode=False, editor_mode=False)
elif sys.argv[1] == '-n2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=False, editor_mode=False)
elif sys.argv[1] == '-s':
new_cards(conn, cursor, two_way_card=False, single_line_mode=True, editor_mode=False)
elif sys.argv[1] == '-s2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=True, editor_mode=False)
elif sys.argv[1] == '-e':
new_cards(conn, cursor, two_way_card=False, single_line_mode=False, editor_mode=True)
elif sys.argv[1] == '-e2':
new_cards(conn, cursor, two_way_card=True, single_line_mode=False, editor_mode=True)
elif sys.argv[1] == '-b':
review.browse_by_regex(conn, cursor)
elif sys.argv[1] == '-bf':
review.browse_by_regex_front(conn, cursor)
elif sys.argv[1] == '-bb':
review.browse_by_regex_back(conn, cursor)
elif sys.argv[1] == '-bs':
review.browse_by_score(conn, cursor)
elif sys.argv[1] == '-bl':
review.browse_by_last_viewed(conn, cursor)
elif sys.argv[1] == '-br':
review.browse_by_last_viewed_reverse(conn, cursor)
print_info('Kbai')
db_connection.disconnect(conn, cursor)
if __name__ == '__main__':
main()
| 2.4375
| 2
|
perceiver/train/dataset.py
|
kawa-work/deepmind-research
| 10,110
|
12567
|
<reponame>kawa-work/deepmind-research
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with pre-processing and augmentation.
Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database.
https://image-net.org/
"""
import enum
from typing import Any, Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from perceiver.train import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
INPUT_DIM = 224 # The number of pixels in the image resize.
class Split(enum.Enum):
"""ImageNet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
# batch_dims should be:
# [device_count, per_device_batch_size] or [total_batch_size]
batch_dims: Sequence[int],
augmentation_settings: Mapping[str, Any],
# The shape to which images are resized.
im_dim: int = INPUT_DIM,
threadpool_size: int = 48,
max_intra_op_parallelism: int = 1,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
im_size = (im_dim, im_dim)
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = threadpool_size
options.experimental_threading.max_intra_op_parallelism = (
max_intra_op_parallelism)
options.experimental_optimization.map_parallelization = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def crop_augment_preprocess(example):
image, _ = _preprocess_image(
example['image'], is_training, im_size, augmentation_settings)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if is_training:
if augmentation_settings['cutmix']:
out['mask'] = cutmix_padding(*im_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augmentation_settings['mixup_alpha'] is not None:
beta = tfp.distributions.Beta(
augmentation_settings['mixup_alpha'],
augmentation_settings['mixup_alpha'])
out['mixup_ratio'] = beta.sample()
return out
ds = ds.map(crop_augment_preprocess, num_parallel_calls=AUTOTUNE)
# Mixup/cutmix by temporarily batching (using the per-device batch size):
use_cutmix = augmentation_settings['cutmix']
use_mixup = augmentation_settings['mixup_alpha'] is not None
if is_training and (use_cutmix or use_mixup):
inner_batch_size = batch_dims[-1]
# Apply mixup, cutmix, or mixup + cutmix on batched data.
# We use data from 2 batches to produce 1 mixed batch.
ds = ds.batch(inner_batch_size * 2)
if not use_cutmix and use_mixup:
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif use_cutmix and not use_mixup:
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif use_cutmix and use_mixup:
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# Unbatch for further processing.
ds = ds.unbatch()
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
yield from tfds.as_numpy(ds)
# cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from:
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
def my_cutmix(batch):
"""Apply CutMix: https://arxiv.org/abs/1905.04899."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Apply mixup: https://arxiv.org/abs/1710.09412."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = ratio[..., 0, 0, 0] # Unsqueeze
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (
Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(
split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
augmentation_settings: Mapping[str, Any],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns processed and resized images."""
# Get the image crop.
if is_training:
image, im_shape = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image, im_shape = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# Optionally apply RandAugment: https://arxiv.org/abs/1909.13719
if is_training:
if augmentation_settings['randaugment'] is not None:
# Input and output images are dtype uint8.
image = autoaugment.distort_image_with_randaugment(
image,
num_layers=augmentation_settings['randaugment']['num_layers'],
magnitude=augmentation_settings['randaugment']['magnitude'])
# Resize and normalize the image crop.
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(
image, image_size, tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image, im_shape
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([target_height, target_width])
return image, im_shape
def _decode_whole_image(image_bytes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
image = tf.io.decode_jpeg(image_bytes, channels=3)
im_shape = tf.io.extract_jpeg_shape(image_bytes, output_type=tf.int32)
return image, im_shape
def _decode_and_random_crop(
image_bytes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Make a random crop of INPUT_DIM."""
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image, im_shape = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image, im_shape = _decode_and_center_crop(image_bytes, jpeg_shape)
return image, im_shape
def _center_crop(image, crop_dim):
"""Center crops an image to a target dimension."""
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = ((image_height - crop_dim) + 1) // 2
offset_width = ((image_width - crop_dim) + 1) // 2
return tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_dim, crop_dim)
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((INPUT_DIM / (INPUT_DIM + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([padded_center_crop_size, padded_center_crop_size])
return image, im_shape
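# Worked example of the padded center-crop arithmetic above (illustrative only;
# assumes INPUT_DIM = 224, the usual ImageNet value defined elsewhere in this file).
# For a 500x375 JPEG: int((224 / (224 + 32)) * min(500, 375)) = int(0.875 * 375) = 328,
# so a 328x328 window is cut from the image center and later resized to image_size.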
| 2.203125
| 2
|
pyapprox/manipulate_polynomials.py
|
samtx/pyapprox
| 0
|
12568
|
import numpy as np
from scipy.special import factorial
from pyapprox.indexing import hash_array
from pyapprox.indexing import compute_hyperbolic_level_indices
def multiply_multivariate_polynomials(indices1,coeffs1,indices2,coeffs2):
"""
TODO: instead of using dictionary to colect terms consider using
unique_indices,repeated_idx=np.unique(
indices[active_idx,:],axis=1,return_inverse=True)
as is done in multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion. Choose which one is faster
Parameters
----------
index : multidimensional index
multidimensional index specifying the polynomial degree in each
dimension
Returns
-------
"""
num_vars = indices1.shape[0]
num_indices1 = indices1.shape[1]
num_indices2 = indices2.shape[1]
assert num_indices1==coeffs1.shape[0]
assert num_indices2==coeffs2.shape[0]
assert num_vars==indices2.shape[0]
indices_dict = dict()
max_num_indices = num_indices1*num_indices2
indices = np.empty((num_vars,max_num_indices),int)
coeffs = np.empty((max_num_indices),float)
kk = 0
for ii in range(num_indices1):
index1 = indices1[:,ii]
coeff1 = coeffs1[ii]
for jj in range(num_indices2):
index= index1+indices2[:,jj]
key = hash_array(index)
coeff = coeff1*coeffs2[jj]
if key in indices_dict:
coeffs[indices_dict[key]]+=coeff
else:
indices_dict[key]=kk
indices[:,kk]=index
coeffs[kk]=coeff
kk+=1
indices = indices[:,:kk]
coeffs = coeffs[:kk]
return indices, coeffs
def coeffs_of_power_of_nd_linear_polynomial(num_vars, degree, linear_coeffs):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a linear multivariate polynomial (no constant term) to some power.
Parameters
----------
num_vars : integer
The number of variables
degree : integer
The power of the linear polynomial
linear_coeffs: np.ndarray (num_vars)
The coefficients of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
The coefficients of the new polynomial
indices : np.ndarray (num_vars, num_terms)
The set of multivariate indices that define the new polynomial
"""
assert len(linear_coeffs)==num_vars
coeffs, indices=multinomial_coeffs_of_power_of_nd_linear_polynomial(
num_vars, degree)
for ii in range(indices.shape[1]):
index = indices[:,ii]
for dd in range(num_vars):
degree = index[dd]
coeffs[ii] *= linear_coeffs[dd]**degree
return coeffs, indices
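# Illustrative usage (a sketch, not part of the original module): raising x1 + 2*x2
# to the power 2 gives x1**2 + 4*x1*x2 + 4*x2**2, so
# coeffs, indices = coeffs_of_power_of_nd_linear_polynomial(2, 2, np.array([1., 2.]))
# returns the coefficients [1., 4., 4.] paired with the indices for x1**2, x1*x2 and
# x2**2 (in whatever order compute_hyperbolic_level_indices emits them).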
def substitute_polynomial_for_variables_in_polynomial(
indices_in,coeffs_in,indices,coeffs,var_idx):
num_vars, num_terms = indices.shape
new_indices = []
new_coeffs = []
for ii in range(num_terms):
index = indices[:,ii]
pows = index[var_idx]
ind,cf = substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,index,coeffs[ii],var_idx,pows)
new_indices.append(ind)
new_coeffs.append(cf)
new_indices = np.hstack(new_indices)
new_coeffs = np.vstack(new_coeffs)
return new_indices, new_coeffs
def substitute_polynomial_for_variables_in_single_basis_term(
indices_in,coeffs_in,basis_index,basis_coeff,var_idx,global_var_idx,
num_global_vars):
"""
var_idx : np.ndarray (nsub_vars)
The dimensions in basis_index which will be substituted
global_var_idx : [ np.ndarray(nvars[ii]) for ii in num_inputs]
The index of the active variables for each input
"""
num_inputs = var_idx.shape[0]
assert num_inputs==len(indices_in)
assert num_inputs==len(coeffs_in)
assert basis_coeff.shape[0]==1
assert var_idx.max()<basis_index.shape[0]
assert basis_index.shape[1]==1
assert len(global_var_idx)==num_inputs
# store input indices in global_var_idx
temp = []
for ii in range(num_inputs):
ind = np.zeros((num_global_vars,indices_in[ii].shape[1]))
ind[global_var_idx,:] = indices_in[ii]
temp.append(ind)
indices_in = temp
jj=0
degree = basis_index[var_idx[jj]]
c1,ind1 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
for jj in range(1,var_idx.shape[0]):
degree = basis_index[var_idx[jj]]
c2,ind2 = coeffs_of_power_of_polynomial(
indices_in,coeffs_in[:,jj:jj+1],degree)
ind1,c1 = multiply_multivariate_polynomials(ind1,c1,ind2,c2)
# this mask may be wrong. I might be confusing global and var idx
mask = np.ones(basis_index.shape[0],dtype=bool); mask[var_idx]=False
print(ind1.shape,mask.shape)
ind1[mask,:] += basis_index[mask]
c1*=basis_coeff
return ind1, c1
def composition_of_polynomials(indices_list,coeffs_list):
npolys = len(indices_list)
assert npolys==len(coeffs_list)
for ii in range(1,npolys):
new_poly = 2
return new_poly
def coeffs_of_power_of_polynomial(indices, coeffs, degree):
"""
Compute the polynomial (coefficients and indices) obtained by raising
a multivariate polynomial to some power.
TODO: Deprecate coeffs_of_power_of_nd_linear_polynomial as that function
can be obtained as a special case of this function
Parameters
----------
indices : np.ndarray (num_vars,num_terms)
The indices of the multivariate polynomial
coeffs : np.ndarray (num_terms,num_qoi)
The coefficients of the polynomial
Returns
-------
coeffs : np.ndarray (num_new_terms,num_qoi)
The coefficients of the new polynomial
indices : np.ndarray (num_vars,num_new_terms)
The set of multivariate indices that define the new polynomial
"""
num_vars, num_terms = indices.shape
assert indices.shape[1]==coeffs.shape[0]
multinomial_coeffs, multinomial_indices = \
multinomial_coeffs_of_power_of_nd_linear_polynomial(num_terms, degree)
new_indices = np.zeros((num_vars,multinomial_indices.shape[1]))
new_coeffs = np.tile(multinomial_coeffs[:,np.newaxis],coeffs.shape[1])
for ii in range(multinomial_indices.shape[1]):
multinomial_index = multinomial_indices[:,ii]
for dd in range(num_terms):
deg = multinomial_index[dd]
new_coeffs[ii] *= coeffs[dd]**deg
new_indices[:,ii] += indices[:,dd]*deg
return new_coeffs, new_indices
def group_like_terms(coeffs, indices):
if coeffs.ndim==1:
coeffs = coeffs[:,np.newaxis]
num_vars,num_indices = indices.shape
indices_dict = {}
for ii in range(num_indices):
key = hash_array(indices[:,ii])
if not key in indices_dict:
indices_dict[key] = [coeffs[ii],ii]
else:
indices_dict[key] = [indices_dict[key][0]+coeffs[ii],ii]
new_coeffs = np.empty((len(indices_dict),coeffs.shape[1]))
new_indices = np.empty((num_vars,len(indices_dict)),dtype=int)
ii=0
for key, item in indices_dict.items():
new_indices[:,ii] = indices[:,item[1]]
new_coeffs[ii] = item[0]
ii+=1
return new_coeffs, new_indices
def multinomial_coefficient(index):
"""Compute the multinomial coefficient of an index [i1,i2,...,id].
Parameters
----------
index : multidimensional index
multidimensional index specifying the polynomial degree in each
dimension
Returns
-------
coeff : double
the multinomial coefficient
"""
level = index.sum()
denom = np.prod(factorial(index))
coeff = factorial(level)/denom
return coeff
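# Worked example for the formula above: for index = [2, 1] the coefficient is
# 3!/(2!*1!) = 3, i.e. the number of ways the term x1**2*x2 arises when expanding
# (x1 + x2)**3.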
def multinomial_coefficients(indices):
coeffs = np.empty((indices.shape[1]),float)
for i in range(indices.shape[1]):
coeffs[i] = multinomial_coefficient(indices[:,i])
return coeffs
def multinomial_coeffs_of_power_of_nd_linear_polynomial(num_vars,degree):
""" Compute the multinomial coefficients of the individual terms
obtained when taking the power of a linear polynomial
(without constant term).
Given a linear multivariate polynomial raised to a power, e.g.
(x1+x2+x3)**2 = x1**2+2*x1*x2+2*x1*x3+x2**2+2*x2*x3+x3**2
return the coefficients of each quadratic term, i.e.
[1,2,2,1,2,1]
Parameters
----------
num_vars : integer
the dimension of the multivariate polynomial
degree : integer
the power of the linear polynomial
Returns
-------
coeffs: np.ndarray (num_terms)
the multinomial coefficients of the polynomial obtained when
raising the linear multivariate polynomial to the power=degree
indices: np.ndarray (num_terms)
the indices of the polynomial obtained when
raising the linear multivariate polynomial to the power=degree
"""
indices = compute_hyperbolic_level_indices(num_vars,degree,1.0)
coeffs = multinomial_coefficients(indices)
return coeffs, indices
def add_polynomials(indices_list, coeffs_list):
"""
Add many polynomials together.
Example:
p1 = x1**2+x2+x3, p2 = x2**2+2*x3
p3 = p1+p2
return the degrees of each term in the polynomial
p3 = x1**2+x2+3*x3+x2**2
[2, 1, 1, 2]
and the coefficients of each of these terms
[1., 1., 3., 1.]
Parameters
----------
indices_list : list [np.ndarray (num_vars,num_indices_i)]
List of polynomial indices. indices_i may be different for each
polynomial
coeffs_list : list [np.ndarray (num_indices_i,num_qoi)]
List of polynomial coefficients. indices_i may be different for each
polynomial. num_qoi must be the same for each list element.
Returns
-------
indices: np.ndarray (num_vars,num_terms)
the polynomial indices of the polynomial obtained from
summing the polynomials. This will be the union of the indices
of the input polynomials
coeffs: np.ndarray (num_terms,num_qoi)
the polynomial coefficients of the polynomial obtained from
summing the polynomials
"""
num_polynomials = len(indices_list)
assert num_polynomials==len(coeffs_list)
indices_dict = dict()
indices = []
coeff = []
ii=0; kk=0
for jj in range(indices_list[ii].shape[1]):
assert coeffs_list[ii].ndim==2
assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
index=indices_list[ii][:,jj]
indices_dict[hash_array(index)]=kk
indices.append(index)
coeff.append(coeffs_list[ii][jj,:].copy())
kk+=1
for ii in range(1,num_polynomials):
#print indices_list[ii].T,num_polynomials
assert coeffs_list[ii].ndim==2
assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
for jj in range(indices_list[ii].shape[1]):
index=indices_list[ii][:,jj]
key = hash_array(index)
if key in indices_dict:
nn = indices_dict[key]
coeff[nn]+=coeffs_list[ii][jj,:]
else:
indices_dict[key]=kk
indices.append(index)
coeff.append(coeffs_list[ii][jj,:].copy())
kk+=1
indices = np.asarray(indices).T
coeff = np.asarray(coeff)
return indices, coeff
def get_indices_double_set(indices):
"""
Given multivariate indices
[i1,i2,...,]
Compute its double set by
[i1*i1,i1*i2,...,i2*i2,i2*i3...]
The double set will only contain unique indices
Parameters
----------
indices : np.ndarray (num_vars,num_indices)
The initial indices
Returns
-------
double_set_indices : np.ndarray (num_vars,num_indices)
The double set of indices
"""
dummy_coeffs = np.zeros(indices.shape[1])
double_set_indices = multiply_multivariate_polynomials(
indices,dummy_coeffs,indices,dummy_coeffs)[0]
return double_set_indices
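# Small illustration (sketch only): for indices = [[1, 0], [0, 1]], i.e. the
# monomials x1 and x2, the pairwise products are x1**2, x1*x2 and x2**2, so the
# double set is [[2, 1, 0], [0, 1, 2]] up to column ordering.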
def shift_momomial_expansion(coef,shift,scale):
assert coef.ndim==1
shifted_coef = np.zeros_like(coef)
shifted_coef[0]=coef[0]
nterms = coef.shape[0]
for ii in range(1,nterms):
temp = np.polynomial.polynomial.polypow([1,-shift],ii)
shifted_coef[:ii+1] += coef[ii]*temp[::-1]/scale**ii
return shifted_coef
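# Worked example for the shift above: the routine maps a monomial expansion p(x)
# to p((x - shift)/scale). For coef = [1., 2.] (p(x) = 1 + 2*x), shift = 1 and
# scale = 2 it returns [0., 1.], i.e. 1 + 2*(x - 1)/2 = x.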
#Some of these functions can be replaced by numpy functions described at
#https://docs.scipy.org/doc/numpy/reference/routines.polynomials.polynomial.html
| 2.6875
| 3
|
core/data/load_data.py
|
Originofamonia/mcan-vqa
| 0
|
12569
|
<filename>core/data/load_data.py
# --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> https://github.com/cuiyuhao1996
# --------------------------------------------------------
import h5py
import pickle
import random
import numpy as np
from numpy.random import default_rng
import pandas as pd
import glob, json, torch, time
from torch.utils.data import Dataset, DataLoader
from core.data.data_utils import img_feat_path_load, img_feat_load, ques_load, tokenize, ans_stat
from core.data.data_utils import pad_img_feat, proc_ques, proc_ans, proc_mimic_ans
class CustomDataset(Dataset):
def __init__(self, opt):
self.opt = opt
# ---- Raw data loading ----
# Loading all image paths
# if self.opt.preload:
self.img_feat_path_list = []
split_list = opt.split[opt.run_mode].split('+') # change this for split
# split_list = ['test']
for split in split_list:
if split in ['train', 'val', 'test']:
self.img_feat_path_list += glob.glob(opt.img_feat_path[split] + '*.npz')
# Loading question word list
self.stat_ques_list = \
json.load(open(opt.question_path['train'], 'r'))['questions'] + \
json.load(open(opt.question_path['val'], 'r'))['questions'] + \
json.load(open(opt.question_path['test'], 'r'))['questions'] + \
json.load(open(opt.question_path['vg'], 'r'))['questions']
# Loading answer word list
# self.stat_ans_list = \
# json.load(open(__C.answer_path['train'], 'r'))['annotations'] + \
# json.load(open(__C.answer_path['val'], 'r'))['annotations']
# Loading question and answer list
self.ques_list = []
self.ans_list = []
# split_list = opt.split[opt.run_mode].split('+')
# split_list = ['train', 'val', 'test']
for split in split_list:
self.ques_list += json.load(open(opt.question_path[split], 'r'))['questions']
# if opt.run_mode in ['train']:
self.ans_list += json.load(open(opt.answer_path[split], 'r'))['annotations']
# Define run data size
if opt.run_mode in ['train']:
self.data_size = self.ans_list.__len__()
else:
self.data_size = self.ques_list.__len__()
print('== Dataset size:', self.data_size)
# ---- Data statistic ----
# {image id} -> {image feature absolutely path}
if self.opt.preload:
print('==== Pre-Loading features ...')
time_start = time.time()
self.iid_to_img_feat = img_feat_load(self.img_feat_path_list)
time_end = time.time()
print('==== Finished in {}s'.format(int(time_end-time_start)))
else:
self.iid_to_img_feat_path = img_feat_path_load(self.img_feat_path_list)
# {question id} -> {question}
self.qid_to_ques = ques_load(self.ques_list)
# Tokenize
self.token_to_ix, self.pretrained_emb = tokenize(self.stat_ques_list, opt.use_glove)
self.token_size = self.token_to_ix.__len__()
print('== Question token vocab size:', self.token_size)
# Answer statistics
# Building the answer dict during training does not guarantee
# the same order of {ans_to_ix}, so we published our
# answer dict to ensure that our pre-trained model
# can be adapted on each machine.
# Thanks to <NAME> (https://github.com/lichengunc)
# for finding this bug and providing the solutions.
# self.ans_to_ix, self.ix_to_ans = ans_stat(self.stat_ans_list, __C.ANS_FREQ)
self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
self.ans_size = self.ans_to_ix.__len__()
print('== Answer vocab size (occur more than {} times):'.format(8), self.ans_size)
print('load dataset finished.')
def __getitem__(self, idx):
# For code safety
img_feat_iter = np.zeros(1)
ques_ix_iter = np.zeros(1)
ans_iter = np.zeros(1)
# Process ['train'] and ['val', 'test'] respectively
if self.opt.run_mode in ['train']:
# Load the run data from list
ans = self.ans_list[idx]
ques = self.qid_to_ques[str(ans['question_id'])]
# Process image feature from (.npz) file
if self.opt.preload:
img_feat_x = self.iid_to_img_feat[str(ans['image_id'])]
else:
img_feats = np.load(self.iid_to_img_feat_path[str(ans['image_id'])])
img_feat_x = img_feats['x'].transpose((1, 0))
img_feat_iter = pad_img_feat(img_feat_x, self.opt.img_feat_pad_size)
boxes = pad_img_feat(img_feats['bbox'], self.opt.img_feat_pad_size)
# Process question
ques_ix_iter = proc_ques(ques, self.token_to_ix, self.opt.max_token)
# Process answer
ans_iter = proc_ans(ans, self.ans_to_ix)
return torch.from_numpy(img_feat_iter), \
torch.from_numpy(ques_ix_iter), \
torch.from_numpy(ans_iter), torch.from_numpy(boxes), torch.tensor([idx]), self.opt.run_mode
else:
# Load the run data from list
ques = self.ques_list[idx]
# # Process image feature from (.npz) file
# img_feat = np.load(self.iid_to_img_feat_path[str(ques['image_id'])])
# img_feat_x = img_feat['x'].transpose((1, 0))
# Process image feature from (.npz) file
if self.opt.preload:
img_feat_x = self.iid_to_img_feat[str(ques['image_id'])]
else:
img_feats = np.load(self.iid_to_img_feat_path[str(ques['image_id'])])
img_feat_x = img_feats['x'].transpose((1, 0))
img_feat_iter = pad_img_feat(img_feat_x, self.opt.img_feat_pad_size)
# Process question
ques_ix_iter = proc_ques(ques, self.token_to_ix, self.opt.max_token)
# only works for batchsize=1
return torch.from_numpy(img_feat_iter), \
torch.from_numpy(ques_ix_iter), \
torch.from_numpy(ans_iter), img_feats, idx
def __len__(self):
return self.data_size
class MIMICDatasetBase(Dataset):
"""
MIMIC dataset base: includes everything but (train/val/test) qa pickles
"""
def __init__(self, opt) -> None:
super().__init__()
self.opt = opt
# self.chexpert_df = pd.read_csv(f'/home/qiyuan/2021summer/physionet.org/files/mimic-cxr-jpg/2.0.0/mimic-cxr-2.0.0-chexpert.csv.gz')
f1 = h5py.File(opt.cxr_img_feat_path, 'r')
print(f'keys: {f1.keys()}')
self.image_features = f1['image_features'] # [377110, 60, 1024], 36 ana + 24 finding
# self.bbox_label = f1['bbox_label'] # [377k, 60]
# self.image_adj_matrix = f1['image_adj_matrix'] # [377k, 100, 100]
self.image_bb = f1['image_bb'] # [377k, 60, 4]
# self.pos_boxes = f1['pos_boxes'] # [377k, 2]
# self.semantic_adj_matrix = f1['semantic_adj_matrix'] # [377k, 100, 100]
self.spatial_features = f1['spatial_features'] # [377k, 60, 6]
f5 = h5py.File(opt.ana_pool_finding_path, 'r')
print(f'f5 keys: {f5.keys()}')
# anatomical box pooled findings feature
self.ana_pooled_feats = f5['image_features'] # [377k, 26, 1024]
self.v_dim = self.ana_pooled_feats.chunks[-1] # visual feat dim
self.s_dim = self.spatial_features.chunks[-1] # spatial dim
with open(opt.mimic_ans_dict_path['ans2idx'], 'rb') as f3:
self.ans_to_ix = pickle.load(f3)
# because no_finding becomes yes or no, so 15 labels
with open(opt.mimic_ans_dict_path['idx2ans'], 'rb') as f4:
self.ix_to_ans = pickle.load(f4)
self.ans_size = self.ans_to_ix.__len__() # was self.ans_to_ix.__len__()
print('== Answer vocab size (occur more than {} times):'.format(8), self.ans_size)
print('load mimic base dataset finished.')
class MIMICDatasetSplit(MIMICDatasetBase):
"""
train/val/test split of MIMIC QA dataset
"""
def __init__(self, opt) -> None:
super().__init__(opt)
with open(opt.mimic_qa_path[opt.run_mode], 'rb') as f2:
self.qa = pickle.load(f2) # qa pairs
# if opt.run_mode == 'train':
# self.qa = random.sample(self.qa, 20000)
self.token_to_ix, self.pretrained_emb = tokenize(self.qa, opt.use_glove)
self.token_size = self.token_to_ix.__len__()
self.data_size = self.qa.__len__()
print('== Question token vocab size:', self.token_size)
def __getitem__(self, idx):
img_feat_iter = np.zeros(1)
ques_ix_iter = np.zeros(1)
ans_iter = np.zeros(1)
qa = self.qa[idx]
# subject_id = int(qa['subject_id'][:-2])
# study_id = int(qa['study_id'][:-2])
# multi_label = (self.chexpert_df[(self.chexpert_df['study_id']==study_id) & (self.chexpert_df['subject_id']==subject_id)] > 0).values
# multi_label = multi_label[0][2:].astype('float32')
# Process question
ques_ix_iter = proc_ques(qa, self.token_to_ix, self.opt.max_token)
# Process answer
ans_iter = np.array(proc_mimic_ans(qa['answer'], self.ans_to_ix)) # only train for yes
if self.opt.run_mode in ['train']:
# randomly dropout some dim of features
rand_dim = np.random.choice(np.arange(self.v_dim), replace=False,
size=int(self.v_dim * 0.2))
img_feats = np.copy(self.image_features[qa['image']]) # must, or can't dropout
img_feats[:, rand_dim] = 0
# img_feats = np.array(self.image_features[qa['image']])
# ana_find_feats = np.array(self.ana_pooled_feats[qa['image']])
# img_feats = ana_find_feats
img_feat_iter = pad_img_feat(img_feats, self.opt.img_feat_pad_size)
# return torch.from_numpy(img_feat_iter), \
# torch.from_numpy(ques_ix_iter), torch.from_numpy(ans_iter), \
# torch.tensor([idx]), # self.opt.run_mode
else: # ['val', 'test']
img_feats = self.image_features[qa['image']]
img_feat_iter = pad_img_feat(img_feats, self.opt.img_feat_pad_size)
boxes = pad_img_feat(self.image_bb[qa['image']], self.opt.img_feat_pad_size)
# only works for batchsize=1
return torch.from_numpy(img_feat_iter), \
torch.from_numpy(ques_ix_iter), torch.from_numpy(ans_iter), \
torch.tensor([idx]) # img_feats, boxes,
def __len__(self):
return self.data_size
class CustomLoader(DataLoader):
def __init__(self, dataset, opt):
# self.dataset = dataset
self.opt = opt
self.init_kwargs = {
'dataset': dataset,
'batch_size': self.opt.batch_size,
'shuffle': True,
'collate_fn': self.collate_fn,
'num_workers': self.opt.num_workers,
'pin_memory': self.opt.pin_mem,
'drop_last': True,
}
super().__init__(**self.init_kwargs)
@staticmethod
def collate_fn(data):
# if data[0][-1] == 'train':
# img_feat_iter, ques_ix_iter, ans_iter, idx = zip(*data)
# img_feat_iter = torch.stack(img_feat_iter, dim=0)
# ques_ix_iter = torch.stack(ques_ix_iter, dim=0)
# ans_iter = torch.stack(ans_iter, dim=0)
# idx = torch.stack(idx, dim=0)
# # multi_label = torch.stack(multi_label, dim=0)
# return img_feat_iter, ques_ix_iter, ans_iter, idx
# else:
img_feat_iter, ques_ix_iter, ans_iter, idx = zip(*data)
img_feat_iter = torch.stack(img_feat_iter, dim=0)
ques_ix_iter = torch.stack(ques_ix_iter, dim=0)
ans_iter = torch.stack(ans_iter, dim=0)
idx = torch.stack(idx, dim=0)
# multi_label = torch.stack(multi_label, dim=0)
return img_feat_iter, ques_ix_iter, ans_iter, idx
# tentatively removed img_feats, boxes,
| 2.078125
| 2
|
src/scrapers/models/__init__.py
|
jskroodsma/helpradar
| 0
|
12570
|
<reponame>jskroodsma/helpradar<gh_stars>0
from .database import Db
from .initiatives import InitiativeBase, Platform, ImportBatch, InitiativeImport, BatchImportState, InitiativeGroup
| 1.046875
| 1
|
spyder/widgets/ipythonconsole/debugging.py
|
Bhanditz/spyder
| 1
|
12571
|
<filename>spyder/widgets/ipythonconsole/debugging.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Widget that handles communications between a console in debugging
mode and Spyder
"""
import ast
from qtpy.QtCore import Qt
from qtconsole.rich_jupyter_widget import RichJupyterWidget
class DebuggingWidget(RichJupyterWidget):
"""
Widget with the necessary attributes and methods to handle
communications between a console in debugging mode and
Spyder
"""
# --- Public API --------------------------------------------------
def write_to_stdin(self, line):
"""Send raw characters to the IPython kernel through stdin"""
self.kernel_client.input(line)
def set_spyder_breakpoints(self):
"""Set Spyder breakpoints into a debugging session"""
if self._reading:
self.kernel_client.input(
"!get_ipython().kernel._set_spyder_breakpoints()")
def dbg_exec_magic(self, magic, args=''):
"""Run an IPython magic while debugging."""
code = "!get_ipython().kernel.shell.run_line_magic('{}', '{}')".format(
magic, args)
self.kernel_client.input(code)
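# For illustration (not part of the original widget): dbg_exec_magic('who') sends
# "!get_ipython().kernel.shell.run_line_magic('who', '')" to the kernel's stdin,
# so the magic runs inside the namespace of the active Pdb session.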
def refresh_from_pdb(self, pdb_state):
"""
Refresh Variable Explorer and Editor from a Pdb session,
after running any pdb command.
See publish_pdb_state in utils/ipython/spyder_kernel.py and
notify_spyder in utils/site/sitecustomize.py.
"""
if 'step' in pdb_state and 'fname' in pdb_state['step']:
fname = pdb_state['step']['fname']
lineno = pdb_state['step']['lineno']
self.sig_pdb_step.emit(fname, lineno)
if 'namespace_view' in pdb_state:
self.sig_namespace_view.emit(ast.literal_eval(
pdb_state['namespace_view']))
if 'var_properties' in pdb_state:
self.sig_var_properties.emit(ast.literal_eval(
pdb_state['var_properties']))
# ---- Private API (overridden by us) ----------------------------
def _handle_input_request(self, msg):
"""Save history and add a %plot magic."""
if self._hidden:
raise RuntimeError('Request for raw input during hidden execution.')
# Make sure that all output from the SUB channel has been processed
# before entering readline mode.
self.kernel_client.iopub_channel.flush()
def callback(line):
# Save history to browse it later
self._control.history.append(line)
# This is the Spyder addition: add a %plot magic to display
# plots while debugging
if line.startswith('%plot '):
line = line.split()[-1]
code = "__spy_code__ = get_ipython().run_cell('%s')" % line
self.kernel_client.input(code)
else:
self.kernel_client.input(line)
if self._reading:
self._reading = False
self._readline(msg['content']['prompt'], callback=callback,
password=msg['content']['password'])
def _event_filter_console_keypress(self, event):
"""Handle Key_Up/Key_Down while debugging."""
key = event.key()
if self._reading:
self._control.current_prompt_pos = self._prompt_pos
if key == Qt.Key_Up:
self._control.browse_history(backward=True)
return True
elif key == Qt.Key_Down:
self._control.browse_history(backward=False)
return True
elif key in (Qt.Key_Return, Qt.Key_Enter):
self._control.reset_search_pos()
return super(DebuggingWidget,
self)._event_filter_console_keypress(event)
else:
return super(DebuggingWidget,
self)._event_filter_console_keypress(event)
| 2.578125
| 3
|
python/testData/completion/relativeImport/pkg/main.after.py
|
jnthn/intellij-community
| 2
|
12572
|
from .string import <caret>
| 1.054688
| 1
|
code/scripts/train_fxns_nonimage.py
|
estherrolf/representation-matters
| 1
|
12573
|
<gh_stars>1-10
import numpy as np
import sklearn.metrics
from dataset_chunking_fxns import subsample_df_by_groups
import sklearn
import sklearn.linear_model
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import time
# learning function: logistic regression multi-class
def fit_logistic_regression_multiclass(X,
y,
seed,
model_kwargs = {'penalty': 'l2', 'C':1},
weights=None):
if weights is None:
weights = np.ones(len(y))
else:
weights = weights
clf = sklearn.linear_model.LogisticRegression(**model_kwargs,
random_state = seed,
multi_class='multinomial',
max_iter=1000,
n_jobs = None)
clf.fit(X, y, sample_weight=weights)
return clf
# learning function: logistic regression
def fit_logistic_regression(X,
y,
seed,
model_kwargs = {'penalty': 'l2', 'C':1},
weights=None):
if weights is None:
weights = np.ones(len(y))
else:
weights = weights
clf = sklearn.linear_model.LogisticRegression(**model_kwargs,
random_state = seed,
multi_class='ovr',
max_iter=5000,
n_jobs = None)
clf.fit(X, y, sample_weight=weights)
return clf
def fit_rf_classifier(X, y, seed,
model_kwargs = {'max_depth': None, 'n_estimators': 100},
weights=None):
clf = RandomForestClassifier(**model_kwargs, random_state = seed, n_jobs=20)
if weights is None:
weights = np.ones(y.shape)
clf.fit(X, y, sample_weight=weights)
return clf
def fit_rf_regressor(X, y, seed,
model_kwargs = {'max_depth': None, 'n_estimators': 100},
weights=None):
clf = RandomForestRegressor(**model_kwargs, random_state = seed, n_jobs=20)
if weights is None:
weights = 1.0
clf.fit(X, y, sample_weight = weights)
return clf
def fit_ridge_regression(X, y, seed,
model_kwargs = {'alpha': 1.0},
weights=None):
reg = sklearn.linear_model.Ridge(**model_kwargs, normalize=True, random_state = seed, solver='svd')
if weights is None:
weights = np.ones(len(y))
reg.fit(X, y, sample_weight=weights)
return reg
def subset_and_train(data,
features,
group_key,
label_key,
subset_sizes,
pred_fxn,
model_kwargs,
acc_fxns,
predict_prob=False,
reweight = False,
reweight_target_dist = None,
fold_key = 'fold',
eval_key='test',
seed_start = 0,
num_seeds = 5,
verbose=True):
accs_total, accs_by_group = {}, {}
for acc_key in acc_fxns.keys():
accs_total[acc_key] = np.zeros((subset_sizes.shape[1],num_seeds))
accs_by_group[acc_key] = np.zeros((2,subset_sizes.shape[1],num_seeds))
groups = [[x] for x in range(subset_sizes.shape[0])]
# run the training
for s,seed in enumerate(range(seed_start,seed_start + num_seeds)):
rs_this = np.random.RandomState(seed)
print(seed,": ", end='')
for i in range(subset_sizes.shape[1]):
t1 = time.time()
print(i, end = ' ')
group_sizes_this = subset_sizes[:,i]
if verbose:
print(group_sizes_this, end = '')
# subsample the dataset (training points only)
data_subset = subsample_df_by_groups(data,
group_key,
groups,
fold_key = fold_key,
group_sizes = group_sizes_this,
rs = rs_this,
keep_test_val = True, shuffle=True)
data_subset_train = data_subset[data_subset[fold_key] == 'train']
# eval on the following set
data_subset_val = data_subset[data_subset[fold_key] == eval_key]
# index into features
train_idxs_this_round = data_subset_train['X_idxs']
val_idxs_this_round = data_subset_val['X_idxs']
X_train = features[train_idxs_this_round]
X_val = features[val_idxs_this_round]
y_train, g_train = data_subset_train[label_key].values, data_subset_train[group_key].values
y_val, g_val = data_subset_val[label_key].values, data_subset_val[group_key].values
if reweight:
# weights per group
group_fracs_this = group_sizes_this / group_sizes_this.sum()
train_weights_per_group = np.array(reweight_target_dist) / group_fracs_this
# print('train_weights_per_group ', train_weights_per_group)
# print(train_weights_per_group)
# weight per instance
train_weights = np.array(train_weights_per_group)[g_train.astype(int)]
# scale so that weights sum to n_train
train_weights = len(train_weights) * train_weights / train_weights.sum()
else:
train_weights = None
# allow for passing in lists of model kwargs, in case HPs need to change with allocation
if isinstance(model_kwargs, (list)):
model_kwargs_this = model_kwargs[i]
if verbose:
print(model_kwargs_this)
else:
model_kwargs_this = model_kwargs
clf = pred_fxn(X_train, y_train, seed,
weights=train_weights, model_kwargs=model_kwargs_this)
if predict_prob:
# take probability of class 1 as the prediction
preds = clf.predict_proba(X_val)[:,1]
else:
preds = clf.predict(X_val)
# if preds are already binary, this won't change anything
rounded_preds = np.asarray([int(p > 0.5) for p in preds])
for acc_key, acc_fxn in acc_fxns.items():
if acc_key == 'acc':
accs_total[acc_key][i,s] = acc_fxn(y_val, rounded_preds)
else:
accs_total[acc_key][i,s] = acc_fxn(y_val, preds)
for g in range(2):
for acc_key, acc_fxn in acc_fxns.items():
if acc_key == 'acc':
accs_by_group[acc_key][g,i,s] = acc_fxn(y_val[g_val == g], rounded_preds[g_val == g])
else:
accs_by_group[acc_key][g,i,s] = acc_fxn(y_val[g_val == g], preds[g_val == g])
t2 = time.time()
#print()
if verbose:
print('took {0} minutes'.format((t2-t1)/60))
print()
return accs_by_group, accs_total
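# Worked example of the reweighting arithmetic above (hypothetical numbers):
# with two groups of sizes [30, 70] and reweight_target_dist = [0.5, 0.5],
# group_fracs = [0.3, 0.7] and the per-group weights are [0.5/0.3, 0.5/0.7]
# ~= [1.667, 0.714]; the per-instance weights then sum to 30*1.667 + 70*0.714
# = 100 = n_train, so the final rescaling is a no-op in this balanced-target case.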
def cv_subset_and_train(data,
features,
group_key,
label_key,
subset_sizes,
pred_fxn,
model_kwargs,
acc_fxns,
predict_prob = False,
reweight=False,
reweight_target_dist=None,
num_seeds = 5,
verbose=True):
accs_total, accs_by_group = {}, {}
for acc_key in acc_fxns.keys():
accs_total[acc_key] = np.zeros((subset_sizes.shape[1],num_seeds))
accs_by_group[acc_key] = np.zeros((2,subset_sizes.shape[1],num_seeds))
for seed in range(num_seeds):
r = subset_and_train(data,
features,
group_key=group_key,
label_key=label_key,
subset_sizes=subset_sizes,
pred_fxn = pred_fxn,
model_kwargs = model_kwargs,
acc_fxns = acc_fxns,
reweight=reweight,
reweight_target_dist = reweight_target_dist,
predict_prob = predict_prob,
eval_key='val',
fold_key = 'cv_fold_{0}'.format(seed),
seed_start = seed,
num_seeds = 1,
verbose=verbose)
accs_by_group_this_seed, accs_total_this_seed = r
for acc_key in acc_fxns.keys():
accs_total[acc_key][:,seed] = accs_total_this_seed[acc_key].reshape(-1)
accs_by_group[acc_key][:,:,seed] = accs_by_group_this_seed[acc_key].reshape(2,-1)
return accs_by_group, accs_total
| 2.578125
| 3
|
MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py
|
lilsweetcaligula/MIT6.00.1x
| 0
|
12574
|
"""
PSET-7
Part 2: Triggers (PhraseTriggers)
At this point, you have no way of writing a trigger that matches on
"New York City" -- the only triggers you know how to write would be
a trigger that would fire on "New" AND "York" AND "City" -- which
also fires on the phrase "New students at York University love the
city". It's time to fix this. Since here you're asking for an exact
match, we will require that the cases match, but we'll be a little
more flexible on word matching. So, "New York City" will match:
* New York City sees movie premiere
* In the heart of New York City's famous cafe
* New York Cityrandomtexttoproveapointhere
but will not match:
* I love new york city
* I love New York City!!!!!!!!!!!!!!
PROBLEM 9
Implement a phrase trigger (PhraseTrigger) that fires when a given
phrase is in any of the story's subject, title, or summary. The
phrase should be an argument to the class's constructor.
"""
# Enter your code for WordTrigger, TitleTrigger,
# SubjectTrigger, SummaryTrigger, and PhraseTrigger in this box
class WordTrigger(Trigger):
def __init__(self, word):
self.word = word
def internalAreCharsEqualIgnoreCase(self, c1, c2):
if type(c1) != str or type(c2) != str:
raise TypeError("Arg not of type str")
if len(c1) > 1 or len(c2) > 1:
raise TypeError("Expected a char. Length not equal to 1")
return c1[0] == c2[0] or \
(ord(c1[0]) > 0x60 and (ord(c1[0]) - 0x20 == ord(c2[0])) or ord(c1[0]) < 0x5A and (ord(c1[0]) + 0x20 == ord(c2[0])))
def isWordIn(self, text):
"""
Returns True if word is present in text as
whole word. False otherwise.
"""
charsMatched = 0
firstCharMatchInd = -1
for i in range( len(text) ):
if self.internalAreCharsEqualIgnoreCase(text[i], self.word[0]):
# case-insensitive check for text[i] == self.word[0]
firstCharMatchInd = i
charsMatched += 1
wordInd = 1
while wordInd < len(self.word) and wordInd + firstCharMatchInd < len(text):
if self.internalAreCharsEqualIgnoreCase(self.word[wordInd], text[wordInd + firstCharMatchInd]):
# case-insensitive check for self.word[wordInd] == text[wordInd + firstCharMatchInd]
charsMatched += 1
wordInd += 1
elif self.internalAreCharsEqualIgnoreCase(self.word[wordInd], self.word[0]):
# case-insensitive check for text[i] == self.word[0]
charsMatched = 1
firstCharMatchInd = wordInd + firstCharMatchInd
wordInd = firstCharMatchInd
continue
else:
charsMatched = 0
i = wordInd + firstCharMatchInd
break
if charsMatched == len(self.word):
if len(self.word) == len(text):
return True
elif firstCharMatchInd > 0 and firstCharMatchInd + len(self.word) == len(text):
if text[firstCharMatchInd - 1].isspace() or text[firstCharMatchInd - 1] in string.punctuation:
return True
elif firstCharMatchInd == 0 and firstCharMatchInd + len(self.word) + 1 < len(text):
if text[firstCharMatchInd + len(self.word)].isspace() or text[firstCharMatchInd + len(self.word)] in string.punctuation:
return True
else:
if (text[firstCharMatchInd - 1].isspace() or text[firstCharMatchInd - 1] in string.punctuation) \
and (text[firstCharMatchInd + len(self.word)].isspace() or text[firstCharMatchInd + len(self.word)] in string.punctuation):
return True
return False
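# Note (a sketch, not part of the graded submission): the scanner above hand-rolls a
# case-insensitive whole-word search. A close equivalent using the standard library is
# a word-boundary regex, e.g.
# re.search(r'\b' + re.escape(word) + r'\b', text, re.IGNORECASE) is not None
# (requires `import re`; the isWordIn code above also relies on `import string`).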
class TitleTrigger(WordTrigger):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.isWordIn( story.getTitle() )
class SubjectTrigger(WordTrigger):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.isWordIn( story.getSubject() )
class SummaryTrigger(WordTrigger):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.isWordIn( story.getSummary() )
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.word = phrase
def isWordIn(self, text):
charsMatched = 0
firstCharMatchInd = -1
for i in range( len(text) ):
if text[i] == self.word[0]:
firstCharMatchInd = i
charsMatched += 1
wordInd = 1
while wordInd < len(self.word) and wordInd + firstCharMatchInd < len(text):
if self.word[wordInd] == text[wordInd + firstCharMatchInd]:
charsMatched += 1
wordInd += 1
elif self.word[wordInd] == self.word[0]:
charsMatched = 1
firstCharMatchInd = wordInd + firstCharMatchInd
wordInd = firstCharMatchInd
continue
else:
charsMatched = 0
i = wordInd + firstCharMatchInd
break
if charsMatched == len(self.word):
return True
return False
def evaluate(self, story):
return self.isWordIn( story.getTitle() ) or \
self.isWordIn( story.getSubject() ) or \
self.isWordIn( story.getSummary() )
| 3.734375
| 4
|
vault/tests/unit/test_views.py
|
Natan7/vault
| 1
|
12575
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from mock import Mock, patch
from vault.tests.fakes import fake_request
from vault.views import SetProjectView
from django.utils.translation import ugettext as _
class SetProjectTest(TestCase):
def setUp(self):
self.view = SetProjectView.as_view()
self.request = fake_request(method='GET')
self.request.user.is_authenticated = lambda: False
def tearDown(self):
patch.stopall()
def test_set_project_needs_authentication(self):
response = self.view(self.request)
self.assertEqual(response.status_code, 302)
@patch('vault.views.switch')
def test_set_new_project_id_to_session(self, mock_switch):
self.request.user.is_authenticated = lambda: True
self.assertEqual(self.request.session.get('project_id'), '1')
response = self.view(self.request, project_id=2)
self.assertEqual(self.request.session.get('project_id'), 2)
@patch('vault.views.switch')
def test_set_new_project_id_to_session_exception(self, mock_switch):
self.request.user.is_authenticated = lambda: True
mock_switch.side_effect = ValueError()
self.assertEqual(self.request.session.get('project_id'), '1')
response = self.view(self.request, project_id=2)
self.assertEqual(self.request.session.get('project_id'), 2)
msgs = [msg for msg in self.request._messages]
self.assertEqual(msgs[0].message, _('Unable to change your project.'))
| 2.34375
| 2
|
qa/tasks/cephfs/test_dump_tree.py
|
rpratap-bot/ceph
| 4
|
12576
|
<gh_stars>1-10
from tasks.cephfs.cephfs_test_case import CephFSTestCase
import random
import os
class TestDumpTree(CephFSTestCase):
def get_paths_to_ino(self):
inos = {}
p = self.mount_a.run_shell(["find", "./"])
paths = p.stdout.getvalue().strip().split()
for path in paths:
inos[path] = self.mount_a.path_to_ino(path, False)
return inos
def populate(self):
self.mount_a.run_shell(["git", "clone",
"https://github.com/ceph/ceph-qa-suite"])
def test_basic(self):
self.mount_a.run_shell(["mkdir", "parent"])
self.mount_a.run_shell(["mkdir", "parent/child"])
self.mount_a.run_shell(["touch", "parent/child/file"])
self.mount_a.run_shell(["mkdir", "parent/child/grandchild"])
self.mount_a.run_shell(["touch", "parent/child/grandchild/file"])
inos = self.get_paths_to_ino()
tree = self.fs.mds_asok(["dump", "tree", "/parent/child", "1"])
target_inos = [inos["./parent/child"], inos["./parent/child/file"],
inos["./parent/child/grandchild"]]
for ino in tree:
del target_inos[target_inos.index(ino['ino'])] # don't catch!
assert(len(target_inos) == 0)
def test_random(self):
random.seed(0)
self.populate()
inos = self.get_paths_to_ino()
target = random.sample(inos.keys(), 1)[0]
if target != "./":
target = os.path.dirname(target)
subtree = [path for path in inos.keys() if path.startswith(target)]
target_inos = [inos[path] for path in subtree]
tree = self.fs.mds_asok(["dump", "tree", target[1:]])
for ino in tree:
del target_inos[target_inos.index(ino['ino'])] # don't catch!
assert(len(target_inos) == 0)
target_depth = target.count('/')
maxdepth = max([path.count('/') for path in subtree]) - target_depth
depth = random.randint(0, maxdepth)
target_inos = [inos[path] for path in subtree \
if path.count('/') <= depth + target_depth]
tree = self.fs.mds_asok(["dump", "tree", target[1:], str(depth)])
for ino in tree:
del target_inos[target_inos.index(ino['ino'])] # don't catch!
assert(len(target_inos) == 0)
| 2
| 2
|
catkin_ws/src/devel_scripts/stepper.py
|
AROMAeth/robo_code
| 0
|
12577
|
<gh_stars>0
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
control_pins = [7,11,13,15]
for pin in control_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
halfstep_seq = [
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1]
]
# speed: from 0 to 1 (one being the fastest)
# steps: 50 steps = one rotation
def move_backward(steps, speed):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
time.sleep(max(0.001/speed,0.001))
def move_forward(steps, speed):
for i in range(steps):
for halfstep in range(7,-1,-1):
for pin in range(4):
GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
time.sleep(max(0.001/speed,0.001))
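# Timing sketch for the helpers above (illustrative only): at speed=0.1 each
# half-step sleeps max(0.001/0.1, 0.001) = 0.01 s, so move_forward(50, 0.1) takes
# roughly 50 * 8 * 0.01 s = 4 s for one full rotation.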
for k in range(1,10,1):
move_forward(50,0.1)
time.sleep(0.5)
#move_forward(50,0.25)
time.sleep(1)
#move_backward(500,0.5)
GPIO.cleanup()
| 2.984375
| 3
|
Experimental/OpenCVExp.py
|
awesomesauce12/6DBytes-CV
| 1
|
12578
|
import numpy as np
import cv2
import os
import math
os.system("fswebcam -r 507x456 --no-banner image11.jpg")
def showImage(capImg):
cv2.imshow('img', capImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
img = cv2.imread('image11.jpg',-1)
height, width, channel = img.shape
topy= height
topx = width
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_color = np.array([0,255,255])
upper_color = np.array([0,255,255])
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(img,img, mask=mask)
'''def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x,y), 100, (255,255,255), -1)'''
'''cap = cv2.VideoCapture(-1)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('hjhj', gray)
if cv2.waitKey(0) & 0xFF -- ord('q'):
break
cap.release()
cv2.destroyAllWindows()'''
propx = (topx/512)
propy = (topy/512)
'''lineX1 = int(0*propx)
lineY2 = int(0*propy)
lineX2 = int(511*propx)
lineY1 = int(511*propy)
img = cv2.line(img, (lineX1,lineY1), (lineX2, lineY2), (255,255,255), 5)'''
w = 100*(propx+propy)/2
x1 = int(topx/2 - w/2)
x2 = int(topx/2 + w/2)
y1 = int(topy/2 + w/2)
y2 = int(topy/2 - w/2)
img = cv2.rectangle(res, (x1,y1), (x2,y2), (0,255,0),3)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
showImage(img)
ret, thresh = cv2.threshold(img, 15, 250, 0)
showImage(thresh)
image, contours, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#showImage(image)
cv2.drawContours(img, contours, 0, (0,255,0), 3)
showImage(img)
print('Num of Contours ', len(contours))
cnt = contours[0]
M = cv2.moments(cnt)
print (M)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
area = cv2.contourArea(cnt)
print (cx)
print (cy)
print (area)
'''xCircle = 40*propx
xCircle = int(xCircle)
yCircle = xCircle
radCircle = xCircle
img = cv2.circle(img, (xCircle, yCircle), radCircle, (0,0,255),-1)
x3 = int(topx - 60*propx)
y3 = int(topy - 110*propy)
minAx = int(50*propx)
majAx = int(100*propy)
img = cv2.ellipse(img, (x3, y3), (minAx,majAx), 0, 0, 360, (0,150,255), -1)'''
'''pt1X = int(70*propx)
pt1Y = int(60*propy)
pt2X = int(154*propx)
pt2Y = int(23*propy)
pt3X = int(500*propx)
pt3Y = int(3*propy)'''
#pts = np.array([[pt1X, pt1Y], [pt2X, pt2Y], [pt3X, pt3Y]], np.int32)
#pts = pts.reshape((-1,1,2))
#img = cv2.polylines(img, [pts], True, (100,100,234))
#font = cv2.FONT_HERSHEY_SIMPLEX
#startPtX = int(240*propx)
#startPtY = int(240*propy)
#scale = 2*(propx + propy)/2
#cv2.putText(img, 'Apurva', (startPtX, startPtY), font, scale, (210, 80, 150), 4, cv2.LINE_AA)
#cv2.imshow("kl", img)
'''cv2.setMouseCallback('kl', draw_circle)'''
''''''
#cv2.imshow('frame', img)
#cv2.imshow('mask',mask)
cv2.imshow('res',res)
'''sd = img[130:200, 175:245]
img[20:90, 140:210]=sd
cv2.imshow("kl", img)'''
cv2.waitKey(0)
cv2.destroyAllWindows()
| 2.6875
| 3
|
poem/Poem/urls_public.py
|
kzailac/poem
| 0
|
12579
|
<reponame>kzailac/poem
from django.conf.urls import include
from django.http import HttpResponseRedirect
from django.urls import re_path
from Poem.poem_super_admin.admin import mysuperadmin
urlpatterns = [
re_path(r'^$', lambda x: HttpResponseRedirect('/poem/superadmin/')),
re_path(r'^superadmin/', mysuperadmin.urls),
re_path(r'^saml2/', include(('djangosaml2.urls', 'poem'),
namespace='saml2')),
]
| 1.617188
| 2
|
optimism/ReadMesh.py
|
btalamini/optimism
| 0
|
12580
|
import json
from optimism.JaxConfig import *
from optimism import Mesh
def read_json_mesh(meshFileName):
with open(meshFileName, 'r', encoding='utf-8') as jsonFile:
meshData = json.load(jsonFile)
coordinates = np.array(meshData['coordinates'])
connectivity = np.array(meshData['connectivity'], dtype=int)
nodeSets = {}
for key in meshData['nodeSets']:
nodeSets[key] = np.array(meshData['nodeSets'][key])
sideSets = {}
exodusSideSets = meshData['sideSets']
for key in exodusSideSets:
elements = np.array(exodusSideSets[key][0], dtype=int)
sides = np.array(exodusSideSets[key][1], dtype=int)
sideSets[key] = np.column_stack((elements, sides))
blocks=None
return Mesh.construct_mesh_from_basic_data(coordinates, connectivity,
blocks, nodeSets, sideSets)
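# Sketch of the JSON layout the reader above expects (inferred from the parsing
# code; the values below are made up for illustration):
# {
# "coordinates": [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
# "connectivity": [[0, 1, 2]],
# "nodeSets": {"left": [0, 2]},
# "sideSets": {"bottom": [[0], [0]]}
# }
# Each side set is a pair (element ids, local side ids) that is column-stacked
# into an (n, 2) integer array.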
| 2.703125
| 3
|
networkx/algorithms/approximation/ramsey.py
|
rakschahsa/networkx
| 445
|
12581
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Ramsey numbers.
"""
# Copyright (C) 2011 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
import networkx as nx
from ...utils import arbitrary_element
__all__ = ["ramsey_R2"]
__author__ = """<NAME> (<EMAIL>)"""
def ramsey_R2(G):
r"""Approximately computes the Ramsey number `R(2;s,t)` for graph.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
max_pair : (set, set) tuple
Maximum clique, Maximum independent set.
"""
if not G:
return set(), set()
node = arbitrary_element(G)
nbrs = nx.all_neighbors(G, node)
nnbrs = nx.non_neighbors(G, node)
c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy())
c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy())
c_1.add(node)
i_2.add(node)
# Choose the larger of the two cliques and the larger of the two
# independent sets, according to cardinality.
return max(c_1, c_2, key=len), max(i_1, i_2, key=len)
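# Usage sketch (illustrative, not part of the library): on the path graph 0-1-2,
# clique, indep_set = ramsey_R2(nx.path_graph(3))
# returns a two-node maximum clique (e.g. {0, 1}) and the maximum independent set {0, 2}.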
| 3.34375
| 3
|
pysyte/oss/linux.py
|
git-wwts/pysyte
| 1
|
12582
|
"""Linux-specific code"""
from pysyte.types import paths
def xdg_home():
"""path to $XDG_CONFIG_HOME
>>> assert xdg_home() == paths.path('~/.config').expand()
"""
return paths.environ_path("XDG_CONFIG_HOME", "~/.config")
def xdg_home_config(filename):
"""path to that file in $XDG_CONFIG_HOME
>>> assert xdg_home_config('fred') == paths.path('~/.config/fred').expand()
"""
return xdg_home() / filename
def xdg_dirs():
"""paths in $XDG_CONFIG_DIRS"""
return paths.environ_paths("XDG_CONFIG_DIRS")
def xdg_homes():
return [xdg_home()]
bash_paste = "xclip -selection clipboard"
bash_copy = "xclip -selection clipboard -o"
| 2.640625
| 3
|
osu/osu_overlay.py
|
HQupgradeHQ/Daylight
| 2
|
12583
|
<filename>osu/osu_overlay.py<gh_stars>1-10
import mpv
import keyboard
import time
p = mpv.MPV()
p.play("song_name.mp4")
def play_pause():
p.pause = not p.pause
keyboard.add_hotkey("e", play_pause)
def full():
p.fullscreen = not p.fullscreen
keyboard.add_hotkey("2", full)
def go_to_start():
p.time_pos = 2
keyboard.add_hotkey("1", go_to_start)
while 1:
time.sleep(40)
| 2.53125
| 3
|
test/unit/common/test_db.py
|
dreamhost/swift
| 0
|
12584
|
<gh_stars>0
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.db """
from __future__ import with_statement
import hashlib
import os
import unittest
from shutil import rmtree, copy
from StringIO import StringIO
from time import sleep, time
from uuid import uuid4
import simplejson
import sqlite3
import swift.common.db
from swift.common.db import AccountBroker, chexor, ContainerBroker, \
DatabaseBroker, DatabaseConnectionError, dict_factory, get_db_connection
from swift.common.utils import normalize_timestamp
from swift.common.exceptions import LockTimeout
class TestDatabaseConnectionError(unittest.TestCase):
def test_str(self):
err = \
DatabaseConnectionError(':memory:', 'No valid database connection')
self.assert_(':memory:' in str(err))
self.assert_('No valid database connection' in str(err))
err = DatabaseConnectionError(':memory:',
'No valid database connection', timeout=1357)
self.assert_(':memory:' in str(err))
self.assert_('No valid database connection' in str(err))
self.assert_('1357' in str(err))
class TestDictFactory(unittest.TestCase):
def test_normal_case(self):
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
conn.commit()
curs = conn.execute('SELECT one, two FROM test')
self.assertEquals(dict_factory(curs, curs.next()),
{'one': 'abc', 'two': 123})
self.assertEquals(dict_factory(curs, curs.next()),
{'one': 'def', 'two': 456})
class TestChexor(unittest.TestCase):
def test_normal_case(self):
self.assertEquals(chexor('d41d8cd98f00b204e9800998ecf8427e',
'new name', normalize_timestamp(1)),
'4f2ea31ac14d4273fe32ba08062b21de')
def test_invalid_old_hash(self):
self.assertRaises(TypeError, chexor, 'oldhash', 'name',
normalize_timestamp(1))
def test_no_name(self):
self.assertRaises(Exception, chexor,
'd41d8cd98f00b204e9800998ecf8427e', None, normalize_timestamp(1))
class TestGetDBConnection(unittest.TestCase):
def test_normal_case(self):
conn = get_db_connection(':memory:')
self.assert_(hasattr(conn, 'execute'))
def test_invalid_path(self):
self.assertRaises(DatabaseConnectionError, get_db_connection,
'invalid database path / name')
class TestDatabaseBroker(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(os.path.dirname(__file__), 'db')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_memory_db_init(self):
broker = DatabaseBroker(':memory:')
self.assertEqual(broker.db_file, ':memory:')
self.assertRaises(AttributeError, broker.initialize,
normalize_timestamp('0'))
def test_disk_db_init(self):
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
self.assertEqual(broker.db_file, db_file)
self.assert_(broker.conn is None)
def test_initialize(self):
self.assertRaises(AttributeError,
DatabaseBroker(':memory:').initialize,
normalize_timestamp('1'))
stub_dict = {}
def stub(*args, **kwargs):
for key in stub_dict.keys():
del stub_dict[key]
stub_dict['args'] = args
for key, value in kwargs.items():
stub_dict[key] = value
broker = DatabaseBroker(':memory:')
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assert_(hasattr(stub_dict['args'][0], 'execute'))
self.assertEquals(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assert_(hasattr(stub_dict['args'][0], 'execute'))
self.assertEquals(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
def test_delete_db(self):
def init_stub(conn, put_timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)',
(str(uuid4),))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
stub_called = [False]
def delete_stub(*a, **kw):
stub_called[0] = True
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker._initialize = init_stub
# Initializes a good broker for us
broker.initialize(normalize_timestamp('1'))
self.assert_(broker.conn is not None)
broker._delete_db = delete_stub
stub_called[0] = False
broker.delete_db('2')
self.assert_(stub_called[0])
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
broker._initialize = init_stub
broker.initialize(normalize_timestamp('1'))
broker._delete_db = delete_stub
stub_called[0] = False
broker.delete_db('2')
self.assert_(stub_called[0])
# ensure that metadata was cleared
m2 = broker.metadata
self.assert_(not any(v[0] for v in m2.itervalues()))
self.assert_(all(v[1] == normalize_timestamp('2')
for v in m2.itervalues()))
def test_get(self):
broker = DatabaseBroker(':memory:')
got_exc = False
try:
with broker.get() as conn:
conn.execute('SELECT 1')
except Exception:
got_exc = True
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
got_exc = False
try:
with broker.get() as conn:
conn.execute('SELECT 1')
except Exception:
got_exc = True
self.assert_(got_exc)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('CREATE TABLE test (one TEXT)')
try:
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("1")')
raise Exception('test')
conn.commit()
except Exception:
pass
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with broker.get() as conn:
self.assertEquals(
[r[0] for r in conn.execute('SELECT * FROM test')], [])
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
with broker.get() as conn:
self.assertEquals(
[r[0] for r in conn.execute('SELECT * FROM test')], ['1'])
orig_renamer = swift.common.db.renamer
try:
swift.common.db.renamer = lambda a, b: b
qpath = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(self.testdir))))
if qpath:
qpath += '/quarantined/tests/db'
else:
qpath = 'quarantined/tests/db'
# Test malformed database
copy(os.path.join(os.path.dirname(__file__),
'malformed_example.db'),
os.path.join(self.testdir, '1.db'))
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
exc = None
try:
with broker.get() as conn:
conn.execute('SELECT * FROM test')
except Exception, err:
exc = err
self.assertEquals(str(exc),
'Quarantined %s to %s due to malformed database' %
(self.testdir, qpath))
# Test corrupted database
copy(os.path.join(os.path.dirname(__file__),
'corrupted_example.db'),
os.path.join(self.testdir, '1.db'))
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
exc = None
try:
with broker.get() as conn:
conn.execute('SELECT * FROM test')
except Exception, err:
exc = err
self.assertEquals(str(exc),
'Quarantined %s to %s due to corrupted database' %
(self.testdir, qpath))
finally:
swift.common.db.renamer = orig_renamer
def test_lock(self):
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
got_exc = False
try:
with broker.lock():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
with broker.lock():
pass
with broker.lock():
pass
broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
broker2._initialize = stub
with broker.lock():
got_exc = False
try:
with broker2.lock():
pass
except LockTimeout:
got_exc = True
self.assert_(got_exc)
try:
with broker.lock():
raise Exception('test')
except Exception:
pass
with broker.lock():
pass
def test_newid(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.newid(uuid2)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEquals(len(uuids), 1)
self.assertNotEquals(uuids[0], uuid1)
uuid1 = uuids[0]
points = [(r[0], r[1]) for r in conn.execute('SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
self.assertEquals(len(points), 1)
self.assertEquals(points[0][0], -1)
self.assertEquals(points[0][1], uuid2)
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
uuid3 = str(uuid4())
broker.newid(uuid3)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEquals(len(uuids), 1)
self.assertNotEquals(uuids[0], uuid1)
uuid1 = uuids[0]
points = [(r[0], r[1]) for r in conn.execute('SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid3,))]
self.assertEquals(len(points), 1)
self.assertEquals(points[0][1], uuid3)
broker.newid(uuid2)
with broker.get() as conn:
uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
self.assertEquals(len(uuids), 1)
self.assertNotEquals(uuids[0], uuid1)
points = [(r[0], r[1]) for r in conn.execute('SELECT sync_point, '
'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
self.assertEquals(len(points), 1)
self.assertEquals(points[0][1], uuid2)
def test_get_items_since(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.execute('INSERT INTO test (one) VALUES ("3")')
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
self.assertEquals(broker.get_items_since(-1, 10),
[{'one': '1'}, {'one': '2'}, {'one': '3'}])
self.assertEquals(broker.get_items_since(-1, 2),
[{'one': '1'}, {'one': '2'}])
self.assertEquals(broker.get_items_since(1, 2),
[{'one': '2'}, {'one': '3'}])
self.assertEquals(broker.get_items_since(3, 2), [])
self.assertEquals(broker.get_items_since(999, 2), [])
def test_get_sync(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
self.assertEquals(broker.get_sync(uuid2), -1)
broker.newid(uuid2)
self.assertEquals(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
self.assertEquals(broker.get_sync(uuid3), -1)
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.commit()
broker.newid(uuid3)
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}],
incoming=False)
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
self.assertEquals(broker.get_sync(uuid2, incoming=False), 1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}],
incoming=False)
self.assertEquals(broker.get_sync(uuid2, incoming=False), 1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), 2)
def test_merge_syncs(self):
broker = DatabaseBroker(':memory:')
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
self.assertEquals(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
self.assertEquals(broker.get_sync(uuid2, incoming=False), -1)
self.assertEquals(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
{'sync_point': 4, 'remote_id': uuid3}],
incoming=False)
self.assertEquals(broker.get_sync(uuid2, incoming=False), 3)
self.assertEquals(broker.get_sync(uuid3, incoming=False), 4)
self.assertEquals(broker.get_sync(uuid2), 1)
self.assertEquals(broker.get_sync(uuid3), 2)
broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
self.assertEquals(broker.get_sync(uuid2), 5)
def test_get_replication_info(self):
self.get_replication_info_tester(metadata=False)
def test_get_replication_info_with_metadata(self):
self.get_replication_info_tester(metadata=True)
def get_replication_info_tester(self, metadata=False):
broker = DatabaseBroker(':memory:', account='a')
broker.db_type = 'test'
broker.db_contains_type = 'test'
broker_creation = normalize_timestamp(1)
broker_uuid = str(uuid4())
broker_metadata = metadata and simplejson.dumps(
{'Test': ('Value', normalize_timestamp(1))}) or ''
def _initialize(conn, put_timestamp):
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
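            # Mimic the container/account schema: triggers keep test_count and
            # the chexor-based hash in step with INSERTs and DELETEs on the
            # test table.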
conn.executescript('''
CREATE TABLE test (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT UNIQUE,
created_at TEXT
);
CREATE TRIGGER test_insert AFTER INSERT ON test
BEGIN
UPDATE test_stat
SET test_count = test_count + 1,
hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER test_update BEFORE UPDATE ON test
BEGIN
SELECT RAISE(FAIL,
'UPDATE not allowed; DELETE and INSERT');
END;
CREATE TRIGGER test_delete AFTER DELETE ON test
BEGIN
UPDATE test_stat
SET test_count = test_count - 1,
hash = chexor(hash, old.name, old.created_at);
END;
CREATE TABLE test_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
test_count INTEGER,
hash TEXT default '00000000000000000000000000000000',
id TEXT
%s
);
INSERT INTO test_stat (test_count) VALUES (0);
''' % (metadata and ", metadata TEXT DEFAULT ''" or ""))
conn.execute('''
UPDATE test_stat
SET account = ?, created_at = ?, id = ?, put_timestamp = ?
''', (broker.account, broker_creation, broker_uuid, put_timestamp))
if metadata:
conn.execute('UPDATE test_stat SET metadata = ?',
(broker_metadata,))
conn.commit()
broker._initialize = _initialize
put_timestamp = normalize_timestamp(2)
broker.initialize(put_timestamp)
info = broker.get_replication_info()
self.assertEquals(info, {'count': 0,
'hash': '00000000000000000000000000000000',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'max_row': -1, 'id': broker_uuid,
'metadata': broker_metadata})
insert_timestamp = normalize_timestamp(3)
with broker.get() as conn:
conn.execute('''
INSERT INTO test (name, created_at) VALUES ('test', ?)
''', (insert_timestamp,))
conn.commit()
info = broker.get_replication_info()
self.assertEquals(info, {'count': 1,
'hash': 'bdc4c93f574b0d8c2911a27ce9dd38ba',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'max_row': 1, 'id': broker_uuid,
'metadata': broker_metadata})
with broker.get() as conn:
conn.execute('DELETE FROM test')
conn.commit()
info = broker.get_replication_info()
self.assertEquals(info, {'count': 0,
'hash': '00000000000000000000000000000000',
'created_at': broker_creation, 'put_timestamp': put_timestamp,
'delete_timestamp': '0', 'max_row': 1, 'id': broker_uuid,
'metadata': broker_metadata})
return broker
def test_metadata(self):
# Initializes a good broker for us
broker = self.get_replication_info_tester(metadata=True)
# Add our first item
first_timestamp = normalize_timestamp(1)
first_value = '1'
broker.update_metadata({'First': [first_value, first_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
# Add our second item
second_timestamp = normalize_timestamp(2)
second_value = '2'
broker.update_metadata({'Second': [second_value, second_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Update our first item
first_timestamp = normalize_timestamp(3)
first_value = '1b'
broker.update_metadata({'First': [first_value, first_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Delete our second item (by setting to empty string)
second_timestamp = normalize_timestamp(4)
second_value = ''
broker.update_metadata({'Second': [second_value, second_timestamp]})
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
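        # reclaim() should purge empty-valued metadata only when its timestamp
        # is strictly older than the given point, as the three calls below
        # verify.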
# Reclaim at point before second item was deleted
broker.reclaim(normalize_timestamp(3))
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim at point second item was deleted
broker.reclaim(normalize_timestamp(4))
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' in broker.metadata)
self.assertEquals(broker.metadata['Second'],
[second_value, second_timestamp])
# Reclaim after point second item was deleted
broker.reclaim(normalize_timestamp(5))
self.assert_('First' in broker.metadata)
self.assertEquals(broker.metadata['First'],
[first_value, first_timestamp])
self.assert_('Second' not in broker.metadata)
class TestContainerBroker(unittest.TestCase):
""" Tests for swift.common.db.ContainerBroker """
def test_creation(self):
""" Test swift.common.db.ContainerBroker.__init__ """
broker = ContainerBroker(':memory:', account='a', container='c')
self.assertEqual(broker.db_file, ':memory:')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
curs = conn.cursor()
curs.execute('SELECT 1')
self.assertEqual(curs.fetchall()[0][0], 1)
def test_exception(self):
""" Test swift.common.db.ContainerBroker throwing a conn away after
unhandled exception """
first_conn = None
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
first_conn = conn
try:
with broker.get() as conn:
self.assertEquals(first_conn, conn)
raise Exception('OMG')
except Exception:
pass
self.assert_(broker.conn is None)
def test_empty(self):
""" Test swift.common.db.ContainerBroker.empty """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
self.assert_(broker.empty())
broker.put_object('o', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
self.assert_(not broker.empty())
sleep(.00001)
broker.delete_object('o', normalize_timestamp(time()))
self.assert_(broker.empty())
def test_reclaim(self):
broker = ContainerBroker(':memory:', account='test_account',
container='test_container')
broker.initialize(normalize_timestamp('1'))
broker.put_object('o', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', normalize_timestamp(time()))
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
sleep(.00001)
broker.reclaim(normalize_timestamp(time()), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
# Test the return values of reclaim()
broker.put_object('w', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('x', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('y', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('z', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
# Test before deletion
res = broker.reclaim(normalize_timestamp(time()), time())
broker.delete_db(normalize_timestamp(time()))
def test_delete_object(self):
""" Test swift.common.db.ContainerBroker.delete_object """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('o', normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.delete_object('o', normalize_timestamp(time()))
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM object "
"WHERE deleted = 1").fetchone()[0], 1)
def test_put_object(self):
""" Test swift.common.db.ContainerBroker.put_object """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
# Create initial object
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Reput same event
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old event
otimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_object('"{<object \'&\' name>}"', otimestamp, 124,
'application/x-test',
'aa0749bacbc79ec65fe206943d8fe449')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put old delete event
dtimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_object('"{<object \'&\' name>}"', dtimestamp, 0, '', '',
deleted=1)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 124)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'aa0749bacbc79ec65fe206943d8fe449')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put new delete event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 0, '', '',
deleted=1)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 1)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_object('"{<object \'&\' name>}"', timestamp, 123,
'application/x-test',
'5af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# We'll use this later
sleep(.0001)
in_between_timestamp = normalize_timestamp(time())
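        # A put using in_between_timestamp (newer than the last put, older
        # than the timestamp taken below) should still replace the row; see
        # the final block of this test.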
# New post event
sleep(.0001)
previous_timestamp = timestamp
timestamp = normalize_timestamp(time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0],
previous_timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 123)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'5af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
# Put event from after last put but before last post
timestamp = in_between_timestamp
broker.put_object('"{<object \'&\' name>}"', timestamp, 456,
'application/x-test3',
'6af83e3196bf99f440f31f2e1a6c9afe')
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM object").fetchone()[0],
'"{<object \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT created_at FROM object").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT size FROM object").fetchone()[0], 456)
self.assertEquals(conn.execute(
"SELECT content_type FROM object").fetchone()[0],
'application/x-test3')
self.assertEquals(conn.execute(
"SELECT etag FROM object").fetchone()[0],
'6af83e3196bf99f440f31f2e1a6c9afe')
self.assertEquals(conn.execute(
"SELECT deleted FROM object").fetchone()[0], 0)
def test_get_info(self):
""" Test swift.common.db.ContainerBroker.get_info """
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['account'], 'test1')
self.assertEquals(info['container'], 'test2')
self.assertEquals(info['hash'], '00000000000000000000000000000000')
info = broker.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
broker.put_object('o1', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 123)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 246)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 1000)
sleep(.00001)
broker.delete_object('o2', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
info = broker.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_set_x_syncs(self):
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
broker.set_x_container_sync_points(1, 2)
info = broker.get_info()
self.assertEquals(info['x_container_sync_point1'], 1)
self.assertEquals(info['x_container_sync_point2'], 2)
def test_get_report_info(self):
broker = ContainerBroker(':memory:', account='test1', container='test2')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['account'], 'test1')
self.assertEquals(info['container'], 'test2')
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
broker.put_object('o1', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 123)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 123, 'text/plain',
'5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 246)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
sleep(.00001)
broker.put_object('o2', normalize_timestamp(time()), 1000,
'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe')
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 1123)
self.assertEquals(info['reported_object_count'], 0)
self.assertEquals(info['reported_bytes_used'], 0)
put_timestamp = normalize_timestamp(time())
sleep(.001)
delete_timestamp = normalize_timestamp(time())
broker.reported(put_timestamp, delete_timestamp, 2, 1123)
info = broker.get_info()
self.assertEquals(info['object_count'], 2)
self.assertEquals(info['bytes_used'], 1123)
self.assertEquals(info['reported_put_timestamp'], put_timestamp)
self.assertEquals(info['reported_delete_timestamp'], delete_timestamp)
self.assertEquals(info['reported_object_count'], 2)
self.assertEquals(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o1', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 1)
self.assertEquals(info['bytes_used'], 1000)
self.assertEquals(info['reported_object_count'], 2)
self.assertEquals(info['reported_bytes_used'], 1123)
sleep(.00001)
broker.delete_object('o2', normalize_timestamp(time()))
info = broker.get_info()
self.assertEquals(info['object_count'], 0)
self.assertEquals(info['bytes_used'], 0)
self.assertEquals(info['reported_object_count'], 2)
self.assertEquals(info['reported_bytes_used'], 1123)
def test_list_objects_iter(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
for obj1 in xrange(4):
for obj2 in xrange(125):
broker.put_object('%d/%04d' % (obj1, obj2),
normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in xrange(125):
broker.put_object('2/0051/%04d' % obj,
normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
for obj in xrange(125):
broker.put_object('3/%04d/0049' % obj,
normalize_timestamp(time()), 0, 'text/plain',
'd41d8cd98f00b204e9800998ecf8427e')
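        # Fixture: 500 objects named 'N/MMMM' for N in 0..3, plus 125 under
        # '2/0051/' and 125 named '3/MMMM/0049', to exercise marker, prefix,
        # delimiter and path handling below.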
listing = broker.list_objects_iter(100, '', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0099')
listing = broker.list_objects_iter(100, '', '0/0050', None, '')
self.assertEquals(len(listing), 50)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0049')
listing = broker.list_objects_iter(100, '0/0099', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '1/0074')
listing = broker.list_objects_iter(55, '1/0074', None, None, '')
self.assertEquals(len(listing), 55)
self.assertEquals(listing[0][0], '1/0075')
self.assertEquals(listing[-1][0], '2/0004')
listing = broker.list_objects_iter(10, '', None, '0/01', '')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '0/0109')
listing = broker.list_objects_iter(10, '', None, '0/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0009')
listing = broker.list_objects_iter(10, '', None, '', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['0/', '1/', '2/', '3/'])
listing = broker.list_objects_iter(10, '2', None, None, '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['2/', '3/'])
        listing = broker.list_objects_iter(10, '2/', None, None, '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/'])
listing = broker.list_objects_iter(10, '2/0050', None, '2/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '2/0051')
self.assertEquals(listing[1][0], '2/0051/')
self.assertEquals(listing[2][0], '2/0052')
self.assertEquals(listing[-1][0], '2/0059')
listing = broker.list_objects_iter(10, '3/0045', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0045/', '3/0046', '3/0046/', '3/0047',
'3/0047/', '3/0048', '3/0048/', '3/0049',
'3/0049/', '3/0050'])
broker.put_object('3/0049/', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(10, '3/0048', None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/0049', '3/0049', '3/0049/',
'3/0049/0049', '3/0050', '3/0050/0049', '3/0051', '3/0051/0049',
'3/0052', '3/0052/0049'])
listing = broker.list_objects_iter(10, '3/0048', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/', '3/0049', '3/0049/', '3/0050',
'3/0050/', '3/0051', '3/0051/', '3/0052', '3/0052/', '3/0053'])
listing = broker.list_objects_iter(10, None, None, '3/0049/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing],
['3/0049/', '3/0049/0049'])
listing = broker.list_objects_iter(10, None, None, None, None,
'3/0049')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/0049/0049'])
listing = broker.list_objects_iter(2, None, None, '3/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['3/0000', '3/0000/'])
listing = broker.list_objects_iter(2, None, None, None, None, '3')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['3/0000', '3/0001'])
def test_list_objects_iter_prefix_delim(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('/pets/dogs/1', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/dogs/2', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/fish/a', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/fish/b', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/pets/fish_info.txt', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('/snakes', normalize_timestamp(0), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
        # list_objects_iter(limit, marker, end_marker, prefix, delimiter,
        #                   path=None, format=None)
listing = broker.list_objects_iter(100, None, None, '/pets/f', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/', '/pets/fish_info.txt'])
listing = broker.list_objects_iter(100, None, None, '/pets/fish/', '/')
self.assertEquals([row[0] for row in listing], ['/pets/fish/a', '/pets/fish/b'])
def test_double_check_trailing_delimiter(self):
""" Test swift.common.db.ContainerBroker.list_objects_iter for a
container that has an odd file with a trailing delimiter """
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/a/b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('a/b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/a', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b/b', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('c', normalize_timestamp(time()), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
listing = broker.list_objects_iter(15, None, None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b', 'b/a', 'b/b', 'c'])
listing = broker.list_objects_iter(15, None, None, '', '/')
self.assertEquals(len(listing), 5)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'b', 'b/', 'c'])
listing = broker.list_objects_iter(15, None, None, 'a/', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['a/', 'a/a', 'a/a/', 'a/b'])
listing = broker.list_objects_iter(15, None, None, 'b/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['b/a', 'b/b'])
def test_chexor(self):
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('a', normalize_timestamp(1), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker.put_object('b', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
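        # The stored hash should equal the byte-wise XOR of the md5 digests of
        # '<name>-<created_at>' for the current rows, hex-encoded.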
hasha = hashlib.md5('%s-%s' % ('a', '0000000001.00000')).digest()
hashb = hashlib.md5('%s-%s' % ('b', '0000000002.00000')).digest()
        hashc = ''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
broker.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
hashb = hashlib.md5('%s-%s' % ('b', '0000000003.00000')).digest()
hashc = ''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
def test_newid(self):
"""test DatabaseBroker.newid"""
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
id = broker.get_info()['id']
broker.newid('someid')
self.assertNotEquals(id, broker.get_info()['id'])
def test_get_items_since(self):
"""test DatabaseBroker.get_items_since"""
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
broker.put_object('a', normalize_timestamp(1), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
max_row = broker.get_replication_info()['max_row']
broker.put_object('b', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
items = broker.get_items_since(max_row, 1000)
self.assertEquals(len(items), 1)
self.assertEquals(items[0]['name'], 'b')
def test_sync_merging(self):
""" exercise the DatabaseBroker sync functions a bit """
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
self.assertEquals(broker2.get_sync('12345'), -1)
broker1.merge_syncs([{'sync_point': 3, 'remote_id': '12345'}])
broker2.merge_syncs(broker1.get_syncs())
self.assertEquals(broker2.get_sync('12345'), 3)
def test_merge_items(self):
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
broker1.put_object('a', normalize_timestamp(1), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
id = broker1.get_info()['id']
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 2)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
broker1.put_object('c', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 3)
self.assertEquals(['a', 'b', 'c'],
sorted([rec['name'] for rec in items]))
def test_merge_items_overwrite(self):
"""test DatabaseBroker.merge_items"""
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
broker1.put_object('a', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', normalize_timestamp(4), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(3))
def test_merge_items_post_overwrite_out_of_order(self):
"""test DatabaseBroker.merge_items"""
broker1 = ContainerBroker(':memory:', account='a', container='c')
broker1.initialize(normalize_timestamp('1'))
id = broker1.get_info()['id']
broker2 = ContainerBroker(':memory:', account='a', container='c')
broker2.initialize(normalize_timestamp('1'))
broker1.put_object('a', normalize_timestamp(2), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker1.put_object('b', normalize_timestamp(3), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
broker1.put_object('a', normalize_timestamp(4), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(3))
self.assertEquals(rec['content_type'], 'text/plain')
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(3))
broker1.put_object('b', normalize_timestamp(5), 0,
'text/plain', 'd41d8cd98f00b204e9800998ecf8427e')
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
for rec in items:
if rec['name'] == 'a':
self.assertEquals(rec['created_at'], normalize_timestamp(4))
if rec['name'] == 'b':
self.assertEquals(rec['created_at'], normalize_timestamp(5))
self.assertEquals(rec['content_type'], 'text/plain')
def premetadata_create_container_stat_table(self, conn, put_timestamp=None):
"""
Copied from swift.common.db.ContainerBroker before the metadata column was
added; used for testing with TestContainerBrokerBeforeMetadata.
    Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, normalize_timestamp(time()),
str(uuid4()), put_timestamp))
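# The "Before" test classes below monkey-patch create_container_stat_table
# with the legacy schema helpers defined in this module and re-run the full
# TestContainerBroker suite, presumably to exercise the brokers' handling of
# databases that predate the newer columns.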
class TestContainerBrokerBeforeMetadata(TestContainerBroker):
"""
Tests for swift.common.db.ContainerBroker against databases created before
the metadata column was added.
"""
def setUp(self):
self._imported_create_container_stat_table = \
ContainerBroker.create_container_stat_table
ContainerBroker.create_container_stat_table = \
premetadata_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
exc = None
with broker.get() as conn:
try:
conn.execute('SELECT metadata FROM container_stat')
except BaseException, err:
exc = err
self.assert_('no such column: metadata' in str(exc))
def tearDown(self):
ContainerBroker.create_container_stat_table = \
self._imported_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('SELECT metadata FROM container_stat')
def prexsync_create_container_stat_table(self, conn, put_timestamp=None):
"""
Copied from swift.common.db.ContainerBroker before the
x_container_sync_point[12] columns were added; used for testing with
TestContainerBrokerBeforeXSync.
    Create the container_stat table which is specific to the container DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
if put_timestamp is None:
put_timestamp = normalize_timestamp(0)
conn.executescript("""
CREATE TABLE container_stat (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
object_count INTEGER,
bytes_used INTEGER,
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT ''
);
INSERT INTO container_stat (object_count, bytes_used)
VALUES (0, 0);
""")
conn.execute('''
UPDATE container_stat
SET account = ?, container = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, self.container, normalize_timestamp(time()),
str(uuid4()), put_timestamp))
class TestContainerBrokerBeforeXSync(TestContainerBroker):
"""
Tests for swift.common.db.ContainerBroker against databases created before
the x_container_sync_point[12] columns were added.
"""
def setUp(self):
self._imported_create_container_stat_table = \
ContainerBroker.create_container_stat_table
ContainerBroker.create_container_stat_table = \
prexsync_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
exc = None
with broker.get() as conn:
try:
conn.execute('''SELECT x_container_sync_point1
FROM container_stat''')
except BaseException, err:
exc = err
self.assert_('no such column: x_container_sync_point1' in str(exc))
def tearDown(self):
ContainerBroker.create_container_stat_table = \
self._imported_create_container_stat_table
broker = ContainerBroker(':memory:', account='a', container='c')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('SELECT x_container_sync_point1 FROM container_stat')
class TestAccountBroker(unittest.TestCase):
""" Tests for swift.common.db.AccountBroker """
def test_creation(self):
""" Test swift.common.db.AccountBroker.__init__ """
broker = AccountBroker(':memory:', account='a')
self.assertEqual(broker.db_file, ':memory:')
got_exc = False
try:
with broker.get() as conn:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
curs = conn.cursor()
curs.execute('SELECT 1')
self.assertEqual(curs.fetchall()[0][0], 1)
def test_exception(self):
""" Test swift.common.db.AccountBroker throwing a conn away after
exception """
first_conn = None
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
first_conn = conn
try:
with broker.get() as conn:
self.assertEquals(first_conn, conn)
raise Exception('OMG')
except Exception:
pass
self.assert_(broker.conn is None)
def test_empty(self):
""" Test swift.common.db.AccountBroker.empty """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
self.assert_(broker.empty())
broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
self.assert_(not broker.empty())
sleep(.00001)
broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
self.assert_(broker.empty())
def test_reclaim(self):
broker = AccountBroker(':memory:', account='test_account')
broker.initialize(normalize_timestamp('1'))
broker.put_container('c', normalize_timestamp(time()), 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.put_container('c', 0, normalize_timestamp(time()), 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 1)
broker.reclaim(normalize_timestamp(time() - 999), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 1)
sleep(.00001)
broker.reclaim(normalize_timestamp(time()), time())
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
# Test reclaim after deletion. Create 3 test containers
broker.put_container('x', 0, 0, 0, 0)
broker.put_container('y', 0, 0, 0, 0)
broker.put_container('z', 0, 0, 0, 0)
res = broker.reclaim(normalize_timestamp(time()), time())
# self.assertEquals(len(res), 2)
# self.assert_(isinstance(res, tuple))
# containers, account_name = res
# self.assert_(containers is None)
# self.assert_(account_name is None)
# Now delete the account
broker.delete_db(normalize_timestamp(time()))
res = broker.reclaim(normalize_timestamp(time()), time())
# self.assertEquals(len(res), 2)
# self.assert_(isinstance(res, tuple))
# containers, account_name = res
# self.assertEquals(account_name, 'test_account')
# self.assertEquals(len(containers), 3)
# self.assert_('x' in containers)
# self.assert_('y' in containers)
# self.assert_('z' in containers)
# self.assert_('a' not in containers)
def test_delete_container(self):
""" Test swift.common.db.AccountBroker.delete_container """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
broker.put_container('o', normalize_timestamp(time()), 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 1)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 0)
sleep(.00001)
broker.put_container('o', 0, normalize_timestamp(time()), 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 0").fetchone()[0], 0)
self.assertEquals(conn.execute(
"SELECT count(*) FROM container "
"WHERE deleted = 1").fetchone()[0], 1)
def test_get_container_timestamp(self):
""" Test swift.common.db.AccountBroker.get_container_timestamp """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
# Create initial container
timestamp = normalize_timestamp(time())
broker.put_container('container_name', timestamp, 0, 0, 0)
        # test existing container
ts = broker.get_container_timestamp('container_name')
self.assertEquals(ts, timestamp)
        # test missing container
ts = broker.get_container_timestamp('something else')
self.assertEquals(ts, None)
def test_put_container(self):
""" Test swift.common.db.AccountBroker.put_container """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
# Create initial container
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Reput same event
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put old event
otimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_container('"{<container \'&\' name>}"', otimestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put old delete event
dtimestamp = normalize_timestamp(float(timestamp) - 1)
broker.put_container('"{<container \'&\' name>}"', 0, dtimestamp, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT delete_timestamp FROM container").fetchone()[0],
dtimestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
# Put new delete event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', 0, timestamp, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT delete_timestamp FROM container").fetchone()[0],
timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 1)
# Put new event
sleep(.00001)
timestamp = normalize_timestamp(time())
broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0)
with broker.get() as conn:
self.assertEquals(conn.execute(
"SELECT name FROM container").fetchone()[0],
'"{<container \'&\' name>}"')
self.assertEquals(conn.execute(
"SELECT put_timestamp FROM container").fetchone()[0], timestamp)
self.assertEquals(conn.execute(
"SELECT deleted FROM container").fetchone()[0], 0)
def test_get_info(self):
""" Test swift.common.db.AccountBroker.get_info """
broker = AccountBroker(':memory:', account='test1')
broker.initialize(normalize_timestamp('1'))
info = broker.get_info()
self.assertEquals(info['account'], 'test1')
self.assertEquals(info['hash'], '00000000000000000000000000000000')
info = broker.get_info()
self.assertEquals(info['container_count'], 0)
broker.put_container('c1', normalize_timestamp(time()), 0, 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 1)
sleep(.00001)
broker.put_container('c2', normalize_timestamp(time()), 0, 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 2)
sleep(.00001)
broker.put_container('c2', normalize_timestamp(time()), 0, 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 2)
sleep(.00001)
broker.put_container('c1', 0, normalize_timestamp(time()), 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 1)
sleep(.00001)
broker.put_container('c2', 0, normalize_timestamp(time()), 0, 0)
info = broker.get_info()
self.assertEquals(info['container_count'], 0)
def test_list_containers_iter(self):
""" Test swift.common.db.AccountBroker.list_containers_iter """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
for cont1 in xrange(4):
for cont2 in xrange(125):
broker.put_container('%d/%04d' % (cont1, cont2),
normalize_timestamp(time()), 0, 0, 0)
for cont in xrange(125):
broker.put_container('2/0051/%04d' % cont,
normalize_timestamp(time()), 0, 0, 0)
for cont in xrange(125):
broker.put_container('3/%04d/0049' % cont,
normalize_timestamp(time()), 0, 0, 0)
listing = broker.list_containers_iter(100, '', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0099')
listing = broker.list_containers_iter(100, '', '0/0050', None, '')
self.assertEquals(len(listing), 51)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0050')
listing = broker.list_containers_iter(100, '0/0099', None, None, '')
self.assertEquals(len(listing), 100)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '1/0074')
listing = broker.list_containers_iter(55, '1/0074', None, None, '')
self.assertEquals(len(listing), 55)
self.assertEquals(listing[0][0], '1/0075')
self.assertEquals(listing[-1][0], '2/0004')
listing = broker.list_containers_iter(10, '', None, '0/01', '')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '0/0109')
listing = broker.list_containers_iter(10, '', None, '0/01', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0100')
self.assertEquals(listing[-1][0], '0/0109')
listing = broker.list_containers_iter(10, '', None, '0/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '0/0000')
self.assertEquals(listing[-1][0], '0/0009')
listing = broker.list_containers_iter(10, '', None, '', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['0/', '1/', '2/', '3/'])
listing = broker.list_containers_iter(10, '2/', None, None, '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['3/'])
listing = broker.list_containers_iter(10, '', None, '2', '/')
self.assertEquals(len(listing), 1)
self.assertEquals([row[0] for row in listing], ['2/'])
listing = broker.list_containers_iter(10, '2/0050', None, '2/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals(listing[0][0], '2/0051')
self.assertEquals(listing[1][0], '2/0051/')
self.assertEquals(listing[2][0], '2/0052')
self.assertEquals(listing[-1][0], '2/0059')
listing = broker.list_containers_iter(10, '3/0045', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0045/', '3/0046', '3/0046/', '3/0047',
'3/0047/', '3/0048', '3/0048/', '3/0049',
'3/0049/', '3/0050'])
broker.put_container('3/0049/', normalize_timestamp(time()), 0, 0, 0)
listing = broker.list_containers_iter(10, '3/0048', None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/0049', '3/0049', '3/0049/', '3/0049/0049',
'3/0050', '3/0050/0049', '3/0051', '3/0051/0049',
'3/0052', '3/0052/0049'])
listing = broker.list_containers_iter(10, '3/0048', None, '3/', '/')
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['3/0048/', '3/0049', '3/0049/', '3/0050',
'3/0050/', '3/0051', '3/0051/', '3/0052',
'3/0052/', '3/0053'])
listing = broker.list_containers_iter(10, None, None, '3/0049/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing],
['3/0049/', '3/0049/0049'])
def test_double_check_trailing_delimiter(self):
""" Test swift.common.db.AccountBroker.list_containers_iter for an
account that has an odd file with a trailing delimiter """
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
broker.put_container('a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/a/a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/a/b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('a/b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('b/a', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('b/b', normalize_timestamp(time()), 0, 0, 0)
broker.put_container('c', normalize_timestamp(time()), 0, 0, 0)
listing = broker.list_containers_iter(15, None, None, None, None)
self.assertEquals(len(listing), 10)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'a/a', 'a/a/a', 'a/a/b', 'a/b', 'b',
'b/a', 'b/b', 'c'])
listing = broker.list_containers_iter(15, None, None, '', '/')
self.assertEquals(len(listing), 5)
self.assertEquals([row[0] for row in listing],
['a', 'a/', 'b', 'b/', 'c'])
listing = broker.list_containers_iter(15, None, None, 'a/', '/')
self.assertEquals(len(listing), 4)
self.assertEquals([row[0] for row in listing],
['a/', 'a/a', 'a/a/', 'a/b'])
listing = broker.list_containers_iter(15, None, None, 'b/', '/')
self.assertEquals(len(listing), 2)
self.assertEquals([row[0] for row in listing], ['b/a', 'b/b'])
def test_chexor(self):
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
broker.put_container('a', normalize_timestamp(1),
normalize_timestamp(0), 0, 0)
broker.put_container('b', normalize_timestamp(2),
normalize_timestamp(0), 0, 0)
hasha = hashlib.md5('%s-%s' %
('a', '0000000001.00000-0000000000.00000-0-0')
).digest()
hashb = hashlib.md5('%s-%s' %
('b', '0000000002.00000-0000000000.00000-0-0')
).digest()
hashc = \
''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
broker.put_container('b', normalize_timestamp(3),
normalize_timestamp(0), 0, 0)
hashb = hashlib.md5('%s-%s' %
('b', '0000000003.00000-0000000000.00000-0-0')
).digest()
hashc = \
''.join(('%02x' % (ord(a)^ord(b)) for a, b in zip(hasha, hashb)))
self.assertEquals(broker.get_info()['hash'], hashc)
def test_merge_items(self):
broker1 = AccountBroker(':memory:', account='a')
broker1.initialize(normalize_timestamp('1'))
broker2 = AccountBroker(':memory:', account='a')
broker2.initialize(normalize_timestamp('1'))
broker1.put_container('a', normalize_timestamp(1), 0, 0, 0)
broker1.put_container('b', normalize_timestamp(2), 0, 0, 0)
id = broker1.get_info()['id']
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 2)
self.assertEquals(['a', 'b'], sorted([rec['name'] for rec in items]))
broker1.put_container('c', normalize_timestamp(3), 0, 0, 0)
broker2.merge_items(broker1.get_items_since(
broker2.get_sync(id), 1000), id)
items = broker2.get_items_since(-1, 1000)
self.assertEquals(len(items), 3)
self.assertEquals(['a', 'b', 'c'],
sorted([rec['name'] for rec in items]))
def premetadata_create_account_stat_table(self, conn, put_timestamp):
"""
Copied from swift.common.db.AccountBroker before the metadata column was
added; used for testing with TestAccountBrokerBeforeMetadata.
Create account_stat table which is specific to the account DB.
:param conn: DB connection object
:param put_timestamp: put timestamp
"""
conn.executescript("""
CREATE TABLE account_stat (
account TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
container_count INTEGER,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0'
);
INSERT INTO account_stat (container_count) VALUES (0);
""")
conn.execute('''
UPDATE account_stat SET account = ?, created_at = ?, id = ?,
put_timestamp = ?
''', (self.account, normalize_timestamp(time()), str(uuid4()),
put_timestamp))
class TestAccountBrokerBeforeMetadata(TestAccountBroker):
"""
Tests for swift.common.db.AccountBroker against databases created before
the metadata column was added.
"""
def setUp(self):
self._imported_create_account_stat_table = \
AccountBroker.create_account_stat_table
AccountBroker.create_account_stat_table = \
premetadata_create_account_stat_table
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
exc = None
with broker.get() as conn:
try:
conn.execute('SELECT metadata FROM account_stat')
except BaseException, err:
exc = err
self.assert_('no such column: metadata' in str(exc))
def tearDown(self):
AccountBroker.create_account_stat_table = \
self._imported_create_account_stat_table
broker = AccountBroker(':memory:', account='a')
broker.initialize(normalize_timestamp('1'))
with broker.get() as conn:
conn.execute('SELECT metadata FROM account_stat')
if __name__ == '__main__':
unittest.main()
| 2.078125
| 2
|
robocorp-code/tests/robocorp_code_tests/fixtures.py
|
mardukbp/robotframework-lsp
| 92
|
12585
|
<gh_stars>10-100
import os
import pytest
from robocorp_ls_core.protocols import IConfigProvider
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.unittest_tools.cases_fixture import CasesFixture
from robocorp_code.protocols import IRcc, ActionResult
import sys
from typing import Any
from pathlib import Path
from robocorp_code_tests.protocols import IRobocorpLanguageServerClient
log = get_logger(__name__)
IMAGE_IN_BASE64 = "<KEY>"
@pytest.fixture
def language_server_client_class():
from robocorp_code_tests.robocode_language_server_client import (
RobocorpLanguageServerClient,
)
return RobocorpLanguageServerClient
@pytest.fixture
def language_server_class():
from robocorp_code.robocorp_language_server import RobocorpLanguageServer
return RobocorpLanguageServer
@pytest.fixture
def main_module():
from robocorp_code import __main__
return __main__
@pytest.fixture
def rcc_location() -> str:
from robocorp_code.rcc import download_rcc
from robocorp_code.rcc import get_default_rcc_location
location = get_default_rcc_location()
download_rcc(location, force=False)
return location
@pytest.fixture
def ci_endpoint() -> str:
ci_endpoint = os.environ.get("CI_ENDPOINT")
if ci_endpoint is None:
raise AssertionError("CI_ENDPOINT env variable must be specified for tests.")
return ci_endpoint
@pytest.fixture
def ci_credentials() -> str:
ci_credentials = os.environ.get("CI_CREDENTIALS")
if ci_credentials is None:
        raise AssertionError("CI_CREDENTIALS env variable must be specified for tests.")
return ci_credentials
@pytest.fixture
def rcc_config_location(tmpdir) -> str:
config_dir = tmpdir.join("config")
os.makedirs(str(config_dir))
return str(config_dir.join("config_test.yaml"))
@pytest.fixture(scope="session")
def cases(tmpdir_factory) -> CasesFixture:
basename = "res áéíóú"
copy_to = str(tmpdir_factory.mktemp(basename))
f = __file__
original_resources_dir = os.path.join(os.path.dirname(f), "_resources")
assert os.path.exists(original_resources_dir)
return CasesFixture(copy_to, original_resources_dir)
@pytest.fixture
def robocorp_home(tmpdir) -> str:
# import shutil
#
# ret = "c:/temp/tests_robohome"
# shutil.rmtree(os.path.join(ret, ".robocorp_code"), ignore_errors=True)
# return ret
return str(tmpdir.join("robocorp_home"))
@pytest.fixture
def config_provider(
ws_root_path: str,
rcc_location: str,
ci_endpoint: str,
rcc_config_location: str,
robocorp_home: str,
):
from robocorp_code.robocorp_config import RobocorpConfig
from robocorp_ls_core.ep_providers import DefaultConfigurationProvider
config = RobocorpConfig()
config.update(
{
"robocorp": {
"home": robocorp_home,
"rcc": {
"location": rcc_location,
"endpoint": ci_endpoint,
"config_location": rcc_config_location,
},
}
}
)
return DefaultConfigurationProvider(config)
@pytest.fixture
def rcc(config_provider: IConfigProvider, rcc_config_location: str) -> IRcc:
from robocorp_code.rcc import Rcc
rcc = Rcc(config_provider)
# We don't want to track tests.
# There's a bug in which the --do-not-track doesn't work the first time.
result = rcc._run_rcc(
"configure identity --do-not-track --config".split() + [rcc_config_location]
)
assert result.success
result_msg = result.result
assert result_msg
if "disabled" not in result_msg:
raise AssertionError(f"Did not expect {result_msg}")
return rcc
@pytest.fixture
def rcc_conda_installed(rcc: IRcc):
result = rcc.check_conda_installed()
    assert result.success, f"Error: {result}"
return rcc
_WS_INFO = (
{
"id": "workspace_id_1",
"name": "CI workspace",
"orgId": "affd282c8f9fe",
"orgName": "My Org Name",
"orgShortName": "654321",
"shortName": "123456", # Can be some generated number or something provided by the user.
"state": "active",
"url": "http://url1",
},
{
"id": "workspace_id_2",
"name": "My Other workspace",
"orgId": "affd282c8f9fe",
"orgName": "My Org Name",
"orgShortName": "1234567",
"shortName": "7654321",
"state": "active",
"url": "http://url2",
},
)
_PACKAGE_INFO_WS_2: dict = {}
_PACKAGE_INFO_WS_1: dict = {
"activities": [
{"id": "452", "name": "Package Name 1"},
{"id": "453", "name": "Package Name 2"},
]
}
class RccPatch(object):
def __init__(self, monkeypatch, tmpdir):
from robocorp_code.rcc import Rcc
self.monkeypatch = monkeypatch
self._current_mock = self.mock_run_rcc_default
self._original = Rcc._run_rcc
self._package_info_ws_1 = _PACKAGE_INFO_WS_1
self.custom_handler: Any = None
self.tmpdir = tmpdir
def mock_run_rcc(self, args, *starargs, **kwargs) -> ActionResult:
return self._current_mock(args, *starargs, **kwargs)
def mock_run_rcc_default(self, args, *sargs, **kwargs) -> ActionResult:
import json
import copy
from robocorp_code.rcc import ACCOUNT_NAME
import shutil
if self.custom_handler is not None:
ret = self.custom_handler(args, *sargs, **kwargs)
if ret is not None:
return ret
if args[:4] == ["cloud", "workspace", "--workspace", "workspace_id_1"]:
# List packages for workspace 1
return ActionResult(
success=True, message=None, result=json.dumps(self._package_info_ws_1)
)
if args[:4] == ["cloud", "workspace", "--workspace", "workspace_id_2"]:
# List packages for workspace 2
return ActionResult(
success=True, message=None, result=json.dumps(_PACKAGE_INFO_WS_2)
)
if args[:3] == ["cloud", "workspace", "--config"]:
# List workspaces
workspace_info = _WS_INFO
return ActionResult(
success=True, message=None, result=json.dumps(workspace_info)
)
if args[:3] == ["cloud", "push", "--directory"]:
if args[4:8] == ["--workspace", "workspace_id_1", "--robot", "2323"]:
return ActionResult(success=True)
if args[4:8] == ["--workspace", "workspace_id_1", "--robot", "453"]:
return ActionResult(success=True)
if args[:5] == ["cloud", "new", "--workspace", "workspace_id_1", "--robot"]:
# Submit a new package to ws 1
cp = copy.deepcopy(self._package_info_ws_1)
cp["activities"].append({"id": "2323", "name": args[5]})
self._package_info_ws_1 = cp
return ActionResult(
success=True,
message=None,
                result=f"Created new robot named {args[5]} with identity 2323.",
)
if args[:4] == ["config", "credentials", "-j", "--verified"]:
return ActionResult(
success=True,
message=None,
result=json.dumps(
[
{
"account": ACCOUNT_NAME,
"identifier": "001",
"endpoint": "https://endpoint.foo.bar",
"secret": "123...",
"verified": 1605525807,
}
]
),
)
if args[:3] == ["holotree", "variables", "--space"]:
space_name = args[3]
conda_prefix = Path(self.tmpdir.join(f"conda_prefix_{space_name}"))
conda_prefix.mkdir()
conda_yaml = args[-2]
assert conda_yaml.endswith("conda.yaml")
shutil.copyfile(conda_yaml, conda_prefix / "identity.yaml")
return ActionResult(
success=True,
message=None,
result=json.dumps(
[
{"key": "PYTHON_EXE", "value": sys.executable},
{"key": "SPACE_NAME", "value": args[3]},
{"key": "CONDA_PREFIX", "value": str(conda_prefix)},
{"key": "TEMP", "value": str(self.tmpdir.join("_temp_dir_"))},
]
),
)
raise AssertionError(f"Unexpected args: {args}")
def mock_run_rcc_should_not_be_called(self, args, *sargs, **kwargs):
raise AssertionError(
"This should not be called at this time (data should be cached)."
)
def apply(self) -> None:
from robocorp_code.rcc import Rcc
self.monkeypatch.setattr(Rcc, "_run_rcc", self.mock_run_rcc)
def disallow_calls(self) -> None:
self._current_mock = self.mock_run_rcc_should_not_be_called
@pytest.fixture
def rcc_patch(monkeypatch, tmpdir):
return RccPatch(monkeypatch, tmpdir)
@pytest.fixture
def initialization_options():
return {"do-not-track": True}
@pytest.fixture
def language_server_initialized(
language_server_tcp: IRobocorpLanguageServerClient,
ws_root_path: str,
rcc_location: str,
ci_endpoint: str,
rcc_config_location: str,
initialization_options,
):
from robocorp_code.commands import ROBOCORP_RUN_IN_RCC_INTERNAL
language_server = language_server_tcp
language_server.initialize(
ws_root_path, initialization_options=initialization_options
)
language_server.settings(
{
"settings": {
"robocorp": {
"rcc": {
"location": rcc_location,
"endpoint": ci_endpoint,
"config_location": rcc_config_location,
}
}
}
}
)
result = language_server.execute_command(
ROBOCORP_RUN_IN_RCC_INTERNAL,
[
{
"args": "configure identity --do-not-track --config".split()
+ [rcc_config_location]
}
],
)
assert result["result"]["success"]
if "disabled" not in result["result"]["result"]:
raise AssertionError(f"Unexpected result: {result}")
return language_server
| 1.976563
| 2
|
tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py
|
akarmi/model-optimization
| 1
|
12586
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract Base Class for quantize emulation in custom keras layers."""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class QuantizeEmulatableLayer(object):
"""Abstract Base Class for quantize emulation in custom keras layers.
Custom keras layers which want to implement quantization of their operations
during training should implement this class.
"""
@abc.abstractmethod
def get_quantizable_weights(self):
"""Returns list of quantizable weight tensors.
All the weight tensors which the layer wants to be quantized during
training must be returned by this method.
Returns: List of weight tensors/kernels in the keras layer which must be
quantized during training.
"""
raise NotImplementedError('Must be implemented in subclasses.')
@abc.abstractmethod
def set_quantizable_weights(self, weights):
"""Sets list of quantizable weight tensors.
This method replaces the existing quantizable weight tensors for
the layer with the specified set of weights.
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_quantizable_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
raise NotImplementedError('Must be implemented in subclasses.')
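# Minimal illustrative sketch (not part of the original module): a toy class
# that satisfies this interface, with a plain NumPy array standing in for a
# real Keras kernel. The class name, single-kernel layout and shape check are
# assumptions made purely for illustration, not the library's reference code.
import numpy as np
class ToyQuantizableLayer(QuantizeEmulatableLayer):
  """Toy implementation exposing one quantizable weight tensor."""
  def __init__(self, units=4):
    # A real Keras layer would create tf.Variables in build(); a NumPy array
    # keeps this sketch self-contained.
    self._kernel = np.zeros((units, units), dtype=np.float32)
  def get_quantizable_weights(self):
    # Only the kernel is handed out for quantization during training.
    return [self._kernel]
  def set_quantizable_weights(self, weights):
    # Mirror the documented contract: reject mismatched replacements.
    if len(weights) != 1 or weights[0].shape != self._kernel.shape:
      raise ValueError('Expected a single array matching the kernel shape.')
    self._kernel = weights[0]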
| 2.609375
| 3
|
GEN_cell_culture/phase_plotting.py
|
dezeraecox/GEN_cell_culture
| 0
|
12587
|
<reponame>dezeraecox/GEN_cell_culture<gh_stars>0
import os
import re
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from GEN_Utils import FileHandling
from loguru import logger
logger.info("Import OK")
# Set sample-specific variables
input_path = 'examples/python/gauss_models/'
output_path = 'examples/python/phase_plotting/'
plate_sample = ['TPE only', '1', '1.5', '2', '3', '4']*4
plate_cords = [f'{x}{y}' for x in string.ascii_uppercase[0:4]
for y in range(1, 7)]
sample_map = dict(zip(plate_cords, plate_sample))
if not os.path.exists(output_path):
os.mkdir(output_path)
# Read in summary df and preview
summary = pd.read_excel(f'{input_path}summary.xlsx')
# Assign sample-specific descriptors to summary table
summary['plate'] = summary['sample'].str[0]
summary['well'] = summary['sample'].str[1:]
summary['sample'] = summary['well'].map(sample_map)
phase_name = ['G', 'S', 'M']
phase_num = [1, 2, 3]
phase_map = dict(zip(phase_name, phase_num))
# Generate line-plot
fig = plt.subplots()
for phase in phase_name:
sns.lineplot(summary['sample'], summary[phase], label=phase, ci='sd')
plt.ylabel("Proportion of cells in phase")
plt.xlabel(r'Density(x 10$^ 5$)')
plt.title('Phase distribution')
plt.legend(bbox_to_anchor=(1.1, 1.0), title='Phase')
plt.tight_layout()
plt.autoscale()
plt.savefig(f'{output_path}line_plot.png')
| 2.3125
| 2
|
PlatformerGame/malmopy/explorers.py
|
MrMaik/platformer-ml-game
| 10
|
12588
|
<gh_stars>1-10
# --------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------------------------------
"""Module containing explorer classes"""
from numpy import random as np_random
from .summaries import ScalarSummary
from .triggers import each_step
from .abc import Explorer, EpsilonFunction, Visualizable
class ConstantEpsilon(EpsilonFunction):
"""Epsilon function which returns a constant value regardless of step."""
def __init__(self, epsilon):
"""
Args:
epsilon -- the constant epsilon value
"""
self._epsilon = epsilon
def epsilon(self, step):
return self._epsilon
class LinearEpsilon(EpsilonFunction):
"""
This function uses linear interpolation between epsilon_max and epsilon_min
to linearly anneal epsilon as a function of the current episode.
3 cases exist:
- If 0 <= episode < eps_min_time then epsilon = interpolator(episode)
    - If episode >= eps_min_time then epsilon = eps_min
- Otherwise epsilon = eps_max
"""
def __init__(self, eps_max, eps_min, eps_min_time):
"""
Args:
eps_max -- the maximum epsilon value
eps_min -- the minimum epsilon value
eps_min_time -- the number of steps until epsilon is at its minimum
"""
assert eps_max > eps_min
assert eps_min_time > 0
self._eps_min_time = eps_min_time
self._eps_min = eps_min
self._eps_max = eps_max
self._delta = -(eps_max - eps_min) / eps_min_time
def epsilon(self, step):
"""The epsilon value at a specific step.
Args:
step -- the step during training
"""
if step < 0:
return self._eps_max
if step > self._eps_min_time:
return self._eps_min
return self._delta * step + self._eps_max
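    # Illustrative worked example (not part of the original module): with
    # eps_max=1.0, eps_min=0.1 and eps_min_time=100 the slope is
    # _delta = -(1.0 - 0.1) / 100 = -0.009, so for instance:
    #   epsilon(-1)  -> 1.0    (before step 0 the maximum is returned)
    #   epsilon(50)  -> ~0.55  (-0.009 * 50 + 1.0)
    #   epsilon(150) -> 0.1    (past eps_min_time the minimum is returned)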
class EpsilonGreedyExplorer(Explorer, Visualizable):
"""Explorer which determines whether to explore by sampling from a Bernoulli distribution."""
def __init__(self, epsilon_function):
"""
Args:
epsilon_function -- an instance of EpsilonFunction
"""
assert isinstance(epsilon_function, EpsilonFunction)
self._epsilon = epsilon_function
self._epsilon_summary = ScalarSummary("EpsilonGreedy/Epsilon", each_step())
@property
def metrics(self):
return [self._epsilon_summary]
def is_exploring(self, step):
epsilon = self._epsilon(step)
self._epsilon_summary.add(epsilon)
return np_random.binomial(1, epsilon)
def explore(self, step, action_space):
return action_space.sample()
class ConstantExplorer(EpsilonGreedyExplorer):
"""Explorer which explores with a constant probability."""
def __init__(self, epsilon):
"""
Args:
epsilon -- the probability that the agent will explore
"""
super(ConstantExplorer, self).__init__(ConstantEpsilon(epsilon))
class LinearEpsilonGreedyExplorer(EpsilonGreedyExplorer):
"""Explorer which uses a LinearEpsilon function."""
def __init__(self, eps_max, eps_min, eps_min_time):
"""
Args:
eps_max -- the maximum epsilon value
eps_min -- the minimum epsilon value
eps_min_time -- the number of steps until epsilon is at its minimum
"""
epsilon_function = LinearEpsilon(eps_max, eps_min, eps_min_time)
super(LinearEpsilonGreedyExplorer, self).__init__(epsilon_function)
| 1.398438
| 1
|
Lib/icecreamscrape/__main__.py
|
kdwatt15/icecreamscrape
| 0
|
12589
|
# Standard imports
import sys
# Project imports
from icecreamscrape.cli import cli
from icecreamscrape.webdriver import driver_factory
from icecreamscrape import composites as comps
from icecreamscrape.composites import create_timestamped_dir
def main(args=sys.argv[1:]):
""" Main function. :param: args is used for testing """
user_inputs = cli(args)
url = user_inputs.params.url
active_features = user_inputs.active_features
if len(active_features) > 0:
time_dir = create_timestamped_dir()
with driver_factory(url) as driver:
for feature in active_features:
getattr(sys.modules[comps.__name__],
feature)(driver, time_dir)
def init():
""" Init construction allows for testing """
if __name__ == "__main__":
sys.exit(main())
init()
| 2.40625
| 2
|
tests/models/programdb/mission/mission_unit_test.py
|
weibullguy/ramstk
| 4
|
12590
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.controllers.mission.mission_unit_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Mission module algorithms and models."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKMissionRecord
from ramstk.models.dbtables import RAMSTKMissionTable
from tests import (
MockDAO,
UnitTestDeleteMethods,
UnitTestGetterSetterMethods,
UnitTestInsertMethods,
UnitTestSelectMethods,
)
@pytest.mark.usefixtures("test_record_model", "unit_test_table_model")
class TestCreateMissionModels:
"""Class for unit testing Mission model __init__() methods.
Because each table model contains unique attributes, these methods must be
local to the module being tested.
"""
__test__ = True
@pytest.mark.unit
def test_record_model_create(self, test_record_model):
"""Should return a Mission record model instance."""
assert isinstance(test_record_model, RAMSTKMissionRecord)
# Verify class attributes are properly initialized.
assert test_record_model.__tablename__ == "ramstk_mission"
assert test_record_model.revision_id == 1
assert test_record_model.description == "Test mission #1"
assert test_record_model.mission_time == 100.0
assert test_record_model.time_units == "hours"
@pytest.mark.unit
def test_data_manager_create(self, unit_test_table_model):
"""Return a Mission table model instance."""
assert isinstance(unit_test_table_model, RAMSTKMissionTable)
assert isinstance(unit_test_table_model.tree, Tree)
assert isinstance(unit_test_table_model.dao, MockDAO)
assert unit_test_table_model._db_id_colname == "fld_mission_id"
assert unit_test_table_model._db_tablename == "ramstk_mission"
assert unit_test_table_model._tag == "mission"
assert unit_test_table_model._root == 0
assert unit_test_table_model._revision_id == 0
assert pub.isSubscribed(
unit_test_table_model.do_get_attributes, "request_get_mission_attributes"
)
assert pub.isSubscribed(
unit_test_table_model.do_get_tree, "request_get_mission_tree"
)
assert pub.isSubscribed(
unit_test_table_model.do_select_all, "selected_revision"
)
assert pub.isSubscribed(
unit_test_table_model.do_update, "request_update_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_update_all, "request_update_all_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_delete, "request_delete_mission"
)
assert pub.isSubscribed(
unit_test_table_model.do_insert, "request_insert_mission"
)
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestSelectMission(UnitTestSelectMethods):
"""Class for unit testing Mission table do_select() and do_select_all()."""
__test__ = True
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestInsertMission(UnitTestInsertMethods):
"""Class for unit testing Mission table do_insert() method."""
__test__ = True
_next_id = 0
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.skip(reason="Mission records are non-hierarchical.")
def test_do_insert_child(self, test_attributes, unit_test_table_model):
"""Should not run because Mission records are not hierarchical."""
pass
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestDeleteMission(UnitTestDeleteMethods):
"""Class for unit testing Mission table do_delete() method."""
__test__ = True
_next_id = 0
_record = RAMSTKMissionRecord
_tag = "mission"
@pytest.mark.usefixtures("test_attributes", "test_record_model")
class TestGetterSetterMission(UnitTestGetterSetterMethods):
"""Class for unit testing Mission table methods that get or set."""
__test__ = True
_id_columns = [
"revision_id",
"mission_id",
]
_test_attr = "mission_time"
_test_default_value = 0.0
@pytest.mark.unit
def test_get_record_model_attributes(self, test_record_model):
"""Should return a dict of attribute key:value pairs.
This method must be local because the attributes are different for each
database record model.
"""
_attributes = test_record_model.get_attributes()
assert isinstance(_attributes, dict)
assert _attributes["revision_id"] == 1
assert _attributes["description"] == "Test mission #1"
assert _attributes["mission_time"] == 100.0
assert _attributes["time_units"] == "hours"
| 2.421875
| 2
|
src/streamlink/packages/flashmedia/flv.py
|
RomanKornev/streamlink
| 5
|
12591
|
#!/usr/bin/env python
from .error import FLVError
from .compat import is_py2
from .tag import Header, Tag
class FLV(object):
def __init__(self, fd=None, strict=False):
self.fd = fd
self.header = Header.deserialize(self.fd)
self.strict = strict
def __iter__(self):
return self
def __next__(self):
try:
tag = Tag.deserialize(self.fd, strict=self.strict)
except (IOError, FLVError):
raise StopIteration
return tag
if is_py2:
next = __next__
__all__ = ["FLV"]
| 2.484375
| 2
|
tests/core/test_core_renderer.py
|
timvink/pheasant
| 24
|
12592
|
from pheasant.renderers.jupyter.jupyter import Jupyter
jupyter = Jupyter()
jupyter.findall("{{3}}3{{5}}")
jupyter.page
| 1.554688
| 2
|
chapter2-5-your-code-in-multiple-servers/packer/webapp.py
|
andrecp/devops-fundamentals-to-k8s
| 0
|
12593
|
<reponame>andrecp/devops-fundamentals-to-k8s<gh_stars>0
#!/usr/bin/env python3
import json
from http.server import HTTPServer, BaseHTTPRequestHandler
num_requests = 0
class Handler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
def do_GET(self):
self._set_headers()
global num_requests
num_requests += 1
content = json.dumps({"num_requests": num_requests}).encode("utf8")
self.wfile.write(content)
if __name__ == "__main__":
http_service = HTTPServer(("0.0.0.0", 8000), Handler)
print(f"Starting http service on 0.0.0.0:8000")
http_service.serve_forever()
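# Usage sketch (not part of the original script): every GET increments the
# shared counter and returns it as JSON, so repeated requests against a
# running instance would look roughly like
#   $ curl http://localhost:8000/
#   {"num_requests": 1}
#   $ curl http://localhost:8000/
#   {"num_requests": 2}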
| 2.984375
| 3
|
run.py
|
ellotecnologia/galadriel
| 0
|
12594
|
from app.app import create_app
from config import BaseConfig
app = create_app(BaseConfig)
| 1.367188
| 1
|
retarget/make_data.py
|
EggPool/rx-experiments
| 1
|
12595
|
<gh_stars>1-10
"""
Create data for simulations
(c) 2019 - EggdraSyl
"""
import json
# from mockup import Blockchain, Block
from minersimulator import MinerSimulator
from math import sin, pi
SPECIAL_MIN_TIME = 5 * 60
def init_stable(
start,
end,
block_time=60,
target="0000000000000028acfa28a803d2000000000000000000000000000000000000",
file="stable.json",
):
start_time = 0
blocks = []
for height in range(start, end):
block = {
"time": start_time,
"height": height,
"special_min": True if block_time > SPECIAL_MIN_TIME else False,
"target": target,
"block_time": block_time, # This one is not native.
}
start_time += block_time
blocks.append(block)
with open("data/init/{}".format(file), "w") as fp:
json.dump(blocks, fp)
def hash_stable(hash_count: int, hash_rate:int, file="stable.json"):
simu = MinerSimulator(hash_rate)
hashes = []
for i in range(hash_count):
hashes.append((simu.hash_rate, simu.HEX(simu.get_min_hash())))
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
def hash_arithmetic(hash_count: int, start: int, increment: int, file="arithmetic.json"):
simu = MinerSimulator(start)
hashes = []
for i in range(hash_count):
hashes.append((simu.hash_rate, simu.HEX(simu.get_min_hash())))
simu.hash_rate += increment
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
def hash_step(hash_count: int, start: int, h_end: int, file="step.json"):
simu = MinerSimulator(start)
hashes = []
for i in range(hash_count):
hashes.append((simu.hash_rate,simu.HEX(simu.get_min_hash())))
if i == hash_count//2:
simu.hash_rate = h_end
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
def hash_sinus(hash_count: int, base: int, amplitude: int, period: int, file="sinus.json"):
simu = MinerSimulator(base)
hashes = []
for i in range(hash_count):
hashes.append((simu.hash_rate,simu.HEX(simu.get_min_hash())))
simu.hash_rate = base + amplitude * sin(i * 2 * pi / period)
with open("data/live/{}".format(file), "w") as fp:
json.dump(hashes, fp, indent=2)
if __name__ == "__main__":
init_stable(
0,
1000,
block_time=3600,
target="0000000000000028acfa28a803d2000000000000000000000000000000000000",
file="stable_3600_14.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="000000ffffffffff28acfa28a803d20000000000000000000000000000000000",
file="stable_300_6.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="00000ffffffffff28acfa28a803d200000000000000000000000000000000000",
file="stable_300_5.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="0000ffffffffff28acfa28a803d2000000000000000000000000000000000000",
file="stable_300_4.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="000ffffffffff28acfa28a803d2000000000000000000000000000000000000",
file="stable_300_3.json",
)
hash_stable(10000, 167, file="stable_167.json")
hash_stable(10000, 1670, file="stable_1670.json")
hash_stable(10000, 16700, file="stable_16700.json")
hash_arithmetic(10000, 167, 16, file="arithmetic_167_16.json")
hash_step(10000, 167, 500, file="step_up_167_500.json")
hash_step(10000, 500, 167, file="step_down_500_167.json")
hash_sinus(10000, 300, 150, 60*12, file="sinus_300_150_720.json")
hash_sinus(10000, 300, 100, 1440, file="sinus_300_100_1440.json")
hash_sinus(10000, 300, 100, 2880, file="sinus_300_100_2880.json")
| 2.84375
| 3
|
software/pynguin/tests/testcase/statements/test_primitivestatements.py
|
se2p/artifact-pynguin-ssbse2020
| 3
|
12596
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
from unittest import mock
from unittest.mock import MagicMock
import pytest
import pynguin.configuration as config
import pynguin.testcase.defaulttestcase as dtc
import pynguin.testcase.statements.primitivestatements as prim
import pynguin.testcase.testcase as tc
import pynguin.testcase.variable.variablereferenceimpl as vri
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_value(statement_type, test_case_mock, value):
statement = statement_type(test_case_mock, value)
assert statement.value == value
@pytest.mark.parametrize(
"statement_type",
[
pytest.param(prim.IntPrimitiveStatement),
pytest.param(prim.FloatPrimitiveStatement),
pytest.param(prim.StringPrimitiveStatement),
pytest.param(prim.BooleanPrimitiveStatement),
],
)
def test_primitive_statement_value_none(statement_type, test_case_mock):
statement = statement_type(test_case_mock, None)
assert statement.value is not None
@pytest.mark.parametrize(
"statement_type,value,new_value",
[
pytest.param(prim.IntPrimitiveStatement, 42, 23),
pytest.param(prim.FloatPrimitiveStatement, 2.1, 1.2),
pytest.param(prim.StringPrimitiveStatement, "foo", "bar"),
pytest.param(prim.BooleanPrimitiveStatement, True, False),
],
)
def test_primitive_statement_set_value(
statement_type, test_case_mock, value, new_value
):
statement = statement_type(test_case_mock, value)
statement.value = new_value
assert statement.value == new_value
@pytest.mark.parametrize(
"statement_type,test_case,new_test_case,value",
[
pytest.param(
prim.IntPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
42,
),
pytest.param(
prim.FloatPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
42.23,
),
pytest.param(
prim.StringPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
"foo",
),
pytest.param(
prim.BooleanPrimitiveStatement,
MagicMock(tc.TestCase),
MagicMock(tc.TestCase),
True,
),
],
)
def test_primitive_statement_clone(statement_type, test_case, new_test_case, value):
statement = statement_type(test_case, value)
new_statement = statement.clone(new_test_case)
assert new_statement.test_case == new_test_case
assert (
new_statement.return_value.variable_type == statement.return_value.variable_type
)
assert new_statement.value == statement.value
@pytest.mark.parametrize(
"statement_type,test_case,value,visitor_method",
[
pytest.param(
prim.IntPrimitiveStatement,
MagicMock(tc.TestCase),
42,
"visit_int_primitive_statement",
),
pytest.param(
prim.FloatPrimitiveStatement,
MagicMock(tc.TestCase),
2.1,
"visit_float_primitive_statement",
),
pytest.param(
prim.StringPrimitiveStatement,
MagicMock(tc.TestCase),
"foo",
"visit_string_primitive_statement",
),
pytest.param(
prim.BooleanPrimitiveStatement,
MagicMock(tc.TestCase),
True,
"visit_boolean_primitive_statement",
),
],
)
def test_primitive_statement_accept(statement_type, test_case, value, visitor_method):
stmt = statement_type(test_case, value)
visitor = MagicMock()
stmt.accept(visitor)
getattr(visitor, visitor_method).assert_called_once_with(stmt)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_same(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
assert statement.__eq__(statement)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_other_type(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
assert not statement.__eq__(test_case)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_equals_clone(statement_type, value):
test_case = MagicMock(tc.TestCase)
statement = statement_type(test_case, value)
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
def test_none_statement_equals_clone():
test_case = MagicMock(tc.TestCase)
statement = prim.NoneStatement(test_case, type(None))
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
@pytest.mark.parametrize(
"statement_type,value",
[
pytest.param(prim.IntPrimitiveStatement, 42),
pytest.param(prim.FloatPrimitiveStatement, 42.23),
pytest.param(prim.StringPrimitiveStatement, "foo"),
pytest.param(prim.BooleanPrimitiveStatement, True),
],
)
def test_primitive_statement_hash(statement_type, value):
statement = statement_type(MagicMock(tc.TestCase), value)
assert statement.__hash__() != 0
def test_int_primitive_statement_randomize_value(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert isinstance(statement.value, int)
def test_float_primitive_statement_randomize_value(test_case_mock):
statement = prim.FloatPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert isinstance(statement.value, float)
def test_bool_primitive_statement_randomize_value(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert statement.value or not statement.value
def test_string_primitive_statement_randomize_value(test_case_mock):
statement = prim.StringPrimitiveStatement(test_case_mock)
statement.randomize_value()
assert 0 <= len(statement.value) <= config.INSTANCE.string_length
def test_none_statement_randomize_value(test_case_mock):
statement = prim.NoneStatement(test_case_mock, type(None))
statement.randomize_value()
assert statement.value is None
def test_none_statement_delta(test_case_mock):
statement = prim.NoneStatement(test_case_mock, type(None))
statement.delta()
assert statement.value is None
def test_string_primitive_statement_random_deletion(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_deletion(sample)
assert len(result) <= len(sample)
def test_string_primitive_statement_random_insertion(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_insertion(sample)
assert len(result) >= len(sample)
def test_string_primitive_statement_random_insertion_empty(test_case_mock):
sample = list("")
result = prim.StringPrimitiveStatement._random_insertion(sample)
assert len(result) >= len(sample)
def test_string_primitive_statement_random_replacement(test_case_mock):
sample = list("Test")
result = prim.StringPrimitiveStatement._random_replacement(sample)
assert len(result) == len(sample)
def test_string_primitive_statement_delta_none(test_case_mock):
value = "t"
statement = prim.StringPrimitiveStatement(test_case_mock, value)
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.side_effect = [1.0, 1.0, 1.0]
statement.delta()
assert statement.value == value
def test_string_primitive_statement_delta_all(test_case_mock):
value = "te"
statement = prim.StringPrimitiveStatement(test_case_mock, value)
with mock.patch("pynguin.utils.randomness.next_char") as char_mock:
char_mock.side_effect = ["a", "b"]
with mock.patch("pynguin.utils.randomness.next_int") as int_mock:
int_mock.return_value = 0
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
deletion = [0.0, 0.0, 1.0]
replacement = [0.0, 0.0]
insertion = [0.0, 0.0, 1.0]
float_mock.side_effect = deletion + replacement + insertion
statement.delta()
assert statement.value == "ba"
def test_int_primitive_statement_delta(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.IntPrimitiveStatement(test_case_mock, 1)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
statement.delta()
assert statement.value == 6
def test_float_primitive_statement_delta_max(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.5)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 0.0
statement.delta()
assert statement.value == 6.5
def test_float_primitive_statement_delta_gauss(test_case_mock):
config.INSTANCE.max_delta = 10
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.0)
with mock.patch("pynguin.utils.randomness.next_gaussian") as gauss_mock:
gauss_mock.return_value = 0.5
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 1.0 / 3.0
statement.delta()
assert statement.value == 1.5
def test_float_primitive_statement_delta_round(test_case_mock):
statement = prim.FloatPrimitiveStatement(test_case_mock, 1.2345)
with mock.patch("pynguin.utils.randomness.next_int") as int_mock:
int_mock.return_value = 2
with mock.patch("pynguin.utils.randomness.next_float") as float_mock:
float_mock.return_value = 2.0 / 3.0
statement.delta()
assert statement.value == 1.23
def test_boolean_primitive_statement_delta(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock, True)
statement.delta()
assert not statement.value
def test_primitive_statement_mutate(test_case_mock):
statement = prim.BooleanPrimitiveStatement(test_case_mock, True)
statement.mutate()
assert not statement.value
def test_primitive_statement_accessible(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
assert statement.accessible_object() is None
def test_primitive_statement_references(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
assert {statement.return_value} == statement.get_variable_references()
def test_primitive_statement_replace(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
new = vri.VariableReferenceImpl(test_case_mock, int)
statement.replace(statement.return_value, new)
assert statement.return_value == new
def test_primitive_statement_replace_ignore(test_case_mock):
statement = prim.IntPrimitiveStatement(test_case_mock, 0)
new = prim.FloatPrimitiveStatement(test_case_mock, 0).return_value
old = statement.return_value
statement.replace(new, new)
assert statement.return_value == old
def test_primitive_statement_get_position():
test_case = dtc.DefaultTestCase()
statement = prim.IntPrimitiveStatement(test_case, 5)
test_case.add_statement(statement)
assert statement.get_position() == 0
| 2.328125
| 2
|
src/data/download/datasets/download_tencent_test.py
|
lcn-kul/conferencing-speech-2022
| 1
|
12597
|
from pathlib import Path
from src import constants
from src.data.download.utils.download_dataset_zip import download_dataset_zip
def download_tencent_test(
tmp_dir: Path = None,
tqdm_name: str = None,
tqdm_idx: int = None,
):
"""Download the test set of the Tencent Corpus and extract it to the
appropriate directory."""
download_dataset_zip(
name="tencent_test",
data_url=constants.TENCENT_TEST_URL,
output_dir=constants.TENCENT_TEST_DIR,
extracted_name=constants.TENCENT_TEST_ZIP_FOLDER,
tmp_dir=tmp_dir,
tqdm_name=tqdm_name,
tqdm_idx=tqdm_idx,
)
if __name__ == "__main__":
download_tencent_test(tqdm_name="tencent", tqdm_idx=0)
| 2.453125
| 2
|
malaya_speech/train/model/fastspeechsplit/model.py
|
ishine/malaya-speech
| 111
|
12598
|
import tensorflow as tf
from ..fastspeech.model import (
TFFastSpeechEncoder,
TFTacotronPostnet,
TFFastSpeechLayer,
)
from ..speechsplit.model import InterpLnr
import numpy as np
import copy
class Encoder_6(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_6, self).__init__(name='Encoder_6', **kwargs)
self.dim_neck_3 = hparams.dim_neck_3
self.freq_3 = hparams.freq_3
self.dim_f0 = hparams.dim_f0
self.dim_enc_3 = hparams.dim_enc_3
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_1'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc_3
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_1',
)
self.interp = InterpLnr(hparams)
def call(self, x, attention_mask, training=True):
x = self.before_dense_1(x)
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
x = self.interp(
x,
tf.tile([tf.shape(x)[1]], [tf.shape(x)[0]]),
training=training,
)
x = self.encoder_dense_1(x)
return x
class Encoder_7(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_7, self).__init__(name='Encoder_7', **kwargs)
self.config = config
self.dim_neck = hparams.dim_neck
self.dim_neck_3 = hparams.dim_neck_3
self.dim_freq = hparams.dim_freq
self.dim_enc = hparams.dim_enc
self.dim_enc_3 = hparams.dim_enc_3
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc, dtype=tf.float32, name='before_dense_1'
)
self.before_dense_2 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_2'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
config_2 = copy.deepcopy(config)
config_2.hidden_size = self.dim_enc_3
self.layer_2 = [
TFFastSpeechLayer(config_2, name='layer_._{}'.format(i))
for i in range(config_2.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck, dtype=tf.float32, name='encoder_dense_1'
)
self.encoder_dense_2 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_2',
)
self.interp = InterpLnr(hparams)
def call(self, x_f0, attention_mask, training=True):
x = x_f0[:, :, : self.dim_freq]
f0 = x_f0[:, :, self.dim_freq:]
x = self.before_dense_1(x)
f0 = self.before_dense_2(f0)
seq_length = tf.shape(x_f0)[1]
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
f0 = self.layer_2[no]([f0, attention_mask], training=training)[0]
x_f0 = tf.concat((x, f0), axis=2)
x_f0 = self.interp(
x_f0,
tf.tile([tf.shape(x_f0)[1]], [tf.shape(x)[0]]),
training=training,
)
x = x_f0[:, :, : self.dim_enc]
f0 = x_f0[:, :, self.dim_enc:]
x = self.encoder_dense_1(x)
f0 = self.encoder_dense_2(f0)
return x, f0
class Encoder_t(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_t, self).__init__(name='Encoder_t', **kwargs)
self.dim_neck_2 = hparams.dim_neck_2
self.freq_2 = hparams.freq_2
self.dim_freq = hparams.dim_freq
self.dim_enc_2 = hparams.dim_enc_2
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
config = copy.deepcopy(config)
config.num_hidden_layers = 1
config.hidden_size = self.dim_enc_2
self.config = config
self.before_dense = tf.keras.layers.Dense(
units=self.dim_enc_2, dtype=tf.float32, name='before_dense_1'
)
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.encoder_dense = tf.keras.layers.Dense(
units=self.dim_neck_2, dtype=tf.float32, name='encoder_dense'
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.encoder_dense(f)
class Decoder_3(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_3, self).__init__(name='Decoder_3', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_freq,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Decoder_4(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_4, self).__init__(name='Decoder_4', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_f0,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Model(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model, self).__init__(name='speechsplit', **kwargs)
self.encoder_1 = Encoder_7(
config.encoder_self_attention_params, hparams
)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_3(config.decoder_self_attention_params, hparams)
self.freq = hparams.freq
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_f0, x_org, c_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_x, codes_f0 = self.encoder_1(
x_f0, attention_mask, training=training
)
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_1 = codes_x
code_exp_3 = codes_f0
code_exp_2 = codes_2
c_trg = tf.tile(tf.expand_dims(c_trg, 1), (1, tf.shape(x_f0)[1], 1))
encoder_outputs = tf.concat(
(code_exp_1, code_exp_2, code_exp_3, c_trg), axis=-1
)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_x, codes_f0, codes_2, encoder_outputs, mel_outputs
class Model_F0(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model_F0, self).__init__(name='speechsplit_f0', **kwargs)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.encoder_3 = Encoder_6(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_4(config.decoder_self_attention_params, hparams)
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_org, f0_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_2 = codes_2
codes_3 = self.encoder_3(f0_trg, attention_mask, training=training)
code_exp_3 = codes_3
self.o = [code_exp_2, code_exp_3]
encoder_outputs = tf.concat((code_exp_2, code_exp_3), axis=-1)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_2, codes_3, encoder_outputs, mel_outputs
| 2.296875
| 2
|
test/test_memory_leaks.py
|
elventear/psutil
| 4
|
12599
|
#!/usr/bin/env python
#
# $Id$
#
"""
Note: this is targeted for python 2.x.
To run it under python 3.x you need to use 2to3 tool first:
$ 2to3 -w test/test_memory_leaks.py
"""
import os
import gc
import sys
import unittest
import psutil
from test_psutil import reap_children, skipUnless, skipIf, \
POSIX, LINUX, WINDOWS, OSX, BSD
LOOPS = 1000
TOLERANCE = 4096
class TestProcessObjectLeaks(unittest.TestCase):
"""Test leaks of Process class methods and properties"""
def setUp(self):
gc.collect()
def tearDown(self):
reap_children()
def execute(self, method, *args, **kwarks):
# step 1
p = psutil.Process(os.getpid())
for x in xrange(LOOPS):
obj = getattr(p, method)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, p, obj, retvalue
gc.collect()
rss1 = psutil.Process(os.getpid()).get_memory_info()[0]
# step 2
p = psutil.Process(os.getpid())
for x in xrange(LOOPS):
obj = getattr(p, method)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, p, obj, retvalue
gc.collect()
rss2 = psutil.Process(os.getpid()).get_memory_info()[0]
# comparison
difference = rss2 - rss1
if difference > TOLERANCE:
self.fail("rss1=%s, rss2=%s, difference=%s" %(rss1, rss2, difference))
def test_name(self):
self.execute('name')
def test_cmdline(self):
self.execute('cmdline')
def test_ppid(self):
self.execute('ppid')
def test_uid(self):
self.execute('uid')
    def test_gid(self):
self.execute('gid')
@skipIf(POSIX)
def test_username(self):
self.execute('username')
def test_create_time(self):
self.execute('create_time')
def test_get_num_threads(self):
self.execute('get_num_threads')
def test_get_threads(self):
        self.execute('get_threads')
def test_get_cpu_times(self):
self.execute('get_cpu_times')
def test_get_memory_info(self):
self.execute('get_memory_info')
def test_is_running(self):
self.execute('is_running')
@skipUnless(WINDOWS)
def test_resume(self):
self.execute('resume')
@skipUnless(WINDOWS)
def test_getcwd(self):
self.execute('getcwd')
@skipUnless(WINDOWS)
def test_get_open_files(self):
self.execute('get_open_files')
@skipUnless(WINDOWS)
def test_get_connections(self):
self.execute('get_connections')
class TestModuleFunctionsLeaks(unittest.TestCase):
"""Test leaks of psutil module functions."""
def setUp(self):
gc.collect()
def execute(self, function, *args, **kwarks):
# step 1
for x in xrange(LOOPS):
obj = getattr(psutil, function)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, obj, retvalue
gc.collect()
rss1 = psutil.Process(os.getpid()).get_memory_info()[0]
# step 2
for x in xrange(LOOPS):
obj = getattr(psutil, function)
if callable(obj):
retvalue = obj(*args, **kwarks)
else:
retvalue = obj # property
del x, obj, retvalue
gc.collect()
rss2 = psutil.Process(os.getpid()).get_memory_info()[0]
# comparison
difference = rss2 - rss1
if difference > TOLERANCE:
self.fail("rss1=%s, rss2=%s, difference=%s" %(rss1, rss2, difference))
def test_get_pid_list(self):
self.execute('get_pid_list')
@skipIf(POSIX)
def test_pid_exists(self):
self.execute('pid_exists', os.getpid())
def test_process_iter(self):
self.execute('process_iter')
def test_used_phymem(self):
self.execute('used_phymem')
def test_avail_phymem(self):
self.execute('avail_phymem')
def test_total_virtmem(self):
self.execute('total_virtmem')
def test_used_virtmem(self):
self.execute('used_virtmem')
def test_avail_virtmem(self):
self.execute('avail_virtmem')
def test_cpu_times(self):
self.execute('cpu_times')
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(TestProcessObjectLeaks))
test_suite.addTest(unittest.makeSuite(TestModuleFunctionsLeaks))
unittest.TextTestRunner(verbosity=2).run(test_suite)
if __name__ == '__main__':
test_main()
| 2.53125
| 3
|