hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6d7fbc093886a621415b81c0d8c77646c98951ee | 22,755 | py | Python | olympics_engine/scenario/curling.py | Yutongamber/Competition_Olympics-Curling | b762a6b4626fc1ee971c0b444a88399e9489414d | [
"MIT"
] | 7 | 2022-02-01T14:45:03.000Z | 2022-02-28T08:21:13.000Z | olympics_engine/scenario/curling.py | Yutongamber/Competition_Olympics-Curling | b762a6b4626fc1ee971c0b444a88399e9489414d | [
"MIT"
] | 1 | 2022-02-19T15:03:56.000Z | 2022-02-25T08:59:22.000Z | olympics_engine/scenario/curling.py | Yutongamber/Competition_Olympics-Curling | b762a6b4626fc1ee971c0b444a88399e9489414d | [
"MIT"
] | 5 | 2022-02-08T14:16:12.000Z | 2022-03-08T01:56:37.000Z | from olympics_engine.core import OlympicsBase
from olympics_engine.viewer import Viewer, debug
from olympics_engine.objects import Ball, Agent
from pathlib import Path
CURRENT_PATH = str(Path(__file__).resolve().parent.parent)
import numpy as np
import math
import pygame
import sys
import os
import random
import copy
# color 宏
COLORS = {
'red': [255, 0, 0],
'green': [0, 255, 0],
'blue': [0, 0, 255],
'yellow': [255, 255, 0],
'grey': [176,196,222],
'purple': [160, 32, 240],
'black': [0, 0, 0],
'white': [255, 255, 255],
'light green': [204, 255, 229],
'sky blue': [0,191,255]
}
COLOR_TO_IDX = {
'red': 7,
'green': 1,
'sky blue': 2,
'yellow': 3,
'grey': 4,
'purple': 5,
'black': 6,
'light green': 0,
'blue':8
}
IDX_TO_COLOR = {
0: 'light green',
1: 'green',
2: 'sky blue',
3: 'yellow',
4: 'grey',
5: 'purple',
6: 'black',
7: 'red',
8: 'blue'
}
grid_node_width = 2 #for view drawing
grid_node_height = 2
def closest_point(l1, l2, point):
    """
    Project *point* onto the infinite line through l1 and l2.

    The line is written as a*x + b*y = c and the foot of the perpendicular
    is found by solving the resulting 2x2 linear system with Cramer's rule,
    reference: https://en.wikipedia.org/wiki/Cramer%27s_rule

    :param l1: start pos of the segment defining the line
    :param l2: end pos of the segment defining the line
    :param point: the query position
    :return: [x, y] coordinates of the closest point on the line
    """
    a = l2[1] - l1[1]
    b = l1[0] - l2[0]
    c = a * l1[0] + b * l1[1]
    # perpendicular through `point`: -b*x + a*y = d
    d = -b * point[0] + a * point[1]
    denom = a * a + b * b
    if denom == 0:
        # degenerate segment (l1 == l2): the closest point is the query itself
        return [point[0], point[1]]
    return [(a * c - b * d) / denom, (a * d + b * c) / denom]
| 34.218045 | 175 | 0.543749 | from olympics_engine.core import OlympicsBase
from olympics_engine.viewer import Viewer, debug
from olympics_engine.objects import Ball, Agent
from pathlib import Path
CURRENT_PATH = str(Path(__file__).resolve().parent.parent)
import numpy as np
import math
import pygame
import sys
import os
import random
import copy
# color 宏
COLORS = {
'red': [255, 0, 0],
'green': [0, 255, 0],
'blue': [0, 0, 255],
'yellow': [255, 255, 0],
'grey': [176,196,222],
'purple': [160, 32, 240],
'black': [0, 0, 0],
'white': [255, 255, 255],
'light green': [204, 255, 229],
'sky blue': [0,191,255]
}
COLOR_TO_IDX = {
'red': 7,
'green': 1,
'sky blue': 2,
'yellow': 3,
'grey': 4,
'purple': 5,
'black': 6,
'light green': 0,
'blue':8
}
IDX_TO_COLOR = {
0: 'light green',
1: 'green',
2: 'sky blue',
3: 'yellow',
4: 'grey',
5: 'purple',
6: 'black',
7: 'red',
8: 'blue'
}
grid_node_width = 2 #for view drawing
grid_node_height = 2
def closest_point(l1, l2, point):
    """
    Return the foot of the perpendicular dropped from *point* to line l1l2.

    Solves the intersection of the line coef_a*x + coef_b*y = rhs1 with the
    perpendicular line through *point* via Cramer's rule,
    reference: https://en.wikipedia.org/wiki/Cramer%27s_rule

    :param l1: one endpoint of the line
    :param l2: the other endpoint of the line
    :param point: position to project
    :return: [cx, cy], the closest coordinate on the line
    """
    coef_a = l2[1] - l1[1]
    coef_b = l1[0] - l2[0]
    rhs1 = coef_a * l1[0] + coef_b * l1[1]
    rhs2 = coef_a * point[1] - coef_b * point[0]
    determinant = coef_a ** 2 + coef_b ** 2
    if determinant == 0:
        # l1 coincides with l2, the line degenerates to a single point
        cx, cy = point[0], point[1]
    else:
        cx = (coef_a * rhs1 - coef_b * rhs2) / determinant
        cy = (coef_b * rhs1 + coef_a * rhs2) / determinant
    return [cx, cy]
def distance_to_line(l1, l2, pos):
    """
    Perpendicular distance from *pos* to the infinite line through l1 and l2.

    The foot of the perpendicular is obtained with ``closest_point``; the
    distance is then simply the length of the vector from that foot to *pos*.
    The previous implementation projected ``l1 - pos`` onto this vector and
    divided by its norm, which yields the same magnitude but raised
    ZeroDivisionError whenever *pos* lay exactly on the line; returning the
    norm directly handles that case and gives 0.0.

    :param l1: start pos of the line
    :param l2: end pos of the line
    :param pos: query position
    :return: non-negative distance (0.0 if *pos* is on the line)
    """
    closest_p = closest_point(l1, l2, pos)
    n = [pos[0] - closest_p[0], pos[1] - closest_p[1]]   # normal from the line to pos
    return math.sqrt(n[0] ** 2 + n[1] ** 2)
class curling(OlympicsBase):
    """Two-player curling environment built on OlympicsBase.

    Teams (0 = purple, 1 = green) alternately throw up to ``max_n`` rocks
    each; the team whose rock ends closest to the house center wins points.
    A full match is two games (``game_round`` 0 and 1) with throwing order
    swapped in the second game.
    """
    def __init__(self, map):
        # NOTE(review): physics constants below override OlympicsBase defaults
        super(curling, self).__init__(map)
        self.tau = 0.1                    # physics integration time step
        self.wall_restitution = 1
        self.circle_restitution = 1
        self.print_log = False
        self.draw_obs = True
        self.show_traj = False
        self.start_pos = [300,150]        # where each new rock is spawned
        self.start_init_obs = 90          # initial facing angle (degrees)
        self.max_n = 4                    # throws per team per game
        self.round_max_step = 100         # steps allowed before forced release
        self.vis=300                      # agent view range
        self.vis_clear = 10               # agent view resolution
        self.purple_rock = pygame.image.load(os.path.join(CURRENT_PATH, "assets/purple rock.png"))
        self.green_rock = pygame.image.load(os.path.join(CURRENT_PATH,"assets/green rock.png"))
        self.curling_ground = pygame.image.load(os.path.join(CURRENT_PATH, "assets/curling ground.png"))
        self.crown_image = pygame.image.load(os.path.join(CURRENT_PATH, "assets/crown.png"))
        # self.curling_ground.set_alpha(150)
    def reset(self, reset_game=False):
        """Reset the environment state.

        :param reset_game: False for a fresh match; True when starting game 2
            (green throws first and the map agent is recolored accordingly).
        :return: [obs_purple, obs_green]; the team not acting receives an
            array of -1 the same shape as the real observation.
        """
        self.release = False
        self.top_area_gamma = 0.98        # friction factor before the release line
        self.down_area_gamma = 0.95       #random.uniform(0.9, 0.95)
        self.gamma = self.top_area_gamma
        self.agent_num = 0
        self.agent_list = []
        self.agent_init_pos = []
        self.agent_pos = []
        self.agent_previous_pos = []
        self.agent_v = []
        self.agent_accel = []
        self.agent_theta = []
        self.temp_winner = -1
        self.round_step = 0
        if reset_game:
            assert self.game_round == 1
            self.current_team = 1   #start from green
            self.num_purple = 0
            self.num_green = 1
            map_copy = copy.deepcopy(self.map)
            map_copy['agents'][0].color = 'green'
            map_copy["agents"][0].original_color = 'green'
        else:
            self.num_purple = 1
            self.num_green = 0
            self.current_team = 0
            self.purple_game_point = 0
            self.green_game_point = 0
            self.game_round = 0
            map_copy = copy.deepcopy(self.map)
        self.obs_boundary_init = list()
        self.obs_boundary = self.obs_boundary_init
        #self.check_valid_map()
        self.generate_map(map_copy)
        self.merge_map()
        self.init_state()
        self.step_cnt = 0
        self.done = False
        self.release = False
        self.viewer = Viewer(self.view_setting)
        self.display_mode=False
        self.view_terminal = False
        obs = self.get_obs()
        if self.current_team == 0:
            return [obs, np.zeros_like(obs)-1]
        else:
            return [np.zeros_like(obs)-1, obs]
    def _reset_round(self):
        """Start the next throw: freeze the previous rock and spawn a new one
        for the other team. Returns the new controlling agent's observation."""
        self.current_team = 1-self.current_team
        #convert last agent to ball
        if len(self.agent_list) != 0:
            last_agent = self.agent_list[-1]
            last_ball = Ball(mass = last_agent.mass, r = last_agent.r, position = self.agent_pos[-1],
                             color = last_agent.color)
            last_ball.alive = False
            self.agent_list[-1] = last_ball
        #add new agent
        if self.current_team == 0:
            #team purple
            new_agent_color = 'purple'
            self.num_purple += 1
        elif self.current_team == 1:
            new_agent_color = 'green'
            self.num_green += 1
        else:
            raise NotImplementedError
        new_agent = Agent(mass = 1, r= 15, position = self.start_pos, color = new_agent_color,
                          vis = self.vis, vis_clear = self.vis_clear)
        self.agent_list.append(new_agent)
        self.agent_init_pos[-1] = self.start_pos
        new_boundary = self.get_obs_boundaray(self.start_pos, 15, self.vis)
        self.obs_boundary_init.append(new_boundary)
        self.agent_num += 1
        self.agent_pos.append(self.agent_init_pos[-1])
        self.agent_v.append([0,0])
        self.agent_accel.append([0,0])
        init_obs = self.start_init_obs
        self.agent_theta.append([init_obs])
        self.agent_record.append([self.agent_init_pos[-1]])
        self.release = False
        self.gamma = self.top_area_gamma
        self.round_step = 0
        return self.get_obs()
    def cross_detect(self):
        """
        check whether the agent has reach the cross(final) line
        :return:
        """
        for agent_idx in range(self.agent_num):
            agent = self.agent_list[agent_idx]
            if agent.type != 'agent':
                continue
            for object_idx in range(len(self.map['objects'])):
                object = self.map['objects'][object_idx]
                if not object.can_pass():
                    continue
                else:
                    #print('object = ', object.type)
                    if object.color == 'red' and object.type=='cross' and \
                            object.check_cross(self.agent_pos[agent_idx], agent.r):
                        # print('agent type = ', agent.type)
                        agent.alive = False
                        #agent.color = 'red'
                        self.gamma = self.down_area_gamma            #this will change the gamma for the whole env, so need to change if dealing with multi-agent
                        self.release = True
                        self.round_countdown = self.round_max_step-self.round_step
                    # if the ball hasnot pass the cross, the relase will be True again in the new round
    def check_action(self, action_list):
        """Expand a flat action list so that only 'agent'-type entities get an
        action; frozen rocks (balls) receive None."""
        action = []
        for agent_idx in range(len(self.agent_list)):
            if self.agent_list[agent_idx].type == 'agent':
                action.append(action_list[0])
                _ = action_list.pop(0)
            else:
                action.append(None)
        return action
    def step(self, actions_list):
        """Advance one physics step with the current team's action.

        :param actions_list: per-team actions; only the current team's entry
            is used.
        :return: (obs pair, [purple_reward, green_reward], done, info string)
        """
        actions_list = [actions_list[self.current_team]]
        #previous_pos = self.agent_pos
        action_list = self.check_action(actions_list)
        if self.release:
            input_action = [None for _ in range(len(self.agent_list))]  #if jump, stop actions
        else:
            input_action = action_list
        self.stepPhysics(input_action, self.step_cnt)
        if not self.release:
            self.cross_detect()
        self.step_cnt += 1
        self.round_step += 1
        obs_next = self.get_obs()
        done = self.is_terminal()
        if not done:
            round_end, end_info = self._round_terminal()
            if round_end:
                if end_info is not None:
                    #clean the last agent
                    del self.agent_list[-1]
                    del self.agent_pos[-1]
                    del self.agent_v[-1]
                    del self.agent_theta[-1]
                    del self.agent_accel[-1]
                    self.agent_num -= 1
                self.temp_winner, min_d = self.current_winner()
                #step_reward = [1,0.] if self.temp_winner == 0 else [0., 1]          #score for each round
                if self.temp_winner == -1:
                    step_reward=[0., 0.]
                elif self.temp_winner == 0:
                    step_reward=[1, 0.]
                elif self.temp_winner == 1:
                    step_reward=[0., 1]
                else:
                    raise NotImplementedError
                obs_next = self._reset_round()
            else:
                step_reward = [0., 0.]
        else:
            if self.game_round == 1:
                # self.final_winner, min_d = self.current_winner()
                # self.temp_winner = self.final_winner
                self._clear_agent()
                self.cal_game_point()
                if self.purple_game_point > self.green_game_point:
                    self.final_winner = 0
                    step_reward = [100., 0]
                elif self.green_game_point > self.purple_game_point:
                    self.final_winner = 1
                    step_reward = [0., 100.]
                else:
                    self.final_winner = -1
                    step_reward = [0.,0.]
                self.temp_winner = self.final_winner
                # step_reward = [100., 0] if self.final_winner == 0 else [0., 100]
                self.view_terminal = True
            elif self.game_round == 0:
                self._clear_agent()
                # NOTE(review): current_winner() returns a (team, dist) tuple,
                # so this comparison against 0 is always False and green gets
                # the game-1 bonus unconditionally — verify intended behavior.
                game1_winner = self.current_winner()
                step_reward = [10., 0] if game1_winner == 0 else [0., 10.]
                self.cal_game_point()
                self.game_round += 1
                next_obs = self.reset(reset_game=True)
                return next_obs, step_reward, False, 'game1 ends, switch position'
            else:
                raise NotImplementedError
        if self.current_team == 0:
            obs_next = [obs_next, np.zeros_like(obs_next)-1]
        else:
            obs_next = [np.zeros_like(obs_next)-1, obs_next]
        if self.release:
            # jitter the friction slightly after release
            h_gamma = self.down_area_gamma + random.uniform(-1, 1)*0.001
            self.gamma = h_gamma
        #return self.agent_pos, self.agent_v, self.agent_accel, self.agent_theta, obs_next, step_reward, done
        return obs_next, step_reward, done, ''
    # def get_obs_encode(self):
    #     obs = self.get_obs()
    #     if self.current_team == 0:
    #         return [obs, np.zeros_like(obs)]
    #     else:
    #         return [np.zeros_like(obs), obs]
    def get_reward(self):
        """Distance from the first rock to the house center (smaller is better)."""
        center = [300, 500]
        pos = self.agent_pos[0]
        distance = math.sqrt((pos[0]-center[0])**2 + (pos[1]-center[1])**2)
        return [distance]
    def is_terminal(self):
        """True when all rocks have been thrown and every rock has (nearly) stopped,
        or when the last throw timed out before release."""
        # if self.step_cnt >= self.max_step:
        #     return True
        if (self.num_green + self.num_purple == self.max_n*2):
            if not self.release and self.round_step > self.round_max_step:
                return True
            if self.release:
                L = []
                for agent_idx in range(self.agent_num):
                    if (self.agent_v[agent_idx][0] ** 2 + self.agent_v[agent_idx][1] ** 2) < 1e-1:
                        L.append(True)
                    else:
                        L.append(False)
                return all(L)
        else:
            return False
        # for agent_idx in range(self.agent_num):
        #     if self.agent_list[agent_idx].color == 'red' and (
        #             self.agent_v[agent_idx][0] ** 2 + self.agent_v[agent_idx][1] ** 2) < 1e-5:
        #         return True
    def _round_terminal(self):
        """Return (round_ended, info). info == -1 flags a timeout where the
        rock was never released (caller then deletes it); otherwise None."""
        if self.round_step > self.round_max_step and not self.release:     #after maximum round step the agent has not released yet
            return True, -1
        #agent_idx = -1
        L = []
        for agent_idx in range(self.agent_num):
            if (not self.agent_list[agent_idx].alive) and (self.agent_v[agent_idx][0] ** 2 +
                                                           self.agent_v[agent_idx][1] ** 2) < 1e-1:
                L.append(True)
            else:
                L.append(False)
        return all(L), None
    def _clear_agent(self):
        """Drop the last rock if the current round timed out without a release."""
        if self.round_step > self.round_max_step and not self.release:
            # clean the last agent
            del self.agent_list[-1]
            del self.agent_pos[-1]
            del self.agent_v[-1]
            del self.agent_theta[-1]
            del self.agent_accel[-1]
            self.agent_num -= 1
    def current_winner(self):
        """Return (team closest to the house center, its distance);
        team is 0 for purple, 1 for green, -1 if no rock is on the ice."""
        center = [300, 500]
        min_dist = 1e4
        win_team = -1
        for i, agent in enumerate(self.agent_list):
            pos = self.agent_pos[i]
            distance = math.sqrt((pos[0]-center[0])**2 + (pos[1]-center[1])**2)
            if distance < min_dist:
                win_team = 0 if agent.color == 'purple' else 1
                min_dist = distance
        return win_team, min_dist
    def cal_game_point(self):
        """Score the game curling-style: the team with the closest rock gains
        one point per rock nearer than the opponent's best rock."""
        center = [300, 500]
        purple_dis = []
        green_dis = []
        min_dist = 1e4
        closest_team = -1
        for i, agent in enumerate(self.agent_list):
            pos = self.agent_pos[i]
            distance = math.sqrt((pos[0]-center[0])**2 + (pos[1]-center[1])**2)
            if agent.color == 'purple':
                purple_dis.append(distance)
            elif agent.color=='green':
                green_dis.append(distance)
            else:
                raise NotImplementedError
            if distance < min_dist:
                closest_team = 0 if agent.color == 'purple' else 1
                min_dist = distance
        purple_dis = np.array(sorted(purple_dis))
        green_dis = np.array(sorted(green_dis))
        if closest_team == 0:
            if len(green_dis) == 0:
                winner_point = len(purple_dis)
            else:
                winner_point = purple_dis < green_dis[0]
            self.purple_game_point += np.float64(winner_point).sum()
        elif closest_team == 1:
            if len(purple_dis) == 0:
                winner_point = len(green_dis)
            else:
                winner_point = green_dis < purple_dis[0]
            self.green_game_point += np.float64(winner_point).sum()
        elif closest_team == -1:
            pass
        else:
            raise NotImplementedError
        #print('purple dis = {}, green dis = {}'.format(purple_dis, green_dis))
    def render(self, info=None):
        """Draw the full scene (ground, rocks, views, scoreboard) with pygame."""
        if not self.display_mode:
            self.viewer.set_mode()
            self.display_mode=True
        self.viewer.draw_background()
        ground_image = pygame.transform.scale(self.curling_ground, size=(200,200))
        self.viewer.background.blit(ground_image, (200,400))
        # draw the map first; balls are rendered on top of the map
        for w in self.map['objects']:
            if w.type=='arc':
                continue
            self.viewer.draw_map(w)
        self._draw_curling_rock(self.agent_pos, self.agent_list)
        # self.viewer.draw_ball(self.agent_pos, self.agent_list)
        if self.show_traj:
            self.get_trajectory()
            self.viewer.draw_trajectory(self.agent_record, self.agent_list)
        self.viewer.draw_direction(self.agent_pos, self.agent_accel)
        #self.viewer.draw_map()
        if self.draw_obs:
            if len(self.agent_list)!=0:
                self.viewer.draw_obs(self.obs_boundary, [self.agent_list[-1]])
                if self.current_team == 0:
                    # self.viewer.draw_view(self.obs_list, [self.agent_list[-1]])
                    # self.viewer.draw_curling_view(self.purple_rock,self.green_rock,self.obs_list, [self.agent_list[-1]])
                    self._draw_curling_view(self.obs_list, [self.agent_list[-1]])
                else:
                    # self.viewer.draw_view([None, self.obs_list[0]], [None, self.agent_list[-1]])
                    # self.viewer.draw_curling_view(self.purple_rock, self.green_rock, [None, self.obs_list[0]], [None, self.agent_list[-1]])
                    self._draw_curling_view([None, self.obs_list[0]], [None, self.agent_list[-1]])
            # scoreboard labels and per-team counters
            debug('Agent 0', x=570, y=110, c='purple')
            debug("No. throws left: ", x=470, y=140)
            debug("{}".format(self.max_n - self.num_purple), x = 590, y=140, c='purple')
            debug('Agent 1', x=640, y=110, c='green')
            debug("{}".format(self.max_n - self.num_green), x=660, y = 140, c='green')
            debug("Closest team:", x=470, y=170)
            debug("Score:", x=500, y = 200)
            debug("{}".format(int(self.purple_game_point)), x=590, y=200, c='purple')
            debug("{}".format(int(self.green_game_point)), x=660, y=200, c='green')
            if self.view_terminal:
                crown_size=(50,50)
            else:
                crown_size=(30,30)
            crown_image = pygame.transform.scale(self.crown_image, size=crown_size)
            if self.temp_winner == 0:
                self.viewer.background.blit(crown_image, (570, 150) if self.view_terminal else (580, 160))
            elif self.temp_winner == 1:
                self.viewer.background.blit(crown_image, (640, 150) if self.view_terminal else (650, 160))
            else:
                pass
            # scoreboard grid lines
            pygame.draw.line(self.viewer.background, start_pos=[470, 130], end_pos=[690, 130], color=[0,0,0])
            pygame.draw.line(self.viewer.background, start_pos=[565, 100], end_pos=[565,220], color=[0,0,0])
            pygame.draw.line(self.viewer.background, start_pos=[630, 100], end_pos=[630,220], color=[0,0,0])
            pygame.draw.line(self.viewer.background, start_pos=[470, 160], end_pos=[690, 160], color=[0,0,0])
            pygame.draw.line(self.viewer.background, start_pos=[470, 190], end_pos=[690, 190], color=[0,0,0])
        #draw energy bar
        #debug('agent remaining energy = {}'.format([i.energy for i in self.agent_list]), x=100)
        # self.viewer.draw_energy_bar(self.agent_list)
        # debug('mouse pos = '+ str(pygame.mouse.get_pos()))
        debug('Step: ' + str(self.step_cnt), x=30)
        if not self.release:
            countdown = self.round_max_step-self.round_step
        else:
            countdown = self.round_countdown
        debug("Countdown:", x=100)
        debug("{}".format(countdown), x=170, c="red")
        # debug("Current winner:", x=200)
        # if self.temp_winner == -1:
        #     debug("None", x = 300)
        # elif self.temp_winner == 0:
        #     debug("Purple", x=300, c='purple')
        # elif self.temp_winner == 1:
        #     debug("Green", x=300, c='green')
        debug('Game {}/{}'.format(self.game_round+1, 2), x= 280, y=50)
        if info is not None:
            debug(info, x=100)
        for event in pygame.event.get():
            # quit if the window is closed
            if event.type == pygame.QUIT:
                sys.exit()
        pygame.display.flip()
        #self.viewer.background.fill((255, 255, 255))
    def _draw_curling_rock(self, pos_list, agent_list):
        """Blit each rock sprite centered on its physics position."""
        assert len(pos_list) == len(agent_list)
        for i in range(len(pos_list)):
            t = pos_list[i]
            r = agent_list[i].r
            color = agent_list[i].color
            if color == 'purple':
                image_purple = pygame.transform.scale(self.purple_rock, size=(r * 2, r * 2))
                loc = (t[0] - r, t[1] - r)
                self.viewer.background.blit(image_purple, loc)
            elif color == 'green':
                image_green = pygame.transform.scale(self.green_rock, size=(r * 2, r * 2))
                loc = (t[0] - r, t[1] - r)
                self.viewer.background.blit(image_green, loc)
            else:
                raise NotImplementedError
    def _draw_curling_view(self, obs, agent_list):          #obs: [2, 100, 100] list
        """Render each agent's observation matrix as a small grid plus the
        owning rock's sprite in the scoreboard area."""
        #draw agent 1, [50, 50], [50+width, 50], [50, 50+height], [50+width, 50+height]
        coord = [580 + 70 * i for i in range(len(obs))]
        for agent_idx in range(len(obs)):
            matrix = obs[agent_idx]
            if matrix is None:
                continue
            obs_weight, obs_height = matrix.shape[0], matrix.shape[1]
            y = 40 - obs_height
            for row in matrix:
                x = coord[agent_idx]- obs_height/2
                for item in row:
                    # each observation cell is drawn as a tiny colored square
                    pygame.draw.rect(self.viewer.background, COLORS[IDX_TO_COLOR[int(item)]], [x,y,grid_node_width, grid_node_height])
                    x+= grid_node_width
                y += grid_node_height
            color = agent_list[agent_idx].color
            r = agent_list[agent_idx].r
            if color == 'purple':
                image_purple = pygame.transform.scale(self.purple_rock, size=(r*2, r*2))
                loc = [coord[agent_idx]+15-r, 70 + agent_list[agent_idx].r-r]
                self.viewer.background.blit(image_purple, loc)
            elif color == 'green':
                image_green = pygame.transform.scale(self.green_rock, size=(r*2, r*2))
                loc = [coord[agent_idx]+15-r, 70 + agent_list[agent_idx].r-r]
                self.viewer.background.blit(image_green, loc)
            else:
                raise NotImplementedError
            #
            # pygame.draw.circle(self.background, COLORS[agent_list[agent_idx].color], [coord[agent_idx]+10, 55 + agent_list[agent_idx].r],
            #                    agent_list[agent_idx].r, width=0)
            # pygame.draw.circle(self.background, COLORS["black"], [coord[agent_idx]+10, 55 + agent_list[agent_idx].r], 2,
            #                    width=0)
            pygame.draw.lines(self.viewer.background, points =[[563+70*agent_idx,10],[563+70*agent_idx, 70], [565+60+70*agent_idx,70], [565+60+70*agent_idx, 10]], closed=True,
                              color = COLORS[agent_list[agent_idx].color], width=2)
| 19,274 | 1,866 | 46 |
26db171661fa88da4f961d36f0254d8e05140455 | 22,565 | py | Python | msnhnet_onnx/x2msnhnet/onnx2msnhnet.py | BBuf/msnhnet-onnx | bcb1bcbd1d4f65547c4513d5af1ba2e27295f28b | [
"Apache-2.0"
] | 1 | 2022-02-02T09:07:15.000Z | 2022-02-02T09:07:15.000Z | msnhnet_onnx/x2msnhnet/onnx2msnhnet.py | BBuf/msnhnet-onnx | bcb1bcbd1d4f65547c4513d5af1ba2e27295f28b | [
"Apache-2.0"
] | null | null | null | msnhnet_onnx/x2msnhnet/onnx2msnhnet.py | BBuf/msnhnet-onnx | bcb1bcbd1d4f65547c4513d5af1ba2e27295f28b | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from struct import pack
import copy
from onnx import defs
from onnx import numpy_helper
from onnx.backend.base import Backend
from onnx.backend.base import Device
from onnx.backend.base import namedtupledict
from onnx.helper import make_opsetid
from onnx import numpy_helper
from msnhnet_onnx import util
from msnhnet_onnx.x2msnhnet.handler import BackendHandler
from msnhnet_onnx.x2msnhnet.handlers import *
from msnhnet_onnx.onnx_wrapper import Node as OnnxNode
from msnhnet_onnx.x2msnhnet.handler import msnhnet_params, msnhnet_weights, msnhnet_input_layer_shape
import io
import tempfile
import os
import shutil
import numpy as np
import onnx
try:
import torch
except ImportError:
print('If you want to convert pytorch model to msnhnet model, please install pytorch first')
try:
import paddle
except ImportError:
print('If you want to convert paddle model to msnhnet model, please install paddle first')
try:
import tensorflow as tf
import tf2onnx
except ImportError:
print('If you want to convert tensorflow2 model to msnhnet model, please install tensorflow and tf2onnx first')
import logging
import onnxoptimizer
try:
import onnxsim
has_onnxsim = True
except ImportError:
has_onnxsim = False
logger = logging.getLogger(__name__)
init_weight_dict = {}
def get_all_backend_handlers(opset_dict):
    """Collect every registered backend handler class, keyed by domain and op.

    e.g. {'domain': {'Abs': Abs handler class}, ...}.

    :param opset_dict: A dict of opset. e.g. {'domain': version, ...}
    :return: Dict mapping domain -> {onnx op name -> handler class}.
    """
    all_handlers = {}
    for handler_cls in BackendHandler.__subclasses__():
        handler_cls.check_cls()
        domain = handler_cls.DOMAIN
        max_version = opset_dict[domain]
        handler_cls.VERSION = max_version
        # fall back to 1 when the op schema cannot be resolved
        since_version = 1
        if not defs.has(handler_cls.ONNX_OP, domain=handler_cls.DOMAIN):
            logger.info(
                "Unknown op {} in domain `{}`.".format(
                    handler_cls.ONNX_OP, handler_cls.DOMAIN or "ai.onnx"
                )
            )
        else:
            try:
                schema = defs.get_schema(
                    handler_cls.ONNX_OP,
                    domain=handler_cls.DOMAIN,
                    max_inclusive_version=max_version,
                )
                since_version = schema.since_version
            except RuntimeError:
                logger.info(
                    "Fail to get since_version of {} in domain `{}` "
                    "with max_inclusive_version={}. Set to 1.".format(
                        handler_cls.ONNX_OP, handler_cls.DOMAIN, max_version
                    )
                )
        handler_cls.SINCE_VERSION = since_version
        domain_map = all_handlers.setdefault(domain, {})
        domain_map[handler_cls.ONNX_OP] = handler_cls
    return all_handlers
class MsnhnetBackend(Backend):
    """ Msnhnet Backend for ONNX

    Converts an ONNX ModelProto into a dict of MsnhNet variables by walking
    the graph node-by-node and dispatching each op to its registered
    BackendHandler subclass.
    """
    @classmethod
    def prepare(
        cls,
        model,
        device="CPU",
        strict=True,
        logging_level="INFO",
        blob_dict=None,
        **kwargs
    ):
        """Prepare an ONNX model for MsnhNet Backend.
        :param model: The ONNX model to be converted.
        :param device: The device to execute this model on.
        :param strict: Whether to enforce semantic equivalence between the original model
            and the converted msnhnet model, defaults to True (yes, enforce semantic equivalence).
            Changing to False is strongly discouraged.
            Currently, the strict flag only affects the behavior of MaxPool and AveragePool ops.
        :param logging_level: The logging level, default is INFO. Change it to DEBUG
            to see more conversion details or to WARNING to see less
        :param blob_dict: optional {input name: blob} mapping used to feed the
            graph inputs that are not initializers.
        :returns: The variable dict of the converted msnhnet model
        """
        super(MsnhnetBackend, cls).prepare(model, device, **kwargs)
        logger.setLevel(logging_level)
        return cls.onnx_model_to_msnhnet(model, strict, blob_dict=blob_dict)
    @classmethod
    def onnx_model_to_msnhnet(cls, model, strict, blob_dict=None):
        """ Convert ONNX model to MsnhNet.
        :param model: ONNX ModelProto object.
        :param strict: whether to enforce semantic equivalence between the original model
            and the converted msnhnet model.
        :param blob_dict: optional {input name: blob} mapping for graph inputs.
        :return: The variable dict of the converted msnhnet model
        """
        # Models with IR_VERSION less than 3 does not have opset_import set.
        # We default to minimum opset, this behavior is consistent with
        # onnx checker.
        # c.f. https://github.com/onnx/onnx/blob/427ac0c1b792363d373e3d7e4eef97fa46458420/onnx/checker.cc#L478
        if model.ir_version < 3:
            opset_import = [make_opsetid(defs.ONNX_DOMAIN, 1)]
        else:
            opset_import = model.opset_import
        return cls._onnx_graph_to_msnhnet(
            model.graph, opset_import, strict, blob_dict=blob_dict
        )
    @classmethod
    def _onnx_graph_to_msnhnet(cls, graph_def, opset, strict, blob_dict=None):
        """ Convert ONNX graph to msnhnet.
        :param graph_def: ONNX GraphProto object.
        :param opset: ONNX OperatorSetIdProto list.
        :param strict: whether to enforce semantic equivalence between the original model
            and the converted msnhnet.
        :param blob_dict: {name: msnhnet_blob}, the inputs of onnx graph will be
            populated with msnhnet_blob with the same name
        :return: The variable dict of the converted msnhnet model
        """
        if blob_dict is None:
            blob_dict = {}
        handlers = cls._get_handlers(opset)
        # initializer: TensorProtos representing the values to initialize
        # a given tensor.
        # initialized: A list of names of the initialized tensors.
        if graph_def.initializer:
            input_dict_items = cls._onnx_initializer_to_input_dict_items(
                graph_def.initializer
            )
            initialized = {
                init.name: onnx.numpy_helper.to_array(init)
                for init in graph_def.initializer
            }
        else:
            input_dict_items = []
            initialized = {}
        # Constant nodes behave like initializers: record their values so the
        # corresponding graph inputs are not treated as placeholders.
        for node in graph_def.node:
            node = OnnxNode(node)
            if node.op_type == "Constant":
                initialized[node.output_tensor_names[0]] = numpy_helper.to_array(
                    node.attrs["value"]
                )
        # creating placeholders for currently unknown inputs
        for value_info in graph_def.input:
            if value_info.name in initialized:
                continue
            shape = list(
                d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
                for d in value_info.type.tensor_type.shape.dim
            )
            if value_info.name not in blob_dict:
                raise NotImplementedError("no blob named {}".format(value_info.name))
            input_dict_items.append((value_info.name, blob_dict[value_info.name]))
        # tensor dict: this dictionary is a map from variable names
        # to the latest produced msnhnet variables of the given name.
        # This dictionary will get updated as we build the graph to
        # record the names of newly produced tensors.
        tensor_dict = dict(input_dict_items)
        # Since tensor dict may be updated, we need to keep a copy
        # of the original input dict where we track the earliest
        # defined tensors so we can have access to the placeholders
        # to feed in input tensors when we run the graph.
        input_dict = dict(input_dict_items)
        for node in graph_def.node:
            onnx_node = OnnxNode(node)
            output_ops = cls._onnx_node_to_msnhnet_op(
                onnx_node,
                tensor_dict,
                initialized,
                handlers,
                opset=opset,
                strict=strict,
            )
            curr_node_output_map = dict(zip(onnx_node.output_tensor_names, output_ops))
            tensor_dict.update(curr_node_output_map)
        return tensor_dict
    @classmethod
    def _onnx_initializer_to_input_dict_items(cls, initializer):
        """ Convert ONNX graph initializer to input dict items.
        :param initializer: ONNX graph initializer, list of TensorProto.
        :return: List of input dict items.
        """
        # NOTE(review): weights are looked up in the module-level
        # init_weight_dict, which must have been populated beforehand.
        return [
            (
                init.name,
                # flow.get_variable(
                #     name=init.name,
                #     shape=get_flow_shape(list(init.dims)),
                #     initializer=flow.zeros_initializer(),
                #     trainable=True,
                #     dtype=util.Onnx2FlowDtype(init.data_type),
                # ),
                init_weight_dict[init.name],
            )
            for init in initializer
        ]
    @classmethod
    def _onnx_node_to_msnhnet_op(
        cls, node, tensor_dict, init_dict, handlers=None, opset=None, strict=True
    ):
        """
        Convert onnx node to msnhnet op.
        Args:
            node: Onnx node object.
            tensor_dict: Tensor dict of graph.
            init_dict: dict of initialized tensors (weights/constants).
            handlers: optional pre-built handler registry; rebuilt from opset
                when not supplied.
            opset: Opset version of the operator set. Default 0 means using latest version.
            strict: whether to enforce semantic equivalence between the original model
                and the converted msnhnet model, defaults to True (yes, enforce semantic equivalence).
                Changing to False is strongly discouraged.
        Returns:
            msnhnet op
        Raises:
            ValueError: if no handler is registered for the node's op type.
        """
        handlers = handlers or cls._get_handlers(opset)
        handler = handlers[node.domain].get(node.op_type, None)
        if handler:
            output = handler.handle(
                node, tensor_dict, init_dict=init_dict, strict=strict
            )
            if not isinstance(output, (list, tuple)):
                output = [output]
            return output
        else:
            raise ValueError("{} is not supported".format(node.op_type))
    @classmethod
    def _get_handlers(cls, opset):
        """ Get all backend handlers with opset.
        :param opset: ONNX OperatorSetIdProto list.
        :return: All backend handlers.
        """
        opset = opset or [make_opsetid(defs.ONNX_DOMAIN, defs.onnx_opset_version())]
        opset_dict = dict([(o.domain, o.version) for o in opset])
        return get_all_backend_handlers(opset_dict)
prepare = MsnhnetBackend.prepare
| 36.453958 | 163 | 0.605141 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from struct import pack
import copy
from onnx import defs
from onnx import numpy_helper
from onnx.backend.base import Backend
from onnx.backend.base import Device
from onnx.backend.base import namedtupledict
from onnx.helper import make_opsetid
from onnx import numpy_helper
from msnhnet_onnx import util
from msnhnet_onnx.x2msnhnet.handler import BackendHandler
from msnhnet_onnx.x2msnhnet.handlers import *
from msnhnet_onnx.onnx_wrapper import Node as OnnxNode
from msnhnet_onnx.x2msnhnet.handler import msnhnet_params, msnhnet_weights, msnhnet_input_layer_shape
import io
import tempfile
import os
import shutil
import numpy as np
import onnx
try:
import torch
except ImportError:
print('If you want to convert pytorch model to msnhnet model, please install pytorch first')
try:
import paddle
except ImportError:
print('If you want to convert paddle model to msnhnet model, please install paddle first')
try:
import tensorflow as tf
import tf2onnx
except ImportError:
print('If you want to convert tensorflow2 model to msnhnet model, please install tensorflow and tf2onnx first')
import logging
import onnxoptimizer
try:
import onnxsim
has_onnxsim = True
except ImportError:
has_onnxsim = False
logger = logging.getLogger(__name__)
init_weight_dict = {}
def from_onnx(
    onnx_model: onnx.ModelProto, inputs, model_weight_dir="/tmp/tmp", do_onnxsim=True, from_tf2=False, from_paddle=False, from_pytorch=False,
):
    """Convert an ONNX model to MsnhNet format and run it once on *inputs*.

    Side effects: writes ``model.msnhnet`` (text description) and
    ``model.msnhbin`` (weights) into *model_weight_dir*, saves the simplified
    graph to ``/tmp/simp.onnx``, and populates the module-level
    ``msnhnet_input_layer_shape`` / ``init_weight_dict`` tables used by the
    op handlers.

    :param onnx_model: ONNX ModelProto to convert.
    :param inputs: a single input blob, or {input_name: blob} for multi-input models.
    :param model_weight_dir: destination directory for the MsnhNet files.
    :param do_onnxsim: run onnx-simplifier first, when it is installed.
    :param from_tf2: apply tf2onnx-specific name / batch-norm fix-ups.
    :param from_paddle: apply paddle2onnx-specific initializer renaming fix-ups.
    :param from_pytorch: flag set by the PyTorch entry point (not used in this body).
    :return: the single output blob, or {output_name: blob} when the graph has
        several outputs.
    """
    # msnhnet_params = []
    # msnhnet_weights = []
    input_names = [x.name for x in onnx_model.graph.input]
    if type(inputs) is not dict:
        assert (
            len(input_names) == 1
        ), "Please use input dict if the model has multiple inputs"
        inputs = {input_names[0]: inputs}
    if do_onnxsim and has_onnxsim:
        # NOTE(review): the value of this expression is discarded -- it looks
        # like leftover debugging and is a candidate for removal.
        dict(zip(input_names, [x.shape for x in inputs.values()]))
        onnx_model, _ = onnxsim.simplify(
            onnx_model,
            skip_fuse_bn=True,
            skip_shape_inference=False,
            input_shapes=dict(zip(input_names, [x.shape for x in inputs.values()])),
        )
    elif do_onnxsim:
        logger.info(
            "We recommend installing onnx-simplifier so that MsnhNet can remove the redundant ONNX nodes"
        )
    initializer_name = []
    if from_tf2:
        # tf2onnx emits tensor names containing '/' and ':'; normalise every
        # input, node, and initializer name before further processing.
        for x in onnx_model.graph.input:
            x.name = x.name.replace('/', '_')
            x.name = x.name.replace(':', '_')
        for i, node in enumerate(onnx_model.graph.node):
            node.name = node.name.replace('/', '_')
            node.name = node.name.replace(':', '_')
            for j in range(len(node.input)):
                node.input[j] = node.input[j].replace('/', '_')
                node.input[j] = node.input[j].replace(':', '_')
            for j in range(len(node.output)):
                node.output[j] = node.output[j].replace('/', '_')
                node.output[j] = node.output[j].replace(':', '_')
        for x in onnx_model.graph.initializer:
            x.name = x.name.replace('/', '_')
            x.name = x.name.replace(':', '_')
            initializer_name.append(x.name)
        # to solve tf batchnorm without scale params
        delete_node_name = []
        for i, node in enumerate(onnx_model.graph.node):
            if node.op_type == "BatchNormalization":
                if node.input[1] in initializer_name:
                    pass
                else:
                    delete_node_name.append(node.input[1])
        for i, x in enumerate(onnx_model.graph.input):
            if x.name in delete_node_name:
                # Synthesise an all-ones scale initializer for batch-norm
                # layers that were exported without one.
                tensor_dim = onnx_model.graph.input[i].type.tensor_type.shape.dim
                new_bn_value = []
                for j in range(int(tensor_dim[0].dim_value)):
                    new_bn_value.append(1.0)
                new_bn_scale_node = onnx.helper.make_tensor(name=x.name, data_type=onnx.TensorProto.FLOAT, dims=(int(tensor_dim[0].dim_value),), vals=new_bn_value)
                onnx_model.graph.initializer.extend([new_bn_scale_node])
        for x in onnx_model.graph.input:
            if x.name in delete_node_name:
                onnx_model.graph.input.remove(x)
    # to solve paddlepaddle2msnhnet initializer rename bug
    if from_paddle == True:
        graph_input_name = {}
        graph_initializer_name = []
        for x in onnx_model.graph.initializer:
            graph_initializer_name.append(x.name)
        for i, node in enumerate(onnx_model.graph.node):
            # node_cp = node
            node_cp = copy.deepcopy(node)
            for j in range(len(node.input)):
                if node.input[j] in graph_initializer_name:
                    # Prefix shared initializer inputs with the consumer node
                    # name so that each consumer gets a uniquely named copy.
                    node_cp.input[j] = node.name + "_" + node.input[j]
                    graph_input_name[node_cp.input[j]] = node.input[j]
            onnx_model.graph.node.remove(node)
            onnx_model.graph.node.insert(i, node_cp)
        extend_op = []
        for k, v in graph_input_name.items():
            for x in onnx_model.graph.initializer:
                base_name = x.name
                if x.name == v:
                    x.name = k
                    # Duplicate the initializer for every other consumer alias.
                    for k2, v2 in graph_input_name.items():
                        if v2 == base_name and k2 != k:
                            x_cp = copy.deepcopy(x)
                            x_cp.name = k2
                            extend_op.append(x_cp)
            for x in onnx_model.graph.input:
                if x.name == v:
                    onnx_model.graph.input.remove(x)
        for x in extend_op:
            onnx_model.graph.initializer.extend([x])
    # for code gen
    for x in onnx_model.graph.input:
        x.name = x.name.replace('.', '_')
        x.name = x.name.replace('/', '_')
        x.name = x.name.replace(':', '_')
    for i, node in enumerate(onnx_model.graph.node):
        node.name = node.name.replace('.', '_')
        node.name = node.name.replace('/', '_')
        node.name = node.name.replace(':', '_')
        for j in range(len(node.input)):
            node.input[j] = node.input[j].replace('.', '_')
            node.input[j] = node.input[j].replace('/', '_')
            node.input[j] = node.input[j].replace(':', '_')
        for j in range(len(node.output)):
            node.output[j] = node.output[j].replace('.', '_')
            node.output[j] = node.output[j].replace('/', '_')
            node.output[j] = node.output[j].replace(':', '_')
    for x in onnx_model.graph.initializer:
        x.name = x.name.replace('.', '_')
        x.name = x.name.replace('/', '_')
        x.name = x.name.replace(':', '_')
    for x in onnx_model.graph.output:
        x.name = x.name.replace('.', '_')
        x.name = x.name.replace('/', '_')
        x.name = x.name.replace(':', '_')
    graph_initializer_name = []
    for x in onnx_model.graph.initializer:
        graph_initializer_name.append(x.name)
    # Rewrite every edge name to `<node>_input_<j>` / `<node>_output_<j>` so
    # the generated MsnhNet layer names are predictable.
    graph_name_dict = {}
    rename_set = []
    for i, node in enumerate(onnx_model.graph.node):
        # node_cp = node
        node_cp = copy.deepcopy(node)
        if node.name == '':
            # Give anonymous nodes a unique `<op_type>_<n>` name.
            # NOTE(review): node_cp was deep-copied before this rename, so the
            # re-inserted copy keeps the empty name -- confirm this is intended.
            cnt = 0
            while True:
                node.name = node.op_type + '_{}'.format(cnt)
                if node.name in rename_set:
                    pass
                else:
                    rename_set.append(node.name)
                    break
                cnt = cnt + 1
        for j in range(len(node.input)):
            if node.input[j] == 'x_0':
                node_cp.input[j] = node.input[j]
            elif node.input[j] in graph_name_dict:
                node_cp.input[j] = graph_name_dict[node.input[j]]
            else:
                # Clip min/max that are not initializers keep their names.
                if node.op_type == "Clip" and (node.input[j] not in graph_initializer_name):
                    pass
                else:
                    node_cp.input[j] = node.name.lower() + '_input_{}'.format(j)
                    graph_name_dict[node.input[j]] = node_cp.input[j]
        for j in range(len(node.output)):
            if node.output[j] in graph_name_dict:
                node_cp.output[j] = graph_name_dict[node.output[j]]
            else:
                node_cp.output[j] = node.name.lower() + '_output_{}'.format(j)
                graph_name_dict[node.output[j]] = node_cp.output[j]
        onnx_model.graph.node.remove(node)
        onnx_model.graph.node.insert(i, node_cp)
    # Propagate the renaming to graph inputs, outputs, and initializers.
    for x in onnx_model.graph.input:
        if x.name in graph_name_dict:
            x.name = graph_name_dict[x.name]
    for x in onnx_model.graph.output:
        if x.name in graph_name_dict:
            x.name = graph_name_dict[x.name]
    for x in onnx_model.graph.initializer:
        if x.name in graph_name_dict:
            x.name = graph_name_dict[x.name]
    onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
    # to save onnx model after onnx_simplifier
    if not os.path.exists("/tmp"):
        os.makedirs("/tmp")
    onnx.save(onnx_model, "/tmp/simp.onnx")
    # Record every inferred intermediate tensor shape for the op handlers.
    for val in onnx_model.graph.value_info:
        shape = []
        for j in range(len(val.type.tensor_type.shape.dim)):
            shape.append(val.type.tensor_type.shape.dim[j].dim_value)
        msnhnet_input_layer_shape[val.name] = shape
    for x in onnx_model.graph.initializer:
        init_weight_dict[x.name] = numpy_helper.to_array(x)
    # Run the conversion; the handlers append to msnhnet_params/msnhnet_weights.
    d = prepare(onnx_model, blob_dict=inputs)
    if not os.path.exists(model_weight_dir):
        os.makedirs(model_weight_dir)
    with open(os.path.join(model_weight_dir, "model.msnhnet"), "w") as temp_file:
        for x in msnhnet_params:
            temp_file.write("%s" % x)
    with open(os.path.join(model_weight_dir, "model.msnhbin"), "wb") as temp_file:
        for x in msnhnet_weights:
            temp_file.write(pack('f', x))
    # with open(os.path.join(model_weight_dir, "model.txt"), "w") as temp_file:
    #     for x in msnhnet_weights:
    #         temp_file.write("%s\n" % x)
    output_names = [x.name for x in onnx_model.graph.output]
    if len(output_names) == 1:
        return d[output_names[0]]
    return {output_name: d[output_name] for output_name in output_names}
def from_pytorch(
    torch_model, inputs, model_weight_dir="/tmp", do_onnxsim=True, train_flag=True
):
    """Export a PyTorch module to ONNX in memory, then convert it to MsnhNet.

    :param torch_model: torch.nn.Module to convert (moved to CPU first).
    :param inputs: a 4-D NCHW example tensor, or a list of them for multi-input models.
    :param model_weight_dir: output directory for model.msnhnet / model.msnhbin.
    :param do_onnxsim: forwarded to from_onnx; run onnx-simplifier when available.
    :param train_flag: forwarded to torch.onnx.export's ``training`` argument.
    :return: result of from_onnx (output blob, or dict of blobs).
    """
    if type(inputs) is not list:
        inputs = [inputs]
    input_names = ["x_{}".format(i) for i in range(len(inputs))]
    # Only NCHW image-like inputs are supported.
    assert len(inputs[0].shape) == 4
    # NOTE(review): list.extend() with a str appends one character at a time;
    # the writer later emits items individually, so the file content is
    # unchanged, but append() was probably intended.
    msnhnet_params.extend(f"config:\n")
    msnhnet_params.extend(f" batch: {inputs[0].shape[0]}\n")
    msnhnet_params.extend(f" height: {inputs[0].shape[2]}\n")
    msnhnet_params.extend(f" width: {inputs[0].shape[3]}\n")
    msnhnet_params.extend(f" channels: {inputs[0].shape[1]}\n")
    torch_model = torch_model.to("cpu")
    f = io.BytesIO()
    torch.onnx.export(
        torch_model,
        tuple([torch.zeros(ipt.shape) for ipt in inputs]),
        f,
        input_names=input_names,
        opset_version=12,
        training=train_flag,
    )
    model_str = f.getvalue()
    onnx_model = onnx.load_model_from_string(model_str)
    return from_onnx(
        onnx_model,
        dict(zip(input_names, inputs)),
        model_weight_dir=model_weight_dir,
        do_onnxsim=do_onnxsim,
        from_pytorch=True,
    )
def from_paddle(
    paddle_model, inputs, model_weight_dir="/tmp", do_onnxsim=True, train_flag=True
):
    """Export a PaddlePaddle model to ONNX on disk, then convert it to MsnhNet.

    :param paddle_model: paddle model to convert (switched to eval mode).
    :param inputs: a single 4-D NCHW example tensor.
    :param model_weight_dir: output directory for model.msnhnet / model.msnhbin.
    :param do_onnxsim: forwarded to from_onnx; run onnx-simplifier when available.
    :param train_flag: accepted for API symmetry with from_pytorch; unused here.
    :return: result of from_onnx (output blob, or dict of blobs).
    """
    input_names = "x_0"
    paddle_model.eval()
    input_spec = paddle.static.InputSpec(
        shape=inputs.shape, dtype="float32", name=input_names
    )
    # Only NCHW image-like inputs are supported.
    assert len(inputs.shape) == 4
    # NOTE(review): extend() with a str appends characters one at a time;
    # see the matching note in from_pytorch.
    msnhnet_params.extend(f"config:\n")
    msnhnet_params.extend(f" batch: {inputs.shape[0]}\n")
    msnhnet_params.extend(f" height: {inputs.shape[2]}\n")
    msnhnet_params.extend(f" width: {inputs.shape[3]}\n")
    msnhnet_params.extend(f" channels: {inputs.shape[1]}\n")
    mode_str = "/tmp/tmp"
    paddle.onnx.export(
        paddle_model,
        mode_str,
        input_spec=[input_spec],
        opset_version=12,
        enable_onnx_checker=True,
    )
    onnx_model = onnx.load(str(mode_str + ".onnx"))
    return from_onnx(
        onnx_model,
        dict(zip([input_names], [inputs])),
        model_weight_dir=model_weight_dir,
        do_onnxsim=do_onnxsim,
        from_paddle=True,
    )
def from_tensorflow2(
    tf_model, inputs, model_weight_dir="/tmp", do_onnxsim=True, train_flag=True
):
    """Export a TF2/Keras model to ONNX via tf2onnx, then convert it to MsnhNet.

    :param tf_model: Keras model to convert.
    :param inputs: a single 4-D example tensor (layout as fed to the model).
    :param model_weight_dir: output directory for model.msnhnet / model.msnhbin.
    :param do_onnxsim: forwarded to from_onnx; run onnx-simplifier when available.
    :param train_flag: accepted for API symmetry with from_pytorch; unused here.
    :return: result of from_onnx (output blob, or dict of blobs).
    """
    input_names = "x_0"
    # Only 4-D image-like inputs are supported.
    assert len(inputs.shape) == 4
    # NOTE(review): extend() with a str appends characters one at a time;
    # see the matching note in from_pytorch.
    msnhnet_params.extend(f"config:\n")
    msnhnet_params.extend(f" batch: {inputs.shape[0]}\n")
    msnhnet_params.extend(f" height: {inputs.shape[2]}\n")
    msnhnet_params.extend(f" width: {inputs.shape[3]}\n")
    msnhnet_params.extend(f" channels: {inputs.shape[1]}\n")
    # input_spec = paddle.static.InputSpec(
    #     shape=inputs.shape, dtype="float32", name=input_names
    # )
    spec = (tf.TensorSpec(inputs.shape, tf.float32, name=input_names),)
    mode_str = "/tmp/tmp.onnx"
    model_proto, _ = tf2onnx.convert.from_keras(
        tf_model, input_signature=spec, opset=11, output_path=mode_str
    )
    return from_onnx(
        model_proto,
        dict(zip([input_names], [inputs])),
        model_weight_dir=model_weight_dir,
        do_onnxsim=do_onnxsim,
        from_tf2=True,
    )
def get_all_backend_handlers(opset_dict):
    """Collect every registered BackendHandler subclass, keyed by domain and op.

    Returns e.g. ``{'domain': {'Abs': Abs handler class, ...}, ...}`` and, as a
    side effect, stamps ``VERSION`` / ``SINCE_VERSION`` onto each handler class.

    :param opset_dict: mapping of domain -> opset version.
    :return: nested dict of handler classes.
    """
    registry = {}
    for handler_cls in BackendHandler.__subclasses__():
        handler_cls.check_cls()

        domain = handler_cls.DOMAIN
        version = opset_dict[domain]
        handler_cls.VERSION = version

        since_version = 1
        if not defs.has(handler_cls.ONNX_OP, domain=handler_cls.DOMAIN):
            logger.info(
                "Unknown op {} in domain `{}`.".format(
                    handler_cls.ONNX_OP, handler_cls.DOMAIN or "ai.onnx"
                )
            )
        else:
            try:
                schema = defs.get_schema(
                    handler_cls.ONNX_OP,
                    domain=handler_cls.DOMAIN,
                    max_inclusive_version=version,
                )
                since_version = schema.since_version
            except RuntimeError:
                logger.info(
                    "Fail to get since_version of {} in domain `{}` "
                    "with max_inclusive_version={}. Set to 1.".format(
                        handler_cls.ONNX_OP, handler_cls.DOMAIN, version
                    )
                )
        handler_cls.SINCE_VERSION = since_version

        registry.setdefault(domain, {})[handler_cls.ONNX_OP] = handler_cls
    return registry
class MsnhnetBackend(Backend):
    """ Msnhnet Backend for ONNX
    """

    @classmethod
    def prepare(
        cls,
        model,
        device="CPU",
        strict=True,
        logging_level="INFO",
        blob_dict=None,
        **kwargs
    ):
        """Prepare an ONNX model for MsnhNet Backend.

        :param model: The ONNX model to be converted.
        :param device: The device to execute this model on.
        :param strict: Whether to enforce semantic equivalence between the original model
            and the converted msnhnet model, defaults to True (yes, enforce semantic equivalence).
            Changing to False is strongly discouraged.
            Currently, the strict flag only affects the behavior of MaxPool and AveragePool ops.
        :param logging_level: The logging level, default is INFO. Change it to DEBUG
            to see more conversion details or to WARNING to see less
        :param blob_dict: {input_name: blob} used to feed the graph inputs.
        :returns: The variable dict of the converted msnhnet model
        """
        super(MsnhnetBackend, cls).prepare(model, device, **kwargs)
        logger.setLevel(logging_level)
        return cls.onnx_model_to_msnhnet(model, strict, blob_dict=blob_dict)

    @classmethod
    def onnx_model_to_msnhnet(cls, model, strict, blob_dict=None):
        """ Convert ONNX model to MsnhNet.

        :param model: ONNX ModelProto object.
        :param strict: whether to enforce semantic equivalence between the original model
            and the converted msnhnet model.
        :return: The variable dict of the converted msnhnet model
        """
        # Models with IR_VERSION less than 3 does not have opset_import set.
        # We default to minimum opset, this behavior is consistent with
        # onnx checker.
        # c.f. https://github.com/onnx/onnx/blob/427ac0c1b792363d373e3d7e4eef97fa46458420/onnx/checker.cc#L478
        if model.ir_version < 3:
            opset_import = [make_opsetid(defs.ONNX_DOMAIN, 1)]
        else:
            opset_import = model.opset_import
        return cls._onnx_graph_to_msnhnet(
            model.graph, opset_import, strict, blob_dict=blob_dict
        )

    @classmethod
    def _onnx_graph_to_msnhnet(cls, graph_def, opset, strict, blob_dict=None):
        """ Convert ONNX graph to msnhnet.

        :param graph_def: ONNX GraphProto object.
        :param opset: ONNX OperatorSetIdProto list.
        :param strict: whether to enforce semantic equivalence between the original model
            and the converted msnhnet.
        :param blob_dict: {name: msnhnet_blob}, the inputs of onnx graph will be populated with msnhnet_blob with the same name
        :return: The variable dict of the converted msnhnet model
        """
        if blob_dict is None:
            blob_dict = {}
        handlers = cls._get_handlers(opset)
        # initializer: TensorProtos representing the values to initialize
        # a given tensor.
        # initialized: A list of names of the initialized tensors.
        if graph_def.initializer:
            input_dict_items = cls._onnx_initializer_to_input_dict_items(
                graph_def.initializer
            )
            initialized = {
                init.name: onnx.numpy_helper.to_array(init)
                for init in graph_def.initializer
            }
        else:
            input_dict_items = []
            initialized = {}
        # Constant nodes are folded into the initialized table as well.
        for node in graph_def.node:
            node = OnnxNode(node)
            if node.op_type == "Constant":
                initialized[node.output_tensor_names[0]] = numpy_helper.to_array(
                    node.attrs["value"]
                )
        # creating placeholders for currently unknown inputs
        for value_info in graph_def.input:
            if value_info.name in initialized:
                continue
            # NOTE(review): `shape` is computed but never used below.
            shape = list(
                d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
                for d in value_info.type.tensor_type.shape.dim
            )
            if value_info.name not in blob_dict:
                raise NotImplementedError("no blob named {}".format(value_info.name))
            input_dict_items.append((value_info.name, blob_dict[value_info.name]))
        # tensor dict: this dictionary is a map from variable names
        # to the latest produced msnhnet variables of the given name.
        # This dictionary will get updated as we build the graph to
        # record the names of newly produced tensors.
        tensor_dict = dict(input_dict_items)
        # Since tensor dict may be updated, we need to keep a copy
        # of the original input dict where we track the earliest
        # defined tensors so we can have access to the placeholders
        # to feed in input tensors when we run the graph.
        input_dict = dict(input_dict_items)
        for node in graph_def.node:
            onnx_node = OnnxNode(node)
            output_ops = cls._onnx_node_to_msnhnet_op(
                onnx_node,
                tensor_dict,
                initialized,
                handlers,
                opset=opset,
                strict=strict,
            )
            curr_node_output_map = dict(zip(onnx_node.output_tensor_names, output_ops))
            tensor_dict.update(curr_node_output_map)
        return tensor_dict

    @classmethod
    def _onnx_initializer_to_input_dict_items(cls, initializer):
        """ Convert ONNX graph initializer to input dict items.

        :param initializer: ONNX graph initializer, list of TensorProto.
        :return: List of input dict items.
        """
        # NOTE(review): get_msnhnet_shape is currently unused; the values come
        # from the module-level init_weight_dict populated by from_onnx.
        def get_msnhnet_shape(shape):
            if len(shape) == 0:
                return (1,)
            return shape

        return [
            (
                init.name,
                # flow.get_variable(
                #     name=init.name,
                #     shape=get_flow_shape(list(init.dims)),
                #     initializer=flow.zeros_initializer(),
                #     trainable=True,
                #     dtype=util.Onnx2FlowDtype(init.data_type),
                # ),
                init_weight_dict[init.name],
            )
            for init in initializer
        ]

    @classmethod
    def _onnx_node_to_msnhnet_op(
        cls, node, tensor_dict, init_dict, handlers=None, opset=None, strict=True
    ):
        """
        Convert onnx node to msnhnet op.
        Args:
            node: Onnx node object.
            tensor_dict: Tensor dict of graph.
            init_dict: Dict of initialized tensors (weights/constants).
            handlers: Pre-built handler registry; looked up from opset when None.
            opset: Opset version of the operator set. Default 0 means using latest version.
            strict: whether to enforce semantic equivalence between the original model
                and the converted msnhnet model, defaults to True (yes, enforce semantic equivalence).
                Changing to False is strongly discouraged.
        Returns:
            msnhnet op
        Raises:
            ValueError: if no handler is registered for the node's op_type.
        """
        handlers = handlers or cls._get_handlers(opset)
        handler = handlers[node.domain].get(node.op_type, None)
        if handler:
            output = handler.handle(
                node, tensor_dict, init_dict=init_dict, strict=strict
            )
            # Normalise scalar handler results into a list of outputs.
            if not isinstance(output, (list, tuple)):
                output = [output]
            return output
        else:
            raise ValueError("{} is not supported".format(node.op_type))

    @classmethod
    def _get_handlers(cls, opset):
        """ Get all backend handlers with opset.

        :param opset: ONNX OperatorSetIdProto list.
        :return: All backend handlers.
        """
        opset = opset or [make_opsetid(defs.ONNX_DOMAIN, defs.onnx_opset_version())]
        opset_dict = dict([(o.domain, o.version) for o in opset])
        return get_all_backend_handlers(opset_dict)
# Module-level alias so callers can invoke `prepare(model, ...)` directly.
prepare = MsnhnetBackend.prepare
| 11,995 | 0 | 123 |
a712a3750d654969a570adf1e66841935ec26362 | 691 | py | Python | bin/process_exportpicasa_xml.py | gombos/dotfiles | 4211b1b4778ee94f73ab3998a0a40d6820e15a1c | [
"Apache-2.0"
] | 1 | 2017-04-17T16:15:23.000Z | 2017-04-17T16:15:23.000Z | bin/process_exportpicasa_xml.py | gombos/dotfiles | 4211b1b4778ee94f73ab3998a0a40d6820e15a1c | [
"Apache-2.0"
] | null | null | null | bin/process_exportpicasa_xml.py | gombos/dotfiles | 4211b1b4778ee94f73ab3998a0a40d6820e15a1c | [
"Apache-2.0"
] | null | null | null | import xml.etree.ElementTree as ET
# Point this to the output of exportpicasa
XML_FILE_PATH = '/home/user/3/index.xml'
tree = ET.parse(XML_FILE_PATH)
root = tree.getroot()
for folder in root:
folderName = folder.get('name')
for file in folder:
fileName = file.get('name')
for face in file:
personName = face.get('contact_name')
# Let digikam calculate these to train its AI
# rectLeft = float(face.get('rect_left'))
# rectRight = float(face.get('rect_right'))
# rectTop = float(face.get('rect_top'))
# rectBottom = float(face.get('rect_bottom'))
if personName:
print ('Image: ' + folderName + '/' + fileName + ', personName: ' + personName)
print (rectLeft)
| 27.64 | 83 | 0.68741 | import xml.etree.ElementTree as ET
# Point this to the output of exportpicasa
XML_FILE_PATH = '/home/user/3/index.xml'
tree = ET.parse(XML_FILE_PATH)
root = tree.getroot()
for folder in root:
folderName = folder.get('name')
for file in folder:
fileName = file.get('name')
for face in file:
personName = face.get('contact_name')
# Let digikam calculate these to train its AI
# rectLeft = float(face.get('rect_left'))
# rectRight = float(face.get('rect_right'))
# rectTop = float(face.get('rect_top'))
# rectBottom = float(face.get('rect_bottom'))
if personName:
print ('Image: ' + folderName + '/' + fileName + ', personName: ' + personName)
print (rectLeft)
| 0 | 0 | 0 |
184485bf328912d205cbcd17cd9d4771ad2e89b5 | 1,472 | py | Python | dataset/multimask_sparse_contr_dataset.py | ashwinipokle/contrastive_landscape | daec951c7a4cfc6c96464e0ef010081a642e3847 | [
"MIT"
] | 2 | 2022-03-30T07:24:07.000Z | 2022-03-30T07:53:44.000Z | dataset/multimask_sparse_contr_dataset.py | ashwinipokle/contrastive_landscape | daec951c7a4cfc6c96464e0ef010081a642e3847 | [
"MIT"
] | null | null | null | dataset/multimask_sparse_contr_dataset.py | ashwinipokle/contrastive_landscape | daec951c7a4cfc6c96464e0ef010081a642e3847 | [
"MIT"
] | null | null | null | import numpy as np
from torch.utils.data import Dataset
# Custom collate for dataset | 26.285714 | 91 | 0.5625 | import numpy as np
from torch.utils.data import Dataset
class MultiMaskedSparseContrastiveDataset(Dataset):
    """Dataset that yields several randomly masked view pairs of each sample.

    Each ``__getitem__`` call draws ``n_aug`` pairs of independent diagonal 0/1
    masks and applies them to the sample, producing two masked views per pair
    (the standard augmentation for sparse contrastive learning).
    """

    def __init__(self, data, Z, prob_ones=0.5, n_aug=5):
        """
        :param data: array of shape (n_samples, p); rows are feature vectors.
        :param Z: per-sample integer labels of shape (n_samples,).
        :param prob_ones: probability that a coordinate survives the mask.
        :param n_aug: number of masked view pairs generated per sample.
        """
        self.data = data
        self.Z = Z
        self.n_aug = n_aug
        assert data.shape[0] == Z.shape[0]
        self.prob_ones = prob_ones

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return (a1_list, a2_list, label): two lists of n_aug masked views."""
        x = self.data[idx]
        p = x.shape[0]
        a1_list = []
        a2_list = []
        for _ in range(self.n_aug):
            identity = np.eye(p)
            # Each diagonal mask keeps a coordinate with probability prob_ones.
            mask = np.random.choice([0, 1], (p, p), p=[1 - self.prob_ones, self.prob_ones])
            D1 = identity * mask
            mask = np.random.choice([0, 1], (p, p), p=[1 - self.prob_ones, self.prob_ones])
            D2 = identity * mask
            a1 = np.matmul(D1, x)
            a2 = np.matmul(D2, x)
            # FIX: np.float / np.int were deprecated aliases of the builtins and
            # were removed in NumPy 1.24; use the builtins they resolved to.
            a1_list.append(a1.astype(float))
            a2_list.append(a2.astype(float))
        return a1_list, a2_list, self.Z[idx].astype(int)
# Custom collate for dataset
def multi_mask_data_collate(batch):
    """Flatten a batch of (a1_list, a2_list, label) triples into three tensors.

    Every augmented view pair contributes one row to each output tensor, and
    the sample's label is repeated once per pair so all three stay aligned.

    :param batch: iterable of (a1_list, a2_list, z) triples as produced by
        MultiMaskedSparseContrastiveDataset.__getitem__.
    :return: (all_a1, all_a2, all_z) torch tensors.
    """
    # FIX: this module never imported torch although the function uses it,
    # so every call raised NameError; import it locally.
    import torch

    all_a1 = []
    all_a2 = []
    all_z = []
    for a1_list, a2_list, z in batch:
        for a1, a2 in zip(a1_list, a2_list):
            all_a1.append(a1)
            all_a2.append(a2)
            all_z.append(z)
    all_a1 = torch.tensor(all_a1)
    all_a2 = torch.tensor(all_a2)
    all_z = torch.tensor(all_z)
    return all_a1, all_a2, all_z
a35c60be89fafaf9211eb5b99e308863f867f50a | 469 | py | Python | lexer.py | mooseman/pd_c_stuff | b8ee14c977a5560f0eae0e40178fe1db00a7beef | [
"Unlicense"
] | 2 | 2018-01-14T22:00:28.000Z | 2019-01-25T09:48:57.000Z | lexer.py | mooseman/pd_c_stuff | b8ee14c977a5560f0eae0e40178fe1db00a7beef | [
"Unlicense"
] | null | null | null | lexer.py | mooseman/pd_c_stuff | b8ee14c977a5560f0eae0e40178fe1db00a7beef | [
"Unlicense"
] | null | null | null |
# lexer.py
import string
| 14.65625 | 54 | 0.428571 |
# lexer.py
import string
def findtype(str):
    """Classify a token by its first character.

    :param str: token text (parameter name shadows the builtin ``str``; kept
        unchanged for interface compatibility).
    :return: one of "kw_colname", "string", "integer", "comma", "semicolon",
        "equals" or "other".
    """
    if not str:
        # Generalisation: an empty token no longer raises IndexError.
        return "other"
    ch = str[0]
    # FIX: the original called the undefined free functions isalpha()/isdigit()
    # (NameError at runtime); they are methods on the character.
    if ch.isalpha() or ch == '_':
        toktype = "kw_colname"
    elif ch == '"':
        toktype = "string"
    elif ch.isdigit():
        toktype = "integer"
    elif ch == ',':
        toktype = "comma"
    elif ch == ';':
        toktype = "semicolon"
    elif ch == '=':
        toktype = "equals"
    else:
        toktype = "other"
    # FIX: the original computed toktype but never returned it.
    return toktype
| 386 | 0 | 24 |
c26bc5645ceedc7194775a606663b66eb9315ab0 | 518 | py | Python | greedy/1567_maximum_length_of_subarray_with_positive_product/1567_maximum_length_of_subarray_with_positive_product.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 6 | 2019-09-16T01:50:44.000Z | 2020-09-17T08:52:25.000Z | greedy/1567_maximum_length_of_subarray_with_positive_product/1567_maximum_length_of_subarray_with_positive_product.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | null | null | null | greedy/1567_maximum_length_of_subarray_with_positive_product/1567_maximum_length_of_subarray_with_positive_product.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 4 | 2020-02-07T12:43:16.000Z | 2021-04-11T06:38:55.000Z | from typing import List
nums = [1,-2,-3,4]
res = Solution().getMaxLen(nums)
print(res) | 22.521739 | 48 | 0.399614 | from typing import List
class Solution:
    """LeetCode 1567 -- longest subarray whose product is strictly positive."""

    def getMaxLen(self, nums: List[int]) -> int:
        """Return the length of the longest subarray with a positive product.

        Tracks the index of the last zero (a hard window boundary) and the
        indices of negatives seen since that zero: a window's product is
        positive iff it spans an even number of negatives.
        """
        best = 0
        last_zero = -1          # index of the most recent zero (window boundary)
        neg_positions = []      # indices of negative values since last_zero
        for idx, value in enumerate(nums):
            if value == 0:
                # Zero can never sit inside a positive-product window: reset.
                last_zero = idx
                neg_positions = []
                continue
            if value < 0:
                neg_positions.append(idx)
            if len(neg_positions) % 2 == 0:
                # Even negative count: everything after the last zero works.
                best = max(best, idx - last_zero)
            else:
                # Odd count: drop the prefix up to (and including) the first negative.
                best = max(best, idx - neg_positions[0])
        return best
# Ad-hoc smoke test: the longest positive-product subarray of [1, -2, -3, 4]
# is the whole array (two negatives cancel), so this prints 4.
nums = [1,-2,-3,4]
res = Solution().getMaxLen(nums)
print(res)
ed47ed0ae398dc320f9bec037ae9035a4d8ca922 | 2,814 | py | Python | tests/unit/services/job_scheduler/test_target.py | intel-hpdd/-intel-manager-for-lustre | f8a6f61205b42cc62f4bbcb8d81214ad4f215cd6 | [
"MIT"
] | 52 | 2018-09-13T03:26:23.000Z | 2022-03-25T16:51:37.000Z | tests/unit/services/job_scheduler/test_target.py | intel-hpdd/-intel-manager-for-lustre | f8a6f61205b42cc62f4bbcb8d81214ad4f215cd6 | [
"MIT"
] | 1,264 | 2018-06-15T19:50:57.000Z | 2022-03-28T08:19:04.000Z | tests/unit/services/job_scheduler/test_target.py | whamcloud/intel-manager-for-lustre | f8a6f61205b42cc62f4bbcb8d81214ad4f215cd6 | [
"MIT"
] | 27 | 2018-06-18T08:51:59.000Z | 2022-03-16T15:35:34.000Z | from chroma_core.lib.cache import ObjectCache
from chroma_core.models import Nid
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_core.models import ManagedTarget, ManagedMgs, ManagedHost
from tests.unit.chroma_core.helpers import freshen
from tests.unit.chroma_core.helpers import MockAgentRpc
from tests.unit.chroma_core.helpers import create_simple_fs
from tests.unit.services.job_scheduler.job_test_case import JobTestCaseWithHost
| 43.292308 | 112 | 0.704335 | from chroma_core.lib.cache import ObjectCache
from chroma_core.models import Nid
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_core.models import ManagedTarget, ManagedMgs, ManagedHost
from tests.unit.chroma_core.helpers import freshen
from tests.unit.chroma_core.helpers import MockAgentRpc
from tests.unit.chroma_core.helpers import create_simple_fs
from tests.unit.services.job_scheduler.job_test_case import JobTestCaseWithHost
class TestTargetTransitions(JobTestCaseWithHost):
    """State-transition tests for a single managed target (the MGS)."""

    def setUp(self):
        super(TestTargetTransitions, self).setUp()

        (mgt, fs, mdt, ost) = create_simple_fs()
        self.mgt = mgt

        # A freshly created target starts out unmounted.
        self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unmounted")

    def test_start_stop(self):
        # Drive the MGT unmounted -> mounted and check the persisted state
        # after each transition.
        from chroma_core.models import ManagedMgs

        self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "unmounted")
        self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unmounted")
        self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
        self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "mounted")

    def test_lnet_dependency(self):
        """Test that if I try to stop LNet on a host where a target is running,
        stopping the target calculated as a dependency of that"""
        self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
        self.lnet_configuration = self.assertState(self.host.lnet_configuration, "lnet_up")

        consequences = JobSchedulerClient.get_transition_consequences(self.host.lnet_configuration, "lnet_down")
        # Stopping LNet must first schedule exactly one job stopping the target.
        self.assertEqual(len(consequences["dependency_jobs"]), 1)
        self.assertEqual(consequences["dependency_jobs"][0]["class"], "StopTargetJob")
class TestSharedTarget(JobTestCaseWithHost):
    """Target tests run against a simulated two-node failover pair."""

    # Two mock servers so the target can (in principle) fail over between them.
    mock_servers = {
        "pair1": {
            "fqdn": "pair1.mycompany.com",
            "nodename": "test01.pair1.mycompany.com",
            "nids": [Nid.Nid("192.168.0.1", "tcp", 0)],
        },
        "pair2": {
            "fqdn": "pair2.mycompany.com",
            "nodename": "test02.pair2.mycompany.com",
            "nids": [Nid.Nid("192.168.0.2", "tcp", 0)],
        },
    }

    def setUp(self):
        super(TestSharedTarget, self).setUp()

        (mgt, fs, mdt, ost) = create_simple_fs()
        self.mgt = mgt

        # A freshly created target starts out unmounted.
        self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unmounted")

    def test_clean_setup(self):
        # Start it normally the way the API would on creation
        self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, "mounted")
        self.assertEqual(ManagedTarget.objects.get(pk=self.mgt.pk).state, "mounted")
| 1,085 | 1,195 | 46 |
4a73a8d77c759dd0dc752d177d1a230b9d15572c | 3,056 | py | Python | python/subactivity.py | atul107/grammar-activity-prediction | 983f973717884a60ef4b4ecb7bf56e671aefb332 | [
"MIT"
] | 20 | 2018-02-23T02:51:00.000Z | 2021-05-25T20:32:43.000Z | python/subactivity.py | atul107/grammar-activity-prediction | 983f973717884a60ef4b4ecb7bf56e671aefb332 | [
"MIT"
] | 3 | 2019-01-21T07:40:46.000Z | 2019-10-19T18:47:09.000Z | python/subactivity.py | RomeroBarata/grammar-activity-prediction | 983f973717884a60ef4b4ecb7bf56e671aefb332 | [
"MIT"
] | 7 | 2018-02-23T16:08:46.000Z | 2021-01-25T04:48:19.000Z | """
Created on Feb 24, 2017
@author: Siyuan Huang
Process the skeleton, get the input for LSTM.
Input: Aligned human skeleton feature.
"""
import config
import json
import scipy.io
import os
import numpy as np
if __name__ == '__main__':
main()
| 32.168421 | 109 | 0.585733 | """
Created on Feb 24, 2017
@author: Siyuan Huang
Process the skeleton, get the input for LSTM.
Input: Aligned human skeleton feature.
"""
import config
import json
import scipy.io
import os
import numpy as np
def json_to_mat(paths, flipped=0):
    """Load the aggregated action JSON and export skeletons and sequence labels.

    :param paths: config.Paths-like object providing ``metadata_root``.
    :param flipped: 1 to read/write the horizontally-flipped data set.
    """
    if flipped == 1:
        dir_data = paths.metadata_root + 'flipped/all/action.json'
    else:
        dir_data = paths.metadata_root + 'action.json'
    with open(dir_data, 'r') as f:
        action = json.load(f)
    # save skeleton to mat file
    if flipped == 1:
        save_skeleton(action, paths.metadata_root + 'flipped/skeletons')
    else:
        save_skeleton(action, paths.metadata_root + 'skeletons')
    # NOTE(review): the sequence labels are always written under flipped/ even
    # when flipped == 0 -- confirm this asymmetry is intentional.
    sequence_processing(action, paths.metadata_root + 'flipped/sequence_label.json')
def save_skeleton(action, path):
    """Convert each skeleton sequence to an ndarray and dump it as a .mat file.

    Mutates ``action['skeletons']`` in place (lists become ndarrays) and writes
    one ``<sequence_id>.mat`` per sequence under *path*, creating it if needed.
    """
    if not os.path.exists(path):
        os.mkdir(path)
    skeletons = action['skeletons']
    for sequence_id in skeletons:
        arr = np.asarray(skeletons[sequence_id])
        skeletons[sequence_id] = arr
        scipy.io.savemat(path + '/' + sequence_id + '.mat', mdict={'skeleton': arr})
def sequence_processing(action, path):
    """Segment per-frame sub-activity labels into (start, end) runs.

    Walks every sequence in ``action['skeleton_labels']``, emits one record per
    maximal run of identical labels, dumps the records to *path* as JSON, and
    returns them. Also prints the longest run and a per-label count.
    (This module is Python 2: note the print statements below.)

    :param action: dict with 'skeleton_labels': {sequence_id: [label, ...]}.
    :param path: output JSON file for the segment records.
    :return: list of segment dicts with sequence_id/sequence_label/
        start_frame/end_frame/length keys.
    """
    label = []
    index = 0
    frame_num = 0
    frame_max = 0
    subactivity_category = {}
    for sequence_id, sequence_label in action['skeleton_labels'].items():
        start_frame = 0
        label_temp = 'null'
        for i in range(len(sequence_label)):
            sequence_label[i] = str(sequence_label[i])
            if i == 0:
                start_frame = 0
                label_temp = sequence_label[i]
            elif sequence_label[i] != label_temp or i == len(sequence_label) - 1:
                # A run ends either at a label change or at the final frame.
                # NOTE(review): when the final frame itself differs from the
                # running label it is lumped into the previous segment --
                # confirm that edge case is intended.
                label.append({})
                label[index]['sequence_id'] = sequence_id
                label[index]['sequence_label'] = label_temp
                label[index]['start_frame'] = start_frame
                if i == len(sequence_label) - 1:
                    label[index]['end_frame'] = i
                else:
                    label[index]['end_frame'] = i-1
                label[index]['length'] = label[index]['end_frame'] - label[index]['start_frame'] + 1
                if label_temp not in subactivity_category:
                    subactivity_category[label_temp] = 1
                else:
                    subactivity_category[label_temp] += 1
                frame_num += label[index]['length']
                start_frame = i
                label_temp = sequence_label[i]
                if label[index]['length'] > frame_max:
                    frame_max = label[index]['length']
                if label[index]['length'] > 100:
                    # Flag suspiciously long runs for manual inspection.
                    print label[index]['length'], label[index]['sequence_label'], label[index]['sequence_id']
                index += 1
    # print(frame_num)
    # print(float(frame_num)/len(label))
    print frame_max
    print subactivity_category
    with open(path, 'w') as f:
        json.dump(label, f)
    return label
def main():
    """Entry point: export the flipped data set using Huang's local paths."""
    paths = config.Paths()
    paths.path_huang()
    json_to_mat(paths, 1)


if __name__ == '__main__':
    main()
| 2,705 | 0 | 92 |
50fdc608f80998eb86da4279fa9a44c22d46da93 | 6,902 | py | Python | paleomix/pipelines/ngs/parts/statistics.py | jfy133/paleomix | f7f687f6f69b2faedd247a1d289d28657710a8c2 | [
"MIT"
] | null | null | null | paleomix/pipelines/ngs/parts/statistics.py | jfy133/paleomix | f7f687f6f69b2faedd247a1d289d28657710a8c2 | [
"MIT"
] | null | null | null | paleomix/pipelines/ngs/parts/statistics.py | jfy133/paleomix | f7f687f6f69b2faedd247a1d289d28657710a8c2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import collections
from paleomix.common.fileutils import swap_ext
from paleomix.nodes.commands import CoverageNode, MergeCoverageNode, DepthHistogramNode
from paleomix.pipelines.ngs.parts.summary import SummaryTableNode
| 34.51 | 87 | 0.63967 | #!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import collections
from paleomix.common.fileutils import swap_ext
from paleomix.nodes.commands import CoverageNode, MergeCoverageNode, DepthHistogramNode
from paleomix.pipelines.ngs.parts.summary import SummaryTableNode
def add_statistics_nodes(config, makefile, target):
    """Attach the statistics nodes (depth histograms, coverage tables and/or
    the summary table) requested by the makefile's Features to *target*."""
    features = makefile["Options"]["Features"]

    new_nodes = []
    if features["Depths"]:
        new_nodes.extend(_build_depth(config, target, makefile["Prefixes"]))

    wants_summary = features["Summary"]
    if wants_summary or features["Coverage"]:
        coverage = _build_coverage(config, target, wants_summary)
        if wants_summary:
            new_nodes.append(
                _build_summary_node(config, makefile, target, coverage)
            )
        elif features["Coverage"]:
            new_nodes.extend(coverage["Nodes"])

    target.nodes.extend(new_nodes)
def _build_summary_node(config, makefile, target, coverage):
    """Create the node that renders the per-target summary table, wired to
    lane- and library-level coverage plus the merged coverage nodes."""
    cov_by_level = _build_coverage_nodes(target)

    return SummaryTableNode(
        config=config,
        makefile=makefile,
        target=target,
        cov_for_lanes=cov_by_level["Lanes"],
        cov_for_libs=cov_by_level["Libraries"],
        dependencies=coverage["Nodes"],
    )
def _build_depth(config, target, prefixes):
    """Create one DepthHistogramNode per prefix and region-of-interest."""
    depth_nodes = []
    for prefix in target.prefixes:
        for roi_name, roi_filename in _get_roi(prefix, name_prefix="."):
            # Each prefix is expected to map exactly one BAM to its nodes;
            # the destructuring below asserts this invariant.
            ((input_file, dependencies),) = prefix.bams.items()

            output_filename = "%s.%s%s.depths" % (target.name, prefix.name, roi_name)
            depth_nodes.append(
                DepthHistogramNode(
                    target_name=target.name,
                    input_file=input_file,
                    prefix=prefixes[prefix.name],
                    regions_file=roi_filename,
                    output_file=os.path.join(config.destination, output_filename),
                    dependencies=dependencies,
                )
            )
    return depth_nodes
def _aggregate_for_prefix(cov, prefix, roi_name=None, into=None):
    """Merge the {file: node} dicts in *cov* whose key belongs to the given
    prefix (optionally labelled with *roi_name*); every key matches when the
    prefix is None.  Results are merged into *into* when given, otherwise
    into a fresh dict."""
    label = _get_prefix_label(prefix, roi_name)
    merged = {} if into is None else into
    for key, files_and_nodes in cov.items():
        if label is None or key[0] == label:
            merged.update(files_and_nodes)
    return merged
def _build_coverage(config, target, make_summary):
    """Build merged coverage tables for *target*.

    Returns the dict produced by _build_coverage_nodes, extended with a
    "Nodes" key holding the tuple of all nodes that must be scheduled.
    When *make_summary* is truthy, lane-level coverage nodes are included
    as well, since the summary table depends on them.
    """
    merged_nodes = []
    coverage = _build_coverage_nodes(target)
    for prefix in target.prefixes:
        for (roi_name, _) in _get_roi(prefix):
            # One merged coverage table per prefix / region-of-interest.
            label = _get_prefix_label(prefix.name, roi_name)
            if not roi_name:
                postfix = prefix.name
            else:
                postfix = "%s.%s" % (prefix.name, roi_name)
            # Library-level coverage files belonging to this prefix/ROI.
            files_and_nodes = _aggregate_for_prefix(coverage["Libraries"], label)
            output_filename = os.path.join(
                config.destination, "%s.%s.coverage" % (target.name, postfix)
            )
            merged = MergeCoverageNode(
                input_files=list(files_and_nodes.keys()),
                output_file=output_filename,
                dependencies=list(files_and_nodes.values()),
            )
            merged_nodes.append(merged)
    # All library-level coverage nodes, regardless of prefix (prefix=None).
    files_and_nodes = _aggregate_for_prefix(coverage["Libraries"], None)
    if make_summary:
        # The summary table additionally requires lane-level coverage.
        files_and_nodes = _aggregate_for_prefix(
            coverage["Lanes"], None, into=files_and_nodes
        )
    all_nodes = []
    all_nodes.extend(files_and_nodes.values())
    all_nodes.extend(merged_nodes)
    coverage["Nodes"] = tuple(all_nodes)
    return coverage
def _build_coverage_nodes(target):
    """Create coverage-calculation nodes for every lane and library in
    *target*, for the whole genome and for each region-of-interest.

    Returns {"Lanes": {key: {file: node}}, "Libraries": {key: {file: node}}}
    where key is (prefix_label, target_name, sample_name, library_name).
    """
    coverage = {
        "Lanes": collections.defaultdict(dict),
        "Libraries": collections.defaultdict(dict),
    }
    # Shared cache so a given (ROI, BAM) pair only gets one CoverageNode,
    # even when referenced from several lanes/libraries.
    cache = {}
    for prefix in target.prefixes:
        for (roi_name, roi_filename) in _get_roi(prefix):
            prefix_label = _get_prefix_label(prefix.name, roi_name)
            for sample in prefix.samples:
                for library in sample.libraries:
                    key = (prefix_label, target.name, sample.name, library.name)
                    for lane in library.lanes:
                        for bams in lane.bams.values():
                            # NOTE: 'bams' is deliberately rebound to the
                            # resulting {coverage_file: node} mapping here.
                            bams = _build_coverage_nodes_cached(
                                bams, target.name, roi_name, roi_filename, cache
                            )
                            coverage["Lanes"][key].update(bams)
                    bams = _build_coverage_nodes_cached(
                        library.bams, target.name, roi_name, roi_filename, cache
                    )
                    coverage["Libraries"][key].update(bams)
    return coverage
def _build_coverage_nodes_cached(
    files_and_nodes, target_name, roi_name, roi_filename, cache
):
    """Map each input BAM to a CoverageNode, re-using nodes from *cache*
    so that the same (ROI, BAM) pair is only processed once."""
    suffix = ".%s.coverage" % roi_name if roi_name else ".coverage"

    result = {}
    for input_filename, node in files_and_nodes.items():
        output_filename = swap_ext(input_filename, suffix)
        cache_key = (roi_filename, input_filename)
        if cache_key not in cache:
            cache[cache_key] = CoverageNode(
                input_file=input_filename,
                output_file=output_filename,
                target_name=target_name,
                regions_file=roi_filename,
                dependencies=node,
            )
        result[output_filename] = cache[cache_key]
    return result
def _get_roi(prefix, name_prefix=""):
roi = [("", None)]
for (name, path) in prefix.roi.items():
roi.append((name_prefix + name, path))
return roi
def _get_prefix_label(label, roi_name):
if not roi_name:
return label
return "%s:%s" % (label, roi_name)
| 5,316 | 0 | 207 |
eed05e5c56bbefbcff9d2bf7d91a4dc929b1fd82 | 937 | py | Python | tests/commands/test_execute.py | riffard/scikit-validate | c490aead800b15daebd8839ac6365de6eab6014b | [
"Apache-2.0"
] | 2 | 2019-06-12T17:05:47.000Z | 2019-09-25T13:13:31.000Z | tests/commands/test_execute.py | riffard/scikit-validate | c490aead800b15daebd8839ac6365de6eab6014b | [
"Apache-2.0"
] | 23 | 2019-05-21T15:30:11.000Z | 2021-07-08T19:48:06.000Z | tests/commands/test_execute.py | riffard/scikit-validate | c490aead800b15daebd8839ac6365de6eab6014b | [
"Apache-2.0"
] | 2 | 2019-05-21T15:32:21.000Z | 2021-05-17T18:43:36.000Z | import pytest
from skvalidate.commands.execute import print_metrics
@pytest.mark.parametrize('metrics,command', [
(
{'sleep 2':
{
'cpu_time': {
'value': 23,
'unit': 's',
},
'max_rss': {
'value': 200,
'unit': 'MB',
}
}
},
'sleep 2'
),
])
| 24.657895 | 53 | 0.469584 | import pytest
from skvalidate.commands.execute import print_metrics
@pytest.mark.parametrize('metrics,command', [
    (
        {'sleep 2':
            {
                'cpu_time': {
                    'value': 23,
                    'unit': 's',
                },
                'max_rss': {
                    'value': 200,
                    'unit': 'MB',
                }
            }
        },
        'sleep 2'
    ),
])
def test_print_metrics(capsys, metrics, command):
    """print_metrics should report the command, its CPU time and peak RSS."""
    cpu = metrics[command]['cpu_time']
    rss = metrics[command]['max_rss']
    expected = (
        '>>> Ran command: "{0}"\n'
        '>>> in {1}{2} and used {3} {4} of memory.'
    ).format(command, cpu['value'], cpu['unit'], rss['value'], rss['unit'])

    print_metrics(metrics, command)

    assert capsys.readouterr().out == expected + '\n'
| 505 | 0 | 22 |
85a348621d2c81c4d8ec553e246e6062a6030a13 | 3,722 | py | Python | mischief/helpers.py | murtazazaidi/mischief | d84f7bc1bec366ba024cfeedb2bb74228e1b1751 | [
"BSD-2-Clause"
] | null | null | null | mischief/helpers.py | murtazazaidi/mischief | d84f7bc1bec366ba024cfeedb2bb74228e1b1751 | [
"BSD-2-Clause"
] | null | null | null | mischief/helpers.py | murtazazaidi/mischief | d84f7bc1bec366ba024cfeedb2bb74228e1b1751 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
''' For mischief module, all the helper methods are
added in this file, for user to use in core. '''
from datetime import datetime
import tweepy
from .config import PARDON_LIST
def generate_summary_report(api):
    """ Generate Summary Report of Authenticated User (Python 2). """
    # Get the User object for twitter...
    user = api.me()
    # Banner and key profile statistics of the logged-in account.
    print '------------------------'
    print 'Hello ' + user.name + ' (' + user.screen_name + ') !!'
    print '------------------------'
    print datetime.now()
    print 'Following: ' + str(user.friends_count)
    print 'Followers: ' + str(user.followers_count)
    print 'Total Tweets: ' + str(user.statuses_count)
    print 'Location: ' + user.location
    print 'Description: ' + user.description
def generate_follower_list(api):
    """ Generate Complete follower list of Authenticated User """
    # tweepy.Cursor transparently pages through the whole followers endpoint.
    print '------- Followers ---------'
    for friend in tweepy.Cursor(api.followers).items():
        print friend.screen_name
def generate_following_list(api):
""" Generate Complete following list of Authenticated User """
print '------- Following ---------'
for friend in tweepy.Cursor(api.followers).items():
print friend.screen_name
def get_arrogance_list(api, user_name):
    """ Whom you follow and doesn't follow back (ids not in PARDON_LIST). """
    following = api.friends_ids(user_name)
    # Sets give O(1) membership tests; the original scanned the follower
    # list (O(n)) once per followed account.
    follower_set = set(api.followers_ids(user_name))
    pardon_set = set(PARDON_LIST)
    arrogance_list = []
    for user_id in following:
        if user_id not in follower_set and user_id not in pardon_set:
            arrogance_list.append(user_id)
    return arrogance_list
def get_losers_list(api, user_name):
    """ Who follows you and whom you don't follow back. """
    # Sets give O(1) membership tests; the original scanned the following
    # list (O(n)) once per follower.
    following_set = set(api.friends_ids(user_name))
    followers = api.followers_ids(user_name)
    losers_list = []
    for user_id in followers:
        if user_id not in following_set:
            losers_list.append(user_id)
    return losers_list
def clean_following_list(api):
    """ Unfollow those who doesn't follow back """
    user = api.me()
    # Accounts we follow that do not follow back (PARDON_LIST excluded).
    users_to_unfollow = get_arrogance_list(api=api, user_name=user.screen_name)
    for user_id in users_to_unfollow:
        # destroy_friendship returns the User object that was unfollowed.
        unfollowed_user = api.destroy_friendship(user_id)
        print 'Unfollowed: ' + unfollowed_user.screen_name
def generate_report(api):
    """ Generates complete report for Authenticated User """
    # Summary stats first, then full follower and following listings.
    generate_summary_report(api=api)
    generate_follower_list(api=api)
    generate_following_list(api=api)
def get_user(api, user_name, min_details=False):
    """ Get User Details; set min_details=True to skip the raw id lists. """
    print api.get_user(user_name)
    # Unless minimal output was requested, also dump the raw id lists.
    if not min_details:
        print 'Following: ' + str(api.friends_ids(user_name))
        print 'Followed By: ' + str(api.followers_ids(user_name))
def find_people(api, query):
    """ Find People matching the query string. """
    # Pages through the entire user-search result set.
    for user in tweepy.Cursor(api.search_users, q=query).items():
        print user.screen_name
def get_status(api, status_id):
    """ Get Status (tweet) Details by id. """
    status = api.get_status(status_id)
    # Tweet text first, then the full status object for debugging.
    print status.text
    print str(status)
def show_rate_limit(api):
    """ Show Rate Limit (remaining quota per API endpoint). """
    print str(api.rate_limit_status())
def new_tweet(api):
""" New Tweet """
tweet = raw_input('Tweet here buddy: ')
#tweet = tweet + '\nvia #Mischief'
if len(tweet) <= 140:
api.update_status(status=tweet)
else:
print 'Please remove extra ' + len(tweet)-140 + ' characters.'
def show_diff_lists(api, user_name):
    """ Show arrogance and losers lists of a user """
    # Arrogants: accounts user_name follows that do not follow back.
    print ('Arrogance List: ' +
           str(get_arrogance_list(api=api, user_name=user_name)))
    print '\n-----------------------------------\n'
    # Losers: accounts following user_name that are not followed back.
    print 'Losers List: ' + str(get_losers_list(api=api, user_name=user_name))
| 34.462963 | 79 | 0.658248 | # -*- coding: utf-8 -*-
''' For mischief module, all the helper methods are
added in this file, for user to use in core. '''
from datetime import datetime
import tweepy
from .config import PARDON_LIST
def generate_summary_report(api):
""" Generate Summary Report of Authenticated User """
# Get the User object for twitter...
user = api.me()
print '------------------------'
print 'Hello ' + user.name + ' (' + user.screen_name + ') !!'
print '------------------------'
print datetime.now()
print 'Following: ' + str(user.friends_count)
print 'Followers: ' + str(user.followers_count)
print 'Total Tweets: ' + str(user.statuses_count)
print 'Location: ' + user.location
print 'Description: ' + user.description
def generate_follower_list(api):
""" Generate Complete follower list of Authenticated User """
print '------- Followers ---------'
for friend in tweepy.Cursor(api.followers).items():
print friend.screen_name
def generate_following_list(api):
""" Generate Complete following list of Authenticated User """
print '------- Following ---------'
for friend in tweepy.Cursor(api.followers).items():
print friend.screen_name
def get_arrogance_list(api, user_name):
""" Whom you follow and doesn't follow back """
following = api.friends_ids(user_name)
followers = api.followers_ids(user_name)
arrogance_list = []
for user_id in following:
if user_id not in followers and user_id not in PARDON_LIST:
arrogance_list.append(user_id)
return arrogance_list
def get_losers_list(api, user_name):
""" Who follows you and whom you don't follow back """
following = api.friends_ids(user_name)
followers = api.followers_ids(user_name)
losers_list = []
for user_id in followers:
if user_id not in following:
losers_list.append(user_id)
return losers_list
def clean_following_list(api):
""" Unfollow those who doesn't follow back """
user = api.me()
users_to_unfollow = get_arrogance_list(api=api, user_name=user.screen_name)
for user_id in users_to_unfollow:
unfollowed_user = api.destroy_friendship(user_id)
print 'Unfollowed: ' + unfollowed_user.screen_name
def generate_report(api):
""" Generates complete report for Authenticated User """
generate_summary_report(api=api)
generate_follower_list(api=api)
generate_following_list(api=api)
def get_user(api, user_name, min_details=False):
""" Get User Details """
print api.get_user(user_name)
if not min_details:
print 'Following: ' + str(api.friends_ids(user_name))
print 'Followed By: ' + str(api.followers_ids(user_name))
def find_people(api, query):
""" Find People """
for user in tweepy.Cursor(api.search_users, q=query).items():
print user.screen_name
def get_status(api, status_id):
""" Get Status Details """
status = api.get_status(status_id)
print status.text
print str(status)
def show_rate_limit(api):
""" Show Rate Limit """
print str(api.rate_limit_status())
def new_tweet(api):
""" New Tweet """
tweet = raw_input('Tweet here buddy: ')
#tweet = tweet + '\nvia #Mischief'
if len(tweet) <= 140:
api.update_status(status=tweet)
else:
print 'Please remove extra ' + len(tweet)-140 + ' characters.'
def show_diff_lists(api, user_name):
""" Show arrogance and losers lists of a user """
print ('Arrogance List: ' +
str(get_arrogance_list(api=api, user_name=user_name)))
print '\n-----------------------------------\n'
print 'Losers List: ' + str(get_losers_list(api=api, user_name=user_name))
| 0 | 0 | 0 |
8828a3b5f446aad08e6f0c3ab1b7e76454666009 | 4,281 | py | Python | Visualize_Feature_Space/next_viz.py | Guylu/OCT_Interpretability | c0e0107b7ce0204ee16ccd2ec70dfd12411d8c72 | [
"Apache-2.0"
] | null | null | null | Visualize_Feature_Space/next_viz.py | Guylu/OCT_Interpretability | c0e0107b7ce0204ee16ccd2ec70dfd12411d8c72 | [
"Apache-2.0"
] | null | null | null | Visualize_Feature_Space/next_viz.py | Guylu/OCT_Interpretability | c0e0107b7ce0204ee16ccd2ec70dfd12411d8c72 | [
"Apache-2.0"
] | null | null | null | from data_for_tests import Kermany_DataSet
import timm
import wandb
import os
from timm.models.swin_transformer import SwinTransformer
from utils import *
from res_models import *
from model_running import *
from convnext import convnext_base, convnext_large, convnext_xlarge
import numpy as np
import random
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
import torch
import matplotlib.pyplot as plt
from torchvision import transforms as transforms
import cv2 as cv
import cv2
import umap
# Experiment tracking: metrics and 3D point clouds below are logged here.
wandb.init(project="featureViz")
# Seed all RNG sources for repeatability.
# NOTE(review): hash() is salted per interpreter start unless PYTHONHASHSEED
# was exported *before* launch; assigning it below cannot re-seed the
# already-running process -- confirm these seeds are actually stable.
seed = 25
torch.manual_seed(hash("by removing stochasticity") % seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(hash("so runs are repeatable") % seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Kermany OCT dataset split locations (relative to this script).
def_args = dot_dict({
    "train": ["../../../data/kermany/train"],
    "val": ["../../../data/kermany/val"],
    "test": ["../../../data/kermany/test"],
})
# The four OCT diagnosis classes, indexed by label id.
label_names = [
    "NORMAL",
    "CNV",
    "DME",
    "DRUSEN",
]
test_dataset = Kermany_DataSet(def_args.test[0])
# batch_size=1: each iteration yields one image, so every appended feature
# vector / label pair corresponds to a single test sample.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=1,
                                          shuffle=True)
names = ["convnext_base"]  # , "res50", "res101", "res152"]
models = [convnext_base(pretrained=False, num_classes=4)]  # , Resnet50(4), Resnet101(4), Resnet152(4)]
# Inference only: no gradients needed for accuracy or feature extraction.
with torch.no_grad():
    for name, model in zip(names, models):
        embds = []
        colors = []
        # Load the fine-tuned weights saved as "<model name>.pt".
        model.load_state_dict(torch.load(f'{name}.pt', map_location=torch.device(device)))
        model = model.to(device)
        correct = 0.0
        correct_arr = [0.0] * 10
        total = 0.0
        total_arr = [0.0] * 10
        predictions = None
        ground_truth = None
        # Iterate through test dataset
        for i, (images, labels) in enumerate(test_loader):
            if i % 10 == 0:
                print(f'image : {i}\n\n\n')
            images = Variable(images).to(device)
            labels = labels.to(device)
            # Forward pass only to get logits/output
            outputs = model(images)
            # Get predictions from the maximum value
            _, predicted = torch.max(outputs.data, 1)
            # Total number of labels
            total += labels.size(0)
            correct += (predicted == labels).sum()
            # Per-class correct/total counts (only 4 classes are used).
            for label in range(4):
                correct_arr[label] += (((predicted == labels) & (labels == label)).sum())
                total_arr[label] += (labels == label).sum()
            # Accumulate all predictions/labels across the whole test set.
            if i == 0:
                predictions = predicted
                ground_truth = labels
            else:
                predictions = torch.cat((predictions, predicted), 0)
                ground_truth = torch.cat((ground_truth, labels), 0)
            # Running accuracy over the images seen so far.
            accuracy = correct / total
            # pass the image through all the layers
            # visualize 64 features from each layer
            # (although there are more feature maps in the upper layers)
            layer_viz = model.forward_features(images)
            # Flatten the backbone features into one vector per image.
            embds.append(layer_viz.data.flatten().cpu().detach().numpy())
            colors.append(labels.item())
        embds = np.array(embds)
        colors = np.array(colors)
        # Project feature vectors to 3D with UMAP; the 2D scatter below
        # only uses the first two components.
        embedding = umap.UMAP(n_components=3).fit_transform(embds)
        plt.scatter(embedding[:, 0], embedding[:, 1], c=colors)
        plt.gca().legend(tuple(label_names))
        plt.title(f'Feature Map of {name} Network 2_')
        plt.show()
        plt.savefig(f'Feature Map of {name} Network 2_')
        plt.close()
        # Log the 3D embedding (x, y, z, label) as a wandb point cloud.
        point_cloud = np.hstack([embedding, colors.reshape(-1, 1)])
        wandb.log({f"3D_UMAP_FeatureMap_{name}": wandb.Object3D(point_cloud)})
        # Overall and per-class test accuracy.
        metrics = {f'Test Accuracy_{name}': accuracy}
        for label in range(4):
            metrics[f'Test Accuracy_{name}' + label_names[label]] = correct_arr[label] / total_arr[label]
        wandb.log(metrics)
| 35.675 | 106 | 0.623452 | from data_for_tests import Kermany_DataSet
import timm
import wandb
import os
from timm.models.swin_transformer import SwinTransformer
from utils import *
from res_models import *
from model_running import *
from convnext import convnext_base, convnext_large, convnext_xlarge
import numpy as np
import random
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
import torch
import matplotlib.pyplot as plt
from torchvision import transforms as transforms
import cv2 as cv
import cv2
import umap
wandb.init(project="featureViz")
seed = 25
torch.manual_seed(hash("by removing stochasticity") % seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(hash("so runs are repeatable") % seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def_args = dot_dict({
"train": ["../../../data/kermany/train"],
"val": ["../../../data/kermany/val"],
"test": ["../../../data/kermany/test"],
})
label_names = [
"NORMAL",
"CNV",
"DME",
"DRUSEN",
]
test_dataset = Kermany_DataSet(def_args.test[0])
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=1,
shuffle=True)
names = ["convnext_base"] # , "res50", "res101", "res152"]
models = [convnext_base(pretrained=False, num_classes=4)] # , Resnet50(4), Resnet101(4), Resnet152(4)]
with torch.no_grad():
for name, model in zip(names, models):
embds = []
colors = []
model.load_state_dict(torch.load(f'{name}.pt', map_location=torch.device(device)))
model = model.to(device)
correct = 0.0
correct_arr = [0.0] * 10
total = 0.0
total_arr = [0.0] * 10
predictions = None
ground_truth = None
# Iterate through test dataset
for i, (images, labels) in enumerate(test_loader):
if i % 10 == 0:
print(f'image : {i}\n\n\n')
images = Variable(images).to(device)
labels = labels.to(device)
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
total += labels.size(0)
correct += (predicted == labels).sum()
for label in range(4):
correct_arr[label] += (((predicted == labels) & (labels == label)).sum())
total_arr[label] += (labels == label).sum()
if i == 0:
predictions = predicted
ground_truth = labels
else:
predictions = torch.cat((predictions, predicted), 0)
ground_truth = torch.cat((ground_truth, labels), 0)
accuracy = correct / total
# pass the image through all the layers
# visualize 64 features from each layer
# (although there are more feature maps in the upper layers)
layer_viz = model.forward_features(images)
embds.append(layer_viz.data.flatten().cpu().detach().numpy())
colors.append(labels.item())
embds = np.array(embds)
colors = np.array(colors)
embedding = umap.UMAP(n_components=3).fit_transform(embds)
plt.scatter(embedding[:, 0], embedding[:, 1], c=colors)
plt.gca().legend(tuple(label_names))
plt.title(f'Feature Map of {name} Network 2_')
plt.show()
plt.savefig(f'Feature Map of {name} Network 2_')
plt.close()
point_cloud = np.hstack([embedding, colors.reshape(-1, 1)])
wandb.log({f"3D_UMAP_FeatureMap_{name}": wandb.Object3D(point_cloud)})
metrics = {f'Test Accuracy_{name}': accuracy}
for label in range(4):
metrics[f'Test Accuracy_{name}' + label_names[label]] = correct_arr[label] / total_arr[label]
wandb.log(metrics)
| 0 | 0 | 0 |
731234bcc4ff1cf2a24510d50c52c3b392b4e6b8 | 830 | py | Python | ficheros/Ejer3_Troceador/unificador.py | txtbits/daw-python | 5dde1207e2791e90aa5e9ce2b6afc4116129efab | [
"MIT"
] | null | null | null | ficheros/Ejer3_Troceador/unificador.py | txtbits/daw-python | 5dde1207e2791e90aa5e9ce2b6afc4116129efab | [
"MIT"
] | null | null | null | ficheros/Ejer3_Troceador/unificador.py | txtbits/daw-python | 5dde1207e2791e90aa5e9ce2b6afc4116129efab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Escribe un programa troceador.py que pedirá un fichero de una imagen o una canción y la troceará en archivos más pequeños de 521 bytes.
El programa irá numerandolos archivos (trozo1, trozo2, etc) Un segundo programa tomará los archivos troceados y recompondrá el archivo original
'''
import os
cont = 1
fw = open('unido.jpg', 'wb')
namefile = 'trozo' + str(cont)
print namefile
while os.path.exists(namefile):
cont += 1
fr = abrir_trozo(namefile)
reconstruir_fichero(fr,fw)
fr.close()
namefile = 'trozo' + str(cont)
print namefile
fw.close() | 24.411765 | 143 | 0.692771 | # -*- coding: utf-8 -*-
'''
Escribe un programa troceador.py que pedirá un fichero de una imagen o una canción y la troceará en archivos más pequeños de 521 bytes.
El programa irá numerandolos archivos (trozo1, trozo2, etc) Un segundo programa tomará los archivos troceados y recompondrá el archivo original
'''
def abrir_trozo(namefile):
    """Open the chunk file *namefile* for binary reading and return it."""
    return open(namefile, 'rb')
def leer_trozo(f):
    """Return the entire remaining contents of the open chunk file *f*."""
    return f.read()
def reconstruir_fichero(fr, fw):
    """Append the full contents of chunk file *fr* to output file *fw*."""
    fw.write(fr.read())
import os
# Reassemble the chunk files (trozo1, trozo2, ...) into one output file.
cont = 1
fw = open('unido.jpg', 'wb')
namefile = 'trozo' + str(cont)
print namefile
while os.path.exists(namefile):
    # 'cont' is advanced first; 'namefile' still names the chunk being
    # appended and is only rebound at the end of the iteration.
    cont += 1
    fr = abrir_trozo(namefile)
    reconstruir_fichero(fr,fw)
    fr.close()
    namefile = 'trozo' + str(cont)
    print namefile
fw.close()
6a484108bbea8ba6a75a72e113950768f834cca4 | 992 | py | Python | tests/dummy_site_crawler/mongo_backend/site_music/test_music_music_page_mongo_backend.py | MacHu-GWU/crawlib-project | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | [
"MIT"
] | 1 | 2020-06-19T09:45:20.000Z | 2020-06-19T09:45:20.000Z | tests/dummy_site_crawler/mongo_backend/site_music/test_music_music_page_mongo_backend.py | MacHu-GWU/crawlib-project | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | [
"MIT"
] | 1 | 2019-12-27T18:41:21.000Z | 2019-12-27T18:41:21.000Z | tests/dummy_site_crawler/mongo_backend/site_music/test_music_music_page_mongo_backend.py | MacHu-GWU/crawlib-project | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | [
"MIT"
] | 1 | 2018-08-22T01:27:32.000Z | 2018-08-22T01:27:32.000Z | # -*- coding: utf-8 -*-
import pytest
from crawlib.cache import create_cache_here
from crawlib.cached_request import CachedRequest
from crawlib.tests.dummy_site.music.view import (
max_n_artist, max_n_genre,
)
from crawlib.tests.dummy_site_crawler.mongo_backend.s2_music import MusicPage
cache = create_cache_here(__file__)
spider = CachedRequest(cache=cache, log_cache_miss=True, expire=24 * 3600)
spider.use_requests()
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 30.060606 | 80 | 0.716734 | # -*- coding: utf-8 -*-
import pytest
from crawlib.cache import create_cache_here
from crawlib.cached_request import CachedRequest
from crawlib.tests.dummy_site.music.view import (
max_n_artist, max_n_genre,
)
from crawlib.tests.dummy_site_crawler.mongo_backend.s2_music import MusicPage
cache = create_cache_here(__file__)
spider = CachedRequest(cache=cache, log_cache_miss=True, expire=24 * 3600)
spider.use_requests()
class TestMusicPage(object):
    """Checks MusicPage parsing against the cached dummy music site."""

    def test_parse_response(self):
        """A music page yields its title plus one child per artist/genre."""
        music_id = 20
        page = MusicPage(_id=music_id)
        page_url = page.build_url()
        html = spider.request_for_html(page_url)
        result = page.parse_response(page_url, request=None, response=None, html=html)
        expected_title = "Music {} Title".format(music_id)
        assert result.entity_data["title"] == expected_title
        assert len(result.children) == (max_n_artist + max_n_genre)
if __name__ == "__main__":
    # Allow running this test module directly: invoke pytest on this file.
    import os
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
| 372 | 7 | 49 |
6a9565cfc738b7b93347f02898c082863792bb0e | 293 | py | Python | example87.py | augustone/100examples | 94b593b5690a7403e1bf7424047f9a67822d2fd7 | [
"Unlicense"
] | 21 | 2017-05-01T10:23:42.000Z | 2021-09-27T17:11:43.000Z | example87.py | augustone/100examples | 94b593b5690a7403e1bf7424047f9a67822d2fd7 | [
"Unlicense"
] | null | null | null | example87.py | augustone/100examples | 94b593b5690a7403e1bf7424047f9a67822d2fd7 | [
"Unlicense"
] | 6 | 2017-05-26T12:23:26.000Z | 2020-06-30T01:57:36.000Z | #!/usr/bin/python3
__author__ = "yang.dd"
"""
example 087
python是按值传递参数
"""
if __name__ == "__main__":
a = student()
a.x = 3
a.c = 'a'
f(a)
print(a.x, a.c)
| 11.269231 | 26 | 0.440273 | #!/usr/bin/python3
__author__ = "yang.dd"
"""
example 087
Demonstrates Python's argument-passing semantics: arguments are passed as
object references, so attribute mutations made inside a function are
visible to the caller.
"""
if __name__ == "__main__":
    class student:
        # Class-level defaults; the instance assignments below shadow them.
        x = 0
        c = 0
    def f(stu):
        # Rebinding attributes on the passed-in object mutates the caller's
        # object too -- Python passes object references, not copies.
        stu.x = 20
        stu.c = 'c'
    a = student()
    a.x = 3
    a.c = 'a'
    f(a)
    print(a.x, a.c)  # prints "20 c": f's mutations are visible here
| 29 | 21 | 53 |
df4cd5226ecb308f0f0abc9cd824b0c102a8e86c | 4,928 | py | Python | src/dqn_agent.py | plopd/navigation | 5af9911fc980ec44ff7940f34e365534f5d46163 | [
"MIT"
] | null | null | null | src/dqn_agent.py | plopd/navigation | 5af9911fc980ec44ff7940f34e365534f5d46163 | [
"MIT"
] | 12 | 2020-01-28T22:36:14.000Z | 2022-03-11T23:39:37.000Z | src/dqn_agent.py | plopd/navigation | 5af9911fc980ec44ff7940f34e365534f5d46163 | [
"MIT"
] | 1 | 2019-01-26T15:46:34.000Z | 2019-01-26T15:46:34.000Z | import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.model import QNetwork
from utils.replay_buffer import ReplayBuffer
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 64
GAMMA = 0.99
TAU = 1e-3
LR = 5e-4
UPDATE_EVERY = 5 # UPDATE FREQUENCY: how often to update the local network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """Interacts with and learns from the environment via a DQN with a
    local (online) and a target Q-network plus an experience replay buffer."""

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Record one transition in replay memory and, every UPDATE_EVERY
        calls, learn from a random batch once enough samples are stored.

        BUG FIX: this method was missing, so self.memory and self.t_step
        were initialized but never used and learn() was never triggered.

        Params
        ======
            state, action, reward, next_state, done: one SARS' transition
        """
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.
        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()
        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # get targets by doing a forward pass of the next states in the target network
        self.qnetwork_target.eval()
        with torch.no_grad():
            Q_targets_next = torch.max(self.qnetwork_target.forward(next_states), dim=1, keepdim=True)[0]
        # distinguish the cases in which next states are terminal and those which are not
        # for the first case the targets are only the one-step rewards
        Q_targets = rewards + (GAMMA * Q_targets_next * (1 - dones))
        # get outputs by forward pass of states in the local network
        # Note: our qnetwork for a given state all action values for that state.
        # However, for each state we know what action to do, so we gather all corresponding action values
        self.qnetwork_local.train()
        Q_expected = self.qnetwork_local.forward(states).gather(1, actions)
        # compute the mean squared error of the Bellman Eq.
        loss = F.mse_loss(Q_expected, Q_targets)
        # clear gradients buffer from previous iteration
        self.optimizer.zero_grad()
        # backprop error through local network
        loss.backward()
        # update weights of local network by taking one SGD step
        self.optimizer.step()
        # update target network by copying the latest weights of the locat network
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = tau*θ_local + (1 - tau)*θ_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
| 37.333333 | 105 | 0.641437 | import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.model import QNetwork
from utils.replay_buffer import ReplayBuffer
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 64
GAMMA = 0.99
TAU = 1e-3
LR = 5e-4
UPDATE_EVERY = 5 # UPDATE FREQUENCY: how often to update the local network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """Deep Q-Network agent that interacts with and learns from the environment.

    Keeps two Q-networks: ``qnetwork_local`` is optimised every learning step,
    ``qnetwork_target`` is moved slowly toward it (soft update) to stabilise
    the bootstrap targets.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Networks: local (trained) and target (soft-updated copy).
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Time step counter (for learning every UPDATE_EVERY steps).
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Record one transition; trigger learning every UPDATE_EVERY steps."""
        # Save experience in replay memory.
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps, once enough samples are stored.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0 and len(self.memory) > BATCH_SIZE:
            self.learn(self.memory.sample(), GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        # Evaluate the network without tracking gradients.
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection.
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Targets: forward pass of next states through the target network.
        self.qnetwork_target.eval()
        with torch.no_grad():
            Q_targets_next = torch.max(self.qnetwork_target.forward(next_states), dim=1, keepdim=True)[0]
        # Terminal next states contribute only the one-step reward.
        # BUG FIX: use the ``gamma`` argument (was the module-level GAMMA,
        # silently ignoring the discount passed by the caller).
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Expected values: gather the Q-value of the action actually taken.
        self.qnetwork_local.train()
        Q_expected = self.qnetwork_local.forward(states).gather(1, actions)

        # Mean squared error of the Bellman equation, one SGD step.
        loss = F.mse_loss(Q_expected, Q_targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Move the target network toward the freshly updated local network.
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = tau*θ_local + (1 - tau)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
| 504 | 0 | 27 |
0fbea4ba9454a1148735df4a7184746a5f0494c2 | 3,214 | py | Python | api/real_time.py | ayoubelaamri/Speech_Emotion_Recognition | 94d4cff5f3b15cda6d955f38c018ef06457d86c1 | [
"MIT"
] | null | null | null | api/real_time.py | ayoubelaamri/Speech_Emotion_Recognition | 94d4cff5f3b15cda6d955f38c018ef06457d86c1 | [
"MIT"
] | null | null | null | api/real_time.py | ayoubelaamri/Speech_Emotion_Recognition | 94d4cff5f3b15cda6d955f38c018ef06457d86c1 | [
"MIT"
] | null | null | null | import pyaudio
import os
import struct
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import time
from tkinter import TclError
# # to display in separate Tk window
# %matplotlib tk
from keras.models import Sequential, Model, model_from_json
from keras import losses
import keras
import pickle
import wave # !pip install wave
# import os
import sys
import warnings
import librosa
import librosa.display
import IPython.display as ipd # To play sound in the notebook
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
# ignore warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
# def mainloop(self):
# while (self.stream.is_active()): # if using button you can set self.stream to 0 (self.stream = 0), otherwise you can use a stop condition
# time.sleep(0.5)
# return self.emotion
| 33.479167 | 147 | 0.622278 | import pyaudio
import os
import struct
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import time
from tkinter import TclError
# # to display in separate Tk window
# %matplotlib tk
from keras.models import Sequential, Model, model_from_json
from keras import losses
import keras
import pickle
import wave # !pip install wave
# import os
import sys
import warnings
import librosa
import librosa.display
import IPython.display as ipd # To play sound in the notebook
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
# ignore warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
class RealTime(object):
    """Stream microphone audio through PyAudio and classify the emotion of
    each ~2.5 s chunk with a pre-trained 1D Keras model.

    The most recent prediction is kept in ``self.emotion`` (None when idle).
    """

    def __init__(self):
        # Audio capture settings: mono float32 at 44.1 kHz, 2.5 s per chunk.
        self.FORMAT = pyaudio.paFloat32
        self.CHANNELS = 1
        self.RATE = 44100
        self.DURATION = 2.5
        self.CHUNK = 1024
        self.p = None
        self.stream = None
        self.emotion = None
        # loading json and model architecture :
        json_file = open('../model/model_json_1D.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights("../model/model_1D.h5")
        print("Loaded model from disk")
        # the optimiser
        opt = optimizers.RMSprop(learning_rate=0.00001, decay=1e-6)
        loaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
        self.model = loaded_model
        # BUG FIX: load the label binarizer once here instead of re-reading
        # the pickle from disk inside every audio callback.
        with open('../model/labels_1D', 'rb') as infile:
            self.lb = pickle.load(infile)

    def start(self):
        """Open a non-blocking input stream; ``callback`` runs per chunk."""
        print("Start Recording ..")
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=self.FORMAT,
                                  channels=self.CHANNELS,
                                  rate=self.RATE,
                                  input=True,
                                  output=False,
                                  stream_callback=self.callback,
                                  frames_per_buffer=int(self.RATE*self.DURATION))

    def stop(self):
        """Close the stream, release PyAudio and clear the last prediction."""
        print("Stop Recording ..")
        self.stream.close()
        self.p.terminate()
        self.emotion = None

    def callback(self, in_data, frame_count, time_info, flag):
        """PyAudio stream callback: classify one chunk of raw audio bytes."""
        data = np.frombuffer(in_data, dtype=np.float32)
        # data = data / data.max() * np.iinfo(np.int16).max
        # Mean MFCC feature vector.  NOTE(review): axis=0 averages across the
        # 13 coefficients, leaving one value per frame -- confirm this is the
        # intended feature layout for the trained model.
        mfccs = np.mean(librosa.feature.mfcc(y=data, sr=self.RATE, n_mfcc=13),axis=0)
        newdf = pd.DataFrame(mfccs).T
        newdf= np.expand_dims(newdf, axis=2)
        newpred = self.model.predict(newdf, batch_size=16, verbose=1)
        # Map the argmax class index back to its emotion label.
        result = newpred.argmax(axis=1)
        result = result.astype(int).flatten()
        result = (self.lb.inverse_transform((result)))
        self.emotion= result[0]
        print(self.emotion)
        return result[0], pyaudio.paContinue
# def mainloop(self):
# while (self.stream.is_active()): # if using button you can set self.stream to 0 (self.stream = 0), otherwise you can use a stop condition
# time.sleep(0.5)
# return self.emotion
| 2,148 | 2 | 130 |
e67bff13a4fe8a189f4b530ef306940f105926ac | 610 | py | Python | zillow/tests/string_to_long.py | gsathya/dsalgo | 61c89ec597ced3e69bfbb438fd856c8fc5f20aba | [
"MIT"
] | 2 | 2017-02-25T04:05:29.000Z | 2018-05-10T16:54:31.000Z | zillow/tests/string_to_long.py | gsathya/dsalgo | 61c89ec597ced3e69bfbb438fd856c8fc5f20aba | [
"MIT"
] | null | null | null | zillow/tests/string_to_long.py | gsathya/dsalgo | 61c89ec597ced3e69bfbb438fd856c8fc5f20aba | [
"MIT"
] | null | null | null | import unittest
import stringToLong
| 29.047619 | 58 | 0.677049 | import unittest
import stringToLong
class TestStringToLong(unittest.TestCase):
def setUp(self):
self.convert = stringToLong.convert
def test_conversion(self):
self.assertEqual(self.convert("123"), 123)
self.assertEqual(self.convert("-10"), -10)
def test_exceptions(self):
self.assertRaises(TypeError, self.convert, 123)
self.assertRaises(TypeError, self.convert, 123.01)
self.assertRaises(ValueError, self.convert, '')
def test_zeroes(self):
self.assertEqual(self.convert("0"), 0)
self.assertEqual(self.convert("000000"), 0)
| 422 | 21 | 130 |
f9d04eb5232236c2a65d824faca26b8f5fa32d9b | 873 | py | Python | Training/BDT/trainBDT.py | mdkdrnevich/DeepHadTopTagger | 560b51b98e0d9a3a78a0986408ad4d2a30f9960f | [
"MIT"
] | 3 | 2018-04-14T18:07:00.000Z | 2020-07-15T13:21:49.000Z | Training/BDT/trainBDT.py | mdkdrnevich/DeepHadTopTagger | 560b51b98e0d9a3a78a0986408ad4d2a30f9960f | [
"MIT"
] | null | null | null | Training/BDT/trainBDT.py | mdkdrnevich/DeepHadTopTagger | 560b51b98e0d9a3a78a0986408ad4d2a30f9960f | [
"MIT"
] | null | null | null | from sklearn.ensemble import GradientBoostingClassifier
import argparse
import numpy as np
import pickle
# Command-line interface: paths to pre-split train/validation arrays plus an
# optional run name used to label the pickled model file.
parser = argparse.ArgumentParser()
parser.add_argument("training", help="File path to the training set")
parser.add_argument("validation", help="File path to the validation set")
parser.add_argument("-n", "--name", help="Name to help describe the output neural net and standardizer", default="")
args = parser.parse_args()
# Each array row is [label, feature_1, ..., feature_k]; split off column 0.
train = np.load(args.training)
val = np.load(args.validation)
train_x = train[:, 1:]
train_y = train[:, 0]
val_x = val[:, 1:]
val_y = val[:, 0]
# Gradient-boosted decision tree hyperparameters -- presumably hand-tuned
# for this dataset; TODO confirm against the training study.
params = dict(max_depth=8, learning_rate=0.1, n_estimators=1000, min_samples_leaf=0.045, subsample=0.5, min_samples_split=20)
bdt = GradientBoostingClassifier(**params).fit(train_x, train_y)
# NOTE(review): this validation accuracy is computed but its value is
# discarded -- probably meant to be printed or logged.
bdt.score(val_x, val_y)*100
# Persist the fitted classifier for later use.
with open("{}_bdt.pkl".format(args.name), 'wb') as f:
pickle.dump(bdt, f) | 34.92 | 125 | 0.743414 | from sklearn.ensemble import GradientBoostingClassifier
import argparse
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("training", help="File path to the training set")
parser.add_argument("validation", help="File path to the validation set")
parser.add_argument("-n", "--name", help="Name to help describe the output neural net and standardizer", default="")
args = parser.parse_args()
train = np.load(args.training)
val = np.load(args.validation)
train_x = train[:, 1:]
train_y = train[:, 0]
val_x = val[:, 1:]
val_y = val[:, 0]
params = dict(max_depth=8, learning_rate=0.1, n_estimators=1000, min_samples_leaf=0.045, subsample=0.5, min_samples_split=20)
bdt = GradientBoostingClassifier(**params).fit(train_x, train_y)
bdt.score(val_x, val_y)*100
with open("{}_bdt.pkl".format(args.name), 'wb') as f:
pickle.dump(bdt, f) | 0 | 0 | 0 |
f5c08cb2a6b393b4cad9caf35e1a22d1866c76fd | 158 | py | Python | learning/admin.py | CiganOliviu/MyWorkflow | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | [
"Apache-2.0"
] | null | null | null | learning/admin.py | CiganOliviu/MyWorkflow | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | [
"Apache-2.0"
] | null | null | null | learning/admin.py | CiganOliviu/MyWorkflow | 85951c2e8ebdb3e970fcc0b3e24bd319360b852a | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from learning.models import CurrentReadingBook, Course
admin.site.register(CurrentReadingBook)
admin.site.register(Course)
| 22.571429 | 54 | 0.848101 | from django.contrib import admin
from learning.models import CurrentReadingBook, Course
admin.site.register(CurrentReadingBook)
admin.site.register(Course)
| 0 | 0 | 0 |
0bc3eaff513932f747a6c26eea546694f08ce0cf | 1,978 | py | Python | pypaperwallet/ensure_cairolib.py | brianddk/pypaperwallet | e8602cda534b194ee688be1fca7cb1ac1474b853 | [
"Apache-2.0"
] | null | null | null | pypaperwallet/ensure_cairolib.py | brianddk/pypaperwallet | e8602cda534b194ee688be1fca7cb1ac1474b853 | [
"Apache-2.0"
] | null | null | null | pypaperwallet/ensure_cairolib.py | brianddk/pypaperwallet | e8602cda534b194ee688be1fca7cb1ac1474b853 | [
"Apache-2.0"
] | null | null | null | # [rights] Copyright 2020 brianddk at github https://github.com/brianddk
# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0
# [repo] https://github.com/brianddk/pypaperwallet
# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj
# [tipjar] https://gist.github.com/brianddk/3ec16fbf1d008ea290b0
from winreg import OpenKey, EnumKey, QueryValueEx, QueryInfoKey
from winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE
from os.path import exists, isdir, join
from os import listdir
from os import environ
cairo = 'libcairo-2.dll'
if not in_path(cairo):
libdir = find_msys2_cairo(cairo)
if(libdir):
environ["PATH"] += f";{libdir}"
# print(f"added {libdir}")
# else:
# print("ERROR: cairolib not found")
# else:
# print("cairo is in path")
# print("imported ensure")
| 37.320755 | 83 | 0.575834 | # [rights] Copyright 2020 brianddk at github https://github.com/brianddk
# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0
# [repo] https://github.com/brianddk/pypaperwallet
# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj
# [tipjar] https://gist.github.com/brianddk/3ec16fbf1d008ea290b0
from winreg import OpenKey, EnumKey, QueryValueEx, QueryInfoKey
from winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE
from os.path import exists, isdir, join
from os import listdir
from os import environ
cairo = 'libcairo-2.dll'
def find_msys2_cairo(cairo):
    """Scan the Windows uninstall registry (HKCU, then HKLM) for an MSYS2
    installation whose ``<install>/<subdir>/bin`` directory contains *cairo*.

    Returns that bin directory path, or False when nothing matches.
    """
    swpath = r"Software\Microsoft\Windows\CurrentVersion\Uninstall"
    for root in [HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE]:
        with OpenKey(root, swpath) as swkey:
            keys, _, _ = QueryInfoKey(swkey)
            for i in range(0, keys):
                subpath = EnumKey(swkey, i)
                with OpenKey(root, swpath + "\\" + subpath) as subkey:
                    try:
                        name, _ = QueryValueEx(subkey, 'DisplayName')
                        loc, _ = QueryValueEx(subkey, 'InstallLocation')
                        if name.startswith('MSYS2'):
                            dirs = [d for d in listdir(loc) if isdir(join(loc, d))]
                            for d in dirs:
                                libdir = join(loc, d, 'bin')
                                if exists(join(libdir, cairo)):
                                    return libdir
                    except (OSError, AttributeError):
                        # BUG FIX: narrowed from a bare ``except``.  Uninstall
                        # entries missing DisplayName/InstallLocation raise
                        # OSError subclasses from winreg, a bad install path
                        # makes listdir raise OSError, and a non-string
                        # DisplayName raises AttributeError -- those entries
                        # are skipped, while genuine bugs now surface.
                        pass
    return False
def in_path(cairo):
    """Return True if *cairo* exists in any ';'-separated PATH directory."""
    search_dirs = environ["PATH"].split(';')
    return any(exists(join(directory, cairo)) for directory in search_dirs)
# Import-time side effect: if the cairo DLL is not already reachable via
# PATH, locate an MSYS2 install that ships it and append its bin directory,
# so later cairo-based imports can resolve the library.
if not in_path(cairo):
    libdir = find_msys2_cairo(cairo)
    if(libdir):
        environ["PATH"] += f";{libdir}"
# print(f"added {libdir}")
# else:
# print("ERROR: cairolib not found")
# else:
# print("cairo is in path")
# print("imported ensure")
| 1,084 | 0 | 46 |
f8141bc75f696672e07fed5156e8b2c01ee81040 | 1,660 | py | Python | addLight.py | yagidot/Shinkai-Filter | b08ef597e7a47af3ea472800d3a757a9315cd801 | [
"MIT"
] | 27 | 2017-11-15T09:19:13.000Z | 2021-12-30T02:34:10.000Z | addLight.py | yagidot/Shinkai-Filter | b08ef597e7a47af3ea472800d3a757a9315cd801 | [
"MIT"
] | null | null | null | addLight.py | yagidot/Shinkai-Filter | b08ef597e7a47af3ea472800d3a757a9315cd801 | [
"MIT"
] | 3 | 2019-03-22T20:08:14.000Z | 2021-12-27T20:32:31.000Z | from ompc import
@mfunction("out, filter")
| 31.320755 | 77 | 0.363855 | from ompc import
@mfunction("out, filter")
def addLight(src=None, _in=None, M=None, N=None):
    # NOTE(review): machine-translated MATLAB via OMPC (``mfunction``,
    # ``mslice``, ``.lvalue`` assignment, bare ``end`` statements) and a
    # Python 2 ``print`` statement -- this does not run under Python 3 as-is.
    # Summary - add extra light
    # choose light source
    # Display the source image and let the user click the light position.
    imshow(src)
    [y, x] = ginput(1)
    x = floor(x)
    y = floor(y)
    close()
    # mode 0: click inside the image; otherwise mode 2 for the right half,
    # mode 1 for the left half (off-image light source).
    if x < 0 or x > M or y < 0 or y > N:
        if x > M / 2:
            mode = 2
        else:
            mode = 1
        end
    else:
        mode = 0; print mode
    end
    # generate light filter
    # r: beam radius (1/10 of width); n: presumably ray line thickness.
    filter = zeros(M, N)
    r = floor(N / 10)
    n = floor(r / 25)
    if mode == 0:
        # In-image source: blurred bright disc plus lines radiating from it.
        filter = drawCircle(filter, x, y, r)
        filter = imgaussfilt(filter, r / 2)
        filter = drawRadixLine(filter, x, y, n)
        filter = imgaussfilt(filter, r / 10)
    elif mode == 1:
        # Off-image source on the left: parallel beams angled toward the click.
        deltax = x - M
        deltay = y - N / 2
        angle = atan(deltay / deltax)
        filter = drawParallelLine(filter, angle, n * 2)
        filter = imgaussfilt(filter, r / 20)
    end
    # add light
    # Alpha-blend white light over each RGB channel: out = a + (1-a)*in,
    # using 1-based MATLAB-style indexing through OMPC call syntax.
    out = zeros(M, N, 3)
    if mode < 2:
        for i in mslice[1:M]:
            for j in mslice[1:N]:
                a = filter(i, j)
                out(i, j, 1).lvalue = a + (1 - a) * _in(i, j, 1)
                out(i, j, 2).lvalue = a + (1 - a) * _in(i, j, 2)
                out(i, j, 3).lvalue = a + (1 - a) * _in(i, j, 3)
            end
        end
| 1,590 | 0 | 23 |
3a36722a1e01ce5c052c1b5d57f9056027b617b0 | 2,282 | py | Python | registers/urls.py | adonm/it-assets | 8af0e74a59725d4c22694b9108be06feb0da282e | [
"Apache-2.0"
] | null | null | null | registers/urls.py | adonm/it-assets | 8af0e74a59725d4c22694b9108be06feb0da282e | [
"Apache-2.0"
] | null | null | null | registers/urls.py | adonm/it-assets | 8af0e74a59725d4c22694b9108be06feb0da282e | [
"Apache-2.0"
] | null | null | null | from django.urls import path, re_path
from registers import views
# URL routes for the registers app: IT-system exports, incident views, the
# change-request workflow (create/endorse/approve/complete + calendar) and
# standard changes.
urlpatterns = [
    path('itsystem/export/', views.ITSystemExport.as_view(), name='itsystem_export'),
    path('itsystem/discrepancy-report/', views.ITSystemDiscrepancyReport.as_view(), name='itsystem_discrepancy_report'),
    path('incident/', views.IncidentList.as_view(), name='incident_list'),
    path('incident/<int:pk>/', views.IncidentDetail.as_view(), name='incident_detail'),
    path('changerequest/', views.ChangeRequestList.as_view(), name='change_request_list'),
    path('changerequest/<int:pk>/', views.ChangeRequestDetail.as_view(), name='change_request_detail'),
    path('changerequest/<int:pk>/change/', views.ChangeRequestChange.as_view(), name='change_request_change'),
    path('changerequest/<int:pk>/endorse/', views.ChangeRequestEndorse.as_view(), name='change_request_endorse'),
    path('changerequest/<int:pk>/approval/', views.ChangeRequestApproval.as_view(), name='change_request_approval'),
    path('changerequest/<int:pk>/complete/', views.ChangeRequestComplete.as_view(), name='change_request_complete'),
    path('changerequest/add/', views.ChangeRequestCreate.as_view(), name='change_request_create'),
    path('changerequest/create/', views.ChangeRequestCreate.as_view(), name='change_request_create'),
    path('changerequest/create-standard/', views.ChangeRequestCreate.as_view(), name='std_change_request_create', kwargs={'std': True}),
    path('changerequest/create-emergency/', views.ChangeRequestCreate.as_view(), name='emerg_change_request_create', kwargs={'emerg': True}),
    path('changerequest/calendar/', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
    # BUG FIX: raw-string prefixes added -- '\d' in a plain string literal is
    # an invalid escape sequence (DeprecationWarning today, SyntaxError in a
    # future Python).  The matched regex patterns are unchanged.
    re_path(r'^changerequest/calendar/(?P<date>\d{4}-\d{1,2}-\d{1,2})/$', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
    re_path(r'^changerequest/calendar/(?P<date>\d{4}-\d{1,2})/$', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
    path('changerequest/export/', views.ChangeRequestExport.as_view(), name='change_request_export'),
    path('standardchange/', views.StandardChangeList.as_view(), name='standard_change_list'),
    path('standardchange/<int:pk>/', views.StandardChangeDetail.as_view(), name='standard_change_detail'),
]
| 87.769231 | 144 | 0.755039 | from django.urls import path, re_path
from registers import views
# URL routes for the registers app: IT-system exports, incident views, the
# change-request workflow (create/endorse/approve/complete + calendar) and
# standard changes.
urlpatterns = [
    path('itsystem/export/', views.ITSystemExport.as_view(), name='itsystem_export'),
    path('itsystem/discrepancy-report/', views.ITSystemDiscrepancyReport.as_view(), name='itsystem_discrepancy_report'),
    path('incident/', views.IncidentList.as_view(), name='incident_list'),
    path('incident/<int:pk>/', views.IncidentDetail.as_view(), name='incident_detail'),
    path('changerequest/', views.ChangeRequestList.as_view(), name='change_request_list'),
    path('changerequest/<int:pk>/', views.ChangeRequestDetail.as_view(), name='change_request_detail'),
    path('changerequest/<int:pk>/change/', views.ChangeRequestChange.as_view(), name='change_request_change'),
    path('changerequest/<int:pk>/endorse/', views.ChangeRequestEndorse.as_view(), name='change_request_endorse'),
    path('changerequest/<int:pk>/approval/', views.ChangeRequestApproval.as_view(), name='change_request_approval'),
    path('changerequest/<int:pk>/complete/', views.ChangeRequestComplete.as_view(), name='change_request_complete'),
    path('changerequest/add/', views.ChangeRequestCreate.as_view(), name='change_request_create'),
    path('changerequest/create/', views.ChangeRequestCreate.as_view(), name='change_request_create'),
    path('changerequest/create-standard/', views.ChangeRequestCreate.as_view(), name='std_change_request_create', kwargs={'std': True}),
    path('changerequest/create-emergency/', views.ChangeRequestCreate.as_view(), name='emerg_change_request_create', kwargs={'emerg': True}),
    path('changerequest/calendar/', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
    # BUG FIX: raw-string prefixes added -- '\d' in a plain string literal is
    # an invalid escape sequence (DeprecationWarning today, SyntaxError in a
    # future Python).  The matched regex patterns are unchanged.
    re_path(r'^changerequest/calendar/(?P<date>\d{4}-\d{1,2}-\d{1,2})/$', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
    re_path(r'^changerequest/calendar/(?P<date>\d{4}-\d{1,2})/$', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
    path('changerequest/export/', views.ChangeRequestExport.as_view(), name='change_request_export'),
    path('standardchange/', views.StandardChangeList.as_view(), name='standard_change_list'),
    path('standardchange/<int:pk>/', views.StandardChangeDetail.as_view(), name='standard_change_detail'),
]
| 0 | 0 | 0 |
c6292ce0c58aeeac8e2593fa9a4a4c420efe2a6c | 707 | py | Python | search.py | kartik1000/what-slot | 205b03d2d0082dfdb5e18b130330cdde80f58e41 | [
"MIT"
] | 16 | 2018-09-02T15:29:20.000Z | 2019-05-30T10:05:30.000Z | search.py | kartik1000/what-slot | 205b03d2d0082dfdb5e18b130330cdde80f58e41 | [
"MIT"
] | 28 | 2018-08-25T11:51:25.000Z | 2020-03-03T08:44:29.000Z | search.py | kartik1000/what-slot | 205b03d2d0082dfdb5e18b130330cdde80f58e41 | [
"MIT"
] | 18 | 2018-12-01T20:15:49.000Z | 2020-01-02T09:15:29.000Z | import json, re
dataFileName = 'courses.json'
slotFileName = 'slots.1.txt'
if __name__ == '__main__':
print( searchData( input('Search for: ') ) ) | 24.37931 | 93 | 0.663366 | import json, re
dataFileName = 'courses.json'
slotFileName = 'slots.1.txt'
def slot2Time(slot):
    """Return the time tokens for *slot* from the slot file.

    Scans the file for the first line starting with the slot code and
    returns the remaining whitespace-separated fields; [] when absent.
    """
    with open(slotFileName, 'r') as handle:
        matching = next((ln for ln in handle if ln.startswith(slot)), None)
    return matching.split()[1:] if matching is not None else []
def searchData(query):
    """Return all courses whose Name matches *query* (case-insensitive regex),
    with each course's 'Slot' string expanded into concrete time tokens."""
    with open(dataFileName, 'r') as dataFile:
        catalogue = json.load(dataFile)
    matched = []
    for course in catalogue:
        if not re.search(query, course['Name'], re.IGNORECASE):
            continue
        # Replace the comma-separated slot codes with their resolved times.
        times = []
        for raw_slot in course['Data']['Slot'].split(','):
            times.extend(slot2Time(raw_slot.strip()))
        course['Data']['Slot'] = times
        matched.append(course)
    return matched
if __name__ == '__main__':
print( searchData( input('Search for: ') ) ) | 511 | 0 | 47 |
25fa983a14a4ffaca35c95ec9d79e2db523e7bba | 2,395 | py | Python | distance/_impl/fragments/levelsettings.py | ferreum/distanceutils | a80b833e0c60afa60f0c8cb1aa6254f0da4f3bf6 | [
"MIT"
] | 6 | 2017-10-10T02:56:19.000Z | 2018-09-12T17:41:04.000Z | distance/_impl/fragments/levelsettings.py | ferreum/distanceutils | a80b833e0c60afa60f0c8cb1aa6254f0da4f3bf6 | [
"MIT"
] | null | null | null | distance/_impl/fragments/levelsettings.py | ferreum/distanceutils | a80b833e0c60afa60f0c8cb1aa6254f0da4f3bf6 | [
"MIT"
] | null | null | null |
from construct import (
Struct, Sequence,
PrefixedArray, If, Computed,
this,
)
from distance.bytes import Magic, Section
from distance.construct import (
BaseConstructFragment,
Int, UInt, Bytes, Byte, Float,
DstString, Remainder,
)
from distance.classes import CollectorGroup
from distance._common import (
ModesMapperProperty,
MedalTimesMapperProperty,
MedalScoresMapperProperty,
)
from distance._impl.level_content.levelsettings_base import BaseLevelSettings
Classes = CollectorGroup()
@Classes.fragments.fragment(any_version=True)
# vim:set sw=4 et:
| 26.032609 | 84 | 0.6 |
from construct import (
Struct, Sequence,
PrefixedArray, If, Computed,
this,
)
from distance.bytes import Magic, Section
from distance.construct import (
BaseConstructFragment,
Int, UInt, Bytes, Byte, Float,
DstString, Remainder,
)
from distance.classes import CollectorGroup
from distance._common import (
ModesMapperProperty,
MedalTimesMapperProperty,
MedalScoresMapperProperty,
)
from distance._impl.level_content.levelsettings_base import BaseLevelSettings
Classes = CollectorGroup()
@Classes.fragments.fragment(any_version=True)
class LevelSettingsFragment(BaseLevelSettings, BaseConstructFragment):
    """Binary layout of a level-settings fragment (section Magic[2]/0x52).

    Parsed declaratively with ``construct``; the field order in
    ``_construct_`` mirrors the on-disk format and must not be rearranged.
    ``unk_*`` fields are opaque blobs whose meaning is unknown; their sizes
    vary with the section version.
    """

    base_container = Section.base(Magic[2], 0x52)
    is_interesting = True

    def get_unk_2_size(this):
        # Size in bytes of the unk_2 blob for a given fragment version.
        # ``this`` is the construct parsing context (the function is deleted
        # from the class namespace below, so it never acts as a method).
        version = this.version
        if version <= 3:
            return 57
        elif version == 4:
            return 141
        elif version == 5:
            return 172
        elif 6 <= version < 25:
            # confirmed only for v6..v9
            return 176
        else:
            # confirmed for v25..v26
            return 231

    _construct_ = Struct(
        # Version comes from the enclosing section header, not the payload.
        'version' / Computed(this._params.sec.version),
        'unk_0' / Bytes(8),
        'name' / DstString,
        'description' / If(this.version >= 25, DstString),
        'author_name' / If(this.version >= 25, DstString),
        'unk_1' / Bytes(4),
        # Per-mode enable flags, prefixed with the entry count.
        'modes_list' / PrefixedArray(UInt, Struct(
            'mode' / UInt,
            'enabled' / Byte,
        )),
        'music_id' / UInt,
        'skybox_name' / If(this.version <= 3, DstString),
        'unk_2' / Bytes(get_unk_2_size),
        # confirmed for v25..26
        'background_layer' / If(this.version >= 25, DstString),
        # confirmed for v25..26
        'unk_3' / If(this.version >= 25, Bytes(61)),
        # Four medal tiers, each a (time, score) pair.
        'medals' / Struct(
            'time' / Float,
            'score' / Int,
        )[4],
        'abilities' / If(this.version >= 1, Sequence(Byte, Byte, Byte, Byte, Byte)),
        'difficulty' / If(this.version >= 2, UInt),
        # Whatever bytes remain in the section payload.
        'unk_4' / Remainder,
    )

    # Extra attributes exposed on instances with these defaults.
    _add_fields_ = dict(
        modes = (),
        medal_times = None,
        medal_scores = None,
    )

    # Remove the helper so it does not become an instance method.
    del get_unk_2_size

    # Friendly mapped views over the raw construct fields.
    modes = ModesMapperProperty('modes_list')
    medal_times = MedalTimesMapperProperty('medals')
    medal_scores = MedalScoresMapperProperty('medals')
# vim:set sw=4 et:
| 351 | 1,424 | 22 |
a19821d89d80acb2bfb878b50dfa778b7b859103 | 416 | py | Python | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/camera_tasks/migrations/0004_cameratask_enable_tracking.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | [
"MIT"
] | 176 | 2019-07-03T00:20:15.000Z | 2022-03-14T07:51:22.000Z | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/camera_tasks/migrations/0004_cameratask_enable_tracking.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | [
"MIT"
] | 121 | 2019-06-24T20:47:27.000Z | 2022-03-28T02:16:18.000Z | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/camera_tasks/migrations/0004_cameratask_enable_tracking.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | [
"MIT"
] | 144 | 2019-06-18T18:48:43.000Z | 2022-03-31T12:14:46.000Z | # Generated by Django 3.0.8 on 2020-11-10 10:01
from django.db import migrations, models
| 21.894737 | 63 | 0.627404 | # Generated by Django 3.0.8 on 2020-11-10 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("camera_tasks", "0003_cameratask_recording_duration"),
]
operations = [
migrations.AddField(
model_name="cameratask",
name="enable_tracking",
field=models.BooleanField(default=False),
),
]
| 0 | 302 | 23 |
ba25a4f98f3229e554dfe5317737d1d38a609af0 | 430 | py | Python | 210208/hw.py | Floou/python-adv | 9e2c518ab48eb4e9744c405470525f8931702525 | [
"Apache-2.0"
] | null | null | null | 210208/hw.py | Floou/python-adv | 9e2c518ab48eb4e9744c405470525f8931702525 | [
"Apache-2.0"
] | null | null | null | 210208/hw.py | Floou/python-adv | 9e2c518ab48eb4e9744c405470525f8931702525 | [
"Apache-2.0"
] | null | null | null | import random
import re
# Matches runs of Cyrillic letters; used (via .match) to keep only tokens
# that begin with a Russian word.
RE_PROVERKA = re.compile(r'[а-яА-Я]+')
f = open('text', 'r', encoding='utf-8')
text = f.read()
print(text)
# Punctuation/whitespace characters replaced with spaces before splitting.
bad_chars = [';', ':', '?', '.', ',', '!', '~', '\n', '…', '-']
for i in bad_chars:
    text = text.replace(i, ' ')
text = text.split(" ")
# Keep only tokens starting with a Cyrillic letter (match anchors at the
# token start, so "abcжи" is dropped but "жиabc" is kept).
slova = [w for w in filter(RE_PROVERKA.match, text)]
# NOTE(review): sep has no effect here -- print receives a single argument
# (the whole list); printing one word per line would need print(*slova, sep='\n').
print(slova, sep='\n')
# Print 20 randomly chosen words (repeats possible).
i = 0
while i != 20:
    i += 1
    print(i, random.choice(slova))
f.close() | 17.2 | 63 | 0.539535 | import random
import re
RE_PROVERKA = re.compile(r'[а-яА-Я]+')
f = open('text', 'r', encoding='utf-8')
text = f.read()
print(text)
bad_chars = [';', ':', '?', '.', ',', '!', '~', '\n', '…', '-']
for i in bad_chars:
text = text.replace(i, ' ')
text = text.split(" ")
slova = [w for w in filter(RE_PROVERKA.match, text)]
print(slova, sep='\n')
i = 0
while i != 20:
i += 1
print(i, random.choice(slova))
f.close() | 0 | 0 | 0 |
8a2fbefa4a6065d79afe2374d627f3b28a81bb41 | 5,723 | py | Python | xbaydns/tests/initconftest.py | bopopescu/xbaydns-2 | 606e8d9848d42fe5c0c5847a5a0e62044f58e486 | [
"BSD-2-Clause"
] | 1 | 2019-01-16T05:20:51.000Z | 2019-01-16T05:20:51.000Z | xbaydns/tests/initconftest.py | bopopescu/xbaydns-2 | 606e8d9848d42fe5c0c5847a5a0e62044f58e486 | [
"BSD-2-Clause"
] | null | null | null | xbaydns/tests/initconftest.py | bopopescu/xbaydns-2 | 606e8d9848d42fe5c0c5847a5a0e62044f58e486 | [
"BSD-2-Clause"
] | 3 | 2015-12-29T11:22:28.000Z | 2020-07-26T04:11:28.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
initconftest.py
Created by 黄 冬 on 2007-11-19.
Copyright (c) 2007 __MyCompanyName__. All rights reserved.
"""
import basetest
import logging.config
import os
import pwd
import shutil
import tempfile
import time
import unittest
log = logging.getLogger('xbaydns.tests.initconftest')
#logging.basicConfig(level=logging.DEBUG)
from xbaydns.tools import initconf
from xbaydns.conf import sysconf
from xbaydns.utils import shtools
def suite():
"""集合测试用例"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InitConfTest, 'test'))
return suite
"""
单独运行command的测试用例
"""
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 35.76875 | 142 | 0.643369 | #!/usr/bin/env python
# encoding: utf-8
"""
initconftest.py
Created by 黄 冬 on 2007-11-19.
Copyright (c) 2007 __MyCompanyName__. All rights reserved.
"""
import basetest
import logging.config
import os
import pwd
import shutil
import tempfile
import time
import unittest
log = logging.getLogger('xbaydns.tests.initconftest')
#logging.basicConfig(level=logging.DEBUG)
from xbaydns.tools import initconf
from xbaydns.conf import sysconf
from xbaydns.utils import shtools
class InitConfTest(basetest.BaseTestCase):
def setUp(self):
"""初始化测试环境"""
ostype = os.uname()[0].lower()
self.named_uid = sysconf.named_uid
self.basedir = os.path.realpath(tempfile.mkdtemp(suffix='xbaydns_test'))
basetest.BaseTestCase.setUp(self)
def tearDown(self):
"""清洁测试环境"""
shutil.rmtree(self.basedir)
basetest.BaseTestCase.tearDown(self)
def test_acl_file(self):
"""测试acl_file调用"""
acl_content = initconf.acl_file( dict(cnc=('192.168.1.1', '202.106.1.1')) )
#log.debug("acl content is:" + acl_content)
self.assertEqual(acl_content,'acl "cnc" { 192.168.1.1; 202.106.1.1; };\n')
def _create_dir(self, *path):
cur = self.basedir
for part in path:
cur = os.path.join(cur, part)
os.mkdir(cur)
return cur
def _create_file(self, *path):
filename = os.path.join(self.basedir, *path)
fd = file(filename, 'w')
fd.close()
return filename[len(self.basedir) + 1:]
def test_muti_acl_file(self):
"""test muti record acl acl_file"""
acl_content = initconf.acl_file( dict(
cnc=('1.1.1.1','2.2.2.2','3.3.3.3'),
telcom=('4.4.4.4','5.5.5.5') ))
self.assertEqual(acl_content,'acl "telcom" { 4.4.4.4; 5.5.5.5; };\nacl "cnc" { 1.1.1.1; 2.2.2.2; 3.3.3.3; };\n')
def test_defaultzone_file(self):
"""defaultzone_file test"""
defaultzone = initconf.defaultzone_file()
#log.debug("defaultzone is:%s"%defaultzone)
self.assertTrue( 'zone "." { type hint; file "named.root"; };' in defaultzone )
def test_error_default_file(self):
curset = initconf.TMPL_DEFAULTZONE
initconf.TMPL_DEFAULTZONE = "中华人民共和国"
returncode = initconf.defaultzone_file()
initconf.TMPL_DEFAULTZONE = curset
self.assertFalse( returncode )
def test_named_root_file(self):
"""named_root_file test"""
rootfile = initconf.named_root_file()
self.assertTrue('A.ROOT-SERVERS.NET. 3600000 A' in rootfile )
def test_error_named_root_file(self):
"""对于named_root_file的错误调用测试"""
curset = initconf.TMPL_NAMEDROOT
initconf.TMPL_NAMEDROOT = "中华人民共和国"
returncode = initconf.named_root_file()
initconf.TMPL_NAMEDROOT = curset
self.assertFalse(returncode)
def test_error_backup_conf(self):
"""对于backup_conf的错误调用测试"""
self.assertFalse( initconf.backup_conf("中华人民共和国","中华人民共和国") )
def test_backup_conf(self):
"""测试backup_conf的调用"""
tmpdir = self._create_dir("backuptest")
self.assertTrue( initconf.backup_conf("/etc",tmpdir) )
conffilename = "namedconf_%s.tar.gz"%(time.strftime("%y%m%d%H%M"))
log.debug("backup file is:%s"%(os.path.join(tmpdir,conffilename)))
self.assertTrue( os.path.isfile(os.path.join(tmpdir,conffilename)) )
def test_create_destdir(self):
"""测试create_destdir的调用"""
tmpdir = initconf.create_destdir()
log.debug("create tmpdir is:%s"%tmpdir)
self.assertTrue( os.path.isdir("%s/%s/acl"%(tmpdir, sysconf.namedconf)) )
self.assertTrue( os.path.isdir("%s/%s/dynamic"%(tmpdir, sysconf.namedconf)) )
self.assertTrue( os.path.isdir("%s/%s/master"%(tmpdir, sysconf.namedconf)) )
self.assertTrue( os.path.isdir("%s/%s/slave"%(tmpdir, sysconf.namedconf)) )
shutil.rmtree(tmpdir)
def test_create_conf(self):
"""测试create_conf的调用"""
tmpdir = initconf.create_destdir()
self.assertTrue( initconf.create_conf(tmpdir) )
shutil.rmtree(tmpdir)
def test_namedconf_file(self):
"""测试namedconf_file的调用"""
namedconf = initconf.namedconf_file(dict(acl='acl/acldef.conf', defzone='defaultzone.conf'))
#log.debug("namedconf gen to:%s"%namedconf)
self.assertTrue('include "defaultzone.conf";' in namedconf)
self.assertTrue('include "acl/acldef.conf";' in namedconf)
def test_install_conf(self):
"""测试install_conf的调用"""
tmpdir = initconf.create_destdir()
chrootdir = os.path.realpath(self._create_dir("namedchroot"))
real_confdir = os.path.join(chrootdir, "etc/namedconf")
self.assertTrue( initconf.create_conf(tmpdir) )
self.assertTrue(initconf.install_conf(tmpdir, chrootdir) )
    def test_check_conf(self):
        '''Validate the generated configuration syntax with named-checkconf.'''
        tmpdir = initconf.create_destdir()
        self.assertTrue(initconf.create_conf(tmpdir))
        # Run named-checkconf chrooted (-t) into the generated tree; exit
        # status 0 means the configuration parses.  Requires the
        # named-checkconf binary on PATH; output is captured in /tmp/hd.txt.
        # NOTE(review): tmpdir is never removed here, unlike the sibling tests.
        ret = shtools.execute(executable = "named-checkconf", args = "-t %s /%s/named.conf"%(tmpdir, sysconf.namedconf), output="/tmp/hd.txt")
        self.assertEqual(ret, 0)
def test_main(self):
"""测试main调用"""
cruroot = sysconf.chroot_path
sysconf.chroot_path = self.basedir
returncode = initconf.main()
sysconf.chroot_path = cruroot
self.assertTrue(returncode == 0 )
def suite():
    """Collect this module's test cases into a single TestSuite."""
    # The local used to be named `suite`, shadowing this function; renamed.
    # loadTestsFromTestCase selects the same 'test*' methods as the old
    # unittest.makeSuite(InitConfTest, 'test'), which is deprecated and
    # removed in Python 3.13.
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(InitConfTest))
    return test_suite
"""
单独运行command的测试用例
"""
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 557 | 4,634 | 23 |
9735e992a0b4ca6af3f5a177fa2499b3a3abf9f0 | 95 | py | Python | djangocms_url_manager/__init__.py | crydotsnake/djangocms-url-manager | e5e83c686d9aae0673ce66591f383ec94bef536a | [
"BSD-3-Clause"
] | null | null | null | djangocms_url_manager/__init__.py | crydotsnake/djangocms-url-manager | e5e83c686d9aae0673ce66591f383ec94bef536a | [
"BSD-3-Clause"
] | null | null | null | djangocms_url_manager/__init__.py | crydotsnake/djangocms-url-manager | e5e83c686d9aae0673ce66591f383ec94bef536a | [
"BSD-3-Clause"
] | null | null | null | __version__ = "1.0.0.dev1"
default_app_config = "djangocms_url_manager.apps.UrlManagerConfig"
| 23.75 | 66 | 0.810526 | __version__ = "1.0.0.dev1"
default_app_config = "djangocms_url_manager.apps.UrlManagerConfig"
| 0 | 0 | 0 |
cef991bb172cbaa66aaad0735a2f7ac60e9311c7 | 619 | py | Python | emo/migrations/0001_initial.py | desmondyeoh/cog-csi-assignment | 1995419c7ffcb6c620c7eccd19afe67543631c08 | [
"MIT"
] | 2 | 2020-10-10T13:20:35.000Z | 2021-11-08T12:46:01.000Z | emo/migrations/0001_initial.py | desmondyeoh/cog-csi-assignment | 1995419c7ffcb6c620c7eccd19afe67543631c08 | [
"MIT"
] | 3 | 2020-06-05T18:23:45.000Z | 2021-06-10T20:27:22.000Z | emo/migrations/0001_initial.py | desmondyeoh/cog-csi-assignment | 1995419c7ffcb6c620c7eccd19afe67543631c08 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-05-07 17:37
from django.db import migrations, models
| 24.76 | 100 | 0.544426 | # Generated by Django 2.0.5 on 2018-05-07 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Session_data table,
    # keyed by session_id, holding per-session experiment state.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Session_data',
            fields=[
                ('session_id', models.CharField(max_length=200, primary_key=True, serialize=False)),
                ('usr_data', models.TextField()),
                ('total_img', models.IntegerField()),
                ('spp', models.IntegerField()),
                ('lock', models.IntegerField()),
            ],
        ),
    ]
| 0 | 505 | 23 |
be1f99f5b5f428fbe7d45afa635a0f02e78bac1f | 1,684 | py | Python | cscs-checks/apps/python/numpy_check.py | CLIP-HPC/reframe | eddf0b2508c2ba644e4c3aba5652e57fddfde106 | [
"BSD-3-Clause"
] | 167 | 2017-11-14T20:37:28.000Z | 2022-03-31T11:19:18.000Z | cscs-checks/apps/python/numpy_check.py | CLIP-HPC/reframe | eddf0b2508c2ba644e4c3aba5652e57fddfde106 | [
"BSD-3-Clause"
] | 2,190 | 2017-06-14T12:48:13.000Z | 2022-03-31T16:09:51.000Z | cscs-checks/apps/python/numpy_check.py | CLIP-HPC/reframe | eddf0b2508c2ba644e4c3aba5652e57fddfde106 | [
"BSD-3-Clause"
] | 83 | 2017-05-29T19:12:16.000Z | 2022-03-18T09:49:21.000Z | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
from hpctestlib.python.numpy.numpy_ops import numpy_ops_check
@rfm.simple_test
| 32.384615 | 76 | 0.573634 | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
from hpctestlib.python.numpy.numpy_ops import numpy_ops_check
@rfm.simple_test
class cscs_numpy_test(numpy_ops_check):
    """Site configuration for the generic NumPy operations benchmark.

    Restricts the hpctestlib ``numpy_ops_check`` to the daint/dom GPU and
    multicore partitions and supplies per-microarchitecture references.
    """
    valid_prog_environs = ['builtin']
    valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
    modules = ['numpy']
    num_tasks_per_node = 1
    use_multithreading = False
    # Reference timings in seconds, keyed by '<cpu-arch>@<num-cores>c'.
    all_ref = {
        'haswell@12c': {
            'dot': (0.4, None, 0.05, 's'),
            'svd': (0.37, None, 0.05, 's'),
            'cholesky': (0.12, None, 0.05, 's'),
            'eigendec': (3.5, None, 0.05, 's'),
            'inv': (0.21, None, 0.05, 's'),
        },
        'broadwell@36c': {
            'dot': (0.3, None, 0.05, 's'),
            'svd': (0.35, None, 0.05, 's'),
            'cholesky': (0.1, None, 0.05, 's'),
            'eigendec': (4.14, None, 0.05, 's'),
            'inv': (0.16, None, 0.05, 's'),
        }
    }
    tags = {'production'}
    maintainers = ['RS', 'TR']
    @run_after('setup')
    def set_num_cpus_per_task(self):
        # Use every core of the partition's processor and tell OpenMP so.
        self.num_cpus_per_task = self.current_partition.processor.num_cores
        self.variables = {
            'OMP_NUM_THREADS': str(self.num_cpus_per_task)
        }
    @run_before('performance')
    def set_perf_ref(self):
        # Pick the reference set matching this partition's processor.
        arch = self.current_partition.processor.arch
        pname = self.current_partition.fullname
        num_cores = self.current_partition.processor.num_cores
        self.reference = {
            pname: self.all_ref[f'{arch}@{num_cores}c']
        }
| 442 | 922 | 22 |
490282d4715af24eafeddbdc0fd067ace0eaafd4 | 2,475 | py | Python | Preprocessing/extract_nordic_tweets.py | centre-for-humanities-computing/hope_dataprep | 77e23256e8bd429b904b15d236b2110475c51bbf | [
"MIT"
] | null | null | null | Preprocessing/extract_nordic_tweets.py | centre-for-humanities-computing/hope_dataprep | 77e23256e8bd429b904b15d236b2110475c51bbf | [
"MIT"
] | null | null | null | Preprocessing/extract_nordic_tweets.py | centre-for-humanities-computing/hope_dataprep | 77e23256e8bd429b904b15d236b2110475c51bbf | [
"MIT"
] | null | null | null | import os
import ndjson
import pandas as pd
"""
Makes daily language specific files in correct format
"""
# define languages to extract
langs = ["da", "no", "sv"]
# make a function that transforms a pandas DF to ndjson format (found on stackoverflow)
# List file paths from folders with raw data
raw1 = ["/data/001_twitter_hope/raw/nordic-tweets/" + f for f in
os.listdir("/data/001_twitter_hope/raw/nordic-tweets")
if f.endswith(".tsv")]
raw2 = ["/data/001_twitter_hope/raw/nordic-tweets-2/" + f
for f in os.listdir("/data/001_twitter_hope/raw/nordic-tweets-2")
if f.endswith(".tsv")]
# combine file paths
raw_files = raw1 + raw2
# read in logfile to see which files have already been processed
logfile = "processed_files_log/nordic_language_extracted.ndjson"
with open(logfile) as log:
done = ndjson.load(log)
# keep only files that have not been processed yet + sort
raw_files = [f for f in raw_files if f not in done]
raw_files.sort()
# define which variables to keep in the output format
column_list = ['id', 'created_at', 'from_user_id', 'text', 'lang', 'favorite_count', 'retweet_count']
# loop through new filepaths
for path_ in raw_files:
# extract identifiers from the file path
id = path_[-14:-4]
year = id[:4]
month = id[5:7]
day = id[8:10]
print(f"Processing {year}{month}{day}")
# load raw data in tsv format
df = pd.read_csv(path_, sep='\t', skipinitialspace=True, usecols = column_list)
# loop through the desired language list
for language in langs:
print(f"extract {language}")
# filter data for the desired language using twitter lang tag
df_lang = df[df.lang.eq(language)]
# convert data to ndjson and write it down
print("Writing down...")
df_js = iterndjson(df_lang)
output_path=f"/data/001_twitter_hope/preprocessed/{language}/td_{year}{month}{day}_{language}.ndjson"
with open(output_path, "w") as f:
ndjson.dump(df_js, f)
# Add newly processed filenames to the log file
with open(logfile, "a") as out:
writer = ndjson.writer(out, ensure_ascii=False)
for line in raw_files:
writer.writerow(line) | 30.555556 | 109 | 0.663838 | import os
import ndjson
import pandas as pd
"""
Makes daily language specific files in correct format
"""
# define languages to extract
langs = ["da", "no", "sv"]
# make a function that transforms a pandas DF to ndjson format (found on stackoverflow)
def iterndjson(df):
    """Convert a DataFrame into a list of per-row dicts (ndjson records).

    :param df: source DataFrame
    :return: list with one ``{column: value}`` dict per row
    """
    # pandas' to_dict(orient="records") is the idiomatic equivalent of the
    # old manual iterrows()/next()/StopIteration loop (which also shadowed
    # the imported `ndjson` module with a local list).
    return df.to_dict(orient="records")
# List the raw .tsv dumps from both nordic-tweets folders.
raw1 = ["/data/001_twitter_hope/raw/nordic-tweets/" + f for f in
        os.listdir("/data/001_twitter_hope/raw/nordic-tweets")
        if f.endswith(".tsv")]
raw2 = ["/data/001_twitter_hope/raw/nordic-tweets-2/" + f
        for f in os.listdir("/data/001_twitter_hope/raw/nordic-tweets-2")
        if f.endswith(".tsv")]
# combine file paths
raw_files = raw1 + raw2
# The log file records paths already processed by earlier runs.
logfile = "processed_files_log/nordic_language_extracted.ndjson"
with open(logfile) as log:
    done = ndjson.load(log)
# keep only files that have not been processed yet + sort
raw_files = [f for f in raw_files if f not in done]
raw_files.sort()
# Columns kept in the output records.
column_list = ['id', 'created_at', 'from_user_id', 'text', 'lang', 'favorite_count', 'retweet_count']
# loop through new filepaths
for path_ in raw_files:
    # The date identifier is the last 10 chars of the stem, e.g. 2020-01-31.
    # NOTE(review): `id` shadows the builtin of the same name.
    id = path_[-14:-4]
    year = id[:4]
    month = id[5:7]
    day = id[8:10]
    print(f"Processing {year}{month}{day}")
    # load raw data in tsv format
    df = pd.read_csv(path_, sep='\t', skipinitialspace=True, usecols = column_list)
    # loop through the desired language list
    for language in langs:
        print(f"extract {language}")
        # filter data for the desired language using twitter lang tag
        df_lang = df[df.lang.eq(language)]
        # Convert the subset to ndjson records and write one file per
        # language per day.
        print("Writing down...")
        df_js = iterndjson(df_lang)
        output_path=f"/data/001_twitter_hope/preprocessed/{language}/td_{year}{month}{day}_{language}.ndjson"
        with open(output_path, "w") as f:
            ndjson.dump(df_js, f)
# Add newly processed filenames to the log file
with open(logfile, "a") as out:
writer = ndjson.writer(out, ensure_ascii=False)
for line in raw_files:
writer.writerow(line) | 236 | 0 | 22 |
f3bbd74f7204487b945c7f6840c5509499223d33 | 3,431 | py | Python | src/visualize.py | Yoan-D/DisentangledVAE | b0edeb95665de804e221868e2ca8e7c776711b4b | [
"Apache-2.0"
] | 8 | 2021-10-11T19:21:17.000Z | 2022-01-10T07:58:54.000Z | src/visualize.py | Yoan-D/disentangled-VAE | b0edeb95665de804e221868e2ca8e7c776711b4b | [
"Apache-2.0"
] | null | null | null | src/visualize.py | Yoan-D/disentangled-VAE | b0edeb95665de804e221868e2ca8e7c776711b4b | [
"Apache-2.0"
] | null | null | null | import random
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, RadioButtons
from DSprites_VAE.src.model import VAE
from DSprites_VAE.src.utils import load_data, get_batch, create_categories_map
if __name__ == '__main__':
show()
| 32.67619 | 117 | 0.651705 | import random
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, RadioButtons
from DSprites_VAE.src.model import VAE
from DSprites_VAE.src.utils import load_data, get_batch, create_categories_map
def load_model(batch_size=1, latent_dim=10, checkpoint_path='checkpoints/model1599'):
    """Build a VAE for 64x64 single-channel inputs and restore its weights."""
    model = VAE(image_shape=(64, 64, 1), condition_shape=(1,), latent_dim=latent_dim, batch_size=batch_size)
    model.load_weights(checkpoint_path)
    return model
def activate_slider_widgets(model, z, c, im, fig):
    """Create one vertical slider per latent dimension and wire live decoding.

    Each slider starts at the matching component of the latent vector ``z``;
    moving any slider writes all slider values back into ``z``, decodes, and
    refreshes the displayed image ``im``.  Returns the sliders so the caller
    can keep references (matplotlib widgets are garbage-collected otherwise).
    """
    # Fixed x positions for the 10 latent-dimension sliders.
    slider_positions = [0.1, 0.18, 0.26, 0.34, 0.42, 0.50, 0.58, 0.66, 0.74, 0.82]
    sliders = []
    for index, x in enumerate(slider_positions):
        ax_slider = plt.axes([x, 0.1, 0.0225, 0.25], facecolor='white')
        s = Slider(
            ax=ax_slider,
            label=r'$z_' + str(index) + '$',
            valmin=-10.0,
            valmax=10.0,
            valstep=0.0001,
            orientation="vertical",
            color='black',
        )
        s.set_val(float(z[:, index]))
        sliders.append(s)
    def update(_):
        # Copy every slider value into z, decode, and redraw the figure.
        for index, s in enumerate(sliders):
            z[:, index] = s.val
        prediction = model.decode(z, c, sigmoid=True)
        im.set_data(np.asarray(prediction).squeeze(0))
        fig.canvas.draw()
    for s in sliders:
        s.on_changed(update)
    return sliders
def initialize_plot(train_i, train_c, indices, model):
    """Set up the explorer figure and show a first reconstruction.

    Encodes one randomly chosen sample (index drawn from ``indices``),
    decodes it back, and displays the result.  Returns the condition vector,
    the latent vector (as a numpy array), the image artist, and the figure.
    """
    plt.rcParams["figure.figsize"] = (7, 3)
    mpl.rcParams['toolbar'] = 'None'
    fig, ax = plt.subplots()
    ax.margins(x=0)
    plt.axis('off')
    fig.suptitle('Disentangling the VAE latent space', fontsize=16)
    # Leave room at the bottom/right for the sliders and the radio buttons.
    plt.subplots_adjust(left=0.1, bottom=0.455, right=0.84, top=0.757, wspace=0.05, hspace=0.05)
    x, c = get_batch([random.choice(indices)], train_i, train_c)
    mean, logvar = model.encode(x, c)
    z = model.reparameterize(mean, logvar)
    z = z.numpy()
    prediction = model.decode(z, c, sigmoid=True)
    im = ax.imshow(np.asarray(prediction).squeeze(0), cmap=plt.get_cmap('gray'))
    return c, z, im, fig
def show(checkpoint_path='checkpoints/model1299'):
    """Run the interactive latent-space explorer for a saved checkpoint."""
    vae_model = load_model(checkpoint_path=checkpoint_path)
    shapes_map = {'Square': 0, 'Ellipse': 1, 'Heart': 2}
    train_images, train_categories = load_data()
    category_map = create_categories_map(train_categories)
    # Start with a random sample from the 'Square' category.
    indices = category_map[shapes_map['Square']]
    random.shuffle(indices)
    c, z, im, fig = initialize_plot(train_i=train_images, train_c=train_categories, indices=indices, model=vae_model)
    sliders = activate_slider_widgets(model=vae_model, z=z, c=c, im=im, fig=fig)
    radio_ax = plt.axes([0.74, 0.5, 0.105, 0.2], facecolor='white')
    shapes_radio_button = RadioButtons(radio_ax, ('Square', 'Ellipse', 'Heart'))
    def shapefunc(val):
        # Radio callback: encode a random sample of the chosen shape and push
        # its latent vector into the sliders (each set_val redraws the image).
        indices = category_map[shapes_map[val]]
        random.shuffle(indices)
        x, c = get_batch(indices[0:1], train_images, train_categories)
        mean, logvar = vae_model.encode(x, c)
        z = vae_model.reparameterize(mean, logvar)
        z = z.numpy()
        # update sliders
        for index, s in enumerate(sliders):
            s.set_val(float(z[:, index]))
        im.set_data(np.asarray(vae_model.decode(z, c, sigmoid=True)).squeeze(0))
        fig.canvas.draw()
    shapes_radio_button.on_clicked(shapefunc)
    plt.show()
if __name__ == '__main__':
show()
| 3,034 | 0 | 92 |
65b2684f7e8ff9efe0f9563a4f939ce320580f9d | 1,767 | py | Python | cfgov/regulations3k/tests/test_jinja2tags.py | Colin-Seifer/consumerfinance.gov | a1a943f7170b498707d642d6be97b9a97a2b52e3 | [
"CC0-1.0"
] | 156 | 2015-01-16T15:16:46.000Z | 2020-08-04T04:48:01.000Z | cfgov/regulations3k/tests/test_jinja2tags.py | Colin-Seifer/consumerfinance.gov | a1a943f7170b498707d642d6be97b9a97a2b52e3 | [
"CC0-1.0"
] | 3,604 | 2015-01-05T22:09:12.000Z | 2020-08-14T17:09:19.000Z | cfgov/regulations3k/tests/test_jinja2tags.py | Colin-Seifer/consumerfinance.gov | a1a943f7170b498707d642d6be97b9a97a2b52e3 | [
"CC0-1.0"
] | 102 | 2015-01-28T14:51:18.000Z | 2020-08-10T00:00:39.000Z | import datetime
from django.template import engines
from django.test import TestCase
from regulations3k.jinja2tags import ap_date, regs_hide_on_mobile
| 31.553571 | 76 | 0.640068 | import datetime
from django.template import engines
from django.test import TestCase
from regulations3k.jinja2tags import ap_date, regs_hide_on_mobile
class RegulationsExtensionTestCase(TestCase):
    """Tests for the regulations3k Jinja2 helpers.

    Covers AP-style date formatting (including the abbreviation exceptions
    Sept./March and invalid/None inputs), availability of the ``regdown``
    filter in the wagtail Jinja2 environment, and the mobile-hiding markup
    helper.
    """
    def test_ap_date(self):
        test_date = datetime.date(2011, 1, 1)
        result = ap_date(test_date)
        self.assertEqual(result, "Jan. 1, 2011")
    def test_ap_date_sept(self):
        # September abbreviates to "Sept.", not "Sep.".
        test_date = datetime.date(2011, 9, 1)
        result = ap_date(test_date)
        self.assertEqual(result, "Sept. 1, 2011")
    def test_ap_date_march(self):
        # Short month names (March) are not abbreviated.
        test_date = datetime.date(2011, 3, 1)
        result = ap_date(test_date)
        self.assertEqual(result, "March 1, 2011")
    def test_ap_date_string(self):
        # ISO date strings are accepted as well as date objects.
        test_date = "2011-01-01"
        result = ap_date(test_date)
        self.assertEqual(result, "Jan. 1, 2011")
    def test_ap_date_invalid_string(self):
        # Unparseable input yields None rather than raising.
        test_date = "I am not a date"
        result = ap_date(test_date)
        self.assertEqual(result, None)
    def test_ap_date_none_date(self):
        result = ap_date(None)
        self.assertEqual(result, None)
    def test_regdown_filter_available(self):
        # The 'regdown' filter must be registered in the wagtail Jinja2 env.
        jinja2_engine = engines["wagtail-env"]
        template = jinja2_engine.from_string('{{ "*Hello*" | regdown }}')
        result = template.render()
        self.assertEqual(
            result,
            '<p class="regdown-block" data-label="" '
            'id="be34deef8eb9a480514ed3b4a5ebdaea61c711d2b11d40e830cb0656">'
            "<em>Hello</em></p>",
        )
    def test_regs_hide_on_mobile(self):
        test_str = "Regulation C"
        result = regs_hide_on_mobile(test_str)
        self.assertEqual(
            result, 'Reg<span class="u-hide-on-mobile">ulation</span> C'
        )
| 1,351 | 24 | 238 |
44608a4139015e9603f75a9c6555359519d4998b | 520 | py | Python | fastlab/models/__init__.py | tezignlab/fastweb | 7087b54f13623ae9eb43eb60bd7f4619bd451e70 | [
"MIT"
] | 14 | 2021-12-18T07:33:11.000Z | 2022-01-25T19:30:53.000Z | fastlab/models/__init__.py | tezignlab/fastweb | 7087b54f13623ae9eb43eb60bd7f4619bd451e70 | [
"MIT"
] | 1 | 2021-12-26T10:30:51.000Z | 2021-12-27T03:39:07.000Z | fastlab/models/__init__.py | tezignlab/fastweb | 7087b54f13623ae9eb43eb60bd7f4619bd451e70 | [
"MIT"
] | 1 | 2021-12-30T08:56:54.000Z | 2021-12-30T08:56:54.000Z | from typing import Generic, TypeVar, Optional, List
from pydantic import Field
from pydantic.generics import GenericModel
T = TypeVar("T")
| 23.636364 | 51 | 0.671154 | from typing import Generic, TypeVar, Optional, List
from pydantic import Field
from pydantic.generics import GenericModel
T = TypeVar("T")
class Response(GenericModel, Generic[T]):
    # Generic API envelope: numeric status code, human-readable message,
    # and an optional payload of the parametrized type T.
    code: int = Field(0, example=0)
    message: str = Field('', example='')
    data: Optional[T]
class PageData(GenericModel, Generic[T]):
    # Generic pagination wrapper: skip/limit window, total item count,
    # a has_more flag, and the page's items of the parametrized type T.
    skip: int = Field(0, example=0)
    limit: int = Field(0, example=10)
    total: int = Field(0, example=10)
    has_more: bool = Field(False, example=False)
    data: List[T] = Field([])
| 0 | 330 | 46 |
93eaa9841b6e5d4c764ffd5121a720c876a63d94 | 762 | py | Python | poblaciones/poblaciones.py | laluferu/hw_7 | 8f8fa38695d23a6aaa97fed7facc6bf03481c03d | [
"MIT"
] | null | null | null | poblaciones/poblaciones.py | laluferu/hw_7 | 8f8fa38695d23a6aaa97fed7facc6bf03481c03d | [
"MIT"
] | null | null | null | poblaciones/poblaciones.py | laluferu/hw_7 | 8f8fa38695d23a6aaa97fed7facc6bf03481c03d | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
tray = np.genfromtxt("poblaciones.dat",delimiter=",")
a = tray[:,0]
b = tray[:,1]
c = tray[:,2]
d = tray[:,3]
fig = plt.figure(figsize = (20,20))
plt.subplot(2,3,1)
plt.scatter(a,b)
plt.xlabel(r'$\alpha$' )
plt.ylabel(r'$\beta$' )
plt.subplot(2,3,2)
plt.scatter(a,c)
plt.xlabel(r'$\alpha$' )
plt.ylabel(r'$\gamma$' )
plt.subplot(2,3,3)
plt.scatter(a,d)
plt.xlabel(r'$\alpha$' )
plt.ylabel(r'$\delta$' )
plt.subplot(2,3,4)
plt.scatter(b,c)
plt.xlabel(r'$\beta$' )
plt.ylabel(r'$\gamma$' )
plt.subplot(2,3,5)
plt.scatter(b,d)
plt.xlabel(r'$\beta$')
plt.ylabel(r'$\delta$')
plt.subplot(2,3,3)
plt.scatter(c,d)
plt.xlabel(r'$\gamma$' )
plt.ylabel(r'$\delta$' )
plt.savefig("poblaciones.pdf",dpi = 400)
| 16.212766 | 53 | 0.636483 | import matplotlib.pyplot as plt
import numpy as np
# Pairwise scatter plots of the four parameter columns (alpha, beta, gamma,
# delta) read column-wise from poblaciones.dat.
tray = np.genfromtxt("poblaciones.dat", delimiter=",")
a = tray[:, 0]
b = tray[:, 1]
c = tray[:, 2]
d = tray[:, 3]
fig = plt.figure(figsize=(20, 20))
# (x data, y data, x label, y label) for each of the six parameter pairs.
pairs = [
    (a, b, r'$\alpha$', r'$\beta$'),
    (a, c, r'$\alpha$', r'$\gamma$'),
    (a, d, r'$\alpha$', r'$\delta$'),
    (b, c, r'$\beta$', r'$\gamma$'),
    (b, d, r'$\beta$', r'$\delta$'),
    (c, d, r'$\gamma$', r'$\delta$'),
]
for position, (x, y, xname, yname) in enumerate(pairs, start=1):
    # BUG FIX: the original called plt.subplot(2,3,3) twice, so the final
    # gamma-delta scatter overwrote the alpha-delta panel and panel 6 was
    # left empty; each pair now gets its own panel 1..6.
    plt.subplot(2, 3, position)
    plt.scatter(x, y)
    plt.xlabel(xname)
    plt.ylabel(yname)
plt.savefig("poblaciones.pdf", dpi=400)
| 0 | 0 | 0 |
d5e098cf639cbeb687219e6dd937c401c3966e40 | 1,321 | py | Python | core/tests/test_models.py | fossabot/Django-BaaS | 2f46f9afb1feff564139e367f16eaa0349700621 | [
"Apache-2.0"
] | 9 | 2019-04-10T05:46:22.000Z | 2020-06-03T11:23:20.000Z | core/tests/test_models.py | fossabot/Django-BaaS | 2f46f9afb1feff564139e367f16eaa0349700621 | [
"Apache-2.0"
] | 8 | 2019-04-11T02:25:14.000Z | 2019-07-05T19:47:20.000Z | core/tests/test_models.py | fossabot/Django-BaaS | 2f46f9afb1feff564139e367f16eaa0349700621 | [
"Apache-2.0"
] | 4 | 2019-04-23T04:02:40.000Z | 2020-01-22T03:41:24.000Z | from django.test import TestCase
from model_mommy import mommy
from ..models import Human, Child, Parent, Sibling, Avatar, User
| 37.742857 | 103 | 0.680545 | from django.test import TestCase
from model_mommy import mommy
from ..models import Human, Child, Parent, Sibling, Avatar, User
class BaseModelTestCase(TestCase):
    """Shared fixture: a Human owned by user1, with a parent category,
    three child records owned by user2, five siblings, and one avatar."""
    def setUp(self):
        # Two users so ownership of Human vs. Child records can be asserted.
        self.user1 = mommy.make(User, username='user1')
        self.user2 = mommy.make(User, username='user2')
        self.parent = mommy.make(Parent, name='Category1')
        self.human = mommy.make(Human, user=self.user1, parent=self.parent)
        self.childs = mommy.make(Child, name="comment", human=self.human, _quantity=3, user=self.user2)
        ## make 5 siblings for the human
        mommy.make(Sibling, _quantity=5, humans=[self.human.id])
        self.avatar = mommy.make(Avatar, name='page', parent=self.parent)
    def tearDown(self):
        pass
class ModelsBaseTestCase(BaseModelTestCase):
    """Sanity checks on the relations created by the base fixture."""
    def setUp(self):
        super(ModelsBaseTestCase, self).setUp()
    def test_instance(self):
        # Forward and reverse relations must all resolve to the fixture data.
        self.assertTrue(isinstance(self.human, Human))
        self.assertEqual(self.childs[2].human.name, self.human.name)
        self.assertEqual(self.human.parent.name, 'Category1')
        self.assertEqual(len(self.human.siblings.all()), 5)
        self.assertEqual(len(self.parent.avatars.all()), 1)
        self.assertEqual(self.human.user, self.user1)
        self.assertEqual(self.childs[0].user, self.user2)
| 1,003 | 36 | 153 |
9a1a0eb13318fa750171f84bf7377ed676d9533e | 1,641 | py | Python | mimosa/pylib/patterns/color_patterns.py | rafelafrance/traiter_mimosa | 7a248b610747d5d0e5ce5473953cbdc90d336aae | [
"MIT"
] | null | null | null | mimosa/pylib/patterns/color_patterns.py | rafelafrance/traiter_mimosa | 7a248b610747d5d0e5ce5473953cbdc90d336aae | [
"MIT"
] | null | null | null | mimosa/pylib/patterns/color_patterns.py | rafelafrance/traiter_mimosa | 7a248b610747d5d0e5ce5473953cbdc90d336aae | [
"MIT"
] | null | null | null | """Common color snippets."""
import re
from spacy import registry
from traiter import actions
from traiter import const as t_const
from traiter.patterns import matcher_patterns
from . import common_patterns
from . import term_patterns
from .. import consts
MULTIPLE_DASHES = ["\\" + c for c in t_const.DASH_CHAR]
MULTIPLE_DASHES = rf'\s*[{"".join(MULTIPLE_DASHES)}]{{2,}}\s*'
SKIP = t_const.DASH + common_patterns.MISSING
COLOR = matcher_patterns.MatcherPatterns(
"color",
on_match="mimosa.color.v1",
decoder=common_patterns.COMMON_PATTERNS
| {
"color_words": {"ENT_TYPE": {"IN": ["color", "color_mod"]}},
"color": {"ENT_TYPE": "color"},
"to": {"POS": {"IN": ["AUX"]}},
},
patterns=[
"missing? color_words* -* color+ -* color_words*",
"missing? color_words+ to color_words+ color+ -* color_words*",
],
)
@registry.misc(COLOR.on_match)
| 28.293103 | 71 | 0.630713 | """Common color snippets."""
import re
from spacy import registry
from traiter import actions
from traiter import const as t_const
from traiter.patterns import matcher_patterns
from . import common_patterns
from . import term_patterns
from .. import consts
MULTIPLE_DASHES = ["\\" + c for c in t_const.DASH_CHAR]
MULTIPLE_DASHES = rf'\s*[{"".join(MULTIPLE_DASHES)}]{{2,}}\s*'
SKIP = t_const.DASH + common_patterns.MISSING
COLOR = matcher_patterns.MatcherPatterns(
"color",
on_match="mimosa.color.v1",
decoder=common_patterns.COMMON_PATTERNS
| {
"color_words": {"ENT_TYPE": {"IN": ["color", "color_mod"]}},
"color": {"ENT_TYPE": "color"},
"to": {"POS": {"IN": ["AUX"]}},
},
patterns=[
"missing? color_words* -* color+ -* color_words*",
"missing? color_words+ to color_words+ color+ -* color_words*",
],
)
@registry.misc(COLOR.on_match)
def color(ent):
    """Entity action: reduce a matched color span to one dash-joined value.

    Stores the normalized value in ``ent._.data["color"]`` and sets
    ``ent._.data["missing"]`` when a "missing" word occurs in the span.
    Rejects the match (marking the entity for deletion) when no usable
    token remains after filtering.
    """
    parts = []
    for token in ent:
        replace = term_patterns.REPLACE.get(token.lower_, token.lower_)
        # Dashes and "missing" words are connectors, not color content.
        if replace in SKIP:
            continue
        # Words explicitly black-listed for removal.
        if term_patterns.REMOVE.get(token.lower_):
            continue
        # Auxiliary POS tokens (the "to" in ranges) are connectors too.
        if token.pos_ in ["AUX"]:
            continue
        # Title-shaped tokens are skipped -- NOTE(review): presumably these
        # are proper nouns/taxon names rather than color words; confirm.
        if token.shape_ in consts.TITLE_SHAPES:
            continue
        parts.append(replace)
    if not parts:
        ent._.delete = True
        raise actions.RejectMatch()
    value = "-".join(parts)
    # Collapse any run of dashes introduced by the join.
    value = re.sub(MULTIPLE_DASHES, r"-", value)
    ent._.data["color"] = term_patterns.REPLACE.get(value, value)
    if any(t for t in ent if t.lower_ in common_patterns.MISSING):
        ent._.data["missing"] = True
| 709 | 0 | 22 |
ec969eddab663577462b502f546795bf756e2137 | 77 | py | Python | brightid/__init__.py | PooyaFekri/python-brightid | 2ade82030527e1ac58e7049b3657a970ef3e4fd4 | [
"MIT"
] | 8 | 2020-12-25T19:50:11.000Z | 2022-01-30T09:19:03.000Z | brightid/__init__.py | PooyaFekri/python-brightid | 2ade82030527e1ac58e7049b3657a970ef3e4fd4 | [
"MIT"
] | null | null | null | brightid/__init__.py | PooyaFekri/python-brightid | 2ade82030527e1ac58e7049b3657a970ef3e4fd4 | [
"MIT"
] | 1 | 2021-09-20T06:32:56.000Z | 2021-09-20T06:32:56.000Z | # Be name khoda
from .node import Node as Node
from . import tools as tools
| 15.4 | 30 | 0.74026 | # Be name khoda
from .node import Node as Node
from . import tools as tools
| 0 | 0 | 0 |
08bc2a2ab2a0f7d4492cb0f95312ef27f1410cbd | 187 | py | Python | boards/admin.py | 6ba/bbgo | dfa9b55b8d40c53940105333c2e03a3c6abddb88 | [
"MIT"
] | 22 | 2017-07-13T04:07:03.000Z | 2021-06-10T05:39:29.000Z | boards/admin.py | genonfire/bbgo | 5f374f0b620f4dc3e106de5969f26f4585044605 | [
"MIT"
] | 7 | 2017-08-25T06:33:45.000Z | 2019-10-14T05:49:32.000Z | boards/admin.py | 6ba/bbgo | dfa9b55b8d40c53940105333c2e03a3c6abddb88 | [
"MIT"
] | 9 | 2017-12-31T02:45:58.000Z | 2021-01-22T03:09:02.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Board, Reply
admin.site.register(Board)
admin.site.register(Reply)
| 18.7 | 39 | 0.770053 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Board, Reply
# Expose the Board and Reply models in the default Django admin site.
admin.site.register(Board)
admin.site.register(Reply)
| 0 | 0 | 0 |
eed37e81df104a40f403189b836b2f9eef8cbe4e | 441 | py | Python | server/processes/migrations/0097_processtype_aws_ecs_service_updated_at.py | CloudReactor/task_manager | 464ca74371064fabb9a21b1f5bacba30360932ab | [
"Fair"
] | null | null | null | server/processes/migrations/0097_processtype_aws_ecs_service_updated_at.py | CloudReactor/task_manager | 464ca74371064fabb9a21b1f5bacba30360932ab | [
"Fair"
] | 6 | 2021-11-01T01:35:40.000Z | 2022-02-11T03:33:06.000Z | server/processes/migrations/0097_processtype_aws_ecs_service_updated_at.py | CloudReactor/task_manager | 464ca74371064fabb9a21b1f5bacba30360932ab | [
"Fair"
] | null | null | null | # Generated by Django 2.2.2 on 2020-03-22 05:15
from django.db import migrations, models
| 23.210526 | 63 | 0.600907 | # Generated by Django 2.2.2 on 2020-03-22 05:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a nullable timestamp tracking when the process type's AWS ECS
    # service definition was last updated.
    dependencies = [
        ('processes', '0096_auto_20200308_0325'),
    ]
    operations = [
        migrations.AddField(
            model_name='processtype',
            name='aws_ecs_service_updated_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| 0 | 321 | 25 |
e0c2ddc4b353f04bcd3e55e1d338213d470161f0 | 1,474 | py | Python | Geometry/EcalTestBeam/test/runSurveyToTransforms_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Geometry/EcalTestBeam/test/runSurveyToTransforms_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Geometry/EcalTestBeam/test/runSurveyToTransforms_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
process = cms.Process("SurveyToTransforms")
#process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.MessageLogger.cout.enable = cms.untracked.bool(True)
#process.MessageLogger.cout.threshold = cms.untracked.string('INFO')
#process.MessageLogger.debugModules = cms.untracked.vstring('*')
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.EcalTestBeam.idealGeomPlusEE_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
process.source = cms.Source("EmptySource")
process.cga = cms.EDAnalyzer("SurveyToTransforms" )
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.TFileService = cms.Service("TFileService",
fileName = cms.string('survey.root')
)
process.testendcap = cms.ESProducer( "testEcalEndcapGeometryEP",
applyAlignment = cms.bool(False) )
process.es_prefer_endcap = cms.ESPrefer( "testEcalEndcapGeometryEP", "testendcap" )
process.p1 = cms.Path(process.cga)
| 30.708333 | 83 | 0.744912 |
# CMSSW job configuration: runs the SurveyToTransforms analyzer over the
# ECAL test-beam geometry and writes its output to survey.root.
import FWCore.ParameterSet.Config as cms
process = cms.Process("SurveyToTransforms")
#process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.MessageLogger.cout.enable = cms.untracked.bool(True)
#process.MessageLogger.cout.threshold = cms.untracked.string('INFO')
#process.MessageLogger.debugModules = cms.untracked.vstring('*')
# Magnetic field plus ideal test-beam geometry (with endcap) and calo setup.
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.EcalTestBeam.idealGeomPlusEE_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# Run a single event from an empty source.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
process.source = cms.Source("EmptySource")
process.cga = cms.EDAnalyzer("SurveyToTransforms" )
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.TFileService = cms.Service("TFileService",
                                   fileName = cms.string('survey.root')
)
# Prefer the test endcap geometry producer (alignment disabled) over the
# default one.
process.testendcap = cms.ESProducer( "testEcalEndcapGeometryEP",
                                     applyAlignment = cms.bool(False) )
process.es_prefer_endcap = cms.ESPrefer( "testEcalEndcapGeometryEP", "testendcap" )
process.p1 = cms.Path(process.cga)
| 0 | 0 | 0 |
3666add414bdad8ca2ee8c15a68b12f7a3020431 | 338 | py | Python | syft/frameworks/torch/he/fv/plaintext.py | wendong1997/PySyft | 1754a0720452db8a868104c74c5c2548ea8e75ea | [
"Apache-2.0"
] | 7 | 2020-04-20T22:22:08.000Z | 2020-07-25T17:32:08.000Z | syft/frameworks/torch/he/fv/plaintext.py | wendong1997/PySyft | 1754a0720452db8a868104c74c5c2548ea8e75ea | [
"Apache-2.0"
] | 3 | 2020-04-24T21:20:57.000Z | 2020-05-28T09:17:02.000Z | syft/frameworks/torch/he/fv/plaintext.py | wendong1997/PySyft | 1754a0720452db8a868104c74c5c2548ea8e75ea | [
"Apache-2.0"
] | 4 | 2020-04-24T22:32:37.000Z | 2020-05-25T19:29:20.000Z | class PlainText:
"""A wrapper class for representing plaintext.
Typical format of plaintext data would be [x0, x1, x2...] where xi represents
coefficients of the polynomial.
Attributes:
data: A 1-dim list representing plaintext coefficient values.
"""
| 26 | 81 | 0.674556 | class PlainText:
"""A wrapper class for representing plaintext.
Typical format of plaintext data would be [x0, x1, x2...] where xi represents
coefficients of the polynomial.
Attributes:
data: A 1-dim list representing plaintext coefficient values.
"""
    def __init__(self, data):
        # Coefficient list [x0, x1, x2, ...] of the plaintext polynomial.
        self.data = data
| 29 | 0 | 27 |
7dd23b3da72167402199861c33ec8e354a01ad64 | 11,091 | py | Python | GeneralStats/GeneralStats.py | haoruilee/statslibrary | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | [
"MIT"
] | 58 | 2019-02-04T13:53:16.000Z | 2022-02-24T02:59:55.000Z | GeneralStats/GeneralStats.py | haoruilee/statslibrary | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | [
"MIT"
] | null | null | null | GeneralStats/GeneralStats.py | haoruilee/statslibrary | 01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a | [
"MIT"
] | 19 | 2019-03-21T01:54:55.000Z | 2021-12-03T13:55:16.000Z | import numpy as np
import math as mt
| 33.107463 | 135 | 0.500135 | import numpy as np
import math as mt
class GeneralStats:
    """Descriptive statistics for 1-D or 2-D numpy sample sets.

    Every method accepts either a 1-D array (a single variable) or a 2-D
    array holding several variables, and returns a length-1 array /
    (1, n_variables) array of per-variable results.  ``rowvar`` follows
    the numpy convention used throughout: ``rowvar=True`` (default)
    means each ROW of a 2-D input is one variable (columns are
    observations); ``rowvar=False`` means each COLUMN is one variable.

    Changes from the original implementation:
      * ``median`` and ``quantile`` no longer sort the caller's array in
        place (np.sort copies are used instead).
      * ``quantile`` with ``interpolation='linear'`` now interpolates
        with the fractional part of the target index (the standard
        definition, matching numpy.quantile) instead of the quantile
        fraction itself, which gave wrong results for fraction != 0.5.
    """

    def average(self, data, rowvar=True):
        """Return the arithmetic mean of each variable.

        :param data: sample set
        :type data: np.array
        :param rowvar: True if rows are variables, False if columns are
        :type rowvar: bool
        :return: vector of per-variable means
        :rtype: np.array
        """
        if rowvar:
            data = data.T  # normalize so that columns are the variables
        if data.ndim == 1:
            return np.array([np.mean(data)])
        # keepdims preserves the historical (1, n_variables) result shape
        return np.mean(data, axis=0, keepdims=True)

    def median(self, data, rowvar=True):
        """Return the median of each variable.

        The caller's array is left untouched (no in-place sorting).

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable medians
        :rtype: np.array
        """
        if not rowvar:
            data = data.T  # normalize so that rows are the variables
        if data.ndim == 1:
            return np.array([np.median(data)])
        size = np.shape(data)[0]
        res = np.zeros((1, size))
        # np.median averages the two middle elements for even counts,
        # exactly like the original (a + b) / 2.0 computation.
        res[0, :] = np.median(data, axis=1)
        return res

    def mode(self, data, rowvar=True):
        """Return the mode (most frequent value) of each variable.

        Ties are broken in favour of the value encountered first, which
        mirrors ``max(counts, key=counts.get)`` over an insertion-ordered
        dict.

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable modes
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array([self._mode_1d(data)])
        return np.array([[self._mode_1d(row) for row in data]])

    @staticmethod
    def _mode_1d(values):
        """Most frequent value of a 1-D sequence; first seen wins ties."""
        counts = {}
        for v in values:
            counts[v] = counts.get(v, 0) + 1
        return max(counts, key=counts.get)

    def quantile(self, data, fraction, rowvar=True, interpolation='linear'):
        """Return the ``fraction`` quantile of each variable.

        :param data: sample set
        :param fraction: quantile level, 0 <= fraction <= 1
        :param rowvar: True if rows are variables, False if columns are
        :param interpolation: one of {'linear', 'lower', 'higher',
            'midpoint'}; any other value falls back to 'midpoint'.
            With target index t = fraction * (n - 1) lying between data
            points i = floor(t) and j = ceil(t):
              - 'linear':   data[i] + (data[j] - data[i]) * (t - i)
              - 'lower':    data[i]
              - 'higher':   data[j]
              - 'midpoint': (data[i] + data[j]) / 2
        :return: vector of per-variable quantiles
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array(
                [self._quantile_1d(np.sort(data), fraction, interpolation)]
            )
        size = np.shape(data)[0]
        ordered = np.sort(data, axis=1)  # np.sort copies: caller untouched
        res = np.zeros((1, size))
        for i in range(size):
            res[0, i] = self._quantile_1d(ordered[i], fraction, interpolation)
        return res

    @staticmethod
    def _quantile_1d(ordered, fraction, interpolation):
        """Quantile of an already-sorted 1-D array."""
        tar = fraction * (len(ordered) - 1)
        lo, hi = mt.floor(tar), mt.ceil(tar)
        if interpolation == 'linear':
            # Bug fix: interpolate with the fractional part of the index
            # (tar - lo), not with ``fraction`` itself.
            return ordered[lo] + (ordered[hi] - ordered[lo]) * (tar - lo)
        if interpolation == 'lower':
            return ordered[lo]
        if interpolation == 'higher':
            return ordered[hi]
        # Default / 'midpoint'
        return (ordered[lo] + ordered[hi]) / 2

    def range(self, data, rowvar=True):
        """Return the range (max - min) of each variable.

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable ranges
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array([np.max(data) - np.min(data)])
        size = np.shape(data)[0]
        res = np.zeros((1, size))
        res[0, :] = np.ptp(data, axis=1)  # peak-to-peak = max - min
        return res

    def variance(self, data, rowvar=True):
        """Return the population variance (divide by n) of each variable.

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable variances
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array([np.var(data)])
        size = np.shape(data)[0]
        res = np.zeros((1, size))
        res[0, :] = np.var(data, axis=1)
        return res

    def standard_dev(self, data, rowvar=True):
        """Return the population standard deviation of each variable.

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable standard deviations
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array([np.std(data)])
        size = np.shape(data)[0]
        res = np.zeros((1, size))
        res[0, :] = np.std(data, axis=1)
        return res

    def skewness(self, data, rowvar=True):
        """Return the population skewness g1 = m3 / m2**1.5 per variable.

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable skewness values
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array([self._skew_1d(data)])
        size = np.shape(data)[0]
        res = np.zeros((1, size))
        for i in range(size):
            res[0, i] = self._skew_1d(data[i])
        return res

    @staticmethod
    def _skew_1d(values):
        """Population skewness of a 1-D array: m3 / m2**(3/2)."""
        dev = values - np.average(values)
        m2 = np.mean(dev ** 2)
        m3 = np.mean(dev ** 3)
        return m3 / np.power(m2, 1.5)

    def kurtosis(self, data, rowvar=True):
        """Return the bias-adjusted excess kurtosis (G2) of each variable.

        Uses the standard sample adjustment
        G2 = ((n-1) / ((n-2)(n-3))) * ((n+1) * g2 + 6) with
        g2 = m4 / m2**2 - 3, so at least 4 observations are required
        per variable.

        :param data: sample set
        :param rowvar: True if rows are variables, False if columns are
        :return: vector of per-variable kurtosis values
        :rtype: np.array
        """
        if not rowvar:
            data = data.T
        if data.ndim == 1:
            return np.array([self._kurt_1d(data)])
        size = np.shape(data)[0]
        res = np.zeros((1, size))
        for i in range(size):
            res[0, i] = self._kurt_1d(data[i])
        return res

    @staticmethod
    def _kurt_1d(values):
        """Bias-adjusted excess kurtosis of a 1-D array (needs n >= 4)."""
        n = len(values)
        dev = values - np.average(values)
        g = np.mean(dev ** 4) / np.power(np.mean(dev ** 2), 2) - 3
        return ((n - 1) / ((n - 2) * (n - 3))) * ((n + 1) * g + 6)
| 0 | 13,856 | 26 |
2a4e39e1b0d7707a07c2ad96c31e3aa942de3d78 | 79 | py | Python | benchgen/__init__.py | ansible-lockdown/BenchmarkGenerator | ad5890e2ba53197d750966e57595be720132ea61 | [
"MIT"
] | 3 | 2020-08-27T13:53:41.000Z | 2022-02-27T20:43:44.000Z | benchgen/__init__.py | ansible-lockdown/BenchmarkGenerator | ad5890e2ba53197d750966e57595be720132ea61 | [
"MIT"
] | null | null | null | benchgen/__init__.py | ansible-lockdown/BenchmarkGenerator | ad5890e2ba53197d750966e57595be720132ea61 | [
"MIT"
] | 2 | 2020-12-10T06:57:44.000Z | 2021-05-03T17:50:35.000Z | import pkg_resources
from .parser import Parser
from .generate import generate | 19.75 | 30 | 0.848101 | import pkg_resources
from .parser import Parser
from .generate import generate | 0 | 0 | 0 |
d3462aec38e90b93f8ee1720113086b4a3626b1e | 11,690 | py | Python | tl_tweets.py | TopView/evtools | d0add3045939ef602a5cd40bb9295d4a69edd35f | [
"MIT"
] | 40 | 2016-02-24T08:09:20.000Z | 2020-12-22T14:37:57.000Z | tl_tweets.py | TopView/evtools | d0add3045939ef602a5cd40bb9295d4a69edd35f | [
"MIT"
] | 3 | 2016-03-14T16:11:49.000Z | 2019-08-25T20:17:33.000Z | tl_tweets.py | TopView/evtools | d0add3045939ef602a5cd40bb9295d4a69edd35f | [
"MIT"
] | 12 | 2016-03-12T19:50:57.000Z | 2020-12-27T22:23:55.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
tl_tweets.py
Copyright (c) 2015 Rob Mason
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Twitter: @Teslaliving
Blog: http://teslaliving.net
Description:
Twitter Helper Functions
Dependencies: twython: https://github.com/ryanmcgrath/twython
You need to get application keys for Twitter at https://apps.twitter.com
Provide them via environment variables:
TL_APP_KEY
TL_APP_SECRET
TL_OAUTH_TOKEN
TL_OAUTH_TOKEN_SECRET
Or via init function.
Note: The logging stuff is as Twython emits a bunch of stuff during its work that I wanted to suppress
"""
import os
import sys
import time
import random
import logging
import sys
basepath = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(basepath, 'twython'))
from twython import Twython, TwythonAuthError
# Initialize Twitter Keys
APP_KEY = None
APP_SECRET = None
OAUTH_TOKEN = None
OAUTH_TOKEN_SECRET = None
# Cache self ID
MYSELF = None
if 'TL_APP_KEY' in os.environ:
APP_KEY = os.environ['TL_APP_KEY']
if 'TL_APP_SECRET' in os.environ:
APP_SECRET = os.environ['TL_APP_SECRET']
if 'TL_OAUTH_TOKEN' in os.environ:
OAUTH_TOKEN = os.environ['TL_OAUTH_TOKEN']
if 'TL_OAUTH_TOKEN_SECRET' in os.environ:
OAUTH_TOKEN_SECRET = os.environ['TL_OAUTH_TOKEN_SECRET']
| 32.382271 | 120 | 0.664414 | #!/usr/bin/env python
# encoding: utf-8
"""
tl_tweets.py
Copyright (c) 2015 Rob Mason
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Twitter: @Teslaliving
Blog: http://teslaliving.net
Description:
Twitter Helper Functions
Dependencies: twython: https://github.com/ryanmcgrath/twython
You need to get application keys for Twitter at https://apps.twitter.com
Provide them via environment variables:
TL_APP_KEY
TL_APP_SECRET
TL_OAUTH_TOKEN
TL_OAUTH_TOKEN_SECRET
Or via init function.
Note: The logging stuff is as Twython emits a bunch of stuff during its work that I wanted to suppress
"""
import os
import sys
import time
import random
import logging
import sys
basepath = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(basepath, 'twython'))
from twython import Twython, TwythonAuthError
# Initialize Twitter Keys
APP_KEY = None
APP_SECRET = None
OAUTH_TOKEN = None
OAUTH_TOKEN_SECRET = None
# Cache self ID
MYSELF = None
if 'TL_APP_KEY' in os.environ:
APP_KEY = os.environ['TL_APP_KEY']
if 'TL_APP_SECRET' in os.environ:
APP_SECRET = os.environ['TL_APP_SECRET']
if 'TL_OAUTH_TOKEN' in os.environ:
OAUTH_TOKEN = os.environ['TL_OAUTH_TOKEN']
if 'TL_OAUTH_TOKEN_SECRET' in os.environ:
OAUTH_TOKEN_SECRET = os.environ['TL_OAUTH_TOKEN_SECRET']
def init_twitter_account(app_key, app_secret, oauth_token, oauth_token_secret):
    """Configure the module-level Twitter API credentials.

    Overrides any keys picked up from the environment and clears the
    cached screen name so it is re-fetched under the new credentials.
    """
    global APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, MYSELF
    APP_KEY, APP_SECRET = app_key, app_secret
    OAUTH_TOKEN, OAUTH_TOKEN_SECRET = oauth_token, oauth_token_secret
    # Drop the cached account name; it belonged to the previous credentials.
    MYSELF = None
def check_twitter_config():
    """Raise an Exception naming the first missing Twitter credential."""
    if not APP_KEY:
        raise Exception("APP_KEY missing for twitter")
    if not APP_SECRET:
        # Bug fix: previously reported "APP_KEY missing" for a missing
        # APP_SECRET (copy-paste error).
        raise Exception("APP_SECRET missing for twitter")
    if not OAUTH_TOKEN:
        raise Exception("OAUTH_TOKEN missing for twitter")
    if not OAUTH_TOKEN_SECRET:
        raise Exception("OAUTH_TOKEN_SECRET missing for twitter")
def twitter_auth_issue(e):
    """Report a Twitter authentication problem on stderr.

    :param e: the triggering error; may be an exception instance or a
        plain string (callers pass TwythonAuthError instances).
    """
    message = "There was a problem with automated tweet operations:\n\n"
    # Bug fix: concatenating an exception instance onto a str raises
    # TypeError; coerce to text explicitly.
    message += str(e)
    message += "\nPlease investigate."
    print(message, file=sys.stderr)
def tweet_string(message, log, media=None):
    """Post ``message`` (optionally with an image) to Twitter.

    Transient failures are retried with a randomized backoff; on an
    authentication error the problem is reported via twitter_auth_issue()
    and the tweet is abandoned immediately.

    :param message: text of the tweet
    :param log: logger used for progress/diagnostics
    :param media: optional path of an image file to attach
    """
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    max_attempts = 2
    retries = 0
    while retries < max_attempts:
        log.setLevel(logging.ERROR)  # silence Twython chatter during the call
        try:
            if media:
                # Bug fix: close the image file instead of leaking the handle.
                with open(media, 'rb') as photo:
                    media_ids = twitter.upload_media(media=photo)
                twitter.update_status(status=message.encode('utf-8').strip(),
                                      media_ids=media_ids['media_id'])
            else:
                twitter.update_status(status=message.encode('utf-8').strip())
            break
        except TwythonAuthError as e:
            log.setLevel(old_level)
            log.exception("  Problem trying to tweet string")
            twitter_auth_issue(e)
            return
        except:
            log.setLevel(old_level)
            log.exception("  Problem trying to tweet string")
            retries += 1
            s = random.randrange(5, 10 * retries)
            log.debug("  sleeping %d seconds for retry", s)
            time.sleep(s)
    log.setLevel(old_level)
    if retries == max_attempts:
        # Bug fix: the exhaustion check used to compare against 5, which the
        # retry loop above could never reach, so a total failure was never
        # reported.
        log.error("Couldn't tweet string: %s with media: %s", message, media)
def tweet_price(price, log, stock, extra="", image=None):
    """Tweet the current stock price for ``stock``, optionally with an image."""
    log.debug("  Tweet about stock price for %s: $%s", stock, price)
    text = "$%s current stock price: $%s. %s #bot" % (stock, price, extra)
    tweet_string(message=text, log=log, media=image)
def tweet_search(log, item, limit=50):
    """Search Twitter for ``item`` and return up to ``limit`` results.

    :param log: logger used for progress/diagnostics
    :param item: search query string (must be <= 500 characters)
    :param limit: maximum number of results to request
    :raises Exception: if the query string is too long
    :return: the raw Twython search result dict
    """
    log.debug("  Searching twitter for %s", item)
    check_twitter_config()
    if len(item) > 500:
        log.error("  Search string too long")
        # Bug fix: Exception() does not %-format its arguments the way
        # logging calls do; build the message explicitly.
        raise Exception("Search string too long: %d" % len(item))
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        result = twitter.search(q=item, count=limit)
    except TwythonAuthError as e:
        twitter_auth_issue(e)
        raise
    log.setLevel(old_level)
    return result
def check_relationship(log, id):
    """Return (following, followed_by) between this account and ``id``.

    :param log: logger used for progress/diagnostics
    :param id: screen name of the other account
    :raises Exception: if our own screen name cannot be determined
    :return: tuple of booleans (we follow them, they follow us)
    """
    my_screen_name = get_screen_name(log)
    if my_screen_name == "Unknown":
        # Bug fix: raising a plain string is a TypeError in Python 3;
        # wrap the message in an Exception.
        raise Exception("Couldn't get my own screen name")
    log.debug("  Checking relationship of %s with me (%s)", id, my_screen_name)
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        result = twitter.show_friendship(source_screen_name=my_screen_name,
                                         target_screen_name=id)
    except TwythonAuthError as e:
        log.setLevel(old_level)
        log.exception("  Problem trying to check relationship")
        twitter_auth_issue(e)
        raise
    log.setLevel(old_level)
    return (result["relationship"]["source"]["following"],
            result["relationship"]["source"]["followed_by"])
def follow_twitter_user(log, id):
    """Follow the Twitter account whose screen name is ``id``.

    Authentication failures are reported via twitter_auth_issue() and
    re-raised; any other error propagates unchanged to the caller.
    """
    log.debug("  Following %s", id)
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    api = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        api.create_friendship(screen_name=id)
    except TwythonAuthError as auth_err:
        log.setLevel(old_level)
        log.exception("  Problem trying to follow twitter user")
        twitter_auth_issue(auth_err)
        raise
    log.setLevel(old_level)
def unfollow_twitter_user(log, id):
    """Unfollow the Twitter account whose screen name is ``id``.

    Best effort: a failed unfollow is logged and swallowed, except for
    authentication errors, which are reported and re-raised.
    """
    log.debug("  Unfollowing %s", id)
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    api = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        api.destroy_friendship(screen_name=id)
    except TwythonAuthError as auth_err:
        log.setLevel(old_level)
        log.exception("Error unfollowing %s", id)
        twitter_auth_issue(auth_err)
        raise
    except:
        # Deliberate best-effort behaviour: log and continue on any
        # other failure.
        log.exception("Error unfollowing %s", id)
    log.setLevel(old_level)
def get_account_details(log, id):
    """Return the Twitter profile dict for ``id``, or None on failure.

    Authentication errors are reported via twitter_auth_issue() and
    re-raised; any other error yields a None return (best effort).
    """
    log.debug("  Getting account details for %s", id)
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    api = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        details = api.show_user(screen_name=id)
    except TwythonAuthError as auth_err:
        log.setLevel(old_level)
        log.exception("  Problem trying to get account details")
        twitter_auth_issue(auth_err)
        raise
    except:
        # Best effort: any other failure is reported as "no details".
        details = None
    log.setLevel(old_level)
    return details
def get_follower_count(log, id):
    """Return the follower count for ``id``, or None if the lookup failed."""
    log.debug("  Getting follower count for %s", id)
    profile = get_account_details(log, id)
    if not profile:
        return None
    count = profile["followers_count"]
    log.debug("  %d", count)
    return count
def get_screen_name(log):
    """Return the authenticated account's screen name, with caching.

    A failed lookup returns (and caches) "Unknown"; a cached "Unknown"
    is treated as a miss so the next call retries the API.
    """
    global MYSELF
    if MYSELF and MYSELF != "Unknown":
        return MYSELF
    log.debug("  Getting current user screen name")
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    api = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        details = api.verify_credentials()
    except TwythonAuthError as auth_err:
        log.setLevel(old_level)
        log.exception("  Problem trying to get screen name")
        twitter_auth_issue(auth_err)
        raise
    except:
        log.exception("  Problem trying to get screen name")
        details = None
    log.setLevel(old_level)
    MYSELF = details["screen_name"] if details else "Unknown"
    return MYSELF
def get_following(log, id):
    """Yield the screen names of accounts that ``id`` is following.

    Pages through the friends list 200 entries at a time, sleeping
    roughly a minute between pages to stay under Twitter's rate limit,
    and gives up after 15 pages (~3000 accounts).

    :param log: logger used for progress/diagnostics
    :param id: screen name whose friends list is fetched
    """
    log.debug("  Getting people %s is following", id)
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    # Temporarily raise the log level: Twython is chatty during API calls.
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    log.setLevel(old_level)
    # Twitter cursoring: -1 means "first page", 0 means "no more pages".
    cursor = -1
    max_loops = 15
    while cursor != 0:
        try:
            log.setLevel(logging.ERROR)
            following = twitter.get_friends_list(screen_name=id, cursor=cursor, count=200)
            log.setLevel(old_level)
        except TwythonAuthError as e:
            log.exception("  Problem trying to get people following")
            twitter_auth_issue(e)
            raise
        except:
            raise
        for u in following["users"]:
            yield u["screen_name"]
        cursor = following["next_cursor"]
        if cursor:
            # Back off between pages to avoid the rate limit.
            s = random.randint(55, 65)
            log.debug("  Sleeping %ds to avoid rate limit. Cursor: %s", s, cursor)
            time.sleep(s)
        else:
            log.debug("  Normal query end")
        max_loops -= 1
        if max_loops <= 0:
            # Hard cap on pages so a huge friends list can't stall us.
            log.debug("  Killing search due to max loops")
            break
    log.setLevel(old_level)
def get_followers(log, id):
    """Yield the full user dicts of accounts following ``id``.

    Pages through the followers list 200 entries at a time, sleeping
    roughly a minute between pages to stay under Twitter's rate limit,
    and gives up after 15 pages (~3000 accounts).  Note: unlike
    get_following(), this yields whole user dicts, not screen names.

    :param log: logger used for progress/diagnostics
    :param id: screen name whose followers are fetched
    """
    log.debug("  Getting people following % s", id)
    check_twitter_config()
    logging.captureWarnings(True)
    old_level = log.getEffectiveLevel()
    # Temporarily raise the log level: Twython is chatty during API calls.
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    log.setLevel(old_level)
    # Twitter cursoring: -1 means "first page", 0 means "no more pages".
    cursor = -1
    max_loops = 15
    while cursor != 0:
        try:
            log.setLevel(logging.ERROR)
            following = twitter.get_followers_list(screen_name=id, cursor=cursor, count=200)
            log.setLevel(old_level)
        except TwythonAuthError as e:
            log.exception("  Problem trying to get people following")
            twitter_auth_issue(e)
            raise
        except:
            raise
        for u in following["users"]:
            yield u
        cursor = following["next_cursor"]
        if cursor:
            # Back off between pages to avoid the rate limit.
            s = random.randint(55, 65)
            log.debug("  Sleeping %ds to avoid rate limit. Cursor: %s", s, cursor)
            time.sleep(s)
        else:
            log.debug("  Normal query end")
        max_loops -= 1
        if max_loops <= 0:
            # Hard cap on pages so a huge followers list can't stall us.
            log.debug("  Killing search due to max loops")
            break
    log.setLevel(old_level)
| 9,078 | 0 | 322 |
fe324a340f51930eebe933a0766ef5d1097b3453 | 47,966 | py | Python | pycaret/anomaly.py | AJarman/pycaret | e96fefbf95c9e0195ec07ea63ebe25a8ce98baf3 | [
"MIT"
] | null | null | null | pycaret/anomaly.py | AJarman/pycaret | e96fefbf95c9e0195ec07ea63ebe25a8ce98baf3 | [
"MIT"
] | null | null | null | pycaret/anomaly.py | AJarman/pycaret | e96fefbf95c9e0195ec07ea63ebe25a8ce98baf3 | [
"MIT"
] | null | null | null | # Module: Anomaly Detection
# Author: Moez Ali <moez.ali@queensu.ca>
# License: MIT
# Release: PyCaret 2.2.0
# Last modified : 25/10/2020
import logging
import pandas as pd
import numpy as np
from pycaret.internal.pycaret_experiment import AnomalyExperiment, ClusteringExperiment
from pycaret.internal.utils import check_if_global_is_not_none
from typing import List, Tuple, Any, Union, Optional, Dict
import warnings
warnings.filterwarnings("ignore")
# Experiment class backing this functional API module.
_EXPERIMENT_CLASS = AnomalyExperiment
# Module-level singleton experiment; populated by setup().
_CURRENT_EXPERIMENT = None
_CURRENT_EXPERIMENT_EXCEPTION = (
    "_CURRENT_EXPERIMENT global variable is not set. Please run setup() first."
)
# Mapping used by the check_if_global_is_not_none decorator to produce
# a helpful error when the API is used before setup().
_CURRENT_EXPERIMENT_DECORATOR_DICT = {
    "_CURRENT_EXPERIMENT": _CURRENT_EXPERIMENT_EXCEPTION
}
def setup(
data,
preprocess: bool = True,
imputation_type: str = "simple",
iterative_imputation_iters: int = 5,
categorical_features: Optional[List[str]] = None,
categorical_imputation: str = "mode",
categorical_iterative_imputer: Union[str, Any] = "lightgbm",
ordinal_features: Optional[Dict[str, list]] = None,
high_cardinality_features: Optional[List[str]] = None,
high_cardinality_method: str = "frequency",
numeric_features: Optional[List[str]] = None,
numeric_imputation: str = "mean",
numeric_iterative_imputer: Union[str, Any] = "lightgbm",
date_features: Optional[List[str]] = None,
ignore_features: Optional[List[str]] = None,
normalize: bool = False,
normalize_method: str = "zscore",
transformation: bool = False,
transformation_method: str = "yeo-johnson",
handle_unknown_categorical: bool = True,
unknown_categorical_method: str = "least_frequent",
pca: bool = False,
pca_method: str = "linear",
pca_components: Optional[float] = None,
ignore_low_variance: bool = False,
combine_rare_levels: bool = False,
rare_level_threshold: float = 0.10,
bin_numeric_features: Optional[List[str]] = None,
remove_multicollinearity: bool = False,
multicollinearity_threshold: float = 0.9,
remove_perfect_collinearity: bool = False,
group_features: Optional[List[str]] = None,
group_names: Optional[List[str]] = None,
n_jobs: Optional[int] = -1,
use_gpu: bool = False,
custom_pipeline: Union[
Any, Tuple[str, Any], List[Any], List[Tuple[str, Any]]
] = None,
html: bool = True,
session_id: Optional[int] = None,
system_log: Union[bool, logging.Logger] = True,
log_experiment: bool = False,
experiment_name: Optional[str] = None,
log_plots: Union[bool, list] = False,
log_profile: bool = False,
log_data: bool = False,
silent: bool = False,
verbose: bool = True,
profile: bool = False,
profile_kwargs: Dict[str, Any] = None,
):
"""
This function initializes the training environment and creates the transformation
pipeline. Setup function must be called before executing any other function. It
takes one mandatory parameter: ``data``. All the other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> anomaly = get_data('anomaly')
>>> from pycaret.anomaly import *
>>> exp_name = setup(data = anomaly)
data: pandas.DataFrame
Shape (n_samples, n_features), where n_samples is the number of samples and
n_features is the number of features.
preprocess: bool, default = True
When set to False, no transformations are applied except for custom
transformations passed in ``custom_pipeline`` param. Data must be
ready for modeling (no missing values, no dates, categorical data encoding),
when preprocess is set to False.
imputation_type: str, default = 'simple'
The type of imputation to use. Can be either 'simple' or 'iterative'.
iterative_imputation_iters: int, default = 5
Number of iterations. Ignored when ``imputation_type`` is not 'iterative'.
categorical_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
categorical_features param can be used to overwrite or define the data types.
It takes a list of strings with column names that are categorical.
categorical_imputation: str, default = 'constant'
Missing values in categorical features are imputed with a constant 'not_available'
value. The other available option is 'mode'.
categorical_iterative_imputer: str, default = 'lightgbm'
Estimator for iterative imputation of missing values in categorical features.
Ignored when ``imputation_type`` is not 'iterative'.
ordinal_features: dict, default = None
Encode categorical features as ordinal. For example, a categorical feature with
'low', 'medium', 'high' values where low < medium < high can be passed as
ordinal_features = { 'column_name' : ['low', 'medium', 'high'] }.
high_cardinality_features: list of str, default = None
When categorical features contains many levels, it can be compressed into fewer
levels using this parameter. It takes a list of strings with column names that
are categorical.
high_cardinality_method: str, default = 'frequency'
Categorical features with high cardinality are replaced with the frequency of
values in each level occurring in the training dataset. Other available method
is 'clustering' which trains the K-Means clustering algorithm on the statistical
attribute of the training data and replaces the original value of feature with the
cluster label. The number of clusters is determined by optimizing Calinski-Harabasz
and Silhouette criterion.
numeric_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
numeric_features param can be used to overwrite or define the data types.
It takes a list of strings with column names that are numeric.
numeric_imputation: str, default = 'mean'
Missing values in numeric features are imputed with 'mean' value of the feature
in the training dataset. The other available option is 'median' or 'zero'.
numeric_iterative_imputer: str, default = 'lightgbm'
Estimator for iterative imputation of missing values in numeric features.
Ignored when ``imputation_type`` is set to 'simple'.
date_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
date_features param can be used to overwrite or define the data types. It takes
a list of strings with column names that are DateTime.
ignore_features: list of str, default = None
ignore_features param can be used to ignore features during model training.
It takes a list of strings with column names that are to be ignored.
normalize: bool, default = False
When set to True, it transforms the numeric features by scaling them to a given
range. Type of scaling is defined by the ``normalize_method`` parameter.
normalize_method: str, default = 'zscore'
Defines the method for scaling. By default, normalize method is set to 'zscore'
The standard zscore is calculated as z = (x - u) / s. Ignored when ``normalize``
is not True. The other options are:
- minmax: scales and translates each feature individually such that it is in
the range of 0 - 1.
- maxabs: scales and translates each feature individually such that the
maximal absolute value of each feature will be 1.0. It does not
shift/center the data, and thus does not destroy any sparsity.
- robust: scales and translates each feature according to the Interquartile
range. When the dataset contains outliers, robust scaler often gives
better results.
transformation: bool, default = False
When set to True, it applies the power transform to make data more Gaussian-like.
Type of transformation is defined by the ``transformation_method`` parameter.
transformation_method: str, default = 'yeo-johnson'
Defines the method for transformation. By default, the transformation method is
set to 'yeo-johnson'. The other available option for transformation is 'quantile'.
Ignored when ``transformation`` is not True.
handle_unknown_categorical: bool, default = True
When set to True, unknown categorical levels in unseen data are replaced by the
most or least frequent level as learned in the training dataset.
unknown_categorical_method: str, default = 'least_frequent'
Method used to replace unknown categorical levels in unseen data. Method can be
set to 'least_frequent' or 'most_frequent'.
pca: bool, default = False
When set to True, dimensionality reduction is applied to project the data into
a lower dimensional space using the method defined in ``pca_method`` parameter.
pca_method: str, default = 'linear'
The 'linear' method performs uses Singular Value Decomposition. Other options are:
- kernel: dimensionality reduction through the use of RBF kernel.
- incremental: replacement for 'linear' pca when the dataset is too large.
pca_components: int or float, default = None
Number of components to keep. if pca_components is a float, it is treated as a
target percentage for information retention. When pca_components is an integer
it is treated as the number of features to be kept. pca_components must be less
than the original number of features. Ignored when ``pca`` is not True.
ignore_low_variance: bool, default = False
When set to True, all categorical features with insignificant variances are
removed from the data. The variance is calculated using the ratio of unique
values to the number of samples, and the ratio of the most common value to the
frequency of the second most common value.
combine_rare_levels: bool, default = False
When set to True, frequency percentile for levels in categorical features below
a certain threshold is combined into a single level.
rare_level_threshold: float, default = 0.1
Percentile distribution below which rare categories are combined. Ignored when
``combine_rare_levels`` is not True.
bin_numeric_features: list of str, default = None
To convert numeric features into categorical, bin_numeric_features parameter can
be used. It takes a list of strings with column names to be discretized. It does
so by using 'sturges' rule to determine the number of clusters and then apply
KMeans algorithm. Original values of the feature are then replaced by the
cluster label.
remove_multicollinearity: bool, default = False
When set to True, features with the inter-correlations higher than the defined
threshold are removed. When two features are highly correlated with each other,
the feature that is less correlated with the target variable is removed. Only
considers numeric features.
multicollinearity_threshold: float, default = 0.9
Threshold for correlated features. Ignored when ``remove_multicollinearity``
is not True.
remove_perfect_collinearity: bool, default = True
When set to True, perfect collinearity (features with correlation = 1) is removed
from the dataset, when two features are 100% correlated, one of it is randomly
removed from the dataset.
group_features: list or list of list, default = None
When the dataset contains features with related characteristics, group_features
parameter can be used for feature extraction. It takes a list of strings with
column names that are related.
group_names: list, default = None
Group names to be used in naming new features. When the length of group_names
does not match with the length of ``group_features``, new features are named
sequentially group_1, group_2, etc. It is ignored when ``group_features`` is
None.
n_jobs: int, default = -1
The number of jobs to run in parallel (for functions that supports parallel
processing) -1 means using all processors. To run all functions on single
processor set n_jobs to None.
use_gpu: bool or str, default = False
When set to True, it will use GPU for training with algorithms that support it,
and fall back to CPU if they are unavailable. When set to 'force', it will only
use GPU-enabled algorithms and raise exceptions when they are unavailable. When
False, all algorithms are trained using CPU only.
GPU enabled algorithms:
- None at this moment.
custom_pipeline: (str, transformer) or list of (str, transformer), default = None
When passed, will append the custom transformers in the preprocessing pipeline
and are applied on each CV fold separately and on the final fit. All the custom
transformations are applied before pycaret's internal transformations.
html: bool, default = True
When set to False, prevents runtime display of monitor. This must be set to False
when the environment does not support IPython. For example, command line terminal,
Databricks Notebook, Spyder and other similar IDEs.
session_id: int, default = None
Controls the randomness of experiment. It is equivalent to 'random_state' in
scikit-learn. When None, a pseudo random number is generated. This can be used
for later reproducibility of the entire experiment.
system_log: bool or logging.Logger, default = True
Whether to save the system logging file (as logs.log). If the input
already is a logger object, that one is used instead.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the ``MLFlow`` server.
experiment_name: str, default = None
Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
log_plots: bool or list, default = False
When set to True, certain plots are logged automatically in the ``MLFlow`` server.
To change the type of plots to be logged, pass a list containing plot IDs. Refer
to documentation of ``plot_model``. Ignored when ``log_experiment`` is not True.
log_profile: bool, default = False
When set to True, data profile is logged on the ``MLflow`` server as a html file.
Ignored when ``log_experiment`` is not True.
log_data: bool, default = False
When set to True, dataset is logged on the ``MLflow`` server as a csv file.
Ignored when ``log_experiment`` is not True.
silent: bool, default = False
Controls the confirmation input of data types when ``setup`` is executed. When
executing in completely automated mode or on a remote kernel, this must be True.
verbose: bool, default = True
When set to False, Information grid is not printed.
profile: bool, default = False
When set to True, an interactive EDA report is displayed.
profile_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the ProfileReport method used
to create the EDA report. Ignored if ``profile`` is False.
Returns:
Global variables that can be changed using the ``set_config`` function.
"""
exp = _EXPERIMENT_CLASS()
set_current_experiment(exp)
return exp.setup(
data=data,
preprocess=preprocess,
imputation_type=imputation_type,
iterative_imputation_iters=iterative_imputation_iters,
categorical_features=categorical_features,
categorical_imputation=categorical_imputation,
categorical_iterative_imputer=categorical_iterative_imputer,
ordinal_features=ordinal_features,
high_cardinality_features=high_cardinality_features,
high_cardinality_method=high_cardinality_method,
numeric_features=numeric_features,
numeric_imputation=numeric_imputation,
numeric_iterative_imputer=numeric_iterative_imputer,
date_features=date_features,
ignore_features=ignore_features,
normalize=normalize,
normalize_method=normalize_method,
transformation=transformation,
transformation_method=transformation_method,
handle_unknown_categorical=handle_unknown_categorical,
unknown_categorical_method=unknown_categorical_method,
pca=pca,
pca_method=pca_method,
pca_components=pca_components,
ignore_low_variance=ignore_low_variance,
combine_rare_levels=combine_rare_levels,
rare_level_threshold=rare_level_threshold,
bin_numeric_features=bin_numeric_features,
remove_multicollinearity=remove_multicollinearity,
multicollinearity_threshold=multicollinearity_threshold,
remove_perfect_collinearity=remove_perfect_collinearity,
group_features=group_features,
group_names=group_names,
n_jobs=n_jobs,
use_gpu=use_gpu,
custom_pipeline=custom_pipeline,
html=html,
session_id=session_id,
system_log=system_log,
log_experiment=log_experiment,
experiment_name=experiment_name,
log_plots=log_plots,
log_profile=log_profile,
log_data=log_data,
silent=silent,
verbose=verbose,
profile=profile,
profile_kwargs=profile_kwargs,
)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def create_model(
    model: Union[str, Any],
    fraction: float = 0.05,
    verbose: bool = True,
    fit_kwargs: Optional[dict] = None,
    **kwargs,
):
    """
    Train an anomaly detection model from the model library. The table of
    available models can be obtained with the ``models`` function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')

    model: str or scikit-learn compatible object
        ID of a model available in the model library, or an untrained model
        object consistent with the scikit-learn API. Available estimators
        (ID - Name):

        * 'abod' - Angle-base Outlier Detection
        * 'cluster' - Clustering-Based Local Outlier
        * 'cof' - Connectivity-Based Outlier Factor
        * 'histogram' - Histogram-based Outlier Detection
        * 'knn' - k-Nearest Neighbors Detector
        * 'lof' - Local Outlier Factor
        * 'svm' - One-class SVM detector
        * 'pca' - Principal Component Analysis
        * 'mcd' - Minimum Covariance Determinant
        * 'sod' - Subspace Outlier Detection
        * 'sos' - Stochastic Outlier Selection

    fraction: float, default = 0.05
        Expected proportion of outliers in the data set (the contamination).
        Used when fitting to define the threshold on the decision function.

    verbose: bool, default = True
        When set to False, no status updates are printed.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.

    **kwargs:
        Additional keyword arguments forwarded to the estimator.

    Returns:
        Trained Model
    """
    # Thin delegation wrapper: the public parameter is named ``model`` while
    # the experiment API expects ``estimator``.
    call_kwargs = {
        "estimator": model,
        "fraction": fraction,
        "fit_kwargs": fit_kwargs,
        "verbose": verbose,
    }
    call_kwargs.update(kwargs)
    return _CURRENT_EXPERIMENT.create_model(**call_kwargs)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def assign_model(
    model, transformation: bool = False, score: bool = True, verbose: bool = True
) -> pd.DataFrame:
    """
    Attach the anomaly labels produced by a trained model to the dataset
    (1 = outlier, 0 = inlier).

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> knn_df = assign_model(knn)

    model: scikit-learn compatible object
        Trained model object.

    transformation: bool, default = False
        When True, labels are applied on the transformed dataset instead of
        the original one.

    score: bool, default = True
        Whether to include the outlier score column.

    verbose: bool, default = True
        When set to False, no status updates are printed.

    Returns:
        pandas.DataFrame
    """
    # Forward everything to the active experiment unchanged.
    experiment = _CURRENT_EXPERIMENT
    return experiment.assign_model(
        model, transformation=transformation, score=score, verbose=verbose
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def plot_model(
    model,
    plot: str = "tsne",
    feature: Optional[str] = None,
    label: bool = False,
    scale: float = 1,
    save: bool = False,
    display_format: Optional[str] = None,
):
    """
    Analyze the performance of a trained model with a dimensionality plot.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> plot_model(knn, plot = 'tsne')

    model: scikit-learn compatible object
        Trained Model Object.

    plot: str, default = 'tsne'
        List of available plots (ID - Name):

        * 'tsne' - t-SNE (3d) Dimension Plot
        * 'umap' - UMAP Dimensionality Plot

    feature: str, default = None
        Feature used as a hoverover tooltip and/or label when ``label`` is
        True. Falls back to the first column of the dataset when None.

    label: bool, default = False
        Name of column to be used as data labels.

    scale: float, default = 1
        Resolution scale of the figure.

    save: bool, default = False
        When True, the plot is saved in the current working directory.

    display_format: str, default = None
        Set to 'streamlit' to render plots inside Streamlit
        (https://www.streamlit.io/). Not every plot type is supported.

    Returns:
        None
    """
    # Public ``feature`` maps to the experiment API's ``feature_name``.
    options = {
        "plot": plot,
        "feature_name": feature,
        "label": label,
        "scale": scale,
        "save": save,
        "display_format": display_format,
    }
    return _CURRENT_EXPERIMENT.plot_model(model, **options)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def evaluate_model(
    model, feature: Optional[str] = None, fit_kwargs: Optional[dict] = None,
):
    """
    Display a user interface for analyzing the performance of a trained
    model. Internally this calls the ``plot_model`` function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> evaluate_model(knn)

    model: scikit-learn compatible object
        Trained model object.

    feature: str, default = None
        Feature used as a hoverover tooltip and/or label when the ``label``
        param is set to True. Defaults to the first column of the dataset
        when None.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.

    Returns:
        None

    Warnings
    --------
    - This function only works in IPython enabled Notebook.
    """
    # ``feature`` is renamed to ``feature_name`` for the experiment API.
    return _CURRENT_EXPERIMENT.evaluate_model(
        estimator=model,
        feature_name=feature,
        fit_kwargs=fit_kwargs,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def tune_model(
    model,
    supervised_target: str,
    supervised_type: Optional[str] = None,
    supervised_estimator: Union[str, Any] = "lr",
    method: str = "drop",
    optimize: Optional[str] = None,
    custom_grid: Optional[List[int]] = None,
    fold: int = 10,
    fit_kwargs: Optional[dict] = None,
    groups: Optional[Union[str, Any]] = None,
    round: int = 4,
    verbose: bool = True,
):
    """
    This function tunes the ``fraction`` parameter of a given model.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> juice = get_data('juice')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = juice)
    >>> tuned_knn = tune_model(model = 'knn', supervised_target = 'Purchase')

    model: str
        ID of an model available in the model library. Models that can be
        tuned in this function (ID - Model):

        * 'abod' - Angle-base Outlier Detection
        * 'cluster' - Clustering-Based Local Outlier
        * 'cof' - Connectivity-Based Outlier Factor
        * 'histogram' - Histogram-based Outlier Detection
        * 'knn' - k-Nearest Neighbors Detector
        * 'lof' - Local Outlier Factor
        * 'svm' - One-class SVM detector
        * 'pca' - Principal Component Analysis
        * 'mcd' - Minimum Covariance Determinant
        * 'sod' - Subspace Outlier Detection
        * 'sos' - Stochastic Outlier Selection

    supervised_target: str
        Name of the target column containing labels.

    supervised_type: str, default = None
        Type of task. 'classification' or 'regression'. Automatically inferred
        when None.

    supervised_estimator: str, default = 'lr'
        Classification (ID - Name):

        * 'lr' - Logistic Regression (Default)
        * 'knn' - K Nearest Neighbour
        * 'nb' - Naive Bayes
        * 'dt' - Decision Tree Classifier
        * 'svm' - SVM - Linear Kernel
        * 'rbfsvm' - SVM - Radial Kernel
        * 'gpc' - Gaussian Process Classifier
        * 'mlp' - Multi Level Perceptron
        * 'ridge' - Ridge Classifier
        * 'rf' - Random Forest Classifier
        * 'qda' - Quadratic Discriminant Analysis
        * 'ada' - Ada Boost Classifier
        * 'gbc' - Gradient Boosting Classifier
        * 'lda' - Linear Discriminant Analysis
        * 'et' - Extra Trees Classifier
        * 'xgboost' - Extreme Gradient Boosting
        * 'lightgbm' - Light Gradient Boosting
        * 'catboost' - CatBoost Classifier

        Regression (ID - Name):

        * 'lr' - Linear Regression (Default)
        * 'lasso' - Lasso Regression
        * 'ridge' - Ridge Regression
        * 'en' - Elastic Net
        * 'lar' - Least Angle Regression
        * 'llar' - Lasso Least Angle Regression
        * 'omp' - Orthogonal Matching Pursuit
        * 'br' - Bayesian Ridge
        * 'ard' - Automatic Relevance Determ.
        * 'par' - Passive Aggressive Regressor
        * 'ransac' - Random Sample Consensus
        * 'tr' - TheilSen Regressor
        * 'huber' - Huber Regressor
        * 'kr' - Kernel Ridge
        * 'svm' - Support Vector Machine
        * 'knn' - K Neighbors Regressor
        * 'dt' - Decision Tree
        * 'rf' - Random Forest
        * 'et' - Extra Trees Regressor
        * 'ada' - AdaBoost Regressor
        * 'gbr' - Gradient Boosting
        * 'mlp' - Multi Level Perceptron
        * 'xgboost' - Extreme Gradient Boosting
        * 'lightgbm' - Light Gradient Boosting
        * 'catboost' - CatBoost Regressor

    method: str, default = 'drop'
        When method set to drop, it will drop the outliers from training dataset.
        When 'surrogate', it uses decision function and label as a feature during
        training.

    optimize: str, default = None
        For Classification tasks:
        Accuracy, AUC, Recall, Precision, F1, Kappa (default = 'Accuracy')

        For Regression tasks:
        MAE, MSE, RMSE, R2, RMSLE, MAPE (default = 'R2')

    custom_grid: list, default = None
        By default, a pre-defined list of fraction values is iterated over to
        optimize the supervised objective. To overwrite default iteration,
        pass a list of fraction value to iterate over in custom_grid param.

    fold: int, default = 10
        Number of folds to be used in Kfold CV. Must be at least 2.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the supervised
        estimator.

    groups: str or array-like, default = None
        Optional group labels when a group-based CV splitter is used. When a
        string is passed, it is interpreted as the name of the column
        containing the groups.

    round: int, default = 4
        Number of decimal places the metrics in the score grid are rounded to.

    verbose: bool, default = True
        Status update is not printed when verbose is set to False.

    Returns:
        Trained Model with optimized ``fraction`` parameter.
    """
    # Pure delegation; all arguments map 1:1 onto the experiment API.
    return _CURRENT_EXPERIMENT.tune_model(
        model=model,
        supervised_target=supervised_target,
        supervised_type=supervised_type,
        supervised_estimator=supervised_estimator,
        method=method,
        optimize=optimize,
        custom_grid=custom_grid,
        fold=fold,
        fit_kwargs=fit_kwargs,
        groups=groups,
        round=round,
        verbose=verbose,
    )
# not using check_if_global_is_not_none on purpose
def predict_model(model, data: pd.DataFrame) -> pd.DataFrame:
    """
    Generate anomaly labels on new data using a trained model.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> knn_predictions = predict_model(model = knn, data = unseen_data)

    model: scikit-learn compatible object
        Trained Model Object.

    data : pandas.DataFrame
        Shape (n_samples, n_features) where n_samples is the number of samples
        and n_features is the number of features.

    Returns:
        pandas.DataFrame

    Warnings
    --------
    - The behavior of the predict_model is changed in version 2.1 without backward compatibility.
      As such, the pipelines trained using the version (<= 2.0), may not work for inference
      with version >= 2.1. You can either retrain your models with a newer version or downgrade
      the version for inference.
    """
    # Fall back to a fresh experiment so inference also works when setup()
    # was never called in this session (e.g. scoring a loaded pipeline).
    exp = _CURRENT_EXPERIMENT
    if exp is None:
        exp = _EXPERIMENT_CLASS()
    return exp.predict_model(estimator=model, data=data)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def deploy_model(
    model, model_name: str, authentication: dict, platform: str = "aws",
):
    """
    Deploy the transformation pipeline and trained model on cloud storage.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> # sets appropriate credentials for the platform as environment variables
    >>> import os
    >>> os.environ["AWS_ACCESS_KEY_ID"] = str("foo")
    >>> os.environ["AWS_SECRET_ACCESS_KEY"] = str("bar")
    >>> deploy_model(model = knn, model_name = 'knn-for-deployment', platform = 'aws', authentication = {'bucket' : 'S3-bucket-name'})

    Amazon Web Service (AWS) users:
        To deploy a model on AWS S3 ('aws'), credentials must be available,
        most easily through environment variables. From the IAM portal of the
        amazon console account you need:

        - AWS Access Key ID
        - AWS Secret Key Access

        More info: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#environment-variables

    Google Cloud Platform (GCP) users:
        To deploy a model on Google Cloud Platform ('gcp'), a project must be
        created via command line or the GCP console. Then create a service
        account and download its key as a JSON file to set environment
        variables in your local environment.

        More info: https://cloud.google.com/docs/authentication/production

    Microsoft Azure (Azure) users:
        To deploy a model on Microsoft Azure ('azure'), an environment
        variable holding the connection string must be set locally. The
        connection string is found in the storage account settings on the
        Azure portal.

        - AZURE_STORAGE_CONNECTION_STRING (required as environment variable)

        More info: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?toc=%2Fpython%2Fazure%2FTOC.json

    model: scikit-learn compatible object
        Trained model object.

    model_name: str
        Name of model.

    authentication: dict
        Dictionary of applicable authentication tokens.

        When platform = 'aws':
        {'bucket' : 'S3-bucket-name', 'path': (optional) folder name under the bucket}

        When platform = 'gcp':
        {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}

        When platform = 'azure':
        {'container': 'azure-container-name'}

    platform: str, default = 'aws'
        Name of the platform. Currently supported platforms: 'aws', 'gcp' and 'azure'.

    Returns:
        None
    """
    # Delegate without modification; credentials are resolved downstream.
    experiment = _CURRENT_EXPERIMENT
    return experiment.deploy_model(
        model=model,
        model_name=model_name,
        authentication=authentication,
        platform=platform,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def save_model(
    model, model_name: str, model_only: bool = False, verbose: bool = True, **kwargs
):
    """
    Save the transformation pipeline and trained model object in the current
    working directory as a pickle file for later use.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> save_model(knn, 'saved_knn_model')

    model: scikit-learn compatible object
        Trained model object.

    model_name: str
        Name of the model.

    model_only: bool, default = False
        When True, only the trained model object is saved instead of the
        entire pipeline.

    verbose: bool, default = True
        Success message is not printed when verbose is set to False.

    **kwargs:
        Additional keyword arguments to pass to joblib.dump().

    Returns:
        Tuple of the model object and the filename.
    """
    # Collect the call once, then splat — behavior identical to direct call.
    save_args = dict(
        model=model, model_name=model_name, model_only=model_only, verbose=verbose
    )
    save_args.update(kwargs)
    return _CURRENT_EXPERIMENT.save_model(**save_args)
# not using check_if_global_is_not_none on purpose
def load_model(
    model_name,
    platform: Optional[str] = None,
    authentication: Optional[Dict[str, str]] = None,
    verbose: bool = True,
):
    """
    Load a previously saved pipeline.

    Example
    -------
    >>> from pycaret.anomaly import load_model
    >>> saved_knn = load_model('saved_knn_model')

    model_name: str
        Name of the model.

    platform: str, default = None
        Name of the cloud platform. Currently supported platforms:
        'aws', 'gcp' and 'azure'.

    authentication: dict, default = None
        dictionary of applicable authentication tokens.

        when platform = 'aws':
        {'bucket' : 'S3-bucket-name'}

        when platform = 'gcp':
        {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}

        when platform = 'azure':
        {'container': 'azure-container-name'}

    verbose: bool, default = True
        Success message is not printed when verbose is set to False.

    Returns:
        Trained Model
    """
    # Works without a prior setup(): build a throwaway experiment when no
    # global one is active.
    exp = _CURRENT_EXPERIMENT if _CURRENT_EXPERIMENT is not None else _EXPERIMENT_CLASS()
    return exp.load_model(
        model_name=model_name,
        platform=platform,
        authentication=authentication,
        verbose=verbose,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def models(internal: bool = False, raise_errors: bool = True,) -> pd.DataFrame:
    """
    Return the table of models available in the model library.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> all_models = models()

    internal: bool, default = False
        If True, will return extra columns and rows used internally.

    raise_errors: bool, default = True
        If False, will suppress all exceptions, ignoring models
        that couldn't be created.

    Returns:
        pandas.DataFrame
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.models(internal=internal, raise_errors=raise_errors)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_logs(experiment_name: Optional[str] = None, save: bool = False) -> pd.DataFrame:
    """
    Return a table of experiment logs. Only works when ``log_experiment``
    was True when initializing the ``setup`` function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly, log_experiment = True)
    >>> knn = create_model('knn')
    >>> exp_logs = get_logs()

    experiment_name: str, default = None
        When None current active run is used.

    save: bool, default = False
        When set to True, csv file is saved in current working directory.

    Returns:
        pandas.DataFrame
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_logs(experiment_name=experiment_name, save=save)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_config(variable: str):
    """
    Retrieve one of the global variables created when initializing the
    ``setup`` function. Accessible variables:

    - X: Transformed dataset (X)
    - data_before_preprocess: data before preprocessing
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline configured through setup
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment set through setup
    - logging_param: log_experiment param set through setup
    - log_plots_param: log_plots param set through setup
    - USI: Unique session ID parameter set through setup
    - gpu_param: use_gpu param configured through setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> X = get_config('X')

    Returns:
        Global variable
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_config(variable=variable)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def set_config(variable: str, value):
    """
    Reset one of the global variables. Accessible variables:

    - X: Transformed dataset (X)
    - data_before_preprocess: data before preprocessing
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline configured through setup
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment set through setup
    - logging_param: log_experiment param set through setup
    - log_plots_param: log_plots param set through setup
    - USI: Unique session ID parameter set through setup
    - gpu_param: use_gpu param configured through setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> set_config('seed', 123)

    Returns:
        None
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.set_config(variable=variable, value=value)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def save_config(file_name: str):
    """
    Save all global variables to a pickle file so a later session can resume
    without rerunning the ``setup`` function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> save_config('myvars.pkl')

    Returns:
        None
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.save_config(file_name=file_name)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def load_config(file_name: str):
    """
    Load global variables from a pickle file into the Python environment.

    Example
    -------
    >>> from pycaret.anomaly import load_config
    >>> load_config('myvars.pkl')

    Returns:
        Global variables
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.load_config(file_name=file_name)
def get_outliers(
    data,
    model: Union[str, Any] = "knn",
    fraction: float = 0.05,
    fit_kwargs: Optional[dict] = None,
    preprocess: bool = True,
    imputation_type: str = "simple",
    iterative_imputation_iters: int = 5,
    categorical_features: Optional[List[str]] = None,
    categorical_imputation: str = "mode",
    categorical_iterative_imputer: Union[str, Any] = "lightgbm",
    ordinal_features: Optional[Dict[str, list]] = None,
    high_cardinality_features: Optional[List[str]] = None,
    high_cardinality_method: str = "frequency",
    numeric_features: Optional[List[str]] = None,
    numeric_imputation: str = "mean",  # method 'zero' added in pycaret==2.1
    numeric_iterative_imputer: Union[str, Any] = "lightgbm",
    date_features: Optional[List[str]] = None,
    ignore_features: Optional[List[str]] = None,
    normalize: bool = False,
    normalize_method: str = "zscore",
    transformation: bool = False,
    transformation_method: str = "yeo-johnson",
    handle_unknown_categorical: bool = True,
    unknown_categorical_method: str = "least_frequent",
    pca: bool = False,
    pca_method: str = "linear",
    pca_components: Optional[float] = None,
    ignore_low_variance: bool = False,
    combine_rare_levels: bool = False,
    rare_level_threshold: float = 0.10,
    bin_numeric_features: Optional[List[str]] = None,
    remove_multicollinearity: bool = False,
    multicollinearity_threshold: float = 0.9,
    remove_perfect_collinearity: bool = False,
    group_features: Optional[List[str]] = None,
    group_names: Optional[List[str]] = None,
    n_jobs: Optional[int] = -1,
    session_id: Optional[int] = None,
    system_log: Union[bool, logging.Logger] = True,
    log_experiment: bool = False,
    experiment_name: Optional[str] = None,
    log_plots: Union[bool, list] = False,
    log_profile: bool = False,
    log_data: bool = False,
    profile: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """
    Callable from any external environment without requiring setup initialization.

    One-shot convenience helper: runs ``setup`` on a private experiment
    instance (non-interactively: html=False, silent=True, verbose=False),
    trains ``model`` with the given ``fraction``, and returns the dataset
    with anomaly labels assigned via ``assign_model``. The module-level
    ``_CURRENT_EXPERIMENT`` global is not touched.

    data: dataframe-like
        Input dataset, forwarded to ``setup``'s ``data`` parameter.

    model: str or scikit-learn compatible object, default = 'knn'
        ID of a model from the model library or an untrained model object;
        see ``create_model`` for the list of IDs.

    fraction: float, default = 0.05
        Expected proportion of outliers (contamination) passed to
        ``create_model``.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.

    All remaining keyword parameters mirror the preprocessing / logging
    parameters of ``setup`` (same names, same defaults) and are forwarded
    unchanged; refer to the ``setup`` docstring for their meaning.

    **kwargs:
        Additional keyword arguments forwarded to ``create_model`` and, from
        there, to the estimator.

    Returns:
        pandas.DataFrame
    """
    # A private experiment keeps this call side-effect free with respect to
    # the module-global experiment used by the rest of the API.
    exp = _EXPERIMENT_CLASS()
    exp.setup(
        data=data,
        preprocess=preprocess,
        imputation_type=imputation_type,
        iterative_imputation_iters=iterative_imputation_iters,
        categorical_features=categorical_features,
        categorical_imputation=categorical_imputation,
        categorical_iterative_imputer=categorical_iterative_imputer,
        ordinal_features=ordinal_features,
        high_cardinality_features=high_cardinality_features,
        high_cardinality_method=high_cardinality_method,
        numeric_features=numeric_features,
        numeric_imputation=numeric_imputation,
        numeric_iterative_imputer=numeric_iterative_imputer,
        date_features=date_features,
        ignore_features=ignore_features,
        normalize=normalize,
        normalize_method=normalize_method,
        transformation=transformation,
        transformation_method=transformation_method,
        handle_unknown_categorical=handle_unknown_categorical,
        unknown_categorical_method=unknown_categorical_method,
        pca=pca,
        pca_method=pca_method,
        pca_components=pca_components,
        ignore_low_variance=ignore_low_variance,
        combine_rare_levels=combine_rare_levels,
        rare_level_threshold=rare_level_threshold,
        bin_numeric_features=bin_numeric_features,
        remove_multicollinearity=remove_multicollinearity,
        multicollinearity_threshold=multicollinearity_threshold,
        remove_perfect_collinearity=remove_perfect_collinearity,
        group_features=group_features,
        group_names=group_names,
        n_jobs=n_jobs,
        # Non-interactive, quiet setup: suitable for scripts and remote kernels.
        html=False,
        session_id=session_id,
        system_log=system_log,
        log_experiment=log_experiment,
        experiment_name=experiment_name,
        log_plots=log_plots,
        log_profile=log_profile,
        log_data=log_data,
        silent=True,
        verbose=False,
        profile=profile,
    )
    c = exp.create_model(
        model=model, fraction=fraction, fit_kwargs=fit_kwargs, verbose=False, **kwargs,
    )
    return exp.assign_model(c, verbose=False)
| 34.507914 | 136 | 0.65142 | # Module: Anomaly Detection
# Author: Moez Ali <moez.ali@queensu.ca>
# License: MIT
# Release: PyCaret 2.2.0
# Last modified : 25/10/2020
import logging
import pandas as pd
import numpy as np
from pycaret.internal.pycaret_experiment import AnomalyExperiment, ClusteringExperiment
from pycaret.internal.utils import check_if_global_is_not_none
from typing import List, Tuple, Any, Union, Optional, Dict
import warnings
warnings.filterwarnings("ignore")
_EXPERIMENT_CLASS = AnomalyExperiment
_CURRENT_EXPERIMENT = None
_CURRENT_EXPERIMENT_EXCEPTION = (
"_CURRENT_EXPERIMENT global variable is not set. Please run setup() first."
)
_CURRENT_EXPERIMENT_DECORATOR_DICT = {
"_CURRENT_EXPERIMENT": _CURRENT_EXPERIMENT_EXCEPTION
}
def setup(
    data,
    preprocess: bool = True,
    imputation_type: str = "simple",
    iterative_imputation_iters: int = 5,
    categorical_features: Optional[List[str]] = None,
    categorical_imputation: str = "mode",
    categorical_iterative_imputer: Union[str, Any] = "lightgbm",
    ordinal_features: Optional[Dict[str, list]] = None,
    high_cardinality_features: Optional[List[str]] = None,
    high_cardinality_method: str = "frequency",
    numeric_features: Optional[List[str]] = None,
    numeric_imputation: str = "mean",
    numeric_iterative_imputer: Union[str, Any] = "lightgbm",
    date_features: Optional[List[str]] = None,
    ignore_features: Optional[List[str]] = None,
    normalize: bool = False,
    normalize_method: str = "zscore",
    transformation: bool = False,
    transformation_method: str = "yeo-johnson",
    handle_unknown_categorical: bool = True,
    unknown_categorical_method: str = "least_frequent",
    pca: bool = False,
    pca_method: str = "linear",
    pca_components: Optional[float] = None,
    ignore_low_variance: bool = False,
    combine_rare_levels: bool = False,
    rare_level_threshold: float = 0.10,
    bin_numeric_features: Optional[List[str]] = None,
    remove_multicollinearity: bool = False,
    multicollinearity_threshold: float = 0.9,
    remove_perfect_collinearity: bool = False,
    group_features: Optional[List[str]] = None,
    group_names: Optional[List[str]] = None,
    n_jobs: Optional[int] = -1,
    use_gpu: bool = False,
    custom_pipeline: Union[
        Any, Tuple[str, Any], List[Any], List[Tuple[str, Any]]
    ] = None,
    html: bool = True,
    session_id: Optional[int] = None,
    system_log: Union[bool, logging.Logger] = True,
    log_experiment: bool = False,
    experiment_name: Optional[str] = None,
    log_plots: Union[bool, list] = False,
    log_profile: bool = False,
    log_data: bool = False,
    silent: bool = False,
    verbose: bool = True,
    profile: bool = False,
    profile_kwargs: Dict[str, Any] = None,
):
    """
    This function initializes the training environment and creates the transformation
    pipeline. Setup function must be called before executing any other function. It
    takes one mandatory parameter: ``data``. All the other parameters are optional.
    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    data: pandas.DataFrame
        Shape (n_samples, n_features), where n_samples is the number of samples and
        n_features is the number of features.
    preprocess: bool, default = True
        When set to False, no transformations are applied except for custom
        transformations passed in ``custom_pipeline`` param. Data must be
        ready for modeling (no missing values, no dates, categorical data encoding),
        when preprocess is set to False.
    imputation_type: str, default = 'simple'
        The type of imputation to use. Can be either 'simple' or 'iterative'.
    iterative_imputation_iters: int, default = 5
        Number of iterations. Ignored when ``imputation_type`` is not 'iterative'.
    categorical_features: list of str, default = None
        If the inferred data types are not correct or the silent param is set to True,
        categorical_features param can be used to overwrite or define the data types.
        It takes a list of strings with column names that are categorical.
    categorical_imputation: str, default = 'mode'
        Missing values in categorical features are imputed with the most frequent
        ('mode') value of the feature in the training dataset. The other available
        option is 'constant', which imputes a constant 'not_available' value.
    categorical_iterative_imputer: str, default = 'lightgbm'
        Estimator for iterative imputation of missing values in categorical features.
        Ignored when ``imputation_type`` is not 'iterative'.
    ordinal_features: dict, default = None
        Encode categorical features as ordinal. For example, a categorical feature with
        'low', 'medium', 'high' values where low < medium < high can be passed as
        ordinal_features = { 'column_name' : ['low', 'medium', 'high'] }.
    high_cardinality_features: list of str, default = None
        When categorical features contains many levels, it can be compressed into fewer
        levels using this parameter. It takes a list of strings with column names that
        are categorical.
    high_cardinality_method: str, default = 'frequency'
        Categorical features with high cardinality are replaced with the frequency of
        values in each level occurring in the training dataset. Other available method
        is 'clustering' which trains the K-Means clustering algorithm on the statistical
        attribute of the training data and replaces the original value of feature with the
        cluster label. The number of clusters is determined by optimizing Calinski-Harabasz
        and Silhouette criterion.
    numeric_features: list of str, default = None
        If the inferred data types are not correct or the silent param is set to True,
        numeric_features param can be used to overwrite or define the data types.
        It takes a list of strings with column names that are numeric.
    numeric_imputation: str, default = 'mean'
        Missing values in numeric features are imputed with 'mean' value of the feature
        in the training dataset. The other available option is 'median' or 'zero'.
    numeric_iterative_imputer: str, default = 'lightgbm'
        Estimator for iterative imputation of missing values in numeric features.
        Ignored when ``imputation_type`` is set to 'simple'.
    date_features: list of str, default = None
        If the inferred data types are not correct or the silent param is set to True,
        date_features param can be used to overwrite or define the data types. It takes
        a list of strings with column names that are DateTime.
    ignore_features: list of str, default = None
        ignore_features param can be used to ignore features during model training.
        It takes a list of strings with column names that are to be ignored.
    normalize: bool, default = False
        When set to True, it transforms the numeric features by scaling them to a given
        range. Type of scaling is defined by the ``normalize_method`` parameter.
    normalize_method: str, default = 'zscore'
        Defines the method for scaling. By default, normalize method is set to 'zscore'
        The standard zscore is calculated as z = (x - u) / s. Ignored when ``normalize``
        is not True. The other options are:
        - minmax: scales and translates each feature individually such that it is in
          the range of 0 - 1.
        - maxabs: scales and translates each feature individually such that the
          maximal absolute value of each feature will be 1.0. It does not
          shift/center the data, and thus does not destroy any sparsity.
        - robust: scales and translates each feature according to the Interquartile
          range. When the dataset contains outliers, robust scaler often gives
          better results.
    transformation: bool, default = False
        When set to True, it applies the power transform to make data more Gaussian-like.
        Type of transformation is defined by the ``transformation_method`` parameter.
    transformation_method: str, default = 'yeo-johnson'
        Defines the method for transformation. By default, the transformation method is
        set to 'yeo-johnson'. The other available option for transformation is 'quantile'.
        Ignored when ``transformation`` is not True.
    handle_unknown_categorical: bool, default = True
        When set to True, unknown categorical levels in unseen data are replaced by the
        most or least frequent level as learned in the training dataset.
    unknown_categorical_method: str, default = 'least_frequent'
        Method used to replace unknown categorical levels in unseen data. Method can be
        set to 'least_frequent' or 'most_frequent'.
    pca: bool, default = False
        When set to True, dimensionality reduction is applied to project the data into
        a lower dimensional space using the method defined in ``pca_method`` parameter.
    pca_method: str, default = 'linear'
        The 'linear' method performs uses Singular Value Decomposition. Other options are:
        - kernel: dimensionality reduction through the use of RBF kernel.
        - incremental: replacement for 'linear' pca when the dataset is too large.
    pca_components: int or float, default = None
        Number of components to keep. if pca_components is a float, it is treated as a
        target percentage for information retention. When pca_components is an integer
        it is treated as the number of features to be kept. pca_components must be less
        than the original number of features. Ignored when ``pca`` is not True.
    ignore_low_variance: bool, default = False
        When set to True, all categorical features with insignificant variances are
        removed from the data. The variance is calculated using the ratio of unique
        values to the number of samples, and the ratio of the most common value to the
        frequency of the second most common value.
    combine_rare_levels: bool, default = False
        When set to True, frequency percentile for levels in categorical features below
        a certain threshold is combined into a single level.
    rare_level_threshold: float, default = 0.1
        Percentile distribution below which rare categories are combined. Ignored when
        ``combine_rare_levels`` is not True.
    bin_numeric_features: list of str, default = None
        To convert numeric features into categorical, bin_numeric_features parameter can
        be used. It takes a list of strings with column names to be discretized. It does
        so by using 'sturges' rule to determine the number of clusters and then apply
        KMeans algorithm. Original values of the feature are then replaced by the
        cluster label.
    remove_multicollinearity: bool, default = False
        When set to True, features with the inter-correlations higher than the defined
        threshold are removed. When two features are highly correlated with each other,
        the feature that is less correlated with the target variable is removed. Only
        considers numeric features.
    multicollinearity_threshold: float, default = 0.9
        Threshold for correlated features. Ignored when ``remove_multicollinearity``
        is not True.
    remove_perfect_collinearity: bool, default = False
        When set to True, perfect collinearity (features with correlation = 1) is removed
        from the dataset, when two features are 100% correlated, one of it is randomly
        removed from the dataset.
    group_features: list or list of list, default = None
        When the dataset contains features with related characteristics, group_features
        parameter can be used for feature extraction. It takes a list of strings with
        column names that are related.
    group_names: list, default = None
        Group names to be used in naming new features. When the length of group_names
        does not match with the length of ``group_features``, new features are named
        sequentially group_1, group_2, etc. It is ignored when ``group_features`` is
        None.
    n_jobs: int, default = -1
        The number of jobs to run in parallel (for functions that supports parallel
        processing) -1 means using all processors. To run all functions on single
        processor set n_jobs to None.
    use_gpu: bool or str, default = False
        When set to True, it will use GPU for training with algorithms that support it,
        and fall back to CPU if they are unavailable. When set to 'force', it will only
        use GPU-enabled algorithms and raise exceptions when they are unavailable. When
        False, all algorithms are trained using CPU only.
        GPU enabled algorithms:
        - None at this moment.
    custom_pipeline: (str, transformer) or list of (str, transformer), default = None
        When passed, will append the custom transformers in the preprocessing pipeline
        and are applied on each CV fold separately and on the final fit. All the custom
        transformations are applied before pycaret's internal transformations.
    html: bool, default = True
        When set to False, prevents runtime display of monitor. This must be set to False
        when the environment does not support IPython. For example, command line terminal,
        Databricks Notebook, Spyder and other similar IDEs.
    session_id: int, default = None
        Controls the randomness of experiment. It is equivalent to 'random_state' in
        scikit-learn. When None, a pseudo random number is generated. This can be used
        for later reproducibility of the entire experiment.
    system_log: bool or logging.Logger, default = True
        Whether to save the system logging file (as logs.log). If the input
        already is a logger object, that one is used instead.
    log_experiment: bool, default = False
        When set to True, all metrics and parameters are logged on the ``MLFlow`` server.
    experiment_name: str, default = None
        Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
    log_plots: bool or list, default = False
        When set to True, certain plots are logged automatically in the ``MLFlow`` server.
        To change the type of plots to be logged, pass a list containing plot IDs. Refer
        to documentation of ``plot_model``. Ignored when ``log_experiment`` is not True.
    log_profile: bool, default = False
        When set to True, data profile is logged on the ``MLflow`` server as a html file.
        Ignored when ``log_experiment`` is not True.
    log_data: bool, default = False
        When set to True, dataset is logged on the ``MLflow`` server as a csv file.
        Ignored when ``log_experiment`` is not True.
    silent: bool, default = False
        Controls the confirmation input of data types when ``setup`` is executed. When
        executing in completely automated mode or on a remote kernel, this must be True.
    verbose: bool, default = True
        When set to False, Information grid is not printed.
    profile: bool, default = False
        When set to True, an interactive EDA report is displayed.
    profile_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the ProfileReport method used
        to create the EDA report. Ignored if ``profile`` is False.
    Returns:
        Global variables that can be changed using the ``set_config`` function.
    """
    # Create a fresh experiment and register it as the module-level current
    # experiment so the other functions in this module can delegate to it.
    exp = _EXPERIMENT_CLASS()
    set_current_experiment(exp)
    return exp.setup(
        data=data,
        preprocess=preprocess,
        imputation_type=imputation_type,
        iterative_imputation_iters=iterative_imputation_iters,
        categorical_features=categorical_features,
        categorical_imputation=categorical_imputation,
        categorical_iterative_imputer=categorical_iterative_imputer,
        ordinal_features=ordinal_features,
        high_cardinality_features=high_cardinality_features,
        high_cardinality_method=high_cardinality_method,
        numeric_features=numeric_features,
        numeric_imputation=numeric_imputation,
        numeric_iterative_imputer=numeric_iterative_imputer,
        date_features=date_features,
        ignore_features=ignore_features,
        normalize=normalize,
        normalize_method=normalize_method,
        transformation=transformation,
        transformation_method=transformation_method,
        handle_unknown_categorical=handle_unknown_categorical,
        unknown_categorical_method=unknown_categorical_method,
        pca=pca,
        pca_method=pca_method,
        pca_components=pca_components,
        ignore_low_variance=ignore_low_variance,
        combine_rare_levels=combine_rare_levels,
        rare_level_threshold=rare_level_threshold,
        bin_numeric_features=bin_numeric_features,
        remove_multicollinearity=remove_multicollinearity,
        multicollinearity_threshold=multicollinearity_threshold,
        remove_perfect_collinearity=remove_perfect_collinearity,
        group_features=group_features,
        group_names=group_names,
        n_jobs=n_jobs,
        use_gpu=use_gpu,
        custom_pipeline=custom_pipeline,
        html=html,
        session_id=session_id,
        system_log=system_log,
        log_experiment=log_experiment,
        experiment_name=experiment_name,
        log_plots=log_plots,
        log_profile=log_profile,
        log_data=log_data,
        silent=silent,
        verbose=verbose,
        profile=profile,
        profile_kwargs=profile_kwargs,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def create_model(
    model: Union[str, Any],
    fraction: float = 0.05,
    verbose: bool = True,
    fit_kwargs: Optional[dict] = None,
    **kwargs,
):
    """
    Train an anomaly detection model from the model library on the data
    prepared by ``setup``. Available models are listed by the ``models``
    function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')

    model: str or scikit-learn compatible object
        ID of a model in the library ('abod', 'cluster', 'cof', 'histogram',
        'knn', 'lof', 'svm', 'pca', 'mcd', 'sod', 'sos') or an untrained
        estimator object consistent with the scikit-learn API.
    fraction: float, default = 0.05
        Expected proportion of outliers in the data set, used to set the
        threshold on the decision function at fit time.
    verbose: bool, default = True
        When False, no status updates are printed.
    fit_kwargs: dict, default = {} (empty dict)
        Extra arguments forwarded to the estimator's fit method.
    **kwargs:
        Extra keyword arguments forwarded to the estimator constructor.

    Returns:
        Trained Model
    """
    # Delegate to the experiment registered by setup(); the decorator
    # guarantees _CURRENT_EXPERIMENT is not None at this point.
    experiment = _CURRENT_EXPERIMENT
    return experiment.create_model(
        estimator=model,
        fraction=fraction,
        fit_kwargs=fit_kwargs,
        verbose=verbose,
        **kwargs,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def assign_model(
    model, transformation: bool = False, score: bool = True, verbose: bool = True
) -> pd.DataFrame:
    """
    Attach anomaly labels produced by a trained model to the dataset
    (1 = outlier, 0 = inlier).

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> knn_df = assign_model(knn)

    model: scikit-learn compatible object
        Trained model object.
    transformation: bool, default = False
        When True, labels are attached to the transformed dataset instead.
    score: bool, default = True
        When True, the outlier score column is included as well.
    verbose: bool, default = True
        When False, no status updates are printed.

    Returns:
        pandas.DataFrame
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.assign_model(
        model,
        transformation=transformation,
        score=score,
        verbose=verbose,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def plot_model(
    model,
    plot: str = "tsne",
    feature: Optional[str] = None,
    label: bool = False,
    scale: float = 1,
    save: bool = False,
    display_format: Optional[str] = None,
):
    """
    Analyze the performance of a trained model with a plot.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> plot_model(knn, plot = 'tsne')

    model: scikit-learn compatible object
        Trained model object.
    plot: str, default = 'tsne'
        Plot ID: 'tsne' (t-SNE 3d dimension plot) or 'umap'
        (UMAP dimensionality plot).
    feature: str, default = None
        Feature used as hover-over tooltip and/or label when ``label`` is
        True. When None, the first column of the dataset is used.
    label: bool, default = False
        Name of column to be used as data labels.
    scale: float, default = 1
        Resolution scale of the figure.
    save: bool, default = False
        When True, the plot is saved in the current working directory.
    display_format: str, default = None
        Set to 'streamlit' to display plots in Streamlit
        (https://www.streamlit.io/). Not all plots are supported.

    Returns:
        None
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.plot_model(
        model,
        plot=plot,
        feature_name=feature,
        label=label,
        scale=scale,
        save=save,
        display_format=display_format,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def evaluate_model(
    model, feature: Optional[str] = None, fit_kwargs: Optional[dict] = None,
):
    """
    Display a user interface for analyzing the performance of a trained
    model. Calls ``plot_model`` internally.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> evaluate_model(knn)

    model: scikit-learn compatible object
        Trained model object.
    feature: str, default = None
        Feature used as hover-over tooltip and/or label. When None, the
        first column of the dataset is used by default.
    fit_kwargs: dict, default = {} (empty dict)
        Extra arguments forwarded to the fit method of the model.

    Returns:
        None

    Warnings
    --------
    - This function only works in IPython enabled Notebook.
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.evaluate_model(
        estimator=model,
        feature_name=feature,
        fit_kwargs=fit_kwargs,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def tune_model(
    model,
    supervised_target: str,
    supervised_type: Optional[str] = None,
    supervised_estimator: Union[str, Any] = "lr",
    method: str = "drop",
    optimize: Optional[str] = None,
    custom_grid: Optional[List[int]] = None,
    fold: int = 10,
    fit_kwargs: Optional[dict] = None,
    groups: Optional[Union[str, Any]] = None,
    round: int = 4,
    verbose: bool = True,
):
    """
    This function tunes the ``fraction`` parameter of a given model.
    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> juice = get_data('juice')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = juice)
    >>> tuned_knn = tune_model(model = 'knn', supervised_target = 'Purchase')
    model: str
        ID of an model available in the model library. Models that can be
        tuned in this function (ID - Model):
        * 'abod' - Angle-base Outlier Detection
        * 'cluster' - Clustering-Based Local Outlier
        * 'cof' - Connectivity-Based Outlier Factor
        * 'histogram' - Histogram-based Outlier Detection
        * 'knn' - k-Nearest Neighbors Detector
        * 'lof' - Local Outlier Factor
        * 'svm' - One-class SVM detector
        * 'pca' - Principal Component Analysis
        * 'mcd' - Minimum Covariance Determinant
        * 'sod' - Subspace Outlier Detection
        * 'sos' - Stochastic Outlier Selection
    supervised_target: str
        Name of the target column containing labels.
    supervised_type: str, default = None
        Type of task. 'classification' or 'regression'. Automatically inferred
        when None.
    supervised_estimator: str, default = None
        Classification (ID - Name):
        * 'lr' - Logistic Regression (Default)
        * 'knn' - K Nearest Neighbour
        * 'nb' - Naive Bayes
        * 'dt' - Decision Tree Classifier
        * 'svm' - SVM - Linear Kernel
        * 'rbfsvm' - SVM - Radial Kernel
        * 'gpc' - Gaussian Process Classifier
        * 'mlp' - Multi Level Perceptron
        * 'ridge' - Ridge Classifier
        * 'rf' - Random Forest Classifier
        * 'qda' - Quadratic Discriminant Analysis
        * 'ada' - Ada Boost Classifier
        * 'gbc' - Gradient Boosting Classifier
        * 'lda' - Linear Discriminant Analysis
        * 'et' - Extra Trees Classifier
        * 'xgboost' - Extreme Gradient Boosting
        * 'lightgbm' - Light Gradient Boosting
        * 'catboost' - CatBoost Classifier
        Regression (ID - Name):
        * 'lr' - Linear Regression (Default)
        * 'lasso' - Lasso Regression
        * 'ridge' - Ridge Regression
        * 'en' - Elastic Net
        * 'lar' - Least Angle Regression
        * 'llar' - Lasso Least Angle Regression
        * 'omp' - Orthogonal Matching Pursuit
        * 'br' - Bayesian Ridge
        * 'ard' - Automatic Relevance Determ.
        * 'par' - Passive Aggressive Regressor
        * 'ransac' - Random Sample Consensus
        * 'tr' - TheilSen Regressor
        * 'huber' - Huber Regressor
        * 'kr' - Kernel Ridge
        * 'svm' - Support Vector Machine
        * 'knn' - K Neighbors Regressor
        * 'dt' - Decision Tree
        * 'rf' - Random Forest
        * 'et' - Extra Trees Regressor
        * 'ada' - AdaBoost Regressor
        * 'gbr' - Gradient Boosting
        * 'mlp' - Multi Level Perceptron
        * 'xgboost' - Extreme Gradient Boosting
        * 'lightgbm' - Light Gradient Boosting
        * 'catboost' - CatBoost Regressor
    method: str, default = 'drop'
        When method set to drop, it will drop the outliers from training dataset.
        When 'surrogate', it uses decision function and label as a feature during
        training.
    optimize: str, default = None
        For Classification tasks:
        Accuracy, AUC, Recall, Precision, F1, Kappa (default = 'Accuracy')
        For Regression tasks:
        MAE, MSE, RMSE, R2, RMSLE, MAPE (default = 'R2')
    custom_grid: list, default = None
        By default, a pre-defined list of fraction values is iterated over to
        optimize the supervised objective. To overwrite default iteration,
        pass a list of fraction value to iterate over in custom_grid param.
    fold: int, default = 10
        Number of folds to be used in Kfold CV. Must be at least 2.
    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.
    groups: str or array-like, default = None
        Group labels for cross validation; presumably a column name in the
        dataset or an array of labels — verify against the underlying
        ``tune_model`` implementation.
    round: int, default = 4
        Number of decimal places the metrics are rounded to.
    verbose: bool, default = True
        Status update is not printed when verbose is set to False.
    Returns:
        Trained Model with optimized ``fraction`` parameter.
    """
    return _CURRENT_EXPERIMENT.tune_model(
        model=model,
        supervised_target=supervised_target,
        supervised_type=supervised_type,
        supervised_estimator=supervised_estimator,
        method=method,
        optimize=optimize,
        custom_grid=custom_grid,
        fold=fold,
        fit_kwargs=fit_kwargs,
        groups=groups,
        round=round,
        verbose=verbose,
    )
# not using check_if_global_is_not_none on purpose
def predict_model(model, data: pd.DataFrame) -> pd.DataFrame:
    """
    Generate anomaly labels on new data using a trained model.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> knn_predictions = predict_model(model = knn, data = unseen_data)

    model: scikit-learn compatible object
        Trained Model Object.
    data : pandas.DataFrame
        Shape (n_samples, n_features) where n_samples is the number of samples and
        n_features is the number of features.

    Returns:
        pandas.DataFrame

    Warnings
    --------
    - The behavior of the predict_model is changed in version 2.1 without backward compatibility.
    As such, the pipelines trained using the version (<= 2.0), may not work for inference
    with version >= 2.1. You can either retrain your models with a newer version or downgrade
    the version for inference.
    """
    # Works without a prior setup() call: fall back to a throwaway
    # experiment when no current experiment is registered.
    experiment = _CURRENT_EXPERIMENT if _CURRENT_EXPERIMENT is not None else _EXPERIMENT_CLASS()
    return experiment.predict_model(estimator=model, data=data)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def deploy_model(
    model, model_name: str, authentication: dict, platform: str = "aws",
):
    """
    Deploy the transformation pipeline and trained model on cloud storage.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> # sets appropriate credentials for the platform as environment variables
    >>> import os
    >>> os.environ["AWS_ACCESS_KEY_ID"] = str("foo")
    >>> os.environ["AWS_SECRET_ACCESS_KEY"] = str("bar")
    >>> deploy_model(model = knn, model_name = 'knn-for-deployment', platform = 'aws', authentication = {'bucket' : 'S3-bucket-name'})

    Platform notes:

    - AWS ('aws'): credentials are read from the environment
      (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY). See
      https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#environment-variables
    - GCP ('gcp'): a project and service-account key must exist, with the
      key JSON exposed via environment variables. See
      https://cloud.google.com/docs/authentication/production
    - Azure ('azure'): AZURE_STORAGE_CONNECTION_STRING must be set in the
      environment. See
      https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?toc=%2Fpython%2Fazure%2FTOC.json

    model: scikit-learn compatible object
        Trained model object.
    model_name: str
        Name of model.
    authentication: dict
        Applicable authentication tokens:
        'aws'   -> {'bucket' : 'S3-bucket-name', 'path': (optional) folder name under the bucket}
        'gcp'   -> {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}
        'azure' -> {'container': 'azure-container-name'}
    platform: str, default = 'aws'
        Name of the platform: 'aws', 'gcp' or 'azure'.

    Returns:
        None
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.deploy_model(
        model=model,
        model_name=model_name,
        authentication=authentication,
        platform=platform,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def save_model(
    model, model_name: str, model_only: bool = False, verbose: bool = True, **kwargs
):
    """
    Save the transformation pipeline and trained model object to the
    current working directory as a pickle file for later use.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> save_model(knn, 'saved_knn_model')

    model: scikit-learn compatible object
        Trained model object.
    model_name: str
        Name of the model.
    model_only: bool, default = False
        When True, only the trained model object is saved instead of the
        entire pipeline.
    verbose: bool, default = True
        When False, the success message is not printed.
    **kwargs:
        Extra keyword arguments forwarded to joblib.dump().

    Returns:
        Tuple of the model object and the filename.
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.save_model(
        model=model,
        model_name=model_name,
        model_only=model_only,
        verbose=verbose,
        **kwargs,
    )
# not using check_if_global_is_not_none on purpose
def load_model(
    model_name,
    platform: Optional[str] = None,
    authentication: Optional[Dict[str, str]] = None,
    verbose: bool = True,
):
    """
    Load a previously saved pipeline.

    Example
    -------
    >>> from pycaret.anomaly import load_model
    >>> saved_knn = load_model('saved_knn_model')

    model_name: str
        Name of the model.
    platform: str, default = None
        Cloud platform to load from: 'aws', 'gcp' or 'azure'. When None,
        the model is loaded from the local working directory.
    authentication: dict, default = None
        Applicable authentication tokens:
        'aws'   -> {'bucket' : 'S3-bucket-name'}
        'gcp'   -> {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}
        'azure' -> {'container': 'azure-container-name'}
    verbose: bool, default = True
        When False, the success message is not printed.

    Returns:
        Trained Model
    """
    # Works without a prior setup() call: fall back to a throwaway
    # experiment when no current experiment is registered.
    experiment = _CURRENT_EXPERIMENT if _CURRENT_EXPERIMENT is not None else _EXPERIMENT_CLASS()
    return experiment.load_model(
        model_name=model_name,
        platform=platform,
        authentication=authentication,
        verbose=verbose,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def models(internal: bool = False, raise_errors: bool = True,) -> pd.DataFrame:
    """
    Return a table of models available in the model library.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> all_models = models()

    internal: bool, default = False
        When True, extra columns and rows used internally are included.
    raise_errors: bool, default = True
        When False, all exceptions are suppressed and models that could
        not be created are skipped.

    Returns:
        pandas.DataFrame
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.models(internal=internal, raise_errors=raise_errors)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_logs(experiment_name: Optional[str] = None, save: bool = False) -> pd.DataFrame:
    """
    Return a table of experiment logs. Only works when ``log_experiment``
    was True in the ``setup`` call.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly, log_experiment = True)
    >>> knn = create_model('knn')
    >>> exp_logs = get_logs()

    experiment_name: str, default = None
        When None, the current active run is used.
    save: bool, default = False
        When True, a csv file is saved in the current working directory.

    Returns:
        pandas.DataFrame
    """
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_logs(experiment_name=experiment_name, save=save)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_config(variable: str):
    """
    This function retrieves the global variables created when initializing the
    ``setup`` function. Following variables are accessible:

    - X: Transformed dataset (X)
    - data_before_preprocess: data before preprocessing
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline configured through setup
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment set through setup
    - logging_param: log_experiment param set through setup
    - log_plots_param: log_plots param set through setup
    - USI: Unique session ID parameter set through setup
    - gpu_param: use_gpu param configured through setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> X = get_config('X')

    Returns:
        Global variable
    """
    # Look the variable up on the active experiment instance.
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_config(variable=variable)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def set_config(variable: str, value):
    """
    This function resets the global variables. Following variables are
    accessible:

    - X: Transformed dataset (X)
    - data_before_preprocess: data before preprocessing
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline configured through setup
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment set through setup
    - logging_param: log_experiment param set through setup
    - log_plots_param: log_plots param set through setup
    - USI: Unique session ID parameter set through setup
    - gpu_param: use_gpu param configured through setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> set_config('seed', 123)

    Returns:
        None
    """
    # Write the value through the active experiment instance.
    experiment = _CURRENT_EXPERIMENT
    return experiment.set_config(variable=variable, value=value)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def save_config(file_name: str):
    """
    This function save all global variables to a pickle file, allowing to
    later resume without rerunning the ``setup``.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> save_config('myvars.pkl')

    Returns:
        None
    """
    # Persist the active experiment's state to ``file_name``.
    experiment = _CURRENT_EXPERIMENT
    return experiment.save_config(file_name=file_name)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def load_config(file_name: str):
    """
    This function loads global variables from a pickle file into Python
    environment.

    Example
    -------
    >>> from pycaret.anomaly import load_config
    >>> load_config('myvars.pkl')

    Returns:
        Global variables
    """
    # Restore previously saved experiment state into the active experiment.
    experiment = _CURRENT_EXPERIMENT
    return experiment.load_config(file_name=file_name)
def get_outliers(
    data,
    model: Union[str, Any] = "knn",
    fraction: float = 0.05,
    fit_kwargs: Optional[dict] = None,
    preprocess: bool = True,
    imputation_type: str = "simple",
    iterative_imputation_iters: int = 5,
    categorical_features: Optional[List[str]] = None,
    categorical_imputation: str = "mode",
    categorical_iterative_imputer: Union[str, Any] = "lightgbm",
    ordinal_features: Optional[Dict[str, list]] = None,
    high_cardinality_features: Optional[List[str]] = None,
    high_cardinality_method: str = "frequency",
    numeric_features: Optional[List[str]] = None,
    numeric_imputation: str = "mean",  # method 'zero' added in pycaret==2.1
    numeric_iterative_imputer: Union[str, Any] = "lightgbm",
    date_features: Optional[List[str]] = None,
    ignore_features: Optional[List[str]] = None,
    normalize: bool = False,
    normalize_method: str = "zscore",
    transformation: bool = False,
    transformation_method: str = "yeo-johnson",
    handle_unknown_categorical: bool = True,
    unknown_categorical_method: str = "least_frequent",
    pca: bool = False,
    pca_method: str = "linear",
    pca_components: Optional[float] = None,
    ignore_low_variance: bool = False,
    combine_rare_levels: bool = False,
    rare_level_threshold: float = 0.10,
    bin_numeric_features: Optional[List[str]] = None,
    remove_multicollinearity: bool = False,
    multicollinearity_threshold: float = 0.9,
    remove_perfect_collinearity: bool = False,
    group_features: Optional[List[str]] = None,
    group_names: Optional[List[str]] = None,
    n_jobs: Optional[int] = -1,
    session_id: Optional[int] = None,
    system_log: Union[bool, logging.Logger] = True,
    log_experiment: bool = False,
    experiment_name: Optional[str] = None,
    log_plots: Union[bool, list] = False,
    log_profile: bool = False,
    log_data: bool = False,
    profile: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """
    One-shot anomaly detection helper, callable from any external environment
    without requiring a prior ``setup`` initialization.

    Creates a fresh experiment, runs ``setup`` silently (``silent=True``,
    ``verbose=False``, ``html=False``) with the given preprocessing options,
    fits ``model`` with the given ``fraction`` and ``fit_kwargs``, and returns
    the dataset labelled by ``assign_model``.

    Parameters other than ``model``, ``fraction`` and ``fit_kwargs`` are
    forwarded verbatim to ``setup``; extra ``**kwargs`` are forwarded to
    ``create_model``.

    Returns:
        pandas.DataFrame
    """
    exp = _EXPERIMENT_CLASS()

    # Non-interactive setup: every keyword above is passed straight through.
    exp.setup(
        data=data,
        preprocess=preprocess,
        imputation_type=imputation_type,
        iterative_imputation_iters=iterative_imputation_iters,
        categorical_features=categorical_features,
        categorical_imputation=categorical_imputation,
        categorical_iterative_imputer=categorical_iterative_imputer,
        ordinal_features=ordinal_features,
        high_cardinality_features=high_cardinality_features,
        high_cardinality_method=high_cardinality_method,
        numeric_features=numeric_features,
        numeric_imputation=numeric_imputation,
        numeric_iterative_imputer=numeric_iterative_imputer,
        date_features=date_features,
        ignore_features=ignore_features,
        normalize=normalize,
        normalize_method=normalize_method,
        transformation=transformation,
        transformation_method=transformation_method,
        handle_unknown_categorical=handle_unknown_categorical,
        unknown_categorical_method=unknown_categorical_method,
        pca=pca,
        pca_method=pca_method,
        pca_components=pca_components,
        ignore_low_variance=ignore_low_variance,
        combine_rare_levels=combine_rare_levels,
        rare_level_threshold=rare_level_threshold,
        bin_numeric_features=bin_numeric_features,
        remove_multicollinearity=remove_multicollinearity,
        multicollinearity_threshold=multicollinearity_threshold,
        remove_perfect_collinearity=remove_perfect_collinearity,
        group_features=group_features,
        group_names=group_names,
        n_jobs=n_jobs,
        html=False,
        session_id=session_id,
        system_log=system_log,
        log_experiment=log_experiment,
        experiment_name=experiment_name,
        log_plots=log_plots,
        log_profile=log_profile,
        log_data=log_data,
        silent=True,
        verbose=False,
        profile=profile,
    )

    # Train the detector, then label the training data with it.
    c = exp.create_model(
        model=model, fraction=fraction, fit_kwargs=fit_kwargs, verbose=False, **kwargs,
    )
    return exp.assign_model(c, verbose=False)
def set_current_experiment(experiment: AnomalyExperiment):
    """Make ``experiment`` the module's active experiment.

    Raises
    ------
    TypeError
        If ``experiment`` is not an ``AnomalyExperiment`` instance.
    """
    global _CURRENT_EXPERIMENT
    if isinstance(experiment, AnomalyExperiment):
        _CURRENT_EXPERIMENT = experiment
    else:
        raise TypeError(
            f"experiment must be a PyCaret AnomalyExperiment object, got {type(experiment)}."
        )
| 289 | 0 | 23 |
e2c921fa196cc33b291c9768ba921ca005df8547 | 142 | py | Python | example_problem/engineer/urls.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | example_problem/engineer/urls.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | example_problem/engineer/urls.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Routes 'evaluate-architecture' to the EvaluateArchitecture class-based view.
urlpatterns = [
    path('evaluate-architecture', views.EvaluateArchitecture.as_view()),
]
| 17.75 | 72 | 0.746479 | from django.urls import path
from . import views
urlpatterns = [
path('evaluate-architecture', views.EvaluateArchitecture.as_view()),
]
| 0 | 0 | 0 |
87a23b02e213e4927b2074ff788d02f58b2845eb | 2,909 | py | Python | reprohack_hub/migrations/0019_auto_20210910_1543.py | maelle/reprohack_site | 1ad92436acf7bb35ad6a6a92ad937b49ca01fedb | [
"MIT"
] | 10 | 2019-10-27T07:51:41.000Z | 2022-02-04T14:48:01.000Z | reprohack_hub/migrations/0019_auto_20210910_1543.py | maelle/reprohack_site | 1ad92436acf7bb35ad6a6a92ad937b49ca01fedb | [
"MIT"
] | 131 | 2019-10-25T20:21:41.000Z | 2022-03-22T16:12:56.000Z | reprohack_hub/migrations/0019_auto_20210910_1543.py | maelle/reprohack_site | 1ad92436acf7bb35ad6a6a92ad937b49ca01fedb | [
"MIT"
] | 12 | 2019-10-26T12:52:45.000Z | 2022-02-16T17:07:40.000Z | # Generated by Django 3.1.4 on 2021-09-10 15:43
from django.db import migrations
import markdownx.models
| 44.753846 | 186 | 0.645583 | # Generated by Django 3.1.4 on 2021-09-10 15:43
from django.db import migrations
import markdownx.models
class Migration(migrations.Migration):
dependencies = [
('reprohack_hub', '0018_auto_20210910_1412'),
]
operations = [
migrations.AlterField(
model_name='review',
name='advantages',
field=markdownx.models.MarkdownxField(help_text='Markdown field', verbose_name='What were the positive features of this approach?'),
),
migrations.AlterField(
model_name='review',
name='challenges',
field=markdownx.models.MarkdownxField(help_text='Markdown field', verbose_name='What were the main challenges you ran into (if any)?'),
),
migrations.AlterField(
model_name='review',
name='comments_and_suggestions',
field=markdownx.models.MarkdownxField(blank=True, default='', help_text='Markdown field', verbose_name='Any other comments/suggestions on the reproducibility approach?'),
),
migrations.AlterField(
model_name='review',
name='documentation_cons',
field=markdownx.models.MarkdownxField(help_text='Markdown field', verbose_name='How could the documentation be improved?'),
),
migrations.AlterField(
model_name='review',
name='documentation_pros',
field=markdownx.models.MarkdownxField(help_text='Markdown field', verbose_name='What do you like about the documentation?'),
),
migrations.AlterField(
model_name='review',
name='general_comments',
field=markdownx.models.MarkdownxField(blank=True, default='', help_text='Markdown field', verbose_name='Any final comments?'),
),
migrations.AlterField(
model_name='review',
name='reusability_suggestions',
field=markdownx.models.MarkdownxField(blank=True, default='', help_text='Markdown field', verbose_name='Any suggestions on how the project could be more reusable?'),
),
migrations.AlterField(
model_name='review',
name='software_installed',
field=markdownx.models.MarkdownxField(help_text='Markdown field', verbose_name='What additional software did you need to install?'),
),
migrations.AlterField(
model_name='review',
name='software_used',
field=markdownx.models.MarkdownxField(help_text='Markdown field', verbose_name='What software did you use?'),
),
migrations.AlterField(
model_name='review',
name='transparency_suggestions',
field=markdownx.models.MarkdownxField(blank=True, default='', help_text='Markdown field', verbose_name='Any suggestions on how the analysis could be made more transparent?'),
),
]
| 0 | 2,779 | 23 |
dc3e3aa05880d91a74c5871736700751f781d474 | 14,825 | py | Python | src/spaceone/inventory/libs/manager.py | xellos00/plugin-monitoring | 137d0aa013c3061d45b25b2d5008b6e6a18fe6d2 | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/libs/manager.py | xellos00/plugin-monitoring | 137d0aa013c3061d45b25b2d5008b6e6a18fe6d2 | [
"Apache-2.0"
] | 2 | 2021-06-08T22:45:46.000Z | 2021-07-29T07:59:52.000Z | src/spaceone/inventory/libs/manager.py | xellos00/plugin-monitoring | 137d0aa013c3061d45b25b2d5008b6e6a18fe6d2 | [
"Apache-2.0"
] | 1 | 2021-12-23T04:00:30.000Z | 2021-12-23T04:00:30.000Z | __all__ = ['CollectorManager']
import concurrent.futures
from spaceone.core.manager import BaseManager
from datetime import datetime, timedelta
from spaceone.inventory.error.custom import *
from spaceone.inventory.model.server import *
from spaceone.inventory.libs.schema.base import ReferenceModel
from pprint import pprint
_LOGGER = logging.getLogger(__name__)
COLLECTIVE_STATE = ['max', 'avg']
DEFAULT_INTERVAL = 86400
MAX_WORKER = 20
MAX_DIVIDING_COUNT = 20
| 42.846821 | 130 | 0.533086 | __all__ = ['CollectorManager']
import concurrent.futures
from spaceone.core.manager import BaseManager
from datetime import datetime, timedelta
from spaceone.inventory.error.custom import *
from spaceone.inventory.model.server import *
from spaceone.inventory.libs.schema.base import ReferenceModel
from pprint import pprint
_LOGGER = logging.getLogger(__name__)
COLLECTIVE_STATE = ['max', 'avg']
DEFAULT_INTERVAL = 86400
MAX_WORKER = 20
MAX_DIVIDING_COUNT = 20
class CollectorManager(BaseManager):
provider = None
def __init__(self, **kwargs):
super().__init__(transaction=None, config=None)
secret_data = kwargs.get('secret_data')
self.data_source = secret_data.get('data_source_info')
self.end = None
self.start = None
try:
self.max_worker = MAX_WORKER
self.inventory_manager = secret_data.get('inventory_manager')
self.monitoring_manager = secret_data.get('monitoring_manager')
self.domain_id = secret_data.get('domain_id')
self.set_time(1)
except Exception as e:
print()
raise ERROR_UNKNOWN(message=e)
def verify(self, secret_data, region_name):
"""
Check connection
"""
return ''
def collect_monitoring_data(self, params) -> list:
raise NotImplemented
def collect_resources(self, params) -> list:
return self.collect_monitoring_data(params)
def set_time(self, interval_options: int):
self.end = datetime.utcnow()
self.start = self.end - timedelta(days=interval_options)
def list_metrics(self, provider, resource_type, server_ids):
data_source = self.get_data_source_info_by_provider(provider)
metric_list = self.monitoring_manager.get_metric_list(data_source.get('data_source_id'),
resource_type,
server_ids)
return metric_list
def get_data_source_info_by_provider(self, provider):
data_source = self.data_source.get(provider, [])
return data_source[0] if len(data_source) > 0 else None
def get_servers_metric_data(self, metric_info_vo, provider, server_ids, start, end):
server_monitoring_vo = {}
metric_info = metric_info_vo.get('json')
metric_keys = metric_info_vo.get('key')
data_source = self.get_data_source_info_by_provider(provider)
if data_source:
for collect_item in metric_keys:
dict_key = collect_item.split('.')
if dict_key[0] not in server_monitoring_vo:
server_monitoring_vo.update({dict_key[0]: {}})
if provider in metric_info[dict_key[0]][dict_key[1]]:
for provider_metric in metric_info[dict_key[0]][dict_key[1]][provider]:
# metric_data contains metric data via index
# 0: max (Max)
# 1: avg (Average or Mean)
metric_data = [{}, {}]
if provider_metric.get('metric') != '':
param = self._get_metric_param(provider,
data_source.get('data_source_id'),
'inventory.Server',
server_ids,
provider_metric.get('metric'),
start,
end)
metric_data[0] = self.get_metric_data(param)
param.update({'stat_flag': 'avg'})
metric_data[1] = self.get_metric_data(param)
vo = server_monitoring_vo[dict_key[0]].get(dict_key[1])
server_monitoring_vo[dict_key[0]].update(
{dict_key[1]: self.get_collect_data_per_state(metric_data, server_ids, vo)})
return server_monitoring_vo
def get_metric_data(self, params):
stat_flag = 'MAX'
stat_interval = params.get('stat_interval') if params.get('stat_interval') is not None else DEFAULT_INTERVAL
if params.get('stat_flag') == 'avg':
stat_flag = 'AVERAGE' if params.get('provider') == 'aws' else 'MEAN'
monitoring_data = self.monitoring_manager.get_metric_data(params.get('data_source_id'),
params.get('source_type'),
params.get('server_ids'),
params.get('metric'),
params.get('start'),
params.get('end'),
stat_interval,
stat_flag)
return monitoring_data
def get_collect_data_per_state(self, metric_data, server_ids, previous_dt):
collected_data_map = {}
if len(metric_data) != len(metric_data):
raise ERROR_NOT_SUPPORT_STAT(supported_stat=' | '.join(COLLECTIVE_STATE))
for idx, state in enumerate(COLLECTIVE_STATE):
state_data = metric_data[idx]
filter_dt = self._get_only_available_values(state_data, server_ids)
if previous_dt:
previous_filtered = self._get_only_available_values(previous_dt[state], server_ids)
if bool(filter_dt.get('resource_values', {})):
merge_pre = previous_filtered.get('resource_values', {})
merged_aft = filter_dt.get('resource_values', {})
resource = {**merge_pre, **merged_aft}
collected_data_map.update({
state: {'resource_values': resource,
'labels': filter_dt.get('labels'),
'domain_id': filter_dt.get('domain_id')}
})
else:
collected_data_map.update({
state: previous_filtered
})
else:
collected_data_map.update({
state: filter_dt
})
return collected_data_map
def set_metric_data_to_server(self, metric_info_vo, servers, collected_data):
return_list = []
metric_keys = metric_info_vo.get('key')
for server in servers:
server_vo = {}
provider = server.get('provider')
server_id = server.get('server_id')
if collected_data != {}:
for metric_key in metric_keys:
key = metric_key.split('.')
if key[0] not in server_vo and key[0] in collected_data:
server_vo.update({key[0]: {}})
for state in COLLECTIVE_STATE:
if key[1] not in server_vo[key[0]] and key[1] in collected_data[key[0]]:
server_vo[key[0]].update({key[1]: {}})
if key[0] in collected_data and key[1] in collected_data[key[0]]:
resources = collected_data[key[0]][key[1]]
if state in resources:
# If perfer to deliver raw data from monitoring.
# server_vo[key[0]][key[1]].update({state: {
# 'labels': resources[state].get('labels', []),
# 'values': resources[state].get('resource_values', {}).get(server_id, [])
# }})
metric_value = self._get_data_only(resources, state, server_id)
if metric_value is not None:
_metric_value_revised = float(metric_value) if isinstance(metric_value, str) else metric_value
try:
server_vo[key[0]][key[1]].update({state: round(_metric_value_revised, 1)})
except Exception as e:
raise e
if provider == 'google_cloud':
updated_memory = self._set_memory_usage(server_vo)
server_vo['memory'].update(updated_memory)
monitoring_data = Server({'monitoring': Monitoring(server_vo, strict=False)}, strict=False)
if self._check_to_update(monitoring_data.to_primitive()):
if provider == 'aws':
compute_vm_resource = ServerAwsInstanceResource({
'provider': provider,
'cloud_service_group': server.get('cloud_service_group'),
'cloud_service_type': server.get('cloud_service_type'),
'data': monitoring_data,
'reference': ReferenceModel(monitoring_data.reference(server.get('reference').get('resource_id')))
}, strict=False)
return_list.append(ServerAwsInstanceResponse({'resource': compute_vm_resource}))
elif provider == 'azure':
compute_vm_resource = ServerAzureInstanceResource({
'provider': provider,
'cloud_service_group': server.get('cloud_service_group'),
'cloud_service_type': server.get('cloud_service_type'),
'data': monitoring_data,
'reference': ReferenceModel(
monitoring_data.reference(server.get('reference').get('resource_id')))
}, strict=False)
return_list.append(ServerAzureInstanceResponse({'resource': compute_vm_resource}))
elif provider == 'google_cloud':
compute_vm_resource = ServerGoogleInstanceResource({
'provider': provider,
'cloud_service_group': server.get('cloud_service_group'),
'cloud_service_type': server.get('cloud_service_type'),
'data': monitoring_data,
'reference': ReferenceModel(
monitoring_data.reference(server.get('reference').get('resource_id')))
}, strict=False)
return_list.append(ServerGoogleInstanceResponse({'resource': compute_vm_resource}))
return return_list
@staticmethod
def _set_memory_usage(server_vo):
memory = server_vo.get('memory', {})
total = memory.get('total', {})
used = memory.get('used', {})
usage = {}
if total != {} and used != {}:
avg_total = total.get('avg')
avg_used = used.get('avg')
max_total = total.get('max')
max_used = used.get('max')
if avg_total is not None and avg_used is not None:
avg_usage = float(avg_used) / float(avg_total) * 100
usage.update({'avg': round(avg_usage, 1)})
if max_total is not None and max_used is not None:
max_usage = float(avg_used) / float(avg_total) * 100
usage.update({'max': round(max_usage, 1)})
if usage != {}:
memory.update({'usage': usage})
return memory
@staticmethod
def _get_data_only(metric_data, state, server_id):
data_only = None
resource_values = metric_data[state].get('resource_values', {})
values = resource_values.get(server_id)
if values and len(values) > 0:
data_only = values[0]
return data_only
@staticmethod
def _is_update_able(metric, server_id):
resource_values = metric.get('resource_values')
values = resource_values.get(server_id)
return False if not values or values is None else True
@staticmethod
def _get_metric_param(provider, data_source_id, source_type, server_ids, metric, start, end):
return {
'provider': provider,
'data_source_id': data_source_id,
'source_type': source_type,
'server_ids': server_ids,
'metric': metric,
'start': start,
'end': end,
}
@staticmethod
def _get_only_available_values(metric_monitoring_data, server_ids):
dummy = metric_monitoring_data.copy()
for server_id in server_ids:
if 'resource_values' in dummy and dummy['resource_values'].get(server_id) == []:
dummy['resource_values'].pop(server_id, None)
metric_monitoring_data.update({
'resource_values': dummy.get('resource_values', {})
})
return metric_monitoring_data
@staticmethod
def _get_only_available_ids(available_resources, server_ids):
_available_resources = []
if server_ids:
if isinstance(server_ids, list):
for server_id in server_ids:
if available_resources.get(server_id):
_available_resources.append(server_id)
else:
if available_resources.get(server_ids):
_available_resources.append(server_ids)
return _available_resources
@staticmethod
def get_divided_into_max_count(max_count, divide_targets):
return_arr = []
for idx, target in enumerate(divide_targets, start=0):
return_arr_idx = len(return_arr) - 1
if return_arr_idx < 0:
return_arr.append([target])
else:
current_target_length = len(return_arr[return_arr_idx])
if current_target_length < max_count:
return_arr[return_arr_idx].append(target)
else:
return_arr.append([target])
return return_arr
@staticmethod
def _get_total_length(server_ids):
length = 0
for server_id in server_ids:
length = length + len(server_id)
return length
@staticmethod
def _check_to_update(monitoring_data):
return True if monitoring_data.get('monitoring', {}) != {} else False
| 13,507 | 830 | 23 |
103bba48f40e943f5ad7c9bf43cd0ce50e81ca93 | 3,360 | py | Python | models/baseline/bert.py | Thesharing/lfesm | e956ed76f5a85259000742db093726d4b4c51751 | [
"Apache-2.0"
] | 6 | 2020-01-31T13:14:11.000Z | 2021-05-16T11:43:17.000Z | models/baseline/bert.py | Cyprestar/scm-fsim | 924fb184451fa4ca0eb419a1dcc0bd6cea2edf3a | [
"Apache-2.0"
] | 5 | 2020-11-16T06:23:31.000Z | 2022-01-04T10:17:16.000Z | models/baseline/bert.py | Cyprestar/scm-fsim | 924fb184451fa4ca0eb419a1dcc0bd6cea2edf3a | [
"Apache-2.0"
] | 4 | 2020-11-04T02:42:57.000Z | 2022-03-21T06:36:20.000Z | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertModel
from ..esim.layers import Seq2SeqEncoder
from ..esim.utils import replace_masked
class BERTBaseline(BertPreTrainedModel):
"""
ab、ac交互并编码
"""
@staticmethod
| 40 | 90 | 0.615476 | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertModel
from ..esim.layers import Seq2SeqEncoder
from ..esim.utils import replace_masked
class BERTBaseline(BertPreTrainedModel):
    """
    Interact and encode the a/b and a/c pairs.

    Baseline that embeds the three texts with BERT's word embeddings,
    max-pools each sequence, scores (a, b) and (a, c) with a shared
    bilinear layer and treats the two scores as 2-way logits.
    """
    def __init__(self, config):
        super(BERTBaseline, self).__init__(config)
        self.bert = BertModel(config)
        self.init_weights()
        # Only the word-embedding table of BERT is used in forward().
        self._embedding = self.bert.embeddings.word_embeddings
        # NOTE(review): defined but never used in forward() — confirm intent.
        self._encoding = Seq2SeqEncoder(nn.LSTM,
                                        config.hidden_size,
                                        config.hidden_size,
                                        bidirectional=True)
        # Shared bilinear scorer for both (a, b) and (a, c).
        self._linear = nn.Bilinear(config.hidden_size, config.hidden_size, 1)
        self.apply(self.init_esim_weights)

    def forward(self, a, b, c, labels=None, mode="prob"):
        # a/b/c are (input_ids, attention_mask) pairs — indexed as x[0], x[1].
        a_mask = a[1].float()
        b_mask = b[1].float()
        c_mask = c[1].float()
        # the parameter is: input_ids, attention_mask, token_type_ids
        # which is corresponding to input_ids, input_mask and segment_ids in InputFeatures
        v_a = self._embedding(a[0])
        v_b = self._embedding(b[0])
        v_c = self._embedding(c[0])
        # The return value: sequence_output, pooled_output, (hidden_states), (attentions)
        # Masked max-pool over the sequence dimension (padding set to -1e7).
        v_a_max, _ = replace_masked(v_a, a_mask, -1e7).max(dim=1)
        v_b_max, _ = replace_masked(v_b, b_mask, -1e7).max(dim=1)
        v_c_max, _ = replace_masked(v_c, c_mask, -1e7).max(dim=1)

        ab = self._linear(v_a_max, v_b_max)
        ac = self._linear(v_a_max, v_c_max)
        # Logits of shape (batch, 2): [score(a,b), score(a,c)].
        output = torch.cat([ab, ac], dim=-1)

        # mode selects the return value; an unknown mode returns None.
        if mode == "prob":
            prob = torch.nn.functional.softmax(Variable(output), dim=1)
            return prob
        elif mode == "logits":
            return output
        elif mode == "loss":
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(output.view(-1, 2), labels.view(-1))
            return loss
        elif mode == "evaluate":
            prob = torch.nn.functional.softmax(Variable(output), dim=1)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(output.view(-1, 2), labels.view(-1))
            return output, prob, loss

    @staticmethod
    def init_esim_weights(module):
        # Xavier/orthogonal initialization for Linear and LSTM modules;
        # LSTM forget-gate biases are set to 1.0.
        if isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight.data)
            nn.init.constant_(module.bias.data, 0.0)
        elif isinstance(module, nn.LSTM):
            nn.init.xavier_uniform_(module.weight_ih_l0.data)
            nn.init.orthogonal_(module.weight_hh_l0.data)
            nn.init.constant_(module.bias_ih_l0.data, 0.0)
            nn.init.constant_(module.bias_hh_l0.data, 0.0)
            hidden_size = module.bias_hh_l0.data.shape[0] // 4
            module.bias_hh_l0.data[hidden_size:(2 * hidden_size)] = 1.0

            if module.bidirectional:
                nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)
                nn.init.orthogonal_(module.weight_hh_l0_reverse.data)
                nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)
                nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)
                module.bias_hh_l0_reverse.data[hidden_size:(2 * hidden_size)] = 1.0
| 2,927 | 0 | 80 |
f3d5720a4a238b43e2514e673d8c18806e8f3604 | 785 | py | Python | aula15/ex06.py | FelipeMachad0/python | 20b4e4264beca6914815c5c4c11ec7805d99e8d2 | [
"MIT"
] | 1 | 2021-12-10T21:48:12.000Z | 2021-12-10T21:48:12.000Z | aula15/ex06.py | FelipeMachad0/python | 20b4e4264beca6914815c5c4c11ec7805d99e8d2 | [
"MIT"
] | null | null | null | aula15/ex06.py | FelipeMachad0/python | 20b4e4264beca6914815c5c4c11ec7805d99e8d2 | [
"MIT"
] | null | null | null | valor_total = int(input('Qual valor ira sacar? R$'))
cedula50 = cedula20 = cedula10 = cedula5 = moeda1 = 0
while True:
if valor_total >= 50:
cedula50 += 1
valor_total -= 50
elif valor_total >= 20:
cedula20 += 1
valor_total -= 20
elif valor_total >= 10:
cedula10 += 1
valor_total -= 10
elif valor_total >= 5:
cedula5 += 1
valor_total -= 5
elif valor_total >= 1:
moeda1 += 1
valor_total -= 1
else:
break
if cedula50 > 0:
print(f'Cedulas R$50: {cedula50}')
if cedula20 > 0:
print(f'Cedulas R$20: {cedula20}')
if cedula10 > 0:
print(f'Cedulas R$10: {cedula10}')
if cedula5 > 0:
print(f'Cedulas R$5: {cedula5}')
if moeda1 > 0:
print(f'Moedas R$1: {moeda1}')
| 25.322581 | 53 | 0.566879 | valor_total = int(input('Qual valor ira sacar? R$'))
cedula50 = cedula20 = cedula10 = cedula5 = moeda1 = 0
while True:
if valor_total >= 50:
cedula50 += 1
valor_total -= 50
elif valor_total >= 20:
cedula20 += 1
valor_total -= 20
elif valor_total >= 10:
cedula10 += 1
valor_total -= 10
elif valor_total >= 5:
cedula5 += 1
valor_total -= 5
elif valor_total >= 1:
moeda1 += 1
valor_total -= 1
else:
break
if cedula50 > 0:
print(f'Cedulas R$50: {cedula50}')
if cedula20 > 0:
print(f'Cedulas R$20: {cedula20}')
if cedula10 > 0:
print(f'Cedulas R$10: {cedula10}')
if cedula5 > 0:
print(f'Cedulas R$5: {cedula5}')
if moeda1 > 0:
print(f'Moedas R$1: {moeda1}')
| 0 | 0 | 0 |
036e6cf5077395cda11f919a0feec8d78ffb909f | 93 | py | Python | scripts/vr-aubo-binding/test.py | Yanxxx/vive_ros | 0e3be46107dbae39b4ea17164e5b9cd2d960c7a4 | [
"BSD-3-Clause"
] | null | null | null | scripts/vr-aubo-binding/test.py | Yanxxx/vive_ros | 0e3be46107dbae39b4ea17164e5b9cd2d960c7a4 | [
"BSD-3-Clause"
] | null | null | null | scripts/vr-aubo-binding/test.py | Yanxxx/vive_ros | 0e3be46107dbae39b4ea17164e5b9cd2d960c7a4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import PyKDL as kdl
test = kdl.Vector(0, 0, -0.2)
print(test)
# | 11.625 | 29 | 0.623656 | #!/usr/bin/env python3
import PyKDL as kdl
test = kdl.Vector(0, 0, -0.2)
print(test)
# | 0 | 0 | 0 |
786dbd03b63c3402a3bbf979c36b94bb4258d6f6 | 74 | py | Python | tests/__init__.py | icaropires/pdf2dataset | b070d656fa446c296458512515fc68fc43d949e1 | [
"Apache-2.0"
] | 11 | 2020-06-30T03:22:57.000Z | 2021-11-16T03:35:50.000Z | tests/__init__.py | icaropires/pdf2dataset | b070d656fa446c296458512515fc68fc43d949e1 | [
"Apache-2.0"
] | 23 | 2020-07-21T19:03:37.000Z | 2020-11-01T15:53:03.000Z | tests/__init__.py | icaropires/pdf2dataset | b070d656fa446c296458512515fc68fc43d949e1 | [
"Apache-2.0"
] | 4 | 2020-07-15T20:16:28.000Z | 2021-04-13T18:38:22.000Z | import pytest
pytest.register_assert_rewrite('tests.testing_dataframe')
| 14.8 | 57 | 0.851351 | import pytest
pytest.register_assert_rewrite('tests.testing_dataframe')
| 0 | 0 | 0 |
4f9815c445dce47c705efa25ba0c20411efffe59 | 1,200 | py | Python | acc-TopK/acc_topK.py | ChenChunShenG19/Tensorflow-Green-Hand | da4a1b852026c7a77f57fd25c25cc26bdbb0afd2 | [
"MIT"
] | null | null | null | acc-TopK/acc_topK.py | ChenChunShenG19/Tensorflow-Green-Hand | da4a1b852026c7a77f57fd25c25cc26bdbb0afd2 | [
"MIT"
] | null | null | null | acc-TopK/acc_topK.py | ChenChunShenG19/Tensorflow-Green-Hand | da4a1b852026c7a77f57fd25c25cc26bdbb0afd2 | [
"MIT"
] | null | null | null | # Author: Betterman
# -*- coding = utf-8 -*-
# @Time : 2020/8/27 14:56
# @File : acc_topK.py
# @Software : PyCharm
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.random.set_seed(2467)
#计算accuracy
#正态分布10个样本,6个类
output = tf.random.normal([10, 6])
#softmax使得6类总和概率为1
output = tf.math.softmax(output, axis=1)
#maxval =6从0-5中随机生成10个label
target = tf.random.uniform([10], maxval=6, dtype=tf.int32)
print('prob:', output.numpy())
pred = tf.argmax(output, axis=1)
print('pred:', pred.numpy())
print('label:', target.numpy())
acc = accuracy(output, target, topk=(1,2,3,4,5,6))
print('top-1-6 acc:', acc) | 31.578947 | 77 | 0.6325 | # Author: Betterman
# -*- coding = utf-8 -*-
# @Time : 2020/8/27 14:56
# @File : acc_topK.py
# @Software : PyCharm
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.random.set_seed(2467)
#计算accuracy
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracy percentages for each k in ``topk``.

    output: (batch, classes) prediction scores; target: (batch,) labels.
    """
    maxk = max(topk)
    batch_size = target.shape[0]

    # (batch, maxk) indices -> (maxk, batch) so row k holds the k-th guesses.
    ranked = tf.transpose(tf.math.top_k(output, maxk).indices, perm=[1, 0])
    # Compare every ranked guess against the broadcast labels.
    hits = tf.equal(ranked, tf.broadcast_to(target, ranked.shape))

    accuracies = []
    for k in topk:
        # A sample counts as correct if the label is in the first k guesses.
        hit_count = tf.reduce_sum(tf.cast(tf.reshape(hits[:k], [-1]), dtype=tf.float32))
        accuracies.append(float(hit_count * (100.0 / batch_size)))
    return accuracies
#正态分布10个样本,6个类
output = tf.random.normal([10, 6])
#softmax使得6类总和概率为1
output = tf.math.softmax(output, axis=1)
#maxval =6从0-5中随机生成10个label
target = tf.random.uniform([10], maxval=6, dtype=tf.int32)
print('prob:', output.numpy())
pred = tf.argmax(output, axis=1)
print('pred:', pred.numpy())
print('label:', target.numpy())
acc = accuracy(output, target, topk=(1,2,3,4,5,6))
print('top-1-6 acc:', acc) | 526 | 0 | 23 |
e7dcbfce6d15e1e2d19d25d3d6d8038c532c9845 | 5,038 | py | Python | packaging/setup/plugins/ovirt-engine-rename/ovirt-engine/database.py | UranusBlockStack/ovirt-engine | fe3c90ed3e74e6af9497c826c82e653382946ae1 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-rename/ovirt-engine/database.py | UranusBlockStack/ovirt-engine | fe3c90ed3e74e6af9497c826c82e653382946ae1 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-rename/ovirt-engine/database.py | UranusBlockStack/ovirt-engine | fe3c90ed3e74e6af9497c826c82e653382946ae1 | [
"Apache-2.0"
] | null | null | null | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""database plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin, transaction, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import domains
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
@util.export
class Plugin(plugin.PluginBase):
"""database plugin."""
class DBTransaction(transaction.TransactionElement):
"""yum transaction element."""
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
name=oengcommcons.Stages.DB_CONNECTION_AVAILABLE,
)
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
)
# vim: expandtab tabstop=4 shiftwidth=4
| 32.503226 | 79 | 0.599246 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""database plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin, transaction, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import domains
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """database plugin."""
    class DBTransaction(transaction.TransactionElement):
        """Engine-database transaction element.
        Wraps the shared DB connection stored in the environment so the
        otopi main transaction can commit or roll it back as one unit.
        """
        def __init__(self, parent):
            self._parent = parent
        def __str__(self):
            return _("Database Transaction")
        def prepare(self):
            pass
        def abort(self):
            # Roll back and drop the shared connection so later stages
            # cannot accidentally reuse an aborted transaction.
            connection = self._parent.environment[
                oenginecons.EngineDBEnv.CONNECTION
            ]
            if connection is not None:
                connection.rollback()
                self._parent.environment[
                    oenginecons.EngineDBEnv.CONNECTION
                ] = None
        def commit(self):
            connection = self._parent.environment[
                oenginecons.EngineDBEnv.CONNECTION
            ]
            if connection is not None:
                connection.commit()
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Register our DB transaction element with the global otopi transaction.
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            self.DBTransaction(self)
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        name=oengcommcons.Stages.DB_CONNECTION_AVAILABLE,
    )
    def _connection(self):
        # Build the Statement helper and open the shared engine-DB connection.
        self.environment[
            oenginecons.EngineDBEnv.STATEMENT
        ] = database.Statement(
            dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
            environment=self.environment,
        )
        # must be here as we do not have database at validation
        self.environment[
            oenginecons.EngineDBEnv.CONNECTION
        ] = self.environment[oenginecons.EngineDBEnv.STATEMENT].connect()
    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
    )
    def _validation(self):
        # Refuse the rename while the engine host itself serves NFS ISO
        # storage domains: the stored connection strings would break.
        dbovirtutils = database.OvirtUtils(
            plugin=self,
            dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
        )
        dbovirtutils.tryDatabaseConnect()
        dbstatement = database.Statement(
            dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
            environment=self.environment,
        )
        my_domains = []
        # All NFS ISO domains and their "host:path" connection strings.
        rows = dbstatement.execute(
            statement="""
                select
                    storage_name,
                    connection
                from
                    storage_domain_static s,
                    storage_server_connections c
                where
                    s.storage = c.id and
                    s.storage_type=%(storage_type)s and
                    s.storage_domain_type=%(storage_domain_type)s
            """,
            args=dict(
                storage_type=domains.StorageType.NFS,
                storage_domain_type=domains.StorageDomainType.ISO,
            ),
            ownConnection=True,
        )
        for row in rows:
            host, path = row['connection'].split(':', 1)
            if host == self.environment[osetupcons.ConfigEnv.FQDN]:
                my_domains.append(row['storage_name'])
        if my_domains:
            self.logger.warning(_('Engine host hosting Storage Domains'))
            self.dialog.note(
                text=_(
                    'The following Storage Domains use the engine host\n'
                    'as an NFS server:\n'
                    '\n'
                    '{domains}\n'
                    '\n'
                    'Cannot rename the engine host. Please backup relevant\n'
                    'data if needed, remove all of these domains, and then\n'
                    'run this utility again.\n'
                ).format(
                    domains='\n'.join(sorted(my_domains))
                ),
            )
            raise RuntimeError(_('Cannot rename host hosting Storage Domains'))
| 3,234 | 0 | 283 |
22a09ea86ed2b411613e8c0f7c625b2f5d11a6be | 441 | py | Python | Accessible_Campus-master/Geodjango/firstgis/migrations/0004_auto_20180725_0157.py | zzrose/Campus_Locator | 9262968165c198c15cffd0b3165c97b26bdafed2 | [
"Apache-2.0"
] | 1 | 2019-02-25T23:17:29.000Z | 2019-02-25T23:17:29.000Z | Geodjango/firstgis/migrations/0004_auto_20180725_0157.py | Harrymissi/Accessible_Campus | e20c14a18809e86e90b4aff528d2966a5b36f416 | [
"Apache-2.0"
] | null | null | null | Geodjango/firstgis/migrations/0004_auto_20180725_0157.py | Harrymissi/Accessible_Campus | e20c14a18809e86e90b4aff528d2966a5b36f416 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.3 on 2018-07-25 05:57
from django.db import migrations
import django.db.models.manager
| 21 | 63 | 0.587302 | # Generated by Django 2.0.3 on 2018-07-25 05:57
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated: replace the managers declared on the `incidences` model."""
    dependencies = [
        ('firstgis', '0003_auto_20180725_0150'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='incidences',
            managers=[
                # NOTE(review): manager is registered as 'object' (not the
                # conventional 'objects') -- mirrors the model definition.
                ('object', django.db.models.manager.Manager()),
            ],
        ),
    ]
| 0 | 303 | 23 |
c6ddad16f737ee786ec98ffe600a27a0d7811e70 | 22,172 | py | Python | broker/end_code.py | ebloc/ebloc-broker | 776a8d9d4642ed1ba4726c94da68d61bd81c098b | [
"MIT"
] | 3 | 2021-12-11T19:26:57.000Z | 2021-12-30T00:17:23.000Z | broker/end_code.py | ebloc/ebloc-broker | 776a8d9d4642ed1ba4726c94da68d61bd81c098b | [
"MIT"
] | null | null | null | broker/end_code.py | ebloc/ebloc-broker | 776a8d9d4642ed1ba4726c94da68d61bd81c098b | [
"MIT"
] | 1 | 2021-09-18T11:38:07.000Z | 2021-09-18T11:38:07.000Z | #!/usr/bin/env python3
import base64
import getpass
import os
import pprint
import sys
import time
from contextlib import suppress
from pathlib import Path
from time import sleep
from typing import Dict, List
from broker import cfg
from broker._utils._log import br, log, ok
from broker._utils.tools import _remove, exit_after, mkdir, read_json
from broker._utils.web3_tools import get_tx_status
from broker.config import env, logging, setup_logger
from broker.errors import QuietExit
from broker.imports import connect
from broker.lib import (
calculate_size,
eblocbroker_function_call,
is_dir,
remove_files,
run,
run_stdout_to_file,
state,
subprocess_call,
)
from broker.libs import _git, eudat, gdrive, slurm
from broker.utils import (
WHERE,
StorageID,
byte_to_mb,
bytes32_to_ipfs,
eth_address_to_md5,
is_dir_empty,
print_tb,
read_file,
remove_empty_files_and_folders,
)
Ebb = cfg.Ebb
connect()
class Common:
"""Prevent "Class" to have attribute "method" mypy warnings."""
@exit_after(900) # timeout in 15 minuntes
if __name__ == "__main__":
kwargs = {
"job_key": sys.argv[1],
"index": sys.argv[2],
"received_block_number": sys.argv[3],
"folder_name": sys.argv[4],
"slurm_job_id": sys.argv[5],
}
try:
cloud_storage = ENDCODE(**kwargs)
cloud_storage.run()
except QuietExit:
pass
except Exception as e:
print_tb(e)
| 39.381883 | 118 | 0.599269 | #!/usr/bin/env python3
import base64
import getpass
import os
import pprint
import sys
import time
from contextlib import suppress
from pathlib import Path
from time import sleep
from typing import Dict, List
from broker import cfg
from broker._utils._log import br, log, ok
from broker._utils.tools import _remove, exit_after, mkdir, read_json
from broker._utils.web3_tools import get_tx_status
from broker.config import env, logging, setup_logger
from broker.errors import QuietExit
from broker.imports import connect
from broker.lib import (
calculate_size,
eblocbroker_function_call,
is_dir,
remove_files,
run,
run_stdout_to_file,
state,
subprocess_call,
)
from broker.libs import _git, eudat, gdrive, slurm
from broker.utils import (
WHERE,
StorageID,
byte_to_mb,
bytes32_to_ipfs,
eth_address_to_md5,
is_dir_empty,
print_tb,
read_file,
remove_empty_files_and_folders,
)
# Module-level handles: the eBlocBroker contract wrapper and an open
# connection to the configured chain/provider.
Ebb = cfg.Ebb
connect()
class Common:
    """Prevent "Class" to have attribute "method" mypy warnings."""
    def __init__(self) -> None:
        # Attributes shared by every storage back-end; ENDCODE assigns the
        # real values after construction.
        self.results_folder: Path = Path("")
        self.results_folder_prev: Path = Path("")
        self.patch_file: Path = Path("")
        self.requester_gpg_fingerprint: str = ""
        self.patch_upload_name = ""
        self.data_transfer_out = 0.0
    @exit_after(900)  # timeout in 15 minutes
    def _get_tx_status(self, tx_hash):
        """Fetch the tx status via get_tx_status; exit_after aborts after 900 s."""
        get_tx_status(tx_hash)
    def initialize(self):
        """Hook for subclasses; the default back-end needs no setup."""
        pass
class Ipfs(Common):
    """Plain IPFS back-end: no per-file upload -- the ipfs patch folder is
    added as a whole later (see ENDCODE.upload_driver)."""
    def upload(self, *_):
        """Upload nothing."""
        return
class IpfsGPG(Common):
    """IPFS back-end that GPG-encrypts the patch for the requester."""
    def upload(self, *_):
        """Upload files right after all the patchings are completed."""
        try:
            cfg.ipfs.gpg_encrypt(self.requester_gpg_fingerprint, self.patch_file)
        except Exception as e:
            # do not leave a half-written/plaintext patch file behind
            _remove(self.patch_file)
            raise e
class Eudat(Common):
    """EUDAT back-end: pushes result patches into the requester's shared folder."""
    def __init__(self) -> None:
        self.encoded_share_tokens = {}  # type: Dict[str, str]
        self.patch_folder: Path = Path("")
    def initialize(self):
        """Log in to the EUDAT client (best effort) and load the share tokens."""
        with suppress(Exception):
            eudat.login(env.OC_USER, env.LOG_PATH.joinpath(".eudat_client.txt"), env.OC_CLIENT)
        try:
            self.get_shared_tokens()
        except Exception as e:
            print_tb(e)
            raise e
    def upload(self, source_code_hash, *_):
        """Upload the patch for *source_code_hash*; skip if the identical size
        is already uploaded (first upload raises inside the suppress block)."""
        with suppress(Exception):  # first time uploading
            uploaded_file_size = eudat.get_size(f_name=f"{source_code_hash}/{self.patch_upload_name}")
            size_in_bytes = calculate_size(self.patch_file, _type="bytes")
            if uploaded_file_size == float(size_in_bytes):
                log(f"==> {self.patch_file} is already uploaded")
                return
        _data_transfer_out = calculate_size(self.patch_file)
        log(f"==> {br(source_code_hash)}.data_transfer_out={_data_transfer_out}MB")
        self.data_transfer_out += _data_transfer_out
        if not eudat.upload_results(
            self.encoded_share_tokens[source_code_hash], self.patch_upload_name, self.patch_folder, max_retries=5
        ):
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError("No active exception to re-raise") -- probably
            # meant to raise a descriptive upload-failure error; confirm.
            raise
class Gdrive(Common):
    """Google-Drive back-end: pushes result patches to the shared gdrive file."""
    def upload(self, key, is_job_key):
        """Upload result into gdrive.
        :param key: key of the shared gdrive file (for data files the real key
            is resolved through the cached gdrive meta data)
        :raises Exception: if the key cannot be resolved, the mime type is
            unsupported, or the gdrive upload command fails
        """
        # Defined before the try block so the except message below can always
        # reference it; previously, when `is_job_key` was True and the gdrive
        # info call failed, formatting `meta_data` raised UnboundLocalError
        # instead of the intended error.
        meta_data = None
        try:
            if not is_job_key:
                meta_data = gdrive.get_data_key_ids(self.results_folder_prev)
                key = meta_data[key]
            cmd = [env.GDRIVE, "info", "--bytes", key, "-c", env.GDRIVE_METADATA]
            gdrive_info = subprocess_call(cmd, 5, sleep_time=30)
        except Exception as e:
            raise Exception(f"{WHERE(1)} E: {key} does not have a match. meta_data={meta_data}. {e}") from e
        mime_type = gdrive.get_file_info(gdrive_info, "Mime")
        logging.info(f"mime_type={mime_type}")
        self.data_transfer_out += calculate_size(self.patch_file)
        logging.info(f"data_transfer_out={self.data_transfer_out} MB =>" f" rounded={int(self.data_transfer_out)} MB")
        if "folder" in mime_type:
            # destination is a folder: add the patch file under it
            cmd = [env.GDRIVE, "upload", "--parent", key, self.patch_file, "-c", env.GDRIVE_METADATA]
        elif "gzip" in mime_type or "/zip" in mime_type:
            # destination is a single archive: replace it in place
            cmd = [env.GDRIVE, "update", key, self.patch_file, "-c", env.GDRIVE_METADATA]
        else:
            raise Exception("E: files could not be uploaded")
        try:
            log(subprocess_call(cmd, 5))
        except Exception as e:
            print_tb(e)
            raise Exception("E: gdrive could not upload the file") from e
class ENDCODE(IpfsGPG, Ipfs, Eudat, Gdrive):
    """Post-job driver: waits for the running state, diffs/uploads result
    patches via the configured cloud storage, then submits process_payment."""
    def __init__(self, **kwargs) -> None:
        """Parse job arguments, fetch on-chain job info, and prepare result folders."""
        args = " ".join(["{!r}".format(v) for k, v in kwargs.items()])
        self.job_key = kwargs.pop("job_key")
        self.index = int(kwargs.pop("index"))
        self.received_block_number = kwargs.pop("received_block_number")
        self.folder_name = kwargs.pop("folder_name")
        self.slurm_job_id = kwargs.pop("slurm_job_id")
        self.share_tokens = {}  # type: Dict[str, str]
        self.requester_id_address = ""
        self.data_transfer_in = 0
        self.data_transfer_out = 0.0
        self.elapsed_time = 0
        self.source_code_hashes_to_process: List[str] = []
        self.source_code_hashes: List[str] = []
        self.result_ipfs_hash: str = ""
        self.requester_gpg_fingerprint: str = ""
        self.end_time_stamp = ""
        self.modified_date = None
        self.encoded_share_tokens = {}  # type: Dict[str, str]
        #: Set environment variables: https://stackoverflow.com/a/5971326/2402577
        os.environ["IPFS_PATH"] = str(env.HOME.joinpath(".ipfs"))
        log_filename = Path(env.LOG_PATH) / "end_code_output" / f"{self.job_key}_{self.index}.log"
        # NOTE(review): rebinds `logging` locally to the per-job logger; other
        # methods still use the module-level `logging` -- confirm intended.
        logging = setup_logger(log_filename)
        self.job_id = 0  # TODO: should be mapped to slurm_job_id
        log(f"{env.EBLOCPATH}/broker/end_code.py {args}", "bold blue", is_code=True)
        log(f"==> slurm_job_id={self.slurm_job_id}")
        if self.job_key == self.index:
            logging.error("E: Given key and index are equal to each other")
            sys.exit(1)
        try:
            self.job_info = eblocbroker_function_call(
                lambda: Ebb.get_job_info(
                    env.PROVIDER_ID,
                    self.job_key,
                    self.index,
                    self.job_id,
                    self.received_block_number,
                ),
                max_retries=10,
            )
            self.storage_ids = self.job_info["cloudStorageID"]
            requester_id = self.job_info["job_owner"]
            self.requester_id_address = eth_address_to_md5(requester_id)
            self.requester_info = Ebb.get_requester_info(requester_id)
        except Exception as e:
            log(f"E: {e}")
            sys.exit(1)
        # Folder layout produced earlier by the driver for this job run.
        self.results_folder_prev: Path = env.PROGRAM_PATH / self.requester_id_address / f"{self.job_key}_{self.index}"
        self.results_folder = self.results_folder_prev / "JOB_TO_RUN"
        if not is_dir(self.results_folder) and not is_dir(self.results_folder_prev):
            sys.exit(1)
        self.results_data_link = Path(self.results_folder_prev) / "data_link"
        self.results_data_folder = Path(self.results_folder_prev) / "data"
        self.private_dir = Path(env.PROGRAM_PATH) / self.requester_id_address / "cache"
        self.patch_folder = Path(self.results_folder_prev) / "patch"
        self.patch_folder_ipfs = Path(self.results_folder_prev) / "patch_ipfs"
        self.job_status_running_tx = Ebb.mongo_broker.get_job_status_running_tx(self.job_key, self.index)
        mkdir(self.patch_folder)
        mkdir(self.patch_folder_ipfs)
        remove_empty_files_and_folders(self.results_folder)
        log(f"==> whoami={getpass.getuser()} | id={os.getegid()}")
        log(f"==> home={env.HOME}")
        log(f"==> pwd={os.getcwd()}")
        log(f"==> results_folder={self.results_folder}")
        log(f"==> job_key={self.job_key}")
        log(f"==> index={self.index}")
        log(f"==> storage_ids={self.storage_ids}")
        log(f"==> folder_name=[white]{self.folder_name}")
        log(f"==> provider_id={env.PROVIDER_ID}")
        log(f"==> requester_id_address={self.requester_id_address}")
        log(f"==> received={self.job_info['received']}")
        log(f"==> job_status_running_tx={self.job_status_running_tx}")
    def get_shared_tokens(self):
        """Load EUDAT share tokens (cached json first, then mongo fallback)
        and pre-compute their base64-encoded forms for each processed hash."""
        with suppress(Exception):
            share_ids = read_json(f"{self.private_dir}/{self.job_key}_share_id.json")
        for source_code_hash in self.source_code_hashes_to_process:
            try:
                share_token = share_ids[source_code_hash]["share_token"]
                self.share_tokens[source_code_hash] = share_token
                self.encoded_share_tokens[source_code_hash] = base64.b64encode(
                    (f"{share_token}:").encode("utf-8")
                ).decode("utf-8")
            except KeyError:
                try:
                    shared_id = Ebb.mongo_broker.find_shareid_item(f"{self.job_key}_{self.requester_id_address[:16]}")
                    share_token = shared_id["share_token"]
                    self.share_tokens[source_code_hash] = share_token
                    self.encoded_share_tokens[source_code_hash] = base64.b64encode(
                        (f"{share_token}:").encode("utf-8")
                    ).decode("utf-8")
                except Exception as e:
                    log(f"E: share_id cannot be detected from key={self.job_key}")
                    raise e
        for key in share_ids:
            value = share_ids[key]
            try:
                encoded_value = self.encoded_share_tokens[key]
            except:
                _share_token = share_ids[key]["share_token"]
                encoded_value = base64.b64encode((f"{_share_token}:").encode("utf-8")).decode("utf-8")
            log(f"## shared_tokens: {key} => {value['share_token']} | encoded={encoded_value}")
    def get_cloud_storage_class(self, _id):
        """Return cloud storage used for the id of the data."""
        if self.storage_ids[_id] == StorageID.IPFS:
            return Ipfs
        if self.storage_ids[_id] == StorageID.IPFS_GPG:
            return IpfsGPG
        if self.storage_ids[_id] == StorageID.EUDAT:
            return Eudat
        if self.storage_ids[_id] == StorageID.GDRIVE:
            return Gdrive
        raise Exception(f"Corresponding storage_id_class={self.storage_ids[_id]} does not exist")
    def set_source_code_hashes_to_process(self):
        """Convert on-chain code hashes into usable form: ipfs hash for
        ipfs-backed storage, plain text otherwise."""
        for idx, source_code_hash in enumerate(self.source_code_hashes):
            if self.storage_ids[idx] in [StorageID.IPFS, StorageID.IPFS_GPG]:
                ipfs_hash = bytes32_to_ipfs(source_code_hash)
                self.source_code_hashes_to_process.append(ipfs_hash)
            else:
                self.source_code_hashes_to_process.append(cfg.w3.toText(source_code_hash))
    def _ipfs_add_folder(self, folder_path):
        """Add *folder_path* to ipfs, pin it, and account its size as outbound transfer."""
        try:
            self.result_ipfs_hash = cfg.ipfs.add(folder_path)
            logging.info(f"==> result_ipfs_hash={self.result_ipfs_hash}")
            cfg.ipfs.pin(self.result_ipfs_hash)
            data_transfer_out = cfg.ipfs.get_cumulative_size(self.result_ipfs_hash)
        except Exception as e:
            print_tb(e)
            raise e
        data_transfer_out = byte_to_mb(data_transfer_out)
        self.data_transfer_out += data_transfer_out
    def process_payment_tx(self):
        """Submit the process_payment transaction for this job; exits on failure."""
        try:
            tx_hash = eblocbroker_function_call(
                lambda: Ebb.process_payment(
                    self.job_key,
                    self.index,
                    self.job_id,
                    self.elapsed_time,
                    self.result_ipfs_hash,
                    self.storage_ids,
                    self.end_time_stamp,
                    self.data_transfer_in,
                    self.data_transfer_out,
                    self.job_info["core"],
                    self.job_info["run_time"],
                    self.received_block_number,
                ),
                max_retries=10,
            )
        except Exception as e:
            print_tb(e)
            sys.exit(1)
        log(f"==> process_payment {self.job_key} {self.index}")
        return tx_hash
    def clean_before_upload(self):
        """Drop node artifacts that must not end up in the result patch."""
        remove_files(f"{self.results_folder}/.node-xmlhttprequest*")
    def remove_source_code(self):
        """Client's initial downloaded files are removed."""
        timestamp_file = f"{self.results_folder_prev}/timestamp.txt"
        try:
            cmd = ["find", self.results_folder, "-type", "f", "!", "-newer", timestamp_file]
            files_to_remove = run(cmd)
            if files_to_remove:
                log(f"## Files to be removed: \n{files_to_remove}\n")
        except Exception as e:
            print_tb(e)
            sys.exit()
        run(["find", self.results_folder, "-type", "f", "!", "-newer", timestamp_file, "-delete"])
    def git_diff_patch_and_upload(self, source: Path, name, storage_class, is_job_key):
        """Create a git diff patch for *source* and upload it via *storage_class*
        (empty patches are skipped)."""
        if is_job_key:
            log(f"==> base_patch={self.patch_folder}")
            log(f"==> sourcecode_patch={name}")
        else:
            log(f"==> datafile_patch={name}")
        try:
            # ipfs-backed patches are collected in their own folder and
            # uploaded as one folder afterwards
            if storage_class is Ipfs or storage_class is IpfsGPG:
                target_path = self.patch_folder_ipfs
            else:
                target_path = self.patch_folder
            self.patch_upload_name, self.patch_file, is_file_empty = _git.diff_patch(
                source, name, self.index, target_path
            )
            if not is_file_empty:
                try:
                    storage_class.upload(self, name, is_job_key)
                except Exception as e:
                    print_tb(e)
                    raise e
        except Exception as e:
            raise Exception("E: Problem on the git_diff_patch_and_upload() function") from e
    def upload_driver(self):
        """Patch and upload the source-code folder and every data file, then
        push the collected ipfs patches as a single folder."""
        self.clean_before_upload()
        try:
            storage_class = self.get_cloud_storage_class(0)
            self.git_diff_patch_and_upload(self.results_folder, self.job_key, storage_class, is_job_key=True)
        except Exception as e:
            raise e
        for idx, name in enumerate(self.source_code_hashes_to_process[1:], 1):
            # starting from 1st index for data files
            source = self.results_data_folder / name
            try:
                if not self.storage_ids[idx] == StorageID.NONE:
                    storage_class = self.get_cloud_storage_class(idx)
                    self.git_diff_patch_and_upload(source, name, storage_class, is_job_key=False)
                else:
                    pass
            except Exception as e:
                print_tb(e)
                raise e
        if not is_dir_empty(self.patch_folder_ipfs):
            # it will upload files after all the patchings are completed
            # in case any file is created via ipfs
            self._ipfs_add_folder(self.patch_folder_ipfs)
    def sacct_result(self):
        """Return sacct results.
        CPUTime = NCPUS * Elapsed
        To get stats about real CPU usage you need to look at SystemCPU and
        UserCPU, but the docs warns that it only measure CPU time for the
        parent process and not for child processes.
        """
        slurm_log_output_fn = f"{self.results_folder}/slurm_job_info.out"
        cmd = ["sacct", "-X", "--job", self.slurm_job_id, "--format"]
        cmd.append("jobID,jobname,user,account,group,cluster,allocCPUS,REQMEM,TotalCPU,elapsed")
        run_stdout_to_file(cmd, slurm_log_output_fn)
        with open(slurm_log_output_fn, "a") as f:
            f.write("\n\n")
        cmd.pop()
        cmd.append("NNodes,NTasks,ncpus,CPUTime,State,ExitCode,End,CPUTime,MaxRSS")
        run_stdout_to_file(cmd, slurm_log_output_fn, mode="a")
        with open(slurm_log_output_fn, "a") as f:
            f.write("\n")
    def get_job_info(self, is_print=False, is_log_print=True):
        """Refresh self.job_info from the chain (single attempt)."""
        self.job_info = eblocbroker_function_call(
            lambda: Ebb.get_job_info(
                env.PROVIDER_ID,
                self.job_key,
                self.index,
                self.job_id,
                self.received_block_number,
                is_print=is_print,
                is_log_print=is_log_print,
            ),
            max_retries=1,
        )
    def attemp_get_job_info(self):
        """Poll until the job reaches RUNNING state (10 attempts, 30 s apart).
        Raises QuietExit if already COMPLETED; exits the process on failure.
        NOTE(review): method-name typo ('attemp') kept -- callers use it."""
        is_print = True
        sleep_time = 30
        for attempt in range(10):
            # log(self.job_info)
            if self.job_info["stateCode"] == state.code["RUNNING"]:
                # it will come here eventually, when setJob() is deployed. Wait
                # until does values updated on the blockchain
                log("## job has been started")
                return
            if self.job_info["stateCode"] == state.code["COMPLETED"]:
                # detects an error on the slurm side
                log("warning: job is already completed and its money is received")
                self.get_job_info()
                raise QuietExit
            try:
                self.job_info = Ebb.get_job_info(
                    env.PROVIDER_ID, self.job_key, self.index, self.job_id, self.received_block_number, is_print
                )
                is_print = False
            except Exception as e:
                print_tb(e)
                # sys.exit(1)
            # sleep here so this loop is not keeping CPU busy due to
            # start_code tx may deploy late into the blockchain.
            log(
                f"==> {br(attempt)} start_code tx of the job is not obtained yet, "
                f"waiting for {sleep_time} seconds to pass...",
                end="",
            )
            sleep(sleep_time)
            log(ok())
        log("E: failed all the attempts, abort")
        sys.exit(1)
    def run(self):
        """Main flow: gather transfer/metadata, wait for RUNNING, collect
        slurm stats, upload patches, and submit the payment transaction."""
        try:
            data = read_json(f"{self.results_folder_prev}/data_transfer_in.json")
            self.data_transfer_in = data["data_transfer_in"]
            log(f"==> data_transfer_in={self.data_transfer_in} MB -> rounded={int(self.data_transfer_in)} MB")
        except:
            log("E: data_transfer_in.json file does not exist")
        try:
            self.modified_date = read_file(f"{self.results_folder_prev}/modified_date.txt")
            log(f"==> modified_date={self.modified_date}")
        except:
            log("E: modified_date.txt file could not be read")
        self.requester_gpg_fingerprint = self.requester_info["gpg_fingerprint"]
        log("\njob_owner's info\n================", "bold green")
        log(f"==> email=[white]{self.requester_info['email']}")
        log(f"==> gpg_fingerprint={self.requester_gpg_fingerprint}")
        log(f"==> ipfs_id={self.requester_info['ipfs_id']}")
        log(f"==> f_id={self.requester_info['f_id']}")
        if self.job_info["stateCode"] == str(state.code["COMPLETED"]):
            self.get_job_info()
            log(":beer: job is already completed and its money is received", "bold green")
            raise QuietExit
        run_time = self.job_info["run_time"]
        log(f"==> requested_run_time={run_time[self.job_id]} minutes")
        try:
            if self.job_status_running_tx:
                Ebb._wait_for_transaction_receipt(self.job_status_running_tx)
            else:
                log("warning: job_status_running_tx is empty")
            self.get_job_info(is_log_print=False)  # re-fetch job info
            self.attemp_get_job_info()
        except Exception as e:
            print_tb(e)
            raise e
        log("## Received running job status successfully", "bold green")
        try:
            self.job_info = eblocbroker_function_call(
                lambda: Ebb.get_job_source_code_hashes(
                    env.PROVIDER_ID,
                    self.job_key,
                    self.index,
                    # self.job_id,
                    self.received_block_number,
                ),
                max_retries=10,
            )
        except Exception as e:
            print_tb(e)
            sys.exit(1)
        self.source_code_hashes = self.job_info["code_hashes"]
        self.set_source_code_hashes_to_process()
        self.sacct_result()
        self.end_time_stamp = slurm.get_job_end_time(self.slurm_job_id)
        self.elapsed_time = slurm.get_elapsed_time(self.slurm_job_id)
        # never bill more than the requested run time
        if self.elapsed_time > int(run_time[self.job_id]):
            self.elapsed_time = run_time[self.job_id]
        logging.info(f"finalized_elapsed_time={self.elapsed_time}")
        _job_info = pprint.pformat(self.job_info)
        log("## job_info:", "bold magenta")
        log(_job_info, "bold")
        try:
            self.get_cloud_storage_class(0).initialize(self)
            self.upload_driver()
        except Exception as e:
            print_tb(e)
            sys.exit(1)
        data_transfer_sum = self.data_transfer_in + self.data_transfer_out
        log(f"==> data_transfer_in={self.data_transfer_in} MB -> rounded={int(self.data_transfer_in)} MB")
        log(f"==> data_transfer_out={self.data_transfer_out} MB -> rounded={int(self.data_transfer_out)} MB")
        log(f"==> data_transfer_sum={data_transfer_sum} MB -> rounded={int(data_transfer_sum)} MB")
        tx_hash = self.process_payment_tx()
        time.sleep(1)
        self._get_tx_status(tx_hash)
        self.get_job_info()
        log("SUCCESS")
        # TODO: garbage collector, removed downloaded code from local since it is not needed anymore
if __name__ == "__main__":
    # CLI: end_code.py <job_key> <index> <received_block_number> <folder_name> <slurm_job_id>
    kwargs = {
        "job_key": sys.argv[1],
        "index": sys.argv[2],
        "received_block_number": sys.argv[3],
        "folder_name": sys.argv[4],
        "slurm_job_id": sys.argv[5],
    }
    try:
        cloud_storage = ENDCODE(**kwargs)
        cloud_storage.run()
    except QuietExit:
        # already-completed job: nothing to do, exit silently
        pass
    except Exception as e:
        print_tb(e)
| 16,217 | 4,181 | 275 |
888bb26f0894e0a05403f726ece9c5a63104ce37 | 518 | py | Python | tests/test_console/test_models.py | dmitriiweb/hub-scraper | b6817e216f75a9835f3d9cd304f62611defbe458 | [
"MIT"
] | null | null | null | tests/test_console/test_models.py | dmitriiweb/hub-scraper | b6817e216f75a9835f3d9cd304f62611defbe458 | [
"MIT"
] | null | null | null | tests/test_console/test_models.py | dmitriiweb/hub-scraper | b6817e216f75a9835f3d9cd304f62611defbe458 | [
"MIT"
] | null | null | null | from typing import Optional, Protocol
import pytest
EXPECTED_URLS = [
"https://habr.com/kek/v2/articles/?hub=python&sort=all&fl=ru&hl=ru&page=1",
None,
]
@pytest.mark.parametrize(
"page_number, expected_url", ([1, EXPECTED_URLS[0]], [100, EXPECTED_URLS[1]])
)
| 23.545455 | 81 | 0.706564 | from typing import Optional, Protocol
import pytest
EXPECTED_URLS = [
"https://habr.com/kek/v2/articles/?hub=python&sort=all&fl=ru&hl=ru&page=1",
None,
]
class Hub(Protocol):
def get_page_url(self, page_number: int) -> Optional[str]:
...
@pytest.mark.parametrize(
"page_number, expected_url", ([1, EXPECTED_URLS[0]], [100, EXPECTED_URLS[1]])
)
def test_get_page_url(page_number: int, expected_url: str, default_hub: Hub):
assert default_hub.get_page_url(page_number) == expected_url
| 170 | -1 | 71 |
e3819a78434a255592c513cfc1ada521ed094c49 | 1,771 | py | Python | Optimizor.py | muradtuk/UnifiedFramework | 07dd7cf50552fa87fd875818eead03a2fe9e5073 | [
"MIT"
] | null | null | null | Optimizor.py | muradtuk/UnifiedFramework | 07dd7cf50552fa87fd875818eead03a2fe9e5073 | [
"MIT"
] | null | null | null | Optimizor.py | muradtuk/UnifiedFramework | 07dd7cf50552fa87fd875818eead03a2fe9e5073 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import RegressionProblems as RP
import time
from multiprocessing import Lock
| 34.057692 | 101 | 0.610954 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import RegressionProblems as RP
import time
from multiprocessing import Lock
class Optimizor(object):
    """Thin wrapper around the solvers used by the experiments.

    ``problem_type`` selects the underlying model: scikit-learn logistic
    regression ('logistic'), linear SVM ('svm'), or the custom regression
    problem ('lz').  Instances may be used from several worker threads; a
    class-level lock serializes the non-thread-safe model mutations.
    """
    MODELS = {
        'logistic': lambda C, tol, Z: LogisticRegression(tol=tol, C=C, solver='lbfgs', max_iter=1e4),
        'svm': lambda C, tol, Z: SVC(kernel='linear', C=C, tol=tol),
        'lz': lambda C, tol, Z: RP.RegressionProblem(Z)
    }
    # create mutex for multi-threading purposes
    mutex = Lock()

    def __init__(self, P, problem_type, C, tol, Z, objective_cost):
        """Build the model for *problem_type* and remember the evaluation data.

        :param P: full weighted point set used by computeCost
        :param objective_cost: callable (P, x, extra) -> cost value
        """
        self.problem_type = problem_type
        self.model = Optimizor.MODELS[problem_type](C=C, tol=tol, Z=Z)
        self.sum_weights = None
        self.C = C
        self.Z = Z
        self.TOL = tol
        self.objective_cost = objective_cost
        self.P = P
        self.optimal_w = None

    def defineSumOfWegiths(self, W):
        """Cache the total weight of the full data set.

        (Name keeps the original spelling for backward compatibility.)
        """
        self.sum_weights = np.sum(W)

    def fit(self, P):
        """Fit the model on the weighted point set *P*.

        :returns: tuple (cost of the solution on self.P, elapsed seconds)
        """
        start_time = time.time()
        if 'lz' not in self.problem_type:
            # `with` guarantees release even if set_params/attribute access
            # raises; the previous explicit acquire()/release() pair would
            # leave the class-level lock held and deadlock other workers.
            with Optimizor.mutex:
                c_prime = self.model.C * float(self.sum_weights / (np.sum(P.W)))
                params = {"C": c_prime}
                self.model.set_params(**params)
        self.model.fit(P.P[:, :-1], P.P[:, -1], P.W)
        with Optimizor.mutex:
            w, b = self.model.coef_, self.model.intercept_
            sol = np.hstack((w.flatten(), b)) if b is not None else w
            # remember the first solution as the reference optimum
            if self.optimal_w is None:
                self.optimal_w = sol
        return self.computeCost(self.P, sol), time.time() - start_time

    def computeCost(self, P, x):
        """Evaluate the user-supplied objective on *P* at solution *x*."""
        return self.objective_cost(P, x, (self.sum_weights, ))
58a06add78efa96f730abe62f26a8218d458cc5b | 2,377 | py | Python | ana/robodao.py | Janvanoorschot/anarobo | f50c8dbb72280dfd39210ae3aeeaad2c4046ecd2 | [
"MIT"
] | null | null | null | ana/robodao.py | Janvanoorschot/anarobo | f50c8dbb72280dfd39210ae3aeeaad2c4046ecd2 | [
"MIT"
] | null | null | null | ana/robodao.py | Janvanoorschot/anarobo | f50c8dbb72280dfd39210ae3aeeaad2c4046ecd2 | [
"MIT"
] | null | null | null | import gzip
import os
import json
from .model import Sitting, Action, Teacher, IPupil, APupil, Storyline, StorylineItem, Course, Challenge
class RoboDAO:
"""Gives access to the Robo model objects as defined in the model module. Objects
are preloaded from the ano-directory which contains the Robomind Academy
sitting datafiles"""
TYPE2NAME = {
'Action': None,
'Sitting': None,
'APupil': "apupil",
'IPupil': "ipupil",
'Teacher': "teacher",
'Challenge': "challenge",
'Course': "course",
'Storyline': "storyline",
'StorylineItem': "storylineitem"
}
TYPE2CLASS = {
'Action': Action,
'Sitting': Sitting,
'APupil': APupil,
'IPupil': IPupil,
'Teacher': Teacher,
'Challenge': Challenge,
'Course': Course,
'Storyline': Storyline,
'StorylineItem': StorylineItem
}
def preload(self):
"""Preload model objects as defined in the model module from the
anonymised sittings file in the ano-directory."""
# load the objects
for otype, fname in self.TYPE2NAME.items():
if fname:
path = os.path.join(self.anodir, fname + ".gz")
if os.path.isfile(path):
with gzip.open(path, "rt") as handler:
for line in handler:
omap = json.loads(line)
cls = self.TYPE2CLASS[otype]
item = cls.from_map(omap, self)
self.caches[otype][item.id] = item
| 30.87013 | 104 | 0.545646 | import gzip
import os
import json
from .model import Sitting, Action, Teacher, IPupil, APupil, Storyline, StorylineItem, Course, Challenge
class RoboDAO:
    """Gives access to the Robo model objects as defined in the model module. Objects
    are preloaded from the ano-directory which contains the Robomind Academy
    sitting datafiles"""
    # Maps model-type name -> datafile basename inside the ano-directory.
    # A value of None means the type is not preloaded/cached by this DAO.
    TYPE2NAME = {
        'Action': None,
        'Sitting': None,
        'APupil': "apupil",
        'IPupil': "ipupil",
        'Teacher': "teacher",
        'Challenge': "challenge",
        'Course': "course",
        'Storyline': "storyline",
        'StorylineItem': "storylineitem"
    }
    # Maps model-type name -> model class used to deserialize each JSON line.
    TYPE2CLASS = {
        'Action': Action,
        'Sitting': Sitting,
        'APupil': APupil,
        'IPupil': IPupil,
        'Teacher': Teacher,
        'Challenge': Challenge,
        'Course': Course,
        'Storyline': Storyline,
        'StorylineItem': StorylineItem
    }
    def __init__(self, anodir):
        # one cache dict per preloadable type (those with a file name)
        self.anodir = anodir
        self.caches = {}
        for otype, fname in self.TYPE2NAME.items():
            if self.TYPE2NAME[otype]:
                self.caches[otype] = {}
        self.preload()
    def preload(self):
        """Preload model objects as defined in the model module from the
        anonymised sittings file in the ano-directory."""
        # load the objects: each .gz file holds one JSON object per line;
        # missing files are silently skipped
        for otype, fname in self.TYPE2NAME.items():
            if fname:
                path = os.path.join(self.anodir, fname + ".gz")
                if os.path.isfile(path):
                    with gzip.open(path, "rt") as handler:
                        for line in handler:
                            omap = json.loads(line)
                            cls = self.TYPE2CLASS[otype]
                            item = cls.from_map(omap, self)
                            self.caches[otype][item.id] = item
    def get(self, id):
        """Look up an object by id alone; the type is derived from the id's
        leading non-digit prefix (see _id2type)."""
        type = self._id2type(id)
        return self.get_by_id(type, id)
    def get_by_id(self, otype, id):
        """Return the cached object of type *otype* with *id*, or None if it
        is not cached.  Raises KeyError for an unknown type name.
        NOTE(review): types mapped to None ('Action', 'Sitting') have no
        cache entry, so `self.caches[otype]` would raise KeyError for them
        as well -- confirm callers never pass those types here.
        """
        if otype not in self.TYPE2NAME:
            raise KeyError("no such type %s" % (otype,))
        if id in self.caches[otype]:
            return self.caches[otype][id]
        else:
            print(f"request for non-existing object: {otype}/{id}")
            return None
    def _id2type(self, id):
        # derive the type from the id's leading non-digit run
        # (assumes ids start with a type-name prefix -- TODO confirm format)
        import re
        return re.search(r"\D+", id).group()
| 640 | 0 | 108 |
972c49816166cedf6653d3e1ec02c44814aae24c | 3,098 | py | Python | sphinxsearch/fields.py | bogdandm/django_sphinxsearch | b3a4a46997b4648413cc0313f409c4bdf2c0ebe9 | [
"Beerware"
] | 11 | 2015-09-02T23:47:22.000Z | 2021-05-09T17:50:49.000Z | sphinxsearch/fields.py | bogdandm/django_sphinxsearch | b3a4a46997b4648413cc0313f409c4bdf2c0ebe9 | [
"Beerware"
] | 67 | 2017-12-12T06:46:36.000Z | 2021-09-22T19:33:32.000Z | sphinxsearch/fields.py | bogdandm/django_sphinxsearch | b3a4a46997b4648413cc0313f409c4bdf2c0ebe9 | [
"Beerware"
] | 7 | 2018-02-22T07:14:01.000Z | 2021-09-04T12:16:25.000Z | import datetime
import json
import time
import pytz
from sphinxsearch.lookups import sphinx_lookups
from django.core import exceptions
from django.db import models
class SphinxField(models.TextField):
    """ Non-selectable indexed string field
    In sphinxsearch config terms, sql_field_string or rt_field.
    """
    # Use the sphinx-specific lookup set; copied so lookups registered on
    # this class later cannot mutate the shared sphinx_lookups dict.
    class_lookups = sphinx_lookups.copy()
class SphinxDateTimeField(models.FloatField):
    """ Sphinx timestamp field for sql_attr_timestamp and rt_attr_timestamp.
    NB: sphinxsearch doesn't store microseconds, if necessary, describe
    field as sql_attr_float in config.
    """
    # Values are stored in a float column (the base FloatField type).
# noinspection PyMethodMayBeStatic,PyUnusedLocal
# noinspection PyUnusedLocal,PyMethodMayBeStatic
| 28.953271 | 78 | 0.641704 | import datetime
import json
import time
import pytz
from sphinxsearch.lookups import sphinx_lookups
from django.core import exceptions
from django.db import models
class SphinxField(models.TextField):
    """ Non-selectable indexed string field
    In sphinxsearch config terms, sql_field_string or rt_field.
    """
    # Use the sphinx-specific lookup set; copied so lookups registered on
    # this class later cannot mutate the shared sphinx_lookups dict.
    class_lookups = sphinx_lookups.copy()
class SphinxDateTimeField(models.FloatField):
    """ Sphinx timestamp field for sql_attr_timestamp and rt_attr_timestamp.
    NB: sphinxsearch doesn't store microseconds, if necessary, describe
    field as sql_attr_float in config.
    """

    def get_prep_value(self, value):
        """Convert *value* into an integer UNIX timestamp for storage.

        Accepts datetime/date instances or an already-numeric timestamp;
        anything else raises ValueError.
        """
        if isinstance(value, (datetime.datetime, datetime.date)):
            # Bug fix: datetime.date has no tzinfo attribute, so the
            # original ``value.tzinfo`` raised AttributeError for plain
            # dates; getattr makes the probe safe. Aware datetimes are
            # normalized to UTC before conversion.
            if getattr(value, 'tzinfo', None) is not None:
                value = pytz.UTC.normalize(value)
            return int(time.mktime(value.timetuple()))
        elif isinstance(value, (int, float)):
            return value
        else:
            raise ValueError("Invalid value for UNIX_TIMESTAMP")

    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def from_db_value(self, value, expression, connection):
        """Turn a stored UNIX timestamp back into an aware UTC datetime."""
        return datetime.datetime.fromtimestamp(value).replace(tzinfo=pytz.UTC)
class SphinxIntegerField(models.IntegerField):
    """Integer attribute field using the sphinx-specific lookups."""
    class_lookups = sphinx_lookups.copy()
class SphinxBigIntegerField(models.BigIntegerField):
    """Big-integer attribute field using the sphinx-specific lookups."""
    class_lookups = sphinx_lookups.copy()
class SphinxMultiField(models.IntegerField):
    """Multi-valued integer attribute, transported as a comma-separated
    string of ints."""
    class_lookups = sphinx_lookups.copy()

    def get_prep_value(self, value):
        # None and single ints pass through; any other value is treated
        # as an iterable and prepared element-wise via IntegerField.
        if value is None:
            return None
        if isinstance(value, int):
            return value
        get_prep_value = super().get_prep_value
        return [get_prep_value(v) for v in value]

    # noinspection PyUnusedLocal
    def from_db_value(self, value, expression, connection):
        # Decode a database value like '1,2,3' into a list of ints;
        # bytes are decoded first and the empty string maps to [].
        if value is None:
            return value
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        if value == '':
            return []
        try:
            return list(map(int, value.split(',')))
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def to_python(self, value):
        # NOTE(review): unlike from_db_value, this path handles neither
        # bytes nor the empty string ('' raises ValidationError here but
        # returns [] above) -- confirm whether that asymmetry is intended.
        if value is None:
            return value
        try:
            return list(map(int, value.split(',')))
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
class SphinxMulti64Field(SphinxMultiField):
    """Variant of SphinxMultiField for 64-bit values; behavior inherited
    unchanged."""
    pass
class SphinxJSONField(models.TextField):
    """Field holding arbitrary JSON-serializable data in a text column."""

    # noinspection PyUnusedLocal,PyMethodMayBeStatic
    def from_db_value(self, value, expression, connection):
        """Decode a JSON string coming from the database.

        Non-string values (including None) pass through untouched.
        """
        # The original also tested ``or value is None``; that clause was
        # redundant because None already fails the isinstance check.
        if not isinstance(value, str):
            return value
        return json.loads(value)

    def to_python(self, value):
        """Serialize *value* to a JSON string; None passes through.

        NOTE(review): this direction (Python -> JSON text) is the reverse
        of Django's usual ``to_python`` contract; kept as-is since callers
        may rely on it.
        """
        if value is None:
            return value
        return json.dumps(value)
| 1,782 | 369 | 221 |
d9f96a892d9cdb93e1cc51178c26ae2bd3f0ba2a | 7,727 | py | Python | d4rl/gym_mujoco/__init__.py | vermouth1992/d4rl | a65b64681f21601be5317d3af3171dc7c91f031d | [
"Apache-2.0"
] | null | null | null | d4rl/gym_mujoco/__init__.py | vermouth1992/d4rl | a65b64681f21601be5317d3af3171dc7c91f031d | [
"Apache-2.0"
] | null | null | null | d4rl/gym_mujoco/__init__.py | vermouth1992/d4rl | a65b64681f21601be5317d3af3171dc7c91f031d | [
"Apache-2.0"
] | null | null | null | from gym.envs.registration import register
from d4rl.gym_mujoco import gym_envs
# Reference scores used for return normalization: per environment, the
# expected return of a random policy and of a fully-trained expert policy.
HOPPER_RANDOM_SCORE = -20.272305
HALFCHEETAH_RANDOM_SCORE = -280.178953
WALKER_RANDOM_SCORE = 1.629008
ANT_RANDOM_SCORE = -325.6

HOPPER_EXPERT_SCORE = 3234.3
HALFCHEETAH_EXPERT_SCORE = 12135.0
WALKER_EXPERT_SCORE = 4592.3
ANT_EXPERT_SCORE = 3879.7

# Common prefix shared by every dataset download URL.
_URL_BASE = 'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/'

# (env id, gym_envs entry-point function, random score, expert score, dataset file).
# Collapsing the 21 near-identical register() calls of the original into one
# table removes the copy/paste duplication and makes new datasets a one-line
# addition; the registrations produced are identical.
_ENV_SPECS = (
    # Single Policy datasets
    ('hopper-medium-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_medium.hdf5'),
    ('halfcheetah-medium-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_medium.hdf5'),
    ('walker2d-medium-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_medium.hdf5'),
    ('hopper-expert-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_expert.hdf5'),
    ('halfcheetah-expert-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_expert.hdf5'),
    ('walker2d-expert-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_expert.hdf5'),
    ('hopper-random-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_random.hdf5'),
    ('halfcheetah-random-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_random.hdf5'),
    ('walker2d-random-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_random.hdf5'),
    # Mixed datasets
    ('hopper-medium-replay-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_mixed.hdf5'),
    ('walker2d-medium-replay-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker_mixed.hdf5'),
    ('halfcheetah-medium-replay-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_mixed.hdf5'),
    # Mixtures of random/medium and experts
    ('walker2d-medium-expert-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_medium_expert.hdf5'),
    ('halfcheetah-medium-expert-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_medium_expert.hdf5'),
    ('hopper-medium-expert-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_medium_expert_v1.hdf5'),
    # Ant datasets
    ('ant-medium-expert-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_medium_expert.hdf5'),
    ('ant-medium-replay-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_mixed.hdf5'),
    ('ant-medium-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_medium.hdf5'),
    ('ant-random-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_random.hdf5'),
    ('ant-expert-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_expert.hdf5'),
    ('ant-random-expert-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_random_expert.hdf5'),
)

for _env_id, _entry, _ref_min, _ref_max, _fname in _ENV_SPECS:
    register(
        id=_env_id,
        entry_point='d4rl.gym_mujoco.gym_envs:' + _entry,
        max_episode_steps=1000,
        kwargs={
            'ref_min_score': _ref_min,
            'ref_max_score': _ref_max,
            'dataset_url': _URL_BASE + _fname,
        },
    )
| 31.283401 | 115 | 0.714508 | from gym.envs.registration import register
from d4rl.gym_mujoco import gym_envs
# Reference scores used for return normalization: per environment, the
# expected return of a random policy and of a fully-trained expert policy.
HOPPER_RANDOM_SCORE = -20.272305
HALFCHEETAH_RANDOM_SCORE = -280.178953
WALKER_RANDOM_SCORE = 1.629008
ANT_RANDOM_SCORE = -325.6

HOPPER_EXPERT_SCORE = 3234.3
HALFCHEETAH_EXPERT_SCORE = 12135.0
WALKER_EXPERT_SCORE = 4592.3
ANT_EXPERT_SCORE = 3879.7

# Common prefix shared by every dataset download URL.
_URL_BASE = 'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/'

# (env id, gym_envs entry-point function, random score, expert score, dataset file).
# Collapsing the 21 near-identical register() calls of the original into one
# table removes the copy/paste duplication and makes new datasets a one-line
# addition; the registrations produced are identical.
_ENV_SPECS = (
    # Single Policy datasets
    ('hopper-medium-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_medium.hdf5'),
    ('halfcheetah-medium-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_medium.hdf5'),
    ('walker2d-medium-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_medium.hdf5'),
    ('hopper-expert-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_expert.hdf5'),
    ('halfcheetah-expert-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_expert.hdf5'),
    ('walker2d-expert-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_expert.hdf5'),
    ('hopper-random-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_random.hdf5'),
    ('halfcheetah-random-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_random.hdf5'),
    ('walker2d-random-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_random.hdf5'),
    # Mixed datasets
    ('hopper-medium-replay-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_mixed.hdf5'),
    ('walker2d-medium-replay-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker_mixed.hdf5'),
    ('halfcheetah-medium-replay-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_mixed.hdf5'),
    # Mixtures of random/medium and experts
    ('walker2d-medium-expert-v0', 'get_walker_env', WALKER_RANDOM_SCORE, WALKER_EXPERT_SCORE, 'walker2d_medium_expert.hdf5'),
    ('halfcheetah-medium-expert-v0', 'get_cheetah_env', HALFCHEETAH_RANDOM_SCORE, HALFCHEETAH_EXPERT_SCORE, 'halfcheetah_medium_expert.hdf5'),
    ('hopper-medium-expert-v0', 'get_hopper_env', HOPPER_RANDOM_SCORE, HOPPER_EXPERT_SCORE, 'hopper_medium_expert_v1.hdf5'),
    # Ant datasets
    ('ant-medium-expert-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_medium_expert.hdf5'),
    ('ant-medium-replay-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_mixed.hdf5'),
    ('ant-medium-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_medium.hdf5'),
    ('ant-random-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_random.hdf5'),
    ('ant-expert-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_expert.hdf5'),
    ('ant-random-expert-v0', 'get_ant_env', ANT_RANDOM_SCORE, ANT_EXPERT_SCORE, 'ant_random_expert.hdf5'),
)

for _env_id, _entry, _ref_min, _ref_max, _fname in _ENV_SPECS:
    register(
        id=_env_id,
        entry_point='d4rl.gym_mujoco.gym_envs:' + _entry,
        max_episode_steps=1000,
        kwargs={
            'ref_min_score': _ref_min,
            'ref_max_score': _ref_max,
            'dataset_url': _URL_BASE + _fname,
        },
    )
| 0 | 0 | 0 |
715a0697b3a092ad1ce6b2a3734f888e198add84 | 2,899 | py | Python | gigadetector/gigaviewer.py | EricThomson/gigadetector | c94ff09e4e6f73b803a529b165be68ad3bb0a029 | [
"MIT"
] | null | null | null | gigadetector/gigaviewer.py | EricThomson/gigadetector | c94ff09e4e6f73b803a529b165be68ad3bb0a029 | [
"MIT"
] | null | null | null | gigadetector/gigaviewer.py | EricThomson/gigadetector | c94ff09e4e6f73b803a529b165be68ad3bb0a029 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Draw boxes on images processed using gigadetector pipeline.
click n to keep going, escape to stop.
If you press q I'm not sure what will happen
"""
# Import stuff
import sys
import os
import joblib
import cv2
# The repo lives under the user's home dir; the package dir is appended to
# sys.path so the sibling ``utils`` module can be imported below.
base_path = os.path.expanduser("~") + r"/gigadetector/"
sys.path.append(base_path + r'/gigadetector/')
import utils
#%% set path to final results file, and load data
# includes bboxes, scores, areas, and image paths
# note image paths might change if someone moves images but final node in path
# shouldn't.
processed_image_folder = base_path + r'data/processed/'
# Final bbox and confidence output of faster-rcnn + bbox trimming (bb_analysis_folder.py)
results_file = r'gigafolder_bb_results.pkl' #1801-2648
results_path = processed_image_folder + results_file
# NOTE(review): the opened handle ``f`` is never used -- joblib.load is
# handed the path instead. Harmless, but one of the two should go.
with open(results_path, 'rb') as f:
    analysis_data = joblib.load(results_path)
#%% Extract it all
all_bboxes = analysis_data['all_bboxes']
all_scores = analysis_data['all_scores']
all_areas = analysis_data['all_areas']
image_paths = analysis_data['all_filepaths']
num_images = len(image_paths)
print(f"There are {num_images} images for which you have detection data.")
print(image_paths)
#%% optional test case
"""
OPTIONAL -- uncomment following to run
This is to run on a single image just to make sure it works for one image
"""
# print("\ngigaviewer Tester\nClick escape to break out, n to move on to next image.\n")
# image_ind = 1
# bboxes = all_bboxes[image_ind]
# scores = all_scores[image_ind]
# image_path = image_paths[image_ind]
# image = cv2.imread(image_path)
# utils.draw_bboxes_scores(image.copy(), bboxes, scores, bb_color = (255, 255, 255),
#                          name = 'ViewTester', line_width = 10, text_thickness = 3,
#                          shape = (900, 1000), xy = (130, 50))
#%% If test case seems ok, start from ind you want, and cycle through images
print("\ngigaimage inspector\nClick escape to break out, n to move on to next image.\n")
start_image_ind = 0
window_open = False
for ind in range(start_image_ind, num_images):
    print(f"Working on image {ind} out of {num_images-1}")
    bboxes = all_bboxes[ind]
    scores = all_scores[ind]
    image_path = image_paths[ind]
    print(f"\tLoading{image_path}")
    boxed_image = utils.put_bboxes_scores(cv2.imread(image_path), bboxes, scores,
                                          bb_color = (255, 255, 255),
                                          line_width = 10, text_thickness = 3)
    # Close the previous image's window (named by its index) before
    # opening the next one.
    if window_open:
        cv2.destroyWindow(str(ind-1))
    else:
        window_open = True
    utils.cv_loopshow(boxed_image,
                      name = str(ind),
                      shape = (950, 950),
                      xy = (130, 40))
    k = cv2.waitKey()
    if k == 27:  # escape: stop inspecting (per the module docstring)
        break
    elif k == ord('n'):  # n: advance to the next image
        continue
cv2.destroyAllWindows()
print("\nDONE!!!")
| 31.857143 | 89 | 0.668851 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Draw boxes on images processed using gigadetector pipeline.
click n to keep going, escape to stop.
If you press q I'm not sure what will happen
"""
# Import stuff
import sys
import os
import joblib
import cv2
# The repo lives under the user's home dir; the package dir is appended to
# sys.path so the sibling ``utils`` module can be imported below.
base_path = os.path.expanduser("~") + r"/gigadetector/"
sys.path.append(base_path + r'/gigadetector/')
import utils
#%% set path to final results file, and load data
# includes bboxes, scores, areas, and image paths
# note image paths might change if someone moves images but final node in path
# shouldn't.
processed_image_folder = base_path + r'data/processed/'
# Final bbox and confidence output of faster-rcnn + bbox trimming (bb_analysis_folder.py)
results_file = r'gigafolder_bb_results.pkl' #1801-2648
results_path = processed_image_folder + results_file
with open(results_path, 'rb') as f:
    # Fix: load through the handle we just opened; the original passed the
    # path to joblib.load, leaving ``f`` unused.
    analysis_data = joblib.load(f)
#%% Extract it all
all_bboxes = analysis_data['all_bboxes']
all_scores = analysis_data['all_scores']
all_areas = analysis_data['all_areas']
image_paths = analysis_data['all_filepaths']
num_images = len(image_paths)
print(f"There are {num_images} images for which you have detection data.")
print(image_paths)
#%% optional test case
"""
OPTIONAL -- uncomment following to run
This is to run on a single image just to make sure it works for one image
"""
# print("\ngigaviewer Tester\nClick escape to break out, n to move on to next image.\n")
# image_ind = 1
# bboxes = all_bboxes[image_ind]
# scores = all_scores[image_ind]
# image_path = image_paths[image_ind]
# image = cv2.imread(image_path)
# utils.draw_bboxes_scores(image.copy(), bboxes, scores, bb_color = (255, 255, 255),
#                          name = 'ViewTester', line_width = 10, text_thickness = 3,
#                          shape = (900, 1000), xy = (130, 50))
#%% If test case seems ok, start from ind you want, and cycle through images
print("\ngigaimage inspector\nClick escape to break out, n to move on to next image.\n")
start_image_ind = 0
window_open = False
for ind in range(start_image_ind, num_images):
    print(f"Working on image {ind} out of {num_images-1}")
    bboxes = all_bboxes[ind]
    scores = all_scores[ind]
    image_path = image_paths[ind]
    print(f"\tLoading{image_path}")
    boxed_image = utils.put_bboxes_scores(cv2.imread(image_path), bboxes, scores,
                                          bb_color = (255, 255, 255),
                                          line_width = 10, text_thickness = 3)
    # Close the previous image's window (named by its index) before
    # opening the next one.
    if window_open:
        cv2.destroyWindow(str(ind-1))
    else:
        window_open = True
    utils.cv_loopshow(boxed_image,
                      name = str(ind),
                      shape = (950, 950),
                      xy = (130, 40))
    k = cv2.waitKey()
    if k == 27:  # escape: stop inspecting (per the module docstring)
        break
    elif k == ord('n'):  # n: advance to the next image
        continue
cv2.destroyAllWindows()
print("\nDONE!!!")
| 0 | 0 | 0 |
cbd7634b91b74b633e2f8e907fc7e51fb36fadb4 | 4,518 | py | Python | apps/single_curve_tf.py | AntonBiryukovUofC/diffvg | e081098f52b82bfd0b7e91114d289d65ef969a60 | [
"Apache-2.0"
] | 488 | 2020-09-04T07:23:18.000Z | 2022-03-31T13:59:25.000Z | apps/single_curve_tf.py | AntonBiryukovUofC/diffvg | e081098f52b82bfd0b7e91114d289d65ef969a60 | [
"Apache-2.0"
] | 38 | 2020-09-04T19:27:24.000Z | 2022-03-24T01:13:45.000Z | apps/single_curve_tf.py | AntonBiryukovUofC/diffvg | e081098f52b82bfd0b7e91114d289d65ef969a60 | [
"Apache-2.0"
] | 75 | 2020-09-04T19:18:47.000Z | 2022-03-18T22:25:22.000Z | import pydiffvg_tensorflow as pydiffvg
import tensorflow as tf
import skimage
import numpy as np
# Build the target scene: a single closed path of three cubic Bezier
# segments (num_control_points lists 2 interior control points per segment).
canvas_width, canvas_height = 256, 256
num_control_points = tf.constant([2, 2, 2])
points = tf.constant([[120.0, 30.0], # base
                      [150.0, 60.0], # control point
                      [ 90.0, 198.0], # control point
                      [ 60.0, 218.0], # base
                      [ 90.0, 180.0], # control point
                      [200.0, 65.0], # control point
                      [210.0, 98.0], # base
                      [220.0, 70.0], # control point
                      [130.0, 55.0]]) # control point
path = pydiffvg.Path(num_control_points = num_control_points,
                     points = points,
                     is_closed = True)
shapes = [path]
path_group = pydiffvg.ShapeGroup( shape_ids = tf.constant([0], dtype=tf.int32),
                                  fill_color = tf.constant([0.3, 0.6, 0.3, 1.0]))
shape_groups = [path_group]
scene_args = pydiffvg.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.render
# Render the target image with 2x2 samples per pixel (num_samples_x/y).
img = render(tf.constant(256), # width
             tf.constant(256), # height
             tf.constant(2), # num_samples_x
             tf.constant(2), # num_samples_y
             tf.constant(0), # seed
             *scene_args)
# The output image is in linear RGB space. Do Gamma correction before saving the image.
pydiffvg.imwrite(img, 'results/single_curve_tf/target.png', gamma=2.2)
target = tf.identity(img)
# Move the path to produce initial guess
# normalize points for easier learning rate
points_n = tf.Variable([[100.0/256.0, 40.0/256.0], # base
                        [155.0/256.0, 65.0/256.0], # control point
                        [100.0/256.0, 180.0/256.0], # control point
                        [ 65.0/256.0, 238.0/256.0], # base
                        [100.0/256.0, 200.0/256.0], # control point
                        [170.0/256.0, 55.0/256.0], # control point
                        [220.0/256.0, 100.0/256.0], # base
                        [210.0/256.0, 80.0/256.0], # control point
                        [140.0/256.0, 60.0/256.0]]) # control point
color = tf.Variable([0.3, 0.2, 0.5, 1.0])
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
             tf.constant(256), # height
             tf.constant(2), # num_samples_x
             tf.constant(2), # num_samples_y
             tf.constant(1), # seed
             *scene_args)
pydiffvg.imwrite(img, 'results/single_curve_tf/init.png', gamma=2.2)
# Optimize the normalized control points and the fill color to match the target.
optimizer = tf.compat.v1.train.AdamOptimizer(1e-2)
for t in range(100):
    print('iteration:', t)
    with tf.GradientTape() as tape:
        # Forward pass: render the image.
        path.points = points_n * 256
        path_group.fill_color = color
        # Important to use a different seed every iteration, otherwise the result
        # would be biased.
        scene_args = pydiffvg.serialize_scene(\
            canvas_width, canvas_height, shapes, shape_groups)
        img = render(tf.constant(256), # width
                     tf.constant(256), # height
                     tf.constant(2), # num_samples_x
                     tf.constant(2), # num_samples_y
                     tf.constant(t+1), # seed,
                     *scene_args)
        # Pixel-wise squared-error loss against the target rendering.
        loss_value = tf.reduce_sum(tf.square(img - target))
    print(f"loss_value: {loss_value}")
    pydiffvg.imwrite(img, 'results/single_curve_tf/iter_{}.png'.format(t))
    # Backpropagate through the differentiable rasterizer and apply one step.
    grads = tape.gradient(loss_value, [points_n, color])
    print(grads)
    optimizer.apply_gradients(zip(grads, [points_n, color]))
# Render the final result.
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
             tf.constant(256), # height
             tf.constant(2), # num_samples_x
             tf.constant(2), # num_samples_y
             tf.constant(101), # seed
             *scene_args)
# Save the images and differences.
pydiffvg.imwrite(img, 'results/single_curve_tf/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
      "results/single_curve_tf/iter_%d.png", "-vb", "20M",
      "results/single_curve_tf/out.mp4"])
| 41.072727 | 87 | 0.581231 | import pydiffvg_tensorflow as pydiffvg
import tensorflow as tf
import skimage
import numpy as np
# Build the target scene: a single closed path of three cubic Bezier
# segments (num_control_points lists 2 interior control points per segment).
canvas_width, canvas_height = 256, 256
num_control_points = tf.constant([2, 2, 2])
points = tf.constant([[120.0, 30.0], # base
                      [150.0, 60.0], # control point
                      [ 90.0, 198.0], # control point
                      [ 60.0, 218.0], # base
                      [ 90.0, 180.0], # control point
                      [200.0, 65.0], # control point
                      [210.0, 98.0], # base
                      [220.0, 70.0], # control point
                      [130.0, 55.0]]) # control point
path = pydiffvg.Path(num_control_points = num_control_points,
                     points = points,
                     is_closed = True)
shapes = [path]
path_group = pydiffvg.ShapeGroup( shape_ids = tf.constant([0], dtype=tf.int32),
                                  fill_color = tf.constant([0.3, 0.6, 0.3, 1.0]))
shape_groups = [path_group]
scene_args = pydiffvg.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.render
# Render the target image with 2x2 samples per pixel (num_samples_x/y).
img = render(tf.constant(256), # width
             tf.constant(256), # height
             tf.constant(2), # num_samples_x
             tf.constant(2), # num_samples_y
             tf.constant(0), # seed
             *scene_args)
# The output image is in linear RGB space. Do Gamma correction before saving the image.
pydiffvg.imwrite(img, 'results/single_curve_tf/target.png', gamma=2.2)
target = tf.identity(img)
# Move the path to produce initial guess
# normalize points for easier learning rate
points_n = tf.Variable([[100.0/256.0, 40.0/256.0], # base
                        [155.0/256.0, 65.0/256.0], # control point
                        [100.0/256.0, 180.0/256.0], # control point
                        [ 65.0/256.0, 238.0/256.0], # base
                        [100.0/256.0, 200.0/256.0], # control point
                        [170.0/256.0, 55.0/256.0], # control point
                        [220.0/256.0, 100.0/256.0], # base
                        [210.0/256.0, 80.0/256.0], # control point
                        [140.0/256.0, 60.0/256.0]]) # control point
color = tf.Variable([0.3, 0.2, 0.5, 1.0])
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
             tf.constant(256), # height
             tf.constant(2), # num_samples_x
             tf.constant(2), # num_samples_y
             tf.constant(1), # seed
             *scene_args)
pydiffvg.imwrite(img, 'results/single_curve_tf/init.png', gamma=2.2)
# Optimize the normalized control points and the fill color to match the target.
optimizer = tf.compat.v1.train.AdamOptimizer(1e-2)
for t in range(100):
    print('iteration:', t)
    with tf.GradientTape() as tape:
        # Forward pass: render the image.
        path.points = points_n * 256
        path_group.fill_color = color
        # Important to use a different seed every iteration, otherwise the result
        # would be biased.
        scene_args = pydiffvg.serialize_scene(\
            canvas_width, canvas_height, shapes, shape_groups)
        img = render(tf.constant(256), # width
                     tf.constant(256), # height
                     tf.constant(2), # num_samples_x
                     tf.constant(2), # num_samples_y
                     tf.constant(t+1), # seed,
                     *scene_args)
        # Pixel-wise squared-error loss against the target rendering.
        loss_value = tf.reduce_sum(tf.square(img - target))
    print(f"loss_value: {loss_value}")
    pydiffvg.imwrite(img, 'results/single_curve_tf/iter_{}.png'.format(t))
    # Backpropagate through the differentiable rasterizer and apply one step.
    grads = tape.gradient(loss_value, [points_n, color])
    print(grads)
    optimizer.apply_gradients(zip(grads, [points_n, color]))
# Render the final result.
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
    canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
             tf.constant(256), # height
             tf.constant(2), # num_samples_x
             tf.constant(2), # num_samples_y
             tf.constant(101), # seed
             *scene_args)
# Save the images and differences.
pydiffvg.imwrite(img, 'results/single_curve_tf/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
      "results/single_curve_tf/iter_%d.png", "-vb", "20M",
      "results/single_curve_tf/out.mp4"])
| 0 | 0 | 0 |
f1110d01a31ed827ae3ba95933619ef6394922a9 | 3,743 | py | Python | model_zoo/jag_utils/add_overlap.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 194 | 2016-07-19T15:40:21.000Z | 2022-03-19T08:06:10.000Z | model_zoo/jag_utils/add_overlap.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 1,021 | 2016-07-19T12:56:31.000Z | 2022-03-29T00:41:47.000Z | model_zoo/jag_utils/add_overlap.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 74 | 2016-07-28T18:24:00.000Z | 2022-01-24T19:41:04.000Z | #!/usr/tce/bin/python
import sys
import random
if len(sys.argv) < 2 :
usage = '''
usage: add_overlap.py list_base_name number_of_lists overlap_percent
example: if your lists are t0_list.txt, t1_list.txt and t2_list.txt
you want 30 percent overlap you would run as:
add_overlap.py list.txt 2 30
The output lists names in this example would be:
t0_list.txt.overlap=30 (etc)
The output list will contain 30% more samples;
specifically, t0 will receive 15% of randomly selected
samples from t1 and t2.
The input lists are unchanged
The "excluded" counts in the output files are all set to -1,
because I haven't taken the time to get them correct.
I don't think these are used anyplace in lbann, so this should
be OK.
'''
print usage
exit(9)
#============================================================================
# the List class parses and encapsulate a sample list
# the constructor parses the sample list
#returns a list that contains random samples
# add random samples from some other List to this List
# write final output (sample list file)
#============================================================================
# parse cmd line
# Parse the command line: base list name, number of t<N>_<base> lists,
# and the overlap percentage to inject into each list.
base = sys.argv[1]
count = int(sys.argv[2])
overlap = int(sys.argv[3])
the_lists = []
random_samples = []
for j in range(count) :
    # instantiate a List object; this holds all information from a sample list
    # NOTE(review): the List class is not defined in this copy of the script —
    # presumably stripped; confirm against the full source before running.
    c = List('t' + str(j) + '_' + base)
    the_lists.append(c)
    # get the random samples from the list; this is the overlap that
    # will be added to the other lists
    n = c.num_samples()
    # each of the other (count-1) lists receives an equal share of the overlap
    p = int( (overlap / (count-1))* n / 100)
    random_samples.append(c.get_random_samples(p))
# add overlap to the samples: every list receives every other list's picks
for j in range(count) :
    for k in range(count) :
        if j != k :
            the_lists[j].add_samples(random_samples[k])
# write output files (one <name>.overlap=<percent> file per input list)
for x in the_lists :
    x.write(overlap)
| 28.572519 | 77 | 0.588298 | #!/usr/tce/bin/python
import sys
import random
# Require the three positional arguments (base name, list count, overlap
# percent): sys.argv[1:4] are all read unconditionally below, so checking
# for fewer than 4 entries (script name + 3 args) avoids a later IndexError.
# (The original checked `< 2` and used the Python-2 print statement, which
# is a SyntaxError under Python 3.)
if len(sys.argv) < 4 :
    usage = '''
usage: add_overlap.py list_base_name number_of_lists overlap_percent
example: if your lists are t0_list.txt, t1_list.txt and t2_list.txt
you want 30 percent overlap you would run as:
add_overlap.py list.txt 2 30
The output lists names in this example would be:
t0_list.txt.overlap=30 (etc)
The output list will contain 30% more samples;
specifically, t0 will receive 15% of randomly selected
samples from t1 and t2.
The input lists are unchanged
The "excluded" counts in the output files are all set to -1,
because I haven't taken the time to get them correct.
I don't think these are used anyplace in lbann, so this should
be OK.
'''
    print(usage)
    exit(9)
#============================================================================
# the List class parses and encapsulate a sample list
class List :
    """Parses and encapsulates one CONDUIT_HDF5_INCLUSION sample list.

    Expected file layout:
      line 1: marker containing 'CONDUIT_HDF5_INCLUSION'
      line 2: '<valid> <invalid> <num_files>'
      line 3: base directory
      rest:   '<dir> <included> <excluded> <sample_id> ...' per file
    """

    def __init__(self, filename) :
        """Parse the sample list stored in *filename*."""
        self.filename = filename
        # Fix: use a context manager — the original opened the file and
        # never closed it, leaking the descriptor.
        with open(filename) as a:
            self.first_line = a.readline()
            assert(self.first_line.find('CONDUIT_HDF5_INCLUSION') != -1)
            t = a.readline().split()
            self.valid_samples = int(t[0])
            self.invalid_samples = int(t[1])
            self.num_files = int(t[2])
            self.base_dir = a.readline()   # kept verbatim, newline included
            self.samples = []              # list of (dir, sample_id) tuples
            self.counts = {}               # dir -> included + excluded
            for line in a :
                if len(line) > 2 :
                    t = line.split()
                    # 'file_dir' instead of 'dir': avoid shadowing the builtin
                    file_dir = t[0]
                    included = int(t[1])
                    excluded = int(t[2])
                    self.counts[file_dir] = included + excluded
                    for j in range(3, len(t)):
                        self.samples.append((file_dir, t[j]))

    def get_random_samples(self, n) :
        """Return *n* distinct randomly chosen (dir, sample_id) tuples."""
        chosen = set()
        while len(chosen) < n :
            # set.add already dedups, so no membership pre-check is needed
            chosen.add(random.randint(0, len(self.samples)-1))
        return [self.samples[idx] for idx in chosen]

    def num_samples(self) :
        """Number of (dir, sample_id) tuples currently held."""
        return len(self.samples)

    def add_samples(self, samples) :
        """Append *samples* (iterable of (dir, sample_id) tuples)."""
        self.samples.extend(samples)

    def write(self, overlap) :
        """Write this list to '<filename>.overlap=<overlap>'.

        All 'excluded' counts are written as -1 on purpose (see the usage
        text at the top of the script).
        """
        with open(self.filename + '.overlap=' + str(overlap), 'w') as out:
            out.write(self.first_line)
            # build map: filename -> set of included sample ids
            s = {}
            for sample in self.samples :
                s.setdefault(sample[0], set()).add(sample[1])
            # header: included_samples excluded_samples num_files
            out.write(str(len(self.samples)) + ' -1 ' + str(len(s)) + '\n')
            out.write(self.base_dir)
            # one line per file: '<dir> <count> -1 <id> <id> ... '
            for fn in s.keys() :
                out.write(fn + ' ' + str(len(s[fn])) + ' -1 ')
                for sample_id in s[fn] :
                    out.write(sample_id + ' ')
                out.write('\n')
#============================================================================
# parse cmd line
# Parse the command line: base list name, number of t<N>_<base> lists,
# and the overlap percentage to inject into each list.
base = sys.argv[1]
count = int(sys.argv[2])
overlap = int(sys.argv[3])
the_lists = []
random_samples = []
for j in range(count) :
    # instantiate a List object; this holds all information from a sample list
    c = List('t' + str(j) + '_' + base)
    the_lists.append(c)
    # get the random samples from the list; this is the overlap that
    # will be added to the other lists
    n = c.num_samples()
    # each of the other (count-1) lists receives an equal share of the
    # overlap (true division under Python 3; truncated under Python 2)
    p = int( (overlap / (count-1))* n / 100)
    random_samples.append(c.get_random_samples(p))
# add overlap to the samples: every list receives every other list's picks
for j in range(count) :
    for k in range(count) :
        if j != k :
            the_lists[j].add_samples(random_samples[k])
# write output files (one <name>.overlap=<percent> file per input list)
for x in the_lists :
    x.write(overlap)
| 1,595 | -9 | 147 |
b48fb0723e997c23259ef12852aa2df5b9cb16ba | 1,817 | py | Python | src/psiopic2/cli.py | psiopic2/psiopic2 | c2be97701f023f4396bb5d15e14e1ecc7a71d16b | [
"MIT"
] | null | null | null | src/psiopic2/cli.py | psiopic2/psiopic2 | c2be97701f023f4396bb5d15e14e1ecc7a71d16b | [
"MIT"
] | null | null | null | src/psiopic2/cli.py | psiopic2/psiopic2 | c2be97701f023f4396bb5d15e14e1ecc7a71d16b | [
"MIT"
] | null | null | null | from psiopic2.app.setupwiki import SetupWiki
from psiopic2.app.createcorpus import CreateCorpus
import sys
import logging
from psiopic2.app.ui.logutils import getLogger
from appdirs import AppDirs
from docopt import docopt
import traceback
DOC="""Psiopic2 CLI Tool
Usage:
psiopic2 <command> [options]
Available Commands:
setupwiki
help
buildcorpus
For more information run:
psiopic2 <command> --help
"""
# Script entry point. NOTE(review): main() is not defined in this copy of
# the module — presumably stripped; confirm against the full source.
if __name__ == '__main__':
    sys.exit(main())
| 20.647727 | 83 | 0.641167 | from psiopic2.app.setupwiki import SetupWiki
from psiopic2.app.createcorpus import CreateCorpus
import sys
import logging
from psiopic2.app.ui.logutils import getLogger
from appdirs import AppDirs
from docopt import docopt
import traceback
DOC="""Psiopic2 CLI Tool
Usage:
psiopic2 <command> [options]
Available Commands:
setupwiki
help
buildcorpus
For more information run:
psiopic2 <command> --help
"""
class HelpException(BaseException):
    """Raised internally to signal that the usage/help text should be shown.

    NOTE(review): derives from BaseException rather than Exception —
    presumably so broad ``except Exception`` handlers can never swallow it;
    confirm before changing the base class.
    """
    pass
class App():
    """Top-level CLI dispatcher: maps a command name to an app callable."""

    def __init__(self, argv):
        # --no-colors / --no-widgets disable terminal decorations
        self.colors = False if '--no-colors' in argv else True
        self.widgets = False if '--no-widgets' in argv else True
        logLevel = logging.DEBUG if '-d' in argv or '--debug' in argv else logging.INFO
        self.log = getLogger('psiopic', self.colors, logLevel)
        self._argv = argv
        self.appMap = {}   # command name -> callable(argv)

    def addApp(self, appName, appFunc):
        """Register *appFunc* under command name *appName*."""
        self.appMap[appName] = appFunc

    def help(self):
        """Print the module-level usage text (DOC) to stdout."""
        sys.stdout.write(DOC)

    def getApp(self, app=None):
        """Resolve a command name to its registered callable.

        Defaults to argv[1] when *app* is None. Raises HelpException for
        'help', an unknown command, or a missing command argument.
        """
        if app == 'help':
            raise HelpException
        try:
            if app is None:          # identity test instead of '== None'
                app = self._argv[1]
            try:
                # single lookup; the original fetched the entry twice and
                # discarded the first result in an unused local
                return self.appMap[app]
            except KeyError:
                raise HelpException
        except IndexError:
            # no command given on the command line
            raise HelpException

    def run(self):
        """Dispatch to the selected app; return a process exit code."""
        ret = 0
        try:
            app = self.getApp()
            app(self._argv)
        except HelpException:
            self.help()
        except Exception:
            self.log.critical('Unhandled exception')
            self.log.critical(traceback.format_exc())
            ret = 1
        finally:
            # returning from 'finally' is safe here: every exception has
            # already been handled above
            if ret > 0:
                self.log.error('Something went wrong. Error code: %s' % ret)
            return ret
def main():
    """Entry point: build the dispatcher, register commands, run it."""
    app = App(sys.argv)
    app.addApp('setupwiki', SetupWiki)
    return app.run()
if __name__ == '__main__':
sys.exit(main())
| 1,126 | 12 | 209 |
b1397e920777b8f7128fdbd01ae3054fd9c109f2 | 2,403 | py | Python | LCD_service.py | jarzab3/smart_city_mdx | 957ecfc35414d2833f2112bf3d6e0d0e366b119a | [
"MIT"
] | 1 | 2019-01-22T17:19:22.000Z | 2019-01-22T17:19:22.000Z | LCD_service.py | jarzab3/smart_city_mdx | 957ecfc35414d2833f2112bf3d6e0d0e366b119a | [
"MIT"
] | null | null | null | LCD_service.py | jarzab3/smart_city_mdx | 957ecfc35414d2833f2112bf3d6e0d0e366b119a | [
"MIT"
] | null | null | null | from asip.services.asip_service import AsipService
import sys | 37.546875 | 118 | 0.56263 | from asip.services.asip_service import AsipService
import sys
class LCDService(AsipService):
    """ASIP service that drives a character LCD (write a row / clear all)."""

    DEBUG = False
    _serviceID = 'L'
    # single-character command tags understood by the device
    __TAG_LCD_WRITE = 'W'
    __TAG_LCD_CLEAR = 'C'
    asip = None  # the AsipClient this service is attached to

    def __init__(self, id, asipclient):
        """Create the service and attach it to *asipclient*."""
        AsipService.__init__(self)
        self.asip = asipclient

    # --- standard getters and setters -----------------------------------
    def get_service_id(self):
        return self._serviceID

    def set_service_id(self, id):
        self._serviceID = id

    def set_client(self, client):
        """Attach an AsipClient instance."""
        self.asip = client

    def get_client(self):
        return self.asip

    def process_response(self, message):
        """The LCD never reports back, so incoming messages are ignored."""
        pass

    def set_LCD_message(self, message, line):
        """Write *message* on LCD row *line*; rows outside 0-4 are rejected."""
        if line > 4 or line < 0:
            sys.stdout.write("ERROR: line number ({}) not in range! (0-4)".format(line))
            return
        if self.DEBUG:
            sys.stdout.write("DEBUG: Writing: {} to line {} on the LCD\n".format(message, line))
        payload = "{},{},{},{}\n".format(
            self._serviceID, self.__TAG_LCD_WRITE, str(line), message)
        self.asip.get_asip_writer().write(payload)

    def clear_LCD(self):
        """Blank every row of the LCD."""
        if self.DEBUG:
            sys.stdout.write("DEBUG: Clearing the LCD")
        payload = "{},{}\n".format(self._serviceID, self.__TAG_LCD_CLEAR)
        self.asip.get_asip_writer().write(payload)
f847f77650111ce9c7da5d9609ed517281febad1 | 2,179 | py | Python | tools/knight/knight.py | RobertoPrevato/Humbular | bd86f0c227140644873b5b1e5ba4b47939d784db | [
"MIT"
] | null | null | null | tools/knight/knight.py | RobertoPrevato/Humbular | bd86f0c227140644873b5b1e5ba4b47939d784db | [
"MIT"
] | null | null | null | tools/knight/knight.py | RobertoPrevato/Humbular | bd86f0c227140644873b5b1e5ba4b47939d784db | [
"MIT"
] | null | null | null | """
* Knight 1.0.0
* https://github.com/RobertoPrevato/Knight
*
* Copyright 2015, Roberto Prevato
* http://ugrose.com
*
* Licensed under the MIT license:
* http://www.opensource.org/licenses/MIT
"""
import argparse
separator = "******************************************************\n"
parser = argparse.ArgumentParser(description= "Packs .html templates into .js files, possibly for Angular or Knockout.",
epilog = "{}\n{}".format("author: Roberto Prevato roberto.prevato@gmail.com", separator))
parser.add_argument("-p", "--path", dest= "path",
required=True, help="path to root folder from where to start the research of .html files")
parser.add_argument("-v", "--variable", dest= "templates_variable",
required=False, help="when generating templates in custom mode (no), the name of the global variable where to store templates. For example: $.templates.")
parser.add_argument("-c", "--comment", dest= "comment",
required=False, help="allows to add an extra comment line to generated templates files.")
parser.add_argument("-m", "--mode", dest="mode",
required=False, choices=["ko", "ng", "no"], help="no for custom (default); ng to generate Angular templates; ko to generate Knockout templates")
parser.add_argument("-a", "--appname", dest="appname",
default="app", help="when generating templates for Angular, the name of the application")
parser.add_argument("-u", "--underscoreJsCompile",
dest="underscore_js_compile", default="", help="allows to run UnderscoreJs compilation on templates using the given global variable/function")
args = parser.parse_args()
from lib import ScriptsHelper
main(args)
| 44.469388 | 174 | 0.599816 | """
* Knight 1.0.0
* https://github.com/RobertoPrevato/Knight
*
* Copyright 2015, Roberto Prevato
* http://ugrose.com
*
* Licensed under the MIT license:
* http://www.opensource.org/licenses/MIT
"""
import argparse
# Visual divider reused in the --help epilog.
separator = "******************************************************\n"

# Command-line interface: pack .html templates into .js files.
parser = argparse.ArgumentParser(description= "Packs .html templates into .js files, possibly for Angular or Knockout.",
                                 epilog = "{}\n{}".format("author: Roberto Prevato roberto.prevato@gmail.com", separator))
parser.add_argument("-p", "--path", dest= "path",
                    required=True, help="path to root folder from where to start the research of .html files")
parser.add_argument("-v", "--variable", dest= "templates_variable",
                    required=False, help="when generating templates in custom mode (no), the name of the global variable where to store templates. For example: $.templates.")
parser.add_argument("-c", "--comment", dest= "comment",
                    required=False, help="allows to add an extra comment line to generated templates files.")
parser.add_argument("-m", "--mode", dest="mode",
                    required=False, choices=["ko", "ng", "no"], help="no for custom (default); ng to generate Angular templates; ko to generate Knockout templates")
parser.add_argument("-a", "--appname", dest="appname",
                    default="app", help="when generating templates for Angular, the name of the application")
parser.add_argument("-u", "--underscoreJsCompile",
                    dest="underscore_js_compile", default="", help="allows to run UnderscoreJs compilation on templates using the given global variable/function")
# Parse immediately at import time; this module is a script, not a library.
args = parser.parse_args()
from lib import ScriptsHelper
def main(options):
    """Generate the .js template files from the parsed CLI options."""
    generate = ScriptsHelper.generate_templates_files
    generate(
        options.path,
        options.mode,
        options.appname,
        options.underscore_js_compile,
        options.templates_variable,
        options.comment,
    )

# run immediately with the options parsed above
main(args)
| 376 | 0 | 23 |
36cdf0600dca52e74cfdf928904ca27d6120a2d2 | 3,416 | py | Python | crons/navitron_crons/cli_core.py | j9ac9k/navitron | efe7fba739037da7cc35e34dbe10d7d292260860 | [
"MIT"
] | 2 | 2018-07-22T18:09:44.000Z | 2021-06-20T19:09:33.000Z | crons/navitron_crons/cli_core.py | j9ac9k/navitron | efe7fba739037da7cc35e34dbe10d7d292260860 | [
"MIT"
] | 4 | 2017-10-24T22:45:29.000Z | 2018-12-19T17:19:46.000Z | crons/navitron_crons/cli_core.py | j9ac9k/navitron | efe7fba739037da7cc35e34dbe10d7d292260860 | [
"MIT"
] | null | null | null | """cli_core.py: basic metaclass for handling generic tool layout
Acts as global namespace + parent-framework for CLI apps
"""
from os import path
import platform
from datetime import datetime
import warnings
import uuid
from plumbum import cli
import prosper.common.prosper_logging as p_logger
import prosper.common.prosper_config as p_config
import navitron_crons._version as _version
DEFAULT_LOGGER = p_logger.DEFAULT_LOGGER
HERE = path.abspath(path.dirname(__file__))
CONFIG = p_config.ProsperConfig(path.join(HERE, 'navitron_crons.cfg'))
def generate_metadata(
        source_name,
        source_version
):
    """if you're gonna use noSQL, you gotta have provenance! Adds reliable metadata to records

    Args:
        source_name (str): name of source script
        source_version (str): semantic version of source script

    Returns:
        :obj:`dict`: specific metadata

    """
    # note: 'write_recipt' spelling is a wire-format key — do not "fix" it
    return {
        'write_recipt': str(uuid.uuid1()),
        'data_source': source_name,
        'machine_source': platform.node(),
        'version': source_version,
        'package_version': _version.__version__,
        'cron_datetime': datetime.utcnow().isoformat()
    }
def update_which_sde_data(
        current_sde_df,
        latest_esi_df,
        index_key
):
    """validate if current table needs an update

    Args:
        current_sde_df (:obj:`pandas.DataFrame`): current data (from mongodb)
        latest_esi_df (:obj:`pandas.DataFrame`): latest data from REST/ESI
        index_key (str): name of column to match on

    Returns:
        (:obj:`list`): list of keys that need to be updated

    NOTE(review): unimplemented stub — currently always returns None, not
    the documented list; callers must not rely on its result yet.
    """
    pass
class NavitronApplication(cli.Application):
    """parent metaclass for CLI applications

    Load default args and CLI environment variables here
    """
    # shared defaults; subclasses/instances may override after construction
    logger = DEFAULT_LOGGER
    config = CONFIG
    conn = None  # connection slot, populated by subclasses

    # plumbum flags: presence on the command line sets the attribute True
    debug = cli.Flag(
        ['d', '--debug'],
        help='debug mode: run without writing to db'
    )
    verbose = cli.Flag(
        ['v', '--verbose'],
        help='enable verbose messaging'
    )

    @cli.switch(
        ['--config'],
        str,
        help='Override default config with a local config')
    def override_config(self, config_path):
        """override config object with local version"""
        self.config = p_config.ProsperConfig(config_path)

    @cli.switch(
        ['--dump-config'],
        help='Dump global config, for easy custom setup')
    def dump_config(self):
        """dumps config file to stdout for piping into config file"""
        with open(path.join(HERE, 'navitron_crons.cfg'), 'r') as cfg_fh:
            base_config = cfg_fh.read()
        print(base_config)
        # terminal action: print the bundled config and stop immediately
        exit()

    def load_logger(self, progname):
        """build a logging object for the script to use"""
        log_builder = p_logger.ProsperLogger(
            progname,
            self.config.get('LOGGING', 'log_path'),
            config_obj=self.config
        )
        if self.verbose:
            log_builder.configure_debug_logger()
        if not self.debug:
            # non-debug runs also report to Discord; best-effort only
            try:
                log_builder.configure_discord_logger()
            except Exception:
                warnings.warn('Unable to config discord logger', RuntimeWarning)
        self.logger = log_builder.logger

if __name__ == '__main__':
    NavitronApplication.run()
| 26.6875 | 95 | 0.644614 | """cli_core.py: basic metaclass for handling generic tool layout
Acts as global namespace + parent-framework for CLI apps
"""
from os import path
import platform
from datetime import datetime
import warnings
import uuid
from plumbum import cli
import prosper.common.prosper_logging as p_logger
import prosper.common.prosper_config as p_config
import navitron_crons._version as _version
DEFAULT_LOGGER = p_logger.DEFAULT_LOGGER
HERE = path.abspath(path.dirname(__file__))
CONFIG = p_config.ProsperConfig(path.join(HERE, 'navitron_crons.cfg'))
def generate_metadata(
source_name,
source_version
):
"""if you're gonna use noSQL, you gotta have provenance! Adds reliable metadata to records
Args:
source_name (str): name of source script
source_version (str): semantic version of source script
Returns:
:obj:`dict`: specific metadata
"""
now = datetime.utcnow()
write_recipt = str(uuid.uuid1())
metadata_obj = {
'write_recipt': write_recipt,
'data_source': source_name,
'machine_source': platform.node(),
'version': source_version,
'package_version': _version.__version__,
'cron_datetime': now.isoformat()
}
return metadata_obj
def update_which_sde_data(
current_sde_df,
latest_esi_df,
index_key
):
"""validate if current table needs an update
Args:
current_sde_df (:obj:`pandas.DataFrame`): current data (from mongodb)
latest_esi_df (:obj:`pandas.DataFrame`): latest data from REST/ESI
index_key (str): name of column to match on
Returns:
(:obj:`list`): list of keys that need to be updated
"""
pass
class NavitronApplication(cli.Application):
"""parent metaclass for CLI applications
Load default args and CLI environment variables here
"""
logger = DEFAULT_LOGGER
config = CONFIG
conn = None
debug = cli.Flag(
['d', '--debug'],
help='debug mode: run without writing to db'
)
verbose = cli.Flag(
['v', '--verbose'],
help='enable verbose messaging'
)
@cli.switch(
['--config'],
str,
help='Override default config with a local config')
def override_config(self, config_path):
"""override config object with local version"""
self.config = p_config.ProsperConfig(config_path)
@cli.switch(
['--dump-config'],
help='Dump global config, for easy custom setup')
def dump_config(self):
"""dumps config file to stdout for piping into config file"""
with open(path.join(HERE, 'navitron_crons.cfg'), 'r') as cfg_fh:
base_config = cfg_fh.read()
print(base_config)
exit()
def load_logger(self, progname):
"""build a logging object for the script to use"""
log_builder = p_logger.ProsperLogger(
progname,
self.config.get('LOGGING', 'log_path'),
config_obj=self.config
)
if self.verbose:
log_builder.configure_debug_logger()
if not self.debug:
try:
log_builder.configure_discord_logger()
except Exception:
warnings.warn('Unable to config discord logger', RuntimeWarning)
self.logger = log_builder.logger
if __name__ == '__main__':
NavitronApplication.run()
| 0 | 0 | 0 |
3f8c3234ddc415138325fa4c1d197203bd7726e8 | 718 | py | Python | Piston.py | WardVx/PiServer | 53227ee2d826195eaaa1c6631535aafa466ca96c | [
"Unlicense"
] | null | null | null | Piston.py | WardVx/PiServer | 53227ee2d826195eaaa1c6631535aafa466ca96c | [
"Unlicense"
] | 3 | 2021-06-30T01:13:30.000Z | 2021-07-22T13:44:22.000Z | Piston.py | WardVx/PiServer | 53227ee2d826195eaaa1c6631535aafa466ca96c | [
"Unlicense"
] | null | null | null |
import RPi.GPIO as GPIO
import time
import settings
# Travel time (seconds) and the up/down pin numbers come from project settings.
PistonTravelTime = settings.Piston_Reistijd
PinUp = settings.Pin_Omhoog
PinDown = settings.Pin_Omlaag

# NOTE(review): setup()/close() are not defined in this copy of the module —
# presumably stripped; confirm against the full source before running.
if __name__ == '__main__':
    try:
        setup()
    except KeyboardInterrupt:
        close()
| 20.514286 | 57 | 0.675487 |
import RPi.GPIO as GPIO
import time
import settings
PistonTravelTime = settings.Piston_Reistijd
PinUp = settings.Pin_Omhoog
PinDown = settings.Pin_Omlaag
def setup():
    """Configure the two piston control pins as outputs (BCM numbering)."""
    # Bug fix: 'GPIO.cleanup' without parentheses was a bare attribute
    # reference — it never ran. Call it to release any previous pin state.
    GPIO.cleanup()
    GPIO.setmode(GPIO.BCM)
    # Bug fix: GPIO.setup() takes a *direction* (GPIO.OUT / GPIO.IN), not a
    # level; the original passed GPIO.LOW, which only worked because both
    # constants happen to be 0. 'initial' makes the idle-low state explicit.
    GPIO.setup(PinUp, GPIO.OUT, initial=GPIO.LOW)
    GPIO.setup(PinDown, GPIO.OUT, initial=GPIO.LOW)
def PistonUp():
    """Pulse the 'up' pin for the configured travel time, then stop."""
    pin = PinUp
    GPIO.output(pin, GPIO.HIGH)    # start extending
    time.sleep(PistonTravelTime)   # wait for full travel
    GPIO.output(pin, GPIO.LOW)     # stop
def PistonDown():
    """Pulse the 'down' pin for the configured travel time, then stop."""
    pin = PinDown
    GPIO.output(pin, GPIO.HIGH)    # start retracting
    time.sleep(PistonTravelTime)   # wait for full travel
    GPIO.output(pin, GPIO.LOW)     # stop
def close():
    """Release the GPIO pins and log the shutdown."""
    # Bug fix: the original 'GPIO.cleanup' (no parentheses) was a no-op
    # attribute access, so the pins were never actually released.
    GPIO.cleanup()
    print('[SERVER INFO] Cleaning up Piston.py')
# Manual harness: configure the pins, releasing them if Ctrl-C interrupts.
# NOTE(review): only setup() runs inside the try, so close() fires only if
# the interrupt lands during setup — presumably more logic was intended here.
if __name__ == '__main__':
    try:
        setup()
    except KeyboardInterrupt:
        close()
| 339 | 0 | 100 |
be4cb6f59afe6df8eec16e87cc5a186ac212c142 | 429 | py | Python | Autonomous/lidar_random.py | leander-dsouza/URC-2019 | 6773e6b66dfb840bdbb4463441e8a855b42b1123 | [
"MIT"
] | 5 | 2020-05-10T11:03:48.000Z | 2022-01-17T07:00:40.000Z | Autonomous/lidar_random.py | leander-dsouza/URC-2019 | 6773e6b66dfb840bdbb4463441e8a855b42b1123 | [
"MIT"
] | null | null | null | Autonomous/lidar_random.py | leander-dsouza/URC-2019 | 6773e6b66dfb840bdbb4463441e8a855b42b1123 | [
"MIT"
] | 3 | 2020-07-13T14:11:12.000Z | 2022-01-07T18:05:05.000Z | import socket
import time
import random
# Fake LIDAR feed: stream random readings to two local TCP consumers.
TCP_IP1 = '127.0.0.1'
TCP_PORT1 = 5007
transmit = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transmit.connect((TCP_IP1, TCP_PORT1))
TCP_PORT2 = 5006
transmit2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transmit2.connect((TCP_IP1, TCP_PORT2))
while True:
    transmit.send(str(random.randint(0, 5000)).encode())
    # Bug fix: the second send called .encode() directly on the int
    # returned by randint (AttributeError); convert to str first, as the
    # first send does — socket.send requires a bytes-like object.
    transmit2.send(str(random.randint(0, 5000)).encode())
time.sleep(0.5) | 30.642857 | 61 | 0.776224 | import socket
import time
import random
# Fake LIDAR feed: stream random readings to two local TCP consumers.
TCP_IP1 = '127.0.0.1'
TCP_PORT1 = 5007
transmit = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transmit.connect((TCP_IP1, TCP_PORT1))
TCP_PORT2 = 5006
transmit2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transmit2.connect((TCP_IP1, TCP_PORT2))
while True:
    transmit.send(str(random.randint(0, 5000)).encode())
    # Bug fix: the second send called .encode() directly on the int
    # returned by randint (AttributeError); convert to str first, as the
    # first send does — socket.send requires a bytes-like object.
    transmit2.send(str(random.randint(0, 5000)).encode())
    time.sleep(0.5)   # throttle to ~2 readings per second
7377eba7e2a6432c912d267b4df6a3838b0ed502 | 409 | py | Python | tests/test_tests.py | hugollm/pak | 34e543b949d12f1b58a496ce845d1d625b94779c | [
"MIT"
] | 32 | 2017-04-20T11:33:56.000Z | 2019-01-08T19:13:36.000Z | tests/test_tests.py | hugollm/pak | 34e543b949d12f1b58a496ce845d1d625b94779c | [
"MIT"
] | null | null | null | tests/test_tests.py | hugollm/pak | 34e543b949d12f1b58a496ce845d1d625b94779c | [
"MIT"
] | 2 | 2017-05-01T16:09:16.000Z | 2017-05-02T19:49:52.000Z | from unittest import TestCase
import os
import shutil
from foster.build import Build
from foster.test import Test
| 22.722222 | 73 | 0.711491 | from unittest import TestCase
import os
import shutil
from foster.build import Build
from foster.test import Test
class TestTestCase(TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'frames', 'init')
os.chdir(root)
def test_test_command_does_not_run_if_package_is_not_specified(self):
with self.assertRaises(SystemExit):
Test().run()
| 208 | 8 | 77 |
249503ccdf764b1922b4cc43f759d0a601299c02 | 725 | py | Python | Ch10_Tuples/exercise_2.py | romitpatel/learn_python | 42230d04be5af5576ac2cfc4b1d2a9413a1e777a | [
"MIT"
] | 1 | 2021-02-24T11:40:05.000Z | 2021-02-24T11:40:05.000Z | Ch10_Tuples/exercise_2.py | Chatak1/learn_python | 198333e56557301aeff95af321f4daa29834c61e | [
"MIT"
] | null | null | null | Ch10_Tuples/exercise_2.py | Chatak1/learn_python | 198333e56557301aeff95af321f4daa29834c61e | [
"MIT"
] | 2 | 2020-10-02T17:08:42.000Z | 2021-02-24T11:40:12.000Z | fname = input('Please enter a valid file name: ')
try:
fhand = open(fname)
except:
print('Please enter an existing file name')
exit()
counts = dict()
for line in fhand:
line = line.rstrip()
words = line.split()
if not line.startswith('From ') or len(words) < 1: continue
for word in words:
if word.find(':') == -1:continue
hour, min, sec = word.split(':')
if hour not in counts:
counts[hour] = 1
else:
counts[hour] += 1
t = counts.items()
dl = list()
check = sorted(t)
# This approach uses the sorted method instead of using a list of tuples and the sort method used by list to sort the items.
for key,val in check:
print(key,val)
| 23.387097 | 124 | 0.606897 | fname = input('Please enter a valid file name: ')
try:
fhand = open(fname)
except:
print('Please enter an existing file name')
exit()
counts = dict()
for line in fhand:
line = line.rstrip()
words = line.split()
if not line.startswith('From ') or len(words) < 1: continue
for word in words:
if word.find(':') == -1:continue
hour, min, sec = word.split(':')
if hour not in counts:
counts[hour] = 1
else:
counts[hour] += 1
t = counts.items()
dl = list()
check = sorted(t)
# This approach uses the sorted method instead of using a list of tuples and the sort method used by list to sort the items.
for key,val in check:
print(key,val)
| 0 | 0 | 0 |
9f3041df0ac53320e68e428faefeac520d3cfb25 | 15,581 | py | Python | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2011] 1.py | gour/holidata | 89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13 | [
"MIT"
] | 32 | 2019-04-12T08:01:34.000Z | 2022-02-28T04:41:50.000Z | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2011] 1.py | gour/holidata | 89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13 | [
"MIT"
] | 74 | 2019-07-09T16:35:20.000Z | 2022-03-09T16:41:34.000Z | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[es_ES-2011] 1.py | gour/holidata | 89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13 | [
"MIT"
] | 20 | 2019-01-28T07:41:02.000Z | 2022-02-16T02:38:57.000Z | [
{
'date': '2011-01-01',
'description': 'Año Nuevo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-01-06',
'description': 'Epifanía del Señor',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-02-28',
'description': 'Día de Andalucía',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'F'
},
{
'date': '2011-03-01',
'description': 'Día de las Illes Balears',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'F'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RF'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RV'
},
{
'date': '2011-04-22',
'description': 'Viernes Santo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-23',
'description': 'Fiesta de Castilla y León',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'F'
},
{
'date': '2011-04-23',
'description': 'San Jorge / Día de Aragón',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RF'
},
{
'date': '2011-04-24',
'description': 'Pascua',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RV'
},
{
'date': '2011-05-01',
'description': 'Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-05-02',
'description': 'Fiesta de la Comunidad de Madrid',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'F'
},
{
'date': '2011-05-17',
'description': 'Día de las Letras Gallegas',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'F'
},
{
'date': '2011-05-30',
'description': 'Día de Canarias',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'F'
},
{
'date': '2011-05-31',
'description': 'Día de Castilla-La Mancha',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'F'
},
{
'date': '2011-06-09',
'description': 'Día de la Región de Murcia',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'F'
},
{
'date': '2011-06-09',
'description': 'Día de La Rioja',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'F'
},
{
'date': '2011-06-13',
'description': 'Lunes de Pascua Granada',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'F'
},
{
'date': '2011-06-23',
'description': 'Corpus Christi',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RV'
},
{
'date': '2011-06-23',
'description': 'Corpus Christi',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RV'
},
{
'date': '2011-06-24',
'description': 'San Juan',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol / Día Nacional de Galicia',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RF'
},
{
'date': '2011-07-28',
'description': 'Día de las Instituciones de Cantabria',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'F'
},
{
'date': '2011-08-15',
'description': 'Asunción de la Virgen',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-09-08',
'description': 'Día de Asturias',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'F'
},
{
'date': '2011-09-08',
'description': 'Día de Extremadura',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'F'
},
{
'date': '2011-09-15',
'description': 'La Bien Aparecida',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'RF'
},
{
'date': '2011-10-12',
'description': 'Fiesta Nacional de España',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-10-25',
'description': 'Día del País Vasco-Euskadiko Eguna',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'F'
},
{
'date': '2011-11-01',
'description': 'Todos los Santos',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-07',
'description': 'Fiesta del Sacrificio (Aid El Kebir)',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RV'
},
{
'date': '2011-11-07',
'description': 'Lunes siguiente a la Fiesta del Sacrificio (Eidul Adha)',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RV'
},
{
'date': '2011-12-06',
'description': 'Día de la Constitución Española',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-12-08',
'description': 'Inmaculada Concepción',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-12-25',
'description': 'Natividad del Señor',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RF'
}
] | 22.581159 | 81 | 0.374045 | [
{
'date': '2011-01-01',
'description': 'Año Nuevo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-01-06',
'description': 'Epifanía del Señor',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-02-28',
'description': 'Día de Andalucía',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'F'
},
{
'date': '2011-03-01',
'description': 'Día de las Illes Balears',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'F'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RF'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RV'
},
{
'date': '2011-04-22',
'description': 'Viernes Santo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-23',
'description': 'Fiesta de Castilla y León',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'F'
},
{
'date': '2011-04-23',
'description': 'San Jorge / Día de Aragón',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RF'
},
{
'date': '2011-04-24',
'description': 'Pascua',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RV'
},
{
'date': '2011-05-01',
'description': 'Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-05-02',
'description': 'Fiesta de la Comunidad de Madrid',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'F'
},
{
'date': '2011-05-17',
'description': 'Día de las Letras Gallegas',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'F'
},
{
'date': '2011-05-30',
'description': 'Día de Canarias',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'F'
},
{
'date': '2011-05-31',
'description': 'Día de Castilla-La Mancha',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'F'
},
{
'date': '2011-06-09',
'description': 'Día de la Región de Murcia',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'F'
},
{
'date': '2011-06-09',
'description': 'Día de La Rioja',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'F'
},
{
'date': '2011-06-13',
'description': 'Lunes de Pascua Granada',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'F'
},
{
'date': '2011-06-23',
'description': 'Corpus Christi',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RV'
},
{
'date': '2011-06-23',
'description': 'Corpus Christi',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RV'
},
{
'date': '2011-06-24',
'description': 'San Juan',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol / Día Nacional de Galicia',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RF'
},
{
'date': '2011-07-28',
'description': 'Día de las Instituciones de Cantabria',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'F'
},
{
'date': '2011-08-15',
'description': 'Asunción de la Virgen',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-09-08',
'description': 'Día de Asturias',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'F'
},
{
'date': '2011-09-08',
'description': 'Día de Extremadura',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'F'
},
{
'date': '2011-09-15',
'description': 'La Bien Aparecida',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'RF'
},
{
'date': '2011-10-12',
'description': 'Fiesta Nacional de España',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-10-25',
'description': 'Día del País Vasco-Euskadiko Eguna',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'F'
},
{
'date': '2011-11-01',
'description': 'Todos los Santos',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-07',
'description': 'Fiesta del Sacrificio (Aid El Kebir)',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RV'
},
{
'date': '2011-11-07',
'description': 'Lunes siguiente a la Fiesta del Sacrificio (Eidul Adha)',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RV'
},
{
'date': '2011-12-06',
'description': 'Día de la Constitución Española',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-12-08',
'description': 'Inmaculada Concepción',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-12-25',
'description': 'Natividad del Señor',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RF'
}
] | 0 | 0 | 0 |
d4f0c983989b154b56c72a70fe2b08f7297c789b | 28,945 | py | Python | cm_microtissue_struct/plotting.py | david-a-joy/cm-microtissue-struct | b24ce61230563eab9a8531086511b657980ef5a9 | [
"BSD-3-Clause"
] | 1 | 2020-02-17T17:08:31.000Z | 2020-02-17T17:08:31.000Z | cm_microtissue_struct/plotting.py | david-a-joy/cm-microtissue-struct | b24ce61230563eab9a8531086511b657980ef5a9 | [
"BSD-3-Clause"
] | null | null | null | cm_microtissue_struct/plotting.py | david-a-joy/cm-microtissue-struct | b24ce61230563eab9a8531086511b657980ef5a9 | [
"BSD-3-Clause"
] | null | null | null | """ Plotting tools for the simulation framework
Styling tools:
* :py:class:`set_plot_style`: Plot style context manager
* :py:class:`colorwheel`: Custom color palettes
Plotting Functions:
* :py:func:`plot_3d_sphere_cloud`: Plot a sphere cloud in 3D
Axis element functions:
* :py:func:`add_lineplot`: Add lineplots to an axis
* :py:func:`add_histogram`: Add a histogram to an axis
Utilities:
* :py:func:`bootstrap_ci`: Bootstrap estimate of confidence intervals
* :py:func:`get_histogram`: Get a kernel smoothed histogram from binned data
"""
# Imports
import itertools
from contextlib import ContextDecorator
from typing import List, Tuple, Optional, Dict, Callable
import pathlib
# 3rd party imports
import numpy as np
from scipy.stats import gamma, gaussian_kde
from scipy.integrate import simps
import pandas as pd
import seaborn as sns
import matplotlib.cm as mplcm
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
from mpl_toolkits.mplot3d import Axes3D
# Our own imports
from .consts import (
PALETTE, RC_PARAMS_DARK, RC_PARAMS_LIGHT
)
# Styling
class set_plot_style(ContextDecorator):
    """ Context manager for styling matplotlib plots

    Basic usage as a context manager

    .. code-block:: python

        with set_plot_style('dark') as style:
            # In here, plots are 'dark' styled
            fig, ax = plt.subplots(1, 1)
            ax.plot([1, 2, 3], [1, 2, 3])

            # Save the plot with correct background colors
            style.savefig('some_fig.png')

    Can also be used as a decorator

    .. code-block:: python

        @set_plot_style('dark')
        def plot_something():
            # In here, plots are 'dark' styled
            fig, ax = plt.subplots(1, 1)
            ax.plot([1, 2, 3], [1, 2, 3])
            plt.show()

    For more complex use, see the
    `Matplotlib rcParam <http://matplotlib.org/users/customizing.html>`_
    docs which list all the parameters that can be tweaked.

    :param str style:
        One of 'dark', 'minimal', 'poster', 'dark_poster', 'default'

    .. note:: ``self.params``, ``self.savefig_params`` and ``self.axis_color``
        are read by the methods below; they are presumably set by ``__init__``,
        which is not visible in this view -- confirm against the full source.
    """

    # Stack of style names currently applied (most recent last)
    _active_styles: List[str] = []

    @classmethod
    def get_active_style(cls) -> Optional[str]:
        """ Get the currently active style, or None if nothing is active

        NOTE: this used to be declared ``@property`` over ``@classmethod``,
        which only behaved as a class property on Python 3.9-3.10 (the
        combination is deprecated in 3.11 and removed in 3.13).  It is now a
        plain classmethod: call ``set_plot_style.get_active_style()``.
        """
        if cls._active_styles:
            return cls._active_styles[-1]
        return None

    def twinx(self, ax: Optional = None):
        """ Create a second axis sharing the x axis

        :param Axes ax:
            The axis to mirror (defaults to the current axis)
        :returns:
            The new twinned axis, restyled to match this style
        """
        if ax is None:
            ax = plt.gca()
        ax2 = ax.twinx()

        # twinx() resets spines/ticks to matplotlib defaults, so restyle the
        # right-hand axis to be consistent with this style's axis color
        ax2.spines['right'].set_visible(True)
        ax2.tick_params(axis='y',
                        labelcolor=self.axis_color,
                        color=self.axis_color,
                        left=True)
        return ax2

    def set_axis_off(self, ax: Optional = None):
        """ Remove labels and ticks from the axis

        :param Axes ax:
            The axis instance to set to off (defaults to the current axis)
        """
        if ax is None:
            ax = plt.gca()

        # Blank all the things
        ax.set_xticks([])
        ax.set_yticks([])
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.set_axis_off()

    def rotate_xticklabels(self, ax,
                           rotation: float,
                           horizontalalignment: str = 'center',
                           verticalalignment: str = 'center',
                           rotation_mode: str = 'default'):
        """ Rotate the x ticklabels

        :param Axes ax:
            The axis whose tick labels to rotate
        :param float rotation:
            Rotation of the text (in degrees)
        :param str rotation_mode:
            Either "default" or "anchor"
        """
        for tick in ax.get_xticklabels():
            plt.setp(tick,
                     rotation=rotation,
                     horizontalalignment=horizontalalignment,
                     verticalalignment=verticalalignment,
                     rotation_mode=rotation_mode)

    def rotate_yticklabels(self, ax,
                           rotation: float,
                           horizontalalignment: str = 'center',
                           verticalalignment: str = 'center',
                           rotation_mode: str = 'default'):
        """ Rotate the y ticklabels

        :param Axes ax:
            The axis whose tick labels to rotate
        :param float rotation:
            Rotation of the text (in degrees)
        :param str rotation_mode:
            Either "default" or "anchor"
        """
        for tick in ax.get_yticklabels():
            plt.setp(tick,
                     rotation=rotation,
                     horizontalalignment=horizontalalignment,
                     verticalalignment=verticalalignment,
                     rotation_mode=rotation_mode)

    def show(self,
             outfile: Optional[pathlib.Path] = None,
             transparent: bool = True,
             tight_layout: bool = False,
             close: bool = True,
             fig: Optional = None):
        """ Act like matplotlib's show, but also save the file if passed

        :param Path outfile:
            If not None, save to this file instead of plotting
        :param bool transparent:
            If True, save with a transparent background if possible
        :param bool tight_layout:
            If True, try and squish the layout before saving
        :param bool close:
            If True, close the figure after showing/saving
        :param Figure fig:
            The figure to save (defaults to the current figure)
        """
        if tight_layout:
            plt.tight_layout()
        if outfile is None:
            plt.show()
        else:
            print('Writing {}'.format(outfile))
            self.savefig(outfile, transparent=transparent, fig=fig)
        if close:
            plt.close()

    def update(self, params: Dict):
        """ Update the matplotlib rc.params

        :param dict params:
            rcparams to fiddle with
        """
        self.params.update(params)

    def savefig(self,
                savefile: pathlib.Path,
                fig: Optional = None,
                **kwargs):
        """ Save the figure, with proper background colors

        :param Path savefile:
            The file to save (parent directories are created as needed)
        :param fig:
            The figure or plt.gcf()
        :param \\*\\*kwargs:
            Keyword arguments passed to fig.savefig, overriding the style's
            default ``savefig_params``
        """
        if fig is None:
            fig = plt.gcf()
        savefile = pathlib.Path(savefile)
        savefile.parent.mkdir(exist_ok=True, parents=True)

        # BUGFIX: the merged style defaults were previously built and then
        # discarded (the call passed raw **kwargs); now the merged dict is used
        savefig_params = dict(self.savefig_params)
        savefig_params.update(kwargs)
        fig.savefig(str(savefile), **savefig_params)
class colorwheel(object):
    """ Generate colors like a matplotlib color cycle

    .. code-block:: python

        palette = colorwheel(palette='some seaborn palette', n_colors=5)
        for item, color in zip(items, colors):
            # In here, the colors will cycle over and over for each item

        # Access by index
        color = palette[10]

    :param str palette:
        A palette that can be recognized by seaborn
    :param int n_colors:
        The number of colors to generate
    """
    # NOTE(review): this class reads ``self.n_colors`` and aliases
    # ``__next__`` below, but the ``__init__``/``__next__``/``__getitem__``
    # definitions are not visible in this view -- confirm against the full
    # source before modifying.

    @classmethod
    def from_colors(cls,
                    colors: List[str],
                    n_colors: Optional[int] = None):
        """ Make a palette from a list of colors

        :param str colors:
            A list of matplotlib colors to use
        """
        if n_colors is None:
            n_colors = len(colors)
        palette = []
        # NOTE(review): ``zip`` receives a single argument, and
        # ``range(n_colors, itertools.cycle)`` passes a function object as the
        # range stop, so this line raises TypeError at runtime.  It was almost
        # certainly meant to be ``zip(range(n_colors), itertools.cycle(colors))``.
        for _, color in zip(range(n_colors, itertools.cycle)):
            palette.append(mplcolors.to_rgba(color))
        return cls(palette, n_colors=n_colors)

    @classmethod
    def from_color_range(cls,
                         color_start: str,
                         color_end: str,
                         n_colors: int):
        """ Make a color range """
        palette = []
        # Convert the endpoint colors to RGBA, then interpolate each channel
        # linearly between them with full opacity
        color_start = mplcolors.to_rgba(color_start)
        color_end = mplcolors.to_rgba(color_end)
        red_color = np.linspace(color_start[0], color_end[0], n_colors)
        green_color = np.linspace(color_start[1], color_end[1], n_colors)
        blue_color = np.linspace(color_start[2], color_end[2], n_colors)
        for r, g, b in zip(red_color, green_color, blue_color):
            palette.append((r, g, b, 1.0))
        return cls(palette, n_colors=n_colors)

    # Dynamic color palettes
    # These aren't as good as the ones that come with matplotlib
    # Each wheel_* method returns a list of ``self.n_colors`` RGBA tuples.

    def wheel_blackwhite(self) -> List[Tuple]:
        """ Colors from black to white in a linear ramp """
        colors = np.linspace(0, 1, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_greyblack(self) -> List[Tuple]:
        """ Colors from grey to black in a linear ramp """
        colors = np.linspace(0.75, 0, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_greywhite(self) -> List[Tuple]:
        """ Colors from grey to white in a linear ramp """
        colors = np.linspace(0.25, 1, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_lightgreywhite(self) -> List[Tuple]:
        """ Colors from grey to white in a linear ramp """
        colors = np.linspace(0.608, 1, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_redgrey(self) -> List[Tuple]:
        """ Grey to red color space """
        red = np.linspace(155/255, 228/255, self.n_colors)
        green = np.linspace(155/255, 26/255, self.n_colors)
        blue = np.linspace(155/255, 28/255, self.n_colors)
        return [(r, g, b, 1.0) for r, g, b in zip(red, green, blue)]

    def wheel_bluegrey(self) -> List[Tuple]:
        """ Grey to blue color space """
        red = np.linspace(155/255, 70/255, self.n_colors)
        green = np.linspace(155/255, 130/255, self.n_colors)
        blue = np.linspace(155/255, 180/255, self.n_colors)
        return [(r, g, b, 1.0) for r, g, b in zip(red, green, blue)]

    # NOTE(review): a decorator directly above an assignment is a SyntaxError;
    # a ``def`` (most likely ``__next__``) appears to be missing between the
    # ``@property`` and the ``next`` alias, and ``__next__`` is undefined at
    # class-creation time as this stands.  Restore the missing method from the
    # full source.
    @property

    next = __next__
# Helper Functions
def bootstrap_ci(data: np.ndarray,
                 n_boot: int = 1000,
                 random_seed: Optional[int] = None,
                 ci: float = 95,
                 func: Callable = np.mean,
                 axis: int = 0) -> Tuple[np.ndarray]:
    """ Estimate a confidence interval for ``func`` over ``data`` by bootstrapping

    Draws ``n_boot`` resamples (with replacement) along ``axis``, evaluates
    ``func`` on each, and returns the central ``ci``-percent interval of the
    resulting statistic distribution.

    :param ndarray data:
        The data to bootstrap sample
    :param int n_boot:
        Number of times to resample the data
    :param int random_seed:
        Seed for the random number generator (None for nondeterministic)
    :param float ci:
        Confidence interval to calculate (mean +/- ci/2.0)
    :param Callable func:
        Statistic to compute on each resample (default: np.mean)
    :param int axis:
        Which axis to sample over
    :returns:
        The lower and upper bounds on the CI
    """
    num_samples = data.shape[axis]
    rng = np.random.RandomState(random_seed)

    # One statistic per bootstrap resample; each resample draws num_samples
    # indices with replacement along the chosen axis
    stats = [
        func(data.take(rng.randint(0, num_samples, num_samples), axis=axis), axis=axis)
        for _ in range(n_boot)
    ]
    return np.percentile(np.array(stats), [50 - ci/2, 50 + ci/2], axis=0)
def get_histogram(data: np.ndarray,
                  bins: int,
                  range: Optional[Tuple[int]] = None,
                  kernel_smoothing: bool = True,
                  kernel_bandwidth: Optional[str] = None,
                  kernel_samples: int = 100) -> Tuple[np.ndarray]:
    """ Get a histogram and a kernel fit for some data

    :param ndarray data:
        The data to fit
    :param int bins:
        The number of bins to generate
    :param tuple[float] range:
        The range to fit bins to (argument to np.histogram)
    :param bool kernel_smoothing:
        If True, also generate a kernel-smoothed fit. If False, xkernel, ykernel are None
    :param str kernel_bandwidth:
        If not None, the method to use to estimate the kernel smoothed fit
        (``bw_method`` argument to scipy's gaussian_kde)
    :param int kernel_samples:
        The number of samples to draw for the kernel fit
    :returns:
        xbins, ybins, xkernel, ykernel
    """
    bins_y, bins_x = np.histogram(data, bins=bins, range=range)

    # Estimate the kernel smoothed fit
    if kernel_smoothing:
        # BUGFIX: ``scipy.integrate.simps`` was removed in SciPy 1.14; prefer
        # its replacement ``simpson`` and fall back to ``simps`` on old SciPy
        try:
            from scipy.integrate import simpson as _simpson
        except ImportError:  # SciPy < 1.6
            from scipy.integrate import simps as _simpson

        kernel = gaussian_kde(data, bw_method=kernel_bandwidth)
        kernel_x = np.linspace(bins_x[0], bins_x[-1], kernel_samples)
        kernel_y = kernel(kernel_x)

        # Rescale the kernel so its area matches the histogram's total area
        bin_width = bins_x[1:] - bins_x[:-1]
        hist_area = np.sum(bin_width * bins_y)
        kernel_area = _simpson(kernel_y, x=kernel_x)
        kernel_y = kernel_y * hist_area / kernel_area
    else:
        kernel_x = kernel_y = None
    return bins_x, bins_y, kernel_x, kernel_y
# Plot functions
def add_lineplot(ax,
                 data: pd.DataFrame,
                 x: str, y: str,
                 hue: Optional[str] = None,
                 order: Optional[List[str]] = None,
                 hue_order: Optional[List[str]] = None,
                 palette: str = PALETTE,
                 savefile: Optional[pathlib.Path] = None,
                 label: Optional[str] = None,
                 err_style: str = 'band'):
    """ Add a seaborn-style lineplot with extra decorations

    For each category (and hue sub-category) along ``x``, the mean of ``y``
    and a bootstrapped 95% CI are computed; categories with fewer than 3
    samples are rendered as NaN (a gap in the line).

    :param Axes ax:
        The matplotlib axis to draw on
    :param DataFrame data:
        The data to summarize and plot
    :param str x:
        The column to use for the categorical values
    :param str y:
        The column to use for the real values
    :param str hue:
        Optional column that splits the data into separately-colored lines
    :param str palette:
        The palette to use
    :param Path savefile:
        If not None, save the summary table to this path (as .xlsx)
    :param str label:
        Optional legend label (combined with the hue category if both given)
    :param str err_style:
        'band' for a shaded CI band, 'bar' for error bars
    """
    frame = data.dropna()
    if order is None:
        order = np.sort(np.unique(frame[x]))
    if hue is None:
        hue_order = [None]
    elif hue_order is None:
        hue_order = np.sort(np.unique(frame[hue]))

    # Summarize each (category, hue) cell as mean + bootstrapped CI
    records = []
    for cat in order:
        for hue_cat in hue_order:
            if hue_cat is None:
                mask = frame[x] == cat
            else:
                mask = (frame[x] == cat) & (frame[hue] == hue_cat)

            # Cells with fewer than 3 samples can't support a CI estimate
            n_samples = np.sum(mask)
            if n_samples >= 3:
                yvals = frame[mask][y].values
                ymean = np.mean(yvals)
                ylow, yhigh = bootstrap_ci(yvals)
            else:
                ymean = ylow = yhigh = np.nan

            record = {x: cat}
            if hue is not None:
                record[hue] = hue_cat
            record[f'{y} Mean'] = ymean
            record[f'{y} CI Low'] = ylow
            record[f'{y} CI High'] = yhigh
            record['Samples'] = n_samples
            records.append(record)
    summary = pd.DataFrame(records)

    # Save the background data
    if savefile is not None:
        if savefile.suffix != '.xlsx':
            savefile = savefile.parent / (savefile.stem + '.xlsx')
        summary.to_excel(str(savefile))

    # Now draw one line per hue category
    colors = colorwheel(palette, len(hue_order))
    for idx, hue_cat in enumerate(hue_order):
        if hue_cat is None:
            subset = summary
            hue_label = label
        else:
            subset = summary[summary[hue] == hue_cat]
            hue_label = hue_cat if label is None else f'{hue_cat} {label}'

        xcoords = subset[x].values
        ymean = subset[f'{y} Mean'].values
        ylow = subset[f'{y} CI Low'].values
        yhigh = subset[f'{y} CI High'].values
        color = colors[idx]

        if err_style in ('band', 'bands'):
            ax.fill_between(xcoords, ylow, yhigh, facecolor=color, alpha=0.5)
            ax.plot(xcoords, ymean, '-', color=color, label=hue_label)
        elif err_style in ('bar', 'bars'):
            ax.errorbar(xcoords, ymean,
                        np.stack([ymean-ylow, yhigh-ymean], axis=0),
                        capsize=15, linewidth=3, color=color, label=hue_label)
        else:
            raise ValueError(f'Unknown error style: "{err_style}"')
    return ax
def add_histogram(ax,
                  data: np.ndarray,
                  xlabel: Optional[str] = None,
                  ylabel: str = 'Counts',
                  title: Optional[str] = None,
                  bins: int = 10,
                  draw_bars: bool = True,
                  bar_width: float = 0.7,
                  range: Optional[Tuple[float]] = None,
                  fit_dist: Optional[str] = None,
                  fit_dist_color: str = 'r',
                  kernel_smoothing: bool = True,
                  label_kernel_peaks: Optional[str] = None,
                  kernel_smoothing_color: str = 'c',
                  kernel_bandwidth: Optional[str] = None,
                  vlines: Optional[List[np.ndarray]] = None,
                  vline_colors: str = 'b'):
    """ Add a histogram plot

    Basic Usage:

    .. code-block:: python

        fig, ax = plt.subplots(1, 1)
        histogram(ax, np.random.rand(64, 64),
                  draw_bars=True,
                  kernel_smoothing=True,
                  fit_dist='poisson',
                  vlines=[0.25, 0.75])

    This will draw the histogram with a kernel smoothed fit, a poisson fit,
    and vertical lines at x coordinates 0.25 and 0.75.

    :param Axis ax:
        The axis to add the histogram to
    :param ndarray data:
        The data to make the histogram for
    :param str xlabel:
        Label for the x axis
    :param str ylabel:
        Label for the y axis
    :param str title:
        Title for the axis
    :param int bins:
        Number of bins in the histogram
    :param bool draw_bars:
        If True, draw the histogram bars
    :param float bar_width:
        The width of the bars to plot
    :param tuple[float] range:
        The range to fit bins to (argument to np.histogram)
    :param str fit_dist:
        The name of a distribution to fit to the data (only 'gamma' supported)
    :param str fit_dist_color:
        The color of the fit dist line
    :param bool kernel_smoothing:
        If True, plot the kernel smoothed line over the bars
    :param str label_kernel_peaks:
        Any of min, max, both to label extrema in the kernel
    :param str kernel_smoothing_color:
        The color of the kernel smoothed fit line
    :param str kernel_bandwidth:
        The method to calculate the kernel width with
    :param list vlines:
        x coords to draw vertical lines at
    :param list vline_colors:
        The color or list of colors for the spectra
    :raises KeyError:
        If fit_dist is not a recognized distribution name
    :raises ValueError:
        If the number of vline colors doesn't match the number of vlines
    """
    # Estimate the histogram (NaN/inf values are dropped first)
    data = data[np.isfinite(data)]
    xbins, hist, kernel_x, kernel_y = get_histogram(
        data, bins=bins, range=range,
        kernel_smoothing=kernel_smoothing,
        kernel_bandwidth=kernel_bandwidth)
    width = bar_width * (xbins[1] - xbins[0])
    center = (xbins[:-1] + xbins[1:])/2

    # Add bars for the histogram
    if draw_bars:
        ax.bar(center, hist, align='center', width=width)

    # Estimate the kernel smoothed fit
    if kernel_smoothing:
        # Add a kernel smoothed fit
        ax.plot(kernel_x, kernel_y, color=kernel_smoothing_color)

        # Local maxima: sign changes (+ to -) in the first difference
        if label_kernel_peaks in ('max', 'both', True):
            maxima = (np.diff(np.sign(np.diff(kernel_y))) < 0).nonzero()[0] + 1
            kx_maxima = kernel_x[maxima]
            ky_maxima = kernel_y[maxima]
            ax.plot(kx_maxima, ky_maxima, 'oc')
            for kx, ky in zip(kx_maxima, ky_maxima):
                # Offset the text slightly above the peak
                ax.text(kx, ky*1.05, "{}".format(float("{:.2g}".format(kx))),
                        color="c", fontsize=12)

        # Local minima: sign changes (- to +) in the first difference
        if label_kernel_peaks in ('min', 'both', True):
            minima = (np.diff(np.sign(np.diff(kernel_y))) > 0).nonzero()[0] + 1
            kx_minima = kernel_x[minima]
            ky_minima = kernel_y[minima]
            ax.plot(kx_minima, ky_minima, 'oy')
            for kx, ky in zip(kx_minima, ky_minima):
                # Offset the text slightly below the trough
                ax.text(kx, ky*0.88, "{}".format(float("{:.2g}".format(kx))),
                        color="y", fontsize=12)

    # Fit a model distribution to the data
    if fit_dist is not None:
        opt_x = np.linspace(xbins[0], xbins[-1], 100)
        if fit_dist == 'gamma':
            # Small positive offset keeps gamma.fit stable when data has zeros
            fit_alpha, fit_loc, fit_beta = gamma.fit(data + 1e-5)
            # BUGFIX: this used to read ``opt_y = data = gamma.pdf(...)``,
            # which clobbered the sample array and corrupted the "n=" count
            # used for the title below
            opt_y = gamma.pdf(opt_x, fit_alpha, loc=fit_loc, scale=fit_beta) * data.shape[0]
        else:
            raise KeyError(f'Unknown fit distribution: {fit_dist}')
        ax.plot(opt_x, opt_y, fit_dist_color)

    # Add spectral lines
    if vlines is None:
        vlines = []
    if isinstance(vline_colors, (str, tuple)):
        # A single color applies to every line
        vline_colors = [vline_colors for _ in vlines]
    if len(vlines) != len(vline_colors):
        raise ValueError(f'Number of colors and lines needs to match: {vlines} vs {vline_colors}')
    ymin, ymax = ax.get_ylim()
    for vline, vline_color in zip(vlines, vline_colors):
        ax.vlines(vline, ymin, ymax, colors=vline_color)

    # Label the axes
    if xlabel not in (None, ''):
        ax.set_xlabel(xlabel)
    if ylabel not in (None, ''):
        ax.set_ylabel(ylabel)
    if title not in (None, ''):
        ax.set_title(f'{title} (n={data.shape[0]})')
    else:
        ax.set_title(f'n = {data.shape[0]}')
# Complete Plots
def plot_3d_sphere_cloud(centers: List[Tuple[np.ndarray]],
                         colors: List[str] = None,
                         cmap: str = 'inferno',
                         cvalues: Optional[List[np.ndarray]] = None,
                         vmin: Optional[float] = None,
                         vmax: Optional[float] = None,
                         radii: List[float] = 1.0,
                         title: Optional[str] = None,
                         marker: str = 'o',
                         markersize: float = 10,
                         figsize: Tuple[int] = (16, 16),
                         outfile: Optional[pathlib.Path] = None,
                         add_colorbar: bool = False):
    """ Plot populations of spheres as a 3D scatter cloud

    Exactly one of ``colors`` or ``cvalues`` must be given: either explicit
    per-population colors, or scalar values mapped through ``cmap``.

    :param list[tuple[ndarray]] centers:
        A list of (x, y, z) coordinate arrays, one tuple per population
    :param list[str] colors:
        A list of colors, one per population (exclusive with cvalues)
    :param str cmap:
        Colormap name used when coloring by cvalues
    :param list[ndarray] cvalues:
        Scalar values mapped through cmap (exclusive with colors)
    :param float vmin:
        Lower bound of the color normalization (default: nanmin of cvalues)
    :param float vmax:
        Upper bound of the color normalization (default: nanmax of cvalues)
    :param list[float] radii:
        Sphere radius per population (a scalar is broadcast to all)
    :param str title:
        The title for the plot
    :param str marker:
        Matplotlib marker shape to plot
    :param int markersize:
        NOTE(review): accepted but never used -- marker area is derived
        from ``radii`` below; confirm intent before removing
    :param tuple[int] figsize:
        Figure size in inches (widened by 40% when add_colorbar is set)
    :param Path outfile:
        The path to write the output file to (shows interactively if None)
    :param bool add_colorbar:
        If True, draw a colorbar for the cvalues mapping
    """
    # A scalar radius applies to every population
    if isinstance(radii, (int, float)):
        radii = [radii for _ in centers]
    if colors is None and cvalues is None:
        raise ValueError('Pass one of "colors" or "cvalues" to plot_3d_sphere_cloud')
    # Convert the color values into a heatmap
    if colors is None:
        if vmin is None:
            vmin = np.nanmin(cvalues)
        if vmax is None:
            vmax = np.nanmax(cvalues)
        norm = mplcolors.Normalize(vmin=vmin, vmax=vmax)
        cmapper = mplcm.get_cmap(cmap)
        colors = []
        for cvalue in cvalues:
            colors.append(cmapper(norm(cvalue)))
        # Keep a mappable around so a colorbar can be drawn later
        mappable = mplcm.ScalarMappable(norm=norm, cmap=cmap)
    else:
        mappable = None
    # Check that the shapes make sense
    assert Axes3D is not None
    if len(centers) != len(colors):
        raise ValueError('Got {} centers but {} colors'.format(len(centers), len(colors)))
    if len(centers) != len(radii):
        raise ValueError('Got {} centers but {} radii'.format(len(centers), len(radii)))
    # Plot everything
    all_x = []
    all_y = []
    all_z = []
    if add_colorbar:
        # Widen the figure so the colorbar doesn't squeeze the 3D axes
        figsize = (figsize[0]*1.4, figsize[1])
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection='3d')
    for center, color, radius in zip(centers, colors, radii):
        px, py, pz = center
        ax.scatter(px, py, pz,
                   marker=marker,
                   c=color,
                   s=radius*50,  # Convert radius from um to dpi
                   depthshade=False,
                   cmap=cmap)
        all_x.append(px)
        all_y.append(py)
        all_z.append(pz)
    all_x = np.concatenate(all_x)
    all_y = np.concatenate(all_y)
    all_z = np.concatenate(all_z)
    # Work out the bounding box
    min_x = np.min(all_x)
    max_x = np.max(all_x)
    min_y = np.min(all_y)
    max_y = np.max(all_y)
    min_z = np.min(all_z)
    max_z = np.max(all_z)
    range_x = max_x - min_x
    range_y = max_y - min_y
    range_z = max_z - min_z
    # Use the largest extent on every axis so spheres aren't distorted
    range_max = max([range_x, range_y, range_z])
    center_x = (min_x + max_x)/2
    center_y = (min_y + max_y)/2
    center_z = (min_z + max_z)/2
    ax.set_xlim([center_x - range_max/2, center_x+range_max/2])
    ax.set_ylim([center_y - range_max/2, center_y+range_max/2])
    ax.set_zlim([center_z - range_max/2, center_z+range_max/2])
    if title is not None:
        fig.suptitle(title)
    if add_colorbar and mappable is not None:
        plt.colorbar(mappable, ax=ax, fraction=0.15, pad=0.05)
    # Show interactively, or save to disk when an output path was given
    if outfile is None:
        plt.show()
    else:
        outfile.parent.mkdir(exist_ok=True, parents=True)
        fig.savefig(str(outfile), transparent=True)
    plt.close()
| 33.231917 | 99 | 0.566972 | """ Plotting tools for the simulation framework
Styling tools:
* :py:class:`set_plot_style`: Plot style context manager
* :py:class:`colorwheel`: Custom color palettes
Plotting Functions:
* :py:func:`plot_3d_sphere_cloud`: Plot a sphere cloud in 3D
Axis element functions:
* :py:func:`add_lineplot`: Add lineplots to an axis
* :py:func:`add_histogram`: Add a histogram to an axis
Utilities:
* :py:func:`bootstrap_ci`: Bootstrap estimate of confidence intervals
* :py:func:`get_histogram`: Get a kernel smoothed histogram from binned data
"""
# Imports
import itertools
from contextlib import ContextDecorator
from typing import List, Tuple, Optional, Dict, Callable
import pathlib
# 3rd party imports
import numpy as np
from scipy.stats import gamma, gaussian_kde
from scipy.integrate import simps
import pandas as pd
import seaborn as sns
import matplotlib.cm as mplcm
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
from mpl_toolkits.mplot3d import Axes3D
# Our own imports
from .consts import (
PALETTE, RC_PARAMS_DARK, RC_PARAMS_LIGHT
)
# Styling
class set_plot_style(ContextDecorator):
    """ Context manager for styling matplotlib plots

    Basic usage as a context manager

    .. code-block:: python

        with set_plot_style('dark') as style:
            # In here, plots are 'dark' styled
            fig, ax = plt.subplots(1, 1)
            ax.plot([1, 2, 3], [1, 2, 3])
            # Save the plot with correct background colors
            style.savefig('some_fig.png')

    Can also be used as a decorator

    .. code-block:: python

        @set_plot_style('dark')
        def plot_something():
            # In here, plots are 'dark' styled
            fig, ax = plt.subplots(1, 1)
            ax.plot([1, 2, 3], [1, 2, 3])
            plt.show()

    For more complex use, see the
    `Matplotlib rcParam <http://matplotlib.org/users/customizing.html>`_
    docs which list all the parameters that can be tweaked.

    :param str style:
        One of 'dark', 'light' or 'default'
        (fixed: the docstring previously advertised 'minimal', 'poster'
        and 'dark_poster', which ``__init__`` never implemented)
    """

    # Stack of currently entered style names (innermost last)
    _active_styles = []

    def __init__(self, style: str = 'dark'):
        style = style.lower().strip()
        self.stylename = style
        if style == 'dark':
            self.params = RC_PARAMS_DARK
            # NOTE: 'frameon' was removed from these defaults -- it is no
            # longer a valid Figure.savefig() keyword in modern matplotlib
            # and was never actually forwarded by the old (buggy) savefig
            self.savefig_params = {'facecolor': 'k',
                                   'edgecolor': 'k'}
        elif style == 'light':
            self.params = RC_PARAMS_LIGHT
            self.savefig_params = {'facecolor': 'w',
                                   'edgecolor': 'w'}
        elif style == 'default':
            self.params = {}
            self.savefig_params = {}
        else:
            raise KeyError(f'Unknown plot style: "{style}"')

    @property
    def axis_color(self):
        """ Default axis edge color for this style ('axes.edgecolor') """
        if self.stylename.startswith('dark'):
            default = 'white'
        else:
            default = 'black'
        return self.params.get('axes.edgecolor', default)

    @classmethod
    def get_active_style(cls) -> Optional[str]:
        """ Get the currently active style, or None if nothing is active """
        if cls._active_styles:
            return cls._active_styles[-1]
        return None

    def twinx(self, ax=None):
        """ Create a second axis sharing the x axis

        :param Axes ax:
            The axis to twin (default: current axis)
        :returns:
            The new right-hand axis, styled to match this style
        """
        if ax is None:
            ax = plt.gca()
        ax2 = ax.twinx()
        # Fix up the defaults to make sense
        ax2.spines['right'].set_visible(True)
        ax2.tick_params(axis='y',
                        labelcolor=self.axis_color,
                        color=self.axis_color,
                        left=True)
        return ax2

    def set_axis_off(self, ax=None):
        """ Remove labels and ticks from the axis

        :param Axes ax:
            The axis instance to set to off (default: current axis)
        """
        if ax is None:
            ax = plt.gca()
        # Blank all the things
        ax.set_xticks([])
        ax.set_yticks([])
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.set_axis_off()

    def rotate_xticklabels(self, ax,
                           rotation: float,
                           horizontalalignment: str = 'center',
                           verticalalignment: str = 'center',
                           rotation_mode: str = 'default'):
        """ Rotate the x ticklabels

        :param float rotation:
            Rotation of the text (in degrees)
        :param str rotation_mode:
            Either "default" or "anchor"
        """
        for tick in ax.get_xticklabels():
            plt.setp(tick,
                     rotation=rotation,
                     horizontalalignment=horizontalalignment,
                     verticalalignment=verticalalignment,
                     rotation_mode=rotation_mode)

    def rotate_yticklabels(self, ax,
                           rotation: float,
                           horizontalalignment: str = 'center',
                           verticalalignment: str = 'center',
                           rotation_mode: str = 'default'):
        """ Rotate the y ticklabels

        :param float rotation:
            Rotation of the text (in degrees)
        :param str rotation_mode:
            Either "default" or "anchor"
        """
        for tick in ax.get_yticklabels():
            plt.setp(tick,
                     rotation=rotation,
                     horizontalalignment=horizontalalignment,
                     verticalalignment=verticalalignment,
                     rotation_mode=rotation_mode)

    def show(self,
             outfile: Optional[pathlib.Path] = None,
             transparent: bool = True,
             tight_layout: bool = False,
             close: bool = True,
             fig=None):
        """ Act like matplotlib's show, but also save the file if passed

        :param Path outfile:
            If not None, save to this file instead of plotting
        :param bool transparent:
            If True, save with a transparent background if possible
        :param bool tight_layout:
            If True, try and squish the layout before saving
        :param bool close:
            If True, close the figure when done
        :param fig:
            The figure to save (default: plt.gcf())
        """
        if tight_layout:
            plt.tight_layout()
        if outfile is None:
            plt.show()
        else:
            print('Writing {}'.format(outfile))
            self.savefig(outfile, transparent=transparent, fig=fig)
        if close:
            plt.close()

    def update(self, params: Dict):
        """ Update the matplotlib rc.params

        :param dict params:
            rcparams to fiddle with
        """
        self.params.update(params)

    def savefig(self,
                savefile: pathlib.Path,
                fig=None,
                **kwargs):
        """ Save the figure, with proper background colors

        :param Path savefile:
            The file to save (parent directories are created as needed)
        :param fig:
            The figure or plt.gcf()
        :param \\*\\*kwargs:
            Extra keyword arguments for fig.savefig; these override the
            style's defaults
        """
        if fig is None:
            fig = plt.gcf()
        savefile = pathlib.Path(savefile)
        savefile.parent.mkdir(exist_ok=True, parents=True)
        savefig_params = dict(self.savefig_params)
        savefig_params.update(kwargs)
        # BUG FIX: this used to pass **kwargs, silently discarding the
        # merged style defaults (facecolor/edgecolor) computed above
        fig.savefig(str(savefile), **savefig_params)

    def __enter__(self):
        # Delegate to matplotlib's rc_context and track the active style
        self._style = plt.rc_context(self.params)
        self._style.__enter__()
        self._active_styles.append(self.stylename)
        return self

    def __exit__(self, *args, **kwargs):
        self._style.__exit__(*args, **kwargs)
        self._active_styles.pop()
class colorwheel(object):
    """ Generate colors like a matplotlib color cycle

    .. code-block:: python

        palette = colorwheel(palette='some seaborn palette', n_colors=5)
        for item, color in zip(items, colors):
            # In here, the colors will cycle over and over for each item

        # Access by index
        color = palette[10]

    :param str palette:
        A palette that can be recognized by seaborn, a list of RGBA tuples,
        or another colorwheel instance to copy
    :param int n_colors:
        The number of colors to generate
    """

    def __init__(self,
                 palette: str = PALETTE,
                 n_colors: int = 10):
        # Unwrap a colorwheel passed as a palette so wheels can be copied
        if isinstance(palette, colorwheel):
            palette = palette.palette
        self.palette = palette
        self.n_colors = n_colors
        self._idx = 0  # cursor for the iterator protocol
        self._color_table = None  # lazily built by the color_table property

    @classmethod
    def from_colors(cls,
                    colors: List[str],
                    n_colors: Optional[int] = None):
        """ Make a palette from a list of colors

        :param list[str] colors:
            A list of matplotlib colors to use (cycled when n_colors is
            larger than the list)
        :param int n_colors:
            Total number of colors in the palette (default: len(colors))
        """
        if n_colors is None:
            n_colors = len(colors)
        palette = []
        # BUG FIX: this used to call zip(range(n_colors, itertools.cycle)),
        # i.e. zip() with a single invalid argument, which raised TypeError
        for _, color in zip(range(n_colors), itertools.cycle(colors)):
            palette.append(mplcolors.to_rgba(color))
        return cls(palette, n_colors=n_colors)

    @classmethod
    def from_color_range(cls,
                         color_start: str,
                         color_end: str,
                         n_colors: int):
        """ Make a palette that linearly interpolates between two colors """
        palette = []
        color_start = mplcolors.to_rgba(color_start)
        color_end = mplcolors.to_rgba(color_end)
        red_color = np.linspace(color_start[0], color_end[0], n_colors)
        green_color = np.linspace(color_start[1], color_end[1], n_colors)
        blue_color = np.linspace(color_start[2], color_end[2], n_colors)
        for r, g, b in zip(red_color, green_color, blue_color):
            palette.append((r, g, b, 1.0))
        return cls(palette, n_colors=n_colors)

    # Dynamic color palettes
    # These aren't as good as the ones that come with matplotlib

    def wheel_bluegrey3(self):
        """ Fixed 3-color near-black / grey / blue palette """
        return [
            (0x04/255, 0x04/255, 0x07/255, 1.0),
            (0xb0/255, 0xb0/255, 0xb3/255, 1.0),
            (0x00/255, 0x00/255, 0xff/255, 1.0),
        ]

    def wheel_bluegrey4(self):
        """ Fixed 4-color grey / light blue / black / blue palette """
        return [
            (0xa2/255, 0xa5/255, 0xa7/255, 1.0),
            (0x5c/255, 0xca/255, 0xe7/255, 1.0),
            (0x04/255, 0x07/255, 0x07/255, 1.0),
            (0x3e/255, 0x5b/255, 0xa9/255, 1.0),
        ]

    def wheel_blackwhite(self) -> List[Tuple]:
        """ Colors from black to white in a linear ramp """
        colors = np.linspace(0, 1, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_greyblack(self) -> List[Tuple]:
        """ Colors from grey to black in a linear ramp """
        colors = np.linspace(0.75, 0, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_greywhite(self) -> List[Tuple]:
        """ Colors from grey to white in a linear ramp """
        colors = np.linspace(0.25, 1, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_lightgreywhite(self) -> List[Tuple]:
        """ Colors from light grey to white in a linear ramp """
        colors = np.linspace(0.608, 1, self.n_colors)
        return [(c, c, c, 1.0) for c in colors]

    def wheel_redgrey(self) -> List[Tuple]:
        """ Grey to red color space """
        red = np.linspace(155/255, 228/255, self.n_colors)
        green = np.linspace(155/255, 26/255, self.n_colors)
        blue = np.linspace(155/255, 28/255, self.n_colors)
        return [(r, g, b, 1.0) for r, g, b in zip(red, green, blue)]

    def wheel_bluegrey(self) -> List[Tuple]:
        """ Grey to blue color space """
        red = np.linspace(155/255, 70/255, self.n_colors)
        green = np.linspace(155/255, 130/255, self.n_colors)
        blue = np.linspace(155/255, 180/255, self.n_colors)
        return [(r, g, b, 1.0) for r, g, b in zip(red, green, blue)]

    @property
    def color_table(self):
        """ The resolved list of RGB(A) tuples, built once and memoized """
        if self._color_table is not None:
            return self._color_table
        # Magic color palettes: 'wheel_*' dispatches to the methods above,
        # 'color_<name>' repeats a single matplotlib color n_colors times
        palette = self.palette
        if isinstance(palette, str):
            if palette.startswith('wheel_'):
                palette = getattr(self, palette)()
            elif palette.startswith('color_'):
                color = palette.split('_', 1)[1]
                color = mplcolors.to_rgba(color)
                palette = [color for _ in range(self.n_colors)]
            else:
                palette = palette
        else:
            palette = self.palette
        # Memorize the color table then output it
        self._color_table = sns.color_palette(palette=palette, n_colors=self.n_colors)
        return self._color_table

    def __len__(self):
        return len(self.color_table)

    def __getitem__(self, idx):
        # Indexing wraps around so any integer is a valid index
        return self.color_table[idx % len(self.color_table)]

    def __iter__(self):
        self._idx = 0
        return self

    def __next__(self):
        # Cycle endlessly through the palette
        color = self.color_table[self._idx]
        self._idx = (self._idx + 1) % len(self.color_table)
        return color

    next = __next__
# Helper Functions
def bootstrap_ci(data: np.ndarray,
                 n_boot: int = 1000,
                 random_seed: Optional[int] = None,
                 ci: float = 95,
                 func: Callable = np.mean,
                 axis: int = 0) -> Tuple[np.ndarray]:
    """ Estimate a confidence interval for ``func(data)`` by bootstrapping

    :param ndarray data:
        The data to resample
    :param int n_boot:
        How many bootstrap resamples to draw
    :param int random_seed:
        Seed for the random number generator
    :param float ci:
        Width of the confidence interval in percent, centered on the median
    :param Callable func:
        Statistic to bootstrap (default: np.mean)
    :param int axis:
        Axis along which to resample
    :returns:
        The lower and upper bounds on the CI
    """
    num_samples = data.shape[axis]
    rng = np.random.RandomState(random_seed)
    # Draw n_boot resamples with replacement and evaluate the statistic
    stats = np.array([
        func(data.take(rng.randint(0, num_samples, num_samples), axis=axis),
             axis=axis)
        for _ in range(n_boot)
    ])
    return np.percentile(stats, [50 - ci/2, 50 + ci/2], axis=0)
def get_histogram(data: np.ndarray,
                  bins: int,
                  range: Optional[Tuple[int]] = None,
                  kernel_smoothing: bool = True,
                  kernel_bandwidth: Optional[str] = None,
                  kernel_samples: int = 100) -> Tuple[np.ndarray]:
    """ Compute a histogram and, optionally, an area-matched kernel fit

    :param ndarray data:
        The data to bin
    :param int bins:
        The number of bins to generate
    :param tuple[float] range:
        Lower/upper bin limits (forwarded to np.histogram)
    :param bool kernel_smoothing:
        If True, also generate a kernel-smoothed fit. If False, the kernel
        outputs are returned as None
    :param str kernel_bandwidth:
        Bandwidth selection method for the gaussian KDE
    :param int kernel_samples:
        Number of x positions to evaluate the KDE at
    :returns:
        xbins, ybins, xkernel, ykernel
    """
    counts, edges = np.histogram(data, bins=bins, range=range)
    if not kernel_smoothing:
        return edges, counts, None, None
    # Evaluate a gaussian KDE on an evenly spaced grid over the bin span
    density = gaussian_kde(data, bw_method=kernel_bandwidth)
    grid = np.linspace(edges[0], edges[-1], kernel_samples)
    smoothed = density(grid)
    # Rescale the KDE so its integral matches the histogram's total area
    hist_area = np.sum((edges[1:] - edges[:-1]) * counts)
    smoothed = smoothed * hist_area / simps(smoothed, grid)
    return edges, counts, grid, smoothed
# Plot functions
def add_lineplot(ax,
                 data: pd.DataFrame,
                 x: str, y: str,
                 hue: Optional[str] = None,
                 order: Optional[List[str]] = None,
                 hue_order: Optional[List[str]] = None,
                 palette: str = PALETTE,
                 savefile: Optional[pathlib.Path] = None,
                 label: Optional[str] = None,
                 err_style: str = 'band'):
    """ Add a seaborn-style lineplot with bootstrapped confidence intervals

    :param Axes ax:
        The matplotlib axis to add the lineplot to
    :param DataFrame data:
        The data to plot (rows with NaN are dropped first)
    :param str x:
        The column to use for the categorical (x axis) values
    :param str y:
        The column to use for the real (y axis) values
    :param str hue:
        Optional column used to split the data into one line per value
    :param list order:
        Order of the x categories (default: sorted unique values)
    :param list hue_order:
        Order of the hue categories (default: sorted unique values)
    :param str palette:
        The palette to use
    :param Path savefile:
        If not None, save the aggregated plot data to this path (.xlsx)
    :param str label:
        Legend label; combined with the hue value when hue is given
    :param str err_style:
        'band' for shaded CI bands or 'bar' for error bars
    :returns:
        The axis that was drawn on
    """
    bins = {}
    data = data.dropna()
    if order is None:
        order = np.sort(np.unique(data[x]))
    # With no hue column, a single sentinel "hue" keeps the loops uniform
    if hue is None:
        hue_order = [None]
    elif hue_order is None:
        hue_order = np.sort(np.unique(data[hue]))
    # Aggregate each (category, hue) cell: mean plus bootstrapped 95% CI
    for cat in order:
        for hue_cat in hue_order:
            if hue_cat is None:
                mask = data[x] == cat
            else:
                mask = np.logical_and(data[x] == cat, data[hue] == hue_cat)
            # Handle missing categories
            n_samples = np.sum(mask)
            if n_samples >= 3:
                catdata = data[mask]
                ydata = catdata[y].values
                ymean = np.mean(ydata)
                ylow, yhigh = bootstrap_ci(ydata)
            else:
                # Too few samples for a meaningful CI: record NaNs
                ymean = ylow = yhigh = np.nan
            if hue is None:
                bins.setdefault(x, []).append(cat)
                bins.setdefault(f'{y} Mean', []).append(ymean)
                bins.setdefault(f'{y} CI Low', []).append(ylow)
                bins.setdefault(f'{y} CI High', []).append(yhigh)
                bins.setdefault('Samples', []).append(n_samples)
            else:
                bins.setdefault(x, []).append(cat)
                bins.setdefault(hue, []).append(hue_cat)
                bins.setdefault(f'{y} Mean', []).append(ymean)
                bins.setdefault(f'{y} CI Low', []).append(ylow)
                bins.setdefault(f'{y} CI High', []).append(yhigh)
                bins.setdefault('Samples', []).append(n_samples)
    # Save the background data
    bins = pd.DataFrame(bins)
    if savefile is not None:
        # Coerce the output extension to .xlsx for to_excel
        if savefile.suffix != '.xlsx':
            savefile = savefile.parent / (savefile.stem + '.xlsx')
        bins.to_excel(str(savefile))
    # Now draw the plots, one line (with CI) per hue category
    palette = colorwheel(palette, len(hue_order))
    for i, hue_cat in enumerate(hue_order):
        if hue_cat is None:
            xcoords = bins[x].values
            ymean = bins[f'{y} Mean'].values
            ylow = bins[f'{y} CI Low'].values
            yhigh = bins[f'{y} CI High'].values
            hue_label = label
        else:
            hue_bins = bins[bins[hue] == hue_cat]
            xcoords = hue_bins[x].values
            ymean = hue_bins[f'{y} Mean'].values
            ylow = hue_bins[f'{y} CI Low'].values
            yhigh = hue_bins[f'{y} CI High'].values
            if label is None:
                hue_label = hue_cat
            else:
                hue_label = f'{hue_cat} {label}'
        color = palette[i]
        if err_style in ('band', 'bands'):
            ax.fill_between(xcoords, ylow, yhigh, facecolor=color, alpha=0.5)
            ax.plot(xcoords, ymean, '-', color=color, label=hue_label)
        elif err_style in ('bar', 'bars'):
            ax.errorbar(xcoords, ymean, np.stack([ymean-ylow, yhigh-ymean], axis=0),
                        capsize=15, linewidth=3, color=color, label=hue_label)
        else:
            raise ValueError(f'Unknown error style: "{err_style}"')
    return ax
def add_histogram(ax,
                  data: np.ndarray,
                  xlabel: Optional[str] = None,
                  ylabel: str = 'Counts',
                  title: Optional[str] = None,
                  bins: int = 10,
                  draw_bars: bool = True,
                  bar_width: float = 0.7,
                  range: Optional[Tuple[float]] = None,
                  fit_dist: Optional[str] = None,
                  fit_dist_color: str = 'r',
                  kernel_smoothing: bool = True,
                  label_kernel_peaks: Optional[str] = None,
                  kernel_smoothing_color: str = 'c',
                  kernel_bandwidth: Optional[str] = None,
                  vlines: Optional[List[np.ndarray]] = None,
                  vline_colors: str = 'b'):
    """ Add a histogram plot

    Basic Usage:

    .. code-block:: python

        fig, ax = plt.subplots(1, 1)
        histogram(ax, np.random.rand(64, 64),
                  draw_bars=True,
                  kernel_smoothing=True,
                  fit_dist='poisson',
                  vlines=[0.25, 0.75])

    This will draw the histogram with a kernel smoothed fit, a poisson fit,
    and vertical lines at x coordinates 0.25 and 0.75.

    :param Axis ax:
        The axis to add the histogram to
    :param ndarray data:
        The data to make the histogram for (non-finite values are dropped)
    :param str xlabel:
        Label for the x axis
    :param str ylabel:
        Label for the y axis
    :param str title:
        Title for the axis
    :param int bins:
        Number of bins in the histogram
    :param bool draw_bars:
        If True, draw the histogram bars
    :param float bar_width:
        The width of the bars to plot
    :param tuple[float] range:
        The range to fit bins to (argument to np.histogram)
    :param str fit_dist:
        The name of a distribution to fit to the data (only 'gamma')
    :param str fit_dist_color:
        The color of the fit dist line
    :param bool kernel_smoothing:
        If True, plot the kernel smoothed line over the bars
    :param str label_kernel_peaks:
        Any of min, max, both to label extrema in the kernel
    :param str kernel_smoothing_color:
        The color of the kernel smoothed fit line
    :param str kernel_bandwidth:
        The method to calculate the kernel width with
    :param list vlines:
        x coords to draw vertical lines at
    :param list vline_colors:
        The color or list of colors for the spectra
    """
    # Estimate the histogram
    data = data[np.isfinite(data)]
    xbins, hist, kernel_x, kernel_y = get_histogram(
        data, bins=bins, range=range,
        kernel_smoothing=kernel_smoothing,
        kernel_bandwidth=kernel_bandwidth)
    width = bar_width * (xbins[1] - xbins[0])
    center = (xbins[:-1] + xbins[1:])/2
    # Add bars for the histogram
    if draw_bars:
        ax.bar(center, hist, align='center', width=width)
    # Estimate the kernel smoothed fit
    if kernel_smoothing:
        # Add a kernel smoothed fit
        ax.plot(kernel_x, kernel_y, color=kernel_smoothing_color)
        if label_kernel_peaks in ('max', 'both', True):
            # Local maxima: first difference changes sign from + to -
            maxima = (np.diff(np.sign(np.diff(kernel_y))) < 0).nonzero()[0] + 1
            kx_maxima = kernel_x[maxima]
            ky_maxima = kernel_y[maxima]
            ax.plot(kx_maxima, ky_maxima, 'oc')
            for kx, ky in zip(kx_maxima, ky_maxima):
                ax.text(kx, ky*1.05, "{}".format(float("{:.2g}".format(kx))),
                        color="c", fontsize=12)
        if label_kernel_peaks in ('min', 'both', True):
            # Local minima: first difference changes sign from - to +
            minima = (np.diff(np.sign(np.diff(kernel_y))) > 0).nonzero()[0] + 1
            kx_minima = kernel_x[minima]
            ky_minima = kernel_y[minima]
            ax.plot(kx_minima, ky_minima, 'oy')
            for kx, ky in zip(kx_minima, ky_minima):
                ax.text(kx, ky*0.88, "{}".format(float("{:.2g}".format(kx))),
                        color="y", fontsize=12)
    # Fit an model distribution to the data
    if fit_dist is not None:
        opt_x = np.linspace(xbins[0], xbins[-1], 100)
        if fit_dist == 'gamma':
            fit_alpha, fit_loc, fit_beta = gamma.fit(data + 1e-5)
            # BUG FIX: this used to read "opt_y = data = gamma.pdf(...)",
            # which rebound `data` to the 100-point pdf curve and corrupted
            # the sample count reported in the title below
            opt_y = gamma.pdf(opt_x, fit_alpha, loc=fit_loc, scale=fit_beta) * data.shape[0]
        else:
            raise KeyError(f'Unknown fit distribution: {fit_dist}')
        ax.plot(opt_x, opt_y, fit_dist_color)
    # Add spectral lines
    if vlines is None:
        vlines = []
    # A single color (string or RGB tuple) applies to every vline
    if isinstance(vline_colors, (str, tuple)):
        vline_colors = [vline_colors for _ in vlines]
    if len(vlines) != len(vline_colors):
        raise ValueError(f'Number of colors and lines needs to match: {vlines} vs {vline_colors}')
    ymin, ymax = ax.get_ylim()
    for vline, vline_color in zip(vlines, vline_colors):
        ax.vlines(vline, ymin, ymax, colors=vline_color)
    # Label the axes
    if xlabel not in (None, ''):
        ax.set_xlabel(xlabel)
    if ylabel not in (None, ''):
        ax.set_ylabel(ylabel)
    if title not in (None, ''):
        ax.set_title(f'{title} (n={data.shape[0]})')
    else:
        ax.set_title(f'n = {data.shape[0]}')
# Complete Plots
def plot_3d_sphere_cloud(centers: List[Tuple[np.ndarray]],
                         colors: List[str] = None,
                         cmap: str = 'inferno',
                         cvalues: Optional[List[np.ndarray]] = None,
                         vmin: Optional[float] = None,
                         vmax: Optional[float] = None,
                         radii: List[float] = 1.0,
                         title: Optional[str] = None,
                         marker: str = 'o',
                         markersize: float = 10,
                         figsize: Tuple[int] = (16, 16),
                         outfile: Optional[pathlib.Path] = None,
                         add_colorbar: bool = False):
    """ Plot populations of spheres as a 3D scatter cloud

    Exactly one of ``colors`` or ``cvalues`` must be given: either explicit
    per-population colors, or scalar values mapped through ``cmap``.

    :param list[tuple[ndarray]] centers:
        A list of (x, y, z) coordinate arrays, one tuple per population
    :param list[str] colors:
        A list of colors, one per population (exclusive with cvalues)
    :param str cmap:
        Colormap name used when coloring by cvalues
    :param list[ndarray] cvalues:
        Scalar values mapped through cmap (exclusive with colors)
    :param float vmin:
        Lower bound of the color normalization (default: nanmin of cvalues)
    :param float vmax:
        Upper bound of the color normalization (default: nanmax of cvalues)
    :param list[float] radii:
        Sphere radius per population (a scalar is broadcast to all)
    :param str title:
        The title for the plot
    :param str marker:
        Matplotlib marker shape to plot
    :param int markersize:
        NOTE(review): accepted but never used -- marker area is derived
        from ``radii`` below; confirm intent before removing
    :param tuple[int] figsize:
        Figure size in inches (widened by 40% when add_colorbar is set)
    :param Path outfile:
        The path to write the output file to (shows interactively if None)
    :param bool add_colorbar:
        If True, draw a colorbar for the cvalues mapping
    """
    # A scalar radius applies to every population
    if isinstance(radii, (int, float)):
        radii = [radii for _ in centers]
    if colors is None and cvalues is None:
        raise ValueError('Pass one of "colors" or "cvalues" to plot_3d_sphere_cloud')
    # Convert the color values into a heatmap
    if colors is None:
        if vmin is None:
            vmin = np.nanmin(cvalues)
        if vmax is None:
            vmax = np.nanmax(cvalues)
        norm = mplcolors.Normalize(vmin=vmin, vmax=vmax)
        cmapper = mplcm.get_cmap(cmap)
        colors = []
        for cvalue in cvalues:
            colors.append(cmapper(norm(cvalue)))
        # Keep a mappable around so a colorbar can be drawn later
        mappable = mplcm.ScalarMappable(norm=norm, cmap=cmap)
    else:
        mappable = None
    # Check that the shapes make sense
    assert Axes3D is not None
    if len(centers) != len(colors):
        raise ValueError('Got {} centers but {} colors'.format(len(centers), len(colors)))
    if len(centers) != len(radii):
        raise ValueError('Got {} centers but {} radii'.format(len(centers), len(radii)))
    # Plot everything
    all_x = []
    all_y = []
    all_z = []
    if add_colorbar:
        # Widen the figure so the colorbar doesn't squeeze the 3D axes
        figsize = (figsize[0]*1.4, figsize[1])
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection='3d')
    for center, color, radius in zip(centers, colors, radii):
        px, py, pz = center
        ax.scatter(px, py, pz,
                   marker=marker,
                   c=color,
                   s=radius*50,  # Convert radius from um to dpi
                   depthshade=False,
                   cmap=cmap)
        all_x.append(px)
        all_y.append(py)
        all_z.append(pz)
    all_x = np.concatenate(all_x)
    all_y = np.concatenate(all_y)
    all_z = np.concatenate(all_z)
    # Work out the bounding box
    min_x = np.min(all_x)
    max_x = np.max(all_x)
    min_y = np.min(all_y)
    max_y = np.max(all_y)
    min_z = np.min(all_z)
    max_z = np.max(all_z)
    range_x = max_x - min_x
    range_y = max_y - min_y
    range_z = max_z - min_z
    # Use the largest extent on every axis so spheres aren't distorted
    range_max = max([range_x, range_y, range_z])
    center_x = (min_x + max_x)/2
    center_y = (min_y + max_y)/2
    center_z = (min_z + max_z)/2
    ax.set_xlim([center_x - range_max/2, center_x+range_max/2])
    ax.set_ylim([center_y - range_max/2, center_y+range_max/2])
    ax.set_zlim([center_z - range_max/2, center_z+range_max/2])
    if title is not None:
        fig.suptitle(title)
    if add_colorbar and mappable is not None:
        plt.colorbar(mappable, ax=ax, fraction=0.15, pad=0.05)
    # Show interactively, or save to disk when an output path was given
    if outfile is None:
        plt.show()
    else:
        outfile.parent.mkdir(exist_ok=True, parents=True)
        fig.savefig(str(outfile), transparent=True)
    plt.close()
| 2,853 | 0 | 321 |
d2227418ace220aeb53f016879c9b78b6db63908 | 974 | py | Python | examples/dashboard_controller.py | arkgil/xmpp_bot | e57ee6dd936112ba2aac735f53e23d3a1f3e83ed | [
"Apache-2.0"
] | null | null | null | examples/dashboard_controller.py | arkgil/xmpp_bot | e57ee6dd936112ba2aac735f53e23d3a1f3e83ed | [
"Apache-2.0"
] | null | null | null | examples/dashboard_controller.py | arkgil/xmpp_bot | e57ee6dd936112ba2aac735f53e23d3a1f3e83ed | [
"Apache-2.0"
] | null | null | null | import sys
import logging
if sys.version_info < (3, 0):
    # Python 2 only: re-set the default string encoding to UTF-8
    reload(sys)
    sys.setdefaultencoding('utf8')
sys.path.append("../xmpp_bot")
logging.basicConfig(level=logging.DEBUG)
# Connection defaults, overridable from the command line below
server = 'localhost'
port = 5222
from xmpp_bot.controllers.copernicus import DashboardController
if __name__ == '__main__':
    # Usage: python dashboard_controller.py <jid> <pass> <pubsub> [host] [port]
    if len(sys.argv) >= 4:
        jid = sys.argv[1]  # dashboard1@localhost
        password = sys.argv[2]  # 1234
        pubsub_server = sys.argv[3]  # pubsub.localhost
        if len(sys.argv) >= 5:
            server = sys.argv[4]  # localhost
        if len(sys.argv) >= 6:
            # NOTE(review): port stays a str when taken from argv -- confirm
            # the connect() call accepts a string port
            port = sys.argv[5]  # 5222
        xmpp = DashboardController(jid, password, pubsub_server)
        xmpp.connect(address = (server, port), use_tls=False)
        xmpp.process(threaded=False)
    else:
        # BUG FIX: the old message applied '%' to only the last concatenated
        # string fragment (which has no placeholder), raising TypeError
        # instead of printing the usage text
        print("Invalid number of arguments.\n"
              "Usage: python %s <jid> <pass> <pubsub> [host] [port]" % sys.argv[0])
| 27.828571 | 66 | 0.595483 | import sys
import logging
if sys.version_info < (3, 0):
    # Python 2 only: re-set the default string encoding to UTF-8
    reload(sys)
    sys.setdefaultencoding('utf8')
sys.path.append("../xmpp_bot")
logging.basicConfig(level=logging.DEBUG)
# Connection defaults, overridable from the command line below
server = 'localhost'
port = 5222
from xmpp_bot.controllers.copernicus import DashboardController
if __name__ == '__main__':
    # Usage: python dashboard_controller.py <jid> <pass> <pubsub> [host] [port]
    if len(sys.argv) >= 4:
        jid = sys.argv[1]  # dashboard1@localhost
        password = sys.argv[2]  # 1234
        pubsub_server = sys.argv[3]  # pubsub.localhost
        if len(sys.argv) >= 5:
            server = sys.argv[4]  # localhost
        if len(sys.argv) >= 6:
            # NOTE(review): port stays a str when taken from argv -- confirm
            # the connect() call accepts a string port
            port = sys.argv[5]  # 5222
        xmpp = DashboardController(jid, password, pubsub_server)
        xmpp.connect(address = (server, port), use_tls=False)
        xmpp.process(threaded=False)
    else:
        # BUG FIX: the old message applied '%' to only the last concatenated
        # string fragment (which has no placeholder), raising TypeError
        # instead of printing the usage text
        print("Invalid number of arguments.\n"
              "Usage: python %s <jid> <pass> <pubsub> [host] [port]" % sys.argv[0])
| 0 | 0 | 0 |
5b08747bb87e0952e354935a774955499e0db627 | 4,827 | py | Python | FictionTools/amitools/test/unit/profiler_main.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 38 | 2021-06-18T12:56:15.000Z | 2022-03-12T20:38:40.000Z | FictionTools/amitools/test/unit/profiler_main.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 2 | 2021-06-20T16:28:12.000Z | 2021-11-17T21:33:56.000Z | FictionTools/amitools/test/unit/profiler_main.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [
"BSD-2-Clause"
] | 6 | 2021-06-18T18:18:36.000Z | 2021-12-22T08:01:32.000Z | import logging
from amitools.vamos.profiler import MainProfiler, Profiler
from amitools.vamos.cfgcore import ConfigDict
| 27.582857 | 82 | 0.591258 | import logging
from amitools.vamos.profiler import MainProfiler, Profiler
from amitools.vamos.cfgcore import ConfigDict
def profiler_main_disabled_test(caplog):
mp = MainProfiler()
assert mp.parse_config(None)
assert not mp.add_profiler(Profiler())
mp.setup()
mp.shutdown()
assert caplog.record_tuples == []
def profiler_main_config_test(caplog, tmpdir):
path = str(tmpdir.join("prof.json"))
mp = MainProfiler()
cfg = ConfigDict(
{"enabled": True, "output": {"dump": True, "file": path, "append": True}}
)
assert mp.parse_config(cfg)
assert mp.enabled
assert mp.file == path
assert mp.append
mp.setup()
mp.shutdown()
assert caplog.record_tuples == []
def profiler_main_def_profiler_test(caplog):
caplog.set_level(logging.INFO)
p = Profiler()
mp = MainProfiler(enabled=True)
cfg = ConfigDict(
{"enabled": True, "output": {"dump": True, "file": None, "append": True}}
)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
mp.setup()
mp.shutdown()
assert caplog.record_tuples == [
("prof", logging.INFO, "---------- Profiling Results ----------"),
("prof", logging.INFO, "----- profiler 'foo' -----"),
]
def profiler_main_file_test(caplog, tmpdir):
caplog.set_level(logging.DEBUG)
path = str(tmpdir.join("prof.json"))
p = Profiler()
mp = MainProfiler(enabled=True)
cfg = ConfigDict(
{"enabled": True, "output": {"dump": False, "file": path, "append": True}}
)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
mp.setup()
mp.shutdown()
assert caplog.record_tuples == [
("prof", logging.DEBUG, "added profiler 'foo'"),
("prof", logging.DEBUG, "saving profile data to '%s'" % path),
("prof", logging.DEBUG, "done saving."),
]
caplog.clear()
# now repeat setup to test appending
p = Profiler()
mp = MainProfiler(enabled=True)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
mp.setup()
mp.shutdown()
assert caplog.record_tuples == [
("prof", logging.DEBUG, "added profiler 'foo'"),
("prof", logging.DEBUG, "loading profile data from '%s'" % path),
("prof", logging.DEBUG, "done loading."),
("prof", logging.DEBUG, "saving profile data to '%s'" % path),
("prof", logging.DEBUG, "done saving."),
]
class MyProfiler(Profiler):
def __init__(self):
self.foo = 0
self.bar = "baz"
self.got_setup = False
self.got_shutdown = False
def get_name(self):
return "test"
def parse_config(self, cfg):
self.foo = cfg.foo
self.bar = cfg.bar
return True
def set_data(self, data_dict):
self.foo = data_dict.foo
self.bar = data_dict.bar
def get_data(self):
return {"foo": self.foo, "bar": self.bar}
def setup(self):
self.got_setup = True
def shutdown(self):
self.got_shutdown = True
def dump(self, write):
write("foo=%d, bar='%s'", self.foo, self.bar)
def profiler_main_test_prof_cfg_test():
p = MyProfiler()
mp = MainProfiler(enabled=True)
cfg = ConfigDict(
{
"enabled": True,
"output": {"dump": True, "file": None, "append": True},
"test": {"foo": 42, "bar": "hello"},
}
)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
assert p.foo == 42
assert p.bar == "hello"
def profiler_main_test_prof_load_test(tmpdir):
path = str(tmpdir.join("prof.json"))
cfg = ConfigDict(
{"enabled": True, "output": {"dump": True, "file": path, "append": True}}
)
p = MyProfiler()
mp = MainProfiler(enabled=True)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
mp.setup()
assert p.foo == 0
assert p.bar == "baz"
p.foo = 42
p.bar = "hello"
mp.shutdown()
# load again
p = MyProfiler()
mp = MainProfiler(enabled=True)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
mp.setup()
assert p.foo == 42
assert p.bar == "hello"
mp.shutdown()
def profiler_main_test_prof_dump_test(caplog):
caplog.set_level(logging.INFO)
cfg = ConfigDict(
{"enabled": True, "output": {"dump": True, "file": None, "append": True}}
)
p = MyProfiler()
mp = MainProfiler(enabled=True)
assert mp.parse_config(cfg)
assert mp.add_profiler(p)
mp.setup()
assert p.foo == 0
assert p.bar == "baz"
p.foo = 42
p.bar = "hello"
mp.shutdown()
assert caplog.record_tuples == [
("prof", logging.INFO, "---------- Profiling Results ----------"),
("prof", logging.INFO, "----- profiler 'test' -----"),
("prof", logging.INFO, "foo=42, bar='hello'"),
]
| 4,294 | 6 | 399 |
14fad6bcf63b423afc4c9717d1020cb63b4ab0c2 | 98 | py | Python | baekjoon/Python/2292.py | Lumia1108/TIL | fe2e233d6d05c7d04f50f688f6c168e4d6d4ce46 | [
"MIT"
] | null | null | null | baekjoon/Python/2292.py | Lumia1108/TIL | fe2e233d6d05c7d04f50f688f6c168e4d6d4ce46 | [
"MIT"
] | null | null | null | baekjoon/Python/2292.py | Lumia1108/TIL | fe2e233d6d05c7d04f50f688f6c168e4d6d4ce46 | [
"MIT"
] | null | null | null | N = int(input())
i = 1
distance = 1
road = 1
while road < N:
road += i * 6
i += 1
print(i) | 12.25 | 17 | 0.5 | N = int(input())
i = 1
distance = 1
road = 1
while road < N:
road += i * 6
i += 1
print(i) | 0 | 0 | 0 |
b3d98895f1030982d7fafa83a77c2fa878c57407 | 1,497 | py | Python | python/lib/lib_care/test/test_periodic_boundary_conditions.py | timtyree/bgmc | 891e003a9594be9e40c53822879421c2b8c44eed | [
"MIT"
] | null | null | null | python/lib/lib_care/test/test_periodic_boundary_conditions.py | timtyree/bgmc | 891e003a9594be9e40c53822879421c2b8c44eed | [
"MIT"
] | null | null | null | python/lib/lib_care/test/test_periodic_boundary_conditions.py | timtyree/bgmc | 891e003a9594be9e40c53822879421c2b8c44eed | [
"MIT"
] | null | null | null | #!/bin/bash/env python3
import os, sys
# sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from .. import *
from ..model.minimal_model import pbc
# test cases of periodic boundary conditions on a random matrix
test = np.random.rand(111,111,3)
# trivial tests, do nothing/ slots agree
(pbc(test,1,2)==test[1,2]).all()
assert(not (pbc(test,2,1)==test[1,2]).all())
#test each pbc boundary
assert((pbc(test,-1,2)==test[110,2]).all()) # test left
assert((pbc(test,111,2)==test[0,2]).all() ) # test right
assert((pbc(test,11,112)==test[11,0]).all() ) # test top
assert((pbc(test,12,-1)==test[12,110]).all() ) # test bottom
assert((pbc(test,-1,-1)==test[110,110]).all() ) #test bottom left corner
#padded spiral tips are produced with at pixel percision of about 13 digits.
# note that this is not the same as accuracy, which will depend on sigma, threshold, and V_threshold
# test functions for unpad
# assert(0==unpad(X=20, pad=20, width=500, rejection_distance=10))
# assert(unpad(X=19, pad=20, width=500, rejection_distance=10)==499)
# assert(280==unpad(X=300, pad=20, width=500, rejection_distance=10))
# assert(499==unpad(X=519, pad=20, width=500, rejection_distance=10))
# assert(10==unpad(X=530, pad=20, width=500, rejection_distance=10))
# assert(-9999==unpad(X=531, pad=20, width=500, rejection_distance=10))
# assert(490==unpad(X=10, pad=20, width=500, rejection_distance=10))
# assert(-9999==unpad(X=9, pad=20, width=500, rejection_distance=10))
| 38.384615 | 100 | 0.703407 | #!/bin/bash/env python3
import os, sys
# sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from .. import *
from ..model.minimal_model import pbc
def testme():
pass
# test cases of periodic boundary conditions on a random matrix
test = np.random.rand(111,111,3)
# trivial tests, do nothing/ slots agree
(pbc(test,1,2)==test[1,2]).all()
assert(not (pbc(test,2,1)==test[1,2]).all())
#test each pbc boundary
assert((pbc(test,-1,2)==test[110,2]).all()) # test left
assert((pbc(test,111,2)==test[0,2]).all() ) # test right
assert((pbc(test,11,112)==test[11,0]).all() ) # test top
assert((pbc(test,12,-1)==test[12,110]).all() ) # test bottom
assert((pbc(test,-1,-1)==test[110,110]).all() ) #test bottom left corner
#padded spiral tips are produced with at pixel percision of about 13 digits.
# note that this is not the same as accuracy, which will depend on sigma, threshold, and V_threshold
# test functions for unpad
# assert(0==unpad(X=20, pad=20, width=500, rejection_distance=10))
# assert(unpad(X=19, pad=20, width=500, rejection_distance=10)==499)
# assert(280==unpad(X=300, pad=20, width=500, rejection_distance=10))
# assert(499==unpad(X=519, pad=20, width=500, rejection_distance=10))
# assert(10==unpad(X=530, pad=20, width=500, rejection_distance=10))
# assert(-9999==unpad(X=531, pad=20, width=500, rejection_distance=10))
# assert(490==unpad(X=10, pad=20, width=500, rejection_distance=10))
# assert(-9999==unpad(X=9, pad=20, width=500, rejection_distance=10))
| 1 | 0 | 23 |
31a9813af21e42a543dd89957438d8954fe3ca84 | 1,777 | py | Python | python3/lib/python3.6/site-packages/tensorflow/_api/v1/compat/v2/strings/__init__.py | TruongThuyLiem/keras2tensorflow | 726f2370160701081cb43fbd8b56154c10d7ad63 | [
"MIT"
] | 3 | 2020-10-12T15:47:01.000Z | 2022-01-14T19:51:26.000Z | python3/lib/python3.6/site-packages/tensorflow/_api/v1/compat/v2/strings/__init__.py | TruongThuyLiem/keras2tensorflow | 726f2370160701081cb43fbd8b56154c10d7ad63 | [
"MIT"
] | null | null | null | python3/lib/python3.6/site-packages/tensorflow/_api/v1/compat/v2/strings/__init__.py | TruongThuyLiem/keras2tensorflow | 726f2370160701081cb43fbd8b56154c10d7ad63 | [
"MIT"
] | 2 | 2020-08-03T13:02:06.000Z | 2020-11-04T03:15:44.000Z | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Operations for working with string Tensors.
"""
from __future__ import print_function as _print_function
from tensorflow.python import as_string
from tensorflow.python import reduce_join_v2 as reduce_join
from tensorflow.python import regex_full_match
from tensorflow.python import regex_replace
from tensorflow.python import string_format as format
from tensorflow.python import string_join as join
from tensorflow.python import string_length_v2 as length
from tensorflow.python import string_lower as lower
from tensorflow.python import string_strip as strip
from tensorflow.python import string_to_hash_bucket as to_hash_bucket
from tensorflow.python import string_to_hash_bucket_fast as to_hash_bucket_fast
from tensorflow.python import string_to_hash_bucket_strong as to_hash_bucket_strong
from tensorflow.python import string_to_number as to_number
from tensorflow.python import string_upper as upper
from tensorflow.python import substr_v2 as substr
from tensorflow.python import unicode_script
from tensorflow.python import unicode_transcode
from tensorflow.python.ops.ragged.ragged_string_ops import string_bytes_split as bytes_split
from tensorflow.python.ops.ragged.ragged_string_ops import string_split_v2 as split
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_decode
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_decode_with_offsets
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_encode
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_split
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_split_with_offsets
del _print_function
| 52.264706 | 92 | 0.876196 | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Operations for working with string Tensors.
"""
from __future__ import print_function as _print_function
from tensorflow.python import as_string
from tensorflow.python import reduce_join_v2 as reduce_join
from tensorflow.python import regex_full_match
from tensorflow.python import regex_replace
from tensorflow.python import string_format as format
from tensorflow.python import string_join as join
from tensorflow.python import string_length_v2 as length
from tensorflow.python import string_lower as lower
from tensorflow.python import string_strip as strip
from tensorflow.python import string_to_hash_bucket as to_hash_bucket
from tensorflow.python import string_to_hash_bucket_fast as to_hash_bucket_fast
from tensorflow.python import string_to_hash_bucket_strong as to_hash_bucket_strong
from tensorflow.python import string_to_number as to_number
from tensorflow.python import string_upper as upper
from tensorflow.python import substr_v2 as substr
from tensorflow.python import unicode_script
from tensorflow.python import unicode_transcode
from tensorflow.python.ops.ragged.ragged_string_ops import string_bytes_split as bytes_split
from tensorflow.python.ops.ragged.ragged_string_ops import string_split_v2 as split
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_decode
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_decode_with_offsets
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_encode
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_split
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_split_with_offsets
del _print_function
| 0 | 0 | 0 |
b1c89bd21cd109879257aacfae2fc2b64c8bb917 | 1,963 | py | Python | utmap-server/src/app/main/model/building.py | DSC-UTMap/utmap-website | 67536bd51658dedb2ac14b59b689dbb8f9c3f632 | [
"MIT"
] | null | null | null | utmap-server/src/app/main/model/building.py | DSC-UTMap/utmap-website | 67536bd51658dedb2ac14b59b689dbb8f9c3f632 | [
"MIT"
] | 90 | 2021-01-12T15:34:14.000Z | 2021-04-09T19:22:54.000Z | utmap-server/src/app/main/model/building.py | DSC-UTMap/utmap-website | 67536bd51658dedb2ac14b59b689dbb8f9c3f632 | [
"MIT"
] | 1 | 2021-04-28T20:37:28.000Z | 2021-04-28T20:37:28.000Z | from .. import db
from .modelHelpers import (
findAll, findById, deleteById, findByName, formatId, assignId, updateDocument, formatDocuments,
)
from bson import ObjectId
| 31.15873 | 99 | 0.625573 | from .. import db
from .modelHelpers import (
findAll, findById, deleteById, findByName, formatId, assignId, updateDocument, formatDocuments,
)
from bson import ObjectId
class Building:
def __init__(self, _id=None, name='Connector', code='NA'):
self._id = _id
self.name = name
self.code = code
def connectToBuildings(self):
buildings = db.get_collection('building')
return buildings
def findBuildById(self, _id, buildings):
build = findById(_id, buildings)
return build
def deleteBuildById(self, _id, buildings):
build = deleteById(formatId(_id), buildings)
return build
def findBuildByName(self, name, buildings):
build = findByName(name, buildings)
return build
def assignBuildingId(self, buildings):
fields = {'name' : self.name, 'code' : self.code}
buildId = assignId(fields, buildings).inserted_id
self._id = formatId(buildId)
return self._id
def updateBuild(self, buildings, buildToUpdate):
fieldList = ['name', 'code']
fieldVals = [self.name, self.code]
updateDocument(buildToUpdate, buildings, fieldList, fieldVals)
def formatAllBuilds(self, buildings):
output = []
for build in findAll(buildings):
output.append(self.formatOneBuild(build))
return output
def formatOneBuild(self, buildObject):
tempBuild = self.createTempBuild(buildObject)
output = (tempBuild.formatAsResponseBody())
return output
def createTempBuild(self, buildObject):
tempBuild = Building(
_id=buildObject['_id'], name=buildObject['name'], code=buildObject['code'])
return tempBuild
def formatAsResponseBody(self):
output = {
'_id' : formatId(self._id),
'name' : self.name,
'code' : self.code
}
return output
| 1,460 | -6 | 331 |
233d39a109ddb6a6feb1139e7e4d5e2407bfba88 | 529 | py | Python | test_texthandler.py | arjo129/hnsentiment | 3f89b07d5051f127b6888e43251a7a8e21924ef7 | [
"Unlicense"
] | 2 | 2017-06-30T04:29:14.000Z | 2017-07-02T04:05:49.000Z | test_texthandler.py | arjo129/hnsentiment | 3f89b07d5051f127b6888e43251a7a8e21924ef7 | [
"Unlicense"
] | 1 | 2017-07-18T00:39:36.000Z | 2017-07-18T00:39:36.000Z | test_texthandler.py | arjo129/hnsentiment | 3f89b07d5051f127b6888e43251a7a8e21924ef7 | [
"Unlicense"
] | null | null | null | import pytest
from test_helper import get_test_data
from texthandler import CommentClassifier
@pytest.fixture
@pytest.fixture
| 26.45 | 70 | 0.797732 | import pytest
from test_helper import get_test_data
from texthandler import CommentClassifier
@pytest.fixture
def comment():
return get_test_data("comment")
@pytest.fixture
def clean_comment():
return get_test_data("clean_comment")
def test_comment_cleaner(comment, clean_comment):
assert CommentClassifier().clean_comment(comment) == clean_comment
def test_comment_classifier(comment):
sentimental_analysis = CommentClassifier()(comment)
assert sentimental_analysis["neg"] > sentimental_analysis["pos"]
| 310 | 0 | 90 |
bf68ba8a664ccff4dcda0e7887840d77579eda40 | 72,911 | py | Python | resources.py | PSMA/beta-nearest-building | 8cfcff08bc6aa2f6c6631b3af6fa6971ca1de0c9 | [
"MIT"
] | null | null | null | resources.py | PSMA/beta-nearest-building | 8cfcff08bc6aa2f6c6631b3af6fa6971ca1de0c9 | [
"MIT"
] | 1 | 2018-10-03T22:27:33.000Z | 2018-10-03T22:27:33.000Z | resources.py | PSMA/beta-nearest-building | 8cfcff08bc6aa2f6c6631b3af6fa6971ca1de0c9 | [
"MIT"
] | 2 | 2018-11-28T22:35:00.000Z | 2018-12-04T09:14:17.000Z | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x3a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x18\x08\x06\x00\x00\x00\x9b\x53\xff\x34\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc2\x00\x00\x0e\xc2\x01\x15\x28\
\x4a\x80\x00\x00\x03\xcf\x49\x44\x41\x54\x48\x4b\xed\x56\x5b\x68\
\x1c\x65\x18\x3d\xff\x5c\xb2\x9b\x6d\xb3\x9b\x64\x77\x33\xcd\x15\
\xa1\x22\xbe\x58\x90\x78\x41\xad\x22\x22\x14\xec\x83\x54\x85\x3c\
\x59\x0a\x52\x28\x3e\x08\x55\xe8\xab\x6f\xd2\x07\x41\xf4\xd9\xbe\
\x09\x5e\x09\x58\x04\x8d\xa8\x01\x51\x8b\xd5\x74\xd3\x98\xcb\xa6\
\x26\xcd\xa6\xcd\x65\x2f\xb3\x6b\x76\x93\xdd\xec\xce\xcc\xce\x78\
\xfe\xd9\x0d\x36\xb0\x95\x9a\xf8\x98\x03\xdf\xcc\xce\xbf\xdf\xfc\
\xdf\x99\xf3\x9d\xfd\x66\x85\x47\x60\x8f\x18\x1d\x1d\x85\x6d\xdb\
\x18\x19\x19\x69\xae\xfc\x77\x28\xcd\xf3\x9e\x90\xc9\x64\x90\xcd\
\x66\x9b\x57\x7b\xc3\xbe\x08\xa8\xaa\x0a\x45\xd9\xd7\x16\xfb\x23\
\xf0\x7f\xe0\x80\xc0\x01\x81\x03\x02\x07\x04\xee\x8d\x80\x63\x63\
\x71\x61\x01\xa9\xd4\x92\x1f\x4b\x37\x6f\xfa\xcb\x72\x0a\x0a\x21\
\xfc\xcf\x7b\xc5\x3d\xbd\x8c\x5e\x7e\xf7\x43\x84\xfb\x87\x00\xcf\
\xe5\xf8\x55\x30\xb9\xb8\x8c\xf7\x5f\x38\x8e\xd9\xab\x3f\x63\xab\
\x5c\xc1\xf9\xf3\x6f\xc0\xae\x03\xba\xca\x64\xd7\x81\x67\x3b\x24\
\xc6\x74\x5e\x0a\x4d\x27\x53\xf9\x45\x6b\xfc\x2b\x81\x5b\xcb\xb7\
\x21\xec\x0d\x3c\xf7\xf5\x12\xee\x1f\xec\x85\xeb\x92\x00\x37\x9c\
\x9e\x98\xc0\x17\x27\x7b\x90\xb8\x3a\x81\x72\xb5\x86\x73\xa7\x5f\
\x42\x89\x44\x96\x53\xeb\xb8\x7c\x3b\x88\x50\x28\xc8\xbb\x3d\xd6\
\x55\x31\xf3\x67\x0a\x1f\x5d\x38\xd7\xd8\xb0\x05\x76\xb5\xc0\xe5\
\xab\x75\x6e\xea\x37\x24\x27\xbf\x45\x32\xf1\x39\x7a\x23\xcb\xe8\
\xef\xab\x00\x75\x01\x85\x3c\x77\xa2\xe6\x00\x87\x23\x21\x04\x02\
\x01\xb4\xb5\x05\x10\xea\x09\xe3\xc8\x60\x37\x7e\xdc\x0e\x61\x3c\
\x38\x80\x1f\xdc\x08\xa3\x0b\x63\xb5\x0e\xcc\x9a\x39\x64\x6f\x5c\
\xc6\xe2\xec\x38\xe6\xe7\x26\x51\xae\x54\x9b\xd5\x1a\xf0\x15\x28\
\x97\x6b\xb8\xf1\xc7\xf7\xb8\x78\x25\x8d\x70\x2c\x46\xa5\x15\x68\
\x9a\x86\x6f\x7e\x49\x60\xfa\xed\xa7\xf0\xec\xa5\x25\x0c\xf6\xc9\
\x75\x17\x2e\xe5\x0c\x66\x16\xf0\xd9\x85\x57\x28\x73\xbf\xbf\x89\
\xe7\xad\x01\xb5\x34\x3e\x18\xbf\x85\x2f\xb7\xfa\xd0\x2e\xc8\x90\
\x3d\xa8\xb3\x2d\xe9\x5f\xbf\x43\xe2\xe2\x49\xc0\x96\x42\xbb\xc8\
\xa4\xf3\x28\x56\xda\x61\x0c\x3d\x82\x48\x47\x08\x5a\xa1\x50\x80\
\xa8\x24\x10\xe8\xf0\x30\x17\x7d\x10\x46\xa0\xd1\x11\xf9\xaa\xfd\
\x4b\x28\x38\x44\x22\xbf\xbf\xf5\x10\x55\x70\xfd\x75\x1f\x5a\x1f\
\x0f\x15\xbc\x7a\xfa\x14\xfd\xc9\x62\x28\xd3\x00\x2c\xc8\xfe\x5b\
\x75\x07\x2a\xea\xfe\x03\x98\x65\x0b\x0f\x74\xb2\xff\x1e\x85\x76\
\x98\xe7\x09\x18\xf1\x38\x0c\xd5\xe3\x7f\x89\x9f\xe0\x58\xc3\x50\
\x8a\xb9\x49\x74\x19\x31\xb8\xfc\x32\xa8\x29\x08\xea\x1a\x02\x7e\
\xe8\x34\x96\x80\x23\x0b\xdb\xac\xe1\xd0\x55\x3b\x51\xe5\xa3\xd5\
\x2a\x38\x7b\xe6\x45\xbc\xfe\xda\x29\xd6\xdf\xc0\x3b\x9f\x24\x91\
\xce\x2b\x78\xac\xba\x8a\x61\xc7\xc4\xc6\x95\x71\x5c\x7a\xb4\x84\
\x4f\xdf\x3c\xc1\x5c\x49\x92\x90\x3f\x18\x69\x39\xaa\x61\x18\x71\
\x94\xcc\xeb\x50\x84\xef\x55\xae\x59\x0e\x56\x57\xd6\x19\x69\xac\
\x31\x16\x53\xab\x78\xbe\x57\x40\xef\x3c\x4c\xe5\x5a\xf8\xd4\xb2\
\xf1\xf4\xe3\xc7\x70\xfc\x99\x61\x5c\x9b\x5f\xc3\xc7\xb5\x01\x24\
\xf4\x18\xa6\x83\x06\xae\x07\x68\xd0\xa2\x83\x63\x47\x7b\xa5\x17\
\x1b\xd1\x02\x82\x2d\x11\xa6\x99\xf3\xea\x5b\xd7\xd0\x33\x60\x70\
\x85\x4c\xef\x2c\xa6\xd1\xcd\x9b\xb5\xe6\xc5\x5d\x40\xd5\xa6\xe6\
\x57\x70\x36\xd9\x83\xb8\xde\x68\x93\x4a\xf9\xc7\xbe\x1a\xc3\xf6\
\x7b\x27\x58\x84\xf2\xfb\x24\xe4\x81\x12\x48\xdb\xf3\x9e\x5c\x36\
\x07\x11\x7a\x18\x4a\x34\x1a\x43\xa0\xeb\x49\xcc\x4c\x65\x50\xca\
\x14\xf9\x64\x4c\xb2\x98\x55\x65\x6c\x59\xff\xdc\xec\x6f\xd0\x1a\
\x96\xe3\xc2\xac\x09\xe4\x79\x9f\xc9\x58\xdf\x16\xb8\x8f\xc2\x89\
\x36\xce\x00\x59\x54\x4a\xcf\xf9\x01\xcd\xc3\xe6\x66\x11\x33\xc9\
\x1c\xf4\xc8\x13\x88\xc5\xe2\xbb\xe7\x40\x2e\x5f\x44\xd1\x5c\xe0\
\x2c\x29\x20\x1a\xd1\x49\xb4\x8e\x48\x67\x98\x1b\x48\x23\x31\x61\
\xe7\x29\xee\x04\xdd\xee\x72\x52\xae\xac\x65\xa0\xc9\x22\x84\xc3\
\x81\x35\x34\x24\x15\x55\x51\xca\x6f\xc0\x76\x55\x14\x8a\x36\x87\
\x52\x37\x22\xd1\xa3\x88\xc7\x3a\xfd\x3c\x89\xbb\x0e\xa2\x52\xa9\
\xcc\xc1\x63\xa1\x90\x4f\xf3\x17\x50\xe1\x94\xab\xb3\x3b\xd2\xc9\
\x74\xa4\x3c\xb3\x7f\x3b\xbd\x15\x74\x92\xa6\x6a\x7e\x41\x21\x74\
\x3f\xaa\x36\x89\x89\x76\x74\x77\x1f\xe1\x40\x6a\x43\x38\x7c\xa8\
\x91\xbc\x0b\xc0\xdf\xcb\x70\xa6\xef\x92\xee\x15\xda\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x33\x0b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xff\x00\x00\x00\x79\x08\x06\x00\x00\x00\xf6\xe1\xf7\x0f\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x09\xda\x00\x00\
\x09\xda\x01\xb8\x5e\x7a\x24\x00\x00\x32\x49\x49\x44\x41\x54\x78\
\xda\xed\x7d\x79\x78\x54\xd5\xdd\xff\xe7\xdc\x99\xec\x21\x99\x04\
\x42\x00\x59\x02\x44\x40\x04\x09\xe0\x06\xca\xa6\x68\xed\x6b\xad\
\x28\xb5\x76\x51\x09\xda\xda\xc5\x2d\xb4\x56\x5c\xde\x9f\xe2\x6b\
\x15\x68\x6b\x85\xfa\xf6\x6d\x6d\x51\xa1\xb5\xb5\x6a\x5b\x02\xad\
\x4a\x55\x20\x08\x2a\x6a\x2b\x41\x16\x85\x24\x24\x61\x09\x61\x49\
\x32\x93\x65\x66\x32\xcb\x3d\xbf\x3f\xee\xb9\x73\xcf\x3d\xf7\xde\
\xc9\x10\xb2\x21\xe7\xfb\x3c\xe7\x99\xbb\x6f\x73\x3e\xdf\xfd\x7b\
\x8e\x42\x29\x85\x6c\xb2\xc9\x76\xf6\x35\x05\x92\x24\x49\x3a\x2b\
\x49\x82\x5f\x92\x24\x09\x7e\x49\x92\x24\x49\xf0\x4b\x92\x24\x49\
\x82\xff\x4c\xa0\x69\xeb\xeb\x46\x4c\x2c\x3d\x52\x32\xfa\x6f\x87\
\x57\x0d\x7a\xed\x50\xd9\x39\xaf\x1d\x2a\x73\xff\xa9\xa6\xc9\xf5\
\x72\x6d\x93\xfb\xe5\x5a\xe3\xf7\x95\x83\x4d\xae\x57\x0e\x35\xb9\
\x5e\x39\xd4\x34\xa2\xb4\x6e\xc7\xd8\xf5\x47\xcb\x26\xfd\xb3\x7e\
\xed\xdc\x7f\x9d\x28\xf9\xca\x3b\x27\x2f\x97\xdd\x41\x92\x04\x7f\
\x1f\xa6\xa2\xb5\x47\x46\x0c\x7f\xed\xd0\xd2\x9c\x97\x6b\xca\x92\
\xd6\x54\x35\x91\xdf\xef\x57\xb7\xd7\xb5\xd4\xec\x3e\xd6\xf6\xcc\
\x81\x06\xff\x1d\xc7\x9a\x02\xb3\xea\xbc\xc1\x59\x51\x7f\xd8\xa3\
\xfa\xc3\x9e\xa8\x3f\xe2\x51\xfd\x11\x4f\x34\x10\xf1\xa8\xfe\xb0\
\x47\x0d\x84\x3d\x6a\x20\xe2\x39\xe8\x6b\x2f\xda\xef\x6d\x9f\xf5\
\x69\x53\x70\xde\xc6\xe3\xfe\x67\x5e\x3f\xda\xb6\x95\xbc\x7c\x48\
\x4d\xfd\x7b\x5d\x53\xd1\x1b\xc7\xcb\x6e\xdc\xd8\x30\xef\x6b\x9b\
\x1a\x3c\xb2\x8b\x48\x92\xe0\xef\x45\x1a\xfb\xd7\x43\x97\xe7\xbe\
\x5c\xb3\xd6\xbd\x6a\xbf\x7f\x67\x5d\x73\xcd\xa1\x13\xad\x0f\x7a\
\x9b\x82\xb3\x22\xfe\xb0\x07\x11\x95\x68\x47\x51\x80\xb2\x13\x28\
\x8d\x6d\x8a\x6d\xa4\xc6\xa2\xfd\x36\x00\x51\x95\xb4\x07\x22\x9e\
\x9d\xde\xf6\x59\x07\xda\x23\x6b\x01\x34\xdd\xb4\xb9\xa1\xfc\xe6\
\xcd\x0d\x25\xdf\x28\x6b\x2c\x90\xdd\x45\x92\x04\x7f\x0f\x50\xe1\
\x2b\xb5\x97\x67\xbd\x74\x60\x6d\xd2\xef\xf6\xf9\xf7\x1f\x6d\xde\
\xda\xd4\xe0\x9f\x17\x0d\x46\xd2\x38\x84\xdb\x9c\x45\x9d\x37\x11\
\x61\x9d\xc6\x39\x8f\x52\xec\x0d\x46\x59\x3c\x14\x93\x54\xe0\x19\
\x4a\x69\xf5\x37\xca\x1a\xcb\xbf\xb9\xa5\xb1\xe4\x5b\xef\x36\x49\
\x46\x20\x49\x82\xbf\xab\x69\xe8\x4b\x07\x4a\xb2\x56\xed\xab\xac\
\xaa\xf3\x6d\x6d\x39\xd9\x36\x2f\x12\x08\xa5\x81\x52\x43\x9a\x3b\
\x01\xd8\x76\x1b\xb7\x91\xd7\x0a\x78\x0d\x40\x5c\x66\xeb\xe1\x60\
\x14\x41\x95\x42\x65\xb7\x56\x01\xa8\xc0\x24\x4a\x35\x46\xf0\xad\
\x77\x9b\x56\xdf\xf2\x6e\xd3\x6c\xd9\x85\x24\x49\xf0\x9f\x26\x0d\
\xf9\x43\x65\x49\xe6\xff\xed\x6e\x3a\x52\xef\x7b\xa6\xc5\x17\x18\
\x0d\xaa\x42\x6b\x1c\x88\x79\xe0\xf2\xbf\x26\x35\x5f\x00\xbe\x1d\
\xd3\x30\xe9\xfb\xd4\x51\x0b\xa8\x08\x46\x41\x61\x30\x00\x4a\x29\
\x54\xb6\x8f\x50\xba\x00\xc0\xe6\x5b\xb7\x36\x95\x2d\xd8\xda\x34\
\x4f\x76\x25\x49\x12\xfc\xa7\x48\xe7\xac\xae\x28\x49\x7f\xf6\xd3\
\xa6\xa3\x47\xbd\xcf\xb4\xb5\x86\x3c\x31\x29\xaf\x83\x5a\x5f\x56\
\x55\x06\x64\xa1\x11\xf1\x8a\x4e\xaa\x7f\x82\x9a\x03\x8c\xfb\x56\
\x04\xa3\x1a\xd0\xb9\x9d\x44\x33\x05\x74\x4d\x00\x94\x62\x96\x0a\
\xac\xbd\x6d\x9b\xb7\xac\x78\x9b\x77\xb6\xec\x52\x92\x24\xf8\x3b\
\xa0\x91\xab\xf7\x5f\x9e\xba\xa2\xbc\xa9\xee\x48\xe3\x33\x81\x96\
\x76\x8f\x86\x24\x95\x03\xbc\xce\x00\x38\x0d\x00\xb0\x08\x6d\x8b\
\x93\x0f\x0e\xdb\x10\x87\x01\xd8\x9a\x01\x00\xc2\x2a\x5a\xc3\x2a\
\x03\xb9\x05\xf4\x31\x4d\x40\xd5\x98\xc2\x2c\x00\x9b\x17\xbe\xe7\
\x2b\xbb\xfd\x7d\x5f\x81\xec\x5a\x92\x24\xf8\x6d\x68\xc0\xaf\x77\
\x95\xd5\x1c\x6a\xd8\xda\xde\x12\xf4\x68\x98\x13\x54\x7c\x51\xea\
\x9b\x18\x02\x75\xb6\xdb\x6d\xcd\x02\x6a\xbf\xdd\xba\x62\xcb\x27\
\x76\x04\xa3\x06\xe8\x39\xf5\x5f\x27\x42\x29\x88\x89\x29\xd0\x59\
\x6f\x1f\xf6\x56\x8f\xfa\xf3\x81\x32\xd9\xbd\x24\x49\xf0\x33\x1a\
\xf6\xbb\xbd\x37\xbb\x96\x7d\xe4\x6f\x38\xd9\x32\x2b\x2e\xa8\x4d\
\x8c\x20\x1e\x03\x70\xf2\xfc\xd3\x8e\x1f\x86\xda\x88\x7e\x6a\x65\
\x26\x0d\xc1\x48\x0c\xec\xbc\xfa\x1f\x4f\x13\x18\x90\x92\x84\xea\
\x06\xff\xac\xec\xe7\xf6\xfa\x67\xbe\x5a\x73\xb3\xec\x66\x92\xce\
\x6a\xf0\x0f\xfc\x55\xf9\xda\xc3\x87\x4e\xfe\x45\xe5\xbd\xf7\x94\
\x6a\xe2\x54\x07\xa3\x1a\x4f\x03\xb0\x03\x3e\x75\x70\xf6\x39\x30\
\x00\x27\xdb\xdf\x72\x38\xc7\x10\x54\xa0\x21\xa2\xc6\x80\xee\x04\
\x7a\x9d\x08\xa5\xc8\x49\x76\x01\x00\x9a\x83\xe1\xb4\xad\x87\x9b\
\xfe\x72\xfe\x1f\x2a\xd6\xca\xae\x26\xe9\xac\x03\xff\xe8\xdf\xee\
\x1e\x91\xb9\xec\xc3\xa3\x27\x8e\x79\xe7\x99\x1c\x77\x26\x1b\x9d\
\x03\x7a\x4c\xb7\x56\x05\x75\x9d\xdf\xa7\x03\x1f\xf6\xd7\x12\xf0\
\x6b\xda\x6e\x07\x7a\x0b\x9f\x20\xdc\x3e\x8a\x0f\xfc\x11\x33\xe8\
\xa9\x01\x74\x9d\x74\x46\xa0\x0a\x97\x00\x80\xbd\xc7\x9a\xe7\xe5\
\xff\x76\xf7\xd1\xab\xfe\x52\x35\x42\x76\x39\x49\x67\x05\xf8\x47\
\x3e\x5b\x7e\x73\x75\xbd\xf7\xb3\x36\x6f\xdb\xa0\x98\x3a\x0f\x74\
\xa0\xc2\xf3\xfb\x55\x2b\xe8\xd5\x38\x3e\x01\x3b\xa0\xdb\xad\x73\
\xc0\x36\x40\x4a\x1d\x1d\x7f\x6a\x48\x15\x58\x83\xb3\xca\xaf\x31\
\x05\x8e\x03\x10\xad\x1d\xf7\x05\x06\x7d\x78\xac\xf9\xb3\x19\x2f\
\x55\x48\x33\x40\xd2\x17\x1b\xfc\x85\xbf\xda\x71\x73\x4d\x5d\xe3\
\xcb\xd4\xdf\x9e\x66\x02\xa0\xa3\xf3\x8e\x03\xb7\xca\xab\xfe\x36\
\x1a\x80\xc9\x37\x40\xcd\x66\x82\x05\xe1\x76\x9b\x68\x07\xeb\xc2\
\x0a\x05\x2a\xda\xa3\x16\xe9\x6f\xa7\xf2\x53\x0a\x24\xbb\x88\x01\
\xfc\xd8\x4e\x82\x16\x7f\x38\x6d\xdb\xc1\x86\x97\x8b\xd6\xec\x5b\
\x2c\xbb\x9e\xa4\x2f\x24\xf8\x47\x3c\xfd\xf1\xd2\xaa\x23\x0d\x2f\
\x23\x12\x25\x26\xbb\x5c\x8d\xe7\xc0\x83\x8d\x3a\x0f\x33\x63\xb0\
\x30\x10\xd8\x6f\x73\xc2\x3f\xa5\xce\x8c\xc1\xe2\x27\x30\xef\xff\
\x8c\x79\xfd\xed\x9c\x7f\xbc\xca\xaf\x02\x18\x90\x9e\x64\x02\xbd\
\xc6\x04\x18\x23\x88\xaa\x64\x67\x45\xfd\xb2\x31\xab\xf6\xfe\x4d\
\x76\x3f\x49\x5f\x28\xf0\x17\x2c\xff\x70\xd5\xc1\x23\x8d\x0f\x22\
\x12\x25\x66\xc7\x9e\xca\x31\x00\xd1\x9b\xaf\xda\x83\x1d\x36\xce\
\x3d\xdb\x26\x98\x13\x80\x35\xbc\x67\xb1\xf9\x89\xf5\xb8\xd8\xb1\
\xe2\x3a\x05\x42\x2a\xc2\xec\x1a\x89\x48\x7f\x10\x08\x92\x9f\x35\
\xaa\x9d\x58\x51\x71\xf4\xc6\x91\xbf\xdd\xbd\x45\x76\x41\x49\x5f\
\x08\xf0\x17\x2c\xdd\xbe\xaa\xb6\xae\xf1\x0e\xab\x5d\x4e\x63\x9d\
\xde\x04\x28\x27\xaf\x7e\x0c\x61\x76\xa0\xe7\x01\x0c\x33\xe8\x79\
\xcd\x81\x47\x2f\x11\x8e\xd5\xb7\xc7\x2d\xee\xb1\x6e\xda\xd7\x6e\
\x48\x7f\xcd\x5d\xe0\x2c\xfd\x63\xc0\xe7\x84\x3e\x08\x31\xee\x4f\
\x08\x6a\x0e\xd4\xcf\x1c\xfd\x9b\x5d\x92\x01\x48\x3a\xb3\xc1\x7f\
\xee\xcf\x3f\xba\xb9\xf6\x84\xf7\x76\xb3\x94\x86\x35\x9f\xde\x49\
\xdd\x77\x0a\xdf\x59\x8e\x53\x05\xe7\x9f\x83\xc9\xc0\x03\xbd\xa3\
\xb0\xbf\x9d\x39\x60\xe3\x08\xac\x0e\x9a\x1d\x7f\xf1\xa4\x3f\x5c\
\x0a\x03\xbe\x62\xa8\xfe\x31\x86\x60\xac\x1f\xa8\x3d\x31\xb3\xe8\
\xc5\xcf\xa5\x0f\x40\xd2\x99\x09\xfe\x71\xcb\x3f\xbc\xb9\xf2\xf0\
\xc9\x97\x11\x8a\x10\x1e\xe7\x56\x90\x3a\xc5\xea\x39\x10\xda\xed\
\x13\x1d\x7a\xf1\x62\xfe\x1d\x69\x08\x66\xd4\x1b\x60\xb4\x65\x12\
\x02\x23\x88\xa8\xf0\x45\xd5\x84\xa4\xbf\x2b\xc5\x6d\x0b\x76\xcb\
\xba\xaa\x62\xe7\xe7\x47\x96\x4d\x7e\xe1\x33\xc9\x00\x24\x9d\x59\
\xe0\x3f\xef\xe7\x1f\x8d\xd8\x77\xc2\xf7\x22\x89\x44\x89\x05\x98\
\x10\x80\x67\x91\xd2\x1c\x63\x50\xed\x18\x05\xe2\xd8\xf9\xb0\xb1\
\xeb\x01\x8b\x73\x50\xdf\x69\x31\x13\xe2\xad\xc3\xde\xac\x00\xf0\
\x61\x20\x6a\xda\xe9\x24\xfd\x29\xe1\x3c\xfe\x84\x00\x0a\xe7\xf8\
\x23\x56\x06\x50\x5e\x73\xe2\xa9\xe9\x7f\xdc\x3f\x51\x76\x49\x49\
\x67\x0c\xf8\x2b\x8e\x37\x95\x27\xb7\x05\xd3\x14\xca\xd4\x5d\x31\
\x7b\xcf\x2e\x17\xdf\x04\x28\x51\xd5\x77\x48\xf3\x85\x18\x09\x88\
\x93\x33\x00\x9b\x68\x00\x71\x78\x81\x84\xec\x7e\xe3\xe4\xf6\x90\
\xca\xf1\x24\x2d\xaf\xdf\x4e\xfa\x53\x1e\xf0\xa2\xe4\x57\xc4\x7d\
\x0a\x10\x68\x57\x76\x1c\x38\xfe\x89\xec\x92\x92\xce\x08\xf0\xe7\
\xfd\xbf\xcd\x65\x29\xcd\x01\x0f\xa1\x14\x0a\xa5\x9c\x99\x6c\x03\
\x42\x0b\x50\xf9\x6d\x80\xbd\xad\x0f\x98\x55\x7f\x24\xa8\x11\x00\
\x16\xbf\x83\x45\xe3\x8f\x97\x08\x44\x9d\xfd\x04\x2a\xc5\xc1\xb0\
\x20\xf9\x61\xf0\x39\x3d\xeb\x8f\xc2\x06\xf8\x31\x89\xaf\x18\xda\
\x80\xa2\xc4\xb6\x07\x5b\x02\xee\x61\x2b\x3e\xa9\x91\xdd\x52\x52\
\x9f\x06\xff\xa8\xff\xd9\x5a\xd2\x5e\xdf\x34\x4b\xa1\x80\x8b\x01\
\x9f\x50\x0a\x97\x1a\x2f\x6d\x17\xf6\x51\x00\xbb\x7c\xfe\x78\x61\
\x3e\xc4\x89\xed\x8b\x1a\x84\x08\x6c\xea\x90\xe0\x63\x67\xf7\x13\
\x58\x39\x07\x01\x76\x07\x23\xa0\x2c\xcb\x4f\x8c\xfb\xeb\xcc\x20\
\x27\x2d\xc9\xac\xee\x13\x2d\xe5\xc1\xa2\xf2\x13\x30\x06\xa0\x2d\
\x1f\x3e\x7c\x72\xc4\x84\x55\x7b\xff\x57\x76\x4d\x49\x7d\x12\xfc\
\xe3\x9e\x7c\x6f\xc4\x89\x63\x4d\x3f\xd7\xbd\x7b\x04\x80\x42\x29\
\x5c\x14\x50\x40\x39\xf5\x1f\x0e\x00\x86\x43\x86\x1f\x9c\x93\x7b\
\xc4\xf8\xbd\xdd\x08\x3f\x4e\x59\x83\x8e\x19\x80\xb0\x32\x2a\xd3\
\x36\x61\x99\x1d\xa3\x86\x55\xcb\xa9\x62\xce\x3f\x01\xb5\xda\xfb\
\x62\x8b\x49\x7e\xf3\xf2\x9e\xea\x63\x77\x5d\xf6\x52\x85\xb4\xff\
\x25\xf5\x3d\xf0\x1f\x6d\xf6\x97\x2a\xc1\xb0\x1b\xba\xba\x4f\x29\
\x5c\x6c\x19\x14\xb0\xd8\xff\x40\xc7\xb1\x78\x3e\xa6\x6f\x07\x5a\
\x8b\x24\xb7\xbb\x2e\xcc\xfb\xed\x7c\x0a\x16\x22\x09\x55\x00\x8b\
\x4c\x61\x0f\x1b\xe2\x8b\x9a\x94\x02\x23\xfd\xd7\x50\xfb\x39\x70\
\x53\x08\x40\xd7\xf7\x09\xcb\xa1\x08\xaa\x8e\x37\x6f\x94\xdd\x53\
\x52\x9f\x02\xff\xc8\xc7\xdf\xbd\x99\x1e\xf7\x16\xf1\x12\xd8\xc2\
\x00\x40\xa1\x74\xe4\x5d\x87\x83\x94\xb6\x55\xe9\xed\xea\x02\x20\
\x48\x7f\xd5\x7e\x3c\xbf\xb8\x29\xbd\x82\x4a\x4f\xc5\x61\x82\xe0\
\xe0\x2f\x00\xaa\x39\xe9\xaf\x87\xfd\x9c\x78\x0b\x14\x85\x35\x02\
\x28\x2e\xb3\xfd\x1f\xdb\xae\x70\x5a\x80\x82\x63\xf5\x4d\x79\x53\
\x57\xcb\x1a\x00\x49\x7d\x08\xfc\x4d\x0d\xcd\xbf\x35\x9b\xe3\xd4\
\xc2\x00\xdc\x2a\x85\x02\x5d\x13\x10\xbc\xff\xaa\x53\xdc\x1f\x66\
\xff\x80\x08\x60\x0a\x7b\xe6\x60\xd9\xcf\x1d\xa7\xd2\xf8\x0c\xc3\
\x02\x54\x62\x55\xf5\x89\x03\xa0\xc3\x14\x5a\xce\x8f\x7d\xd8\x6f\
\x6c\xba\xdb\xb8\xa6\xae\x01\xa8\xd4\x0c\x74\xdd\x1c\x10\x19\x00\
\x63\x02\x9f\x1d\x6e\xfc\xa9\xec\xa2\x92\xfa\x04\xf8\x07\x3d\xf4\
\xce\x2a\xd5\xd7\xe6\x89\x17\x6f\x57\x74\x7b\x97\x6d\x23\x16\xc9\
\x29\x48\x54\xbb\xd4\x5e\x3b\xf5\x1f\x36\x0e\x43\x38\xc4\xf3\xe3\
\x39\x01\x45\xa2\x36\xc0\xb6\x4d\xf6\xb1\x9e\xbb\xb7\x3d\x6a\x0a\
\xfb\x69\xa7\x1b\x8e\x3f\x4b\x52\x8f\xa2\x98\x99\x01\xbf\x6e\x62\
\x00\xda\x6f\xa0\x35\xe0\x1e\xfd\xdc\x1e\x99\xfe\x2b\xa9\xf7\xc1\
\x8f\x93\xbe\x05\xaa\xe0\x88\xe3\x13\x5b\xf4\x70\x9f\xc2\xec\x7e\
\xde\x0c\x88\x9f\xb8\x83\x04\x92\x79\xc4\x08\x00\xec\x43\x88\x10\
\xa4\xbf\xa5\xd0\x27\xde\xb8\x7d\x34\xb1\x11\xc1\xd8\x33\xd5\x85\
\xec\x1d\x7f\xf6\x5f\x9a\xe5\xf5\xf3\x00\x27\x8a\x96\x06\x1c\x47\
\x03\x38\x70\xb4\x71\xa6\xec\xa6\x92\x7a\x15\xfc\xf9\x8b\xdf\x5a\
\xd5\x1a\x08\xb9\x41\x69\x4c\x2b\xd7\x01\x46\x55\x5e\xd2\x53\xe6\
\xf9\xd7\xe3\xfe\xba\xf7\x1f\x36\xb1\x78\xd8\xa4\xf8\xa2\x63\x4d\
\x41\x04\xb7\x5d\xa1\x90\xe9\x1c\x41\xa3\xb0\x93\xec\xb6\x3c\x41\
\x98\xee\xc7\x94\x34\x44\x80\x28\x45\x7d\x84\x82\x0a\x27\xeb\x96\
\x8d\x25\xac\x67\xb2\xfd\x39\x06\x60\x6b\x02\xb0\x16\x8a\x62\xe8\
\xef\xf7\xbe\x27\xbb\xaa\xa4\x5e\x03\x3f\x3d\xd1\xbc\xc0\x58\xa1\
\x50\x85\xf9\xf0\x28\x35\xab\xfa\x7a\xf8\x4f\x89\x15\xd5\x39\x48\
\x78\xc0\x26\x36\xef\x74\x0c\xb5\x32\x0c\x4b\xb9\xae\x83\x77\xdf\
\xb1\xde\xdf\x2e\x03\x50\x9c\xe3\x0b\x8e\x1a\xc1\x67\x4c\xfa\x8b\
\xaa\xff\x90\x14\x17\x8c\xf4\x5e\x68\x25\xcd\x16\xe7\x9f\xc8\x00\
\xf4\xe6\x32\x96\x29\xc5\x91\xca\xfa\xe9\xb2\xab\x4a\xea\x15\xf0\
\xe7\xff\xe4\x5f\xab\xda\x82\x9a\xd4\xe7\x25\xb7\x2a\xac\xf3\x9d\
\x5f\xd7\x00\x14\x21\x0a\x60\x0b\x76\xa7\x12\x5f\x13\x43\x10\xc0\
\x2a\x3a\x00\xc5\xeb\x02\xce\x75\x06\xa0\xce\x63\xfa\xc1\x0e\xe4\
\x7c\x3c\xcf\x3c\xe9\x5f\x9b\x43\xcc\x3f\x29\x56\xd5\xc7\xef\x25\
\xce\x0c\x80\x97\xf8\xba\x06\x00\x02\x84\xa3\x40\x38\x8a\xfe\xbf\
\xd9\xf3\xb9\xec\xae\x92\x7a\x1c\xfc\xc9\xcd\x6d\xf3\x41\xf9\x8c\
\x36\x43\xf2\xaa\xd4\x1e\xc4\x84\x17\xa8\xba\x36\x10\x37\x59\xc7\
\x81\x39\x58\x7e\xed\xf2\x05\xec\x62\xfd\xb0\x99\xad\xd7\xc6\xec\
\xb0\xad\xe8\x23\x56\xae\x40\x04\x06\xa3\x93\x0a\x54\x84\x54\x5b\
\xd5\xdf\x74\xb2\xaa\xab\x40\x36\x0c\xc0\x25\x48\x7c\x17\x73\x04\
\x86\x23\xb1\xbc\x80\xc6\xe3\xbe\x31\xb2\xbb\x4a\xea\x51\xf0\x0f\
\x7b\xe8\xed\x9b\x7d\xde\x36\x8f\x16\xca\x53\xad\x80\xe5\x7d\x00\
\x9c\xfa\xcf\x4b\x7e\x43\xfa\xdb\x14\xff\x38\xda\xf4\xb0\x8f\xe3\
\xf3\x24\xd6\x04\xe8\xa8\xa3\x36\xde\x79\x71\x5c\x01\xc0\x41\x2b\
\x80\xd9\x3c\xb0\x35\x07\xcc\xc7\x1e\x08\xdb\xab\xfe\x16\xc6\xa1\
\x03\x1f\xa2\x0f\x80\xb7\xf3\xd9\x7a\x38\xaa\xdd\x4e\x67\x0e\xad\
\x01\x32\xea\xa5\xaa\x95\xb2\xcb\x4a\xea\x31\xf0\xc3\xe7\x7f\x12\
\x30\x66\xa6\xd1\x3d\xfc\xd4\xae\x2a\x4f\x30\xb7\x75\x7b\xdf\xc5\
\x36\xe8\x0c\xc0\x02\xca\x78\x75\xf9\x80\x8d\x44\xb7\x63\x12\x88\
\x13\xde\xb3\xab\xf1\x77\xd0\xf5\x1d\x47\xfa\x75\xfe\x44\xe1\x30\
\x45\xc8\xae\x7c\x20\xd9\xc5\xe5\xf5\x0b\x92\x1f\xd0\xb6\xeb\x52\
\xdf\xe5\x32\x96\xa3\x2a\xf3\x11\x70\xd9\x7f\x8a\x82\xea\xa3\xbe\
\x1f\xc8\x2e\x2b\xa9\xc7\xc0\xdf\xda\xd4\x32\x4a\x1f\x96\x9a\xe8\
\xe0\xa5\xd6\x4a\x3b\xd5\x21\x24\x47\xa8\x91\xfb\xaf\x13\x71\x1a\
\xb2\x9b\x67\x06\xfc\xba\x69\xbb\x8d\xe3\x0f\x71\x4a\x83\x4d\x38\
\xa7\xd6\x59\x79\x2c\x37\x8a\xb7\xd9\xc1\x01\x48\x81\x7d\x21\x15\
\x62\xc2\x8f\x51\xb1\xc7\x81\x5d\x64\x00\x10\x24\x3e\x00\x84\x22\
\x36\x9a\x81\x02\x78\x5b\x93\x64\x97\x95\xd4\x23\xe0\x1f\xf2\xe3\
\x37\x97\xd2\xb0\x4a\x74\x7b\x5d\x05\xd1\x6a\xd8\xf5\x3e\x2f\x38\
\xf1\x54\x1e\x63\x0c\xac\xba\xda\x7f\x4e\x46\x0a\xbe\x55\xd0\x1f\
\x4f\x4c\x2b\xc0\x93\xd3\x46\x62\xe9\xa5\x05\x78\x8a\xb5\xbb\xc7\
\x0c\xc4\x57\xf2\xb3\x1c\x32\xf8\x1c\x46\xf6\x81\x4d\xc8\x90\x3a\
\xe0\xd8\x71\x70\x50\x9b\x97\xe6\x1d\x7a\x76\xaa\xbf\x03\x8f\xa8\
\x0d\xab\xc8\x72\x11\x4c\xc9\x4a\xc2\xfc\x21\xa9\xb8\x75\x78\x3a\
\x9e\x1d\x9b\x85\x9f\x8f\xf3\xe0\xe7\xe3\x3c\xb8\x73\x68\x26\x46\
\x66\x25\x0b\xa5\xbd\x82\x19\x40\x08\x10\x0c\x9b\x43\x7e\x42\x6d\
\x80\x67\x75\xc5\x07\x67\x70\x7f\x2b\x00\x30\x9b\xfd\x4a\xea\xcb\
\xe0\x4f\x6a\x0b\xde\x44\xa8\x1a\x4b\xda\xd1\x6b\xd5\x09\x93\xe8\
\x31\x06\x60\x1b\x7a\xa3\x18\xde\x2f\x15\x4f\x5d\x35\x0e\xe5\x8f\
\xce\x45\xe5\xd3\xd7\xe0\xc5\xc5\x97\xe1\xa1\x5b\x27\xe2\xa1\x5b\
\x26\xe0\xc1\x5b\x26\xe0\xa1\x6f\x9f\x8f\x87\xbe\x35\x1e\xcf\xfe\
\x70\x2a\xfe\xf1\xe0\x74\xd0\x67\xae\xc1\x27\xdf\xbb\x18\x0f\x4f\
\x1c\x02\xb7\x5b\x31\xc0\x66\x51\xd7\x29\x96\x4e\x1e\x02\xba\xe4\
\x32\xd0\x47\xa7\x83\xfe\xf7\x34\xd0\x47\x2e\x05\x7d\xf8\x62\xd0\
\x07\x2f\x02\x7d\x60\x2a\xe8\x4f\xa6\x82\xfe\x78\x32\xe8\xa2\x49\
\x5a\xbb\x6f\x22\xe8\x3d\x13\x40\xef\x3a\x1f\xf4\x87\xe3\x41\xbf\
\x3f\x0e\xf4\x7b\x63\x40\xbf\x7b\x2e\xe8\x77\x0a\x41\xef\x28\x04\
\xbd\x7d\x34\xe8\xc2\x51\xa0\x0b\x47\x61\x6e\x6e\x0a\x57\xd2\x6b\
\x83\x7c\x9d\x21\xb8\x15\x3c\x5e\x90\x81\x3d\x33\xf3\xf0\xd2\x95\
\x79\x78\xec\x12\x0f\x6e\x1d\x9f\x89\x1b\xcf\x4d\xc7\xdd\x53\x3d\
\xb8\xff\xd2\x5c\xdc\x7f\x69\x2e\x9e\xbb\x6e\x28\x0e\xdc\x56\x08\
\xef\x1d\x63\xb1\xfe\x8a\xa1\xb8\x69\x70\x86\x55\x0b\x08\x86\x59\
\x22\x90\x62\x13\x11\xd0\x96\x7d\x4d\xfe\xa9\x9d\x04\xdd\x0a\x00\
\x65\xb0\x1a\x4a\x14\x40\x39\x80\xd5\x00\x8a\x01\x78\xba\xa8\x6f\
\x79\xd8\xf5\x4a\x01\xd4\xb0\xfb\x54\x03\xd8\xcc\x7e\x29\x00\x2f\
\x7b\xa6\x25\xdd\xc4\x10\x96\xb0\xeb\x97\x75\xd1\xf5\x67\xb3\x6b\
\x76\xa6\xcd\x3e\xc5\x6f\xbb\xa4\x9b\xda\xec\x84\xc0\x1f\x68\xf6\
\x8f\xd0\x80\xaf\x32\x6f\xbd\x31\x34\x35\xb1\xa8\xff\x34\x16\xff\
\x2f\xec\x97\x8a\xd2\x3b\xa7\x61\xd7\x8a\x6b\x71\xd7\xd7\xc7\x63\
\xd4\xd0\x7e\x09\xbf\xf1\xe4\x31\x39\x78\x72\xc1\x04\x9c\x78\x64\
\x26\x96\x5e\x34\x0c\x49\x49\x2e\x98\xb5\x81\x5e\x60\x91\xa6\xba\
\x7f\x03\xf4\xbf\x1c\xd3\x0f\xbe\xeb\x06\xe1\xd1\x4b\x3c\x18\x3f\
\x20\x31\x8d\x3c\x3b\xcd\x85\xeb\xce\xcf\xc6\xab\x5f\x2b\x40\xf5\
\xb7\xce\xc5\x4d\x43\x32\xb4\x8b\x86\x23\x5a\x8b\x31\x17\x31\xd7\
\x9f\xfd\xb6\x06\x4f\x45\xf5\x2f\x60\xe0\xab\x06\x70\x1f\x80\x59\
\x0e\xc7\x4d\x02\xb0\x00\xc0\x8b\x00\x9a\x18\x23\xe8\x2c\x58\x3c\
\xec\xfc\x1a\x76\xbd\xeb\x01\x8c\x70\xfa\x1c\xec\x99\x1e\x63\xcf\
\x58\xc6\x77\xce\xd3\xa4\x02\x76\xdd\x59\xac\x2d\xe9\x22\xf0\x3f\
\xd6\xc9\xb6\x99\x7d\xdb\x52\xc6\x14\x3b\xa2\xc7\xba\xa9\x75\x0c\
\xfe\xa1\xf7\x6f\xb8\x19\xfe\x90\x9b\x07\xbd\xa2\x0f\x5b\xc5\xd9\
\xf2\x1a\x2c\x0c\xd1\xfc\xc8\x8c\x42\x7c\xf2\xec\x75\xb8\xe2\xd2\
\x73\x4e\x4f\x6c\x64\x26\xe1\xc1\x9b\xc6\x62\xdf\x5d\x17\xe3\xda\
\x98\x49\xc0\x40\x48\x7b\x83\x03\x18\xba\xff\xad\x03\x53\xe1\xfb\
\xea\x60\x2c\x9a\x92\x8d\xac\x94\xce\x0f\x86\x54\xd0\x3f\x19\xaf\
\x7e\xad\x00\x5b\xfe\x6b\x38\x10\x89\x18\xa0\x17\x66\xfa\x31\x49\
\xff\x88\x8a\x82\x57\x0f\x26\xe2\xf5\x2f\x82\x26\xd1\xaf\x67\xeb\
\x3e\x00\xeb\x00\x3c\x0e\x60\x0e\x6b\x37\xb0\xf5\x75\x6c\xbf\x4e\
\x0b\xd8\xb9\xa7\x0a\x98\x12\x06\xfa\x05\x0c\xd8\x00\xb0\x13\xc0\
\x4a\x76\xaf\x39\xc2\xbd\x57\xb2\xfd\x3a\xcd\x62\x20\x29\xc5\xe9\
\x6b\x20\x22\xc0\x16\xa0\x6b\xb5\x8b\x9d\x00\xb6\x24\xd0\x6a\x85\
\xf3\xae\x67\x4c\xb1\x2c\xc1\xe7\xa9\x4d\xf0\x3e\x89\xb6\x9a\x0e\
\xc1\xef\x0a\x86\xbe\xa1\x3b\xea\x08\x57\xb5\xa7\x27\xf0\x50\x4a\
\xa1\xa8\x5a\xd3\xf1\xf8\x7f\xf3\x8b\xf0\xd0\x77\xa7\x74\x29\xe4\
\x46\x0e\x4e\xc7\x3f\x17\x5d\x84\xbb\x0a\xf3\x62\xd8\xef\x31\xe9\
\x6f\x73\x9f\x47\x47\x66\xe2\x0f\x73\xf3\x4e\x0b\xf4\x22\xcd\x2c\
\xec\x87\xc6\x7b\x8a\x90\x97\x93\x66\x2d\x06\xe2\x79\x0f\x63\x04\
\x47\x5b\xdb\x6f\x4a\x00\xf8\x65\x1c\x00\x1f\x67\x1d\x6d\x1e\xcc\
\xaa\x70\x29\x5b\x9f\xc7\xc0\x76\x03\xeb\x20\x60\xe7\x3e\xc6\x8e\
\x4b\x84\x56\x03\x78\x86\xbb\xe7\x3a\x00\x93\xd9\xb3\x94\xb0\x7b\
\x95\x09\xf7\x2e\x61\xfb\x47\x02\x58\x23\x00\xa4\x86\xed\xeb\x0c\
\x79\xd8\xb5\xc1\xbd\x8f\x1d\x43\x38\x1d\x2a\x81\x26\x45\x3b\x6a\
\x05\xec\xdf\x9b\xc3\xde\xd1\xc7\x31\xba\xf2\x04\xde\x71\x75\x82\
\xf7\x49\xb4\xad\xee\x10\xfc\xd4\xdf\x7e\x29\x5f\xa8\xa3\x6b\x00\
\x0a\x97\xb3\xaf\xea\x08\xa1\xc0\x3f\xee\x9a\x81\xdb\xe6\x8d\xeb\
\x36\x1c\x36\xb7\x87\x7b\x49\xe2\x1b\xf4\xbb\x09\x1e\x3c\x3e\x3d\
\xb7\x5b\xae\x9d\x93\xe1\xc6\xbe\xdb\xc7\x23\xcf\x93\xea\x3c\xcc\
\x37\xa3\x50\x4b\x30\x3f\x81\x0e\xa3\x83\x70\x21\x03\xb8\x37\x81\
\xc7\x28\x65\x1d\x64\x21\xd7\x49\x13\x39\x6f\x35\x34\xc9\x0a\x76\
\xde\x1c\xc6\x50\xca\x13\x7c\xfd\x1a\x06\xcc\xc9\x30\x34\x81\x6c\
\xc6\x24\x3a\x03\xd8\x12\xee\xfd\x8b\x19\x23\xd2\xb7\x7b\x7a\xa9\
\xfb\xe8\xef\x52\xc0\x3d\x8f\xfe\x8e\xbd\xf2\x4c\x8e\xe0\x0f\xb5\
\x06\x06\xe8\x7e\x3c\xc2\x06\xe7\x50\x28\x05\x51\x61\xb1\xff\x7f\
\x73\xd3\x64\x5c\x31\x7d\x68\xb7\x3d\xe4\xc3\x7f\xdc\x8b\x3f\x1e\
\x6c\xe2\x59\x53\x8f\x33\x82\x7b\x86\xa6\xe3\xbb\x45\xd9\xdd\x7a\
\x0f\x9d\x01\x68\xf9\x01\x70\x1e\xef\x3f\x10\x8a\xa7\x76\xcc\x83\
\x66\xc3\x03\x9a\xc4\x5f\xdd\x89\x47\x59\x0d\x4d\x22\xad\x49\x00\
\x7c\x3c\xf0\x77\xb2\xce\x5d\xd6\xc9\x4f\x50\xce\xdd\x17\x1c\x80\
\x3b\x03\x7e\x30\x90\xd5\x40\x73\x76\xea\xd7\x2b\x41\xef\x92\x97\
\xfd\x47\xfc\x3b\xae\xe8\x33\xe0\x1f\x7e\xff\x86\x11\x6e\x7f\xc8\
\xad\xab\xfa\x3a\x03\xd0\x42\xd6\x34\x16\xef\x27\x94\xe2\xeb\x85\
\x03\x71\xdb\x8d\xe7\x75\x78\xa3\x03\x87\x5a\xb0\x6c\xf5\x4e\xcc\
\x7f\x74\x13\xb2\xee\x2c\x45\xc6\xf7\xd7\x21\xfb\xbe\xd7\x71\xc3\
\x13\xef\xe2\x91\x55\xe5\xa8\x3e\xd2\x6a\x7b\xde\x4b\x1b\x0f\x62\
\xe9\xae\x3a\x87\x91\x79\xec\xe9\x4f\xef\xd7\xe3\xca\x17\xf6\x62\
\xee\x8b\x9f\xe1\xca\x35\xfb\x30\xf7\x0f\xfb\x71\xd5\x4b\xfb\x31\
\xf7\x4f\x95\xb8\xf2\xcf\x55\x98\xfb\x97\x03\xb8\xf2\x95\x6a\xcc\
\x7d\xad\x06\x57\xbd\x56\x8b\xb9\x7f\x3b\x88\x2b\xd6\x1e\xc2\x95\
\xa5\x87\x31\x77\xdd\x11\x5c\xbd\xee\x08\xde\x69\x0e\xc7\xbc\xf1\
\x63\xfb\x25\xe1\x57\x73\xf2\x3a\xbc\x6f\x73\xbb\x8a\xbf\x57\xf8\
\xb1\xe4\x23\x1f\x7e\xf8\x6e\x23\xa6\xbe\x7e\x0c\xe4\x4f\x07\x31\
\x77\xed\x61\x3c\xb4\xf1\x18\x6a\x9b\xc2\x09\x31\x80\x8d\xd7\x8d\
\x66\x60\x87\x90\x23\xc0\x5a\x38\x8a\x4b\xd6\xd7\x4d\x8f\x03\x7e\
\x9d\x4e\xa7\x53\xd5\x30\xe0\x7b\x3b\x60\x34\x3c\xf0\x67\x23\x31\
\x4d\xa1\x23\x2a\x86\xe6\x0f\x58\xd8\x09\xe6\x55\xcc\x31\x0d\xfd\
\xfd\xcb\x60\xa8\xff\xc5\xe8\x1b\x54\x0c\xc3\x1f\xd0\xd5\xfe\x88\
\xce\x83\x9f\x84\x23\x37\x80\x53\xf3\x15\xce\xc3\x1f\x6b\xa0\xc8\
\x4f\x71\xe3\xe9\x07\x3a\x2e\x37\x5f\xf6\xfc\x0e\xcc\x7c\x6a\x23\
\x96\x6e\xad\xc4\xc6\x3a\x2f\x28\x1b\xe5\x27\xd4\x1e\xc1\xeb\x47\
\xbd\x58\xbe\xe3\x10\x46\x2d\x2d\xc3\xf5\x4f\x6d\x43\x75\x9d\xc1\
\x04\x76\x54\x78\x71\xeb\x9b\x9f\xc3\x94\x02\xcc\xff\x3a\xd0\x21\
\x5f\x08\x9b\x4e\xb4\x61\xe3\x09\x3f\xfb\x6d\xc3\x3b\x27\xfc\xda\
\xfa\x49\x3f\x36\x9e\xf4\x63\x53\x43\x00\x1b\x4f\x06\xf0\x4e\x63\
\x10\x1b\x1b\x83\xd8\xdc\xd0\x8e\x4d\x0d\xed\xd8\xd8\x10\xc4\xdb\
\x8d\xed\x40\xac\x60\x87\xe2\x85\x0b\x3b\x56\xf5\xdf\xaa\x0e\x20\
\xfb\x8d\xe3\x98\xbf\xd3\x87\x9f\xd5\x07\x70\xa4\x3d\x1a\xdb\xb7\
\xb1\x31\x88\x65\x55\x3e\x14\xbc\x56\x8d\xef\xbf\x59\x07\x5f\x50\
\x8d\x7b\xad\x2b\xc6\x7b\x70\xeb\xa8\x1c\xee\x0f\x11\x1a\x80\xda\
\x60\xe4\x27\x0e\xa7\xeb\x9d\x68\x4b\x17\x01\xd1\x89\x3c\x1c\x30\
\x7d\x09\x30\x8a\x53\xa5\x12\x74\x4e\x6b\x59\xc2\xbd\x7f\x19\xb7\
\x5d\xbf\xd6\x88\x3e\xc4\x00\x96\x70\xcb\xb3\xfb\x04\xf8\x93\x42\
\x91\x59\x00\x4c\xde\x7d\x23\x4f\x9f\x49\x7f\x15\xb8\x63\xda\x68\
\xe4\x64\xa7\xc6\xbd\xc1\xbd\x3f\xdb\x86\xa7\xb6\x56\xa2\x25\x68\
\x84\xb1\x5c\xd4\xb8\x79\xcc\xa7\x00\x60\xfd\x51\x1f\xc6\xfe\xf2\
\x3d\xbc\xf4\x56\x0d\xaa\x8f\xb6\xe1\xe2\xe7\xff\x6d\x80\x9d\x76\
\xc6\xd3\x47\x9d\xe7\xeb\xb3\x1b\x3f\x80\x58\x8f\xb9\x2a\x37\x05\
\xd3\x87\xa5\xc5\xff\x07\x3f\xf4\xe2\x4b\x1f\x37\x02\x11\xad\x6c\
\x37\x10\xa6\x08\xb3\x71\x3b\xb4\x19\x79\x68\x4c\x75\x7f\xee\x50\
\x2b\xc6\xbe\xb4\x1f\x5e\x7f\x24\xfe\x35\xe7\x0e\xe3\xa6\xf6\x16\
\x1e\x8c\x00\x4d\xfe\xd0\x25\xbd\xdc\x71\x4b\x60\x96\xb0\xe5\x7d\
\x00\x4c\xc5\x30\xc2\x8a\x2b\x84\x7d\xab\x61\x48\xda\x25\x7d\x04\
\xfc\xa5\xdc\x72\x51\x9f\x00\x3f\x89\xaa\x39\x8a\x4a\xd9\x10\x76\
\x94\x9b\x5d\x9a\xc6\xec\x7f\x02\x8a\x9b\xae\x8d\xef\xe0\x7b\xe4\
\x57\xdb\xb1\x7a\x4f\x9d\x15\x70\x71\x40\x19\x0e\x45\x71\xeb\x1b\
\x9f\xe1\xca\x5f\x7f\x88\x48\x28\xca\x3d\x14\xb1\x9f\x84\x33\x0e\
\xee\x8d\xf3\x9c\x8a\x7c\xb8\x15\x87\xeb\x2d\x29\xca\x89\x7b\x9b\
\xe7\x77\xb5\xe0\xf1\x9a\x36\x4b\x75\xe0\x81\x08\x45\x0a\x6f\xab\
\xeb\xbf\xed\x61\x1c\x6b\xf0\x63\xcc\x9a\x7d\x71\x19\xc0\xa8\x81\
\xa9\xb8\x6a\x68\x96\x39\x23\x90\x13\xff\x51\x95\xa6\x77\xf0\xdf\
\x76\x77\x67\x2a\x61\xbf\xb5\x7d\x08\x4c\xc5\xdc\x33\x95\xda\xfd\
\x9d\xec\xb7\xaf\x48\x7f\x6f\x9f\x03\x7f\xa8\x35\x38\x49\x9f\x84\
\x03\x7c\x5c\x9f\x9b\xa2\x6a\xde\x98\x41\x18\x5d\xe0\xec\x8f\xd9\
\xf4\xc1\x61\xac\xfc\xb8\xc6\x0a\xb6\x98\x0f\x01\xb1\x5c\x01\xdd\
\x8f\xc0\x8b\xe1\xea\xb6\x90\xf9\x5c\xa7\x89\x37\xec\x98\x97\xa5\
\x1e\x20\x1e\x77\xb0\xdb\xa5\xed\xcb\x48\x75\xc5\x95\xfa\x87\x9b\
\x23\xf8\xce\x1e\x9f\xed\xe5\xf6\x85\x28\x06\x24\x29\xe6\x91\x7f\
\x22\x51\xc0\x1f\x02\x40\x70\xa2\x39\x84\x25\x6f\x1d\x89\xfb\x1e\
\xdf\x2c\xf4\xd8\x8f\xf7\x4f\x80\x68\x5b\xc8\x29\x73\xaa\x9c\xfd\
\x66\xa3\xfb\x54\xc9\x62\x18\x52\x7f\x75\x1f\x01\xfe\x6c\x18\x49\
\x4c\x4e\xcc\xa8\x14\x46\x14\xa3\x2f\x80\xbf\x57\xc9\x16\xfc\x2a\
\x38\x15\x9f\xab\xcf\xe7\x47\xe6\x99\x32\x66\x60\xdc\x0b\xff\xa6\
\x74\x37\x07\x24\x1a\xab\x03\xd0\x31\xa2\x70\x0c\xc0\x16\xb4\x16\
\x30\x9e\xae\x77\xbf\x03\x8d\x81\x58\xc7\xef\x2f\xce\x8b\x6f\xd2\
\xfc\xa2\xdc\xe7\xc4\x7e\x00\x95\xc2\xaf\xd7\x3a\x2b\x4c\xf2\xb7\
\x06\x4d\x47\xad\xdc\xd7\x88\x03\x27\xda\x1d\xaf\x7f\xc3\xe4\xfe\
\xc6\xf5\x20\x4c\xf9\xa5\x52\x12\xa7\x83\x83\x03\xa6\xa7\x9b\x80\
\x86\x3e\x06\x7e\x1d\xf0\xbe\x38\xcf\xe4\x85\x61\x0e\xcc\x42\x2f\
\xd8\xd9\x71\xa8\xbc\x4f\x80\x9f\x77\xf4\x29\xb1\xac\x56\x23\xa3\
\x8f\x50\x8a\x71\xe7\x0e\x70\xbc\x68\x55\x6d\x33\xde\x3c\xdc\x24\
\xc2\xc1\x02\x2e\x42\xa9\x66\xf7\x53\xce\xaa\xb5\x8c\xe1\x1f\x4f\
\x65\xb7\xa7\x69\x23\xfb\x61\xd9\xa4\x7c\xad\x5d\x90\x8f\xe5\x17\
\x0c\xc4\xb2\x89\x03\xb1\x6c\x62\x1e\x96\x4f\xcc\xc3\xb2\x09\x03\
\xb0\x6c\x7c\x7f\x2c\x1f\x9f\x8b\xe5\xe3\x73\xb1\xec\xbc\x1c\x2c\
\x3f\xcf\x83\xe5\xe7\x65\x23\x25\xd5\x18\x57\x7f\x46\x7e\x7c\xf0\
\xaf\x3c\xe2\x77\x7e\x39\x00\xbb\xc2\x9c\x79\xd3\x1a\x30\xe1\x58\
\xa7\x75\xbb\x1a\x1d\xaf\xef\x49\x77\x63\x80\x27\xc5\xca\x58\x4c\
\x55\x81\x16\x2a\x83\xe1\xd9\x1e\xc1\x3a\x55\x57\x77\x72\xfd\x7a\
\x3b\xc1\x65\x8c\xf5\x22\x15\xc0\x90\xfa\x2b\x3a\x38\x76\x85\x0d\
\xc3\xe8\x2d\x9a\xc7\x2d\xf7\xf8\x77\xb4\x77\xf8\x9d\x6c\xf1\x50\
\x93\xba\x4f\x63\x43\x70\x2b\x2c\xbd\xfe\xca\x19\xc3\x1d\x2f\xba\
\xe1\xdd\x1a\x67\xa0\xb2\x6d\x6b\xef\xbc\x14\xeb\xbe\x7b\x29\xfe\
\x79\xc7\x25\x78\xf3\x8e\x8b\xf1\xf6\xc2\x8b\xb0\x69\xe1\x45\xd8\
\xbc\xf0\x42\x6c\x5e\x30\x15\x9b\x6f\x9b\x8a\xcd\xb7\x4e\xc1\xe6\
\x5b\x26\x63\xf3\x2d\x93\x51\xf6\xed\x22\xfc\x7a\xc6\xc8\x78\x9d\
\x3e\x46\x33\xcf\xcb\xc1\xe2\x2f\x0f\xc7\xe2\x6b\x86\x61\xf1\x97\
\x86\xe2\x81\xab\x87\x62\xf1\x55\xe7\x60\xf1\x95\x83\xf1\xc0\x15\
\x83\xb1\x78\xce\x20\x2c\x9e\x33\x08\x0f\xcc\xca\xc7\x03\x33\x07\
\x62\xf1\x8c\x3c\x3c\x70\xd9\x00\x3c\x30\x7d\x00\x66\xe8\xe3\xed\
\x03\x18\x9a\xe5\x9c\x46\xff\xc1\xe1\xa0\xc3\x28\x40\x06\xb8\xeb\
\x75\xaf\x5f\x20\x64\x44\x0f\x4c\xd5\x7c\xc0\xef\x6b\x7c\x71\xdf\
\x65\x60\x8a\xdb\xea\xed\x27\x00\x5a\xdb\x49\x07\x9d\x6a\x27\xc7\
\x00\x36\xc3\x48\x32\xf1\x74\x41\xbf\xd1\x9d\x6a\xe5\x7d\x00\xf8\
\x3c\x88\x7d\x09\x80\xdf\x0b\x23\xc6\x3e\x0b\xbd\x5b\x61\x58\xc2\
\x2d\x97\xf6\x09\xf0\xc7\x06\xe5\x84\xe1\x8d\xd7\x9d\x7f\x89\x50\
\x4b\x5b\x7b\x4c\x72\xab\x7c\xf5\x9f\x7e\x71\x4a\x31\xe7\xa2\xc1\
\xac\x0d\xc2\x9c\x0b\xb5\x36\x7b\x6a\x3e\x66\x4f\xd1\xdb\x40\xcc\
\x9e\xcc\x5a\x51\x1e\x66\x4d\xca\xc3\xf9\xc3\xb2\x7a\xb0\xb0\x87\
\x20\x3b\x4e\x0a\xef\x07\xc7\xda\x9d\x4e\x33\x7f\xc7\xf6\xb0\x06\
\x7e\xbd\x82\x0f\x9c\xf3\x4f\x21\xf8\xec\x64\x20\xee\x53\x5c\xd5\
\x3f\xcd\x3a\xd7\x9f\x78\x1f\xfb\x0e\x3e\x1b\x46\x26\x99\xde\xd1\
\xf5\xc2\x9d\x32\x74\xae\xd2\x0c\x30\x6b\x11\x35\x7d\x00\xf8\x05\
\x30\x72\x0d\x4a\x91\x58\xb8\x71\x89\xc3\x72\x4f\x52\x31\x0c\x6d\
\x65\x4b\x9c\x6f\x59\x80\xae\x49\xeb\x2d\x48\x08\xfc\x7a\x28\x2f\
\x36\xee\x5e\x2c\x9f\x5f\xeb\xd1\x4a\x07\x0e\xb7\xff\x54\x9d\xe4\
\x98\x08\x04\x3b\x9b\xa0\x2f\x13\xe5\xd2\x68\x27\xe4\xa7\x9c\x02\
\xaf\x20\xf6\x5c\x34\x4a\x81\xb4\x64\xc0\xed\xe2\xb0\x4f\x3a\x38\
\xcf\x20\x8f\x69\x14\x60\x7d\xfc\x3f\x62\xf8\x11\xe2\x33\x80\x79\
\xd0\x52\x6d\xd7\x09\xfb\xf4\x4a\x3a\xbd\xd2\xac\x1c\x9a\xc4\x2c\
\xea\xd3\x7f\x8e\x3d\x95\x74\x02\xc8\x35\xdc\x37\xe9\x8d\x04\x9b\
\x62\x68\x8c\x18\x30\x72\x24\x9c\x68\x01\xfb\x9f\x4e\xb7\x15\x27\
\x06\x7e\x7e\x4c\x3e\x18\xb9\xfd\x89\xd2\x90\x9c\x34\x87\x1b\xf4\
\x64\x55\xce\xe9\x93\xaf\x5d\xed\xdc\x89\x04\x28\x4c\x55\x90\x4e\
\x55\x20\x23\x05\x97\x8c\xee\x0f\x8c\xe8\x0f\x9c\x93\x0b\xf4\xef\
\x07\x64\xa5\x01\xe9\x29\x40\x72\x92\xc6\x14\x4e\x89\xa9\x38\xc4\
\xfe\x9d\xa9\x8c\x31\x81\x91\x00\x16\xc1\x5a\xc1\x07\x68\xe9\xc0\
\xf7\x01\xd8\x01\x23\xb3\xef\x4c\x20\x0f\xf7\xac\x6b\x70\x6a\x9a\
\xc8\x0a\x07\x06\xd2\x9d\x54\x04\x4d\x3b\x79\x51\xb8\x77\x0d\x7a\
\x81\x1c\xf5\xda\x98\x9d\xcf\x77\xb9\x04\x99\xc0\xe0\xdc\x0c\xc1\
\x81\xd7\x95\xd4\x73\x79\xfd\x7b\x8e\x3b\x7b\xe2\xcf\xcf\x49\x36\
\x7f\x1c\x00\x50\x08\x46\xa6\x28\xf8\xaf\x74\x05\x99\x94\xc2\x1f\
\x88\x00\x14\xf8\xc4\x1f\xc1\xfc\xec\x64\x5c\xde\x3f\x0d\xc8\xc9\
\x00\xf2\xb2\x80\x01\x59\x40\xff\x4c\x8c\x19\xe6\x89\xfb\x0c\xaf\
\xd4\xb5\x9e\x22\xd6\x1d\xa9\x86\x75\xf8\x79\x0c\x34\x93\xa1\xa5\
\xcf\xae\x81\xb9\xec\x74\x04\xeb\x9c\x35\xe8\xfb\x9a\x40\x09\xac\
\xa9\xbc\x89\x52\x19\xcc\x29\xbf\x9e\x4e\x00\x79\x76\x02\xad\x18\
\x46\x12\xd4\x0e\x98\x4b\xac\x6f\x40\xc7\xd1\x92\x35\x30\x97\x42\
\x77\xb6\xad\x4e\x08\xfc\x94\x9b\x28\x93\x38\x0c\x71\x5f\x55\xed\
\x75\x7c\xda\x8b\x2f\x18\xec\x3c\x6d\xd5\x69\xe1\x96\x8b\x99\xc7\
\xeb\xe5\x27\x82\x78\xf7\x73\x2f\xb6\xee\xf3\x69\x6d\x7f\x33\xb6\
\x56\x34\x63\x6b\x45\x0b\xb6\x56\xb4\x60\x5b\x65\x0b\xb6\x55\xb5\
\x62\xdb\x81\x36\x6c\xab\x6e\xc3\xb6\x6a\x3f\xde\xab\xf1\xe3\xbd\
\x5a\x3f\x6a\xc3\x89\x49\xfb\x69\xe7\xa4\x98\xc0\x3f\x3c\x45\xc1\
\x35\xe9\x0a\xc6\xb9\x09\x82\x14\x28\x6f\x0e\xc5\x76\x87\x83\x51\
\xb4\x45\x54\x0c\x4a\x22\xb8\xc9\x93\x84\xa2\x4c\x37\x90\xe2\x06\
\x32\x52\xf1\xd5\x61\xf1\x6b\x57\xf2\xb2\xd2\x80\xcc\x34\x20\x2d\
\x05\x48\x76\x03\x6e\xa5\xab\x98\x41\x39\x8c\xd1\x7b\x0a\x18\x33\
\xe0\x4b\x4e\x47\x30\x80\x14\xd9\x9c\xa7\x53\x41\x1f\x00\x3f\x18\
\x88\xcb\x3b\x71\xbe\x0e\x88\xce\x14\xfc\x3c\x83\xc4\xd4\xed\x17\
\xa1\x69\x55\x93\x04\x40\x17\x20\x31\x27\x5f\x0d\xcc\xa5\xd0\x9d\
\x6d\x35\x09\x81\x5f\xcd\x49\x0f\xc4\xeb\x5b\x84\x02\xbb\xf6\x1e\
\x77\xdc\x7f\xc5\xf4\xa1\xc8\x4f\xd6\xbc\xe6\x2a\xef\x9f\xe2\x80\
\x3f\xff\xd1\x8d\xf8\xda\x92\xcd\x98\xff\xf8\x66\xdc\xf8\x78\x19\
\x6e\x78\xe2\x5d\xdc\xf0\xd3\x77\x31\xef\xc9\xad\xf1\xee\x9c\xd0\
\xbf\xf2\xca\xc7\xc7\x30\x6b\xed\x7e\xcc\x2c\xdd\x8f\x99\xeb\x2a\
\x31\x73\x7d\x25\x66\xfe\xa3\x0a\x33\x5f\x3f\x80\x99\x6f\x54\x63\
\xc6\x9b\xb5\x98\xb1\x81\xb5\x7f\x1d\xc2\x8c\xb7\x0f\xe1\xf2\xb7\
\x8f\xe0\xf2\x77\xea\x50\xd9\x12\x8e\xdd\xea\xfd\xba\xa0\xe3\x3d\
\xb2\x52\x14\xdc\x91\x9f\x8a\xc1\xa9\x0a\xae\x48\x77\x61\x5c\x92\
\xf1\x6c\xff\x6e\x0b\x6b\x23\xf0\x72\x83\x88\x6e\x6e\x35\x8a\x7a\
\xc6\xa5\x28\xb8\xc9\x93\x84\xd1\xe9\x2e\x7c\x69\x70\x7c\xbf\xc2\
\xd6\xe6\x30\x90\x9a\x04\x64\xa4\x00\xd9\xe9\x40\x4e\x26\x90\xdb\
\x0f\xc8\xcb\xea\x6a\xf5\xa7\x1c\x89\x95\x9c\x7a\x39\x06\x51\xd4\
\x8b\xc0\x2f\x86\x21\xf5\x97\x74\xf2\x1a\xab\x61\x68\x3d\x25\xdd\
\xf8\xac\xfa\x40\x2a\x8b\x00\xe4\xa0\xeb\xeb\x20\x3a\x45\xb6\xe0\
\x8f\xba\x5d\xed\xe2\x41\xb1\x51\x78\xf5\x0e\xbe\xab\x2e\xee\x85\
\xef\x9c\x36\x4a\x5b\x10\x7c\x7d\xba\xd0\xde\x58\xe7\xc3\x3b\x75\
\x5e\xfc\xab\xce\x87\x37\xeb\x9b\xf1\xfa\x51\x1f\xd6\xd7\x37\x63\
\x7d\xbd\xaf\x83\x47\xee\x42\x87\xa1\x38\x58\x86\x40\x6f\x1d\x8f\
\xef\x89\xbf\x77\x7c\x16\xc6\x25\x99\x3f\xe1\xb1\x88\x8a\x66\x7f\
\xc4\x32\x89\x68\x7b\x30\x8a\xd6\x08\x9b\xc1\x97\x02\x49\x04\xf8\
\x76\xff\x64\xcc\x1d\xee\x0c\xfe\x77\x6a\x83\x40\x66\x2a\x90\xe4\
\x32\x3f\xab\x42\x80\xd4\xa4\x68\x37\xf5\x09\x2f\xac\x25\xa7\x22\
\x30\xca\xd8\xef\x24\xf4\x9e\xf4\x5f\xc2\x01\x0b\xe8\xbc\x17\xbc\
\x94\x7b\xcf\xe2\x53\xb8\xff\x22\x74\xac\x6a\x8f\x64\x3d\xcb\xc3\
\xbe\xe9\x8a\xbe\x00\xfa\xb8\xe0\xd7\xb3\xf8\x4d\x5b\x88\x39\xd0\
\xb7\x69\xcf\xd1\xb8\x17\xfe\xfe\x37\x26\xe2\xbc\x7e\xa9\xa7\xfc\
\x40\xb4\x0b\xc0\x4d\xe2\x5d\x23\xae\xc9\x60\xce\xc5\x7f\xbb\xb1\
\x1d\x07\x7d\xce\xf9\xf7\x17\xe4\x27\x63\xde\x20\xf3\x3b\x7e\xe2\
\x6b\x77\x98\x7c\x94\x62\x83\xd7\xec\x43\xb8\x7d\x4c\x46\xdc\xf7\
\x78\xfb\x64\x04\xc8\x48\xd5\x9c\x84\x03\xfa\x01\x59\xe9\x40\x6a\
\xb2\x29\x4f\xa0\x1b\xa9\x84\x03\xd6\x3c\x07\xf0\xdb\xed\xeb\x09\
\x2a\x86\x91\x6b\x90\x8d\xd3\xf3\x82\xdf\x67\xc3\x50\x12\xa1\x72\
\x74\x42\xd5\xee\x4b\x64\x0b\x7e\x77\x46\x6a\x8d\x19\xf8\x56\x53\
\xbd\xaa\x39\x88\x4d\x5b\x0f\x3a\x5e\xd8\x93\x9d\x82\xe7\xee\xbe\
\x1c\x03\x52\xdc\xdd\xf2\xe0\x24\xd1\x03\xc8\x69\x5c\x8d\x10\xbc\
\xba\xaf\x25\xee\x99\x77\x4c\xcc\xc2\x84\x0c\xed\x1d\x3f\x6a\x0b\
\x6b\x95\x7d\xe2\xf0\xe2\xfa\xd7\x0b\x45\x71\x98\x95\xfa\xfe\xb0\
\x30\x03\x23\xb2\x9c\x3d\xfd\xad\x21\x8a\xdf\x36\x84\x8c\x18\xbf\
\xdb\xa5\x69\x01\xb9\x99\xc0\x20\x0f\x2e\x1e\xd8\xaf\xaa\x9b\xfb\
\x86\x17\x86\x54\x9c\x24\xec\x2b\x15\x98\x44\x6f\x80\xbf\x3b\xa8\
\x2f\x95\xfb\xf6\x0e\xf8\x15\x97\xcb\x67\xe7\x54\xd2\x99\x00\x65\
\x82\xe7\xb9\xd7\xca\xe3\x5e\x7c\xca\x84\x3c\xfc\x7d\xd1\x2c\x5c\
\x32\x20\xb3\x43\xb8\xe9\xeb\xb3\x72\xd3\x13\x7b\xf2\xd3\x91\x7c\
\xc4\x61\xa3\xcd\x43\xfd\xee\x50\x1b\xda\x42\xce\x4e\xc0\x8c\x64\
\x05\x4f\x4d\xcf\xc5\xfc\xc1\xa9\x68\x6c\x0b\x9b\xc1\x6e\x9a\x5f\
\x40\xdb\xb6\xcf\x1f\xc6\xe2\xf3\xfa\xe1\xb2\x73\xe2\xdb\xfa\xeb\
\x0f\xb6\xa3\x55\x05\x37\xbd\x37\x17\xe3\x77\x29\x18\x98\x9e\x54\
\xdf\x03\xfd\xa3\x26\xce\xf6\x35\x1c\x60\x7a\x92\x01\xcc\x86\x91\
\x1c\xb3\x08\xf6\xf9\x8f\xa7\xda\x72\x70\x16\x16\xfc\xd8\x82\x3f\
\x92\x9e\x52\xaa\x81\x9c\x98\x00\x2f\x1e\xbc\xf9\x60\x23\x36\xc6\
\x91\xfe\x3a\x03\x78\xfb\x97\xd7\xe2\xa1\x99\x85\x18\xdd\x2f\x15\
\x26\xc3\x9f\xc3\xe1\xf4\xdc\x74\xac\xf9\xca\x78\x94\x3d\x3e\xe7\
\xb4\x5f\x8a\x12\xe1\xea\xb6\xaa\x3e\x3f\x34\x96\xf5\xab\x24\xa7\
\xb8\x30\x21\xc3\x8d\x34\x02\xbc\xb4\x2b\xbe\x1f\x22\x23\x59\xc1\
\x9d\x45\xd9\xd8\x36\x7b\x10\xbe\x99\x9f\x66\x33\x85\x18\x05\x5c\
\x04\xbf\x18\x93\x85\xea\xff\x1a\x84\x29\xf9\xf1\x47\xdf\x6e\x08\
\xaa\xd8\x72\x32\x84\xdb\x3c\x49\x18\x9d\xa2\x98\x32\xfc\x14\xd6\
\x72\x5d\xa4\x27\x26\xef\xf0\xc4\xd9\xb7\x44\x58\x2e\xe8\xa1\x3e\
\xab\xdf\x37\x5e\x01\xcf\xa9\x92\x17\x86\x36\xd3\xd7\x0a\x7e\x7a\
\x16\xfc\xe8\x97\xb6\x3f\x86\x0d\x10\x66\xef\x93\x18\xb0\x74\x66\
\x40\x09\xf0\xc8\xf3\xdb\xd1\xe4\x0b\x76\x78\xa3\x07\xef\x98\x82\
\x4f\x7e\x75\x1d\x36\xdd\x3f\x0b\x6b\xbe\x3d\x15\x8b\x67\x14\xe2\
\xd9\xeb\x27\x62\xfd\x77\x2e\xc1\xd1\x9f\x5d\x83\xcd\x3f\xbd\x12\
\xb7\x5c\x33\xaa\xe3\x27\x4e\x40\xe2\x2f\xfe\xf2\x70\xd0\x07\x2f\
\x06\x5d\x7c\xa1\x31\x79\xc7\x8f\x26\x83\x96\x4c\x02\xbd\x77\x22\
\xe8\xdd\x13\x40\xef\x1a\x0f\xfa\x83\xf3\x40\xbf\x37\x16\xf4\xbb\
\x63\xb4\x89\x3b\xf4\x49\x3b\x6e\x1b\x89\x1f\x70\x15\x7d\xbf\x3d\
\xd8\x86\x9d\xf5\xed\x1d\xde\xf7\xb2\x11\x69\xf8\xf3\x97\x07\x81\
\x16\x8f\xc4\x7b\x73\xf2\xf1\xde\x15\xf9\x78\x6f\x4e\x3e\x76\x7f\
\x69\x08\xe8\x37\x87\xe3\xc7\x17\x79\x12\x1a\xf5\xf7\xb9\x7d\x9a\
\xa3\x31\x85\x00\x57\x67\x28\x58\x90\xed\xc6\x39\xc9\x0a\x14\x85\
\x40\x51\x08\x5c\x0a\x90\xee\x22\xe5\x0e\xa7\xaf\x46\xd7\x79\xe1\
\x75\x10\xd4\xda\xec\xab\x81\x36\x46\x20\xa0\xd9\xdd\xa5\xe8\xda\
\x0a\xc2\x62\x1b\x70\x17\xc0\x5c\xc0\xe3\xed\x06\xa6\x22\x2e\x9f\
\x5d\xe0\xaf\x7e\xf2\xca\x37\x28\x21\x50\x05\xc9\xaf\xb2\x01\x25\
\xf8\x14\xd8\x8a\x96\x00\xee\x5f\xbe\x25\xe1\x1b\x4e\x19\x3f\x00\
\xd7\xcf\x19\x81\x07\x17\x5c\x80\x5b\xaf\x2d\xc4\x9c\x8b\x06\xc3\
\xd3\x2f\xf9\xd4\x9e\x9a\x90\x1e\xfb\x30\xba\x16\xf1\xdf\xff\x6e\
\x40\x55\x63\x38\xe1\xf3\xa7\x0f\x4f\xc3\xf4\x61\x5a\x3b\x7f\x60\
\xe2\xef\xf7\xc7\xfd\x01\x54\x07\xcc\x8e\xfc\x0c\x05\x98\x97\xa9\
\xe0\x86\x7e\x2e\xf4\x53\xb4\xf7\xff\xcd\x94\xf4\x57\x1c\x80\xbf\
\x00\x5d\x33\xf9\xc5\x6c\x18\xb6\x7e\x69\x1c\xc0\xe8\x05\x44\x93\
\xd0\x75\x23\xd1\x2e\x81\x16\x1f\x5f\x00\xe7\x2a\xbc\xd5\x5d\xfc\
\x97\xd7\xa0\xef\x14\xfc\xf4\x1e\xf8\x01\x20\x92\xd7\xcf\xab\x00\
\x8c\x01\x10\x10\x9d\x11\x10\x7d\x54\x1f\xb6\x0d\xc0\x5f\x2b\x8e\
\xa1\xe4\xa7\x9b\xba\xf5\x41\xbd\xad\x61\x3c\xbd\xa9\xba\xd7\x3e\
\x54\x7d\x84\x62\xd1\x07\x27\x4f\x89\x01\x9c\x2a\xad\xd9\xe7\xc7\
\x96\x86\x30\x63\x3a\x56\x06\x37\xd8\x4d\x70\x5b\xb6\x1b\xf3\xfb\
\xb9\x76\x3a\x5c\x42\x07\x9e\xee\x01\x5f\xd2\x49\x30\x16\x09\x80\
\x5f\xd1\x01\x93\x10\x19\x40\x51\x27\x3f\x81\x87\xdd\xf7\x31\xb6\
\xce\xab\xf6\x05\x30\x0a\x78\x4e\x35\x95\x37\x51\x5a\xed\xc0\x68\
\xce\x2e\xf0\xa7\x64\xa5\xed\x54\x99\xd4\x57\x63\xde\x7e\x62\x52\
\xf9\x63\x7e\x01\x10\xbc\xb0\xab\x0e\x5f\xff\xf1\xeb\xf0\x26\x60\
\x02\x9c\x2a\xfd\xe3\x83\x3a\x0c\x59\xb6\x0d\xff\x38\xd6\xd2\x23\
\x52\x5f\x24\xc2\xee\x59\x1f\x51\xf1\xa3\x0f\x4e\x62\xfb\xe1\xae\
\x7d\xc7\x40\x84\xe2\xe9\x4f\x5b\x63\xc0\xef\xe8\xcf\x1a\xe4\x26\
\xa5\x0e\x87\xcc\x83\xa1\x8a\x83\x81\xa8\x1c\xa7\x96\xbe\x5a\x0c\
\xf3\x84\x1f\x8b\x3a\x00\x9a\xd7\x86\x01\xec\x60\x0c\xa3\xe0\x14\
\x40\xbf\x84\xdd\x47\x4f\x7f\xad\x65\xd7\x2d\xb7\x01\x63\x77\x01\
\xb3\x0c\x46\xca\x6f\xaf\x8c\xa8\xdb\x27\xc0\x1f\x4c\x4f\x2d\x55\
\x99\xea\x0f\x4e\xea\x83\x81\x9d\x10\x82\x28\x63\x00\x3a\x1e\x37\
\x1c\xf1\x62\xca\x7d\xeb\xf1\x87\xb5\x9f\x77\xc9\xc3\x55\x1f\x6d\
\xc3\xf5\xbf\xd8\x8e\xaf\xae\xdd\x83\x40\x58\xcc\x69\xe9\x9d\xea\
\xc0\x63\x11\x8a\xfb\x77\x34\xe2\x99\x8f\x9b\x3a\x1c\x85\x37\x11\
\x7a\xbf\x2e\x84\x9f\x7c\xdc\x8c\xdd\x2d\x51\xc7\x08\x88\x0d\x95\
\xc7\xb9\xe4\x12\x68\x09\x26\x7c\x3d\xbf\x5e\xca\x5b\x0a\xa3\x94\
\x57\x6c\x2b\x60\xcc\xaf\xa7\x03\x7f\x25\x12\xcb\x99\xd7\x19\x00\
\x3f\x8d\xd8\x7d\xd0\xe6\xdf\x2b\x85\x31\xbb\x0d\xcf\x80\x66\xc3\
\xb0\xeb\x6b\x18\xa3\xe2\x67\xfb\x29\xe2\xde\xd3\x03\x23\x9f\x60\
\x0b\xba\x37\x7e\xbe\xa2\x07\x98\xcc\xa9\x50\x31\xba\x26\xbd\x57\
\x6f\x2b\x3a\x04\xff\xa1\xa7\xbf\xbc\x42\x25\x46\xe9\xa8\xc9\x07\
\xc0\x49\x7d\x95\x18\x8e\x40\x10\xe0\x44\x28\x82\x1f\xfe\x75\x07\
\x2e\xfa\x61\x29\xfe\x58\xba\x0f\x5e\x2e\xc7\x3d\x51\x5a\xbf\xed\
\x08\xae\xff\xd9\xfb\x18\xf5\x8b\x6d\x58\x5f\xdf\x6c\x45\x43\x0f\
\xe2\x5e\x8f\xc4\xc7\x86\x1f\x63\xf7\x7e\x70\xbf\x17\xb9\x7f\xae\
\xc2\xc3\x09\x8e\xc7\xcf\x93\xaf\x5d\xc5\xab\x7b\x5b\x90\xf3\xd7\
\xc3\xf8\xef\x3d\xcd\x68\x8d\x3a\x0d\x2d\x66\xbf\xf9\x99\xc9\xe9\
\xa5\x1d\xdc\xa2\x8c\x81\x67\x21\xcc\xce\xba\xeb\x61\x94\xf2\xda\
\x25\xbb\xe8\x89\x33\xb5\xd0\x8a\x4e\x4a\x4e\xe1\xb5\xbc\xec\xf8\
\x39\x30\x4f\x91\x75\x3d\x8c\x3c\xf8\x26\x18\x31\x10\x3d\xef\x9d\
\x9f\xd7\x6f\x0b\x8c\xd9\x7e\xbc\xdc\x35\x4a\x70\xfa\xa9\xbc\x89\
\x52\x29\xf7\xcd\xe6\xa1\xf7\x66\xf8\xd1\x69\x04\x8c\xc9\x46\xbb\
\xa2\x15\x75\x08\x7e\x00\x48\x1a\xec\xa9\x37\x81\x5c\x77\xfa\x09\
\x52\x9f\x12\xa2\x45\x05\x74\x90\x10\x60\x5f\x4b\x10\x77\xfd\xbd\
\x1c\x23\xee\x2d\xc5\xed\x4f\x94\x61\xf9\x0b\xe5\xd8\xfc\x61\x1d\
\x36\x7f\x54\x87\xea\xc3\x5a\xe2\xcc\x8e\xcf\x1b\x51\xf6\xef\x7a\
\xac\x7f\xf7\x10\x96\xfd\x69\x0f\x8a\x7f\xb9\x1d\xa4\xe4\x0d\x5c\
\xff\xb7\x5d\x58\x5f\xdf\x02\x23\x2c\x48\x2c\xea\xfe\xaa\xcf\x4f\
\xe0\xba\xff\xdb\x81\xeb\x7e\xb3\x13\xd7\x3d\xf7\x29\xae\xfd\xdd\
\x2e\x5c\xf3\xfb\xdd\xb8\x7a\xd5\x1e\x5c\xfd\xc2\x5e\x5c\xfd\xe2\
\x67\xb8\x76\xcd\xe7\x58\xf0\x97\x4a\xdc\xf5\xb7\x03\xb8\xfb\xef\
\xd5\xb8\xa7\xb4\x06\xf7\xae\xab\xc5\x3d\xeb\x0f\xe2\x9e\x7f\x1e\
\xc2\x3d\x6f\x1c\xc1\xbd\x6f\x1e\xc1\x3d\x1b\x8e\xe2\xde\xb7\xea\
\x71\xef\x3b\xc7\x70\xdf\x3b\xc7\xb0\xe8\x9d\xe3\xf8\xb4\xd5\x0a\
\x68\x3d\xe2\x71\x3c\xa4\x22\xd4\x1a\x82\x1a\x56\xb1\x94\x8d\xc7\
\x7f\xee\x2b\xb5\x78\x64\xd3\x71\xfc\xe5\x53\x1f\xde\xab\xd5\x8a\
\x84\x00\xa0\xb6\x29\x8c\xf7\x6a\x03\x78\xaf\x36\x80\x5f\x6c\x6f\
\xc4\xb7\x37\xd4\xc3\xf3\xca\x41\xdc\xfc\x9f\x06\x78\x03\x11\x6c\
\xf6\xda\x9b\x10\x8a\x65\x3d\xf6\xfe\xeb\x4e\xa1\xe3\xac\x86\x51\
\xb4\x23\x4e\x8a\x69\xe1\x49\xec\xda\x0b\x91\x78\xd1\x89\x13\xe3\
\x99\xcd\xdd\xb3\xb6\x83\xe3\x6b\xd9\x71\x93\xd9\x79\x65\x36\xc7\
\x14\x31\xc6\xb0\x06\x9d\x9f\x0d\xe8\x54\xa8\x04\x46\xb1\xd0\x6c\
\x6e\x7b\x0d\x8c\x09\x2f\xbd\xdd\xfc\x0c\x5b\xba\xa9\x95\x27\x04\
\x7e\x35\x2b\xfd\x75\x0d\xf8\x88\x01\x9f\xb0\x75\x35\xa6\x09\x90\
\x98\xd4\x8f\xc5\xa2\x19\x58\x09\x03\xee\xdf\xab\x4f\x62\xe9\xb6\
\x2a\xdc\xf8\xfb\x0f\x70\xe3\xef\xb7\x63\xd2\xff\xbc\x83\xcc\xef\
\x97\x62\xfa\xca\x6d\xf8\xd2\x0b\x1f\xe3\x86\x57\x77\xe2\xa1\xed\
\x35\x58\x53\xdb\x68\x96\x7a\x16\x3d\xd8\x88\x77\x57\xfa\x43\xf8\
\xe7\xf1\x56\xad\x1d\x6b\xc5\x1b\xc7\x5a\xf1\xaf\x13\xad\x78\xfb\
\x44\x2b\xde\x3e\xd9\x86\x8f\x9b\xdb\x51\xd9\x16\xc2\x07\xde\x00\
\xde\x6a\x08\xe0\x5f\x0d\x01\x6c\x68\x08\xe2\xcd\xc6\x20\x36\x34\
\x05\xb1\xa1\xa9\x1d\x1b\x1a\x03\x78\xb3\xa9\x1d\x6f\x36\xb5\x6b\
\xeb\x4d\xed\xd8\xe0\x0d\x61\x83\xaf\x1d\x47\x23\xaa\xe5\xe3\xe8\
\x93\x85\x57\x34\x05\x2c\x03\x95\x54\xfa\xda\xf1\x54\x95\x0f\xdf\
\xfc\xf0\x38\x2e\x7f\xbb\x0e\x97\xbf\x53\x07\xf2\x7c\x25\x0a\xd6\
\x1e\xc4\xe5\x1b\xeb\x70\xf9\xa6\xa3\xf8\xc9\xe7\x5e\xfc\xb9\x3e\
\x60\x9e\xda\x3c\x18\xc5\xe1\x60\xd4\x59\x99\xb1\xfa\x38\x3a\x03\
\xca\x72\x18\x93\x62\xea\x49\x2d\x7c\x0e\x7a\x0e\x0c\xd5\x7a\x75\
\x17\x75\x5e\xfd\x9e\x05\x30\x26\xaa\x14\x1b\x61\xfb\x4b\x10\xdf\
\x94\x99\x07\xc3\x4c\xe8\x09\x2a\x85\x35\xf7\x5f\x67\xa6\xfa\xf6\
\xf2\x6e\x7e\x86\xd9\xdd\xd4\x4a\x12\x02\x7f\x72\x46\xea\x13\x51\
\x85\x80\x2a\x0c\xe8\x00\xa2\x04\x31\xf5\x9f\xf0\xb9\x00\x2c\x04\
\xa8\x98\xa6\x98\x12\x96\xb9\x51\x68\x29\xe7\x3c\x34\xf6\xb3\x05\
\xdb\xf1\x2b\xe2\x0c\x5f\xc5\x31\x9f\xdc\x14\x37\xc6\xa4\x25\x61\
\xa0\x5b\x0f\x53\x9a\x9d\x94\x3c\x51\x68\x1a\x8b\x62\xb3\x4f\x11\
\x40\xa7\xab\xfb\xe5\x2d\xed\x40\x38\x6a\x06\xb0\x38\x62\x11\xa5\
\xd6\xd9\x85\x4c\xc3\x98\xc1\x34\x87\xc1\xc6\xc6\x80\xe9\xbd\x94\
\xf8\xf8\xef\x0c\xf8\x45\xf2\xc2\x6c\x07\x76\xb7\x14\x03\xec\xed\
\x4f\x49\xbd\x48\x71\xc1\x5f\xf1\xd3\x2b\x6a\xfb\xe5\x67\xd5\xeb\
\x12\x5f\x67\x02\xaa\xee\xf8\xe3\x24\x3f\x15\xe6\x94\xd3\xa5\xbe\
\xee\x29\xe7\xe7\x9c\xd0\x22\x08\x06\xc8\x4d\xc0\x13\xd5\x7c\x27\
\x91\x48\xb8\x51\x6c\x09\x41\x5e\x8a\x0b\x63\xd3\xdc\x18\xe0\x56\
\x38\xd0\x5b\x4f\xd6\x98\x16\x89\x65\xca\xc6\xfb\x28\xfa\x73\xe9\
\x76\x7f\x73\x54\x45\xb0\x85\x4b\xf6\xb1\x05\xbb\xb8\x8d\xda\xcc\
\x0e\x44\xcd\x4c\xa0\x3d\x82\x9d\xcd\xa1\xb8\x91\x0c\xf6\x2c\xeb\
\x56\x4c\x49\xef\x09\xa0\x4a\x3a\xdb\xc1\x0f\x00\xa1\xac\x8c\xd7\
\x75\xcf\xbe\x15\xf8\x30\xab\xfb\x31\xa0\x13\xeb\x24\x93\x4c\x33\
\x88\xea\xe7\x81\x33\x1d\x60\x1e\xd1\x56\x3c\x0f\x44\xe1\x06\xaf\
\xe4\x18\x83\x42\x30\x30\xd9\x85\x71\x69\x6e\xe4\xba\x95\x58\xe5\
\x21\x0f\xfa\x58\x6e\x82\x9e\x1a\x6b\xa9\x56\x84\x29\x83\xd1\x65\
\xe1\x31\xc6\xf1\x9f\x37\xf8\xb5\x31\xf9\xc4\x49\x48\x20\x82\xdc\
\x81\x01\xf0\xb3\x14\x99\xb6\x03\x9f\x34\x05\x6c\xff\x10\x81\x1d\
\x94\xca\x2e\x2b\xa9\xc7\xc0\x7f\xf0\x17\xd7\x7c\x47\x4d\x76\x53\
\x3b\xe0\x13\x18\xa0\x56\x78\x46\xc0\x80\x4b\x62\x5a\x00\x5b\x87\
\x96\x33\x10\x25\x4a\x2c\x4a\xa0\x9a\x86\xa4\x23\xd6\x1a\x7b\xd1\
\xf6\x67\xd7\x1a\x98\xe2\xc2\xb8\xb4\x24\x0e\xf4\xc4\x54\x83\x60\
\xb8\x07\x74\xc0\x73\x0c\x41\x00\x3d\x75\xf8\x18\xbc\xa7\xbf\xd2\
\x1f\x06\x6d\x8f\xc0\x24\xb5\x79\x06\x20\x4a\x79\xbb\x7d\xb6\xcc\
\x82\xfd\x46\x54\x7c\xea\x6b\x37\x69\x41\x02\xf0\x7d\x2b\xa7\x66\
\xac\x96\x5d\x56\x52\x8f\x81\x1f\x00\xd2\x87\xe4\xbc\x1b\x8d\xc5\
\xfb\x0d\xe0\xeb\xcb\x0a\x27\xfd\x63\x80\x87\xae\xf6\x23\x96\x27\
\x10\x15\xd4\x5a\x95\x08\xaa\x39\x3f\x23\x8d\xdd\x3c\x77\x0a\x41\
\x5e\xb2\x0b\xe7\xa5\xb9\xd1\xdf\xa5\x30\x90\x13\xee\x14\xdd\xd9\
\x08\x93\xbd\x2f\x82\x1e\x36\xa0\x57\x88\x51\xc8\x24\x7e\x98\xa0\
\x4a\xd1\xc4\x24\xb3\x69\xd2\xd0\x78\x0c\xc0\x6e\x7e\x40\xea\xc0\
\x10\xd8\x39\xff\x66\xf7\xb0\xcb\xee\x53\x7a\x69\x0e\x77\x49\x67\
\x39\xf8\x33\xb3\x33\x16\x10\x0e\x1c\x66\xe0\x13\x01\xf8\x10\x24\
\xb6\x21\x75\x55\x02\x50\x26\xf5\xb5\x5a\x01\x18\x65\xaa\xb6\x52\
\x9f\x6d\x57\x14\xf4\x4f\x52\x70\x5e\xaa\x1b\x79\x6e\xa6\xb8\x13\
\x23\xb4\x68\x48\x71\x98\xec\xfd\x58\x4e\x02\xdf\x18\x5b\x72\x11\
\x2d\x7c\x96\x88\xb3\xaf\xa2\xd1\x6f\x0c\xcb\xc5\x83\x9b\xc6\x61\
\x00\x80\xa0\xfe\xdb\x1c\x2f\xe0\x1f\x61\x4d\xfa\xf3\x3c\x8f\x23\
\x09\x7e\x49\x3d\x0f\xfe\xbd\x8f\xcd\xaa\xcd\x18\x94\x53\x45\x39\
\xc7\x1f\x55\x88\x03\xf0\x45\x75\x5f\x6b\x51\x26\x6d\xa3\x0c\x6c\
\x51\xde\xd6\x07\x8c\x7a\x75\xde\xb6\x57\x14\xe4\xa7\xb8\x70\x6e\
\xaa\x1b\x03\x62\x43\x59\x89\x8e\x46\xb3\x14\xd7\x41\x6f\x68\x02\
\x66\x7b\x9f\x9f\x64\x47\xb4\xf7\xf9\x0f\xa2\x33\x84\xfa\x40\x18\
\xa1\xd6\x76\x0d\xfc\xaa\xaa\x8d\xc1\x45\x29\xfb\x55\xb5\xa6\x52\
\xab\x54\x77\x9c\x60\x94\x67\x1e\x3c\x03\xd0\xd6\x3f\x6a\x0c\x18\
\x7f\x0a\x7b\x58\x05\x64\xcd\xca\xa9\x19\x5e\xd9\x5d\x25\xf5\x38\
\xf8\x01\x20\x77\x40\x56\xb1\x21\xfd\x99\x8d\xcf\x26\x8f\xb0\x00\
\xdf\xa4\xee\x1b\x4e\x3e\x5d\xcd\x57\x4d\xce\x42\xc0\x62\xd8\x2b\
\x9a\x4d\x5f\x98\xea\x46\x96\x5b\xd1\x6e\x03\xb3\x1d\xaf\x4b\x7f\
\x5e\xe5\x27\xec\xb9\x08\xe1\x9d\x7e\xfa\x3c\xc0\x06\xa0\x9d\xec\
\x7d\xd1\xd9\x17\x52\x29\x8e\x1e\x6f\xd6\x40\x4f\x99\xe4\x57\x55\
\x63\x5d\x15\x98\x80\x45\xd2\xdb\x7c\x48\x51\x33\x30\x99\x10\x14\
\x08\xab\xd8\xe9\x33\x22\x0a\x0c\xff\x4b\x64\x57\x95\xd4\x6b\xe0\
\xdf\xfd\xe8\xcc\x6d\x99\xc3\xf2\xb6\x18\xce\x3d\x07\x89\x0f\x02\
\xa2\x68\xa0\xd3\x33\x03\x4d\x4e\x3e\xa2\x17\x0a\xd9\x48\x7d\x97\
\x82\xdc\x14\x17\x46\xa5\xba\xd1\xcf\xa5\x18\xe0\xe5\xb4\x01\x95\
\x57\xef\x4d\xd2\xdf\x46\xe5\x87\x59\xe5\xd7\xd1\xc4\xc7\xf7\xf5\
\x0c\x66\xd1\xde\x77\x01\x38\xe8\xf5\xb3\x61\xb9\x18\xc8\x55\x6e\
\x44\x5e\x1d\xf4\xba\x36\xa0\x33\x05\xd5\xe1\x78\x91\x01\x88\xce\
\x08\x6e\xfb\xf6\x06\x7f\x8c\x6d\x01\x58\xf9\xab\x0b\x33\x6a\x64\
\x57\x95\xd4\x6b\xe0\x07\x80\x7c\x4f\xc6\x82\xb4\x64\x37\x8d\x07\
\x7c\xd1\x6b\xaf\x12\x9d\x11\x68\x00\x8c\x58\xd4\x7c\x6d\xa4\x9a\
\x9c\x64\x17\x46\x26\xbb\xe0\xe1\x1d\x79\x0a\x89\x49\x7d\x05\xbc\
\xe4\xe6\xbc\xfb\xba\x6b\x91\x3d\x83\x49\x3b\x20\xa2\xca\x6f\xc4\
\xf7\x75\xa6\x21\x7e\x0c\xdd\xde\x6f\x08\x46\xd0\xda\xd0\x66\x05\
\xbc\x4a\x0d\xfb\x3f\x66\x02\x70\x5a\x00\xaf\x19\x50\x36\x5d\x97\
\xca\x1d\x6b\x49\xf6\xe1\xb4\x00\x7d\x7c\xf3\xb0\x8a\x4f\xb5\xc1\
\x3e\x7d\x90\x52\x5f\x52\x5f\x00\xff\x8e\x87\x2f\xab\xcd\x1c\x92\
\xbb\x2e\x1e\xf0\x09\x57\x05\xa8\xdb\xf5\x11\xc5\xac\xee\xc7\x40\
\xa7\x28\xc8\x4e\x76\x61\x78\x8a\x1b\xd9\x2e\x23\x96\x6f\x80\x92\
\x98\xb6\xe9\xaa\xbe\x49\x19\x50\x38\xfb\x1f\x44\xf0\xf2\x9b\x55\
\x7e\xbb\x10\x1f\x05\xb1\xa8\xfc\x94\x00\x87\xeb\xbd\x82\xaa\xcf\
\x01\x9d\xf2\x0c\x80\x63\x0e\x54\x3c\x4e\x35\xb4\x03\xaa\x5a\x1d\
\x7f\x54\x74\x0c\x1a\xcf\xf0\xfe\x49\x3f\x00\x2c\xf9\xdf\x0b\x33\
\xbd\xb2\x9b\x4a\xea\x75\xf0\x03\x40\xf5\x93\x57\xde\x90\x9c\x95\
\x1e\xd0\x40\xaf\x98\x54\x7d\xc2\x4d\x26\x29\xda\xf9\x51\x85\x30\
\x6f\x3f\x01\x71\x69\xa0\x1f\x96\xe2\x36\x4b\x7a\xce\x76\xb7\x93\
\xfa\x31\x47\x1f\x8c\xe4\x20\x23\xc3\x96\xbb\x9f\x2e\xd9\x15\x62\
\xf6\xf2\x33\x2d\xa0\x23\x95\xff\x70\x43\x2b\x68\x30\x6c\x63\xe3\
\x0b\x00\xe6\x27\xe6\xb0\x63\x10\xbc\xb4\x37\x69\x07\x76\xa6\x80\
\xc0\x14\x5c\x24\xf0\xeb\x8b\x32\x57\xc8\x2e\x2a\xa9\xcf\x80\x1f\
\x00\x86\x0e\xc9\x5d\x48\x38\xef\xbc\x6e\xe3\xeb\xc0\x8f\xc4\x24\
\xbf\x66\xe7\xeb\xc0\x57\x14\x05\xb9\xc9\x2e\x0c\x4d\x76\x21\x3b\
\x06\x7a\xc3\x2b\x4f\x4d\xb1\xfd\x8e\xa5\xbe\x7e\x2e\x14\x03\xcc\
\x44\x07\x79\xcc\xae\x37\xbc\xfc\xa2\xf4\x17\x3f\x84\x42\x08\x5a\
\x42\x51\x34\x9d\x68\x36\x83\xd5\xa2\xe2\x8b\xa0\x17\xc1\x0e\xc1\
\xe6\xe7\x7f\x05\x6d\x40\xb5\x61\x02\x00\xe6\x66\xa7\x2c\x94\xdd\
\x53\x52\x9f\x03\x7f\xf9\xc3\x97\xbd\x92\x7d\x4e\xff\x2d\x7c\xf6\
\x9e\x2e\x65\x45\xe0\x53\x02\xb8\x98\x4d\x3f\x24\xc5\x85\x4c\x97\
\x12\x93\xee\x94\x1b\x95\x56\x0f\x1d\xea\x1a\x80\xe8\xe1\x87\xa2\
\xb0\x61\xc3\x88\x39\x6a\x00\x98\x35\x01\xdd\xa1\x48\x60\x5a\x8e\
\x25\xf6\x98\xb2\x01\xad\x2a\xff\xa1\x23\x0d\x9a\x93\x4f\xa5\x66\
\x80\x52\x07\x67\x1e\x6f\xeb\x53\x1b\xa7\x5f\xd4\x46\xe2\xab\x7c\
\xd8\x90\x0b\x15\x32\xe0\x8f\xf2\xa4\x6c\x79\xfb\x9a\x81\xaf\xc8\
\xee\x29\xa9\xcf\x81\x1f\x00\x6a\x9f\x98\x33\x3b\x92\x93\xe9\x05\
\x51\x62\x52\x34\xc2\xec\xef\x88\xa2\x01\x5f\x71\x11\xf4\x4b\x71\
\x23\x3f\xc5\x85\x0c\x06\xfa\x58\x96\xa0\x00\x76\x5e\xdd\x77\xf2\
\xf0\xab\x82\xad\x2f\x4a\x7d\xdd\xff\xa0\x70\x2a\xbe\xe8\xe8\x03\
\x17\x0e\xd4\xb5\x0d\x5d\xe5\x3f\xe6\xf3\x23\xd2\x16\xe4\xc2\x6f\
\x76\x92\x9f\x03\xac\xaa\x3a\x3b\xfc\x4c\xe1\x41\x01\xf8\x3a\xd0\
\x79\x06\xc0\x34\x06\x25\xcd\xed\xad\xba\x7e\xc8\x6c\xd9\x35\x25\
\xf5\x59\xf0\x03\xc0\xd8\x21\xb9\x45\xd1\x24\x17\x55\x89\xe6\xd4\
\x03\x73\xee\xb9\x14\x05\xd9\x29\x2e\x0c\x48\x71\x23\xc3\x65\x4e\
\xfb\xa5\x31\x50\x82\x93\xf2\x24\x26\x9d\x01\x6d\x78\x6a\x85\x73\
\x2a\x12\x21\x75\x58\xb4\xf5\x79\xa9\x6f\x9b\xf1\x27\x26\x04\x09\
\x85\x3c\x0a\x80\x88\x4a\x71\xe2\xf0\x49\xc1\x63\x6f\x27\xa9\x45\
\x06\x20\x66\xfc\x89\xea\xbf\x6a\xe3\x38\x14\x19\x00\x6b\x84\xd2\
\x2f\x79\x52\x8a\x64\xb7\x94\xd4\xe7\xc1\xff\x9f\x1f\x5d\x54\x5b\
\x58\x30\xf0\x47\x7a\xda\x2e\xdc\x0a\xb2\x92\x5d\xc8\x4b\x71\x21\
\xcd\xa5\x98\xa4\x31\x0f\x7c\x3e\x14\x17\x4b\x1a\x82\x91\xa4\xc3\
\xab\xfb\xfc\xf0\x61\xb1\xe4\x20\x4e\xea\x13\xbe\x70\x88\x6b\x0a\
\xbf\xae\x33\x1a\x18\x05\x48\x0a\x31\x57\xec\x1d\x3a\xdc\x00\x44\
\xa2\x42\x58\x4f\x75\x50\xd7\x05\x53\xc0\x14\xeb\x77\xf0\x05\x88\
\xcc\xc3\x86\x99\x4c\x1b\x90\xf6\xa3\x37\xae\xc9\xaf\x3d\x83\xfb\
\x93\xa4\xb3\x05\xfc\x00\xf0\xc9\x03\x97\xae\x28\x18\x96\xf7\x6c\
\x66\xb2\x0b\xfd\x93\xdd\x48\x75\x29\x5c\xb9\xaf\x33\xf0\xcd\xdb\
\x38\xef\xbe\xa0\xee\x9b\x8b\x7d\x10\xcb\xea\xd3\x8b\x87\x78\xa6\
\xa0\x92\x0e\xa4\xbe\x4d\x3a\x2f\x21\x40\x43\x4b\x00\x81\xc6\x16\
\xfb\x44\x1e\x93\x33\x0f\x82\x07\x9f\x67\x00\xd1\xf8\x4e\x42\xea\
\xe4\x30\xd4\x7e\x2f\x18\x90\xfe\xfc\xfb\x5f\x3d\x67\x85\xec\x92\
\x92\xce\x18\xf0\x03\xc0\xee\x47\x2e\xbb\x37\xcd\xad\xac\xa4\x5c\
\xe6\x1f\xed\x00\xf8\x44\x11\x81\x6f\x5f\x27\x80\xd8\xd0\x61\x86\
\x93\x8f\x1f\x42\x4c\xf7\xf0\x23\xa6\x3d\x28\x16\xa9\xaf\x87\xf7\
\x78\xdb\x9f\x10\x02\x17\x80\x70\x44\xc5\xb1\x9a\xe3\x82\xda\xae\
\x5a\xc3\x74\x54\x48\xe3\x55\x85\xd0\x5c\x4c\x8d\x8f\xda\xc4\xfb\
\x61\x63\x3a\x18\x0c\x60\x68\x76\x6a\xf9\xce\xf9\xc3\xbf\x23\xbb\
\xa3\xa4\x33\x0e\xfc\x00\x50\xf3\xf4\x97\x4b\xa0\x90\x35\x44\x04\
\xb9\x03\xf0\xed\x1c\x7c\x0a\x5f\x0c\xa4\x28\x5c\x7a\x30\xb1\x75\
\xf2\xe9\xf6\x01\x11\xe2\xfa\x26\x0d\xc0\x14\xfb\x37\x46\x1b\x06\
\xb4\x63\x8e\x1f\xf7\x02\xa1\x88\xd5\x23\x6f\x17\xd7\x57\xed\x24\
\xbe\x68\xcf\xc3\x9a\xea\x4b\x55\x1b\x8d\x41\xdb\xde\x3f\x2b\xa5\
\xea\xd0\xb7\x47\x4d\x96\x5d\x51\xd2\x19\x0b\x7e\x00\x38\xf8\xf3\
\x6b\x8a\xa1\x90\x35\x2a\x27\x79\xa9\xc2\xc0\xae\x30\x89\xac\x98\
\x6d\x73\xb3\x83\x0f\x6c\x66\x20\x63\xb4\x60\x22\x8c\x16\x04\x9b\
\x70\x5f\xec\x7e\x5c\x8c\x1f\x82\xad\x6f\x27\xf5\x5b\xdb\x82\xf0\
\x1d\x69\x30\x67\xe3\x89\x99\x78\x3c\xb8\xa9\xe8\x9d\x8f\xd3\x54\
\x07\x2d\x82\xdb\x9f\x97\x95\x56\x75\x72\xc1\xb9\x85\xb2\x1b\x4a\
\x3a\xe3\xc1\x0f\x00\xb5\xcb\xaf\x2e\x86\xa2\xac\x21\x8a\x62\xf2\
\xda\x13\x02\xa8\xa2\xc4\x27\x66\xe0\x53\x62\x67\xe7\x6b\xcd\x08\
\xf7\x39\x3b\xf9\x88\x62\x48\x7a\x70\xa6\x01\xff\xab\x13\x25\xc0\
\xe1\x03\xc7\x6c\xa4\x71\x1c\xb5\x5f\xb5\xf3\x05\x08\xd2\xdf\x16\
\xf8\xaa\x50\x04\x44\x31\xc8\x93\x56\x75\x7c\xe1\x18\x09\x7c\x49\
\x5f\x1c\xf0\x03\xc0\xc1\xa5\x73\x8b\x09\xc1\x1a\x1e\xa8\xa2\x73\
\x4f\x94\xf8\x94\x58\x52\xf7\x4c\x00\x86\x83\xba\x2f\x3a\xf9\x08\
\x67\x3a\xf0\x71\x7d\xde\xc3\xef\x02\x70\xbc\xae\x11\xf0\x07\xad\
\xd2\x5a\xb5\xf1\xc6\x5b\x34\x00\x6a\xef\xd8\xb3\xe4\xfa\xc3\xa6\
\xb8\x47\xc5\x10\x4f\x5a\xd5\xd1\x3b\xc6\x49\xe0\x4b\xfa\xe2\x81\
\x1f\x00\xaa\x9f\xbc\xb2\x58\x21\x64\xa1\x49\xe5\x4f\x14\xf8\x8a\
\xa1\xce\xeb\x19\x39\xc4\xc1\xf1\x67\xeb\xe4\x63\x8c\x46\x15\x4c\
\x04\x9d\xda\xdb\xc3\x68\x8c\xc5\xf4\xa9\x83\x3d\x2e\x86\xfa\x10\
\xff\xf8\x78\x61\x3e\x18\x36\xfe\x84\x41\xd9\xcf\x1f\xb9\x73\xbc\
\x04\xbe\xa4\x2f\x2e\xf8\x01\xe0\xc0\x13\x73\x56\x13\x42\xe6\x10\
\x42\x7c\x44\x51\x62\x5e\xfd\x44\x80\x0f\x0e\xf8\xe0\x1c\x7f\x10\
\xa3\x02\xcc\xa7\x60\xe7\xe4\x23\x31\x1f\x83\x21\xf5\x15\x02\xd4\
\x54\x1c\x01\x22\x11\xb3\x07\xdf\x12\x77\x77\xb0\xf9\xa9\x6a\xaf\
\xfa\xf3\x49\x3b\x76\xe6\x82\x42\xe8\x85\x43\x73\x17\xed\x2a\x1e\
\x2b\xbd\xfa\x92\xbe\xf8\xe0\x07\x80\xca\x25\xb3\xca\x40\x48\x11\
\x21\x64\xa7\x42\xcc\x5e\x7d\xe2\x08\x7c\x23\x5d\x17\xbc\x43\x4f\
\xdf\xc6\x00\x4d\xf5\x72\x5e\x98\x2b\x0a\x45\x27\x1f\x9f\xc3\xdf\
\x54\xdf\x04\xb5\xd9\xcf\x79\xe5\xed\x0a\x71\x44\x13\x80\x0a\x1a\
\x80\x2a\xa4\xf8\xc2\xde\x2f\xc0\xae\x95\x95\x96\x14\x98\x36\x34\
\x67\xe6\xc7\x0b\xc6\xae\x90\x5d\x4e\xd2\x59\x03\x7e\x00\xa8\x78\
\x74\x46\xcd\xfe\xff\x77\x79\x91\x42\xc8\x4a\x45\x50\xe1\xed\x80\
\x6f\x80\xde\x90\xe0\xb1\xb4\x3f\xbe\x34\x18\x82\xc3\x8f\x53\xf7\
\x79\x27\x9f\x9e\xc3\x1f\x8e\xaa\x38\x56\x5d\x6f\xa3\xba\xb3\xd9\
\x76\x2d\x36\xbb\xae\xb2\x27\x60\xe7\xdb\x69\x0e\x2a\xc5\xe8\xfe\
\x99\x1f\xfa\x1e\xb8\x38\xfd\xfd\xdb\xc6\x6e\x93\xdd\x4d\xd2\x59\
\x07\x7e\x9d\xf6\x3e\x72\x59\x09\x21\x64\x0e\x08\x6a\xa9\xa2\x40\
\x6b\x4e\xc0\xd7\x26\xeb\xe0\x81\xaf\x0f\x09\x46\xf4\xb1\x04\xb8\
\xb8\xbf\x9d\xba\x2f\x3a\xf9\x0e\x7e\x76\x88\x9b\x6a\xcb\x2e\xa4\
\x07\x7b\x90\x9b\x42\x75\x6a\x07\xfb\xb4\xeb\xa6\x24\xbb\x22\x53\
\x86\xf7\x5f\x54\x79\xef\xe4\x4b\x65\x37\x93\x74\xd6\x83\x1f\x00\
\xf6\x3c\x34\xad\x6c\xcf\x83\xd3\x0a\x54\x42\x1e\x07\x81\x0f\xb1\
\x01\x3e\x09\x74\x86\xa0\xcd\x0f\x68\x05\x3e\x9f\xc5\xa7\x03\x3f\
\x56\x9b\x2f\x24\x16\x89\xea\xbe\xcf\xdb\x8a\x50\x63\xb3\x90\x8b\
\x6f\xe3\xe1\xb7\xf8\x01\xec\xe2\xfc\xd4\x5c\xf9\xc7\x33\x02\x97\
\x42\xc7\x0c\xcc\xde\x12\x7c\x6c\x46\xd2\x7f\xee\x9c\xb0\x42\x76\
\x31\x49\x12\xfc\x02\x7d\xf6\x93\x8b\x97\x80\x90\x22\x10\xb2\x86\
\x77\xee\xa9\x6c\x5c\x3f\x3b\xe0\x13\xce\xde\x57\x63\xe3\xfe\xf3\
\xc0\x57\x84\x48\x80\xc6\x00\x22\x91\x28\xea\x3e\x3f\x64\x00\x96\
\x81\x96\xd8\x34\x45\xd5\x1a\x61\xcd\x62\xff\x8b\x6a\x3e\xc7\x0c\
\x46\xf7\xcf\xac\xba\x64\x44\xde\xcc\x7d\x8b\xa6\xce\x96\x5d\x4b\
\x92\x04\x7f\x3c\x06\xf0\xe3\x8b\x6a\x3e\xfb\xd1\x85\xc5\x50\xc8\
\xc8\x18\x13\xe8\x00\xf8\x54\xa8\x1b\x10\xed\x7c\xd1\xbb\xef\x02\
\xd0\x54\x75\x14\x08\x86\xa0\xa8\x2a\x5c\x2a\xd5\x1a\xa5\x70\xab\
\xe6\x96\xc4\xf6\xbb\x55\x0a\x37\xd5\x9a\x4b\xa5\x50\x38\xc6\x40\
\x6c\x06\xf4\x18\x95\x9b\x59\x75\x49\xe1\xa0\x19\x95\x0f\x4e\x2b\
\xdc\xfe\xbd\x89\xd2\xb6\x97\x24\xc1\x9f\x28\xed\xbb\x6f\x4a\xcd\
\xbe\x7b\x27\x17\xab\x84\x8c\x24\x84\xac\x89\x07\x7c\x22\x00\xdf\
\x94\xde\xab\x70\xde\x7e\x06\xfc\xb6\x16\x3f\x1a\xea\x4e\xc2\xa5\
\x52\x10\x0a\x28\x0c\xd0\xda\xba\x06\xec\x24\x55\x85\x8b\x52\x28\
\x14\xec\xd7\x00\xbd\x9b\xed\x4f\x8a\x1d\x0f\x28\xcc\xeb\x3f\xbc\
\x7f\xbf\xaa\x4b\x0a\x07\xcd\xa8\x7a\xe4\xb2\xc2\xed\x3f\x98\x24\
\x41\x2f\x49\x82\xbf\xb3\x54\x71\xf7\xa4\x9a\xfd\x77\x5d\x50\x0c\
\x42\x72\x54\x42\x16\x81\x90\x5a\x5b\xe0\x2b\x06\xf0\x89\x62\xae\
\x04\x24\x9c\x9d\x4f\x08\x70\x70\x6f\x2d\x92\x74\x49\xcf\x40\xed\
\xe6\x40\xed\x56\x55\x10\x4a\xe1\x52\x55\xa3\x51\xad\xb9\xb9\xfd\
\xda\xf1\x14\x48\x76\x47\x26\x9c\x93\x5b\x3a\xfd\xdc\xc1\x05\xb5\
\x8f\xce\x28\xdc\x7e\xf7\x64\x09\x7a\x49\x12\xfc\x5d\x45\xfb\xbf\
\x3f\xd1\x5b\xf9\xbd\x09\x2b\x2a\xef\x3c\xbf\x00\x0a\x99\x4c\x09\
\x59\x49\x38\x46\x60\x0f\x7c\x23\x8b\x4f\xf7\xfc\x37\x56\x1c\x41\
\x72\x6b\x30\x26\xc1\xdd\xba\x84\x67\x20\xd7\x99\x80\x0e\xec\xd8\
\xba\xca\xd6\x59\x53\x92\xdd\x91\xbc\xfc\xec\xf2\x89\xa3\xf3\xbf\
\x11\xf9\xe5\x35\x49\x3b\x1f\x9a\x7e\xc3\x7b\xf7\x4e\x91\x83\x6e\
\x48\x92\xe0\xef\x4e\xaa\xb8\xe3\xbc\xf2\xca\xdb\xc7\x95\x54\x2c\
\x1c\x5b\x00\x85\x8c\x24\x84\x2c\x52\x09\xd6\x11\x85\xd4\x8a\xaa\
\x3f\x9f\xc5\x17\xf5\xb7\xa3\xe1\xf0\x49\x4d\xda\xf3\x6a\xbc\x4a\
\xe1\x62\xea\x3f\xef\xc8\xa3\x6c\x9d\xa8\x14\x6a\x4a\x52\x84\xe4\
\x64\xd6\x0f\x1c\x3a\xa0\x74\xcc\x98\x21\x33\xda\x56\x5e\x9b\x54\
\xb3\x64\xd6\xe4\x8f\xee\xbf\x44\x0e\xaa\x29\x49\x82\xbf\x37\x68\
\xff\x6d\xe7\xd6\xec\xbb\xb5\x70\x45\xc5\x2d\x85\xf3\x2a\xbe\x35\
\xaa\x80\x10\x32\x52\x25\x64\x0e\x08\x79\x5c\x21\x58\x43\x08\xd9\
\xe2\x02\x6a\x15\x00\x95\xbb\xab\x91\x1c\x8e\x68\xde\x7b\x4a\xa1\
\x50\x95\xd9\xea\x86\x07\x9f\xa4\x24\x45\x22\x59\x69\xde\xa4\x01\
\x59\x55\xd9\x83\x73\xb6\x14\x8c\x1a\xb4\x68\xf4\x98\x21\x33\x9a\
\x9f\xfd\x4a\x52\xe3\xf2\xab\x06\xef\x7f\x74\xc6\x0d\x1f\xdd\x7f\
\x89\x54\xeb\x25\x49\xf0\xf7\x39\x66\xf0\x8d\x82\x9a\xca\x9b\x47\
\x94\x55\x7e\x7d\xf8\x92\xfd\x37\x0d\x2f\xde\xf7\xb5\x61\xb3\xf7\
\x7e\x6d\x58\xc1\x9e\xf9\xc3\x88\xba\xfc\x2a\xd2\xfa\xdc\x3c\x52\
\x50\x38\x68\xc6\xf0\x82\xfc\x45\x43\x46\x0e\x5a\x34\xe2\xdc\xc1\
\x33\xbc\xab\x6e\x24\xde\x55\x37\x12\xef\xf3\xf3\x49\xd3\xaf\xaf\
\x4b\x6a\xfd\xe5\x97\x73\x4e\x2c\x9b\x5b\x58\xfb\xc4\x9c\xd9\xe5\
\x0f\x4f\x5f\xf1\xef\x07\x2e\x95\x60\x97\x24\xc1\xff\x45\xa0\x5d\
\x0f\x5c\xba\x6d\xf7\xc3\xd3\x57\xec\x7d\x78\xfa\x8a\x9d\x8b\xa7\
\x49\x60\x4b\x92\x74\xb6\x80\x5f\x92\x24\x49\x12\xfc\x92\x24\x49\
\x92\xe0\x97\x24\x49\x92\x04\xbf\x24\x49\x12\xfc\x92\x24\x49\x92\
\xe0\x97\x24\x49\x92\x04\xbf\x24\x49\x92\x24\xf8\x25\x49\x92\xf4\
\x05\xa2\xff\x0f\x62\x20\x19\xf4\x2f\xa5\x49\x2e\x00\x00\x00\x25\
\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\
\x32\x30\x31\x37\x2d\x30\x33\x2d\x32\x36\x54\x32\x31\x3a\x30\x35\
\x3a\x30\x35\x2b\x31\x31\x3a\x30\x30\x36\x6e\xa9\x1b\x00\x00\x00\
\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\
\x00\x32\x30\x31\x37\x2d\x30\x33\x2d\x32\x36\x54\x32\x31\x3a\x30\
\x35\x3a\x30\x34\x2b\x31\x31\x3a\x30\x30\xe1\x44\x1a\x13\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x83\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x28\x00\x00\x00\x28\x08\x06\x00\x00\x00\x8c\xfe\xb8\x6d\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x2e\x23\x00\x00\
\x2e\x23\x01\x78\xa5\x3f\x76\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe2\x0b\x1e\x00\x33\x3a\x62\x29\xbd\x16\x00\x00\x0b\x10\x49\x44\
\x41\x54\x58\xc3\xed\x98\x7b\x8c\x5d\x57\x75\x87\xbf\xb5\xf6\x3e\
\xf7\x31\xf7\xce\x9d\x19\xcf\xd8\xf1\xcc\x78\xfc\x1a\x3f\xb0\x9d\
\x04\x8c\x13\x12\x12\x4a\x48\x9c\x12\x4a\x13\x09\xe2\xd2\x34\xb4\
\x2a\x12\x69\xa4\x84\x96\x12\x90\x50\xe9\x23\x40\x48\x69\x8b\x48\
\x1f\xa0\x28\x4e\x9d\xb6\x40\xdb\x50\x52\xb5\x51\x4b\x65\xaa\x10\
\x12\x19\x5a\xa7\x20\x3b\x31\xc5\x71\x62\x27\xf1\x6b\x5e\x76\x66\
\x3c\xef\x3b\x73\xef\x3d\xf7\xec\xbd\xfb\xc7\xb9\xf3\xb0\x5d\xbb\
\x49\x09\xfc\xc5\x91\x96\xf6\x3e\xf7\xea\x9c\xf3\x69\xfd\xd6\xda\
\x7b\xad\x0d\x3f\xbb\x7e\xbc\x4b\xfe\xbf\x0f\x16\xff\x60\xbf\xaa\
\x6a\x6b\x08\xac\x51\xa3\x9d\x08\x5d\xd9\xac\x6d\x53\xa3\x38\x2f\
\xe3\x91\xd5\x21\x1b\xe9\xe9\x75\xab\x8b\xc7\x9a\x0b\x76\xe2\x5f\
\xb7\x2f\xf5\x3f\x15\xc0\xc2\x27\xf7\x96\x02\x7a\x75\x10\xb9\x19\
\xd5\x6b\x44\x75\xad\xaa\x36\x89\x91\xac\xaa\xa2\x46\x51\x63\x30\
\x46\x6b\x51\x46\x67\xd7\xaf\x69\x3e\xda\xda\x1c\x3d\x63\x44\xfe\
\xcd\x28\x3f\x50\x91\xe9\x6f\xbc\x73\xc9\x1b\x0f\x58\xbc\x67\x4f\
\x21\xa9\x87\x1b\x83\xca\xdd\xa8\xb9\x16\x63\x8a\x62\x14\x31\x06\
\x55\x45\x8c\xa2\x46\x16\x03\xa2\x46\xe9\xee\xcc\xd3\xbd\x3c\x4f\
\xa4\x32\x6d\x55\xf6\x1a\x91\x9d\x93\x93\xb5\xa7\xbe\x75\x4b\xe7\
\xcc\x1b\x02\x58\xbc\xeb\x09\xf1\x9e\x8d\x5e\xe4\x5e\x1f\xe4\x16\
\x31\xa6\x19\xa3\x60\x2c\x62\x53\x40\x31\x06\x35\x8a\xcc\x01\xea\
\x02\x60\xb1\x18\xb1\x7e\x6d\x91\x5c\xc6\x10\xa9\xa0\x30\xfd\xfc\
\xc1\xe1\xdd\xc3\xa7\xa7\xef\x6f\xca\xe9\x8b\x7d\xf7\x5e\x11\x2e\
\xf6\x7d\x73\xb1\x3f\x73\xb7\xff\xb3\x7a\xe7\x6f\xf2\xce\xef\x0a\
\x9e\xed\x40\x56\x00\x44\x10\x99\x1b\x05\x54\xd3\xb1\x71\xbf\xd8\
\x42\x80\x62\x21\x22\x9b\xd1\xf4\x99\x40\xb6\xaf\x6f\x72\xcb\xcc\
\x54\xf5\xba\xfa\x6c\xed\x98\xef\x7d\xdf\x31\x77\xf0\x1b\x17\x84\
\xd4\x0b\xfd\x91\x79\xff\x3f\xa8\xaf\x27\x37\xfb\x38\xd9\x15\x12\
\x77\x19\xde\x09\xde\x83\xf7\xe0\x16\x8f\x01\xbc\x27\x84\x00\x8b\
\x6c\x6e\xea\x5c\x60\x62\x2a\xc6\x85\x80\xf7\x81\x00\xd8\xc8\x48\
\x80\x4b\xe3\x4a\xbc\x8b\x10\x6e\xce\xdc\xf6\xb8\xbe\x2e\xc0\x25\
\x77\xfc\x8b\xa8\x84\x9b\x70\xfe\x41\x9c\xeb\x39\x0b\xea\x22\x16\
\x42\x68\x18\x04\x1a\x73\x02\xd3\xe5\x84\x38\xf6\xb8\x00\x01\x88\
\x32\x06\x54\xf1\x3e\xf4\x00\x0f\xaa\x91\xf7\xac\xf9\xec\x5e\x79\
\xcd\x80\xd5\xb1\xf2\xe6\x50\x4f\xbe\x88\xf7\x3d\x67\x41\x5c\x00\
\x52\xfc\x9c\x27\xcf\x86\x83\xd4\x8b\x71\xec\x29\xcf\x24\xf8\x10\
\xf0\x80\x18\x25\xa8\x12\x54\xc0\x98\x9e\x80\xfc\xe9\xf0\x89\xb1\
\xcb\x5e\x13\x60\xd3\x2d\x5f\x6d\x76\xb5\xf8\x33\xc1\xb9\xcd\x78\
\x77\x3e\x88\x9b\x03\xf2\x48\x43\xde\x39\x99\x99\xf7\x60\xea\xaa\
\xb9\xb9\xf7\x81\xc9\xe9\x04\xe7\xd3\x79\x36\x67\x09\x46\x41\x0d\
\x58\x4b\x50\xdd\x98\x24\xfe\xd3\xc5\x0f\xef\x2e\x5d\x14\x70\xed\
\xa7\x9e\xc2\xd7\xe2\x9b\x42\x3d\x79\x2f\xce\xcb\xfc\x87\x17\x5b\
\xf0\x0b\x50\xc1\x2f\xb2\xd0\xf8\x6d\x31\xdc\xc2\xbb\x67\x67\x13\
\xaa\xb1\xc7\x13\xb0\x51\x2a\x71\x30\x0a\x91\x85\x28\x92\xa0\xfa\
\x0b\x3e\xf0\xde\x2d\x0f\x3d\x7f\x61\xc0\xd3\xcf\xbd\xd2\x1a\x12\
\xf7\x9b\x38\x5f\xb8\x78\xbc\x85\x79\x59\xa5\x21\xad\xcc\x81\xfa\
\x34\xee\x1a\x02\xcf\x27\x4b\x3d\x09\x0d\x99\xd3\x38\x14\x55\x82\
\xb1\x04\x6b\x53\x48\x6b\x9b\xea\x33\xf1\x47\x5e\xf9\xf7\xe7\x97\
\x5c\x10\xd0\x55\xe3\x6b\x42\x92\x5c\x79\x2e\xcc\x42\x9c\x35\x64\
\x9d\xf3\xd8\xfc\xb8\xe0\x3d\x69\x78\x90\xc6\xc0\x22\xd0\xe9\x69\
\x87\x73\x01\x51\x41\x45\x10\xa3\x10\x45\x84\x28\xc2\x27\xe0\x26\
\x2b\x57\x78\xcf\xcf\x2d\x66\xb2\x73\x93\xd2\xcf\xef\xb4\x71\x25\
\xde\x11\x02\x05\x10\x10\x45\x42\xa0\xbb\x3d\xcf\xe5\xeb\x3a\xe8\
\x5a\x5a\x24\x88\x30\x34\x51\xe3\xd0\xd0\x0c\x43\x53\x75\x14\xd8\
\xd2\x55\xa0\xd4\x9c\x45\x8c\x01\x63\xc0\xa6\xa3\x18\xa5\x1e\x84\
\x17\x47\x63\x92\x74\xc9\xa4\xd5\x0a\x5b\x5b\x23\xda\x3a\x32\x0c\
\x16\x85\x97\x06\x23\x7e\x34\x38\xc3\x99\x31\x8f\x9f\xaa\x12\x30\
\x79\x1f\xe4\xd6\xdc\xaf\xee\xde\x5d\x7d\xf4\x17\x93\xb3\x00\x5d\
\x5c\xef\x08\x89\xbb\x0a\x49\xe1\x32\x59\xe1\x57\x6e\x5c\xc7\x3d\
\x1f\xb8\x9c\x75\xdd\x25\xb2\x51\xba\xa6\x57\xeb\x8e\xa3\xa7\x67\
\x78\x60\xf7\x51\x76\xff\x68\x84\x3f\xda\xb1\x9e\xab\xd7\xb5\x2e\
\x6c\x4a\x8b\x16\x8b\xc1\xa9\x3a\xbf\xf4\x58\x3f\xa3\x55\xcf\x8e\
\x4d\xcd\xdc\xb5\xad\x8d\xde\xb6\x88\x8c\x11\x02\xad\xcc\xc6\x9e\
\x03\x03\x65\xfe\xf0\x1f\x5f\xe2\x3b\x7d\x10\xac\x25\x08\x57\xf9\
\x24\x5c\x02\x0c\x9e\x0d\x58\x8d\xd7\x06\x1f\x56\x62\x0c\x4a\xe0\
\x43\x37\x6d\xe0\x8f\xef\x7e\x3b\x85\x7c\xc4\x89\xd3\x65\x0e\x0f\
\x4c\x91\xcf\x1a\x36\xae\x68\xa1\x77\x79\x81\xee\xb6\x1c\x12\x20\
\x6b\x95\xa6\x8c\xe1\xe8\x48\x85\x57\xa7\xeb\xa9\xab\x44\x40\x85\
\x91\x19\x87\x0b\xf0\xc1\xcb\x5a\xb8\xf7\xba\xa5\x14\x23\x65\xa0\
\x9c\x30\x30\xe3\x70\x71\xc2\xc6\xf6\x2c\xef\x58\x5b\xe2\xaf\xee\
\xdc\xc2\x87\x67\x63\xbe\xf3\xc3\x11\x02\xa1\x3b\xa9\xb9\xd5\xe7\
\x01\x12\xc2\x6a\xbc\x6f\x42\x84\x75\xdd\x2d\x7c\xe2\xf6\xad\x14\
\x9b\x32\x7c\x7b\xdf\x00\x9f\xdc\xb5\x8f\x93\x67\x2a\x98\x8c\x65\
\xc3\xca\x56\xde\xf6\xa6\x65\x3c\xf6\xfd\x21\xbc\x2e\xec\x94\x3b\
\xbf\x37\xc4\xdf\xed\x1b\x81\xc8\x22\xd6\xa4\x81\x6f\x0c\x6b\xdb\
\x73\xfc\xf6\x55\x4b\x28\x46\xca\x53\x27\x67\xf9\x93\xef\x4f\x50\
\x8b\x84\x81\xbe\x49\x36\x64\x3d\x7f\x76\xcb\x4a\xb6\xf4\x14\xf9\
\xbd\x5f\xde\xc8\xb3\xc7\xa6\x18\x2f\xc7\x79\xb1\x66\x0d\xb0\xf7\
\x2c\x40\x21\xac\xc5\x07\x83\x04\xae\xbd\xbc\x93\xd5\xcb\x4b\x8c\
\x4e\x56\xb9\xff\x6b\xcf\xf2\x52\xff\x04\x51\x26\x22\x6f\x84\x93\
\xaf\xce\x70\x72\xb4\x1f\xb5\x06\x5d\x94\x62\x3d\x6d\x59\xde\xd2\
\x53\x48\x63\xb1\x11\x87\x47\x27\x1d\xef\x5a\x5d\xa0\xab\x39\xe2\
\xd5\x99\x84\x2f\xfc\xd7\x18\x2f\x8f\x27\x34\x17\x2c\xe5\xd9\x3a\
\x4f\x1f\x18\xe6\x8b\x3e\x61\xd7\x5d\x97\xb2\xad\xb7\x85\xcd\xab\
\x4a\xec\x7d\x71\xcc\x98\x6c\xb4\xc9\x9d\x9b\x24\x3e\x4e\x56\x80\
\x40\x80\x2d\x6b\xdb\x51\x15\x4e\x9c\x9a\xe6\xe5\xfe\x09\x24\x04\
\xb6\xac\x6a\xe1\x81\xbb\xae\x22\x97\xb5\x20\x42\x2d\xf1\x7c\xe6\
\x9f\x5e\x62\x2e\x57\xef\xbe\xae\x9b\x3b\xdf\xd1\x35\x1f\x83\x3e\
\xc0\xc7\xbe\x35\xc4\x86\xf6\x0c\x2a\xd0\x3f\x95\x70\x72\x22\x01\
\x84\x4a\xd5\xe1\xa6\x62\x40\xd8\x7f\xa2\xcc\x58\x39\x61\x59\x4b\
\x44\xef\xf2\x22\xcf\x1c\x99\x24\xa9\xb9\x8e\xf3\xb2\x18\x1f\x1a\
\x2f\x0f\x24\x2e\x2d\x7e\x23\x2b\x58\x93\x7e\x31\x97\x31\xac\x5a\
\x56\xa0\x90\x8f\x68\x69\x8a\xa8\x25\x9e\x62\xce\xce\x67\x45\xff\
\x58\xf5\xac\x18\xf4\x02\xe3\x55\x87\x6f\xac\x35\x56\x85\x4c\x46\
\xc8\x65\x0d\xc1\x79\x9a\x36\xb6\xa3\x1b\xda\x59\x8a\xc7\x46\x8a\
\x0f\x90\x64\x2c\x5a\xc8\x42\xe2\xf1\xe7\x02\x9a\x6c\x34\x9c\xc4\
\x0e\x10\x0e\xbe\x7c\x06\xe7\x02\x6b\x3a\x4b\x5c\xb1\x71\x19\x4f\
\x1e\x18\xe2\xf9\x13\x13\xdc\x7a\xff\x1e\x96\xb5\x17\xd8\xf9\x91\
\xb7\xd1\xd1\x92\x3d\x2b\x63\x77\x7e\x6f\x90\xbf\xdf\x7f\x06\xac\
\x45\xa2\x74\x0b\xab\xab\x61\x65\x4b\x06\xe7\x03\xbd\x4b\x22\x6e\
\x78\x53\x91\xfd\xc3\x35\x06\xfa\x2b\x58\xab\x6c\xee\x2d\xf1\xbe\
\xd5\x4d\xb4\x35\x59\xa6\x6a\x9e\xd1\x52\x89\xb6\xcd\x0a\xde\x4d\
\x8c\x9c\xbb\x50\x7b\xe7\x5f\x41\xc4\x23\xb0\xf7\xbf\x87\x38\xd2\
\x37\x46\xa9\x90\xe1\x73\x77\x5c\xc9\xf5\x5b\xbb\x89\xac\x32\x38\
\x5a\x21\x6b\x0d\x19\xab\xf3\x2b\xca\x1c\x63\x31\x6b\x69\x2f\x44\
\x74\x14\xd3\xb1\xbd\x10\x71\x49\x73\xc4\x73\x13\x31\xc7\xcb\x09\
\xa5\x8c\xf2\x5b\x97\x96\xd8\x9c\x13\xf2\x40\x51\xe0\xca\x92\xe1\
\xd6\x35\x79\x8c\xc0\xf3\x13\x09\x71\x2e\x4f\xd7\x8a\x25\xbe\xbb\
\xa7\xe3\xf0\xf9\x12\xab\x39\x0e\xa1\x22\xa2\x85\xbe\xe1\x32\x9f\
\xff\xca\x3e\xfe\xfc\xe3\xd7\xf1\xe6\x75\xed\x3c\xfa\xfb\xd7\xf3\
\xca\xd0\x14\x22\x42\x6f\x57\x33\xad\x85\x0c\x43\xe3\x55\xca\x35\
\x37\x4f\xf8\xb1\x1b\x56\x70\xc7\xb5\x9d\xa9\xc4\x8d\xa2\x76\xb8\
\xea\xf8\x9d\x67\xc7\x78\xf8\xf0\x14\x9f\xba\xbc\x95\x0d\x2d\x11\
\x7f\xb1\xfd\x12\x8e\xbd\xb5\x8d\xac\x11\x56\xb5\x66\xc8\x5a\xe1\
\x78\xd9\xf1\xe4\xab\x31\x6b\x3a\x33\xb4\xcf\xfa\xca\x54\xc5\x1f\
\xe5\x7c\x89\x33\x27\xa8\x27\x43\x41\x74\x3d\x08\xdf\xfc\xcf\x13\
\x4c\x55\x1d\x1f\xfd\xc0\x9b\xd9\xb6\x71\x29\x9b\x57\xb5\x02\x30\
\x31\x53\xe7\xbb\x2f\x0c\xf1\xc8\xd3\x7d\x1c\x1a\x9c\x66\xb2\x92\
\x30\x5a\xae\x83\x40\x21\x67\x1a\x95\x75\xfa\xce\x72\x08\x88\xc0\
\x77\x4f\x55\x99\xae\x8c\xb2\xa3\x3b\xcf\xd6\xce\x3c\xeb\x96\x64\
\x00\x38\x33\x9b\xb0\x7f\x38\xe6\x99\x29\xcf\x58\x1c\x88\x8c\xb0\
\xbc\xc5\x0e\x75\xb6\x70\xfc\x89\x73\x7b\x92\x96\x77\xff\x65\x26\
\x2e\x57\xff\x36\x04\x6e\x9b\xdb\xb2\xc4\x58\x8a\xcd\x39\x7a\x7b\
\x5a\x69\x2e\xe6\xc0\x28\x23\x33\x09\xfd\xe3\x35\x6a\x41\xd1\xc8\
\xb2\xba\xb3\x99\x52\x29\x4b\xae\x25\x47\xae\x25\x8b\x64\x2c\xc1\
\x2a\x58\x83\x17\xe5\x74\xc5\x41\x80\x33\x03\x65\x6a\x13\x35\xd6\
\x76\xe4\x68\xc9\x5b\x44\x85\x91\x59\xcf\x78\x02\x5b\x37\xb5\xd1\
\x52\x88\xc8\x18\xc5\xaa\x3c\xa6\xc2\xaf\x7f\x69\x5b\x31\x3e\xaf\
\x69\x6a\x7a\xd7\x43\xb7\x79\xe7\xbf\x86\x6a\x36\xdd\x5b\x1b\x8b\
\xae\x35\x88\xb5\x8d\xbd\x76\x61\x21\xb6\xb9\x88\x7c\x6b\x9e\x7c\
\x5b\x1e\xdb\x14\x35\x4a\x27\x43\x68\x8c\xd6\x28\x91\x0a\xb5\xa9\
\x98\xc1\xa3\x93\x04\x0f\x62\x04\x99\x6f\x4f\x15\x63\x94\xf5\x2b\
\x9b\xd9\xbc\xa6\x44\xc6\x68\xcd\xa8\x7c\xe8\xc1\x2b\x8a\x8f\xfd\
\xaf\xd5\x8c\xc9\x66\xf6\x88\xb5\x87\xe6\x36\x7e\x31\x0a\x46\x41\
\x17\x4c\x54\x30\x19\x4b\xb1\x2d\x47\x7b\x57\x91\xe6\xf6\x3c\x36\
\x6b\x40\xd3\xed\x8d\x86\xc4\x82\x20\x80\xab\x7b\x46\xfa\x26\x71\
\xb5\x3a\xde\x7b\xbc\x0b\x04\xef\xd3\xb9\xf7\x38\x17\x18\x18\xae\
\x30\x53\x71\x00\x87\x20\xec\xb9\x60\xb9\x95\x6d\xce\x0f\x6b\x26\
\xda\x85\x31\xb5\x14\x32\xad\x7a\x45\xd3\x02\xd3\x66\x2d\xcd\x4b\
\xf2\x74\x74\x15\x69\x69\xcf\x63\xb3\x16\x4c\x03\x4c\x15\x44\xd2\
\x32\x5e\x04\xd1\x54\x9e\x89\xc1\x29\x2a\xe3\x15\x70\x0e\x92\x04\
\x5c\x42\x70\x9e\xe0\x52\x58\xef\x3d\x33\x95\x84\x93\xa7\x67\x6a\
\x3e\x84\x47\x08\x0c\x5f\xb0\xed\x9c\x7d\xe1\x71\x72\x9b\x77\x1c\
\x45\xcd\x5b\x51\x5d\x4f\xa3\xe7\xb5\xd9\x88\x62\x5b\x9e\x96\x8e\
\x02\xf9\x52\x0e\xcd\xda\xc6\x76\xa6\x60\x35\x95\x7e\x6e\xde\xb8\
\x57\x55\xea\xd3\x55\xce\xbc\x3c\x8a\x4f\x12\x24\x90\x5a\xa3\x3e\
\x9c\xcf\x25\x11\x82\x08\x33\x55\xf7\x74\x2e\x6b\xee\xfb\xfa\x0d\
\xed\x95\x8b\xf6\x24\xeb\xde\xb9\x69\x42\x33\xd1\x7d\x62\xcd\xf1\
\x28\x1b\x51\x5a\xd2\x44\x47\x57\x89\x52\x7b\x01\x9b\xb3\x67\x49\
\x1e\xe6\xe7\x02\x66\xce\x7b\x8d\x1e\xd9\x79\xc6\x8e\x9e\xc1\xcd\
\xd6\x20\x69\x78\xaf\x61\x92\xb8\xc6\x6f\x0d\x73\xfe\x64\xe2\xc3\
\x7d\x3e\x62\xfc\xff\x6c\xdc\x4f\x3f\xf9\x08\x76\xc3\xad\xa7\x4a\
\x4b\x9a\x86\x5b\x97\x16\xb6\x37\x95\x72\x39\xcd\xd8\x05\x4f\x35\
\x32\x34\x34\x0a\x02\xce\x29\x54\x31\x8a\xaa\x50\xee\x1f\x67\xea\
\xe4\x58\x5a\x85\xcf\x55\xda\x84\x86\x17\xc3\xbc\xf7\x54\x98\xb4\
\x56\x7f\xb7\x9e\x84\xdd\x2f\xfc\x46\x6f\x78\x4d\x27\x0b\xf5\x23\
\x8f\x87\x15\x37\xde\x71\xc4\x64\xa3\x72\xb0\xe6\xed\x58\xcd\xcd\
\xc3\x2d\x82\x5c\x6c\x61\x51\x62\xb9\x72\x8d\xd1\x83\x03\xf8\x5a\
\x7d\xbe\x05\x98\x93\x57\x42\x68\x24\x11\x08\x32\x09\x7c\xda\xa8\
\x7c\x65\xfc\xb3\x5b\x93\xd7\x75\xb2\x70\xf8\x81\x77\xc7\xc1\xe8\
\xc3\x18\xfd\x28\xd6\x9c\x48\xe1\xd2\x18\x0b\x73\x31\x77\x96\xa5\
\x32\xe3\x03\x13\x47\x4e\x53\x9f\xaa\xcc\x4b\x28\xf3\xf2\x3a\x70\
\x0e\x49\x1c\xe2\x5d\xbf\xd5\xf0\x09\xf1\xfe\xe1\xd1\xcf\x6d\x8b\
\x5f\xf7\xd1\x07\xc0\x91\xcf\x5f\x1f\x63\xcd\xd7\x31\xfa\x41\x8c\
\xf9\x36\xd6\xc4\x61\x3e\x31\x16\x2c\xcc\x2f\x41\x4a\x65\x68\x9c\
\xd9\xfe\x33\x88\x6b\x80\xb9\xa4\x01\x99\xde\x8b\x73\x75\x0d\x7e\
\x4f\xa4\xf2\x6b\x21\x4e\xbe\x3a\xf9\xc0\x35\xf1\x8f\x7d\xfc\xb6\
\xf1\x0b\x3f\x00\xa3\xed\xc1\xe8\xed\x18\x73\x27\x56\x37\x61\x4c\
\x84\xd5\xf9\x58\x14\xab\xf8\xd9\x98\xe1\xff\x38\x42\x3c\x31\xd3\
\x78\xb9\x20\xda\x38\x48\x32\x26\x11\xa3\x87\x35\x13\xfd\x4d\xbe\
\x25\xff\x68\xe7\x9a\xf6\xe1\x03\xf7\xbc\xe5\x8d\x3d\xc0\xdc\xf0\
\xe5\x1f\x0a\x46\xbb\xb0\xba\x1d\x63\xde\x1f\xac\xd9\x8a\xd5\x4b\
\x30\x26\xab\x82\x8c\x3f\x77\x9c\xa9\xc3\xa7\xd2\xf6\x33\xcd\x81\
\xaa\xa8\x8e\x88\xd1\x03\x26\x13\x7d\x53\x23\xf3\x44\xae\x98\x1b\
\x3c\xf5\xd0\x7b\xc2\x4f\xf4\x08\x78\xc3\x5f\xbf\x08\xaa\xd9\x60\
\xb4\x07\x6b\xd6\x88\xd1\x4d\x6e\x72\x76\xc3\xe9\x27\x0f\x46\x6e\
\xb6\x86\xaa\x38\x63\xf4\x68\x80\x43\xc6\x9a\x63\x62\xb4\x6f\xfd\
\xd5\xbd\xd5\x7d\x1f\xdf\xf6\xb3\x43\xf7\x9f\xfa\xf5\x3f\xe5\x4a\
\x50\xe4\x07\x90\xdf\x8f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x10\
\x0a\x8a\xcd\x47\
\x00\x6e\
\x00\x65\x00\x61\x00\x72\x00\x65\x00\x73\x00\x74\x00\x5f\x00\x62\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x69\x00\x6e\x00\x67\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x05\xe0\x84\x67\
\x00\x67\
\x00\x65\x00\x6f\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0c\xcd\xf0\x47\
\x00\x67\
\x00\x65\x00\x6f\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x04\x3e\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x37\x4d\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x04\x3e\
\x00\x00\x01\x66\xe8\x91\x28\x26\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x67\x0b\x6e\x20\xd0\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x37\x4d\
\x00\x00\x01\x67\x62\x19\xd2\xf4\
"
qt_version = QtCore.qVersion().split('.')
if qt_version < ['5', '8', '0']:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
qInitResources()
| 63.290799 | 121 | 0.726996 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x3a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x18\x08\x06\x00\x00\x00\x9b\x53\xff\x34\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc2\x00\x00\x0e\xc2\x01\x15\x28\
\x4a\x80\x00\x00\x03\xcf\x49\x44\x41\x54\x48\x4b\xed\x56\x5b\x68\
\x1c\x65\x18\x3d\xff\x5c\xb2\x9b\x6d\xb3\x9b\x64\x77\x33\xcd\x15\
\xa1\x22\xbe\x58\x90\x78\x41\xad\x22\x22\x14\xec\x83\x54\x85\x3c\
\x59\x0a\x52\x28\x3e\x08\x55\xe8\xab\x6f\xd2\x07\x41\xf4\xd9\xbe\
\x09\x5e\x09\x58\x04\x8d\xa8\x01\x51\x8b\xd5\x74\xd3\x98\xcb\xa6\
\x26\xcd\xa6\xcd\x65\x2f\xb3\x6b\x76\x93\xdd\xec\xce\xcc\xce\x78\
\xfe\xd9\x0d\x36\xb0\x95\x9a\xf8\x98\x03\xdf\xcc\xce\xbf\xdf\xfc\
\xdf\x99\xf3\x9d\xfd\x66\x85\x47\x60\x8f\x18\x1d\x1d\x85\x6d\xdb\
\x18\x19\x19\x69\xae\xfc\x77\x28\xcd\xf3\x9e\x90\xc9\x64\x90\xcd\
\x66\x9b\x57\x7b\xc3\xbe\x08\xa8\xaa\x0a\x45\xd9\xd7\x16\xfb\x23\
\xf0\x7f\xe0\x80\xc0\x01\x81\x03\x02\x07\x04\xee\x8d\x80\x63\x63\
\x71\x61\x01\xa9\xd4\x92\x1f\x4b\x37\x6f\xfa\xcb\x72\x0a\x0a\x21\
\xfc\xcf\x7b\xc5\x3d\xbd\x8c\x5e\x7e\xf7\x43\x84\xfb\x87\x00\xcf\
\xe5\xf8\x55\x30\xb9\xb8\x8c\xf7\x5f\x38\x8e\xd9\xab\x3f\x63\xab\
\x5c\xc1\xf9\xf3\x6f\xc0\xae\x03\xba\xca\x64\xd7\x81\x67\x3b\x24\
\xc6\x74\x5e\x0a\x4d\x27\x53\xf9\x45\x6b\xfc\x2b\x81\x5b\xcb\xb7\
\x21\xec\x0d\x3c\xf7\xf5\x12\xee\x1f\xec\x85\xeb\x92\x00\x37\x9c\
\x9e\x98\xc0\x17\x27\x7b\x90\xb8\x3a\x81\x72\xb5\x86\x73\xa7\x5f\
\x42\x89\x44\x96\x53\xeb\xb8\x7c\x3b\x88\x50\x28\xc8\xbb\x3d\xd6\
\x55\x31\xf3\x67\x0a\x1f\x5d\x38\xd7\xd8\xb0\x05\x76\xb5\xc0\xe5\
\xab\x75\x6e\xea\x37\x24\x27\xbf\x45\x32\xf1\x39\x7a\x23\xcb\xe8\
\xef\xab\x00\x75\x01\x85\x3c\x77\xa2\xe6\x00\x87\x23\x21\x04\x02\
\x01\xb4\xb5\x05\x10\xea\x09\xe3\xc8\x60\x37\x7e\xdc\x0e\x61\x3c\
\x38\x80\x1f\xdc\x08\xa3\x0b\x63\xb5\x0e\xcc\x9a\x39\x64\x6f\x5c\
\xc6\xe2\xec\x38\xe6\xe7\x26\x51\xae\x54\x9b\xd5\x1a\xf0\x15\x28\
\x97\x6b\xb8\xf1\xc7\xf7\xb8\x78\x25\x8d\x70\x2c\x46\xa5\x15\x68\
\x9a\x86\x6f\x7e\x49\x60\xfa\xed\xa7\xf0\xec\xa5\x25\x0c\xf6\xc9\
\x75\x17\x2e\xe5\x0c\x66\x16\xf0\xd9\x85\x57\x28\x73\xbf\xbf\x89\
\xe7\xad\x01\xb5\x34\x3e\x18\xbf\x85\x2f\xb7\xfa\xd0\x2e\xc8\x90\
\x3d\xa8\xb3\x2d\xe9\x5f\xbf\x43\xe2\xe2\x49\xc0\x96\x42\xbb\xc8\
\xa4\xf3\x28\x56\xda\x61\x0c\x3d\x82\x48\x47\x08\x5a\xa1\x50\x80\
\xa8\x24\x10\xe8\xf0\x30\x17\x7d\x10\x46\xa0\xd1\x11\xf9\xaa\xfd\
\x4b\x28\x38\x44\x22\xbf\xbf\xf5\x10\x55\x70\xfd\x75\x1f\x5a\x1f\
\x0f\x15\xbc\x7a\xfa\x14\xfd\xc9\x62\x28\xd3\x00\x2c\xc8\xfe\x5b\
\x75\x07\x2a\xea\xfe\x03\x98\x65\x0b\x0f\x74\xb2\xff\x1e\x85\x76\
\x98\xe7\x09\x18\xf1\x38\x0c\xd5\xe3\x7f\x89\x9f\xe0\x58\xc3\x50\
\x8a\xb9\x49\x74\x19\x31\xb8\xfc\x32\xa8\x29\x08\xea\x1a\x02\x7e\
\xe8\x34\x96\x80\x23\x0b\xdb\xac\xe1\xd0\x55\x3b\x51\xe5\xa3\xd5\
\x2a\x38\x7b\xe6\x45\xbc\xfe\xda\x29\xd6\xdf\xc0\x3b\x9f\x24\x91\
\xce\x2b\x78\xac\xba\x8a\x61\xc7\xc4\xc6\x95\x71\x5c\x7a\xb4\x84\
\x4f\xdf\x3c\xc1\x5c\x49\x92\x90\x3f\x18\x69\x39\xaa\x61\x18\x71\
\x94\xcc\xeb\x50\x84\xef\x55\xae\x59\x0e\x56\x57\xd6\x19\x69\xac\
\x31\x16\x53\xab\x78\xbe\x57\x40\xef\x3c\x4c\xe5\x5a\xf8\xd4\xb2\
\xf1\xf4\xe3\xc7\x70\xfc\x99\x61\x5c\x9b\x5f\xc3\xc7\xb5\x01\x24\
\xf4\x18\xa6\x83\x06\xae\x07\x68\xd0\xa2\x83\x63\x47\x7b\xa5\x17\
\x1b\xd1\x02\x82\x2d\x11\xa6\x99\xf3\xea\x5b\xd7\xd0\x33\x60\x70\
\x85\x4c\xef\x2c\xa6\xd1\xcd\x9b\xb5\xe6\xc5\x5d\x40\xd5\xa6\xe6\
\x57\x70\x36\xd9\x83\xb8\xde\x68\x93\x4a\xf9\xc7\xbe\x1a\xc3\xf6\
\x7b\x27\x58\x84\xf2\xfb\x24\xe4\x81\x12\x48\xdb\xf3\x9e\x5c\x36\
\x07\x11\x7a\x18\x4a\x34\x1a\x43\xa0\xeb\x49\xcc\x4c\x65\x50\xca\
\x14\xf9\x64\x4c\xb2\x98\x55\x65\x6c\x59\xff\xdc\xec\x6f\xd0\x1a\
\x96\xe3\xc2\xac\x09\xe4\x79\x9f\xc9\x58\xdf\x16\xb8\x8f\xc2\x89\
\x36\xce\x00\x59\x54\x4a\xcf\xf9\x01\xcd\xc3\xe6\x66\x11\x33\xc9\
\x1c\xf4\xc8\x13\x88\xc5\xe2\xbb\xe7\x40\x2e\x5f\x44\xd1\x5c\xe0\
\x2c\x29\x20\x1a\xd1\x49\xb4\x8e\x48\x67\x98\x1b\x48\x23\x31\x61\
\xe7\x29\xee\x04\xdd\xee\x72\x52\xae\xac\x65\xa0\xc9\x22\x84\xc3\
\x81\x35\x34\x24\x15\x55\x51\xca\x6f\xc0\x76\x55\x14\x8a\x36\x87\
\x52\x37\x22\xd1\xa3\x88\xc7\x3a\xfd\x3c\x89\xbb\x0e\xa2\x52\xa9\
\xcc\xc1\x63\xa1\x90\x4f\xf3\x17\x50\xe1\x94\xab\xb3\x3b\xd2\xc9\
\x74\xa4\x3c\xb3\x7f\x3b\xbd\x15\x74\x92\xa6\x6a\x7e\x41\x21\x74\
\x3f\xaa\x36\x89\x89\x76\x74\x77\x1f\xe1\x40\x6a\x43\x38\x7c\xa8\
\x91\xbc\x0b\xc0\xdf\xcb\x70\xa6\xef\x92\xee\x15\xda\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x33\x0b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xff\x00\x00\x00\x79\x08\x06\x00\x00\x00\xf6\xe1\xf7\x0f\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x09\xda\x00\x00\
\x09\xda\x01\xb8\x5e\x7a\x24\x00\x00\x32\x49\x49\x44\x41\x54\x78\
\xda\xed\x7d\x79\x78\x54\xd5\xdd\xff\xe7\xdc\x99\xec\x21\x99\x04\
\x42\x00\x59\x02\x44\x40\x04\x09\xe0\x06\xca\xa6\x68\xed\x6b\xad\
\x28\xb5\x76\x51\x09\xda\xda\xc5\x2d\xb4\x56\x5c\xde\x9f\xe2\x6b\
\x15\x68\x6b\x85\xfa\xf6\x6d\x6d\x51\xa1\xb5\xb5\x6a\x5b\x02\xad\
\x4a\x55\x20\x08\x2a\x6a\x2b\x41\x16\x85\x24\x24\x61\x09\x61\x49\
\x32\x93\x65\x66\x32\xcb\x3d\xbf\x3f\xee\xb9\x73\xcf\x3d\xf7\xde\
\xc9\x10\xb2\x21\xe7\xfb\x3c\xe7\x99\xbb\x6f\x73\x3e\xdf\xfd\x7b\
\x8e\x42\x29\x85\x6c\xb2\xc9\x76\xf6\x35\x05\x92\x24\x49\x3a\x2b\
\x49\x82\x5f\x92\x24\x09\x7e\x49\x92\x24\x49\xf0\x4b\x92\x24\x49\
\x82\xff\x4c\xa0\x69\xeb\xeb\x46\x4c\x2c\x3d\x52\x32\xfa\x6f\x87\
\x57\x0d\x7a\xed\x50\xd9\x39\xaf\x1d\x2a\x73\xff\xa9\xa6\xc9\xf5\
\x72\x6d\x93\xfb\xe5\x5a\xe3\xf7\x95\x83\x4d\xae\x57\x0e\x35\xb9\
\x5e\x39\xd4\x34\xa2\xb4\x6e\xc7\xd8\xf5\x47\xcb\x26\xfd\xb3\x7e\
\xed\xdc\x7f\x9d\x28\xf9\xca\x3b\x27\x2f\x97\xdd\x41\x92\x04\x7f\
\x1f\xa6\xa2\xb5\x47\x46\x0c\x7f\xed\xd0\xd2\x9c\x97\x6b\xca\x92\
\xd6\x54\x35\x91\xdf\xef\x57\xb7\xd7\xb5\xd4\xec\x3e\xd6\xf6\xcc\
\x81\x06\xff\x1d\xc7\x9a\x02\xb3\xea\xbc\xc1\x59\x51\x7f\xd8\xa3\
\xfa\xc3\x9e\xa8\x3f\xe2\x51\xfd\x11\x4f\x34\x10\xf1\xa8\xfe\xb0\
\x47\x0d\x84\x3d\x6a\x20\xe2\x39\xe8\x6b\x2f\xda\xef\x6d\x9f\xf5\
\x69\x53\x70\xde\xc6\xe3\xfe\x67\x5e\x3f\xda\xb6\x95\xbc\x7c\x48\
\x4d\xfd\x7b\x5d\x53\xd1\x1b\xc7\xcb\x6e\xdc\xd8\x30\xef\x6b\x9b\
\x1a\x3c\xb2\x8b\x48\x92\xe0\xef\x45\x1a\xfb\xd7\x43\x97\xe7\xbe\
\x5c\xb3\xd6\xbd\x6a\xbf\x7f\x67\x5d\x73\xcd\xa1\x13\xad\x0f\x7a\
\x9b\x82\xb3\x22\xfe\xb0\x07\x11\x95\x68\x47\x51\x80\xb2\x13\x28\
\x8d\x6d\x8a\x6d\xa4\xc6\xa2\xfd\x36\x00\x51\x95\xb4\x07\x22\x9e\
\x9d\xde\xf6\x59\x07\xda\x23\x6b\x01\x34\xdd\xb4\xb9\xa1\xfc\xe6\
\xcd\x0d\x25\xdf\x28\x6b\x2c\x90\xdd\x45\x92\x04\x7f\x0f\x50\xe1\
\x2b\xb5\x97\x67\xbd\x74\x60\x6d\xd2\xef\xf6\xf9\xf7\x1f\x6d\xde\
\xda\xd4\xe0\x9f\x17\x0d\x46\xd2\x38\x84\xdb\x9c\x45\x9d\x37\x11\
\x61\x9d\xc6\x39\x8f\x52\xec\x0d\x46\x59\x3c\x14\x93\x54\xe0\x19\
\x4a\x69\xf5\x37\xca\x1a\xcb\xbf\xb9\xa5\xb1\xe4\x5b\xef\x36\x49\
\x46\x20\x49\x82\xbf\xab\x69\xe8\x4b\x07\x4a\xb2\x56\xed\xab\xac\
\xaa\xf3\x6d\x6d\x39\xd9\x36\x2f\x12\x08\xa5\x81\x52\x43\x9a\x3b\
\x01\xd8\x76\x1b\xb7\x91\xd7\x0a\x78\x0d\x40\x5c\x66\xeb\xe1\x60\
\x14\x41\x95\x42\x65\xb7\x56\x01\xa8\xc0\x24\x4a\x35\x46\xf0\xad\
\x77\x9b\x56\xdf\xf2\x6e\xd3\x6c\xd9\x85\x24\x49\xf0\x9f\x26\x0d\
\xf9\x43\x65\x49\xe6\xff\xed\x6e\x3a\x52\xef\x7b\xa6\xc5\x17\x18\
\x0d\xaa\x42\x6b\x1c\x88\x79\xe0\xf2\xbf\x26\x35\x5f\x00\xbe\x1d\
\xd3\x30\xe9\xfb\xd4\x51\x0b\xa8\x08\x46\x41\x61\x30\x00\x4a\x29\
\x54\xb6\x8f\x50\xba\x00\xc0\xe6\x5b\xb7\x36\x95\x2d\xd8\xda\x34\
\x4f\x76\x25\x49\x12\xfc\xa7\x48\xe7\xac\xae\x28\x49\x7f\xf6\xd3\
\xa6\xa3\x47\xbd\xcf\xb4\xb5\x86\x3c\x31\x29\xaf\x83\x5a\x5f\x56\
\x55\x06\x64\xa1\x11\xf1\x8a\x4e\xaa\x7f\x82\x9a\x03\x8c\xfb\x56\
\x04\xa3\x1a\xd0\xb9\x9d\x44\x33\x05\x74\x4d\x00\x94\x62\x96\x0a\
\xac\xbd\x6d\x9b\xb7\xac\x78\x9b\x77\xb6\xec\x52\x92\x24\xf8\x3b\
\xa0\x91\xab\xf7\x5f\x9e\xba\xa2\xbc\xa9\xee\x48\xe3\x33\x81\x96\
\x76\x8f\x86\x24\x95\x03\xbc\xce\x00\x38\x0d\x00\xb0\x08\x6d\x8b\
\x93\x0f\x0e\xdb\x10\x87\x01\xd8\x9a\x01\x00\xc2\x2a\x5a\xc3\x2a\
\x03\xb9\x05\xf4\x31\x4d\x40\xd5\x98\xc2\x2c\x00\x9b\x17\xbe\xe7\
\x2b\xbb\xfd\x7d\x5f\x81\xec\x5a\x92\x24\xf8\x6d\x68\xc0\xaf\x77\
\x95\xd5\x1c\x6a\xd8\xda\xde\x12\xf4\x68\x98\x13\x54\x7c\x51\xea\
\x9b\x18\x02\x75\xb6\xdb\x6d\xcd\x02\x6a\xbf\xdd\xba\x62\xcb\x27\
\x76\x04\xa3\x06\xe8\x39\xf5\x5f\x27\x42\x29\x88\x89\x29\xd0\x59\
\x6f\x1f\xf6\x56\x8f\xfa\xf3\x81\x32\xd9\xbd\x24\x49\xf0\x33\x1a\
\xf6\xbb\xbd\x37\xbb\x96\x7d\xe4\x6f\x38\xd9\x32\x2b\x2e\xa8\x4d\
\x8c\x20\x1e\x03\x70\xf2\xfc\xd3\x8e\x1f\x86\xda\x88\x7e\x6a\x65\
\x26\x0d\xc1\x48\x0c\xec\xbc\xfa\x1f\x4f\x13\x18\x90\x92\x84\xea\
\x06\xff\xac\xec\xe7\xf6\xfa\x67\xbe\x5a\x73\xb3\xec\x66\x92\xce\
\x6a\xf0\x0f\xfc\x55\xf9\xda\xc3\x87\x4e\xfe\x45\xe5\xbd\xf7\x94\
\x6a\xe2\x54\x07\xa3\x1a\x4f\x03\xb0\x03\x3e\x75\x70\xf6\x39\x30\
\x00\x27\xdb\xdf\x72\x38\xc7\x10\x54\xa0\x21\xa2\xc6\x80\xee\x04\
\x7a\x9d\x08\xa5\xc8\x49\x76\x01\x00\x9a\x83\xe1\xb4\xad\x87\x9b\
\xfe\x72\xfe\x1f\x2a\xd6\xca\xae\x26\xe9\xac\x03\xff\xe8\xdf\xee\
\x1e\x91\xb9\xec\xc3\xa3\x27\x8e\x79\xe7\x99\x1c\x77\x26\x1b\x9d\
\x03\x7a\x4c\xb7\x56\x05\x75\x9d\xdf\xa7\x03\x1f\xf6\xd7\x12\xf0\
\x6b\xda\x6e\x07\x7a\x0b\x9f\x20\xdc\x3e\x8a\x0f\xfc\x11\x33\xe8\
\xa9\x01\x74\x9d\x74\x46\xa0\x0a\x97\x00\x80\xbd\xc7\x9a\xe7\xe5\
\xff\x76\xf7\xd1\xab\xfe\x52\x35\x42\x76\x39\x49\x67\x05\xf8\x47\
\x3e\x5b\x7e\x73\x75\xbd\xf7\xb3\x36\x6f\xdb\xa0\x98\x3a\x0f\x74\
\xa0\xc2\xf3\xfb\x55\x2b\xe8\xd5\x38\x3e\x01\x3b\xa0\xdb\xad\x73\
\xc0\x36\x40\x4a\x1d\x1d\x7f\x6a\x48\x15\x58\x83\xb3\xca\xaf\x31\
\x05\x8e\x03\x10\xad\x1d\xf7\x05\x06\x7d\x78\xac\xf9\xb3\x19\x2f\
\x55\x48\x33\x40\xd2\x17\x1b\xfc\x85\xbf\xda\x71\x73\x4d\x5d\xe3\
\xcb\xd4\xdf\x9e\x66\x02\xa0\xa3\xf3\x8e\x03\xb7\xca\xab\xfe\x36\
\x1a\x80\xc9\x37\x40\xcd\x66\x82\x05\xe1\x76\x9b\x68\x07\xeb\xc2\
\x0a\x05\x2a\xda\xa3\x16\xe9\x6f\xa7\xf2\x53\x0a\x24\xbb\x88\x01\
\xfc\xd8\x4e\x82\x16\x7f\x38\x6d\xdb\xc1\x86\x97\x8b\xd6\xec\x5b\
\x2c\xbb\x9e\xa4\x2f\x24\xf8\x47\x3c\xfd\xf1\xd2\xaa\x23\x0d\x2f\
\x23\x12\x25\x26\xbb\x5c\x8d\xe7\xc0\x83\x8d\x3a\x0f\x33\x63\xb0\
\x30\x10\xd8\x6f\x73\xc2\x3f\xa5\xce\x8c\xc1\xe2\x27\x30\xef\xff\
\x8c\x79\xfd\xed\x9c\x7f\xbc\xca\xaf\x02\x18\x90\x9e\x64\x02\xbd\
\xc6\x04\x18\x23\x88\xaa\x64\x67\x45\xfd\xb2\x31\xab\xf6\xfe\x4d\
\x76\x3f\x49\x5f\x28\xf0\x17\x2c\xff\x70\xd5\xc1\x23\x8d\x0f\x22\
\x12\x25\x66\xc7\x9e\xca\x31\x00\xd1\x9b\xaf\xda\x83\x1d\x36\xce\
\x3d\xdb\x26\x98\x13\x80\x35\xbc\x67\xb1\xf9\x89\xf5\xb8\xd8\xb1\
\xe2\x3a\x05\x42\x2a\xc2\xec\x1a\x89\x48\x7f\x10\x08\x92\x9f\x35\
\xaa\x9d\x58\x51\x71\xf4\xc6\x91\xbf\xdd\xbd\x45\x76\x41\x49\x5f\
\x08\xf0\x17\x2c\xdd\xbe\xaa\xb6\xae\xf1\x0e\xab\x5d\x4e\x63\x9d\
\xde\x04\x28\x27\xaf\x7e\x0c\x61\x76\xa0\xe7\x01\x0c\x33\xe8\x79\
\xcd\x81\x47\x2f\x11\x8e\xd5\xb7\xc7\x2d\xee\xb1\x6e\xda\xd7\x6e\
\x48\x7f\xcd\x5d\xe0\x2c\xfd\x63\xc0\xe7\x84\x3e\x08\x31\xee\x4f\
\x08\x6a\x0e\xd4\xcf\x1c\xfd\x9b\x5d\x92\x01\x48\x3a\xb3\xc1\x7f\
\xee\xcf\x3f\xba\xb9\xf6\x84\xf7\x76\xb3\x94\x86\x35\x9f\xde\x49\
\xdd\x77\x0a\xdf\x59\x8e\x53\x05\xe7\x9f\x83\xc9\xc0\x03\xbd\xa3\
\xb0\xbf\x9d\x39\x60\xe3\x08\xac\x0e\x9a\x1d\x7f\xf1\xa4\x3f\x5c\
\x0a\x03\xbe\x62\xa8\xfe\x31\x86\x60\xac\x1f\xa8\x3d\x31\xb3\xe8\
\xc5\xcf\xa5\x0f\x40\xd2\x99\x09\xfe\x71\xcb\x3f\xbc\xb9\xf2\xf0\
\xc9\x97\x11\x8a\x10\x1e\xe7\x56\x90\x3a\xc5\xea\x39\x10\xda\xed\
\x13\x1d\x7a\xf1\x62\xfe\x1d\x69\x08\x66\xd4\x1b\x60\xb4\x65\x12\
\x02\x23\x88\xa8\xf0\x45\xd5\x84\xa4\xbf\x2b\xc5\x6d\x0b\x76\xcb\
\xba\xaa\x62\xe7\xe7\x47\x96\x4d\x7e\xe1\x33\xc9\x00\x24\x9d\x59\
\xe0\x3f\xef\xe7\x1f\x8d\xd8\x77\xc2\xf7\x22\x89\x44\x89\x05\x98\
\x10\x80\x67\x91\xd2\x1c\x63\x50\xed\x18\x05\xe2\xd8\xf9\xb0\xb1\
\xeb\x01\x8b\x73\x50\xdf\x69\x31\x13\xe2\xad\xc3\xde\xac\x00\xf0\
\x61\x20\x6a\xda\xe9\x24\xfd\x29\xe1\x3c\xfe\x84\x00\x0a\xe7\xf8\
\x23\x56\x06\x50\x5e\x73\xe2\xa9\xe9\x7f\xdc\x3f\x51\x76\x49\x49\
\x67\x0c\xf8\x2b\x8e\x37\x95\x27\xb7\x05\xd3\x14\xca\xd4\x5d\x31\
\x7b\xcf\x2e\x17\xdf\x04\x28\x51\xd5\x77\x48\xf3\x85\x18\x09\x88\
\x93\x33\x00\x9b\x68\x00\x71\x78\x81\x84\xec\x7e\xe3\xe4\xf6\x90\
\xca\xf1\x24\x2d\xaf\xdf\x4e\xfa\x53\x1e\xf0\xa2\xe4\x57\xc4\x7d\
\x0a\x10\x68\x57\x76\x1c\x38\xfe\x89\xec\x92\x92\xce\x08\xf0\xe7\
\xfd\xbf\xcd\x65\x29\xcd\x01\x0f\xa1\x14\x0a\xa5\x9c\x99\x6c\x03\
\x42\x0b\x50\xf9\x6d\x80\xbd\xad\x0f\x98\x55\x7f\x24\xa8\x11\x00\
\x16\xbf\x83\x45\xe3\x8f\x97\x08\x44\x9d\xfd\x04\x2a\xc5\xc1\xb0\
\x20\xf9\x61\xf0\x39\x3d\xeb\x8f\xc2\x06\xf8\x31\x89\xaf\x18\xda\
\x80\xa2\xc4\xb6\x07\x5b\x02\xee\x61\x2b\x3e\xa9\x91\xdd\x52\x52\
\x9f\x06\xff\xa8\xff\xd9\x5a\xd2\x5e\xdf\x34\x4b\xa1\x80\x8b\x01\
\x9f\x50\x0a\x97\x1a\x2f\x6d\x17\xf6\x51\x00\xbb\x7c\xfe\x78\x61\
\x3e\xc4\x89\xed\x8b\x1a\x84\x08\x6c\xea\x90\xe0\x63\x67\xf7\x13\
\x58\x39\x07\x01\x76\x07\x23\xa0\x2c\xcb\x4f\x8c\xfb\xeb\xcc\x20\
\x27\x2d\xc9\xac\xee\x13\x2d\xe5\xc1\xa2\xf2\x13\x30\x06\xa0\x2d\
\x1f\x3e\x7c\x72\xc4\x84\x55\x7b\xff\x57\x76\x4d\x49\x7d\x12\xfc\
\xe3\x9e\x7c\x6f\xc4\x89\x63\x4d\x3f\xd7\xbd\x7b\x04\x80\x42\x29\
\x5c\x14\x50\x40\x39\xf5\x1f\x0e\x00\x86\x43\x86\x1f\x9c\x93\x7b\
\xc4\xf8\xbd\xdd\x08\x3f\x4e\x59\x83\x8e\x19\x80\xb0\x32\x2a\xd3\
\x36\x61\x99\x1d\xa3\x86\x55\xcb\xa9\x62\xce\x3f\x01\xb5\xda\xfb\
\x62\x8b\x49\x7e\xf3\xf2\x9e\xea\x63\x77\x5d\xf6\x52\x85\xb4\xff\
\x25\xf5\x3d\xf0\x1f\x6d\xf6\x97\x2a\xc1\xb0\x1b\xba\xba\x4f\x29\
\x5c\x6c\x19\x14\xb0\xd8\xff\x40\xc7\xb1\x78\x3e\xa6\x6f\x07\x5a\
\x8b\x24\xb7\xbb\x2e\xcc\xfb\xed\x7c\x0a\x16\x22\x09\x55\x00\x8b\
\x4c\x61\x0f\x1b\xe2\x8b\x9a\x94\x02\x23\xfd\xd7\x50\xfb\x39\x70\
\x53\x08\x40\xd7\xf7\x09\xcb\xa1\x08\xaa\x8e\x37\x6f\x94\xdd\x53\
\x52\x9f\x02\xff\xc8\xc7\xdf\xbd\x99\x1e\xf7\x16\xf1\x12\xd8\xc2\
\x00\x40\xa1\x74\xe4\x5d\x87\x83\x94\xb6\x55\xe9\xed\xea\x02\x20\
\x48\x7f\xd5\x7e\x3c\xbf\xb8\x29\xbd\x82\x4a\x4f\xc5\x61\x82\xe0\
\xe0\x2f\x00\xaa\x39\xe9\xaf\x87\xfd\x9c\x78\x0b\x14\x85\x35\x02\
\x28\x2e\xb3\xfd\x1f\xdb\xae\x70\x5a\x80\x82\x63\xf5\x4d\x79\x53\
\x57\xcb\x1a\x00\x49\x7d\x08\xfc\x4d\x0d\xcd\xbf\x35\x9b\xe3\xd4\
\xc2\x00\xdc\x2a\x85\x02\x5d\x13\x10\xbc\xff\xaa\x53\xdc\x1f\x66\
\xff\x80\x08\x60\x0a\x7b\xe6\x60\xd9\xcf\x1d\xa7\xd2\xf8\x0c\xc3\
\x02\x54\x62\x55\xf5\x89\x03\xa0\xc3\x14\x5a\xce\x8f\x7d\xd8\x6f\
\x6c\xba\xdb\xb8\xa6\xae\x01\xa8\xd4\x0c\x74\xdd\x1c\x10\x19\x00\
\x63\x02\x9f\x1d\x6e\xfc\xa9\xec\xa2\x92\xfa\x04\xf8\x07\x3d\xf4\
\xce\x2a\xd5\xd7\xe6\x89\x17\x6f\x57\x74\x7b\x97\x6d\x23\x16\xc9\
\x29\x48\x54\xbb\xd4\x5e\x3b\xf5\x1f\x36\x0e\x43\x38\xc4\xf3\xe3\
\x39\x01\x45\xa2\x36\xc0\xb6\x4d\xf6\xb1\x9e\xbb\xb7\x3d\x6a\x0a\
\xfb\x69\xa7\x1b\x8e\x3f\x4b\x52\x8f\xa2\x98\x99\x01\xbf\x6e\x62\
\x00\xda\x6f\xa0\x35\xe0\x1e\xfd\xdc\x1e\x99\xfe\x2b\xa9\xf7\xc1\
\x8f\x93\xbe\x05\xaa\xe0\x88\xe3\x13\x5b\xf4\x70\x9f\xc2\xec\x7e\
\xde\x0c\x88\x9f\xb8\x83\x04\x92\x79\xc4\x08\x00\xec\x43\x88\x10\
\xa4\xbf\xa5\xd0\x27\xde\xb8\x7d\x34\xb1\x11\xc1\xd8\x33\xd5\x85\
\xec\x1d\x7f\xf6\x5f\x9a\xe5\xf5\xf3\x00\x27\x8a\x96\x06\x1c\x47\
\x03\x38\x70\xb4\x71\xa6\xec\xa6\x92\x7a\x15\xfc\xf9\x8b\xdf\x5a\
\xd5\x1a\x08\xb9\x41\x69\x4c\x2b\xd7\x01\x46\x55\x5e\xd2\x53\xe6\
\xf9\xd7\xe3\xfe\xba\xf7\x1f\x36\xb1\x78\xd8\xa4\xf8\xa2\x63\x4d\
\x41\x04\xb7\x5d\xa1\x90\xe9\x1c\x41\xa3\xb0\x93\xec\xb6\x3c\x41\
\x98\xee\xc7\x94\x34\x44\x80\x28\x45\x7d\x84\x82\x0a\x27\xeb\x96\
\x8d\x25\xac\x67\xb2\xfd\x39\x06\x60\x6b\x02\xb0\x16\x8a\x62\xe8\
\xef\xf7\xbe\x27\xbb\xaa\xa4\x5e\x03\x3f\x3d\xd1\xbc\xc0\x58\xa1\
\x50\x85\xf9\xf0\x28\x35\xab\xfa\x7a\xf8\x4f\x89\x15\xd5\x39\x48\
\x78\xc0\x26\x36\xef\x74\x0c\xb5\x32\x0c\x4b\xb9\xae\x83\x77\xdf\
\xb1\xde\xdf\x2e\x03\x50\x9c\xe3\x0b\x8e\x1a\xc1\x67\x4c\xfa\x8b\
\xaa\xff\x90\x14\x17\x8c\xf4\x5e\x68\x25\xcd\x16\xe7\x9f\xc8\x00\
\xf4\xe6\x32\x96\x29\xc5\x91\xca\xfa\xe9\xb2\xab\x4a\xea\x15\xf0\
\xe7\xff\xe4\x5f\xab\xda\x82\x9a\xd4\xe7\x25\xb7\x2a\xac\xf3\x9d\
\x5f\xd7\x00\x14\x21\x0a\x60\x0b\x76\xa7\x12\x5f\x13\x43\x10\xc0\
\x2a\x3a\x00\xc5\xeb\x02\xce\x75\x06\xa0\xce\x63\xfa\xc1\x0e\xe4\
\x7c\x3c\xcf\x3c\xe9\x5f\x9b\x43\xcc\x3f\x29\x56\xd5\xc7\xef\x25\
\xce\x0c\x80\x97\xf8\xba\x06\x00\x02\x84\xa3\x40\x38\x8a\xfe\xbf\
\xd9\xf3\xb9\xec\xae\x92\x7a\x1c\xfc\xc9\xcd\x6d\xf3\x41\xf9\x8c\
\x36\x43\xf2\xaa\xd4\x1e\xc4\x84\x17\xa8\xba\x36\x10\x37\x59\xc7\
\x81\x39\x58\x7e\xed\xf2\x05\xec\x62\xfd\xb0\x99\xad\xd7\xc6\xec\
\xb0\xad\xe8\x23\x56\xae\x40\x04\x06\xa3\x93\x0a\x54\x84\x54\x5b\
\xd5\xdf\x74\xb2\xaa\xab\x40\x36\x0c\xc0\x25\x48\x7c\x17\x73\x04\
\x86\x23\xb1\xbc\x80\xc6\xe3\xbe\x31\xb2\xbb\x4a\xea\x51\xf0\x0f\
\x7b\xe8\xed\x9b\x7d\xde\x36\x8f\x16\xca\x53\xad\x80\xe5\x7d\x00\
\x9c\xfa\xcf\x4b\x7e\x43\xfa\xdb\x14\xff\x38\xda\xf4\xb0\x8f\xe3\
\xf3\x24\xd6\x04\xe8\xa8\xa3\x36\xde\x79\x71\x5c\x01\xc0\x41\x2b\
\x80\xd9\x3c\xb0\x35\x07\xcc\xc7\x1e\x08\xdb\xab\xfe\x16\xc6\xa1\
\x03\x1f\xa2\x0f\x80\xb7\xf3\xd9\x7a\x38\xaa\xdd\x4e\x67\x0e\xad\
\x01\x32\xea\xa5\xaa\x95\xb2\xcb\x4a\xea\x31\xf0\xc3\xe7\x7f\x12\
\x30\x66\xa6\xd1\x3d\xfc\xd4\xae\x2a\x4f\x30\xb7\x75\x7b\xdf\xc5\
\x36\xe8\x0c\xc0\x02\xca\x78\x75\xf9\x80\x8d\x44\xb7\x63\x12\x88\
\x13\xde\xb3\xab\xf1\x77\xd0\xf5\x1d\x47\xfa\x75\xfe\x44\xe1\x30\
\x45\xc8\xae\x7c\x20\xd9\xc5\xe5\xf5\x0b\x92\x1f\xd0\xb6\xeb\x52\
\xdf\xe5\x32\x96\xa3\x2a\xf3\x11\x70\xd9\x7f\x8a\x82\xea\xa3\xbe\
\x1f\xc8\x2e\x2b\xa9\xc7\xc0\xdf\xda\xd4\x32\x4a\x1f\x96\x9a\xe8\
\xe0\xa5\xd6\x4a\x3b\xd5\x21\x24\x47\xa8\x91\xfb\xaf\x13\x71\x1a\
\xb2\x9b\x67\x06\xfc\xba\x69\xbb\x8d\xe3\x0f\x71\x4a\x83\x4d\x38\
\xa7\xd6\x59\x79\x2c\x37\x8a\xb7\xd9\xc1\x01\x48\x81\x7d\x21\x15\
\x62\xc2\x8f\x51\xb1\xc7\x81\x5d\x64\x00\x10\x24\x3e\x00\x84\x22\
\x36\x9a\x81\x02\x78\x5b\x93\x64\x97\x95\xd4\x23\xe0\x1f\xf2\xe3\
\x37\x97\xd2\xb0\x4a\x74\x7b\x5d\x05\xd1\x6a\xd8\xf5\x3e\x2f\x38\
\xf1\x54\x1e\x63\x0c\xac\xba\xda\x7f\x4e\x46\x0a\xbe\x55\xd0\x1f\
\x4f\x4c\x2b\xc0\x93\xd3\x46\x62\xe9\xa5\x05\x78\x8a\xb5\xbb\xc7\
\x0c\xc4\x57\xf2\xb3\x1c\x32\xf8\x1c\x46\xf6\x81\x4d\xc8\x90\x3a\
\xe0\xd8\x71\x70\x50\x9b\x97\xe6\x1d\x7a\x76\xaa\xbf\x03\x8f\xa8\
\x0d\xab\xc8\x72\x11\x4c\xc9\x4a\xc2\xfc\x21\xa9\xb8\x75\x78\x3a\
\x9e\x1d\x9b\x85\x9f\x8f\xf3\xe0\xe7\xe3\x3c\xb8\x73\x68\x26\x46\
\x66\x25\x0b\xa5\xbd\x82\x19\x40\x08\x10\x0c\x9b\x43\x7e\x42\x6d\
\x80\x67\x75\xc5\x07\x67\x70\x7f\x2b\x00\x30\x9b\xfd\x4a\xea\xcb\
\xe0\x4f\x6a\x0b\xde\x44\xa8\x1a\x4b\xda\xd1\x6b\xd5\x09\x93\xe8\
\x31\x06\x60\x1b\x7a\xa3\x18\xde\x2f\x15\x4f\x5d\x35\x0e\xe5\x8f\
\xce\x45\xe5\xd3\xd7\xe0\xc5\xc5\x97\xe1\xa1\x5b\x27\xe2\xa1\x5b\
\x26\xe0\xc1\x5b\x26\xe0\xa1\x6f\x9f\x8f\x87\xbe\x35\x1e\xcf\xfe\
\x70\x2a\xfe\xf1\xe0\x74\xd0\x67\xae\xc1\x27\xdf\xbb\x18\x0f\x4f\
\x1c\x02\xb7\x5b\x31\xc0\x66\x51\xd7\x29\x96\x4e\x1e\x02\xba\xe4\
\x32\xd0\x47\xa7\x83\xfe\xf7\x34\xd0\x47\x2e\x05\x7d\xf8\x62\xd0\
\x07\x2f\x02\x7d\x60\x2a\xe8\x4f\xa6\x82\xfe\x78\x32\xe8\xa2\x49\
\x5a\xbb\x6f\x22\xe8\x3d\x13\x40\xef\x3a\x1f\xf4\x87\xe3\x41\xbf\
\x3f\x0e\xf4\x7b\x63\x40\xbf\x7b\x2e\xe8\x77\x0a\x41\xef\x28\x04\
\xbd\x7d\x34\xe8\xc2\x51\xa0\x0b\x47\x61\x6e\x6e\x0a\x57\xd2\x6b\
\x83\x7c\x9d\x21\xb8\x15\x3c\x5e\x90\x81\x3d\x33\xf3\xf0\xd2\x95\
\x79\x78\xec\x12\x0f\x6e\x1d\x9f\x89\x1b\xcf\x4d\xc7\xdd\x53\x3d\
\xb8\xff\xd2\x5c\xdc\x7f\x69\x2e\x9e\xbb\x6e\x28\x0e\xdc\x56\x08\
\xef\x1d\x63\xb1\xfe\x8a\xa1\xb8\x69\x70\x86\x55\x0b\x08\x86\x59\
\x22\x90\x62\x13\x11\xd0\x96\x7d\x4d\xfe\xa9\x9d\x04\xdd\x0a\x00\
\x65\xb0\x1a\x4a\x14\x40\x39\x80\xd5\x00\x8a\x01\x78\xba\xa8\x6f\
\x79\xd8\xf5\x4a\x01\xd4\xb0\xfb\x54\x03\xd8\xcc\x7e\x29\x00\x2f\
\x7b\xa6\x25\xdd\xc4\x10\x96\xb0\xeb\x97\x75\xd1\xf5\x67\xb3\x6b\
\x76\xa6\xcd\x3e\xc5\x6f\xbb\xa4\x9b\xda\xec\x84\xc0\x1f\x68\xf6\
\x8f\xd0\x80\xaf\x32\x6f\xbd\x31\x34\x35\xb1\xa8\xff\x34\x16\xff\
\x2f\xec\x97\x8a\xd2\x3b\xa7\x61\xd7\x8a\x6b\x71\xd7\xd7\xc7\x63\
\xd4\xd0\x7e\x09\xbf\xf1\xe4\x31\x39\x78\x72\xc1\x04\x9c\x78\x64\
\x26\x96\x5e\x34\x0c\x49\x49\x2e\x98\xb5\x81\x5e\x60\x91\xa6\xba\
\x7f\x03\xf4\xbf\x1c\xd3\x0f\xbe\xeb\x06\xe1\xd1\x4b\x3c\x18\x3f\
\x20\x31\x8d\x3c\x3b\xcd\x85\xeb\xce\xcf\xc6\xab\x5f\x2b\x40\xf5\
\xb7\xce\xc5\x4d\x43\x32\xb4\x8b\x86\x23\x5a\x8b\x31\x17\x31\xd7\
\x9f\xfd\xb6\x06\x4f\x45\xf5\x2f\x60\xe0\xab\x06\x70\x1f\x80\x59\
\x0e\xc7\x4d\x02\xb0\x00\xc0\x8b\x00\x9a\x18\x23\xe8\x2c\x58\x3c\
\xec\xfc\x1a\x76\xbd\xeb\x01\x8c\x70\xfa\x1c\xec\x99\x1e\x63\xcf\
\x58\xc6\x77\xce\xd3\xa4\x02\x76\xdd\x59\xac\x2d\xe9\x22\xf0\x3f\
\xd6\xc9\xb6\x99\x7d\xdb\x52\xc6\x14\x3b\xa2\xc7\xba\xa9\x75\x0c\
\xfe\xa1\xf7\x6f\xb8\x19\xfe\x90\x9b\x07\xbd\xa2\x0f\x5b\xc5\xd9\
\xf2\x1a\x2c\x0c\xd1\xfc\xc8\x8c\x42\x7c\xf2\xec\x75\xb8\xe2\xd2\
\x73\x4e\x4f\x6c\x64\x26\xe1\xc1\x9b\xc6\x62\xdf\x5d\x17\xe3\xda\
\x98\x49\xc0\x40\x48\x7b\x83\x03\x18\xba\xff\xad\x03\x53\xe1\xfb\
\xea\x60\x2c\x9a\x92\x8d\xac\x94\xce\x0f\x86\x54\xd0\x3f\x19\xaf\
\x7e\xad\x00\x5b\xfe\x6b\x38\x10\x89\x18\xa0\x17\x66\xfa\x31\x49\
\xff\x88\x8a\x82\x57\x0f\x26\xe2\xf5\x2f\x82\x26\xd1\xaf\x67\xeb\
\x3e\x00\xeb\x00\x3c\x0e\x60\x0e\x6b\x37\xb0\xf5\x75\x6c\xbf\x4e\
\x0b\xd8\xb9\xa7\x0a\x98\x12\x06\xfa\x05\x0c\xd8\x00\xb0\x13\xc0\
\x4a\x76\xaf\x39\xc2\xbd\x57\xb2\xfd\x3a\xcd\x62\x20\x29\xc5\xe9\
\x6b\x20\x22\xc0\x16\xa0\x6b\xb5\x8b\x9d\x00\xb6\x24\xd0\x6a\x85\
\xf3\xae\x67\x4c\xb1\x2c\xc1\xe7\xa9\x4d\xf0\x3e\x89\xb6\x9a\x0e\
\xc1\xef\x0a\x86\xbe\xa1\x3b\xea\x08\x57\xb5\xa7\x27\xf0\x50\x4a\
\xa1\xa8\x5a\xd3\xf1\xf8\x7f\xf3\x8b\xf0\xd0\x77\xa7\x74\x29\xe4\
\x46\x0e\x4e\xc7\x3f\x17\x5d\x84\xbb\x0a\xf3\x62\xd8\xef\x31\xe9\
\x6f\x73\x9f\x47\x47\x66\xe2\x0f\x73\xf3\x4e\x0b\xf4\x22\xcd\x2c\
\xec\x87\xc6\x7b\x8a\x90\x97\x93\x66\x2d\x06\xe2\x79\x0f\x63\x04\
\x47\x5b\xdb\x6f\x4a\x00\xf8\x65\x1c\x00\x1f\x67\x1d\x6d\x1e\xcc\
\xaa\x70\x29\x5b\x9f\xc7\xc0\x76\x03\xeb\x20\x60\xe7\x3e\xc6\x8e\
\x4b\x84\x56\x03\x78\x86\xbb\xe7\x3a\x00\x93\xd9\xb3\x94\xb0\x7b\
\x95\x09\xf7\x2e\x61\xfb\x47\x02\x58\x23\x00\xa4\x86\xed\xeb\x0c\
\x79\xd8\xb5\xc1\xbd\x8f\x1d\x43\x38\x1d\x2a\x81\x26\x45\x3b\x6a\
\x05\xec\xdf\x9b\xc3\xde\xd1\xc7\x31\xba\xf2\x04\xde\x71\x75\x82\
\xf7\x49\xb4\xad\xee\x10\xfc\xd4\xdf\x7e\x29\x5f\xa8\xa3\x6b\x00\
\x0a\x97\xb3\xaf\xea\x08\xa1\xc0\x3f\xee\x9a\x81\xdb\xe6\x8d\xeb\
\x36\x1c\x36\xb7\x87\x7b\x49\xe2\x1b\xf4\xbb\x09\x1e\x3c\x3e\x3d\
\xb7\x5b\xae\x9d\x93\xe1\xc6\xbe\xdb\xc7\x23\xcf\x93\xea\x3c\xcc\
\x37\xa3\x50\x4b\x30\x3f\x81\x0e\xa3\x83\x70\x21\x03\xb8\x37\x81\
\xc7\x28\x65\x1d\x64\x21\xd7\x49\x13\x39\x6f\x35\x34\xc9\x0a\x76\
\xde\x1c\xc6\x50\xca\x13\x7c\xfd\x1a\x06\xcc\xc9\x30\x34\x81\x6c\
\xc6\x24\x3a\x03\xd8\x12\xee\xfd\x8b\x19\x23\xd2\xb7\x7b\x7a\xa9\
\xfb\xe8\xef\x52\xc0\x3d\x8f\xfe\x8e\xbd\xf2\x4c\x8e\xe0\x0f\xb5\
\x06\x06\xe8\x7e\x3c\xc2\x06\xe7\x50\x28\x05\x51\x61\xb1\xff\x7f\
\x73\xd3\x64\x5c\x31\x7d\x68\xb7\x3d\xe4\xc3\x7f\xdc\x8b\x3f\x1e\
\x6c\xe2\x59\x53\x8f\x33\x82\x7b\x86\xa6\xe3\xbb\x45\xd9\xdd\x7a\
\x0f\x9d\x01\x68\xf9\x01\x70\x1e\xef\x3f\x10\x8a\xa7\x76\xcc\x83\
\x66\xc3\x03\x9a\xc4\x5f\xdd\x89\x47\x59\x0d\x4d\x22\xad\x49\x00\
\x7c\x3c\xf0\x77\xb2\xce\x5d\xd6\xc9\x4f\x50\xce\xdd\x17\x1c\x80\
\x3b\x03\x7e\x30\x90\xd5\x40\x73\x76\xea\xd7\x2b\x41\xef\x92\x97\
\xfd\x47\xfc\x3b\xae\xe8\x33\xe0\x1f\x7e\xff\x86\x11\x6e\x7f\xc8\
\xad\xab\xfa\x3a\x03\xd0\x42\xd6\x34\x16\xef\x27\x94\xe2\xeb\x85\
\x03\x71\xdb\x8d\xe7\x75\x78\xa3\x03\x87\x5a\xb0\x6c\xf5\x4e\xcc\
\x7f\x74\x13\xb2\xee\x2c\x45\xc6\xf7\xd7\x21\xfb\xbe\xd7\x71\xc3\
\x13\xef\xe2\x91\x55\xe5\xa8\x3e\xd2\x6a\x7b\xde\x4b\x1b\x0f\x62\
\xe9\xae\x3a\x87\x91\x79\xec\xe9\x4f\xef\xd7\xe3\xca\x17\xf6\x62\
\xee\x8b\x9f\xe1\xca\x35\xfb\x30\xf7\x0f\xfb\x71\xd5\x4b\xfb\x31\
\xf7\x4f\x95\xb8\xf2\xcf\x55\x98\xfb\x97\x03\xb8\xf2\x95\x6a\xcc\
\x7d\xad\x06\x57\xbd\x56\x8b\xb9\x7f\x3b\x88\x2b\xd6\x1e\xc2\x95\
\xa5\x87\x31\x77\xdd\x11\x5c\xbd\xee\x08\xde\x69\x0e\xc7\xbc\xf1\
\x63\xfb\x25\xe1\x57\x73\xf2\x3a\xbc\x6f\x73\xbb\x8a\xbf\x57\xf8\
\xb1\xe4\x23\x1f\x7e\xf8\x6e\x23\xa6\xbe\x7e\x0c\xe4\x4f\x07\x31\
\x77\xed\x61\x3c\xb4\xf1\x18\x6a\x9b\xc2\x09\x31\x80\x8d\xd7\x8d\
\x66\x60\x87\x90\x23\xc0\x5a\x38\x8a\x4b\xd6\xd7\x4d\x8f\x03\x7e\
\x9d\x4e\xa7\x53\xd5\x30\xe0\x7b\x3b\x60\x34\x3c\xf0\x67\x23\x31\
\x4d\xa1\x23\x2a\x86\xe6\x0f\x58\xd8\x09\xe6\x55\xcc\x31\x0d\xfd\
\xfd\xcb\x60\xa8\xff\xc5\xe8\x1b\x54\x0c\xc3\x1f\xd0\xd5\xfe\x88\
\xce\x83\x9f\x84\x23\x37\x80\x53\xf3\x15\xce\xc3\x1f\x6b\xa0\xc8\
\x4f\x71\xe3\xe9\x07\x3a\x2e\x37\x5f\xf6\xfc\x0e\xcc\x7c\x6a\x23\
\x96\x6e\xad\xc4\xc6\x3a\x2f\x28\x1b\xe5\x27\xd4\x1e\xc1\xeb\x47\
\xbd\x58\xbe\xe3\x10\x46\x2d\x2d\xc3\xf5\x4f\x6d\x43\x75\x9d\xc1\
\x04\x76\x54\x78\x71\xeb\x9b\x9f\xc3\x94\x02\xcc\xff\x3a\xd0\x21\
\x5f\x08\x9b\x4e\xb4\x61\xe3\x09\x3f\xfb\x6d\xc3\x3b\x27\xfc\xda\
\xfa\x49\x3f\x36\x9e\xf4\x63\x53\x43\x00\x1b\x4f\x06\xf0\x4e\x63\
\x10\x1b\x1b\x83\xd8\xdc\xd0\x8e\x4d\x0d\xed\xd8\xd8\x10\xc4\xdb\
\x8d\xed\x40\xac\x60\x87\xe2\x85\x0b\x3b\x56\xf5\xdf\xaa\x0e\x20\
\xfb\x8d\xe3\x98\xbf\xd3\x87\x9f\xd5\x07\x70\xa4\x3d\x1a\xdb\xb7\
\xb1\x31\x88\x65\x55\x3e\x14\xbc\x56\x8d\xef\xbf\x59\x07\x5f\x50\
\x8d\x7b\xad\x2b\xc6\x7b\x70\xeb\xa8\x1c\xee\x0f\x11\x1a\x80\xda\
\x60\xe4\x27\x0e\xa7\xeb\x9d\x68\x4b\x17\x01\xd1\x89\x3c\x1c\x30\
\x7d\x09\x30\x8a\x53\xa5\x12\x74\x4e\x6b\x59\xc2\xbd\x7f\x19\xb7\
\x5d\xbf\xd6\x88\x3e\xc4\x00\x96\x70\xcb\xb3\xfb\x04\xf8\x93\x42\
\x91\x59\x00\x4c\xde\x7d\x23\x4f\x9f\x49\x7f\x15\xb8\x63\xda\x68\
\xe4\x64\xa7\xc6\xbd\xc1\xbd\x3f\xdb\x86\xa7\xb6\x56\xa2\x25\x68\
\x84\xb1\x5c\xd4\xb8\x79\xcc\xa7\x00\x60\xfd\x51\x1f\xc6\xfe\xf2\
\x3d\xbc\xf4\x56\x0d\xaa\x8f\xb6\xe1\xe2\xe7\xff\x6d\x80\x9d\x76\
\xc6\xd3\x47\x9d\xe7\xeb\xb3\x1b\x3f\x80\x58\x8f\xb9\x2a\x37\x05\
\xd3\x87\xa5\xc5\xff\x07\x3f\xf4\xe2\x4b\x1f\x37\x02\x11\xad\x6c\
\x37\x10\xa6\x08\xb3\x71\x3b\xb4\x19\x79\x68\x4c\x75\x7f\xee\x50\
\x2b\xc6\xbe\xb4\x1f\x5e\x7f\x24\xfe\x35\xe7\x0e\xe3\xa6\xf6\x16\
\x1e\x8c\x00\x4d\xfe\xd0\x25\xbd\xdc\x71\x4b\x60\x96\xb0\xe5\x7d\
\x00\x4c\xc5\x30\xc2\x8a\x2b\x84\x7d\xab\x61\x48\xda\x25\x7d\x04\
\xfc\xa5\xdc\x72\x51\x9f\x00\x3f\x89\xaa\x39\x8a\x4a\xd9\x10\x76\
\x94\x9b\x5d\x9a\xc6\xec\x7f\x02\x8a\x9b\xae\x8d\xef\xe0\x7b\xe4\
\x57\xdb\xb1\x7a\x4f\x9d\x15\x70\x71\x40\x19\x0e\x45\x71\xeb\x1b\
\x9f\xe1\xca\x5f\x7f\x88\x48\x28\xca\x3d\x14\xb1\x9f\x84\x33\x0e\
\xee\x8d\xf3\x9c\x8a\x7c\xb8\x15\x87\xeb\x2d\x29\xca\x89\x7b\x9b\
\xe7\x77\xb5\xe0\xf1\x9a\x36\x4b\x75\xe0\x81\x08\x45\x0a\x6f\xab\
\xeb\xbf\xed\x61\x1c\x6b\xf0\x63\xcc\x9a\x7d\x71\x19\xc0\xa8\x81\
\xa9\xb8\x6a\x68\x96\x39\x23\x90\x13\xff\x51\x95\xa6\x77\xf0\xdf\
\x76\x77\x67\x2a\x61\xbf\xb5\x7d\x08\x4c\xc5\xdc\x33\x95\xda\xfd\
\x9d\xec\xb7\xaf\x48\x7f\x6f\x9f\x03\x7f\xa8\x35\x38\x49\x9f\x84\
\x03\x7c\x5c\x9f\x9b\xa2\x6a\xde\x98\x41\x18\x5d\xe0\xec\x8f\xd9\
\xf4\xc1\x61\xac\xfc\xb8\xc6\x0a\xb6\x98\x0f\x01\xb1\x5c\x01\xdd\
\x8f\xc0\x8b\xe1\xea\xb6\x90\xf9\x5c\xa7\x89\x37\xec\x98\x97\xa5\
\x1e\x20\x1e\x77\xb0\xdb\xa5\xed\xcb\x48\x75\xc5\x95\xfa\x87\x9b\
\x23\xf8\xce\x1e\x9f\xed\xe5\xf6\x85\x28\x06\x24\x29\xe6\x91\x7f\
\x22\x51\xc0\x1f\x02\x40\x70\xa2\x39\x84\x25\x6f\x1d\x89\xfb\x1e\
\xdf\x2c\xf4\xd8\x8f\xf7\x4f\x80\x68\x5b\xc8\x29\x73\xaa\x9c\xfd\
\x66\xa3\xfb\x54\xc9\x62\x18\x52\x7f\x75\x1f\x01\xfe\x6c\x18\x49\
\x4c\x4e\xcc\xa8\x14\x46\x14\xa3\x2f\x80\xbf\x57\xc9\x16\xfc\x2a\
\x38\x15\x9f\xab\xcf\xe7\x47\xe6\x99\x32\x66\x60\xdc\x0b\xff\xa6\
\x74\x37\x07\x24\x1a\xab\x03\xd0\x31\xa2\x70\x0c\xc0\x16\xb4\x16\
\x30\x9e\xae\x77\xbf\x03\x8d\x81\x58\xc7\xef\x2f\xce\x8b\x6f\xd2\
\xfc\xa2\xdc\xe7\xc4\x7e\x00\x95\xc2\xaf\xd7\x3a\x2b\x4c\xf2\xb7\
\x06\x4d\x47\xad\xdc\xd7\x88\x03\x27\xda\x1d\xaf\x7f\xc3\xe4\xfe\
\xc6\xf5\x20\x4c\xf9\xa5\x52\x12\xa7\x83\x83\x03\xa6\xa7\x9b\x80\
\x86\x3e\x06\x7e\x1d\xf0\xbe\x38\xcf\xe4\x85\x61\x0e\xcc\x42\x2f\
\xd8\xd9\x71\xa8\xbc\x4f\x80\x9f\x77\xf4\x29\xb1\xac\x56\x23\xa3\
\x8f\x50\x8a\x71\xe7\x0e\x70\xbc\x68\x55\x6d\x33\xde\x3c\xdc\x24\
\xc2\xc1\x02\x2e\x42\xa9\x66\xf7\x53\xce\xaa\xb5\x8c\xe1\x1f\x4f\
\x65\xb7\xa7\x69\x23\xfb\x61\xd9\xa4\x7c\xad\x5d\x90\x8f\xe5\x17\
\x0c\xc4\xb2\x89\x03\xb1\x6c\x62\x1e\x96\x4f\xcc\xc3\xb2\x09\x03\
\xb0\x6c\x7c\x7f\x2c\x1f\x9f\x8b\xe5\xe3\x73\xb1\xec\xbc\x1c\x2c\
\x3f\xcf\x83\xe5\xe7\x65\x23\x25\xd5\x18\x57\x7f\x46\x7e\x7c\xf0\
\xaf\x3c\xe2\x77\x7e\x39\x00\xbb\xc2\x9c\x79\xd3\x1a\x30\xe1\x58\
\xa7\x75\xbb\x1a\x1d\xaf\xef\x49\x77\x63\x80\x27\xc5\xca\x58\x4c\
\x55\x81\x16\x2a\x83\xe1\xd9\x1e\xc1\x3a\x55\x57\x77\x72\xfd\x7a\
\x3b\xc1\x65\x8c\xf5\x22\x15\xc0\x90\xfa\x2b\x3a\x38\x76\x85\x0d\
\xc3\xe8\x2d\x9a\xc7\x2d\xf7\xf8\x77\xb4\x77\xf8\x9d\x6c\xf1\x50\
\x93\xba\x4f\x63\x43\x70\x2b\x2c\xbd\xfe\xca\x19\xc3\x1d\x2f\xba\
\xe1\xdd\x1a\x67\xa0\xb2\x6d\x6b\xef\xbc\x14\xeb\xbe\x7b\x29\xfe\
\x79\xc7\x25\x78\xf3\x8e\x8b\xf1\xf6\xc2\x8b\xb0\x69\xe1\x45\xd8\
\xbc\xf0\x42\x6c\x5e\x30\x15\x9b\x6f\x9b\x8a\xcd\xb7\x4e\xc1\xe6\
\x5b\x26\x63\xf3\x2d\x93\x51\xf6\xed\x22\xfc\x7a\xc6\xc8\x78\x9d\
\x3e\x46\x33\xcf\xcb\xc1\xe2\x2f\x0f\xc7\xe2\x6b\x86\x61\xf1\x97\
\x86\xe2\x81\xab\x87\x62\xf1\x55\xe7\x60\xf1\x95\x83\xf1\xc0\x15\
\x83\xb1\x78\xce\x20\x2c\x9e\x33\x08\x0f\xcc\xca\xc7\x03\x33\x07\
\x62\xf1\x8c\x3c\x3c\x70\xd9\x00\x3c\x30\x7d\x00\x66\xe8\xe3\xed\
\x03\x18\x9a\xe5\x9c\x46\xff\xc1\xe1\xa0\xc3\x28\x40\x06\xb8\xeb\
\x75\xaf\x5f\x20\x64\x44\x0f\x4c\xd5\x7c\xc0\xef\x6b\x7c\x71\xdf\
\x65\x60\x8a\xdb\xea\xed\x27\x00\x5a\xdb\x49\x07\x9d\x6a\x27\xc7\
\x00\x36\xc3\x48\x32\xf1\x74\x41\xbf\xd1\x9d\x6a\xe5\x7d\x00\xf8\
\x3c\x88\x7d\x09\x80\xdf\x0b\x23\xc6\x3e\x0b\xbd\x5b\x61\x58\xc2\
\x2d\x97\xf6\x09\xf0\xc7\x06\xe5\x84\xe1\x8d\xd7\x9d\x7f\x89\x50\
\x4b\x5b\x7b\x4c\x72\xab\x7c\xf5\x9f\x7e\x71\x4a\x31\xe7\xa2\xc1\
\xac\x0d\xc2\x9c\x0b\xb5\x36\x7b\x6a\x3e\x66\x4f\xd1\xdb\x40\xcc\
\x9e\xcc\x5a\x51\x1e\x66\x4d\xca\xc3\xf9\xc3\xb2\x7a\xb0\xb0\x87\
\x20\x3b\x4e\x0a\xef\x07\xc7\xda\x9d\x4e\x33\x7f\xc7\xf6\xb0\x06\
\x7e\xbd\x82\x0f\x9c\xf3\x4f\x21\xf8\xec\x64\x20\xee\x53\x5c\xd5\
\x3f\xcd\x3a\xd7\x9f\x78\x1f\xfb\x0e\x3e\x1b\x46\x26\x99\xde\xd1\
\xf5\xc2\x9d\x32\x74\xae\xd2\x0c\x30\x6b\x11\x35\x7d\x00\xf8\x05\
\x30\x72\x0d\x4a\x91\x58\xb8\x71\x89\xc3\x72\x4f\x52\x31\x0c\x6d\
\x65\x4b\x9c\x6f\x59\x80\xae\x49\xeb\x2d\x48\x08\xfc\x7a\x28\x2f\
\x36\xee\x5e\x2c\x9f\x5f\xeb\xd1\x4a\x07\x0e\xb7\xff\x54\x9d\xe4\
\x98\x08\x04\x3b\x9b\xa0\x2f\x13\xe5\xd2\x68\x27\xe4\xa7\x9c\x02\
\xaf\x20\xf6\x5c\x34\x4a\x81\xb4\x64\xc0\xed\xe2\xb0\x4f\x3a\x38\
\xcf\x20\x8f\x69\x14\x60\x7d\xfc\x3f\x62\xf8\x11\xe2\x33\x80\x79\
\xd0\x52\x6d\xd7\x09\xfb\xf4\x4a\x3a\xbd\xd2\xac\x1c\x9a\xc4\x2c\
\xea\xd3\x7f\x8e\x3d\x95\x74\x02\xc8\x35\xdc\x37\xe9\x8d\x04\x9b\
\x62\x68\x8c\x18\x30\x72\x24\x9c\x68\x01\xfb\x9f\x4e\xb7\x15\x27\
\x06\x7e\x7e\x4c\x3e\x18\xb9\xfd\x89\xd2\x90\x9c\x34\x87\x1b\xf4\
\x64\x55\xce\xe9\x93\xaf\x5d\xed\xdc\x89\x04\x28\x4c\x55\x90\x4e\
\x55\x20\x23\x05\x97\x8c\xee\x0f\x8c\xe8\x0f\x9c\x93\x0b\xf4\xef\
\x07\x64\xa5\x01\xe9\x29\x40\x72\x92\xc6\x14\x4e\x89\xa9\x38\xc4\
\xfe\x9d\xa9\x8c\x31\x81\x91\x00\x16\xc1\x5a\xc1\x07\x68\xe9\xc0\
\xf7\x01\xd8\x01\x23\xb3\xef\x4c\x20\x0f\xf7\xac\x6b\x70\x6a\x9a\
\xc8\x0a\x07\x06\xd2\x9d\x54\x04\x4d\x3b\x79\x51\xb8\x77\x0d\x7a\
\x81\x1c\xf5\xda\x98\x9d\xcf\x77\xb9\x04\x99\xc0\xe0\xdc\x0c\xc1\
\x81\xd7\x95\xd4\x73\x79\xfd\x7b\x8e\x3b\x7b\xe2\xcf\xcf\x49\x36\
\x7f\x1c\x00\x50\x08\x46\xa6\x28\xf8\xaf\x74\x05\x99\x94\xc2\x1f\
\x88\x00\x14\xf8\xc4\x1f\xc1\xfc\xec\x64\x5c\xde\x3f\x0d\xc8\xc9\
\x00\xf2\xb2\x80\x01\x59\x40\xff\x4c\x8c\x19\xe6\x89\xfb\x0c\xaf\
\xd4\xb5\x9e\x22\xd6\x1d\xa9\x86\x75\xf8\x79\x0c\x34\x93\xa1\xa5\
\xcf\xae\x81\xb9\xec\x74\x04\xeb\x9c\x35\xe8\xfb\x9a\x40\x09\xac\
\xa9\xbc\x89\x52\x19\xcc\x29\xbf\x9e\x4e\x00\x79\x76\x02\xad\x18\
\x46\x12\xd4\x0e\x98\x4b\xac\x6f\x40\xc7\xd1\x92\x35\x30\x97\x42\
\x77\xb6\xad\x4e\x08\xfc\x94\x9b\x28\x93\x38\x0c\x71\x5f\x55\xed\
\x75\x7c\xda\x8b\x2f\x18\xec\x3c\x6d\xd5\x69\xe1\x96\x8b\x99\xc7\
\xeb\xe5\x27\x82\x78\xf7\x73\x2f\xb6\xee\xf3\x69\x6d\x7f\x33\xb6\
\x56\x34\x63\x6b\x45\x0b\xb6\x56\xb4\x60\x5b\x65\x0b\xb6\x55\xb5\
\x62\xdb\x81\x36\x6c\xab\x6e\xc3\xb6\x6a\x3f\xde\xab\xf1\xe3\xbd\
\x5a\x3f\x6a\xc3\x89\x49\xfb\x69\xe7\xa4\x98\xc0\x3f\x3c\x45\xc1\
\x35\xe9\x0a\xc6\xb9\x09\x82\x14\x28\x6f\x0e\xc5\x76\x87\x83\x51\
\xb4\x45\x54\x0c\x4a\x22\xb8\xc9\x93\x84\xa2\x4c\x37\x90\xe2\x06\
\x32\x52\xf1\xd5\x61\xf1\x6b\x57\xf2\xb2\xd2\x80\xcc\x34\x20\x2d\
\x05\x48\x76\x03\x6e\xa5\xab\x98\x41\x39\x8c\xd1\x7b\x0a\x18\x33\
\xe0\x4b\x4e\x47\x30\x80\x14\xd9\x9c\xa7\x53\x41\x1f\x00\x3f\x18\
\x88\xcb\x3b\x71\xbe\x0e\x88\xce\x14\xfc\x3c\x83\xc4\xd4\xed\x17\
\xa1\x69\x55\x93\x04\x40\x17\x20\x31\x27\x5f\x0d\xcc\xa5\xd0\x9d\
\x6d\x35\x09\x81\x5f\xcd\x49\x0f\xc4\xeb\x5b\x84\x02\xbb\xf6\x1e\
\x77\xdc\x7f\xc5\xf4\xa1\xc8\x4f\xd6\xbc\xe6\x2a\xef\x9f\xe2\x80\
\x3f\xff\xd1\x8d\xf8\xda\x92\xcd\x98\xff\xf8\x66\xdc\xf8\x78\x19\
\x6e\x78\xe2\x5d\xdc\xf0\xd3\x77\x31\xef\xc9\xad\xf1\xee\x9c\xd0\
\xbf\xf2\xca\xc7\xc7\x30\x6b\xed\x7e\xcc\x2c\xdd\x8f\x99\xeb\x2a\
\x31\x73\x7d\x25\x66\xfe\xa3\x0a\x33\x5f\x3f\x80\x99\x6f\x54\x63\
\xc6\x9b\xb5\x98\xb1\x81\xb5\x7f\x1d\xc2\x8c\xb7\x0f\xe1\xf2\xb7\
\x8f\xe0\xf2\x77\xea\x50\xd9\x12\x8e\xdd\xea\xfd\xba\xa0\xe3\x3d\
\xb2\x52\x14\xdc\x91\x9f\x8a\xc1\xa9\x0a\xae\x48\x77\x61\x5c\x92\
\xf1\x6c\xff\x6e\x0b\x6b\x23\xf0\x72\x83\x88\x6e\x6e\x35\x8a\x7a\
\xc6\xa5\x28\xb8\xc9\x93\x84\xd1\xe9\x2e\x7c\x69\x70\x7c\xbf\xc2\
\xd6\xe6\x30\x90\x9a\x04\x64\xa4\x00\xd9\xe9\x40\x4e\x26\x90\xdb\
\x0f\xc8\xcb\xea\x6a\xf5\xa7\x1c\x89\x95\x9c\x7a\x39\x06\x51\xd4\
\x8b\xc0\x2f\x86\x21\xf5\x97\x74\xf2\x1a\xab\x61\x68\x3d\x25\xdd\
\xf8\xac\xfa\x40\x2a\x8b\x00\xe4\xa0\xeb\xeb\x20\x3a\x45\xb6\xe0\
\x8f\xba\x5d\xed\xe2\x41\xb1\x51\x78\xf5\x0e\xbe\xab\x2e\xee\x85\
\xef\x9c\x36\x4a\x5b\x10\x7c\x7d\xba\xd0\xde\x58\xe7\xc3\x3b\x75\
\x5e\xfc\xab\xce\x87\x37\xeb\x9b\xf1\xfa\x51\x1f\xd6\xd7\x37\x63\
\x7d\xbd\xaf\x83\x47\xee\x42\x87\xa1\x38\x58\x86\x40\x6f\x1d\x8f\
\xef\x89\xbf\x77\x7c\x16\xc6\x25\x99\x3f\xe1\xb1\x88\x8a\x66\x7f\
\xc4\x32\x89\x68\x7b\x30\x8a\xd6\x08\x9b\xc1\x97\x02\x49\x04\xf8\
\x76\xff\x64\xcc\x1d\xee\x0c\xfe\x77\x6a\x83\x40\x66\x2a\x90\xe4\
\x32\x3f\xab\x42\x80\xd4\xa4\x68\x37\xf5\x09\x2f\xac\x25\xa7\x22\
\x30\xca\xd8\xef\x24\xf4\x9e\xf4\x5f\xc2\x01\x0b\xe8\xbc\x17\xbc\
\x94\x7b\xcf\xe2\x53\xb8\xff\x22\x74\xac\x6a\x8f\x64\x3d\xcb\xc3\
\xbe\xe9\x8a\xbe\x00\xfa\xb8\xe0\xd7\xb3\xf8\x4d\x5b\x88\x39\xd0\
\xb7\x69\xcf\xd1\xb8\x17\xfe\xfe\x37\x26\xe2\xbc\x7e\xa9\xa7\xfc\
\x40\xb4\x0b\xc0\x4d\xe2\x5d\x23\xae\xc9\x60\xce\xc5\x7f\xbb\xb1\
\x1d\x07\x7d\xce\xf9\xf7\x17\xe4\x27\x63\xde\x20\xf3\x3b\x7e\xe2\
\x6b\x77\x98\x7c\x94\x62\x83\xd7\xec\x43\xb8\x7d\x4c\x46\xdc\xf7\
\x78\xfb\x64\x04\xc8\x48\xd5\x9c\x84\x03\xfa\x01\x59\xe9\x40\x6a\
\xb2\x29\x4f\xa0\x1b\xa9\x84\x03\xd6\x3c\x07\xf0\xdb\xed\xeb\x09\
\x2a\x86\x91\x6b\x90\x8d\xd3\xf3\x82\xdf\x67\xc3\x50\x12\xa1\x72\
\x74\x42\xd5\xee\x4b\x64\x0b\x7e\x77\x46\x6a\x8d\x19\xf8\x56\x53\
\xbd\xaa\x39\x88\x4d\x5b\x0f\x3a\x5e\xd8\x93\x9d\x82\xe7\xee\xbe\
\x1c\x03\x52\xdc\xdd\xf2\xe0\x24\xd1\x03\xc8\x69\x5c\x8d\x10\xbc\
\xba\xaf\x25\xee\x99\x77\x4c\xcc\xc2\x84\x0c\xed\x1d\x3f\x6a\x0b\
\x6b\x95\x7d\xe2\xf0\xe2\xfa\xd7\x0b\x45\x71\x98\x95\xfa\xfe\xb0\
\x30\x03\x23\xb2\x9c\x3d\xfd\xad\x21\x8a\xdf\x36\x84\x8c\x18\xbf\
\xdb\xa5\x69\x01\xb9\x99\xc0\x20\x0f\x2e\x1e\xd8\xaf\xaa\x9b\xfb\
\x86\x17\x86\x54\x9c\x24\xec\x2b\x15\x98\x44\x6f\x80\xbf\x3b\xa8\
\x2f\x95\xfb\xf6\x0e\xf8\x15\x97\xcb\x67\xe7\x54\xd2\x99\x00\x65\
\x82\xe7\xb9\xd7\xca\xe3\x5e\x7c\xca\x84\x3c\xfc\x7d\xd1\x2c\x5c\
\x32\x20\xb3\x43\xb8\xe9\xeb\xb3\x72\xd3\x13\x7b\xf2\xd3\x91\x7c\
\xc4\x61\xa3\xcd\x43\xfd\xee\x50\x1b\xda\x42\xce\x4e\xc0\x8c\x64\
\x05\x4f\x4d\xcf\xc5\xfc\xc1\xa9\x68\x6c\x0b\x9b\xc1\x6e\x9a\x5f\
\x40\xdb\xb6\xcf\x1f\xc6\xe2\xf3\xfa\xe1\xb2\x73\xe2\xdb\xfa\xeb\
\x0f\xb6\xa3\x55\x05\x37\xbd\x37\x17\xe3\x77\x29\x18\x98\x9e\x54\
\xdf\x03\xfd\xa3\x26\xce\xf6\x35\x1c\x60\x7a\x92\x01\xcc\x86\x91\
\x1c\xb3\x08\xf6\xf9\x8f\xa7\xda\x72\x70\x16\x16\xfc\xd8\x82\x3f\
\x92\x9e\x52\xaa\x81\x9c\x98\x00\x2f\x1e\xbc\xf9\x60\x23\x36\xc6\
\x91\xfe\x3a\x03\x78\xfb\x97\xd7\xe2\xa1\x99\x85\x18\xdd\x2f\x15\
\x26\xc3\x9f\xc3\xe1\xf4\xdc\x74\xac\xf9\xca\x78\x94\x3d\x3e\xe7\
\xb4\x5f\x8a\x12\xe1\xea\xb6\xaa\x3e\x3f\x34\x96\xf5\xab\x24\xa7\
\xb8\x30\x21\xc3\x8d\x34\x02\xbc\xb4\x2b\xbe\x1f\x22\x23\x59\xc1\
\x9d\x45\xd9\xd8\x36\x7b\x10\xbe\x99\x9f\x66\x33\x85\x18\x05\x5c\
\x04\xbf\x18\x93\x85\xea\xff\x1a\x84\x29\xf9\xf1\x47\xdf\x6e\x08\
\xaa\xd8\x72\x32\x84\xdb\x3c\x49\x18\x9d\xa2\x98\x32\xfc\x14\xd6\
\x72\x5d\xa4\x27\x26\xef\xf0\xc4\xd9\xb7\x44\x58\x2e\xe8\xa1\x3e\
\xab\xdf\x37\x5e\x01\xcf\xa9\x92\x17\x86\x36\xd3\xd7\x0a\x7e\x7a\
\x16\xfc\xe8\x97\xb6\x3f\x86\x0d\x10\x66\xef\x93\x18\xb0\x74\x66\
\x40\x09\xf0\xc8\xf3\xdb\xd1\xe4\x0b\x76\x78\xa3\x07\xef\x98\x82\
\x4f\x7e\x75\x1d\x36\xdd\x3f\x0b\x6b\xbe\x3d\x15\x8b\x67\x14\xe2\
\xd9\xeb\x27\x62\xfd\x77\x2e\xc1\xd1\x9f\x5d\x83\xcd\x3f\xbd\x12\
\xb7\x5c\x33\xaa\xe3\x27\x4e\x40\xe2\x2f\xfe\xf2\x70\xd0\x07\x2f\
\x06\x5d\x7c\xa1\x31\x79\xc7\x8f\x26\x83\x96\x4c\x02\xbd\x77\x22\
\xe8\xdd\x13\x40\xef\x1a\x0f\xfa\x83\xf3\x40\xbf\x37\x16\xf4\xbb\
\x63\xb4\x89\x3b\xf4\x49\x3b\x6e\x1b\x89\x1f\x70\x15\x7d\xbf\x3d\
\xd8\x86\x9d\xf5\xed\x1d\xde\xf7\xb2\x11\x69\xf8\xf3\x97\x07\x81\
\x16\x8f\xc4\x7b\x73\xf2\xf1\xde\x15\xf9\x78\x6f\x4e\x3e\x76\x7f\
\x69\x08\xe8\x37\x87\xe3\xc7\x17\x79\x12\x1a\xf5\xf7\xb9\x7d\x9a\
\xa3\x31\x85\x00\x57\x67\x28\x58\x90\xed\xc6\x39\xc9\x0a\x14\x85\
\x40\x51\x08\x5c\x0a\x90\xee\x22\xe5\x0e\xa7\xaf\x46\xd7\x79\xe1\
\x75\x10\xd4\xda\xec\xab\x81\x36\x46\x20\xa0\xd9\xdd\xa5\xe8\xda\
\x0a\xc2\x62\x1b\x70\x17\xc0\x5c\xc0\xe3\xed\x06\xa6\x22\x2e\x9f\
\x5d\xe0\xaf\x7e\xf2\xca\x37\x28\x21\x50\x05\xc9\xaf\xb2\x01\x25\
\xf8\x14\xd8\x8a\x96\x00\xee\x5f\xbe\x25\xe1\x1b\x4e\x19\x3f\x00\
\xd7\xcf\x19\x81\x07\x17\x5c\x80\x5b\xaf\x2d\xc4\x9c\x8b\x06\xc3\
\xd3\x2f\xf9\xd4\x9e\x9a\x90\x1e\xfb\x30\xba\x16\xf1\xdf\xff\x6e\
\x40\x55\x63\x38\xe1\xf3\xa7\x0f\x4f\xc3\xf4\x61\x5a\x3b\x7f\x60\
\xe2\xef\xf7\xc7\xfd\x01\x54\x07\xcc\x8e\xfc\x0c\x05\x98\x97\xa9\
\xe0\x86\x7e\x2e\xf4\x53\xb4\xf7\xff\xcd\x94\xf4\x57\x1c\x80\xbf\
\x00\x5d\x33\xf9\xc5\x6c\x18\xb6\x7e\x69\x1c\xc0\xe8\x05\x44\x93\
\xd0\x75\x23\xd1\x2e\x81\x16\x1f\x5f\x00\xe7\x2a\xbc\xd5\x5d\xfc\
\x97\xd7\xa0\xef\x14\xfc\xf4\x1e\xf8\x01\x20\x92\xd7\xcf\xab\x00\
\x8c\x01\x10\x10\x9d\x11\x10\x7d\x54\x1f\xb6\x0d\xc0\x5f\x2b\x8e\
\xa1\xe4\xa7\x9b\xba\xf5\x41\xbd\xad\x61\x3c\xbd\xa9\xba\xd7\x3e\
\x54\x7d\x84\x62\xd1\x07\x27\x4f\x89\x01\x9c\x2a\xad\xd9\xe7\xc7\
\x96\x86\x30\x63\x3a\x56\x06\x37\xd8\x4d\x70\x5b\xb6\x1b\xf3\xfb\
\xb9\x76\x3a\x5c\x42\x07\x9e\xee\x01\x5f\xd2\x49\x30\x16\x09\x80\
\x5f\xd1\x01\x93\x10\x19\x40\x51\x27\x3f\x81\x87\xdd\xf7\x31\xb6\
\xce\xab\xf6\x05\x30\x0a\x78\x4e\x35\x95\x37\x51\x5a\xed\xc0\x68\
\xce\x2e\xf0\xa7\x64\xa5\xed\x54\x99\xd4\x57\x63\xde\x7e\x62\x52\
\xf9\x63\x7e\x01\x10\xbc\xb0\xab\x0e\x5f\xff\xf1\xeb\xf0\x26\x60\
\x02\x9c\x2a\xfd\xe3\x83\x3a\x0c\x59\xb6\x0d\xff\x38\xd6\xd2\x23\
\x52\x5f\x24\xc2\xee\x59\x1f\x51\xf1\xa3\x0f\x4e\x62\xfb\xe1\xae\
\x7d\xc7\x40\x84\xe2\xe9\x4f\x5b\x63\xc0\xef\xe8\xcf\x1a\xe4\x26\
\xa5\x0e\x87\xcc\x83\xa1\x8a\x83\x81\xa8\x1c\xa7\x96\xbe\x5a\x0c\
\xf3\x84\x1f\x8b\x3a\x00\x9a\xd7\x86\x01\xec\x60\x0c\xa3\xe0\x14\
\x40\xbf\x84\xdd\x47\x4f\x7f\xad\x65\xd7\x2d\xb7\x01\x63\x77\x01\
\xb3\x0c\x46\xca\x6f\xaf\x8c\xa8\xdb\x27\xc0\x1f\x4c\x4f\x2d\x55\
\x99\xea\x0f\x4e\xea\x83\x81\x9d\x10\x82\x28\x63\x00\x3a\x1e\x37\
\x1c\xf1\x62\xca\x7d\xeb\xf1\x87\xb5\x9f\x77\xc9\xc3\x55\x1f\x6d\
\xc3\xf5\xbf\xd8\x8e\xaf\xae\xdd\x83\x40\x58\xcc\x69\xe9\x9d\xea\
\xc0\x63\x11\x8a\xfb\x77\x34\xe2\x99\x8f\x9b\x3a\x1c\x85\x37\x11\
\x7a\xbf\x2e\x84\x9f\x7c\xdc\x8c\xdd\x2d\x51\xc7\x08\x88\x0d\x95\
\xc7\xb9\xe4\x12\x68\x09\x26\x7c\x3d\xbf\x5e\xca\x5b\x0a\xa3\x94\
\x57\x6c\x2b\x60\xcc\xaf\xa7\x03\x7f\x25\x12\xcb\x99\xd7\x19\x00\
\x3f\x8d\xd8\x7d\xd0\xe6\xdf\x2b\x85\x31\xbb\x0d\xcf\x80\x66\xc3\
\xb0\xeb\x6b\x18\xa3\xe2\x67\xfb\x29\xe2\xde\xd3\x03\x23\x9f\x60\
\x0b\xba\x37\x7e\xbe\xa2\x07\x98\xcc\xa9\x50\x31\xba\x26\xbd\x57\
\x6f\x2b\x3a\x04\xff\xa1\xa7\xbf\xbc\x42\x25\x46\xe9\xa8\xc9\x07\
\xc0\x49\x7d\x95\x18\x8e\x40\x10\xe0\x44\x28\x82\x1f\xfe\x75\x07\
\x2e\xfa\x61\x29\xfe\x58\xba\x0f\x5e\x2e\xc7\x3d\x51\x5a\xbf\xed\
\x08\xae\xff\xd9\xfb\x18\xf5\x8b\x6d\x58\x5f\xdf\x6c\x45\x43\x0f\
\xe2\x5e\x8f\xc4\xc7\x86\x1f\x63\xf7\x7e\x70\xbf\x17\xb9\x7f\xae\
\xc2\xc3\x09\x8e\xc7\xcf\x93\xaf\x5d\xc5\xab\x7b\x5b\x90\xf3\xd7\
\xc3\xf8\xef\x3d\xcd\x68\x8d\x3a\x0d\x2d\x66\xbf\xf9\x99\xc9\xe9\
\xa5\x1d\xdc\xa2\x8c\x81\x67\x21\xcc\xce\xba\xeb\x61\x94\xf2\xda\
\x25\xbb\xe8\x89\x33\xb5\xd0\x8a\x4e\x4a\x4e\xe1\xb5\xbc\xec\xf8\
\x39\x30\x4f\x91\x75\x3d\x8c\x3c\xf8\x26\x18\x31\x10\x3d\xef\x9d\
\x9f\xd7\x6f\x0b\x8c\xd9\x7e\xbc\xdc\x35\x4a\x70\xfa\xa9\xbc\x89\
\x52\x29\xf7\xcd\xe6\xa1\xf7\x66\xf8\xd1\x69\x04\x8c\xc9\x46\xbb\
\xa2\x15\x75\x08\x7e\x00\x48\x1a\xec\xa9\x37\x81\x5c\x77\xfa\x09\
\x52\x9f\x12\xa2\x45\x05\x74\x90\x10\x60\x5f\x4b\x10\x77\xfd\xbd\
\x1c\x23\xee\x2d\xc5\xed\x4f\x94\x61\xf9\x0b\xe5\xd8\xfc\x61\x1d\
\x36\x7f\x54\x87\xea\xc3\x5a\xe2\xcc\x8e\xcf\x1b\x51\xf6\xef\x7a\
\xac\x7f\xf7\x10\x96\xfd\x69\x0f\x8a\x7f\xb9\x1d\xa4\xe4\x0d\x5c\
\xff\xb7\x5d\x58\x5f\xdf\x02\x23\x2c\x48\x2c\xea\xfe\xaa\xcf\x4f\
\xe0\xba\xff\xdb\x81\xeb\x7e\xb3\x13\xd7\x3d\xf7\x29\xae\xfd\xdd\
\x2e\x5c\xf3\xfb\xdd\xb8\x7a\xd5\x1e\x5c\xfd\xc2\x5e\x5c\xfd\xe2\
\x67\xb8\x76\xcd\xe7\x58\xf0\x97\x4a\xdc\xf5\xb7\x03\xb8\xfb\xef\
\xd5\xb8\xa7\xb4\x06\xf7\xae\xab\xc5\x3d\xeb\x0f\xe2\x9e\x7f\x1e\
\xc2\x3d\x6f\x1c\xc1\xbd\x6f\x1e\xc1\x3d\x1b\x8e\xe2\xde\xb7\xea\
\x71\xef\x3b\xc7\x70\xdf\x3b\xc7\xb0\xe8\x9d\xe3\xf8\xb4\xd5\x0a\
\x68\x3d\xe2\x71\x3c\xa4\x22\xd4\x1a\x82\x1a\x56\xb1\x94\x8d\xc7\
\x7f\xee\x2b\xb5\x78\x64\xd3\x71\xfc\xe5\x53\x1f\xde\xab\xd5\x8a\
\x84\x00\xa0\xb6\x29\x8c\xf7\x6a\x03\x78\xaf\x36\x80\x5f\x6c\x6f\
\xc4\xb7\x37\xd4\xc3\xf3\xca\x41\xdc\xfc\x9f\x06\x78\x03\x11\x6c\
\xf6\xda\x9b\x10\x8a\x65\x3d\xf6\xfe\xeb\x4e\xa1\xe3\xac\x86\x51\
\xb4\x23\x4e\x8a\x69\xe1\x49\xec\xda\x0b\x91\x78\xd1\x89\x13\xe3\
\x99\xcd\xdd\xb3\xb6\x83\xe3\x6b\xd9\x71\x93\xd9\x79\x65\x36\xc7\
\x14\x31\xc6\xb0\x06\x9d\x9f\x0d\xe8\x54\xa8\x04\x46\xb1\xd0\x6c\
\x6e\x7b\x0d\x8c\x09\x2f\xbd\xdd\xfc\x0c\x5b\xba\xa9\x95\x27\x04\
\x7e\x35\x2b\xfd\x75\x0d\xf8\x88\x01\x9f\xb0\x75\x35\xa6\x09\x90\
\x98\xd4\x8f\xc5\xa2\x19\x58\x09\x03\xee\xdf\xab\x4f\x62\xe9\xb6\
\x2a\xdc\xf8\xfb\x0f\x70\xe3\xef\xb7\x63\xd2\xff\xbc\x83\xcc\xef\
\x97\x62\xfa\xca\x6d\xf8\xd2\x0b\x1f\xe3\x86\x57\x77\xe2\xa1\xed\
\x35\x58\x53\xdb\x68\x96\x7a\x16\x3d\xd8\x88\x77\x57\xfa\x43\xf8\
\xe7\xf1\x56\xad\x1d\x6b\xc5\x1b\xc7\x5a\xf1\xaf\x13\xad\x78\xfb\
\x44\x2b\xde\x3e\xd9\x86\x8f\x9b\xdb\x51\xd9\x16\xc2\x07\xde\x00\
\xde\x6a\x08\xe0\x5f\x0d\x01\x6c\x68\x08\xe2\xcd\xc6\x20\x36\x34\
\x05\xb1\xa1\xa9\x1d\x1b\x1a\x03\x78\xb3\xa9\x1d\x6f\x36\xb5\x6b\
\xeb\x4d\xed\xd8\xe0\x0d\x61\x83\xaf\x1d\x47\x23\xaa\xe5\xe3\xe8\
\x93\x85\x57\x34\x05\x2c\x03\x95\x54\xfa\xda\xf1\x54\x95\x0f\xdf\
\xfc\xf0\x38\x2e\x7f\xbb\x0e\x97\xbf\x53\x07\xf2\x7c\x25\x0a\xd6\
\x1e\xc4\xe5\x1b\xeb\x70\xf9\xa6\xa3\xf8\xc9\xe7\x5e\xfc\xb9\x3e\
\x60\x9e\xda\x3c\x18\xc5\xe1\x60\xd4\x59\x99\xb1\xfa\x38\x3a\x03\
\xca\x72\x18\x93\x62\xea\x49\x2d\x7c\x0e\x7a\x0e\x0c\xd5\x7a\x75\
\x17\x75\x5e\xfd\x9e\x05\x30\x26\xaa\x14\x1b\x61\xfb\x4b\x10\xdf\
\x94\x99\x07\xc3\x4c\xe8\x09\x2a\x85\x35\xf7\x5f\x67\xa6\xfa\xf6\
\xf2\x6e\x7e\x86\xd9\xdd\xd4\x4a\x12\x02\x7f\x72\x46\xea\x13\x51\
\x85\x80\x2a\x0c\xe8\x00\xa2\x04\x31\xf5\x9f\xf0\xb9\x00\x2c\x04\
\xa8\x98\xa6\x98\x12\x96\xb9\x51\x68\x29\xe7\x3c\x34\xf6\xb3\x05\
\xdb\xf1\x2b\xe2\x0c\x5f\xc5\x31\x9f\xdc\x14\x37\xc6\xa4\x25\x61\
\xa0\x5b\x0f\x53\x9a\x9d\x94\x3c\x51\x68\x1a\x8b\x62\xb3\x4f\x11\
\x40\xa7\xab\xfb\xe5\x2d\xed\x40\x38\x6a\x06\xb0\x38\x62\x11\xa5\
\xd6\xd9\x85\x4c\xc3\x98\xc1\x34\x87\xc1\xc6\xc6\x80\xe9\xbd\x94\
\xf8\xf8\xef\x0c\xf8\x45\xf2\xc2\x6c\x07\x76\xb7\x14\x03\xec\xed\
\x4f\x49\xbd\x48\x71\xc1\x5f\xf1\xd3\x2b\x6a\xfb\xe5\x67\xd5\xeb\
\x12\x5f\x67\x02\xaa\xee\xf8\xe3\x24\x3f\x15\xe6\x94\xd3\xa5\xbe\
\xee\x29\xe7\xe7\x9c\xd0\x22\x08\x06\xc8\x4d\xc0\x13\xd5\x7c\x27\
\x91\x48\xb8\x51\x6c\x09\x41\x5e\x8a\x0b\x63\xd3\xdc\x18\xe0\x56\
\x38\xd0\x5b\x4f\xd6\x98\x16\x89\x65\xca\xc6\xfb\x28\xfa\x73\xe9\
\x76\x7f\x73\x54\x45\xb0\x85\x4b\xf6\xb1\x05\xbb\xb8\x8d\xda\xcc\
\x0e\x44\xcd\x4c\xa0\x3d\x82\x9d\xcd\xa1\xb8\x91\x0c\xf6\x2c\xeb\
\x56\x4c\x49\xef\x09\xa0\x4a\x3a\xdb\xc1\x0f\x00\xa1\xac\x8c\xd7\
\x75\xcf\xbe\x15\xf8\x30\xab\xfb\x31\xa0\x13\xeb\x24\x93\x4c\x33\
\x88\xea\xe7\x81\x33\x1d\x60\x1e\xd1\x56\x3c\x0f\x44\xe1\x06\xaf\
\xe4\x18\x83\x42\x30\x30\xd9\x85\x71\x69\x6e\xe4\xba\x95\x58\xe5\
\x21\x0f\xfa\x58\x6e\x82\x9e\x1a\x6b\xa9\x56\x84\x29\x83\xd1\x65\
\xe1\x31\xc6\xf1\x9f\x37\xf8\xb5\x31\xf9\xc4\x49\x48\x20\x82\xdc\
\x81\x01\xf0\xb3\x14\x99\xb6\x03\x9f\x34\x05\x6c\xff\x10\x81\x1d\
\x94\xca\x2e\x2b\xa9\xc7\xc0\x7f\xf0\x17\xd7\x7c\x47\x4d\x76\x53\
\x3b\xe0\x13\x18\xa0\x56\x78\x46\xc0\x80\x4b\x62\x5a\x00\x5b\x87\
\x96\x33\x10\x25\x4a\x2c\x4a\xa0\x9a\x86\xa4\x23\xd6\x1a\x7b\xd1\
\xf6\x67\xd7\x1a\x98\xe2\xc2\xb8\xb4\x24\x0e\xf4\xc4\x54\x83\x60\
\xb8\x07\x74\xc0\x73\x0c\x41\x00\x3d\x75\xf8\x18\xbc\xa7\xbf\xd2\
\x1f\x06\x6d\x8f\xc0\x24\xb5\x79\x06\x20\x4a\x79\xbb\x7d\xb6\xcc\
\x82\xfd\x46\x54\x7c\xea\x6b\x37\x69\x41\x02\xf0\x7d\x2b\xa7\x66\
\xac\x96\x5d\x56\x52\x8f\x81\x1f\x00\xd2\x87\xe4\xbc\x1b\x8d\xc5\
\xfb\x0d\xe0\xeb\xcb\x0a\x27\xfd\x63\x80\x87\xae\xf6\x23\x96\x27\
\x10\x15\xd4\x5a\x95\x08\xaa\x39\x3f\x23\x8d\xdd\x3c\x77\x0a\x41\
\x5e\xb2\x0b\xe7\xa5\xb9\xd1\xdf\xa5\x30\x90\x13\xee\x14\xdd\xd9\
\x08\x93\xbd\x2f\x82\x1e\x36\xa0\x57\x88\x51\xc8\x24\x7e\x98\xa0\
\x4a\xd1\xc4\x24\xb3\x69\xd2\xd0\x78\x0c\xc0\x6e\x7e\x40\xea\xc0\
\x10\xd8\x39\xff\x66\xf7\xb0\xcb\xee\x53\x7a\x69\x0e\x77\x49\x67\
\x39\xf8\x33\xb3\x33\x16\x10\x0e\x1c\x66\xe0\x13\x01\xf8\x10\x24\
\xb6\x21\x75\x55\x02\x50\x26\xf5\xb5\x5a\x01\x18\x65\xaa\xb6\x52\
\x9f\x6d\x57\x14\xf4\x4f\x52\x70\x5e\xaa\x1b\x79\x6e\xa6\xb8\x13\
\x23\xb4\x68\x48\x71\x98\xec\xfd\x58\x4e\x02\xdf\x18\x5b\x72\x11\
\x2d\x7c\x96\x88\xb3\xaf\xa2\xd1\x6f\x0c\xcb\xc5\x83\x9b\xc6\x61\
\x00\x80\xa0\xfe\xdb\x1c\x2f\xe0\x1f\x61\x4d\xfa\xf3\x3c\x8f\x23\
\x09\x7e\x49\x3d\x0f\xfe\xbd\x8f\xcd\xaa\xcd\x18\x94\x53\x45\x39\
\xc7\x1f\x55\x88\x03\xf0\x45\x75\x5f\x6b\x51\x26\x6d\xa3\x0c\x6c\
\x51\xde\xd6\x07\x8c\x7a\x75\xde\xb6\x57\x14\xe4\xa7\xb8\x70\x6e\
\xaa\x1b\x03\x62\x43\x59\x89\x8e\x46\xb3\x14\xd7\x41\x6f\x68\x02\
\x66\x7b\x9f\x9f\x64\x47\xb4\xf7\xf9\x0f\xa2\x33\x84\xfa\x40\x18\
\xa1\xd6\x76\x0d\xfc\xaa\xaa\x8d\xc1\x45\x29\xfb\x55\xb5\xa6\x52\
\xab\x54\x77\x9c\x60\x94\x67\x1e\x3c\x03\xd0\xd6\x3f\x6a\x0c\x18\
\x7f\x0a\x7b\x58\x05\x64\xcd\xca\xa9\x19\x5e\xd9\x5d\x25\xf5\x38\
\xf8\x01\x20\x77\x40\x56\xb1\x21\xfd\x99\x8d\xcf\x26\x8f\xb0\x00\
\xdf\xa4\xee\x1b\x4e\x3e\x5d\xcd\x57\x4d\xce\x42\xc0\x62\xd8\x2b\
\x9a\x4d\x5f\x98\xea\x46\x96\x5b\xd1\x6e\x03\xb3\x1d\xaf\x4b\x7f\
\x5e\xe5\x27\xec\xb9\x08\xe1\x9d\x7e\xfa\x3c\xc0\x06\xa0\x9d\xec\
\x7d\xd1\xd9\x17\x52\x29\x8e\x1e\x6f\xd6\x40\x4f\x99\xe4\x57\x55\
\x63\x5d\x15\x98\x80\x45\xd2\xdb\x7c\x48\x51\x33\x30\x99\x10\x14\
\x08\xab\xd8\xe9\x33\x22\x0a\x0c\xff\x4b\x64\x57\x95\xd4\x6b\xe0\
\xdf\xfd\xe8\xcc\x6d\x99\xc3\xf2\xb6\x18\xce\x3d\x07\x89\x0f\x02\
\xa2\x68\xa0\xd3\x33\x03\x4d\x4e\x3e\xa2\x17\x0a\xd9\x48\x7d\x97\
\x82\xdc\x14\x17\x46\xa5\xba\xd1\xcf\xa5\x18\xe0\xe5\xb4\x01\x95\
\x57\xef\x4d\xd2\xdf\x46\xe5\x87\x59\xe5\xd7\xd1\xc4\xc7\xf7\xf5\
\x0c\x66\xd1\xde\x77\x01\x38\xe8\xf5\xb3\x61\xb9\x18\xc8\x55\x6e\
\x44\x5e\x1d\xf4\xba\x36\xa0\x33\x05\xd5\xe1\x78\x91\x01\x88\xce\
\x08\x6e\xfb\xf6\x06\x7f\x8c\x6d\x01\x58\xf9\xab\x0b\x33\x6a\x64\
\x57\x95\xd4\x6b\xe0\x07\x80\x7c\x4f\xc6\x82\xb4\x64\x37\x8d\x07\
\x7c\xd1\x6b\xaf\x12\x9d\x11\x68\x00\x8c\x58\xd4\x7c\x6d\xa4\x9a\
\x9c\x64\x17\x46\x26\xbb\xe0\xe1\x1d\x79\x0a\x89\x49\x7d\x05\xbc\
\xe4\xe6\xbc\xfb\xba\x6b\x91\x3d\x83\x49\x3b\x20\xa2\xca\x6f\xc4\
\xf7\x75\xa6\x21\x7e\x0c\xdd\xde\x6f\x08\x46\xd0\xda\xd0\x66\x05\
\xbc\x4a\x0d\xfb\x3f\x66\x02\x70\x5a\x00\xaf\x19\x50\x36\x5d\x97\
\xca\x1d\x6b\x49\xf6\xe1\xb4\x00\x7d\x7c\xf3\xb0\x8a\x4f\xb5\xc1\
\x3e\x7d\x90\x52\x5f\x52\x5f\x00\xff\x8e\x87\x2f\xab\xcd\x1c\x92\
\xbb\x2e\x1e\xf0\x09\x57\x05\xa8\xdb\xf5\x11\xc5\xac\xee\xc7\x40\
\xa7\x28\xc8\x4e\x76\x61\x78\x8a\x1b\xd9\x2e\x23\x96\x6f\x80\x92\
\x98\xb6\xe9\xaa\xbe\x49\x19\x50\x38\xfb\x1f\x44\xf0\xf2\x9b\x55\
\x7e\xbb\x10\x1f\x05\xb1\xa8\xfc\x94\x00\x87\xeb\xbd\x82\xaa\xcf\
\x01\x9d\xf2\x0c\x80\x63\x0e\x54\x3c\x4e\x35\xb4\x03\xaa\x5a\x1d\
\x7f\x54\x74\x0c\x1a\xcf\xf0\xfe\x49\x3f\x00\x2c\xf9\xdf\x0b\x33\
\xbd\xb2\x9b\x4a\xea\x75\xf0\x03\x40\xf5\x93\x57\xde\x90\x9c\x95\
\x1e\xd0\x40\xaf\x98\x54\x7d\xc2\x4d\x26\x29\xda\xf9\x51\x85\x30\
\x6f\x3f\x01\x71\x69\xa0\x1f\x96\xe2\x36\x4b\x7a\xce\x76\xb7\x93\
\xfa\x31\x47\x1f\x8c\xe4\x20\x23\xc3\x96\xbb\x9f\x2e\xd9\x15\x62\
\xf6\xf2\x33\x2d\xa0\x23\x95\xff\x70\x43\x2b\x68\x30\x6c\x63\xe3\
\x0b\x00\xe6\x27\xe6\xb0\x63\x10\xbc\xb4\x37\x69\x07\x76\xa6\x80\
\xc0\x14\x5c\x24\xf0\xeb\x8b\x32\x57\xc8\x2e\x2a\xa9\xcf\x80\x1f\
\x00\x86\x0e\xc9\x5d\x48\x38\xef\xbc\x6e\xe3\xeb\xc0\x8f\xc4\x24\
\xbf\x66\xe7\xeb\xc0\x57\x14\x05\xb9\xc9\x2e\x0c\x4d\x76\x21\x3b\
\x06\x7a\xc3\x2b\x4f\x4d\xb1\xfd\x8e\xa5\xbe\x7e\x2e\x14\x03\xcc\
\x44\x07\x79\xcc\xae\x37\xbc\xfc\xa2\xf4\x17\x3f\x84\x42\x08\x5a\
\x42\x51\x34\x9d\x68\x36\x83\xd5\xa2\xe2\x8b\xa0\x17\xc1\x0e\xc1\
\xe6\xe7\x7f\x05\x6d\x40\xb5\x61\x02\x00\xe6\x66\xa7\x2c\x94\xdd\
\x53\x52\x9f\x03\x7f\xf9\xc3\x97\xbd\x92\x7d\x4e\xff\x2d\x7c\xf6\
\x9e\x2e\x65\x45\xe0\x53\x02\xb8\x98\x4d\x3f\x24\xc5\x85\x4c\x97\
\x12\x93\xee\x94\x1b\x95\x56\x0f\x1d\xea\x1a\x80\xe8\xe1\x87\xa2\
\xb0\x61\xc3\x88\x39\x6a\x00\x98\x35\x01\xdd\xa1\x48\x60\x5a\x8e\
\x25\xf6\x98\xb2\x01\xad\x2a\xff\xa1\x23\x0d\x9a\x93\x4f\xa5\x66\
\x80\x52\x07\x67\x1e\x6f\xeb\x53\x1b\xa7\x5f\xd4\x46\xe2\xab\x7c\
\xd8\x90\x0b\x15\x32\xe0\x8f\xf2\xa4\x6c\x79\xfb\x9a\x81\xaf\xc8\
\xee\x29\xa9\xcf\x81\x1f\x00\x6a\x9f\x98\x33\x3b\x92\x93\xe9\x05\
\x51\x62\x52\x34\xc2\xec\xef\x88\xa2\x01\x5f\x71\x11\xf4\x4b\x71\
\x23\x3f\xc5\x85\x0c\x06\xfa\x58\x96\xa0\x00\x76\x5e\xdd\x77\xf2\
\xf0\xab\x82\xad\x2f\x4a\x7d\xdd\xff\xa0\x70\x2a\xbe\xe8\xe8\x03\
\x17\x0e\xd4\xb5\x0d\x5d\xe5\x3f\xe6\xf3\x23\xd2\x16\xe4\xc2\x6f\
\x76\x92\x9f\x03\xac\xaa\x3a\x3b\xfc\x4c\xe1\x41\x01\xf8\x3a\xd0\
\x79\x06\xc0\x34\x06\x25\xcd\xed\xad\xba\x7e\xc8\x6c\xd9\x35\x25\
\xf5\x59\xf0\x03\xc0\xd8\x21\xb9\x45\xd1\x24\x17\x55\x89\xe6\xd4\
\x03\x73\xee\xb9\x14\x05\xd9\x29\x2e\x0c\x48\x71\x23\xc3\x65\x4e\
\xfb\xa5\x31\x50\x82\x93\xf2\x24\x26\x9d\x01\x6d\x78\x6a\x85\x73\
\x2a\x12\x21\x75\x58\xb4\xf5\x79\xa9\x6f\x9b\xf1\x27\x26\x04\x09\
\x85\x3c\x0a\x80\x88\x4a\x71\xe2\xf0\x49\xc1\x63\x6f\x27\xa9\x45\
\x06\x20\x66\xfc\x89\xea\xbf\x6a\xe3\x38\x14\x19\x00\x6b\x84\xd2\
\x2f\x79\x52\x8a\x64\xb7\x94\xd4\xe7\xc1\xff\x9f\x1f\x5d\x54\x5b\
\x58\x30\xf0\x47\x7a\xda\x2e\xdc\x0a\xb2\x92\x5d\xc8\x4b\x71\x21\
\xcd\xa5\x98\xa4\x31\x0f\x7c\x3e\x14\x17\x4b\x1a\x82\x91\xa4\xc3\
\xab\xfb\xfc\xf0\x61\xb1\xe4\x20\x4e\xea\x13\xbe\x70\x88\x6b\x0a\
\xbf\xae\x33\x1a\x18\x05\x48\x0a\x31\x57\xec\x1d\x3a\xdc\x00\x44\
\xa2\x42\x58\x4f\x75\x50\xd7\x05\x53\xc0\x14\xeb\x77\xf0\x05\x88\
\xcc\xc3\x86\x99\x4c\x1b\x90\xf6\xa3\x37\xae\xc9\xaf\x3d\x83\xfb\
\x93\xa4\xb3\x05\xfc\x00\xf0\xc9\x03\x97\xae\x28\x18\x96\xf7\x6c\
\x66\xb2\x0b\xfd\x93\xdd\x48\x75\x29\x5c\xb9\xaf\x33\xf0\xcd\xdb\
\x38\xef\xbe\xa0\xee\x9b\x8b\x7d\x10\xcb\xea\xd3\x8b\x87\x78\xa6\
\xa0\x92\x0e\xa4\xbe\x4d\x3a\x2f\x21\x40\x43\x4b\x00\x81\xc6\x16\
\xfb\x44\x1e\x93\x33\x0f\x82\x07\x9f\x67\x00\xd1\xf8\x4e\x42\xea\
\xe4\x30\xd4\x7e\x2f\x18\x90\xfe\xfc\xfb\x5f\x3d\x67\x85\xec\x92\
\x92\xce\x18\xf0\x03\xc0\xee\x47\x2e\xbb\x37\xcd\xad\xac\xa4\x5c\
\xe6\x1f\xed\x00\xf8\x44\x11\x81\x6f\x5f\x27\x80\xd8\xd0\x61\x86\
\x93\x8f\x1f\x42\x4c\xf7\xf0\x23\xa6\x3d\x28\x16\xa9\xaf\x87\xf7\
\x78\xdb\x9f\x10\x02\x17\x80\x70\x44\xc5\xb1\x9a\xe3\x82\xda\xae\
\x5a\xc3\x74\x54\x48\xe3\x55\x85\xd0\x5c\x4c\x8d\x8f\xda\xc4\xfb\
\x61\x63\x3a\x18\x0c\x60\x68\x76\x6a\xf9\xce\xf9\xc3\xbf\x23\xbb\
\xa3\xa4\x33\x0e\xfc\x00\x50\xf3\xf4\x97\x4b\xa0\x90\x35\x44\x04\
\xb9\x03\xf0\xed\x1c\x7c\x0a\x5f\x0c\xa4\x28\x5c\x7a\x30\xb1\x75\
\xf2\xe9\xf6\x01\x11\xe2\xfa\x26\x0d\xc0\x14\xfb\x37\x46\x1b\x06\
\xb4\x63\x8e\x1f\xf7\x02\xa1\x88\xd5\x23\x6f\x17\xd7\x57\xed\x24\
\xbe\x68\xcf\xc3\x9a\xea\x4b\x55\x1b\x8d\x41\xdb\xde\x3f\x2b\xa5\
\xea\xd0\xb7\x47\x4d\x96\x5d\x51\xd2\x19\x0b\x7e\x00\x38\xf8\xf3\
\x6b\x8a\xa1\x90\x35\x2a\x27\x79\xa9\xc2\xc0\xae\x30\x89\xac\x98\
\x6d\x73\xb3\x83\x0f\x6c\x66\x20\x63\xb4\x60\x22\x8c\x16\x04\x9b\
\x70\x5f\xec\x7e\x5c\x8c\x1f\x82\xad\x6f\x27\xf5\x5b\xdb\x82\xf0\
\x1d\x69\x30\x67\xe3\x89\x99\x78\x3c\xb8\xa9\xe8\x9d\x8f\xd3\x54\
\x07\x2d\x82\xdb\x9f\x97\x95\x56\x75\x72\xc1\xb9\x85\xb2\x1b\x4a\
\x3a\xe3\xc1\x0f\x00\xb5\xcb\xaf\x2e\x86\xa2\xac\x21\x8a\x62\xf2\
\xda\x13\x02\xa8\xa2\xc4\x27\x66\xe0\x53\x62\x67\xe7\x6b\xcd\x08\
\xf7\x39\x3b\xf9\x88\x62\x48\x7a\x70\xa6\x01\xff\xab\x13\x25\xc0\
\xe1\x03\xc7\x6c\xa4\x71\x1c\xb5\x5f\xb5\xf3\x05\x08\xd2\xdf\x16\
\xf8\xaa\x50\x04\x44\x31\xc8\x93\x56\x75\x7c\xe1\x18\x09\x7c\x49\
\x5f\x1c\xf0\x03\xc0\xc1\xa5\x73\x8b\x09\xc1\x1a\x1e\xa8\xa2\x73\
\x4f\x94\xf8\x94\x58\x52\xf7\x4c\x00\x86\x83\xba\x2f\x3a\xf9\x08\
\x67\x3a\xf0\x71\x7d\xde\xc3\xef\x02\x70\xbc\xae\x11\xf0\x07\xad\
\xd2\x5a\xb5\xf1\xc6\x5b\x34\x00\x6a\xef\xd8\xb3\xe4\xfa\xc3\xa6\
\xb8\x47\xc5\x10\x4f\x5a\xd5\xd1\x3b\xc6\x49\xe0\x4b\xfa\xe2\x81\
\x1f\x00\xaa\x9f\xbc\xb2\x58\x21\x64\xa1\x49\xe5\x4f\x14\xf8\x8a\
\xa1\xce\xeb\x19\x39\xc4\xc1\xf1\x67\xeb\xe4\x63\x8c\x46\x15\x4c\
\x04\x9d\xda\xdb\xc3\x68\x8c\xc5\xf4\xa9\x83\x3d\x2e\x86\xfa\x10\
\xff\xf8\x78\x61\x3e\x18\x36\xfe\x84\x41\xd9\xcf\x1f\xb9\x73\xbc\
\x04\xbe\xa4\x2f\x2e\xf8\x01\xe0\xc0\x13\x73\x56\x13\x42\xe6\x10\
\x42\x7c\x44\x51\x62\x5e\xfd\x44\x80\x0f\x0e\xf8\xe0\x1c\x7f\x10\
\xa3\x02\xcc\xa7\x60\xe7\xe4\x23\x31\x1f\x83\x21\xf5\x15\x02\xd4\
\x54\x1c\x01\x22\x11\xb3\x07\xdf\x12\x77\x77\xb0\xf9\xa9\x6a\xaf\
\xfa\xf3\x49\x3b\x76\xe6\x82\x42\xe8\x85\x43\x73\x17\xed\x2a\x1e\
\x2b\xbd\xfa\x92\xbe\xf8\xe0\x07\x80\xca\x25\xb3\xca\x40\x48\x11\
\x21\x64\xa7\x42\xcc\x5e\x7d\xe2\x08\x7c\x23\x5d\x17\xbc\x43\x4f\
\xdf\xc6\x00\x4d\xf5\x72\x5e\x98\x2b\x0a\x45\x27\x1f\x9f\xc3\xdf\
\x54\xdf\x04\xb5\xd9\xcf\x79\xe5\xed\x0a\x71\x44\x13\x80\x0a\x1a\
\x80\x2a\xa4\xf8\xc2\xde\x2f\xc0\xae\x95\x95\x96\x14\x98\x36\x34\
\x67\xe6\xc7\x0b\xc6\xae\x90\x5d\x4e\xd2\x59\x03\x7e\x00\xa8\x78\
\x74\x46\xcd\xfe\xff\x77\x79\x91\x42\xc8\x4a\x45\x50\xe1\xed\x80\
\x6f\x80\xde\x90\xe0\xb1\xb4\x3f\xbe\x34\x18\x82\xc3\x8f\x53\xf7\
\x79\x27\x9f\x9e\xc3\x1f\x8e\xaa\x38\x56\x5d\x6f\xa3\xba\xb3\xd9\
\x76\x2d\x36\xbb\xae\xb2\x27\x60\xe7\xdb\x69\x0e\x2a\xc5\xe8\xfe\
\x99\x1f\xfa\x1e\xb8\x38\xfd\xfd\xdb\xc6\x6e\x93\xdd\x4d\xd2\x59\
\x07\x7e\x9d\xf6\x3e\x72\x59\x09\x21\x64\x0e\x08\x6a\xa9\xa2\x40\
\x6b\x4e\xc0\xd7\x26\xeb\xe0\x81\xaf\x0f\x09\x46\xf4\xb1\x04\xb8\
\xb8\xbf\x9d\xba\x2f\x3a\xf9\x0e\x7e\x76\x88\x9b\x6a\xcb\x2e\xa4\
\x07\x7b\x90\x9b\x42\x75\x6a\x07\xfb\xb4\xeb\xa6\x24\xbb\x22\x53\
\x86\xf7\x5f\x54\x79\xef\xe4\x4b\x65\x37\x93\x74\xd6\x83\x1f\x00\
\xf6\x3c\x34\xad\x6c\xcf\x83\xd3\x0a\x54\x42\x1e\x07\x81\x0f\xb1\
\x01\x3e\x09\x74\x86\xa0\xcd\x0f\x68\x05\x3e\x9f\xc5\xa7\x03\x3f\
\x56\x9b\x2f\x24\x16\x89\xea\xbe\xcf\xdb\x8a\x50\x63\xb3\x90\x8b\
\x6f\xe3\xe1\xb7\xf8\x01\xec\xe2\xfc\xd4\x5c\xf9\xc7\x33\x02\x97\
\x42\xc7\x0c\xcc\xde\x12\x7c\x6c\x46\xd2\x7f\xee\x9c\xb0\x42\x76\
\x31\x49\x12\xfc\x02\x7d\xf6\x93\x8b\x97\x80\x90\x22\x10\xb2\x86\
\x77\xee\xa9\x6c\x5c\x3f\x3b\xe0\x13\xce\xde\x57\x63\xe3\xfe\xf3\
\xc0\x57\x84\x48\x80\xc6\x00\x22\x91\x28\xea\x3e\x3f\x64\x00\x96\
\x81\x96\xd8\x34\x45\xd5\x1a\x61\xcd\x62\xff\x8b\x6a\x3e\xc7\x0c\
\x46\xf7\xcf\xac\xba\x64\x44\xde\xcc\x7d\x8b\xa6\xce\x96\x5d\x4b\
\x92\x04\x7f\x3c\x06\xf0\xe3\x8b\x6a\x3e\xfb\xd1\x85\xc5\x50\xc8\
\xc8\x18\x13\xe8\x00\xf8\x54\xa8\x1b\x10\xed\x7c\xd1\xbb\xef\x02\
\xd0\x54\x75\x14\x08\x86\xa0\xa8\x2a\x5c\x2a\xd5\x1a\xa5\x70\xab\
\xe6\x96\xc4\xf6\xbb\x55\x0a\x37\xd5\x9a\x4b\xa5\x50\x38\xc6\x40\
\x6c\x06\xf4\x18\x95\x9b\x59\x75\x49\xe1\xa0\x19\x95\x0f\x4e\x2b\
\xdc\xfe\xbd\x89\xd2\xb6\x97\x24\xc1\x9f\x28\xed\xbb\x6f\x4a\xcd\
\xbe\x7b\x27\x17\xab\x84\x8c\x24\x84\xac\x89\x07\x7c\x22\x00\xdf\
\x94\xde\xab\x70\xde\x7e\x06\xfc\xb6\x16\x3f\x1a\xea\x4e\xc2\xa5\
\x52\x10\x0a\x28\x0c\xd0\xda\xba\x06\xec\x24\x55\x85\x8b\x52\x28\
\x14\xec\xd7\x00\xbd\x9b\xed\x4f\x8a\x1d\x0f\x28\xcc\xeb\x3f\xbc\
\x7f\xbf\xaa\x4b\x0a\x07\xcd\xa8\x7a\xe4\xb2\xc2\xed\x3f\x98\x24\
\x41\x2f\x49\x82\xbf\xb3\x54\x71\xf7\xa4\x9a\xfd\x77\x5d\x50\x0c\
\x42\x72\x54\x42\x16\x81\x90\x5a\x5b\xe0\x2b\x06\xf0\x89\x62\xae\
\x04\x24\x9c\x9d\x4f\x08\x70\x70\x6f\x2d\x92\x74\x49\xcf\x40\xed\
\xe6\x40\xed\x56\x55\x10\x4a\xe1\x52\x55\xa3\x51\xad\xb9\xb9\xfd\
\xda\xf1\x14\x48\x76\x47\x26\x9c\x93\x5b\x3a\xfd\xdc\xc1\x05\xb5\
\x8f\xce\x28\xdc\x7e\xf7\x64\x09\x7a\x49\x12\xfc\x5d\x45\xfb\xbf\
\x3f\xd1\x5b\xf9\xbd\x09\x2b\x2a\xef\x3c\xbf\x00\x0a\x99\x4c\x09\
\x59\x49\x38\x46\x60\x0f\x7c\x23\x8b\x4f\xf7\xfc\x37\x56\x1c\x41\
\x72\x6b\x30\x26\xc1\xdd\xba\x84\x67\x20\xd7\x99\x80\x0e\xec\xd8\
\xba\xca\xd6\x59\x53\x92\xdd\x91\xbc\xfc\xec\xf2\x89\xa3\xf3\xbf\
\x11\xf9\xe5\x35\x49\x3b\x1f\x9a\x7e\xc3\x7b\xf7\x4e\x91\x83\x6e\
\x48\x92\xe0\xef\x4e\xaa\xb8\xe3\xbc\xf2\xca\xdb\xc7\x95\x54\x2c\
\x1c\x5b\x00\x85\x8c\x24\x84\x2c\x52\x09\xd6\x11\x85\xd4\x8a\xaa\
\x3f\x9f\xc5\x17\xf5\xb7\xa3\xe1\xf0\x49\x4d\xda\xf3\x6a\xbc\x4a\
\xe1\x62\xea\x3f\xef\xc8\xa3\x6c\x9d\xa8\x14\x6a\x4a\x52\x84\xe4\
\x64\xd6\x0f\x1c\x3a\xa0\x74\xcc\x98\x21\x33\xda\x56\x5e\x9b\x54\
\xb3\x64\xd6\xe4\x8f\xee\xbf\x44\x0e\xaa\x29\x49\x82\xbf\x37\x68\
\xff\x6d\xe7\xd6\xec\xbb\xb5\x70\x45\xc5\x2d\x85\xf3\x2a\xbe\x35\
\xaa\x80\x10\x32\x52\x25\x64\x0e\x08\x79\x5c\x21\x58\x43\x08\xd9\
\xe2\x02\x6a\x15\x00\x95\xbb\xab\x91\x1c\x8e\x68\xde\x7b\x4a\xa1\
\x50\x95\xd9\xea\x86\x07\x9f\xa4\x24\x45\x22\x59\x69\xde\xa4\x01\
\x59\x55\xd9\x83\x73\xb6\x14\x8c\x1a\xb4\x68\xf4\x98\x21\x33\x9a\
\x9f\xfd\x4a\x52\xe3\xf2\xab\x06\xef\x7f\x74\xc6\x0d\x1f\xdd\x7f\
\x89\x54\xeb\x25\x49\xf0\xf7\x39\x66\xf0\x8d\x82\x9a\xca\x9b\x47\
\x94\x55\x7e\x7d\xf8\x92\xfd\x37\x0d\x2f\xde\xf7\xb5\x61\xb3\xf7\
\x7e\x6d\x58\xc1\x9e\xf9\xc3\x88\xba\xfc\x2a\xd2\xfa\xdc\x3c\x52\
\x50\x38\x68\xc6\xf0\x82\xfc\x45\x43\x46\x0e\x5a\x34\xe2\xdc\xc1\
\x33\xbc\xab\x6e\x24\xde\x55\x37\x12\xef\xf3\xf3\x49\xd3\xaf\xaf\
\x4b\x6a\xfd\xe5\x97\x73\x4e\x2c\x9b\x5b\x58\xfb\xc4\x9c\xd9\xe5\
\x0f\x4f\x5f\xf1\xef\x07\x2e\x95\x60\x97\x24\xc1\xff\x45\xa0\x5d\
\x0f\x5c\xba\x6d\xf7\xc3\xd3\x57\xec\x7d\x78\xfa\x8a\x9d\x8b\xa7\
\x49\x60\x4b\x92\x74\xb6\x80\x5f\x92\x24\x49\x12\xfc\x92\x24\x49\
\x92\xe0\x97\x24\x49\x92\x04\xbf\x24\x49\x12\xfc\x92\x24\x49\x92\
\xe0\x97\x24\x49\x92\x04\xbf\x24\x49\x92\x24\xf8\x25\x49\x92\xf4\
\x05\xa2\xff\x0f\x62\x20\x19\xf4\x2f\xa5\x49\x2e\x00\x00\x00\x25\
\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\
\x32\x30\x31\x37\x2d\x30\x33\x2d\x32\x36\x54\x32\x31\x3a\x30\x35\
\x3a\x30\x35\x2b\x31\x31\x3a\x30\x30\x36\x6e\xa9\x1b\x00\x00\x00\
\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\
\x00\x32\x30\x31\x37\x2d\x30\x33\x2d\x32\x36\x54\x32\x31\x3a\x30\
\x35\x3a\x30\x34\x2b\x31\x31\x3a\x30\x30\xe1\x44\x1a\x13\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x83\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x28\x00\x00\x00\x28\x08\x06\x00\x00\x00\x8c\xfe\xb8\x6d\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x2e\x23\x00\x00\
\x2e\x23\x01\x78\xa5\x3f\x76\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe2\x0b\x1e\x00\x33\x3a\x62\x29\xbd\x16\x00\x00\x0b\x10\x49\x44\
\x41\x54\x58\xc3\xed\x98\x7b\x8c\x5d\x57\x75\x87\xbf\xb5\xf6\x3e\
\xf7\x31\xf7\xce\x9d\x19\xcf\xd8\xf1\xcc\x78\xfc\x1a\x3f\xb0\x9d\
\x04\x8c\x13\x12\x12\x4a\x48\x9c\x12\x4a\x13\x09\xe2\xd2\x34\xb4\
\x2a\x12\x69\xa4\x84\x96\x12\x90\x50\xe9\x23\x40\x48\x69\x8b\x48\
\x1f\xa0\x28\x4e\x9d\xb6\x40\xdb\x50\x52\xb5\x51\x4b\x65\xaa\x10\
\x12\x19\x5a\xa7\x20\x3b\x31\xc5\x71\x62\x27\xf1\x6b\x5e\x76\x66\
\x3c\xef\x3b\x73\xef\x3d\xf7\xec\xbd\xfb\xc7\xb9\xf3\xb0\x5d\xbb\
\x49\x09\xfc\xc5\x91\x96\xf6\x3e\xf7\xea\x9c\xf3\x69\xfd\xd6\xda\
\x7b\xad\x0d\x3f\xbb\x7e\xbc\x4b\xfe\xbf\x0f\x16\xff\x60\xbf\xaa\
\x6a\x6b\x08\xac\x51\xa3\x9d\x08\x5d\xd9\xac\x6d\x53\xa3\x38\x2f\
\xe3\x91\xd5\x21\x1b\xe9\xe9\x75\xab\x8b\xc7\x9a\x0b\x76\xe2\x5f\
\xb7\x2f\xf5\x3f\x15\xc0\xc2\x27\xf7\x96\x02\x7a\x75\x10\xb9\x19\
\xd5\x6b\x44\x75\xad\xaa\x36\x89\x91\xac\xaa\xa2\x46\x51\x63\x30\
\x46\x6b\x51\x46\x67\xd7\xaf\x69\x3e\xda\xda\x1c\x3d\x63\x44\xfe\
\xcd\x28\x3f\x50\x91\xe9\x6f\xbc\x73\xc9\x1b\x0f\x58\xbc\x67\x4f\
\x21\xa9\x87\x1b\x83\xca\xdd\xa8\xb9\x16\x63\x8a\x62\x14\x31\x06\
\x55\x45\x8c\xa2\x46\x16\x03\xa2\x46\xe9\xee\xcc\xd3\xbd\x3c\x4f\
\xa4\x32\x6d\x55\xf6\x1a\x91\x9d\x93\x93\xb5\xa7\xbe\x75\x4b\xe7\
\xcc\x1b\x02\x58\xbc\xeb\x09\xf1\x9e\x8d\x5e\xe4\x5e\x1f\xe4\x16\
\x31\xa6\x19\xa3\x60\x2c\x62\x53\x40\x31\x06\x35\x8a\xcc\x01\xea\
\x02\x60\xb1\x18\xb1\x7e\x6d\x91\x5c\xc6\x10\xa9\xa0\x30\xfd\xfc\
\xc1\xe1\xdd\xc3\xa7\xa7\xef\x6f\xca\xe9\x8b\x7d\xf7\x5e\x11\x2e\
\xf6\x7d\x73\xb1\x3f\x73\xb7\xff\xb3\x7a\xe7\x6f\xf2\xce\xef\x0a\
\x9e\xed\x40\x56\x00\x44\x10\x99\x1b\x05\x54\xd3\xb1\x71\xbf\xd8\
\x42\x80\x62\x21\x22\x9b\xd1\xf4\x99\x40\xb6\xaf\x6f\x72\xcb\xcc\
\x54\xf5\xba\xfa\x6c\xed\x98\xef\x7d\xdf\x31\x77\xf0\x1b\x17\x84\
\xd4\x0b\xfd\x91\x79\xff\x3f\xa8\xaf\x27\x37\xfb\x38\xd9\x15\x12\
\x77\x19\xde\x09\xde\x83\xf7\xe0\x16\x8f\x01\xbc\x27\x84\x00\x8b\
\x6c\x6e\xea\x5c\x60\x62\x2a\xc6\x85\x80\xf7\x81\x00\xd8\xc8\x48\
\x80\x4b\xe3\x4a\xbc\x8b\x10\x6e\xce\xdc\xf6\xb8\xbe\x2e\xc0\x25\
\x77\xfc\x8b\xa8\x84\x9b\x70\xfe\x41\x9c\xeb\x39\x0b\xea\x22\x16\
\x42\x68\x18\x04\x1a\x73\x02\xd3\xe5\x84\x38\xf6\xb8\x00\x01\x88\
\x32\x06\x54\xf1\x3e\xf4\x00\x0f\xaa\x91\xf7\xac\xf9\xec\x5e\x79\
\xcd\x80\xd5\xb1\xf2\xe6\x50\x4f\xbe\x88\xf7\x3d\x67\x41\x5c\x00\
\x52\xfc\x9c\x27\xcf\x86\x83\xd4\x8b\x71\xec\x29\xcf\x24\xf8\x10\
\xf0\x80\x18\x25\xa8\x12\x54\xc0\x98\x9e\x80\xfc\xe9\xf0\x89\xb1\
\xcb\x5e\x13\x60\xd3\x2d\x5f\x6d\x76\xb5\xf8\x33\xc1\xb9\xcd\x78\
\x77\x3e\x88\x9b\x03\xf2\x48\x43\xde\x39\x99\x99\xf7\x60\xea\xaa\
\xb9\xb9\xf7\x81\xc9\xe9\x04\xe7\xd3\x79\x36\x67\x09\x46\x41\x0d\
\x58\x4b\x50\xdd\x98\x24\xfe\xd3\xc5\x0f\xef\x2e\x5d\x14\x70\xed\
\xa7\x9e\xc2\xd7\xe2\x9b\x42\x3d\x79\x2f\xce\xcb\xfc\x87\x17\x5b\
\xf0\x0b\x50\xc1\x2f\xb2\xd0\xf8\x6d\x31\xdc\xc2\xbb\x67\x67\x13\
\xaa\xb1\xc7\x13\xb0\x51\x2a\x71\x30\x0a\x91\x85\x28\x92\xa0\xfa\
\x0b\x3e\xf0\xde\x2d\x0f\x3d\x7f\x61\xc0\xd3\xcf\xbd\xd2\x1a\x12\
\xf7\x9b\x38\x5f\xb8\x78\xbc\x85\x79\x59\xa5\x21\xad\xcc\x81\xfa\
\x34\xee\x1a\x02\xcf\x27\x4b\x3d\x09\x0d\x99\xd3\x38\x14\x55\x82\
\xb1\x04\x6b\x53\x48\x6b\x9b\xea\x33\xf1\x47\x5e\xf9\xf7\xe7\x97\
\x5c\x10\xd0\x55\xe3\x6b\x42\x92\x5c\x79\x2e\xcc\x42\x9c\x35\x64\
\x9d\xf3\xd8\xfc\xb8\xe0\x3d\x69\x78\x90\xc6\xc0\x22\xd0\xe9\x69\
\x87\x73\x01\x51\x41\x45\x10\xa3\x10\x45\x84\x28\xc2\x27\xe0\x26\
\x2b\x57\x78\xcf\xcf\x2d\x66\xb2\x73\x93\xd2\xcf\xef\xb4\x71\x25\
\xde\x11\x02\x05\x10\x10\x45\x42\xa0\xbb\x3d\xcf\xe5\xeb\x3a\xe8\
\x5a\x5a\x24\x88\x30\x34\x51\xe3\xd0\xd0\x0c\x43\x53\x75\x14\xd8\
\xd2\x55\xa0\xd4\x9c\x45\x8c\x01\x63\xc0\xa6\xa3\x18\xa5\x1e\x84\
\x17\x47\x63\x92\x74\xc9\xa4\xd5\x0a\x5b\x5b\x23\xda\x3a\x32\x0c\
\x16\x85\x97\x06\x23\x7e\x34\x38\xc3\x99\x31\x8f\x9f\xaa\x12\x30\
\x79\x1f\xe4\xd6\xdc\xaf\xee\xde\x5d\x7d\xf4\x17\x93\xb3\x00\x5d\
\x5c\xef\x08\x89\xbb\x0a\x49\xe1\x32\x59\xe1\x57\x6e\x5c\xc7\x3d\
\x1f\xb8\x9c\x75\xdd\x25\xb2\x51\xba\xa6\x57\xeb\x8e\xa3\xa7\x67\
\x78\x60\xf7\x51\x76\xff\x68\x84\x3f\xda\xb1\x9e\xab\xd7\xb5\x2e\
\x6c\x4a\x8b\x16\x8b\xc1\xa9\x3a\xbf\xf4\x58\x3f\xa3\x55\xcf\x8e\
\x4d\xcd\xdc\xb5\xad\x8d\xde\xb6\x88\x8c\x11\x02\xad\xcc\xc6\x9e\
\x03\x03\x65\xfe\xf0\x1f\x5f\xe2\x3b\x7d\x10\xac\x25\x08\x57\xf9\
\x24\x5c\x02\x0c\x9e\x0d\x58\x8d\xd7\x06\x1f\x56\x62\x0c\x4a\xe0\
\x43\x37\x6d\xe0\x8f\xef\x7e\x3b\x85\x7c\xc4\x89\xd3\x65\x0e\x0f\
\x4c\x91\xcf\x1a\x36\xae\x68\xa1\x77\x79\x81\xee\xb6\x1c\x12\x20\
\x6b\x95\xa6\x8c\xe1\xe8\x48\x85\x57\xa7\xeb\xa9\xab\x44\x40\x85\
\x91\x19\x87\x0b\xf0\xc1\xcb\x5a\xb8\xf7\xba\xa5\x14\x23\x65\xa0\
\x9c\x30\x30\xe3\x70\x71\xc2\xc6\xf6\x2c\xef\x58\x5b\xe2\xaf\xee\
\xdc\xc2\x87\x67\x63\xbe\xf3\xc3\x11\x02\xa1\x3b\xa9\xb9\xd5\xe7\
\x01\x12\xc2\x6a\xbc\x6f\x42\x84\x75\xdd\x2d\x7c\xe2\xf6\xad\x14\
\x9b\x32\x7c\x7b\xdf\x00\x9f\xdc\xb5\x8f\x93\x67\x2a\x98\x8c\x65\
\xc3\xca\x56\xde\xf6\xa6\x65\x3c\xf6\xfd\x21\xbc\x2e\xec\x94\x3b\
\xbf\x37\xc4\xdf\xed\x1b\x81\xc8\x22\xd6\xa4\x81\x6f\x0c\x6b\xdb\
\x73\xfc\xf6\x55\x4b\x28\x46\xca\x53\x27\x67\xf9\x93\xef\x4f\x50\
\x8b\x84\x81\xbe\x49\x36\x64\x3d\x7f\x76\xcb\x4a\xb6\xf4\x14\xf9\
\xbd\x5f\xde\xc8\xb3\xc7\xa6\x18\x2f\xc7\x79\xb1\x66\x0d\xb0\xf7\
\x2c\x40\x21\xac\xc5\x07\x83\x04\xae\xbd\xbc\x93\xd5\xcb\x4b\x8c\
\x4e\x56\xb9\xff\x6b\xcf\xf2\x52\xff\x04\x51\x26\x22\x6f\x84\x93\
\xaf\xce\x70\x72\xb4\x1f\xb5\x06\x5d\x94\x62\x3d\x6d\x59\xde\xd2\
\x53\x48\x63\xb1\x11\x87\x47\x27\x1d\xef\x5a\x5d\xa0\xab\x39\xe2\
\xd5\x99\x84\x2f\xfc\xd7\x18\x2f\x8f\x27\x34\x17\x2c\xe5\xd9\x3a\
\x4f\x1f\x18\xe6\x8b\x3e\x61\xd7\x5d\x97\xb2\xad\xb7\x85\xcd\xab\
\x4a\xec\x7d\x71\xcc\x98\x6c\xb4\xc9\x9d\x9b\x24\x3e\x4e\x56\x80\
\x40\x80\x2d\x6b\xdb\x51\x15\x4e\x9c\x9a\xe6\xe5\xfe\x09\x24\x04\
\xb6\xac\x6a\xe1\x81\xbb\xae\x22\x97\xb5\x20\x42\x2d\xf1\x7c\xe6\
\x9f\x5e\x62\x2e\x57\xef\xbe\xae\x9b\x3b\xdf\xd1\x35\x1f\x83\x3e\
\xc0\xc7\xbe\x35\xc4\x86\xf6\x0c\x2a\xd0\x3f\x95\x70\x72\x22\x01\
\x84\x4a\xd5\xe1\xa6\x62\x40\xd8\x7f\xa2\xcc\x58\x39\x61\x59\x4b\
\x44\xef\xf2\x22\xcf\x1c\x99\x24\xa9\xb9\x8e\xf3\xb2\x18\x1f\x1a\
\x2f\x0f\x24\x2e\x2d\x7e\x23\x2b\x58\x93\x7e\x31\x97\x31\xac\x5a\
\x56\xa0\x90\x8f\x68\x69\x8a\xa8\x25\x9e\x62\xce\xce\x67\x45\xff\
\x58\xf5\xac\x18\xf4\x02\xe3\x55\x87\x6f\xac\x35\x56\x85\x4c\x46\
\xc8\x65\x0d\xc1\x79\x9a\x36\xb6\xa3\x1b\xda\x59\x8a\xc7\x46\x8a\
\x0f\x90\x64\x2c\x5a\xc8\x42\xe2\xf1\xe7\x02\x9a\x6c\x34\x9c\xc4\
\x0e\x10\x0e\xbe\x7c\x06\xe7\x02\x6b\x3a\x4b\x5c\xb1\x71\x19\x4f\
\x1e\x18\xe2\xf9\x13\x13\xdc\x7a\xff\x1e\x96\xb5\x17\xd8\xf9\x91\
\xb7\xd1\xd1\x92\x3d\x2b\x63\x77\x7e\x6f\x90\xbf\xdf\x7f\x06\xac\
\x45\xa2\x74\x0b\xab\xab\x61\x65\x4b\x06\xe7\x03\xbd\x4b\x22\x6e\
\x78\x53\x91\xfd\xc3\x35\x06\xfa\x2b\x58\xab\x6c\xee\x2d\xf1\xbe\
\xd5\x4d\xb4\x35\x59\xa6\x6a\x9e\xd1\x52\x89\xb6\xcd\x0a\xde\x4d\
\x8c\x9c\xbb\x50\x7b\xe7\x5f\x41\xc4\x23\xb0\xf7\xbf\x87\x38\xd2\
\x37\x46\xa9\x90\xe1\x73\x77\x5c\xc9\xf5\x5b\xbb\x89\xac\x32\x38\
\x5a\x21\x6b\x0d\x19\xab\xf3\x2b\xca\x1c\x63\x31\x6b\x69\x2f\x44\
\x74\x14\xd3\xb1\xbd\x10\x71\x49\x73\xc4\x73\x13\x31\xc7\xcb\x09\
\xa5\x8c\xf2\x5b\x97\x96\xd8\x9c\x13\xf2\x40\x51\xe0\xca\x92\xe1\
\xd6\x35\x79\x8c\xc0\xf3\x13\x09\x71\x2e\x4f\xd7\x8a\x25\xbe\xbb\
\xa7\xe3\xf0\xf9\x12\xab\x39\x0e\xa1\x22\xa2\x85\xbe\xe1\x32\x9f\
\xff\xca\x3e\xfe\xfc\xe3\xd7\xf1\xe6\x75\xed\x3c\xfa\xfb\xd7\xf3\
\xca\xd0\x14\x22\x42\x6f\x57\x33\xad\x85\x0c\x43\xe3\x55\xca\x35\
\x37\x4f\xf8\xb1\x1b\x56\x70\xc7\xb5\x9d\xa9\xc4\x8d\xa2\x76\xb8\
\xea\xf8\x9d\x67\xc7\x78\xf8\xf0\x14\x9f\xba\xbc\x95\x0d\x2d\x11\
\x7f\xb1\xfd\x12\x8e\xbd\xb5\x8d\xac\x11\x56\xb5\x66\xc8\x5a\xe1\
\x78\xd9\xf1\xe4\xab\x31\x6b\x3a\x33\xb4\xcf\xfa\xca\x54\xc5\x1f\
\xe5\x7c\x89\x33\x27\xa8\x27\x43\x41\x74\x3d\x08\xdf\xfc\xcf\x13\
\x4c\x55\x1d\x1f\xfd\xc0\x9b\xd9\xb6\x71\x29\x9b\x57\xb5\x02\x30\
\x31\x53\xe7\xbb\x2f\x0c\xf1\xc8\xd3\x7d\x1c\x1a\x9c\x66\xb2\x92\
\x30\x5a\xae\x83\x40\x21\x67\x1a\x95\x75\xfa\xce\x72\x08\x88\xc0\
\x77\x4f\x55\x99\xae\x8c\xb2\xa3\x3b\xcf\xd6\xce\x3c\xeb\x96\x64\
\x00\x38\x33\x9b\xb0\x7f\x38\xe6\x99\x29\xcf\x58\x1c\x88\x8c\xb0\
\xbc\xc5\x0e\x75\xb6\x70\xfc\x89\x73\x7b\x92\x96\x77\xff\x65\x26\
\x2e\x57\xff\x36\x04\x6e\x9b\xdb\xb2\xc4\x58\x8a\xcd\x39\x7a\x7b\
\x5a\x69\x2e\xe6\xc0\x28\x23\x33\x09\xfd\xe3\x35\x6a\x41\xd1\xc8\
\xb2\xba\xb3\x99\x52\x29\x4b\xae\x25\x47\xae\x25\x8b\x64\x2c\xc1\
\x2a\x58\x83\x17\xe5\x74\xc5\x41\x80\x33\x03\x65\x6a\x13\x35\xd6\
\x76\xe4\x68\xc9\x5b\x44\x85\x91\x59\xcf\x78\x02\x5b\x37\xb5\xd1\
\x52\x88\xc8\x18\xc5\xaa\x3c\xa6\xc2\xaf\x7f\x69\x5b\x31\x3e\xaf\
\x69\x6a\x7a\xd7\x43\xb7\x79\xe7\xbf\x86\x6a\x36\xdd\x5b\x1b\x8b\
\xae\x35\x88\xb5\x8d\xbd\x76\x61\x21\xb6\xb9\x88\x7c\x6b\x9e\x7c\
\x5b\x1e\xdb\x14\x35\x4a\x27\x43\x68\x8c\xd6\x28\x91\x0a\xb5\xa9\
\x98\xc1\xa3\x93\x04\x0f\x62\x04\x99\x6f\x4f\x15\x63\x94\xf5\x2b\
\x9b\xd9\xbc\xa6\x44\xc6\x68\xcd\xa8\x7c\xe8\xc1\x2b\x8a\x8f\xfd\
\xaf\xd5\x8c\xc9\x66\xf6\x88\xb5\x87\xe6\x36\x7e\x31\x0a\x46\x41\
\x17\x4c\x54\x30\x19\x4b\xb1\x2d\x47\x7b\x57\x91\xe6\xf6\x3c\x36\
\x6b\x40\xd3\xed\x8d\x86\xc4\x82\x20\x80\xab\x7b\x46\xfa\x26\x71\
\xb5\x3a\xde\x7b\xbc\x0b\x04\xef\xd3\xb9\xf7\x38\x17\x18\x18\xae\
\x30\x53\x71\x00\x87\x20\xec\xb9\x60\xb9\x95\x6d\xce\x0f\x6b\x26\
\xda\x85\x31\xb5\x14\x32\xad\x7a\x45\xd3\x02\xd3\x66\x2d\xcd\x4b\
\xf2\x74\x74\x15\x69\x69\xcf\x63\xb3\x16\x4c\x03\x4c\x15\x44\xd2\
\x32\x5e\x04\xd1\x54\x9e\x89\xc1\x29\x2a\xe3\x15\x70\x0e\x92\x04\
\x5c\x42\x70\x9e\xe0\x52\x58\xef\x3d\x33\x95\x84\x93\xa7\x67\x6a\
\x3e\x84\x47\x08\x0c\x5f\xb0\xed\x9c\x7d\xe1\x71\x72\x9b\x77\x1c\
\x45\xcd\x5b\x51\x5d\x4f\xa3\xe7\xb5\xd9\x88\x62\x5b\x9e\x96\x8e\
\x02\xf9\x52\x0e\xcd\xda\xc6\x76\xa6\x60\x35\x95\x7e\x6e\xde\xb8\
\x57\x55\xea\xd3\x55\xce\xbc\x3c\x8a\x4f\x12\x24\x90\x5a\xa3\x3e\
\x9c\xcf\x25\x11\x82\x08\x33\x55\xf7\x74\x2e\x6b\xee\xfb\xfa\x0d\
\xed\x95\x8b\xf6\x24\xeb\xde\xb9\x69\x42\x33\xd1\x7d\x62\xcd\xf1\
\x28\x1b\x51\x5a\xd2\x44\x47\x57\x89\x52\x7b\x01\x9b\xb3\x67\x49\
\x1e\xe6\xe7\x02\x66\xce\x7b\x8d\x1e\xd9\x79\xc6\x8e\x9e\xc1\xcd\
\xd6\x20\x69\x78\xaf\x61\x92\xb8\xc6\x6f\x0d\x73\xfe\x64\xe2\xc3\
\x7d\x3e\x62\xfc\xff\x6c\xdc\x4f\x3f\xf9\x08\x76\xc3\xad\xa7\x4a\
\x4b\x9a\x86\x5b\x97\x16\xb6\x37\x95\x72\x39\xcd\xd8\x05\x4f\x35\
\x32\x34\x34\x0a\x02\xce\x29\x54\x31\x8a\xaa\x50\xee\x1f\x67\xea\
\xe4\x58\x5a\x85\xcf\x55\xda\x84\x86\x17\xc3\xbc\xf7\x54\x98\xb4\
\x56\x7f\xb7\x9e\x84\xdd\x2f\xfc\x46\x6f\x78\x4d\x27\x0b\xf5\x23\
\x8f\x87\x15\x37\xde\x71\xc4\x64\xa3\x72\xb0\xe6\xed\x58\xcd\xcd\
\xc3\x2d\x82\x5c\x6c\x61\x51\x62\xb9\x72\x8d\xd1\x83\x03\xf8\x5a\
\x7d\xbe\x05\x98\x93\x57\x42\x68\x24\x11\x08\x32\x09\x7c\xda\xa8\
\x7c\x65\xfc\xb3\x5b\x93\xd7\x75\xb2\x70\xf8\x81\x77\xc7\xc1\xe8\
\xc3\x18\xfd\x28\xd6\x9c\x48\xe1\xd2\x18\x0b\x73\x31\x77\x96\xa5\
\x32\xe3\x03\x13\x47\x4e\x53\x9f\xaa\xcc\x4b\x28\xf3\xf2\x3a\x70\
\x0e\x49\x1c\xe2\x5d\xbf\xd5\xf0\x09\xf1\xfe\xe1\xd1\xcf\x6d\x8b\
\x5f\xf7\xd1\x07\xc0\x91\xcf\x5f\x1f\x63\xcd\xd7\x31\xfa\x41\x8c\
\xf9\x36\xd6\xc4\x61\x3e\x31\x16\x2c\xcc\x2f\x41\x4a\x65\x68\x9c\
\xd9\xfe\x33\x88\x6b\x80\xb9\xa4\x01\x99\xde\x8b\x73\x75\x0d\x7e\
\x4f\xa4\xf2\x6b\x21\x4e\xbe\x3a\xf9\xc0\x35\xf1\x8f\x7d\xfc\xb6\
\xf1\x0b\x3f\x00\xa3\xed\xc1\xe8\xed\x18\x73\x27\x56\x37\x61\x4c\
\x84\xd5\xf9\x58\x14\xab\xf8\xd9\x98\xe1\xff\x38\x42\x3c\x31\xd3\
\x78\xb9\x20\xda\x38\x48\x32\x26\x11\xa3\x87\x35\x13\xfd\x4d\xbe\
\x25\xff\x68\xe7\x9a\xf6\xe1\x03\xf7\xbc\xe5\x8d\x3d\xc0\xdc\xf0\
\xe5\x1f\x0a\x46\xbb\xb0\xba\x1d\x63\xde\x1f\xac\xd9\x8a\xd5\x4b\
\x30\x26\xab\x82\x8c\x3f\x77\x9c\xa9\xc3\xa7\xd2\xf6\x33\xcd\x81\
\xaa\xa8\x8e\x88\xd1\x03\x26\x13\x7d\x53\x23\xf3\x44\xae\x98\x1b\
\x3c\xf5\xd0\x7b\xc2\x4f\xf4\x08\x78\xc3\x5f\xbf\x08\xaa\xd9\x60\
\xb4\x07\x6b\xd6\x88\xd1\x4d\x6e\x72\x76\xc3\xe9\x27\x0f\x46\x6e\
\xb6\x86\xaa\x38\x63\xf4\x68\x80\x43\xc6\x9a\x63\x62\xb4\x6f\xfd\
\xd5\xbd\xd5\x7d\x1f\xdf\xf6\xb3\x43\xf7\x9f\xfa\xf5\x3f\xe5\x4a\
\x50\xe4\x07\x90\xdf\x8f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x10\
\x0a\x8a\xcd\x47\
\x00\x6e\
\x00\x65\x00\x61\x00\x72\x00\x65\x00\x73\x00\x74\x00\x5f\x00\x62\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x69\x00\x6e\x00\x67\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x05\xe0\x84\x67\
\x00\x67\
\x00\x65\x00\x6f\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0c\xcd\xf0\x47\
\x00\x67\
\x00\x65\x00\x6f\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x04\x3e\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x37\x4d\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x04\x3e\
\x00\x00\x01\x66\xe8\x91\x28\x26\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x67\x0b\x6e\x20\xd0\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x37\x4d\
\x00\x00\x01\x67\x62\x19\xd2\xf4\
"
qt_version = QtCore.qVersion().split('.')
if qt_version < ['5', '8', '0']:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 209 | 0 | 46 |
0abe88533c9c01d4af058f79c7a784e84858d57f | 3,881 | py | Python | pymatgen/io/abinit/tests/test_abiinspect.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | pymatgen/io/abinit/tests/test_abiinspect.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | pymatgen/io/abinit/tests/test_abiinspect.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.abiinspect import *
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
try:
import matplotlib
matplotlib.use("pdf") # Use non-graphical display backend during test.
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
class YamlTokenizerTest(PymatgenTest):
"""Test YamlTokenizer."""
if __name__ == '__main__':
import unittest2 as unittest
unittest.main()
| 26.951389 | 105 | 0.605256 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.abiinspect import *
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
try:
import matplotlib
matplotlib.use("pdf") # Use non-graphical display backend during test.
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
def ref_file(filename):
return os.path.join(_test_dir, filename)
def ref_files(*filenames):
return list(map(ref_file, filenames))
class YamlTokenizerTest(PymatgenTest):
"""Test YamlTokenizer."""
def test_base(self):
string = \
"""---
none: [~, null]
bool: [true, false, on, off]
int: 42
float: 3.14159
list: [LITE, RES_ACID, SUS_DEXT]
dict: {hp: 13, sp: 5}
...
this is not a YAML document!
and the tokenizer will ignore it
--- !Monster
name: Cave spider
hp: [2,6] # 2d6
ac: 16
attacks: [BITE, HURT]
...
This is not a proper document since it does not start with ---
the end tag below is ignored
...
--- !Monster
name: Dragon
hp: [2,6] # 2d6
ac: 32
attacks: [BITE, HURT]
...
"""
#for i, line in enumerate(string.splitlines()): print(i, line)
fd, filename = tempfile.mkstemp(text=True)
with open(filename, "w") as fh:
fh.write(string)
doc_tags = [None, "!Monster", "!Monster"]
doc_linenos = [1, 13, 23]
with YamlTokenizer(filename) as r:
# Iterate the docs
n = 0
for i, doc in enumerate(r):
n += 1
print("doc", doc)
self.assertTrue(doc.tag == doc_tags[i])
self.assertTrue(doc.lineno == doc_linenos[i])
self.assertTrue(n == len(doc_tags))
# Read all docs present in the file.
r.seek(0)
all_docs = r.all_yaml_docs()
#print(all_docs)
self.assertTrue(len(all_docs) == 3)
# We should be at the begining at the file.
self.assertTrue(all_docs == r.all_yaml_docs())
# Find documents by tag.
r.seek(0)
monster = r.next_doc_with_tag("!Monster")
#print("monster",monster)
self.assertTrue(monster == all_docs[1])
monster = r.next_doc_with_tag("!Monster")
self.assertTrue(monster == all_docs[2])
# this should raise StopIteration
with self.assertRaises(StopIteration):
monster = r.next_doc_with_tag("!Monster")
os.remove(filename)
class AbinitInpectTest(PymatgenTest):
def test_scfcycle(self):
"""Testing ScfCycle."""
cycle = GroundStateScfCycle.from_file(ref_file("mgb2_scf.abo"))
print(cycle)
assert cycle.num_iterations == 6
last = cycle.last_iteration
assert last["Etot(hartree)"] == -7.1476241568657 and last["vres2"] == 3.879E-08
assert list(cycle["vres2"]) == [1.769E+02, 7.920E-01, 1.570E-01, 4.259E-03, 4.150E-05, 3.879E-08]
if have_matplotlib:
cycle.plot(show=False)
def test_relaxation(self):
"""Testing Relaxation object."""
relaxation = Relaxation.from_file(ref_file("sic_relax.abo"))
print(relaxation)
assert len(relaxation) == 4
assert relaxation[0]["Etot(hartree)"][-1] == -8.8077409200473
assert relaxation[-1]["Etot(hartree)"][-1] == -8.8234906607147
for scf_step in relaxation:
print(scf_step.num_iterations)
if have_matplotlib:
relaxation.plot(show=False)
if __name__ == '__main__':
import unittest2 as unittest
unittest.main()
| 2,010 | 1,004 | 95 |
3ec35d9d1486ee75b021e26c52b86b4d98020625 | 6,142 | py | Python | function.py | phenixace/labeltxt_cn | 104963d7b3b9ebfb7267a14bf48767277ac71da1 | [
"MIT"
] | 3 | 2021-02-03T14:29:21.000Z | 2021-02-26T02:05:49.000Z | function.py | phenixace/labeltxt_cn | 104963d7b3b9ebfb7267a14bf48767277ac71da1 | [
"MIT"
] | null | null | null | function.py | phenixace/labeltxt_cn | 104963d7b3b9ebfb7267a14bf48767277ac71da1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Provide function logic for UI
from UI import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import configparser
import os
from nlp import *
#初始化options.ini(如果不存在就创建)
| 33.933702 | 113 | 0.551123 | # -*- coding: utf-8 -*-
#Provide function logic for UI
from UI import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import configparser
import os
from nlp import *
class window(QMainWindow,Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
#read configs
self.source,self.save,self.mode=self.init_configs()
self.filename=''
self.result=[]
self.num=0
self.init_window()
#slot functions
self.pushButton.clicked.connect(self.func_last)
self.pushButton_2.clicked.connect(self.func_next)
self.pushButton_3.clicked.connect(self.func_save)
self.actionFile_Directory.triggered.connect(self.func_source_dir)
self.actionSaveFile_Path.triggered.connect(self.func_save_dir)
self.actionMode.triggered.connect(self.func_mode)
#初始化options.ini(如果不存在就创建)
def init_configs(self):
if os.path.exists("options.ini"):
# 实例化configParser对象
config = configparser.ConfigParser()
# -read读取ini文件
config.read('options.ini')
configs_temp=config.items('OPTIONS')
configs=[]
for row in configs_temp:
configs.append(row[1])
if len(configs)!=3:
QMessageBox.information(self,"警告","配置文件损坏,将会重置!",QMessageBox.Yes|QMessageBox.No,QMessageBox.Yes)
# 实例化configParser对象
config = configparser.ConfigParser()
config.add_section("OPTIONS")
config.set("OPTIONS", "SOURCE", "./")
config.set("OPTIONS", "SAVE", "./")
config.set("OPTIONS", "MODE", "1")
# write to file
config.write(open('options.ini', "w"))
return './','./','1'
return configs[0],configs[1],configs[2]
else:
# 实例化configParser对象
config = configparser.ConfigParser()
config.add_section("OPTIONS")
config.set("OPTIONS", "SOURCE", "./")
config.set("OPTIONS", "SAVE", "./")
config.set("OPTIONS", "MODE", "1")
# write to file
config.write(open('options.ini', "w"))
return './','./','1'
def init_window(self):
#获取source文件夹下所有的txt文件
self.result=[]
self.num=0
filter=[".txt"]
for maindir, subdir, file_name_list in os.walk(self.source):
for filename in file_name_list:
apath = os.path.join(maindir, filename)#合并成一个完整路径
ext = os.path.splitext(apath)[1] # 获取文件后缀 [0]获取的是除了文件名以外的内容
if ext in filter:
self.result.append(apath)
self.filename=self.result[0]
f=open(self.filename,'r',encoding='utf-8')
data=f.read()
self.textBrowser.setText(data)
words=divide_words(data)
self.textEdit.setText(words)
f.close()
def func_last(self):
if 0<self.num<=len(self.result)-1:
self.num=self.num-1
self.filename=self.result[self.num]
f=open(self.filename,'r',encoding='utf-8')
data=f.read()
self.textBrowser.setText(data)
words=divide_words(data)
self.textEdit.setText(words)
f.close()
else:
QMessageBox.information(self,"警告","当前是第一个!",QMessageBox.Yes|QMessageBox.No,QMessageBox.Yes)
def func_next(self):
if 0<=self.num<len(self.result)-1:
self.num=self.num+1
self.filename=self.result[self.num]
f=open(self.filename,'r',encoding='utf-8')
data=f.read()
self.textBrowser.setText(data)
words=divide_words(data)
self.textEdit.setText(words)
f.close()
else:
QMessageBox.information(self,"警告","当前是最后一个!",QMessageBox.Yes|QMessageBox.No,QMessageBox.Yes)
def func_save(self):
self.savefilename=self.save+"/"+self.filename.split('\\')[-1]
f=open(self.savefilename,'w',encoding='utf-8')
f.write(self.textEdit.toPlainText())
f.close()
def func_source_dir(self):
text = QFileDialog.getExistingDirectory(self,"choose directory",r"C:\Users\Administrator\Desktop")
if text != '':
# 实例化configParser对象
config = configparser.ConfigParser()
config.add_section("OPTIONS")
config.set("OPTIONS", "SOURCE", text)
config.set("OPTIONS", "SAVE",self.save)
config.set("OPTIONS", "MODE", self.mode)
# write to file
config.write(open('options.ini', "w+"))
self.source=text
self.init_window()
def func_save_dir(self):
text = QFileDialog.getExistingDirectory(self,"choose directory",r"C:\Users\Administrator\Desktop")
if text != '':
# 实例化configParser对象
config = configparser.ConfigParser()
config.add_section("OPTIONS")
config.set("OPTIONS", "SOURCE", self.source)
config.set("OPTIONS", "SAVE", text)
config.set("OPTIONS", "MODE", self.mode)
# write to file
config.write(open('options.ini', "w+"))
self.save=text
def func_mode(self):
text, okPressed = QInputDialog.getText(self, "设置","模式(1-命名实体标注 2-情感标注):", QLineEdit.Normal, "")
if okPressed and (text == '2' or text=='1'):
# 实例化configParser对象
config = configparser.ConfigParser()
config.add_section("OPTIONS")
config.set("OPTIONS", "SOURCE", self.source)
config.set("OPTIONS", "SAVE", self.save)
config.set("OPTIONS", "MODE", text)
# write to file
config.write(open('options.ini', "w+"))
self.mode=text
self.init_window() | 5,826 | 19 | 281 |
21d8647174779af8e9e17b0dbb12715a2d7543d5 | 4,845 | py | Python | lib/datasets/tless/ct.py | bertid/clean-pvnet | 8e1afdfe450c7d73274581d2907ad0215cba8331 | [
"Apache-2.0"
] | 284 | 2019-12-14T08:09:40.000Z | 2022-03-26T02:17:26.000Z | lib/datasets/tless/ct.py | danikhani/clean-pvnet | 4f91324c5bc9d2a05624f49c6cad15a33a446106 | [
"Apache-2.0"
] | 208 | 2019-12-16T13:09:49.000Z | 2022-03-25T07:38:20.000Z | lib/datasets/tless/ct.py | danikhani/clean-pvnet | 4f91324c5bc9d2a05624f49c6cad15a33a446106 | [
"Apache-2.0"
] | 88 | 2019-12-14T12:33:51.000Z | 2022-03-22T21:07:09.000Z | import torch.utils.data as data
import cv2
import numpy as np
import math
from lib.utils import data_utils
from pycocotools.coco import COCO
import os
from lib.utils.tless import tless_utils, visualize_utils, tless_config
from PIL import Image
import glob
| 36.984733 | 103 | 0.617131 | import torch.utils.data as data
import cv2
import numpy as np
import math
from lib.utils import data_utils
from pycocotools.coco import COCO
import os
from lib.utils.tless import tless_utils, visualize_utils, tless_config
from PIL import Image
import glob
class Dataset(data.Dataset):
def __init__(self, ann_file, split):
super(Dataset, self).__init__()
self.split = split
self.coco = COCO(ann_file)
self.anns = np.array(self.coco.getImgIds())
self.anns = self.anns[:500] if split == 'mini' else self.anns
self.json_category_id_to_contiguous_id = {v: i for i, v in enumerate(self.coco.getCatIds())}
self.bg_paths = np.array(glob.glob('data/sun/JPEGImages/*.jpg'))
def get_training_data(self, index):
np.random.seed(index)
img_ids = np.random.choice(self.anns, tless_config.num_obj_in_training_image)
train_img = cv2.imread(self.bg_paths[np.random.randint(len(self.bg_paths))])
train_img = cv2.resize(train_img, (tless_config.train_w, tless_config.train_h))
train_mask = np.zeros((tless_config.train_h, tless_config.train_w), dtype=np.int16)
rgb_paths = []
mask_paths = []
category_ids = []
for instance_id, img_id in enumerate(img_ids):
ann_ids = self.coco.getAnnIds(imgIds=img_id)
anno = self.coco.loadAnns(ann_ids)[0]
rgb_paths.append(self.coco.loadImgs(int(img_id))[0]['rgb_path'])
mask_paths.append(anno['mask_path'])
category_ids.append(anno['category_id'])
for instance_id in range(len(rgb_paths)):
rgb_path = rgb_paths[instance_id]
mask_path = mask_paths[instance_id]
category_id = category_ids[instance_id]
img = cv2.imread(rgb_path)
mask = np.array(Image.open(mask_path))
mask_id = category_id * 1000 + instance_id
tless_utils.cut_and_paste(img, mask, train_img, train_mask, mask_id)
cls_ids = [self.json_category_id_to_contiguous_id[category_id] for category_id in category_ids]
return train_img, train_mask, category_ids, cls_ids
def get_bbox(self, mask, category_ids, trans_output, out_h, out_w):
bboxes = []
for instance_id in range(len(category_ids)):
category_id = category_ids[instance_id]
mask_id = category_id * 1000 + instance_id
mask_ = (mask == mask_id).astype(np.float32)
mask_ = cv2.warpAffine(mask_, trans_output, (out_w, out_h), flags=cv2.INTER_LINEAR)
mask_ = (mask_ != 0).astype(np.uint8)
bbox = tless_utils.xywh_to_xyxy(cv2.boundingRect(mask_))
bbox[2] = min(bbox[2], out_w-1)
bbox[3] = min(bbox[3], out_h-1)
bboxes.append(bbox)
return bboxes
def prepare_detection(self, box, ct_hm, cls_id, wh, ct_cls, ct_ind):
ct_hm = ct_hm[cls_id]
ct_cls.append(cls_id)
x_min, y_min, x_max, y_max = box
ct = np.array([(x_min + x_max) / 2, (y_min + y_max) / 2], dtype=np.float32)
ct = np.round(ct).astype(np.int32)
h, w = y_max - y_min, x_max - x_min
radius = data_utils.gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
data_utils.draw_umich_gaussian(ct_hm, ct, radius)
wh.append([w, h])
ct_ind.append(ct[1] * ct_hm.shape[1] + ct[0])
x_min, y_min = ct[0] - w / 2, ct[1] - h / 2
x_max, y_max = ct[0] + w / 2, ct[1] + h / 2
decode_box = [x_min, y_min, x_max, y_max]
return decode_box
def __getitem__(self, index):
img, train_mask, category_ids, cls_ids = self.get_training_data(index)
orig_img, inp, trans_input, trans_output, center, scale, inp_out_hw = \
tless_utils.augment(img, self.split)
bboxes = self.get_bbox(train_mask, category_ids, trans_output, inp_out_hw[2], inp_out_hw[3])
output_h, output_w = inp_out_hw[2:]
ct_hm = np.zeros([30, output_h, output_w], dtype=np.float32)
wh = []
ct_cls = []
ct_ind = []
bboxes_ = []
for i in range(len(bboxes)):
cls_id = cls_ids[i]
bbox = bboxes[i]
if bbox[2] == bbox[0] or bbox[3] == bbox[1]:
continue
bboxes_.append(bbox)
self.prepare_detection(bbox, ct_hm, cls_id, wh, ct_cls, ct_ind)
ret = {'inp': inp}
detection = {'ct_hm': ct_hm, 'wh': wh, 'ct_cls': ct_cls, 'ct_ind': ct_ind}
ret.update(detection)
# visualize_utils.visualize_detection(orig_img, ret)
ct_num = len(ct_ind)
meta = {'center': center, 'scale': scale, 'ct_num': ct_num}
ret.update({'meta': meta})
return ret
def __len__(self):
return len(self.anns)
| 4,396 | 7 | 184 |
ef96b55a964939651f5211ef2b2a2559d58fb158 | 1,929 | py | Python | openjij/model/higher_order_model.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | null | null | null | openjij/model/higher_order_model.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | null | null | null | openjij/model/higher_order_model.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | 1 | 2021-04-09T09:13:56.000Z | 2021-04-09T09:13:56.000Z | import numpy as np
class BinaryHigherOrderModel:
"""Higher order model.
"""
def adj_dict(self):
"""adjacency list of each variables
Returns:
dict: key (variables key), value (list of tuple represents connected indices)
"""
adj_dict = {i: [] for i in self.indices}
for coeff in self.interactions[1:]:
for _inds, value in coeff.items():
for i in _inds:
_inds_list = list(_inds)
_inds_list.remove(i)
adj_dict[i].append([_inds_list, value])
return adj_dict
def energy(self, state):
"""calculate energy of state
Args:
state (list of int): list of SPIN or BINARY
Returns:
float: energy of state
"""
energy = 0.0
if isinstance(state, dict):
# convert to array
state = [state[elem] for elem in self.indices]
state = np.array(state)
for coeff in self.interactions[1:]:
for _inds, value in coeff.items():
energy += value * np.prod(state[list(_inds)])
for i, hi in self.interactions[0].items():
energy += hi * state[i]
return energy
def calc_energy(self, state):
"""alias of `energy`
Args:
state (list of int): list of SPIN or BINARY
Returns:
float: energy of state
"""
return self.energy(state)
| 27.557143 | 89 | 0.534992 | import numpy as np
class BinaryHigherOrderModel:
"""Higher order model.
"""
def __init__(self, interactions: list):
self.interactions = interactions
indices = set(self.interactions[0].keys())
for coeff in self.interactions[1:]:
for _inds in coeff.keys():
indices = indices | set(_inds)
self.indices = list(indices)
for i in self.indices:
if i not in self.interactions[0]:
self.interactions[0][i] = 0.0
def adj_dict(self):
"""adjacency list of each variables
Returns:
dict: key (variables key), value (list of tuple represents connected indices)
"""
adj_dict = {i: [] for i in self.indices}
for coeff in self.interactions[1:]:
for _inds, value in coeff.items():
for i in _inds:
_inds_list = list(_inds)
_inds_list.remove(i)
adj_dict[i].append([_inds_list, value])
return adj_dict
def energy(self, state):
"""calculate energy of state
Args:
state (list of int): list of SPIN or BINARY
Returns:
float: energy of state
"""
energy = 0.0
if isinstance(state, dict):
# convert to array
state = [state[elem] for elem in self.indices]
state = np.array(state)
for coeff in self.interactions[1:]:
for _inds, value in coeff.items():
energy += value * np.prod(state[list(_inds)])
for i, hi in self.interactions[0].items():
energy += hi * state[i]
return energy
def calc_energy(self, state):
"""alias of `energy`
Args:
state (list of int): list of SPIN or BINARY
Returns:
float: energy of state
"""
return self.energy(state)
| 403 | 0 | 27 |
0526bbb7e5da4874db62543fb9424916eaf91631 | 1,259 | py | Python | metashare/edelivery/management/commands/get_new_edelivery_messages.py | MiltosD/ELRC2 | 0caf1a0b62bb4e33e03f62169d5cd189249397c9 | [
"BSD-3-Clause"
] | 1 | 2017-07-10T08:15:07.000Z | 2017-07-10T08:15:07.000Z | metashare/edelivery/management/commands/get_new_edelivery_messages.py | MiltosD/ELRC2 | 0caf1a0b62bb4e33e03f62169d5cd189249397c9 | [
"BSD-3-Clause"
] | null | null | null | metashare/edelivery/management/commands/get_new_edelivery_messages.py | MiltosD/ELRC2 | 0caf1a0b62bb4e33e03f62169d5cd189249397c9 | [
"BSD-3-Clause"
] | 1 | 2018-07-03T07:55:56.000Z | 2018-07-03T07:55:56.000Z | import logging
from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from metashare.edelivery.wsdl_services import download_messages
from metashare.settings import LOG_HANDLER, CONTRIBUTIONS_ALERT_EMAILS
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
| 39.34375 | 110 | 0.636219 | import logging
from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from metashare.edelivery.wsdl_services import download_messages
from metashare.settings import LOG_HANDLER, CONTRIBUTIONS_ALERT_EMAILS
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
class Command(BaseCommand):
def handle(self, *args, **options):
download_result = download_messages()
# if success
if download_result[0]:
LOGGER.info(download_result[1])
try:
send_mail("New contributions through eDelivery",
"You have new unmanaged contributed resources on elrc-share.eu, through eDelivery.",
recipient_list=CONTRIBUTIONS_ALERT_EMAILS,
from_email='no-reply@elrc-share.eu', \
fail_silently=False)
except:
LOGGER.error("An error has occurred while trying to send email to contributions "
"alert recipients.")
elif len(download_result) > 2:
LOGGER.error("{}: {}".format(download_result[1], download_result[2]))
else:
LOGGER.info(download_result[1])
| 878 | 6 | 49 |
ad1c2e5ea80f21af0c5c49c0a02ceeba5884cc7f | 4,529 | py | Python | queue-summary.py | ycrc/Orwell-CLI | 49dd2c8cebf77bbe09bd050032b880e98008acfa | [
"MIT"
] | 2 | 2021-04-25T12:18:19.000Z | 2021-04-25T12:21:04.000Z | queue-summary.py | ycrc/Orwell-CLI | 49dd2c8cebf77bbe09bd050032b880e98008acfa | [
"MIT"
] | null | null | null | queue-summary.py | ycrc/Orwell-CLI | 49dd2c8cebf77bbe09bd050032b880e98008acfa | [
"MIT"
] | 1 | 2021-04-25T12:18:26.000Z | 2021-04-25T12:18:26.000Z | #!/usr/bin/env python
import sys
import argparse
import subprocess
from collections import defaultdict as dd
size_multipliers = {'M':1, 'G':1024, 'T':1024**2}
core_node_keys = {'c':'ReqCPUS', 'n':'ReqNodes'}
avail_sort = ['Jobs', 'Nodes', 'CPUs', 'GPUs', 'RAM']
avail_levels = ['User', 'Account', 'State', 'Partition']
if __name__ == '__main__':
args = get_args()
levels = get_levels(args['levels'])
job_summary = summarize_jobs(levels)
if 'GPUs' in args['sort_on']:
args['gpu']=True
print_summary(job_summary, levels, args['gpu'], args['units'], args['sort_on'], args['ascending'])
| 43.970874 | 150 | 0.597924 | #!/usr/bin/env python
import sys
import argparse
import subprocess
from collections import defaultdict as dd
size_multipliers = {'M':1, 'G':1024, 'T':1024**2}
core_node_keys = {'c':'ReqCPUS', 'n':'ReqNodes'}
avail_sort = ['Jobs', 'Nodes', 'CPUs', 'GPUs', 'RAM']
avail_levels = ['User', 'Account', 'State', 'Partition']
def get_levels(level_string):
levels = []
for l in level_string.split(','):
level_ok = False
for avail_level in avail_levels:
if avail_level.startswith(l.capitalize()) or avail_level.startswith == l.capitalize():
levels.append(avail_level)
level_ok = True
if not level_ok:
sys.exit("Level not recognized: {}".format(l))
return levels
def get_subprocess_lines(cmd):
try:
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in pipe.stdout:
yield line.decode().strip()
pipe.wait()
except OSError as e:
print("Couldn't find slurm commands on your path. Are you sure you're on a slurm cluster?")
sys.exit(1)
def get_job_memory(job_info):
units = job_info['ReqMem'][-2]
core_node = job_info['ReqMem'][-1]
raw_memory = float(job_info['ReqMem'][:-2])
return int(raw_memory * size_multipliers[units] * int(job_info[core_node_keys[core_node]]))
def summarize_jobs(summary_levels):
summary = dd(lambda: {'Jobs': 0, 'CPUs': 0, 'GPUs': 0,
'RAM': 0, 'Nodes': 0})
sacct_cmd = ['sacct', '-XaPsR,PD,RQ', '-oUser,Account,State,Partition,ReqCPUS,ReqNodes,ReqMem,ReqGRES']
for i, line in enumerate(get_subprocess_lines(sacct_cmd)):
if i == 0:
header = line.split('|')
else:
job_info = dict(zip(header, line.split('|')))
job_info['State'] = job_info['State'].lower()
job_memory = get_job_memory(job_info)
level_idx = tuple(job_info[x] for x in summary_levels)
summary[level_idx]['Jobs'] += 1
summary[level_idx]['CPUs'] += int(job_info['ReqCPUS'])
summary[level_idx]['RAM'] += get_job_memory(job_info)
summary[level_idx]['Nodes'] += int(job_info['ReqNodes'])
if job_info['ReqGRES'].startswith('gpu'):
summary[level_idx]['GPUs'] += int(job_info['ReqGRES'].split(':')[1])
return summary
def print_summary(summary_dict, summary_levels, show_gpu, ram_units, sort_on, ascending):
sortable_columns = avail_sort
rows = [ ]
if not show_gpu:
sortable_columns.remove('GPUs')
rows.append(summary_levels+sortable_columns)
for level_idx, info_dict in sorted(summary_dict.items(), key=lambda x: tuple(x[1][y] for y in sort_on), reverse=ascending):
info_dict['RAM'] = round((info_dict['RAM'] / size_multipliers[ram_units]), 1)
rows.append([str(a) for a in level_idx+tuple(info_dict[x] for x in sortable_columns)])
max_widths = [max(map(len, col)) for col in zip(*rows)]
for row in rows:
print(" ".join((val.ljust(width) for val, width in zip(row, max_widths))))
def get_args():
    """Parse command-line options and return them as a plain dict."""
    parser = argparse.ArgumentParser(description="get a summary of job usage", prog='job-summary')
    parser.add_argument('-l', '--levels',
                        default='user,state',
                        help='What to summarize output on. Can specify more than one of: u user a account s state p partiton. e.g. u,s or user,state')
    parser.add_argument('-g', '--gpu',
                        action='store_true',
                        help='Show GPUs too.')
    # default must be None here: with action='append' argparse appends onto
    # a non-None default, so "-s RAM" used to produce ['CPUs', 'RAM'].
    # The ['CPUs'] fallback is applied after parsing instead.
    parser.add_argument('-s', '--sort-on',
                        default=None,
                        action='append',
                        choices=avail_sort,
                        help='What to sort output on. Can specify more than one.')
    parser.add_argument('-a', '--ascending',
                        action='store_true',
                        help='Sort in ascending order (default is descending).')
    parser.add_argument('-u', '--units',
                        default='G',
                        choices=list(size_multipliers.keys()),
                        help='What units to report memory in.')
    args = vars(parser.parse_args())
    if args['sort_on'] is None:
        args['sort_on'] = ['CPUs']
    return args
if __name__ == '__main__':
    args = get_args()
    # Expand the comma-separated --levels string into sacct column names.
    levels = get_levels(args['levels'])
    job_summary = summarize_jobs(levels)
    # Sorting on GPUs only makes sense if the GPU column is displayed.
    if 'GPUs' in args['sort_on']:
        args['gpu']=True
    print_summary(job_summary, levels, args['gpu'], args['units'], args['sort_on'], args['ascending'])
| 3,777 | 0 | 138 |
731891cff96c05634a3a119a2e83c44b6b33ea4d | 1,234 | py | Python | rma/setup/rma/odoo/addons/rma/models/account_invoice.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | rma/setup/rma/odoo/addons/rma/models/account_invoice.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | rma/setup/rma/odoo/addons/rma/models/account_invoice.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | 1 | 2021-05-05T07:59:08.000Z | 2021-05-05T07:59:08.000Z | # Copyright 2020 Tecnativa - Ernesto Tejeda
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, fields, models
from odoo.exceptions import ValidationError
from odoo.tools import float_compare
| 34.277778 | 75 | 0.648298 | # Copyright 2020 Tecnativa - Ernesto Tejeda
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, fields, models
from odoo.exceptions import ValidationError
from odoo.tools import float_compare
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    def action_invoice_open(self):
        """ Avoids to validate a refund with less quantity of product than
        quantity in the linked RMA.
        """
        precision = self.env['decimal.precision'].precision_get(
            'Product Unit of Measure')

        def _short_of_rma(line):
            # True when the line is tied to an RMA and invoices fewer units
            # than the RMA requires.
            return line.rma_id and float_compare(
                line.quantity, line.rma_id.product_uom_qty, precision) < 0

        if self.mapped('invoice_line_ids').filtered(_short_of_rma):
            raise ValidationError(
                _("There is at least one invoice lines whose quantity is "
                  "less than the quantity specified in its linked RMA."))
        res = super().action_invoice_open()
        # After the refund is validated, flag every linked RMA as refunded.
        self.mapped('invoice_line_ids.rma_id').write({'state': 'refunded'})
        return res
class AccountInvoiceLine(models.Model):
    _inherit = 'account.invoice.line'
    # Link to the RMA this refund line compensates; read by
    # AccountInvoice.action_invoice_open to validate quantities and to
    # mark the RMA as refunded.
    rma_id = fields.Many2one(
        comodel_name='rma',
        string='RMA',
    )
| 0 | 960 | 46 |
e8376106a09250397a18ceffbc1864b9f9a3f74c | 3,699 | py | Python | office365/graph/directory/group.py | stardust85/Office365-REST-Python-Client | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | [
"MIT"
] | null | null | null | office365/graph/directory/group.py | stardust85/Office365-REST-Python-Client | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | [
"MIT"
] | null | null | null | office365/graph/directory/group.py | stardust85/Office365-REST-Python-Client | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | [
"MIT"
] | null | null | null | import json
from office365.graph.directory.directoryObject import DirectoryObject
from office365.graph.directory.directoryObjectCollection import DirectoryObjectCollection
from office365.graph.onedrive.driveCollection import DriveCollection
from office365.graph.onedrive.siteCollection import SiteCollection
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.resource_path import ResourcePath
from office365.runtime.serviceOperationQuery import ServiceOperationQuery
from office365.graph.teams.team import Team
def _delete_group_from_directory(target_group):
    """Permanently remove *target_group* via the deleted-groups directory.

    :type target_group: Group
    """
    directory = target_group.context.directory
    directory.deletedGroups[target_group.id].delete_object()
class Group(DirectoryObject):
    """Represents an Azure Active Directory (Azure AD) group, which can be an Office 365 group, or a security group."""
    def add_team(self):
        """Create a new team under a group."""
        team = Team(self.context)
        team._parent_collection = self.parent_collection
        # Queue the creation as a service operation on this group.
        qry = ServiceOperationQuery(self, "team", None, team, None, team)
        self.context.add_query(qry)
        # NOTE(review): _construct_create_team_request is not defined in
        # this class as shown -- confirm it exists elsewhere in the module.
        self.context.get_pending_request().beforeExecute += self._construct_create_team_request
        return team
    def delete_object(self, permanent_delete=False):
        """
        :param permanent_delete: Permanently deletes the group from directory
        :type permanent_delete: bool
        """
        super(Group, self).delete_object()
        if permanent_delete:
            # Runs the purge only once the group's id property is loaded.
            self.ensure_property("id", _delete_group_from_directory)
    @property
    def members(self):
        """Users and groups that are members of this group."""
        if self.is_property_available('members'):
            return self.properties['members']
        else:
            # Property not fetched yet: return a lazy collection bound to
            # the "members" sub-path of this group.
            return DirectoryObjectCollection(self.context,
                                             ResourcePath("members", self.resource_path))
    @property
    def owners(self):
        """The owners of the group."""
        if self.is_property_available('owners'):
            return self.properties['owners']
        else:
            return DirectoryObjectCollection(self.context,
                                             ResourcePath("owners", self.resource_path))
    @property
    def drives(self):
        """The group's drives. Read-only."""
        if self.is_property_available('drives'):
            return self.properties['drives']
        else:
            return DriveCollection(self.context, ResourcePath("drives", self.resource_path))
    @property
    def sites(self):
        """The list of SharePoint sites in this group. Access the default site with /sites/root."""
        if self.is_property_available('sites'):
            return self.properties['sites']
        else:
            return SiteCollection(self.context,
                                  ResourcePath("sites", self.resource_path))
| 39.351064 | 119 | 0.671263 | import json
from office365.graph.directory.directoryObject import DirectoryObject
from office365.graph.directory.directoryObjectCollection import DirectoryObjectCollection
from office365.graph.onedrive.driveCollection import DriveCollection
from office365.graph.onedrive.siteCollection import SiteCollection
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.resource_path import ResourcePath
from office365.runtime.serviceOperationQuery import ServiceOperationQuery
from office365.graph.teams.team import Team
def _delete_group_from_directory(target_group):
    """Permanently delete *target_group* from the deleted-groups directory.

    :type target_group: Group
    """
    group_id = target_group.id
    target_group.context.directory.deletedGroups[group_id].delete_object()
class Group(DirectoryObject):
    """Represents an Azure Active Directory (Azure AD) group, which can be an Office 365 group, or a security group."""
    def add_team(self):
        """Create a new team under a group."""
        team = Team(self.context)
        team._parent_collection = self.parent_collection
        # Queue the creation; the request is rewritten into a JSON PUT just
        # before execution by _construct_create_team_request.
        qry = ServiceOperationQuery(self, "team", None, team, None, team)
        self.context.add_query(qry)
        self.context.get_pending_request().beforeExecute += self._construct_create_team_request
        return team
    def delete_object(self, permanent_delete=False):
        """
        :param permanent_delete: Permanently deletes the group from directory
        :type permanent_delete: bool
        """
        super(Group, self).delete_object()
        if permanent_delete:
            # Runs the purge only once the group's id property is loaded.
            self.ensure_property("id", _delete_group_from_directory)
    def _construct_create_team_request(self, request):
        # One-shot hook: convert the queued request into a JSON PUT, then
        # detach itself so later requests are not affected.
        request.method = HttpMethod.Put
        request.set_header('Content-Type', "application/json")
        request.data = json.dumps(request.data)
        self.context.get_pending_request().beforeExecute -= self._construct_create_team_request
    @property
    def members(self):
        """Users and groups that are members of this group."""
        if self.is_property_available('members'):
            return self.properties['members']
        else:
            # Property not fetched yet: return a lazy collection bound to
            # the "members" sub-path of this group.
            return DirectoryObjectCollection(self.context,
                                             ResourcePath("members", self.resource_path))
    @property
    def owners(self):
        """The owners of the group."""
        if self.is_property_available('owners'):
            return self.properties['owners']
        else:
            return DirectoryObjectCollection(self.context,
                                             ResourcePath("owners", self.resource_path))
    @property
    def drives(self):
        """The group's drives. Read-only."""
        if self.is_property_available('drives'):
            return self.properties['drives']
        else:
            return DriveCollection(self.context, ResourcePath("drives", self.resource_path))
    @property
    def sites(self):
        """The list of SharePoint sites in this group. Access the default site with /sites/root."""
        if self.is_property_available('sites'):
            return self.properties['sites']
        else:
            return SiteCollection(self.context,
                                  ResourcePath("sites", self.resource_path))
    def set_property(self, name, value, persist_changes=True):
        super(Group, self).set_property(name, value, persist_changes)
        # fallback: create a new resource path
        if self._resource_path is None:
            if name == "id":
                self._resource_path = ResourcePath(
                    value,
                    self._parent_collection.resource_path)
| 637 | 0 | 54 |
647b408316ed53849edd4bea04f8ae726be2cac5 | 9,328 | py | Python | PA3/main.py | SebastianJay/LDI-Cool | 85744fa493bd6a11463aababe7b484a57c6c47b7 | [
"Apache-2.0"
] | null | null | null | PA3/main.py | SebastianJay/LDI-Cool | 85744fa493bd6a11463aababe7b484a57c6c47b7 | [
"Apache-2.0"
] | null | null | null | PA3/main.py | SebastianJay/LDI-Cool | 85744fa493bd6a11463aababe7b484a57c6c47b7 | [
"Apache-2.0"
] | null | null | null | import sys
import yacc
from cool_lexer import CoolLexer, tokens
from ast import *
#precedence of terminals listed in ascending order
#first string of each tuple shows left, right, or non associativity
precedence = (
('right', 'larrow'),
('nonassoc', 'not'),
('nonassoc', 'lt', 'le', 'equals'),
('left', 'plus', 'minus'),
('left', 'times', 'divide'),
('nonassoc', 'isvoid'),
('nonassoc', 'tilde'),
('left', 'at'),
('left', 'dot'),
)
#start symbol
start = 'program'
#Empty production
#Put at top so that reduce/reduce conflicts always choose this production
def p_empty(p):
'empty :'
pass #do nothing
#begin program grammar
def p_program(p):
'program : classdef semi classlist'
p[0] = AST([p[1]] + p[3])
def p_classlist_head(p):
'classlist : classdef semi classlist'
p[0] = [p[1]] + p[3]
def p_classlist_tail(p):
'classlist : empty'
p[0] = []
#end program grammar
#begin class grammar
def p_classdef(p):
'classdef : class type optinherits lbrace featurelist rbrace'
p[0] = ASTClass(
ASTIdentifier(p.lineno(2),p[2]),
p[3],
p[5])
def p_optinherits_nonempty(p):
'optinherits : inherits type'
p[0] = ASTIdentifier(p.lineno(2), p[2])
def p_optinherits_empty(p):
'optinherits : empty'
p[0] = None
##class features (methods and fields)
def p_featurelist_head(p):
'featurelist : feature semi featurelist'
p[0] = [p[1]] + p[3]
def p_featurelist_tail(p):
'featurelist : empty'
p[0] = []
def p_feature_method(p):
'feature : identifier lparen formalargs rparen colon type lbrace expr rbrace'
p[0] = ASTMethod(
ASTIdentifier(p.lineno(1), p[1]),
p[3],
ASTIdentifier(p.lineno(6), p[6]),
p[8])
def p_formalargs_first(p):
'formalargs : formal formallist'
p[0] = [p[1]] + p[2]
def p_formalargs_empty(p):
'formalargs : empty'
p[0] = []
def p_formallist_head(p):
'formallist : comma formal formallist'
p[0] = [p[2]] + p[3]
def p_formallist_tail(p):
'formallist : empty'
p[0] = []
def p_feature_field(p):
'feature : identifier colon type optinit'
p[0] = ASTAttribute(
ASTIdentifier(p.lineno(1), p[1]),
ASTIdentifier(p.lineno(3), p[3]),
p[4])
def p_formal(p):
'formal : identifier colon type'
p[0] = (ASTIdentifier(p.lineno(1), p[1]),
ASTIdentifier(p.lineno(3), p[3]))
#end class grammar
### BEGIN Expression Grammars
#begin dynamic/static dispatch grammar
def p_expression_dispatch(p):
'expr : expr opttype dot identifier lparen funcargs rparen'
# Static dispatch, class is specified
if p[2] is not None:
p[0] = ASTExpression(
p.lineno(1),
"static_dispatch",
(
p[1],
p[2],
ASTIdentifier(p.lineno(4), p[4]),
p[6]
))
# Dynamic dispatch, no type
else:
p[0] = ASTExpression(
p.lineno(1),
"dynamic_dispatch",
(
p[1],
ASTIdentifier(p.lineno(4), p[4]),
p[6]
))
def p_opttype_nonempty(p):
'opttype : at type'
p[0] = ASTIdentifier(p.lineno(2), p[2])
def p_opttype_empty(p):
'opttype : empty'
p[0] = None
def p_funcargs_first(p):
'funcargs : expr funclist'
p[0] = [p[1]] + p[2]
def p_funcargs_empty(p):
'funcargs : empty'
p[0] = []
def p_funclist_head(p):
'funclist : comma expr funclist'
p[0] = [p[2]] + p[3]
def p_funclist_tail(p):
'funclist : empty'
p[0] = []
#end dynamic/static dispatch grammar
#begin self dispatch grammar
def p_expression_selfdispatch(p):
'expr : identifier lparen funcargs rparen'
p[0] = ASTExpression(
p.lineno(1),
"self_dispatch",
(
ASTIdentifier(p.lineno(1), p[1]),
p[3]
)
)
#end self dispatch grammar
##If expression
def p_expression_if(p):
'expr : if expr then expr else expr fi'
p[0] = ASTExpression(
p.lineno(1),
"if",
(p[2],p[4],p[6]))
##While expression
def p_expression_while(p):
'expr : while expr loop expr pool'
p[0] = ASTExpression(
p.lineno(1),
"while",
(p[2],p[4])
)
#begin block statement grammar
def p_expression_block(p):
'expr : lbrace expr semi blocklist rbrace'
p[0] = ASTExpression(
p.lineno(1),
"block",
[p[2]] + p[4])
def p_blocklist_head(p):
'blocklist : expr semi blocklist'
p[0] = [p[1]] + p[3]
def p_blocklist_tail(p):
'blocklist : empty'
p[0] = []
#end block statement grammar
#begin let statement grammar
def p_expression_let(p):
'expr : let identifier colon type optinit letlist in expr'
p[0] = ASTExpression(
p.lineno(1),
"let",
([ASTLetBinding(
ASTIdentifier(p.lineno(2), p[2]),
ASTIdentifier(p.lineno(4), p[4]),
p[5])] + p[6],
p[8]))
def p_optinit_nonempty(p):
'optinit : larrow expr'
p[0] = p[2]
def p_optinit_empty(p):
'optinit : empty'
p[0] = None
def p_letlist_head(p):
'letlist : comma identifier colon type optinit letlist'
p[0] = [ASTLetBinding(\
ASTIdentifier(p.lineno(2), p[2]),
ASTIdentifier(p.lineno(4), p[4]),
p[5])] + p[6]
def p_letlist_tail(p):
'letlist : empty'
p[0] = []
#end let statement grammar
#begin case statement grammar
def p_expression_case(p):
'expr : case expr of identifier colon type rarrow expr semi caselist esac'
p[0] = ASTExpression(
p.lineno(1),
"case",
(p[2],[ASTCase(ASTIdentifier(p.lineno(4),p[4]),
ASTIdentifier(p.lineno(6),p[6]),
p[8])] + p[10]))
def p_caselist_head(p):
'caselist : identifier colon type rarrow expr semi caselist'
p[0] = [ASTCase(ASTIdentifier(p.lineno(1),p[1]),
ASTIdentifier(p.lineno(3),p[3]),
p[5])] + p[7]
def p_caselist_tail(p):
'caselist : empty'
p[0] = []
#end case statement grammar
##expressions with unary and binary operators
def p_expression_assign(p):
'expr : identifier larrow expr'
p[0] = ASTExpression(p.lineno(1), "assign", (ASTIdentifier(p.lineno(1), p[1]), p[3]))
def p_expression_newtype(p):
'expr : new type'
p[0] = ASTExpression(p.lineno(1), "new", ASTIdentifier(p.lineno(2), p[2]))
def p_expression_isvoid(p):
'expr : isvoid expr'
p[0] = ASTExpression(p.lineno(1), "isvoid", p[2])
def p_expression_plus(p):
'expr : expr plus expr'
p[0] = ASTExpression(
p.lineno(1),
"plus",
(p[1],p[3]))
def p_expression_minus(p):
'expr : expr minus expr'
p[0] = ASTExpression(
p.lineno(1),
"minus",
(p[1],p[3]))
def p_expression_times(p):
'expr : expr times expr'
p[0] = ASTExpression(
p.lineno(1),
"times",
(p[1],p[3]))
def p_expression_divide(p):
'expr : expr divide expr'
p[0] = ASTExpression(
p.lineno(1),
"divide",
(p[1],p[3]))
def p_expression_negate(p):
'expr : tilde expr'
p[0] = ASTExpression(
p.lineno(1),
"negate",
p[2])
def p_expression_lt(p):
'expr : expr lt expr'
p[0] = ASTExpression(
p.lineno(1),
"lt",
(p[1],p[3]))
def p_expression_lte(p):
'expr : expr le expr'
p[0] = ASTExpression(
p.lineno(1),
"le",
(p[1],p[3]))
def p_expression_equals(p):
'expr : expr equals expr'
p[0] = ASTExpression(
p.lineno(1),
"eq",
(p[1],p[3]))
def p_expression_not(p):
'expr : not expr'
p[0] = ASTExpression(
p.lineno(1),
"not",
p[2])
def p_expression_paren(p):
'expr : lparen expr rparen'
p[0] = p[2]
def p_expression_id(p):
'expr : identifier'
p[0] = ASTExpression(p.lineno(1),
"identifier",
ASTIdentifier(p.lineno(1),p[1]))
##constant expressions
def p_expression_integer(p):
'expr : integer'
p[0] = ASTExpression(p.lineno(1),
"integer",
int(p[1]))
def p_expression_string(p):
'expr : string'
p[0] = ASTExpression(p.lineno(1),
"string",
p[1])
def p_expression_true(p):
'expr : true'
p[0] = ASTExpression(p.lineno(1),
"true",
"")
def p_expression_false(p):
'expr : false'
p[0] = ASTExpression(p.lineno(1),
"false",
"")
if __name__ == '__main__':
    # argv[1] is the *-lex token file produced by the lexer stage.
    lexer = CoolLexer()
    lexer.loadFromFile(sys.argv[1])
    parser = yacc.yacc()
    result = parser.parse(lexer=lexer, tracking=True, debug=False)
    # Write the serialized AST next to the input, as the matching *-ast file.
    with open(sys.argv[1].replace("-lex",'-ast'), 'w') as outFile:
        outFile.write(str(result))
| 24.103359 | 89 | 0.556068 | import sys
import yacc
from cool_lexer import CoolLexer, tokens
from ast import *
#precedence of terminals listed in ascending order
#first string of each tuple shows left, right, or non associativity
precedence = (
('right', 'larrow'),
('nonassoc', 'not'),
('nonassoc', 'lt', 'le', 'equals'),
('left', 'plus', 'minus'),
('left', 'times', 'divide'),
('nonassoc', 'isvoid'),
('nonassoc', 'tilde'),
('left', 'at'),
('left', 'dot'),
)
#start symbol
start = 'program'
#Empty production
#Put at top so that reduce/reduce conflicts always choose this production
def p_empty(p):
'empty :'
pass #do nothing
#begin program grammar
def p_program(p):
'program : classdef semi classlist'
p[0] = AST([p[1]] + p[3])
def p_classlist_head(p):
'classlist : classdef semi classlist'
p[0] = [p[1]] + p[3]
def p_classlist_tail(p):
'classlist : empty'
p[0] = []
#end program grammar
#begin class grammar
def p_classdef(p):
'classdef : class type optinherits lbrace featurelist rbrace'
p[0] = ASTClass(
ASTIdentifier(p.lineno(2),p[2]),
p[3],
p[5])
def p_optinherits_nonempty(p):
'optinherits : inherits type'
p[0] = ASTIdentifier(p.lineno(2), p[2])
def p_optinherits_empty(p):
'optinherits : empty'
p[0] = None
##class features (methods and fields)
def p_featurelist_head(p):
'featurelist : feature semi featurelist'
p[0] = [p[1]] + p[3]
def p_featurelist_tail(p):
'featurelist : empty'
p[0] = []
def p_feature_method(p):
'feature : identifier lparen formalargs rparen colon type lbrace expr rbrace'
p[0] = ASTMethod(
ASTIdentifier(p.lineno(1), p[1]),
p[3],
ASTIdentifier(p.lineno(6), p[6]),
p[8])
def p_formalargs_first(p):
'formalargs : formal formallist'
p[0] = [p[1]] + p[2]
def p_formalargs_empty(p):
'formalargs : empty'
p[0] = []
def p_formallist_head(p):
'formallist : comma formal formallist'
p[0] = [p[2]] + p[3]
def p_formallist_tail(p):
'formallist : empty'
p[0] = []
def p_feature_field(p):
'feature : identifier colon type optinit'
p[0] = ASTAttribute(
ASTIdentifier(p.lineno(1), p[1]),
ASTIdentifier(p.lineno(3), p[3]),
p[4])
def p_formal(p):
'formal : identifier colon type'
p[0] = (ASTIdentifier(p.lineno(1), p[1]),
ASTIdentifier(p.lineno(3), p[3]))
#end class grammar
### BEGIN Expression Grammars
#begin dynamic/static dispatch grammar
def p_expression_dispatch(p):
'expr : expr opttype dot identifier lparen funcargs rparen'
# Static dispatch, class is specified
if p[2] is not None:
p[0] = ASTExpression(
p.lineno(1),
"static_dispatch",
(
p[1],
p[2],
ASTIdentifier(p.lineno(4), p[4]),
p[6]
))
# Dynamic dispatch, no type
else:
p[0] = ASTExpression(
p.lineno(1),
"dynamic_dispatch",
(
p[1],
ASTIdentifier(p.lineno(4), p[4]),
p[6]
))
def p_opttype_nonempty(p):
'opttype : at type'
p[0] = ASTIdentifier(p.lineno(2), p[2])
def p_opttype_empty(p):
'opttype : empty'
p[0] = None
def p_funcargs_first(p):
'funcargs : expr funclist'
p[0] = [p[1]] + p[2]
def p_funcargs_empty(p):
'funcargs : empty'
p[0] = []
def p_funclist_head(p):
'funclist : comma expr funclist'
p[0] = [p[2]] + p[3]
def p_funclist_tail(p):
'funclist : empty'
p[0] = []
#end dynamic/static dispatch grammar
#begin self dispatch grammar
def p_expression_selfdispatch(p):
'expr : identifier lparen funcargs rparen'
p[0] = ASTExpression(
p.lineno(1),
"self_dispatch",
(
ASTIdentifier(p.lineno(1), p[1]),
p[3]
)
)
#end self dispatch grammar
##If expression
def p_expression_if(p):
'expr : if expr then expr else expr fi'
p[0] = ASTExpression(
p.lineno(1),
"if",
(p[2],p[4],p[6]))
##While expression
def p_expression_while(p):
'expr : while expr loop expr pool'
p[0] = ASTExpression(
p.lineno(1),
"while",
(p[2],p[4])
)
#begin block statement grammar
def p_expression_block(p):
'expr : lbrace expr semi blocklist rbrace'
p[0] = ASTExpression(
p.lineno(1),
"block",
[p[2]] + p[4])
def p_blocklist_head(p):
'blocklist : expr semi blocklist'
p[0] = [p[1]] + p[3]
def p_blocklist_tail(p):
'blocklist : empty'
p[0] = []
#end block statement grammar
#begin let statement grammar
def p_expression_let(p):
'expr : let identifier colon type optinit letlist in expr'
p[0] = ASTExpression(
p.lineno(1),
"let",
([ASTLetBinding(
ASTIdentifier(p.lineno(2), p[2]),
ASTIdentifier(p.lineno(4), p[4]),
p[5])] + p[6],
p[8]))
def p_optinit_nonempty(p):
'optinit : larrow expr'
p[0] = p[2]
def p_optinit_empty(p):
'optinit : empty'
p[0] = None
def p_letlist_head(p):
'letlist : comma identifier colon type optinit letlist'
p[0] = [ASTLetBinding(\
ASTIdentifier(p.lineno(2), p[2]),
ASTIdentifier(p.lineno(4), p[4]),
p[5])] + p[6]
def p_letlist_tail(p):
'letlist : empty'
p[0] = []
#end let statement grammar
#begin case statement grammar
def p_expression_case(p):
'expr : case expr of identifier colon type rarrow expr semi caselist esac'
p[0] = ASTExpression(
p.lineno(1),
"case",
(p[2],[ASTCase(ASTIdentifier(p.lineno(4),p[4]),
ASTIdentifier(p.lineno(6),p[6]),
p[8])] + p[10]))
def p_caselist_head(p):
'caselist : identifier colon type rarrow expr semi caselist'
p[0] = [ASTCase(ASTIdentifier(p.lineno(1),p[1]),
ASTIdentifier(p.lineno(3),p[3]),
p[5])] + p[7]
def p_caselist_tail(p):
'caselist : empty'
p[0] = []
#end case statement grammar
##expressions with unary and binary operators
def p_expression_assign(p):
'expr : identifier larrow expr'
p[0] = ASTExpression(p.lineno(1), "assign", (ASTIdentifier(p.lineno(1), p[1]), p[3]))
def p_expression_newtype(p):
'expr : new type'
p[0] = ASTExpression(p.lineno(1), "new", ASTIdentifier(p.lineno(2), p[2]))
def p_expression_isvoid(p):
'expr : isvoid expr'
p[0] = ASTExpression(p.lineno(1), "isvoid", p[2])
def p_expression_plus(p):
'expr : expr plus expr'
p[0] = ASTExpression(
p.lineno(1),
"plus",
(p[1],p[3]))
def p_expression_minus(p):
'expr : expr minus expr'
p[0] = ASTExpression(
p.lineno(1),
"minus",
(p[1],p[3]))
def p_expression_times(p):
'expr : expr times expr'
p[0] = ASTExpression(
p.lineno(1),
"times",
(p[1],p[3]))
def p_expression_divide(p):
'expr : expr divide expr'
p[0] = ASTExpression(
p.lineno(1),
"divide",
(p[1],p[3]))
def p_expression_negate(p):
'expr : tilde expr'
p[0] = ASTExpression(
p.lineno(1),
"negate",
p[2])
def p_expression_lt(p):
'expr : expr lt expr'
p[0] = ASTExpression(
p.lineno(1),
"lt",
(p[1],p[3]))
def p_expression_lte(p):
'expr : expr le expr'
p[0] = ASTExpression(
p.lineno(1),
"le",
(p[1],p[3]))
def p_expression_equals(p):
'expr : expr equals expr'
p[0] = ASTExpression(
p.lineno(1),
"eq",
(p[1],p[3]))
def p_expression_not(p):
'expr : not expr'
p[0] = ASTExpression(
p.lineno(1),
"not",
p[2])
def p_expression_paren(p):
'expr : lparen expr rparen'
p[0] = p[2]
def p_expression_id(p):
'expr : identifier'
p[0] = ASTExpression(p.lineno(1),
"identifier",
ASTIdentifier(p.lineno(1),p[1]))
##constant expressions
def p_expression_integer(p):
'expr : integer'
p[0] = ASTExpression(p.lineno(1),
"integer",
int(p[1]))
def p_expression_string(p):
'expr : string'
p[0] = ASTExpression(p.lineno(1),
"string",
p[1])
def p_expression_true(p):
'expr : true'
p[0] = ASTExpression(p.lineno(1),
"true",
"")
def p_expression_false(p):
'expr : false'
p[0] = ASTExpression(p.lineno(1),
"false",
"")
def p_error(p):
    # PLY error hook (no docstring on purpose -- PLY reads p_* docstrings as
    # grammar rules).  Reports the failing token's line and aborts.
    # print as a function: the Python 2 statement form is a SyntaxError on
    # Python 3; the parenthesized call works on both.
    if p:
        print('ERROR: '+str(p.lineno)+': Parser: syntax error')
        sys.exit(1)
    else:
        #TODO report line number instead of EOF (low priority)
        #(apparently no test cases check this condition)
        print('ERROR: EOF: Parser: syntax error')
        sys.exit(1)
if __name__ == '__main__':
    # argv[1] is the *-lex token file produced by the lexer stage.
    lexer = CoolLexer()
    lexer.loadFromFile(sys.argv[1])
    parser = yacc.yacc()
    result = parser.parse(lexer=lexer, tracking=True, debug=False)
    # Write the serialized AST next to the input, as the matching *-ast file.
    with open(sys.argv[1].replace("-lex",'-ast'), 'w') as outFile:
        outFile.write(str(result))
| 286 | 0 | 23 |
9b7280552b6d784381bbc1ef58f7a2eaca703c51 | 7,369 | py | Python | extractCongressPartyAffiliationSentences.py | RDulepet19/congressional-records | 0ef2909f9db091794e07df7cd785c5c3fbee0579 | [
"MIT"
] | null | null | null | extractCongressPartyAffiliationSentences.py | RDulepet19/congressional-records | 0ef2909f9db091794e07df7cd785c5c3fbee0579 | [
"MIT"
] | null | null | null | extractCongressPartyAffiliationSentences.py | RDulepet19/congressional-records | 0ef2909f9db091794e07df7cd785c5c3fbee0579 | [
"MIT"
] | null | null | null | #!/home/ubuntu/anaconda3/bin//python
'''
MIT License
Copyright (c) 2018 Riya Dulepet <riyadulepet123@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
The code is inspired by https://github.com/erikor/medline project, but the logic to
parse medline XML was substantially modified.
'''
# pre-requisites: pip install elasticsearch
# pip install --upgrade pip
# to execute this code:
# STEP 0: ensure elastic search and kibana are running on port 9200
# and 5601 correspondingly
# STEP 1: make sure you have all the medline XML files downloaded from
# STEP 2: then you run nohup ls *.xml | xargs -n 1 -P 4 python ./parseMedline.py &
# the above step assume quad-core processor, and runs it as daemon process so when
# you exit SSH session, it runs in background.
# this should load the data into elastic search
import pandas as pd
import glob
import sys
import sys, os
descr_filenames = glob.glob("." + "/descr*.txt")
speech_filenames = glob.glob("." + "/speech*.txt")
speakermap_filenames = glob.glob("." + "/*SpeakerMap.txt")
NO_PARTY_SENTENCE = "N"
REPUBLICAN_SENTENCE = "R"
DEMOCRAT_SENTENCE = "D"
BOTH_PARTY_SENTENCE = "B"
republican = ["rnc", "gop", "republican", "republicans", "conservative", "conservatives", "right wing", "alt right", "far right"]
democrat = ["dnc", "democrat", "democrats", "democratic", "liberal", "liberals", "progressive", "progressives", "moderates", "nonconservative", "nonconservatives", "alt left", "far left", "left wing"]
from datetime import datetime
import json
import logging
from collections import deque
from pathlib import Path
import os.path
logging.basicConfig(filename='parse.log',level=logging.INFO)
DESTINATION_FILE = "congress_party_affiliation_sentences.csv"
import spacy
import textacy
nlp = spacy.load('en_core_web_sm')
import nltk
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
# For each Congress: join the description/speech/speaker files, scan every
# speech sentence for party mentions, and append the hits to DESTINATION_FILE.
for speakermap_filename in speakermap_filenames:
    try:
        # File names look like ./NNN_SpeakerMap.txt; NNN is the Congress number.
        prefix = speakermap_filename[2:5]
        print("prefix=", prefix)
        descr_filename = "./descr_" + str(prefix) + ".txt"
        speech_filename = "./speeches_" + str(prefix) + ".txt"
        list_descr = []
        list_speech = []
        list_speakermap = []
        list_descr.append(pd.read_csv(descr_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
        list_speech.append(pd.read_csv(speech_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
        list_speakermap.append(pd.read_csv(speakermap_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
        df_descr = pd.concat(list_descr)
        df_speech = pd.concat(list_speech)
        df_speakermap = pd.concat(list_speakermap)
        print("len df_descr=", len(df_descr))
        print("len df_speech=", len(df_speech))
        print("len df_speakerma=", len(df_speakermap))
        # Drop the intermediate lists so their memory can be reclaimed.
        list_descr = None
        list_speech = None
        list_speakermap = None
        df_descr_speech_speakermap = pd.merge(pd.merge(df_descr, df_speech, on='speech_id'), df_speakermap, on='speech_id')
        df_descr = None
        df_speech = None
        df_speakermap = None
        # convert date
        df_descr_speech_speakermap['speech'] = df_descr_speech_speakermap['speech'].fillna('')
        df_descr_speech_speakermap['party'] = df_descr_speech_speakermap['party'].fillna('')
        df_congressPartySentences = pd.DataFrame(columns=('congress', 'speech_id', 'speaker_party', 'spoken_party', 'sentence'))
        for index, row in df_descr_speech_speakermap.iterrows():
            # process NLP on the text, primarily to extract sentences most reliabily
            # doc = nlp(row["speech"])
            doc = sent_tokenize(row["speech"])
            # for sent in doc.sents:
            for sent in doc:
                party_affiliation = partyTypeSentence(str(sent))
                if party_affiliation in [REPUBLICAN_SENTENCE, DEMOCRAT_SENTENCE]:
                    # Assign the whole row with a single .loc call.  The
                    # previous chained form df.loc[i]["col"] = value writes
                    # into a temporary copy of the row, so the values were
                    # not guaranteed to land in the frame.
                    df_congressPartySentences.loc[len(df_congressPartySentences)] = [
                        prefix, row["speech_id"], row["party"],
                        party_affiliation, sent]
        # Format the message instead of printing the template string verbatim.
        print("CONGRESS={},LENGTH={}".format(prefix, len(df_congressPartySentences)))
        if os.path.exists(DESTINATION_FILE):
            # file exists: append without repeating the header
            df_congressPartySentences.to_csv(DESTINATION_FILE, mode='a', header=False)
        else:
            # brand new file
            df_congressPartySentences.to_csv(DESTINATION_FILE, mode='w', header=True)
    except Exception as e:
        print("Error reading description file = ", descr_filename)
        print("Error reading speech file = ", speech_filename)
        print("Error reading speakermap file = ", speakermap_filename)
        print(e) # for the repr
        print(str(e)) # for just the message
        print(e.args) # the arguments that the exception has been called with.
                      # the first one is usually the message.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        # logging.info(datetime.now().isoformat() + " imported " + str(res[0]) + " records from " + sys.argv[1])
# logging.info(datetime.now().isoformat() + " imported " + str(res[0]) + " records from " + sys.argv[1]) | 46.345912 | 203 | 0.700909 | #!/home/ubuntu/anaconda3/bin//python
'''
MIT License
Copyright (c) 2018 Riya Dulepet <riyadulepet123@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
The code is inspired by https://github.com/erikor/medline project, but the logic to
parse medline XML was substantially modified.
'''
# pre-requisites: pip install elasticsearch
# pip install --upgrade pip
# to execute this code:
# STEP 0: ensure elastic search and kibana are running on port 9200
# and 5601 correspondingly
# STEP 1: make sure you have all the medline XML files downloaded from
# STEP 2: then you run nohup ls *.xml | xargs -n 1 -P 4 python ./parseMedline.py &
# the above step assume quad-core processor, and runs it as daemon process so when
# you exit SSH session, it runs in background.
# this should load the data into elastic search
import pandas as pd
import glob
import sys
import sys, os
descr_filenames = glob.glob("." + "/descr*.txt")
speech_filenames = glob.glob("." + "/speech*.txt")
speakermap_filenames = glob.glob("." + "/*SpeakerMap.txt")
# Single-character labels describing which party (if any) a sentence mentions.
NO_PARTY_SENTENCE = "N"
REPUBLICAN_SENTENCE = "R"
DEMOCRAT_SENTENCE = "D"
BOTH_PARTY_SENTENCE = "B"
# Lowercase keyword/phrase lists used to detect party references; two-word
# entries are matched against the bigrams produced in partyTypeSentence().
republican = ["rnc", "gop", "republican", "republicans", "conservative", "conservatives", "right wing", "alt right", "far right"]
democrat = ["dnc", "democrat", "democrats", "democratic", "liberal", "liberals", "progressive", "progressives", "moderates", "nonconservative", "nonconservatives", "alt left", "far left", "left wing"]
from datetime import datetime
import json
import logging
from collections import deque
from pathlib import Path
import os.path
logging.basicConfig(filename='parse.log',level=logging.INFO)
DESTINATION_FILE = "congress_party_affiliation_sentences.csv"
import spacy
import textacy
nlp = spacy.load('en_core_web_sm')
import nltk
from nltk.tokenize import sent_tokenize
nltk.download('punkt')
def partyTypeSentence(sent):
    """Classify which party (if any) a sentence refers to.

    Tokenizes *sent* into lowercase unigrams and bigrams with
    scikit-learn's CountVectorizer analyzer (bigrams are needed because
    the keyword lists contain two-word phrases such as "right wing"),
    then checks the tokens against the module-level ``republican`` and
    ``democrat`` lists.

    Returns one of the module constants BOTH_PARTY_SENTENCE,
    REPUBLICAN_SENTENCE, DEMOCRAT_SENTENCE, or NO_PARTY_SENTENCE.
    """
    # Local import keeps sklearn out of module import time, as before.
    from sklearn.feature_extraction.text import CountVectorizer

    # extract unigrams and bigrams
    vectorizer = CountVectorizer(ngram_range=(1, 2))
    analyzer = vectorizer.build_analyzer()
    # Set membership is O(1); the original rescanned the token list once
    # per keyword and re-evaluated the republican check in two branches.
    # (The original also declared `global` for names it only reads, which
    # is unnecessary and has been dropped.)
    tokens = set(analyzer(sent))
    has_republican = any(word in tokens for word in republican)
    has_democrat = any(word in tokens for word in democrat)
    if has_republican and has_democrat:
        return BOTH_PARTY_SENTENCE
    if has_republican:
        return REPUBLICAN_SENTENCE
    if has_democrat:
        return DEMOCRAT_SENTENCE
    return NO_PARTY_SENTENCE
# Batch-process every *SpeakerMap.txt file: join it with the matching
# description and speech files for the same congress session, extract
# sentences that mention a party, and append them to DESTINATION_FILE.
for speakermap_filename in speakermap_filenames:
    try:
        # Session prefix, e.g. "./097_SpeakerMap.txt" -> "097".
        prefix = speakermap_filename[2:5]
        print("prefix=", prefix)
        descr_filename = "./descr_" + str(prefix) + ".txt"
        speech_filename = "./speeches_" + str(prefix) + ".txt"
        list_descr = []
        list_speech = []
        list_speakermap = []
        # NOTE(review): error_bad_lines was deprecated/removed in newer
        # pandas (use on_bad_lines) — confirm the pinned pandas version.
        list_descr.append(pd.read_csv(descr_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
        list_speech.append(pd.read_csv(speech_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
        list_speakermap.append(pd.read_csv(speakermap_filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
        df_descr = pd.concat(list_descr)
        df_speech = pd.concat(list_speech)
        df_speakermap = pd.concat(list_speakermap)
        print("len df_descr=", len(df_descr))
        print("len df_speech=", len(df_speech))
        print("len df_speakerma=", len(df_speakermap))
        # Drop the intermediate lists to release memory before the merge.
        list_descr = None
        list_speech = None
        list_speakermap = None
        # Inner-join all three tables on speech_id.
        df_descr_speech_speakermap = pd.merge(pd.merge(df_descr, df_speech, on='speech_id'), df_speakermap, on='speech_id')
        df_descr = None
        df_speech = None
        df_speakermap = None
        # convert date
        df_descr_speech_speakermap['speech'] = df_descr_speech_speakermap['speech'].fillna('')
        df_descr_speech_speakermap['party'] = df_descr_speech_speakermap['party'].fillna('')
        df_congressPartySentences = pd.DataFrame(columns=('congress', 'speech_id', 'speaker_party', 'spoken_party', 'sentence'))
        for index, row in df_descr_speech_speakermap.iterrows():
            # process NLP on the text, primarily to extract sentences most reliabily
            # doc = nlp(row["speech"])
            doc = sent_tokenize(row["speech"])
            # for sent in doc.sents:
            for sent in doc:
                party_affiliation = partyTypeSentence(str(sent))
                # Keep only single-party sentences (not both, not neither).
                if party_affiliation in [REPUBLICAN_SENTENCE, DEMOCRAT_SENTENCE]:
                    last_index = len(df_congressPartySentences)
                    df_congressPartySentences.loc[last_index] = "ignore"
                    # NOTE(review): chained indexing (.loc[i][col] = ...) relies
                    # on pandas returning a view; .loc[last_index, "congress"]
                    # is the safe form — confirm rows are actually populated.
                    df_congressPartySentences.loc[last_index]["congress"] = prefix
                    df_congressPartySentences.loc[last_index]["speech_id"] = row["speech_id"]
                    df_congressPartySentences.loc[last_index]["speaker_party"] = row["party"]
                    df_congressPartySentences.loc[last_index]["spoken_party"] = party_affiliation
                    df_congressPartySentences.loc[last_index]["sentence"] = sent
        # NOTE(review): the "{}" placeholders are never formatted — this prints
        # the literal template followed by the values; .format() was presumably
        # intended.
        print ("CONGRESS={},LENGTH={}", prefix, len(df_congressPartySentences))
        if os.path.exists(DESTINATION_FILE):
            # file exists
            df_congressPartySentences.to_csv(DESTINATION_FILE, mode='a', header=False)
        else:
            # brand new file
            df_congressPartySentences.to_csv(DESTINATION_FILE, mode='w', header=True)
    except Exception as e:
        # Best-effort diagnostics; processing continues with the next file.
        print("Error reading description file = ", descr_filename)
        print("Error reading speech file = ", speech_filename)
        print("Error reading speakermap file = ", speakermap_filename)
        print(e) # for the repr
        print(str(e)) # for just the message
        print(e.args) # the arguments that the exception has been called with.
        # the first one is usually the message.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
# logging.info(datetime.now().isoformat() + " imported " + str(res[0]) + " records from " + sys.argv[1]) | 729 | 0 | 23 |
2a988b2872ea49edb22b25e10194df20ee22f68e | 4,413 | py | Python | permutation_test/tests/test_csv_parser.py | cmohl2013/permutation_test | 788803248d6fbff43ac440e0d69a6cd53dac7853 | [
"MIT"
] | 5 | 2018-02-02T02:41:25.000Z | 2021-01-12T09:30:04.000Z | permutation_test/tests/test_csv_parser.py | cmohl2013/permutation_test | 788803248d6fbff43ac440e0d69a6cd53dac7853 | [
"MIT"
] | 4 | 2017-05-24T01:48:04.000Z | 2021-07-02T07:02:30.000Z | permutation_test/tests/test_csv_parser.py | cmohl2013/permutation_test | 788803248d6fbff43ac440e0d69a6cd53dac7853 | [
"MIT"
] | 2 | 2017-05-25T17:23:50.000Z | 2017-11-15T12:21:59.000Z | from unittest import TestCase
from ..functions import permutationtest
import numpy as np
import pandas as pd
import permutation_test.csv_parser as csv_parser
| 35.02381 | 95 | 0.504419 | from unittest import TestCase
from ..functions import permutationtest
import numpy as np
import pandas as pd
import permutation_test.csv_parser as csv_parser
class TestCsvParser(TestCase):
    """Unit tests for permutation_test.csv_parser."""

    def test_parse_dataframe(self):
        """parse_dataframe groups each experiment column by treatment label."""
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 6, 7, 8]\
            , 'treatment' : ['wt', 'mutant', 'wt', 'mutant']}
        df = pd.DataFrame(pdata)
        val = {'exp1' : { 'wt' : [1, 3], 'mutant' : [2, 4]}\
            , 'exp2' : { 'wt' : [5,7], 'mutant' : [6, 8]}\
            }
        res = csv_parser.parse_dataframe(df, exp_names=['exp1', 'exp2']\
            , treatment_colname='treatment')
        print(res)
        self.assertEqual(res, val)

    def test_parse_dataframe_autoexp(self):
        """Experiment columns are auto-detected when exp_names is omitted."""
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 6, 7, 8]\
            , 'treatment' : ['wt', 'mutant', 'wt', 'mutant']}
        df = pd.DataFrame(pdata)
        val = {'exp1' : { 'wt' : [1, 3], 'mutant' : [2, 4]}\
            , 'exp2' : { 'wt' : [5,7], 'mutant' : [6, 8]}\
            }
        res = csv_parser.parse_dataframe(df, treatment_colname='treatment')
        print(res)
        self.assertEqual(res, val)

    def test_parse_dataframe_ioerror(self):
        """More than two treatment labels raises IOError."""
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 6, 7, 8]\
            , 'treatment' : ['wt', 'mutant1', 'wt', 'mutant2']}
        df = pd.DataFrame(pdata)
        self.assertRaises(IOError,lambda:\
            csv_parser.parse_dataframe(df, exp_names=['exp1', 'exp2']\
            , treatment_colname='treatment'))

    def test_parse_dataframe_nonumeric_cols(self):
        """A non-numeric value in an experiment column raises IOError."""
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 'heinz', 7, 8]\
            , 'treatment' : ['wt', 'mutant1', 'wt', 'mutant2']}
        df = pd.DataFrame(pdata)
        self.assertRaises(IOError,lambda:\
            csv_parser.parse_dataframe(df, exp_names=['exp1', 'exp2']\
            , treatment_colname='treatment'))

    def test_get_treatments_from_df(self):
        """The distinct treatment labels are extracted (order-insensitive)."""
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 6, 7, 8]\
            , 'treatment' : ['wt', 'mutant', 'wt', 'mutant']}
        df = pd.DataFrame(pdata)
        val = ['mutant', 'wt']
        res = csv_parser.get_treatments_from_df(df, 'treatment')
        print(res)
        self.assertEqual(set(res), set(val))

    def test_init_data_dict(self):
        """init_data_dict accepts experiment and treatment name lists."""
        exp_names = ['exp1', 'exp2', 'exp3']
        treatments = ['wt','mutant']
        d = csv_parser.init_data_dict(exp_names, treatments)

    def test_are_exp_cols_numeric(self):
        """are_exp_cols_numeric is True only when all experiment cells are numbers."""
        exp_names = ['exp1', 'exp2']
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 6, 7, 8]\
            , 'treatment' : ['wt', 'mutant', 'wt', 'mutant']}
        df = pd.DataFrame(pdata)
        res = csv_parser.are_exp_cols_numeric(df,exp_names)
        self.assertTrue(res)
        pdata = {'exp1' : [1, 2, 3, 4]\
            , 'exp2' : [5, 'heinz', 7, 8]\
            , 'treatment' : ['wt', 'mutant', 'wt', 'mutant']}
        df = pd.DataFrame(pdata)
        res = csv_parser.are_exp_cols_numeric(df,exp_names)
        self.assertFalse(res)

    def test_dat_from_csv(self):
        """End-to-end parse of a well-formed CSV fixture."""
        val = {'exp2':\
            {'mutant': [10.52631579, 0.0, 2.9411764710000003, 0.0, 0.0]\
            , 'WT': [0.0, 9.0909090910000003, 23.07692308, 2.0833333330000001]}\
            , 'exp1':\
            {'mutant': [15.78947368, 4.3478260869999996, 5.8823529410000006, 0.0, 0.0]\
            , 'WT': [11.11111111, 9.0909090910000003, 23.07692308, 6.25]}\
            , 'exp3':\
            {'mutant': [5.263157895, 0.0, 2.9411764710000003, 0.0, 0.0]\
            , 'WT': [0.0, 6.8181818179999993, 15.38461538, 2.0833333330000001]}\
            }
        path = 'permutation_test/test_data/good_data.csv'
        dat = csv_parser.dat_from_csv(path, treatment_colname='treatment')
        print(dat)
        self.assertEqual(dat,val)

    def test_dat_from_csv_ioerror(self):
        """A CSV with three treatment conditions raises IOError."""
        path = 'permutation_test/test_data/bad_data_three_conditions.csv'
        self.assertRaises(IOError\
            , lambda: csv_parser.dat_from_csv(path, treatment_colname='treatment'))
| 3,915 | 9 | 308 |
de6e233d4d8fd3b0c7afb4f2312f032b36e54475 | 6,420 | py | Python | relion_star_handler.py | kttn8769/relion_star_handler | a5ec0b71bcdfbb239cb76e6e92ebbec34e71eedf | [
"MIT"
] | null | null | null | relion_star_handler.py | kttn8769/relion_star_handler | a5ec0b71bcdfbb239cb76e6e92ebbec34e71eedf | [
"MIT"
] | null | null | null | relion_star_handler.py | kttn8769/relion_star_handler | a5ec0b71bcdfbb239cb76e6e92ebbec34e71eedf | [
"MIT"
] | null | null | null | import os
import datetime
import numpy as np
import pandas as pd
class RelionMetaData:
"""RELION metadata handling class.
Parameters
----------
df_particles : pandas.DataFrame
DataFrame containing particle data block contents.
df_optics : pandas.DataFrame, optional
DataFrame containing optics group data block contents. By default None
starfile : string
starfile name
"""
@classmethod
def load(cls, starfile):
"""Load RELION metadata from a particle star file.
Parameters
----------
starfile : string
star file
Returns
-------
RelionMetaData
RelionMetaData class instance.
"""
with open(starfile, 'r') as f:
# Check RELION version
relion31 = None
for line in f:
words = line.strip().split()
if len(words) == 0:
continue
elif words[0] == 'data_optics':
relion31 = True
break
elif words[0] == 'data_':
relion31 = False
break
elif words[0][0] == '#':
# Comment line
continue
assert relion31 is not None, f'The starfile {starfile} is invalid.'
# Load starfile
if relion31:
df_particles, df_optics = cls._load_relion31(starfile)
else:
df_particles = cls._load_relion(starfile)
df_optics = None
return cls(df_particles, df_optics, starfile)
@classmethod
def _load_relion31(cls, starfile):
"""Load RELION 3.1 style starfile
Parameters
----------
starfile : string
RELION 3.1 style star file
Returns
-------
df_particles : pandas.DataFrame
dataframe containing particle data block
df_optics : pandas.DataFrame
dataframe containing optics group data block.
"""
with open(starfile, 'r') as f:
headers_optics, data_optics = cls._read_block(f, 'data_optics')
headers_particles, data_particles = cls._read_block(
f, 'data_particles')
df_optics = pd.DataFrame(data_optics, columns=headers_optics)
df_particles = pd.DataFrame(data_particles, columns=headers_particles)
return df_particles, df_optics
@classmethod
def _load_relion(cls, starfile):
"""Load RELION 2.x/3.0 style starfile
Parameters
----------
starfile : string
RELION 2.x/3.0 style starfile
Returns
-------
pandas.DataFrame
dataframe containing data block
"""
with open(starfile, 'r') as f:
headers, data = cls._read_block(f, 'data_')
df = pd.DataFrame(data, columns=headers)
return df
@classmethod
def _read_block(cls, f, blockname):
"""Read data block from starfile
Parameters
----------
f : file-like object
File-like object of starfile
blockname : string
Data block name to read.
Returns
-------
headers : list of strings
Metadata labels
body : ndarray
Metadatas
"""
# Get to the block (data_, data_optics, data_particles, etc...)
for line in f:
if line.startswith(blockname):
break
# Get to header loop
for line in f:
if line.startswith('loop_'):
break
# Get list of column headers
headers = []
for line in f:
if line.startswith('_'):
headers.append(line.strip().split()[0])
else:
break
# All subsequent lines until empty line is the data block body
body = [line.strip().split()]
for line in f:
if line.strip() == '':
break
else:
body.append(line.strip().split())
body = np.array(body)
assert len(headers) == body.shape[1]
return headers, body
def write(self, outdir, outfile_rootname):
"""Save metadata in file
Parameters
----------
outdir : string
Output directory.
outfile_rootname : string
Output file rootname.
"""
os.makedirs(outdir, exist_ok=True)
outfile = os.path.join(outdir, outfile_rootname + '.star')
with open(outfile, 'w') as f:
f.write('# Created by cryoPICLS at {}\n'.format(
datetime.datetime.now()))
f.write('\n')
if self.df_optics is not None:
self._write_block(f, 'data_optics', self.df_optics)
self._write_block(f, 'data_particles', self.df_particles)
else:
self._write_block(f, 'data_', self.df_particles)
def _write_block(self, f, blockname, df):
"""Write data block as star format
Parameters
----------
f : File-like object
Star file object
blockname : string
Data block name (e.g. data_optics)
df : pandas.DataFrame
DataFrame containing metadata labels and metadatas
"""
f.write(blockname.strip())
f.write('\n\n')
f.write('loop_\n')
f.write('\n'.join(df.columns))
f.write('\n')
for i in df.index:
f.write(' '.join(df.loc[i]))
f.write('\n')
f.write('\n')
def iloc(self, idxs):
"""Fancy indexing.
Parameters
----------
idxs : array-like
Indices to select.
Returns
-------
RelionMetaData
New metadata object with the selected rows.
"""
df_particles_new = self.df_particles.iloc[idxs]
return self.__class__(df_particles=df_particles_new,
df_optics=self.df_optics)
| 28.157895 | 79 | 0.530841 | import os
import datetime
import numpy as np
import pandas as pd
class RelionMetaData:
    """RELION metadata handling class.

    Parameters
    ----------
    df_particles : pandas.DataFrame
        DataFrame containing particle data block contents.
    df_optics : pandas.DataFrame, optional
        DataFrame containing optics group data block contents. By default None
    starfile : string
        starfile name
    """

    def __init__(self, df_particles, df_optics=None, starfile=None):
        # data_ block in RELION 2.x/3.0, data_particles block in RELION 3.1
        self.df_particles = df_particles
        # data_optics block in RELION 3.1
        self.df_optics = df_optics
        self.starfile = starfile

    @classmethod
    def load(cls, starfile):
        """Load RELION metadata from a particle star file.

        Parameters
        ----------
        starfile : string
            star file

        Returns
        -------
        RelionMetaData
            RelionMetaData class instance.
        """
        with open(starfile, 'r') as f:
            # Check RELION version: 3.1 files start with a data_optics
            # block, 2.x/3.0 files with a bare data_ block.
            relion31 = None
            for line in f:
                words = line.strip().split()
                if len(words) == 0:
                    continue
                elif words[0] == 'data_optics':
                    relion31 = True
                    break
                elif words[0] == 'data_':
                    relion31 = False
                    break
                elif words[0][0] == '#':
                    # Comment line
                    continue
            assert relion31 is not None, f'The starfile {starfile} is invalid.'

        # Load starfile
        if relion31:
            df_particles, df_optics = cls._load_relion31(starfile)
        else:
            df_particles = cls._load_relion(starfile)
            df_optics = None
        return cls(df_particles, df_optics, starfile)

    @classmethod
    def _load_relion31(cls, starfile):
        """Load RELION 3.1 style starfile

        Parameters
        ----------
        starfile : string
            RELION 3.1 style star file

        Returns
        -------
        df_particles : pandas.DataFrame
            dataframe containing particle data block
        df_optics : pandas.DataFrame
            dataframe containing optics group data block.
        """
        with open(starfile, 'r') as f:
            headers_optics, data_optics = cls._read_block(f, 'data_optics')
            headers_particles, data_particles = cls._read_block(
                f, 'data_particles')
        df_optics = pd.DataFrame(data_optics, columns=headers_optics)
        df_particles = pd.DataFrame(data_particles, columns=headers_particles)
        return df_particles, df_optics

    @classmethod
    def _load_relion(cls, starfile):
        """Load RELION 2.x/3.0 style starfile

        Parameters
        ----------
        starfile : string
            RELION 2.x/3.0 style starfile

        Returns
        -------
        pandas.DataFrame
            dataframe containing data block
        """
        with open(starfile, 'r') as f:
            headers, data = cls._read_block(f, 'data_')
        df = pd.DataFrame(data, columns=headers)
        return df

    @classmethod
    def _read_block(cls, f, blockname):
        """Read data block from starfile

        Parameters
        ----------
        f : file-like object
            File-like object of starfile
        blockname : string
            Data block name to read.

        Returns
        -------
        headers : list of strings
            Metadata labels
        body : ndarray
            Metadatas
        """
        # Get to the block (data_, data_optics, data_particles, etc...)
        for line in f:
            if line.startswith(blockname):
                break
        # Get to header loop
        for line in f:
            if line.startswith('loop_'):
                break
        # Get list of column headers
        headers = []
        for line in f:
            if line.startswith('_'):
                headers.append(line.strip().split()[0])
            else:
                break
        # All subsequent lines until empty line is the data block body.
        # `line` still holds the first non-header row from the loop above.
        body = [line.strip().split()]
        for line in f:
            if line.strip() == '':
                break
            else:
                body.append(line.strip().split())
        body = np.array(body)
        assert len(headers) == body.shape[1]
        return headers, body

    def write(self, outdir, outfile_rootname):
        """Save metadata in file

        Parameters
        ----------
        outdir : string
            Output directory.
        outfile_rootname : string
            Output file rootname.
        """
        os.makedirs(outdir, exist_ok=True)
        outfile = os.path.join(outdir, outfile_rootname + '.star')
        with open(outfile, 'w') as f:
            f.write('# Created by cryoPICLS at {}\n'.format(
                datetime.datetime.now()))
            f.write('\n')
            if self.df_optics is not None:
                self._write_block(f, 'data_optics', self.df_optics)
                self._write_block(f, 'data_particles', self.df_particles)
            else:
                self._write_block(f, 'data_', self.df_particles)

    def _write_block(self, f, blockname, df):
        """Write data block as star format

        Parameters
        ----------
        f : File-like object
            Star file object
        blockname : string
            Data block name (e.g. data_optics)
        df : pandas.DataFrame
            DataFrame containing metadata labels and metadatas
        """
        f.write(blockname.strip())
        f.write('\n\n')
        f.write('loop_\n')
        f.write('\n'.join(df.columns))
        f.write('\n')
        for i in df.index:
            f.write(' '.join(df.loc[i]))
            f.write('\n')
        f.write('\n')

    def iloc(self, idxs):
        """Fancy indexing.

        Parameters
        ----------
        idxs : array-like
            Indices to select.

        Returns
        -------
        RelionMetaData
            New metadata object with the selected rows.
        """
        df_particles_new = self.df_particles.iloc[idxs]
        # BUG FIX: the original dropped self.starfile here, so derived
        # objects lost track of their source file; propagate it.
        return self.__class__(df_particles=df_particles_new,
                              df_optics=self.df_optics,
                              starfile=self.starfile)
| 270 | 0 | 26 |
b3da40749a5ee02602b24369ea78a5224f727105 | 1,298 | py | Python | pajbot/managers/kvi.py | MrBean355/pajbot | 3f27aabccfb242f5e3e8eedd20c97633b0d39950 | [
"MIT"
] | 1 | 2021-10-02T10:19:38.000Z | 2021-10-02T10:19:38.000Z | pajbot/managers/kvi.py | MrBean355/pajbot | 3f27aabccfb242f5e3e8eedd20c97633b0d39950 | [
"MIT"
] | 2 | 2020-02-18T03:30:30.000Z | 2020-02-18T03:31:44.000Z | pajbot/managers/kvi.py | MrBean355/pajbot | 3f27aabccfb242f5e3e8eedd20c97633b0d39950 | [
"MIT"
] | 1 | 2021-10-02T10:19:38.000Z | 2021-10-02T10:19:38.000Z | import logging
from collections import UserDict
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
log = logging.getLogger(__name__)
| 24.037037 | 53 | 0.619414 | import logging
from collections import UserDict
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
log = logging.getLogger(__name__)
class KVIData:
    """A single integer counter stored in a per-streamer Redis hash.

    All counters for one streamer live in the hash "<streamer>:kvi",
    keyed by the counter id. Missing or non-numeric values read as 0.
    """

    def __init__(self, streamer, kvi_id):
        self.key = f"{streamer}:kvi"
        self.id = kvi_id

    def set(self, new_value, redis=None):
        """Store new_value for this counter."""
        conn = RedisManager.get() if redis is None else redis
        conn.hset(self.key, self.id, new_value)

    def get(self, redis=None):
        """Return the current counter value, defaulting to 0."""
        conn = RedisManager.get() if redis is None else redis
        try:
            return int(conn.hget(self.key, self.id))
        except (TypeError, ValueError):
            # hget returned None (field absent) or a non-numeric blob.
            return 0

    def _shift(self, delta):
        # Read-modify-write on a single connection. Like the original
        # inc/dec, this is two commands and therefore not atomic.
        conn = RedisManager.get()
        self.set(self.get(redis=conn) + delta, redis=conn)

    def inc(self):
        """Increase the counter by one."""
        self._shift(1)

    def dec(self):
        """Decrease the counter by one."""
        self._shift(-1)

    def __str__(self):
        return str(self.get())
class KVIManager(UserDict):
    """Dict-like factory of KVIData handles for the current streamer."""

    def __init__(self):
        self.streamer = StreamHelper.get_streamer()
        super().__init__()

    def __getitem__(self, kvi_id):
        # Handles are cheap stateless wrappers, so build one per lookup.
        return KVIData(self.streamer, kvi_id)
| 861 | -1 | 260 |
8e0d8156ffc73a70b412f2e569c64b49b3388642 | 5,811 | py | Python | cloud_integration/integration.py | RobbyAkbar/JobFlex | 7223ca366a0fa40822061aea63db17bf7ea8c947 | [
"MIT"
] | null | null | null | cloud_integration/integration.py | RobbyAkbar/JobFlex | 7223ca366a0fa40822061aea63db17bf7ea8c947 | [
"MIT"
] | null | null | null | cloud_integration/integration.py | RobbyAkbar/JobFlex | 7223ca366a0fa40822061aea63db17bf7ea8c947 | [
"MIT"
] | 1 | 2021-06-27T05:38:05.000Z | 2021-06-27T05:38:05.000Z | #!/usr/bin/python3
from google.cloud import bigquery
from google.cloud import storage
import flask
from flask import request, jsonify, abort
import json
#for ML
import tensorflow as tf
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import PyPDF2
import pandas as pd
import os
from sklearn.preprocessing import LabelBinarizer
# Load labels
filename = 'train_labels.csv'
data = pd.read_csv(filename, header=0, names=['Query'])
filename2 = 'train_descs.csv'
data2 = pd.read_csv(filename2, header = 0, names = ['Description'])
# Initialize tokenizer
tokenizer = Tokenizer(num_words = 3000)
tokenizer.fit_on_texts(data2['Description'])
#Load Model
model = tf.keras.models.load_model('../saved_model')
predicted = model.predict(token_list, verbose = 0)
app = flask.Flask(__name__)
app.config["DEBUG"] = True
bucketName="job-flex-storage"
@app.route('/', methods=['GET'])
@app.route('/search', methods=['POST'])
@app.route('/pdfPredict', methods=['POST'])
@app.route('/getRecommendation', methods=['POST'])
app.run(host = "0.0.0.0",port=8080)
| 34.384615 | 130 | 0.669076 | #!/usr/bin/python3
from google.cloud import bigquery
from google.cloud import storage
import flask
from flask import request, jsonify, abort
import json
#for ML
import tensorflow as tf
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import PyPDF2
import pandas as pd
import os
from sklearn.preprocessing import LabelBinarizer
# Load labels
filename = 'train_labels.csv'
data = pd.read_csv(filename, header=0, names=['Query'])
filename2 = 'train_descs.csv'
data2 = pd.read_csv(filename2, header = 0, names = ['Description'])
# Initialize tokenizer
tokenizer = Tokenizer(num_words = 3000)
tokenizer.fit_on_texts(data2['Description'])
#Load Model
model = tf.keras.models.load_model('../saved_model')
# NOTE(review): `token_list` is not defined at module scope, so this line
# raises NameError when the module is imported; getPrediction() later reads
# this stale `predicted` instead of scoring its own input. Both look
# unintended — confirm and fix (the prediction should be computed inside
# getPrediction from the freshly tokenized resume text).
predicted = model.predict(token_list, verbose = 0)
app = flask.Flask(__name__)
app.config["DEBUG"] = True
bucketName="job-flex-storage"
@app.route('/', methods=['GET'])
def home():
    """GET landing page; all useful endpoints on this service are POST-only."""
    return '''<h1>This server doesn't handle GET</h1>
    <p>Use Post Instead</p>'''
def jobQuery(jobNameList):
    """Search the BigQuery jobs table for titles matching any search term.

    Parameters
    ----------
    jobNameList : non-empty list of str
        Search terms; each is matched case-insensitively as a substring
        of the job Title.

    Returns
    -------
    list of dict
        Up to 10 matching rows, with EndDate rendered as a string.
    """
    client = bigquery.Client()
    # SECURITY FIX: the original concatenated user-supplied search words
    # straight into the SQL text (SQL injection). Bind each pattern as a
    # named query parameter instead; % wildcards live in the value.
    conditions = []
    params = []
    for i, jobName in enumerate(jobNameList):
        conditions.append("lower(Title) LIKE @pattern{}".format(i))
        params.append(bigquery.ScalarQueryParameter(
            "pattern{}".format(i), "STRING", "%" + jobName.lower() + "%"))
    toQuery = (
        "SELECT * from `b21-cap0139-jobflex.jobsData.main_jobs_data` where "
        + " OR ".join(conditions)
        + " LIMIT 10"
    )
    query_job = client.query(
        toQuery,
        job_config=bigquery.QueryJobConfig(query_parameters=params),
    )
    results = query_job.result()  # Waits for job to complete.
    toReturn = [dict(row) for row in results]
    for x in toReturn:
        x["EndDate"] = x["EndDate"].strftime("%Y-%m-%d %H:%M:%S.%f")
    return toReturn
def resultQuery(id):
    """Fetch the stored recommendation rows for one uploaded-CV id.

    The `recommendation` column holds a JSON array; each element is
    unnested and its fields projected into one result row.
    """
    client = bigquery.Client()
    # SECURITY FIX: the original concatenated `id` into the SQL text
    # (SQL injection); bind it as a query parameter instead.
    query_job = client.query(
        '''
        SELECT
        JSON_EXTRACT_SCALAR(h,'$.JobID') as JobID,
        JSON_EXTRACT_SCALAR(h,'$.WindowID') as WindowID,
        JSON_EXTRACT_SCALAR(h,'$.Title') as Title,
        JSON_EXTRACT_SCALAR(h,'$.Description') as Description,
        JSON_EXTRACT_SCALAR(h,'$.Requirements') as Requirements,
        JSON_EXTRACT_SCALAR(h,'$.City') as City,
        JSON_EXTRACT_SCALAR(h,'$.State') as State,
        JSON_EXTRACT_SCALAR(h,'$.Country') as Country,
        JSON_EXTRACT_SCALAR(h,'$.Zip5') as Zip5,
        JSON_EXTRACT_SCALAR(h,'$.StartDate') as StartDate,
        JSON_EXTRACT_SCALAR(h,'$.EndDate') as EndDate
        FROM `b21-cap0139-jobflex.jobsData.results_data`
        LEFT join unnest(json_extract_array(recommendation)) as h
        WHERE id LIKE @result_id
        ''',
        job_config=bigquery.QueryJobConfig(
            query_parameters=[
                bigquery.ScalarQueryParameter("result_id", "STRING", id)
            ]
        ),
    )
    results = query_job.result()  # Waits for job to complete.
    return [dict(row) for row in results]
def download_blob(bucket_name, source_blob_name, destination_file_name):
    """Download one object from a GCS bucket to a local file."""
    # Bucket.blob builds a client-side handle without fetching any
    # metadata from Google Cloud Storage, which is all we need before
    # downloading the content.
    handle = storage.Client().bucket(bucket_name).blob(source_blob_name)
    handle.download_to_filename(destination_file_name)
    print(
        "Blob {} downloaded to {}.".format(
            source_blob_name, destination_file_name
        )
    )
def getPrediction(filename):
    """Predict the job-category label for a resume PDF.

    Extracts text from the first page of *filename*, tokenizes and pads
    it the same way the training descriptions were processed, scores it
    with the loaded model, and maps the argmax back to a label from the
    training queries.
    """
    # Read and extract text from PDF file; unreadable/corrupt PDFs
    # degrade to an empty string rather than failing the request.
    pdf_file = filename
    try:
        pdf_read = PyPDF2.PdfFileReader(pdf_file)
        page = pdf_read.getPage(0)
        page_content = page.extractText()
    except Exception:  # was a bare except; keep best-effort behavior
        page_content = ""
    token_list = tokenizer.texts_to_sequences([page_content])[0]
    token_list = pad_sequences([token_list], maxlen=1200, padding='post')
    # BUG FIX: the original read the module-level `predicted` (computed
    # once at import time from an undefined token_list) instead of
    # scoring this resume — run the model on the fresh tokens.
    scores = model.predict(token_list, verbose=0)
    encoder = LabelBinarizer()
    encoder.fit(data['Query'])
    prediction = encoder.classes_[np.argmax(scores)]
    return str(prediction)
def appendRecommend(id, recommendation):
    """Insert one (id, recommendation) row into the BigQuery results table."""
    client = bigquery.Client()
    rows = [{u"id": id, u"recommendation": recommendation}]
    # Make an API request; insert_rows_json returns a list of row errors.
    errors = client.insert_rows_json(
        "jobsData.results_data", rows, row_ids=[None] * len(rows)
    )
    if not errors:
        print("New rows have been added.")
    else:
        print("Encountered errors while inserting rows: {}".format(errors))
@app.route('/search', methods=['POST'])
def search():
    """POST /search: return jobs matching the words in the "toSearch" field."""
    payload = request.get_json()
    if "toSearch" not in payload:
        # Guard clause: reject bodies without the expected key.
        abort(404, description='Wrong post method, make sure to use JSON with "toSearch" as the key')
    return jsonify(jobQuery(payload["toSearch"].split()))
@app.route('/pdfPredict', methods=['POST'])
def pdfPredict():
    """POST /pdfPredict: classify an uploaded CV PDF and store job recommendations.

    Expects a JSON body with the uploaded object's "name" (GCS path) and
    "contentType". Downloads the PDF, predicts a job category, queries
    matching jobs, and persists them keyed by the local file name.
    """
    request_data = request.get_json()
    filename = request_data["name"]
    contentType = request_data["contentType"]
    if (contentType == "multipart/form-data" and ".pdf" in filename)or(contentType=="application/pdf"):
        # NOTE(review): filename[4:] strips a fixed 4-character prefix —
        # presumably a storage folder such as "cvs/"; confirm against the
        # uploader before changing.
        download_blob(bucketName,filename,filename[4:])
        prediction=getPrediction(filename[4:])
        print(prediction)
        recommendation=jobQuery(prediction.split())
        appendRecommend(filename[4:],json.dumps(recommendation))
        # Remove the temporary local copy once recommendations are stored.
        os.remove(filename[4:])
        return(f"It's a PDF, and the prediction is {prediction}")
    else:
        print("It's not a PDF file so I don't care")
        return("It's not a PDF")
@app.route('/getRecommendation', methods=['POST'])
def getRecommendation():
    """POST /getRecommendation: return stored recommendations for a CV id."""
    payload = request.get_json()
    if "id" not in payload:
        # Guard clause: reject bodies without the expected key.
        abort(404, description='Wrong post method, make sure to use JSON with "id" as the key')
    return jsonify(resultQuery(payload["id"] + ".pdf"))
app.run(host = "0.0.0.0",port=8080)
| 4,467 | 0 | 213 |
39ee602cf8253fc5a5f61f89afe1ed8899ca53b6 | 1,908 | py | Python | Python/algorithm/mergeInterval.py | xyj77/CodeRecord | 805cdf4b2622b16b30b6734e360ea1c1c352be3a | [
"MIT"
] | null | null | null | Python/algorithm/mergeInterval.py | xyj77/CodeRecord | 805cdf4b2622b16b30b6734e360ea1c1c352be3a | [
"MIT"
] | null | null | null | Python/algorithm/mergeInterval.py | xyj77/CodeRecord | 805cdf4b2622b16b30b6734e360ea1c1c352be3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
合并区间问题:
输入:
3
1,10;32,45
78,94;5,16
80,100;200,220;16,32
输出:
1,45;78,100;200,220
Created on Sun Aug 12 09:58:08 2018
"""
from __future__ import absolute_import
from __future__ import print_function
class Solution(object):
'''
def merge(self, parts):
n = len(parts)
if n <= 1:
return parts
result = []
parts.sort(key=lambda d: d.start)
left, right = parts[0].start, parts[0].end
for index in range(1,n): #从第二个区间开始判断
# 下一个区间的起始位置小于或等于当前的right值,说明可以合并
if parts[index].start <= right:
right = max(parts[index].end, right)
# 下一个区间的起始位置大于当前的right值,说明应该重新生成区间
else:
# 实际上是以left, right为初始变量生成一个Part型的对象,并加入结果列表
result.append(Part(left, right))
left = parts[index].start
right = parts[index].end
index += 1
result.append(Part(left, right))
return result
'''
if __name__ == '__main__':
main() | 23.555556 | 67 | 0.5 | # -*- coding: utf-8 -*-
"""
合并区间问题:
输入:
3
1,10;32,45
78,94;5,16
80,100;200,220;16,32
输出:
1,45;78,100;200,220
Created on Sun Aug 12 09:58:08 2018
"""
from __future__ import absolute_import
from __future__ import print_function
class Part(object):
    """A closed interval [start, end]."""
    def __init__(self, start, end):
        self.start = start
        self.end = end
class Solution(object):
    """Merge overlapping intervals (objects exposing .start and .end)."""

    def merge(self, intervals):
        """Return the intervals with all overlapping ones merged.

        Sorts by start and folds each interval into the running result:
        an interval that begins at or before the end of the last merged
        interval extends it in place; otherwise it opens a new one.

        Note: merging mutates the ``end`` attribute of objects in the
        input list (same behavior as the original implementation).
        """
        # (A dead, commented-out alternative implementation was removed;
        # the cryptic `out += i,` append is now an explicit append.)
        merged = []
        for interval in sorted(intervals, key=lambda iv: iv.start):
            if merged and interval.start <= merged[-1].end:
                # Overlapping or touching: extend the last merged interval.
                merged[-1].end = max(merged[-1].end, interval.end)
            else:
                merged.append(interval)
        return merged
def main():
    """Read interval sets from stdin, merge them, and print the result.

    Input: first line is the number of following lines; each of those
    lines holds semicolon-separated "start,end" pairs. Output: merged
    intervals as "start,end" pairs joined by semicolons.
    """
    # BUG FIX: raw_input() only exists on Python 2; despite the
    # __future__ imports, the original raised NameError on Python 3.
    try:
        read_line = raw_input  # noqa: F821 - Python 2 builtin
    except NameError:
        read_line = input
    intervals = []
    count = int(read_line())
    solver = Solution()
    for _ in range(count):
        for pair in [x.split(',') for x in read_line().split(';')]:
            intervals.append(Part(int(pair[0]), int(pair[1])))
    merged = solver.merge(intervals)
    # Matches the original output (which trimmed a trailing ';').
    print(';'.join('{0},{1}'.format(p.start, p.end) for p in merged))
if __name__ == '__main__':
    main()
c3907ab3a6cb71ea4fd5c1f7a9c2038e4d416a8e | 1,091 | py | Python | scripts/dump-sizes.py | mozilla/jydoop | a1ce82f3c6f3d335ba2b0cbc310dac52624a6e0b | [
"Apache-2.0"
] | 8 | 2015-03-17T19:19:10.000Z | 2018-03-26T23:48:05.000Z | scripts/dump-sizes.py | mozilla/jydoop | a1ce82f3c6f3d335ba2b0cbc310dac52624a6e0b | [
"Apache-2.0"
] | 3 | 2015-05-15T09:17:44.000Z | 2019-03-28T04:13:17.000Z | scripts/dump-sizes.py | mozilla/jydoop | a1ce82f3c6f3d335ba2b0cbc310dac52624a6e0b | [
"Apache-2.0"
] | 6 | 2015-11-05T03:01:40.000Z | 2019-11-03T11:57:54.000Z | import crashstatsutils
import jydoop
import json
from org.python.core.util import StringUtil
setupjob = crashstatsutils.dosetupjob([])
output = jydoop.outputWithKey
| 30.305556 | 93 | 0.636114 | import crashstatsutils
import jydoop
import json
from org.python.core.util import StringUtil
setupjob = crashstatsutils.dosetupjob([])
def map(k, context):
    """jydoop map step over one crash-report HBase row.

    Emits (key, (product, version, is_plugin_hang, total_dump_bytes,
    missing_dump_count)).

    NOTE(review): the name shadows the builtin ``map`` — presumably the
    entry-point name required by the jydoop framework; confirm before
    renaming.
    """
    result = context.cx.getCurrentValue()
    # Crash metadata is stored as JSON bytes in the meta_data:json column.
    meta_data = StringUtil.fromBytes(result.getValue("meta_data", "json"))
    meta = json.loads(meta_data)
    product = meta['ProductName']
    version = meta['Version']
    ispluginhang = meta.get('PluginHang', None) == "1"
    err = 0  # dumps referenced by metadata but absent from the row
    # Main minidump: use its stored byte length, or 0 (and count an
    # error) when the column is missing.
    kv = result.getColumnLatest("raw_data", "dump")
    if kv is None:
        err += 1
        dumplen = 0
    else:
        dumplen = kv.getValueLength()
    # Additional minidumps are listed as a comma-separated suffix list in
    # the metadata; each lives in its own raw_data column.
    if "additional_minidumps" in meta:
        extradumps = meta["additional_minidumps"].split(",")
        for extradump in extradumps:
            extrakv = result.getColumnLatest("raw_data", "upload_file_minidump_" + extradump)
            if extrakv is None:
                err += 1
            else:
                extralen = extrakv.getValueLength()
                dumplen += extralen
    context.write(k, (product, version, ispluginhang, dumplen, err))
output = jydoop.outputWithKey
| 902 | 0 | 22 |