blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8c9ede3bb7f9724ebd700bbbda189af02ac9e358 | Python | kev1/snt | /main.py | UTF-8 | 5,663 | 2.9375 | 3 | [] | no_license | import os
# print(env.variables.config['theme']['palette']) # access palette color. Automatic toggle of color ?
def define_env(env):
    """mkdocs-macros hook: register the macros and helpers used by the docs.

    Registers the exercice(), terminal(), IDE() and IDEv() macros on *env*
    and keeps per-page counters in env.variables.
    """
    #---------------- <exo perso>--------------------
    env.variables['compteur_exo'] = 0
    @env.macro
    def exercice():
        # Each call numbers the next exercise on the page.
        env.variables['compteur_exo'] += 1
        return f"Exercice { env.variables['compteur_exo']}"
    #---------------- </exo perso>--------------------
    #---------------- <PYODIDE>--------------------
    env.variables['term_counter'] = 0
    env.variables['IDE_counter'] = 0
    @env.macro
    def terminal() -> str:
        """
        Purpose : Create a Python Terminal.
        Methods : Two layers to avoid focusing on the Terminal. 1) Fake Terminal using CSS 2) A click hides the fake
        terminal and triggers the actual Terminal.
        """
        tc = env.variables['term_counter']
        env.variables['term_counter'] += 1
        return f"""<div onclick='start_term("id{tc}")' id="fake_id{tc}" class="terminal_f"><label class="terminal"><span>>>> </span></label></div><div id="id{tc}" class="hide"></div>"""
    def read_ext_file(nom_script : str) -> str:
        """
        Purpose : Read a Python file that is uploaded on the server.
        Methods : The content of the file is hidden in the webpage. Replacing \n by a string makes it possible
        to integrate the content in mkdocs admonitions.
        """
        short_path = f"""docs/{os.path.dirname(env.variables.page.url.rstrip('/'))}"""
        try:
            f = open(f"""{short_path}/scripts/{nom_script}.py""")
            content = ''.join(f.readlines())
            f.close()
            content = content+ "\n"
            # Hack to integrate code lines in admonitions in mkdocs
            return content.replace('\n','backslash_newline')
        except :
            # NOTE(review): bare except silently returns None when the script
            # is missing or unreadable; generate_content() relies on that.
            return
    def generate_content(nom_script : str) -> str:
        """
        Purpose : Return content and current number IDE {tc}.
        """
        # Every call consumes one IDE id, even if the script does not exist.
        tc = env.variables['IDE_counter']
        env.variables['IDE_counter'] += 1
        content = read_ext_file(nom_script)
        if content is not None :
            return content, tc
        else : return "", tc
    def create_upload_button(tc : str) -> str:
        """
        Purpose : Create upoad button for a IDE number {tc}.
        Methods : Use an HTML input to upload a file from user. The user clicks on the button to fire a JS event
        that triggers the hidden input.
        """
        return f"""<button class="emoji" onclick="document.getElementById('input_editor_{tc}').click()">⤴️</button>\
<input type="file" id="input_editor_{tc}" name="file" enctype="multipart/form-data" class="hide"/>"""
    def create_unittest_button(tc: str, nom_script: str, mode: str) -> str:
        """
        Purpose : Generate the button for IDE {tc} to perform the unit tests if a valid test_script.py is present.
        Methods : Hide the content in a div that is called in the Javascript
        """
        # Look for a sibling file named test_<script>.py next to the script.
        stripped_nom_script = nom_script.split('/')[-1]
        relative_path = '/'.join(nom_script.split('/')[:-1])
        nom_script = f"{relative_path}/test_{stripped_nom_script}"
        content = read_ext_file(nom_script)
        if content is not None:
            return f"""<span id="test_term_editor_{tc}" class="hide">{content}</span><button class="emoji_dark" onclick=\'executeTest("{tc}","{mode}")\'>🛂</button><span class="compteur">5/5</span>"""
        else:
            return ''
    def blank_space() -> str:
        """
        Purpose : Return 5em blank spaces. Use to spread the buttons evenly
        """
        return f"""<span style="indent-text:5em"> </span>"""
    @env.macro
    def IDEv(nom_script : str ='') -> str:
        """
        Purpose : Easy macro to generate vertical IDE in Markdown mkdocs.
        Methods : Fire the IDE function with 'v' mode.
        """
        return IDE(nom_script, 'v')
    @env.macro
    def IDE(nom_script : str ='', mode : str = 'h') -> str:
        """
        Purpose : Create a IDE (Editor+Terminal) on a Mkdocs document. {nom_script}.py is loaded on the editor if present.
        Methods : Two modes are available : vertical or horizontal. Buttons are added through functioncal calls.
        Last span hides the code content of the IDE if loaded.
        """
        # NOTE(review): each IDE consumes two counter values (content + corr);
        # tc is rebound by the second call, so ids stay self-consistent but
        # advance by 2 per IDE -- confirm intended.
        content, tc = generate_content(nom_script)
        corr_content, tc = generate_content(f"""{'/'.join(nom_script.split('/')[:-1])}/corr_{nom_script.split('/')[-1]}""")
        div_edit = f'<div class="ide_classe">'
        if mode == 'v':
            div_edit += f'<div class="wrapper"><div class="interior_wrapper"><div id="editor_{tc}"></div></div><div id="term_editor_{tc}" class="term_editor"></div></div>'
        else:
            div_edit += f'<div class="wrapper_h"><div class="line" id="editor_{tc}"></div><div id="term_editor_{tc}" class="term_editor_h terminal_f_h"></div></div>'
        div_edit += f"""<button class="emoji" onclick='interpretACE("editor_{tc}","{mode}")'>▶️</button>"""
        div_edit += f"""{blank_space()}<button class="emoji" onclick=\'download_file("editor_{tc}","{nom_script}")\'>⤵️</button>{blank_space()}"""
        div_edit += create_upload_button(tc)
        div_edit += create_unittest_button(tc, nom_script, mode)
        div_edit += '</div>'
        div_edit += f"""<span id="content_editor_{tc}" class="hide">{content}</span>"""
        div_edit += f"""<span id="corr_content_editor_{tc}" class="hide">{corr_content}</span>"""
        return div_edit
#---------------- </PYODIDE>-------------------- | true |
baacbe025c2a055ffd3dff7b6520cd9b841d6425 | Python | BLUECARVIN/Several-ReinforcementLearning | /atari_game/Agent/DoubleDQN.py | UTF-8 | 10,939 | 2.515625 | 3 | [
"MIT"
] | permissive | import sys
sys.path.append("..")
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import os
import copy
import pickle
import gym
import numpy as np
import random
from PIL import Image
from Utils import hard_update
from MLP import QNet
import ReplayBuffer
class DoubleDQNAgent(object):
    def __init__(self,
                 env,
                 random_seed,
                 save_path,
                 q_net=QNet,
                 gamma=0.99,
                 batch_size=32,
                 initial_eps = 1.0,
                 end_eps = 0.1,
                 eps_plan = 500000,
                 lr=0.00025,
                 learning_start=50000,
                 learning_freq=4,
                 frame_history_len=4,
                 target_update_freq=10000,
                 memory_size=1000000,
                 max_steps = 10000000,
                 **kwargs):
        """
        Double-DQN Agent.

        paras:
            env: the gym environment
            random_seed: the random seed
            save_path: the path to save model parameters
            q_net: the Q learning network class
            gamma: the reward discount factor
            batch_size: minibatch size used for experience replay
            initial_eps: the initial prob to choose a random action
            end_eps: the final prob to choose a random action
            eps_plan: number of steps over which eps is annealed
            lr: the optimizer's learning rate
            learning_start: begin to learn after learning_start steps
            learning_freq: the training frequency (in environment steps)
            frame_history_len: how many frames are stacked into one state
            target_update_freq: the target network's update frequency
            memory_size: the maximum size of the replay buffer
            max_steps: total number of environment steps to train for
            **kwargs: ignored; accepted for configuration convenience
        """
        assert type(env.observation_space) == gym.spaces.Box
        assert type(env.action_space) == gym.spaces.Discrete
        # fix random seed
        torch.manual_seed(random_seed)
        np.random.seed(random_seed)
        random.seed(random_seed)
        # set env
        self.env = env
        self.test_env = copy.deepcopy(env)
        # get observation dim
        if len(env.observation_space.shape) == 1: # running on low-dimension observation(RAM)
            self.observation_dim = env.observation_space.shape[0]
        else:
            # image observations: the network input is frame_history_len stacked frames
            img_h, img_w, img_c = env.observation_space.shape
            self.observation_dim = frame_history_len
        # get action dim
        self.action_dim = env.action_space.n
        # set Q network
        self.learning_Q = q_net(self.observation_dim, self.action_dim).cuda()
        self.target_Q = q_net(self.observation_dim, self.action_dim).cuda()
        # sync two networks' parameter
        hard_update(self.target_Q, self.learning_Q)
        # set replay buffer
        self.replay_buffer = ReplayBuffer.ReplayBuffer(memory_size, frame_history_len)
        # define learning Q network's optimizer
        self.optimizer = torch.optim.RMSprop(self.learning_Q.parameters(), lr=lr, eps=0.01)
        # define loss function
        self.loss_func = nn.MSELoss()
        # initial other parameters
        self.gamma = gamma
        self.batch_size = batch_size
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.eps_plan = eps_plan
        self.learning_start = learning_start
        self.learning_freq = learning_freq
        self.frame_history_len = frame_history_len
        self.max_steps = max_steps
        self.target_update_freq = target_update_freq
        self.steps = 0
        self.save_path = save_path
        # set the eps
        self.eps = self.initial_eps
# ============================ save and load model ===============================
    def save_model(self, name, path=None):
        """Persist the target network's weights to <save_path><name>.pt.

        Args:
            name: base filename (without extension).
            path: if given, replaces self.save_path for this and later saves.
        """
        if path:
            self.save_path = path
        if not os.path.isdir(self.save_path):
            os.makedirs(self.save_path)
        # NOTE(review): save_path is concatenated directly with name, so it
        # must end with a path separator -- confirm callers' convention.
        torch.save(self.target_Q.state_dict(), self.save_path + name + '.pt')
        print("The target model's parameters have been saved sucessfully!")
    def load_model(self, file_path):
        """Load weights from *file_path* into the target network, then copy
        them into the learning network so both start in sync."""
        self.target_Q.load_state_dict(torch.load(file_path))
        hard_update(self.learning_Q, self.target_Q)
        print("The models' parameters have been loaded sucessfully!")
# ============================ utils ==========================================
def cal_eps(self):
self.eps = self.initial_eps - (self.initial_eps - self.end_eps) / self.eps_plan * (self.steps - self.learning_start)
if self.eps < self.end_eps:
self.eps = self.end_eps
    def pre_process(self, observation):
        """Convert a raw 210x160x3 Atari frame into an 84x84x1 uint8 image.

        Steps: RGB -> greyscale (luma weights), bilinear resize to 84x110,
        then crop rows 18..101 to keep the 84x84 playing area.
        """
        img = np.reshape(observation, [210, 160, 3]).astype(np.float32)
        # ITU-R BT.601 luma weights for the greyscale conversion.
        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        img = Image.fromarray(img)
        resized_screen = img.resize((84, 110), Image.BILINEAR)
        resized_screen = np.array(resized_screen)
        # Crop away score/border rows, leaving an 84x84 square.
        x_t = resized_screen[18:102, :]
        x_t = np.reshape(x_t, [84, 84, 1])
        return x_t.astype(np.uint8)
# ============================= evaluate ======================================
    def get_exploration_action(self, state):
        """Epsilon-greedy action selection using the learning network.

        Args:
            state: stacked observation array (pixel values are divided by
                255 in the greedy branch before being fed to the network).

        Returns:
            A 1-element int tensor holding the chosen action.
        """
        sample = random.random()
        # Refresh self.eps according to the annealing schedule first.
        self.cal_eps()
        if sample > self.eps:
            # Greedy branch: query the learning network.
            state = torch.from_numpy(state).type(torch.float32).unsqueeze(0) / 255.0
            state = Variable(state).cuda()
            # action = torch.argmax(self.learning_Q(state)).detach().cpu()
            action = torch.argmax(self.learning_Q(state), dim=1).cpu()
            # action = self.learning_Q(state).data.max(1)[1].cpu()
        else:
            # Exploration branch: uniform random action.
            # action = int(np.random.uniform() * self.action_dim)
            # action = torch.from_numpy(action)
            action = torch.IntTensor([random.randrange(self.action_dim)])
        return action
    def get_exploitation_action(self, state):
        """Greedy action from the target network (no exploration).

        Returns a 0-dim tensor (torch.argmax over the flattened Q output);
        callers should unwrap it with int()/.item() rather than indexing.
        """
        state = torch.from_numpy(state).type(torch.float32).unsqueeze(0) / 255.0
        state = Variable(state).cuda()
        action = torch.argmax(self.target_Q(state)).detach().cpu()
        return action
# ============================= train ======================================
    def train(self, is_render=False, path=None):
        """Run the main Double-DQN training loop for up to max_steps env steps.

        Args:
            is_render: if True, render the environment every step.
            path: optional directory forwarded to save_model().
        """
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source; the logging/bookkeeping section is assumed to sit at loop
        # level -- confirm against the original repository.
        last_observation = self.env.reset()
        last_observation = self.pre_process(last_observation)
        mean_episode_reward = -float('nan')
        best_mean_episode_reward = -float('inf')
        log = {'mean_episode_reward':[], 'best_mean_episode_reward':[]}
        num_param_updates = 0
        episode_rewards = []
        one_episode_reward = []
        loss = []
        while self.steps < self.max_steps:
            # store lastest observation
            last_index = self.replay_buffer.store_frame(last_observation)
            recent_observation = self.replay_buffer.encoder_recent_observation()
            # choose a random action if not state learning yet
            if self.steps < self.learning_start:
                action = random.randrange(self.action_dim)
            else:
                action = self.get_exploration_action(recent_observation)[0].numpy()
            # make a step
            observation, reward, done, _ = self.env.step(action)
            observation = self.pre_process(observation)
            one_episode_reward.append(reward)
            if is_render:
                self.env.render()
            # clip rewards between -1 and 1
            reward = max(-1.0, min(reward, 1.0))
            # store ohter info in replay memory
            self.replay_buffer.store_effct(last_index, action, reward, done)
            # if done, restat env
            if done:
                observation = self.env.reset()
                observation = self.pre_process(observation)
                episode_rewards.append(np.sum(one_episode_reward))
                one_episode_reward = []
            last_observation = observation
            # perform experience replay and train the network
            if ((self.steps > self.learning_start) and
                (self.steps % self.learning_freq == 0) and
                self.replay_buffer.can_sample):
                # get batch from replay buffer
                obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = self.replay_buffer.sample_batch(self.batch_size)
                # turn all data to tensor
                obs_batch = Variable(torch.from_numpy(obs_batch).type(torch.float32) / 255.0).cuda()
                act_batch = Variable(torch.from_numpy(act_batch).long()).cuda()
                rew_batch = Variable(torch.from_numpy(rew_batch).type(torch.float32)).cuda()
                next_obs_batch = Variable(torch.from_numpy(next_obs_batch).type(torch.float32) / 255.).cuda()
                not_done_mask = Variable(torch.from_numpy(1 - done_mask).type(torch.float32)).cuda()
                # ================================ calculate bellman =========================================
                # get current Q value
                current_q_value = self.learning_Q(obs_batch).gather(1, act_batch.unsqueeze(1))
                # using learning Q net to choose the next action and using target Q to calculate next Q
                # (this decoupling of selection and evaluation is the Double-DQN trick)
                next_action = self.learning_Q(next_obs_batch).max(1)[1]
                next_max_q = self.target_Q(next_obs_batch).gather(1, next_action.unsqueeze(1))
                next_q_values = not_done_mask.view(-1, 1) * next_max_q
                # compute the target of the current q values
                target_q_values = rew_batch.view(-1, 1) + (self.gamma * next_q_values)
                # compute bellman error
                bellman_error = target_q_values - current_q_value
                loss.append(bellman_error.detach().cpu().numpy())
                # clip bellman error between [-1, 1]
                clipped_bellman_error = bellman_error.clamp(-1, 1)
                # * -1 so that backward() receives the error gradient directly
                bellman_d = -1. * clipped_bellman_error
                # optimize
                self.optimizer.zero_grad()
                current_q_value.backward(bellman_d.data)
                self.optimizer.step()
                # update steps
                num_param_updates += 1
                # update network
                if num_param_updates % self.target_update_freq == 0:
                    hard_update(self.target_Q, self.learning_Q)
            if len(episode_rewards) > 0:
                mean_episode_reward = np.mean(episode_rewards[-100:])
            if len(episode_rewards) > 100:
                best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)
            log['mean_episode_reward'].append(mean_episode_reward)
            log['best_mean_episode_reward'].append(best_mean_episode_reward)
            if self.steps % 5000 == 0 and self.steps > self.learning_start:
                print("Steps: {}".format(self.steps))
                print("mean reward (lastest 100 episodes): {:.4f}".format(mean_episode_reward))
                print("best mean reward: {:.4f}".format(best_mean_episode_reward))
                print("episodes: {}".format(len(episode_rewards)))
                print("exploration: {:.4f}".format(self.eps))
                print("loss: {:.4f}".format(np.mean(loss)))
                sys.stdout.flush()
                loss = []
                with open(self.save_path + 'log.pkl', 'wb') as f:
                    pickle.dump(log, f)
                self.save_model('DQNtest', path=path)
            self.steps += 1
# =================================== test ===============================================
def test(self, path, epoch, is_render=False):
self.load_model(path)
last_observation = self.env.reset()
last_observation = self.pre_process(last_observation)
mean_episode_reward = -float('nan')
log = {'mean_episode_reward':[]}
episode_rewards = []
one_episode_reward = []
loss = []
done_epoch = 0
for i in range(epoch):
done = False
while not done:
# store lastest observation
last_index = self.replay_buffer.store_frame(last_observation)
recent_observation = self.replay_buffer.store_frame(last_observation)
# choose the action by ste strategy
action = self.get_exploitation_action(recent_observation)[0].numpy()
# make a step
observation, reward, done, _ = self.env.step(action)
observation = self.pre_process(observation)
one_episode_reward.append(reward)
if is_render:
self.env.render()
self.replay_buffer.store_effect(last_index, action, reward, done)
if done:
observation = self.env.reset()
observation = self.pre_process(observation)
print("For the {}th epoch: ".format(i))
print("Agent has been taken {} steps, and the total reward is {:.2f}.".format(len(one_episode_reward), sum(one_episode_reward)))
one_episode_reward = []
print("Done!")
env.close()
| true |
5e74f80f6b69e6ba959e3c0c8fb221bc414e18a5 | Python | mj-will/nessai | /nessai/utils/indices.py | UTF-8 | 1,684 | 3.03125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Utilities related to insertion indices.
"""
import numpy as np
from scipy import stats
def compute_indices_ks_test(indices, nlive, mode="D+"):
    """
    Compute the one-sided KS statistic for discrete insertion indices
    given a number of live points.

    Parameters
    ----------
    indices : array_like
        Indices of newly inserted live points.
    nlive : int
        Number of live points.
    mode : str, optional
        One-sided statistic to compute, either ``D+`` or ``D-``.

    Returns
    -------
    D : float
        One-sided KS statistic, or None if ``indices`` is empty.
    p : float
        p-value, or None if ``indices`` is empty.

    Raises
    ------
    RuntimeError
        If ``mode`` is neither ``D+`` nor ``D-`` (and indices is non-empty).
    """
    n = len(indices)
    if not n:
        # Nothing inserted yet; no statistic can be computed.
        return None, None
    # Empirical CDF of the insertion indices over [0, nlive).
    counts = np.zeros(nlive)
    observed_values, observed_counts = np.unique(indices, return_counts=True)
    counts[observed_values] = observed_counts
    cdf = np.cumsum(counts) / n
    # Reference CDF for a uniform discrete distribution on nlive bins.
    if mode == "D+":
        statistic = np.max(np.arange(1.0, nlive + 1) / nlive - cdf)
    elif mode == "D-":
        statistic = np.max(cdf - np.arange(0.0, nlive) / nlive)
    else:
        raise RuntimeError(f"{mode} is not a valid mode. Choose D+ or D-")
    p_value = stats.ksone.sf(statistic, n)
    return statistic, p_value
def bonferroni_correction(p_values, alpha=0.05):
    """
    Apply the Bonferroni correction for multiple tests.

    Based on the implementation in ``statmodels.stats.multitest``

    Parameters
    ----------
    p_values : array_like
        Uncorrelated p-values.
    alpha : float, optional
        Family wise error rate.

    Returns
    -------
    reject : numpy.ndarray
        Boolean array, True where the null hypothesis is rejected.
    p_values_corrected : numpy.ndarray
        Bonferroni-corrected p-values, clipped at 1.
    alpha_bon : float
        Per-test significance threshold (alpha divided by the test count).
    """
    pvals = np.asarray(p_values)
    n_tests = pvals.size
    alpha_bon = alpha / n_tests
    reject = pvals <= alpha_bon
    # Scale each p-value by the number of tests, capping at 1.
    p_values_corrected = np.minimum(pvals * n_tests, 1.0)
    return reject, p_values_corrected, alpha_bon
| true |
79e1b3a06f9e493296db4d8e2aa39d53e46c66d7 | Python | LucasMaiale/Libro1-python | /Cap2/Programa 2_10.py | UTF-8 | 940 | 4.21875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: guardati
Solución del problema 2.10
Calcula e imprime el total a pagar por alimento, a lo largo de un mes,
en un refugio para perros en el cual viven perros de distinta edad y tamaño.
Se considera que el mes tiene 30 días.
"""
precio_alim_ad = float(input('Ingrese el precio del alimento para adulto: $'))
precio_alim_ca = float(input('Ingrese el precio del alimento para cachorro: $'))
'''
Se multiplica por 30 para obtener el consumo en un mes y se divide
por 1000 para expresar el consumo en kilos.
'''
consumo_kg_adulto = (10 * 450 + 12 * 380) * 30 / 1000
consumo_kg_cachorro = 6 * 230 * 30 / 1000
costo_alim_ad = consumo_kg_adulto * precio_alim_ad
costo_alim_ca = consumo_kg_cachorro * precio_alim_ca
print(f'\nTotal a pagar por el alimento para perros adultos = ${costo_alim_ad:.2f}')
print(f'Total a pagar por el alimento para perros cachorros = ${costo_alim_ca:.2f}') | true |
8c95f82d65e97ad13aff5abca8b5121bed89f185 | Python | sixty-north/added-value | /source/added_value/tabulator.py | UTF-8 | 15,887 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | from collections import deque
from collections.abc import Mapping
from itertools import product, chain, repeat
from added_value.items_table_directive import NonStringIterable
from added_value.multisort import tuplesorted
from added_value.sorted_frozen_set import SortedFrozenSet
from added_value.toposet import TopoSet
from added_value.util import unchain, empty_iterable
# Internal sentinel objects used by the tabulation algorithms below.
depth_marker = object()
ROOT = object()
LEAF = object()
_UNSET = object()
def breadth_first(obj, leaves=False):
    """Collect the keys/indexes of *obj*, one TopoSet-ordered list per level.

    Walks the nested mapping/sequence structure breadth-first. Mappings
    contribute their keys, non-string iterables contribute their integer
    indexes, and (optionally) leaf values themselves when *leaves* is True.

    Returns:
        A list of lists; element i contains the keys seen at depth i.
    """
    queue = deque()
    queue.append(obj)
    # None is used as a level separator in the queue.
    queue.append(None)
    level_keys = []
    current_level_keys = TopoSet()
    while len(queue) > 0:
        node = queue.popleft()
        if node is None:
            # End of the current level: bank its keys and start the next.
            level_keys.append(current_level_keys)
            current_level_keys = TopoSet()
            queue.append(None)
            if queue[0] is None:
                # Two consecutive separators mean no nodes remain.
                break
            else:
                continue
        if isinstance(node, Mapping):
            current_level_keys.update(node.keys())
            for value in node.values():
                queue.append(value)
        elif isinstance(node, NonStringIterable):
            current_level_keys.update(range(len(node)))
            for value in node:
                queue.append(value)
        else:
            if leaves:
                current_level_keys.add(node)
    return [
        list(s) for s in level_keys[:-1]
    ]  # The final level holds leaves (empty unless leaves=True), so drop it.
class Missing(object):
    """Sentinel type for absent table cells; renders as an empty string."""

    def __str__(self):
        # An absent cell contributes nothing to the rendered table.
        return ""

    def __repr__(self):
        # Show the bare class name so debugging output reads 'Missing'.
        return type(self).__name__


# Shared singleton used throughout this module to mark absent cells.
MISSING = Missing()
def tabulate_body(
    obj,
    level_keys,
    v_level_indexes,
    h_level_indexes,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
):
    """Extract table cells for one vertical/horizontal arrangement of levels.

    Args:
        obj: The nested mapping/sequence structure being tabulated.
        level_keys: Per-level key lists as returned by breadth_first().
        v_level_indexes: A sequence of level indexes mapped to rows.
        h_level_indexes: A sequence of level indexes mapped to columns.
        v_level_sort_keys: Optional per-level sort keys for the rows.
        h_level_sort_keys: Optional per-level sort keys for the columns.

    Returns:
        A 3-tuple (table, v_key_tuples, h_key_tuples) where table is a
        rows-of-columns list containing MISSING for cells with no
        corresponding item in obj.
    """
    v_key_sorted = make_sorter(v_level_sort_keys, v_level_indexes)
    h_key_sorted = make_sorter(h_level_sort_keys, h_level_indexes)
    h_level_keys = [level_keys[level] for level in h_level_indexes]
    v_level_keys = [level_keys[level] for level in v_level_indexes]
    # Cartesian products give every possible column/row key combination.
    h_key_tuples = h_key_sorted(product(*h_level_keys))
    v_key_tuples = v_key_sorted(product(*v_level_keys))
    h_size = len(h_key_tuples)
    v_size = len(v_key_tuples)
    table = [[MISSING for _ in range(h_size)] for _ in range(v_size)]
    for h_index, h_keys in enumerate(h_key_tuples):
        for v_index, v_keys in enumerate(v_key_tuples):
            # Interleave the row and column keys back into level order.
            key_path = [None] * len(level_keys)
            merge_into_by_index(key_path, h_level_indexes, h_keys)
            merge_into_by_index(key_path, v_level_indexes, v_keys)
            # NOTE(review): this loop repeats the vertical merge above;
            # looks redundant -- confirm before removing.
            for v_level, v_key in zip(v_level_indexes, v_keys):
                key_path[v_level] = v_key
            # Walk the key path into obj; leave MISSING if any key is absent.
            item = obj
            for key in key_path:
                try:
                    item = item[key]
                except (IndexError, KeyError):
                    break
            else:  # no-break
                table[v_index][h_index] = item
    return table, v_key_tuples, h_key_tuples
def make_sorter(level_sort_keys, level_indexes):
    """Build a callable that orders an iterable of key tuples.

    When *level_sort_keys* is None the keys are simply materialised into a
    list in their existing order; otherwise one sort key per level is
    applied through tuplesorted.

    Raises:
        ValueError: If the number of sort keys does not match the number
            of level indexes.
    """
    if level_sort_keys is None:
        return list
    if len(level_sort_keys) != len(level_indexes):
        raise ValueError(
            "level_sort_keys with length {} does not correspond to level_indexes with length {}".format(
                len(level_sort_keys), len(level_indexes)
            )
        )
    return lambda level_keys: tuplesorted(level_keys, *level_sort_keys)
def strip_missing_rows(table, row_keys):
    """Drop table rows (and their key tuples) whose every cell is MISSING.

    Surviving rows are shallow-copied, so the result does not alias the
    input row lists.

    Returns:
        A 2-tuple (stripped_table, stripped_row_keys).
    """
    kept = [
        (list(row), key_tuple)
        for row, key_tuple in zip(table, row_keys)
        if any(cell is not MISSING for cell in row)
    ]
    if not kept:
        return [], []
    rows, keys = zip(*kept)
    return list(rows), list(keys)
def strip_missing_columns(table, h_key_tuples):
    """Drop table columns (and their key tuples) whose every cell is MISSING.

    Implemented by transposing, reusing strip_missing_rows, and transposing
    the result back.
    """
    columns, kept_h_key_tuples = strip_missing_rows(transpose(table), h_key_tuples)
    return transpose(columns), kept_h_key_tuples
def merge_into_by_index(sequence, indexes, values):
    """Write *values* into *sequence* in place, item i at position indexes[i]."""
    for position, item in zip(indexes, values):
        sequence[position] = item
def is_rectangular(seq_of_seqs):
    """Return True if every inner sequence has the same length (vacuously
    True for an empty outer sequence)."""
    lengths = {len(seq) for seq in seq_of_seqs}
    return len(lengths) <= 1
def size_h(rows_of_columns):
    """Return the horizontal size (width of the first row), or 0 if there
    are no rows."""
    if not rows_of_columns:
        return 0
    return len(rows_of_columns[0])
def size_v(rows_of_columns):
    """Return the vertical size: the number of non-empty rows."""
    return sum(len(row) > 0 for row in rows_of_columns)
def size(rows_of_columns):
    """Return the (vertical, horizontal) size of a rows-of-columns table.

    Vertical counts non-empty rows; horizontal is the width of the first row.
    """
    return size_v(rows_of_columns), size_h(rows_of_columns)
def transpose(rows_of_columns):
    """Return the transpose of *rows_of_columns* as a new list of lists.

    Note that zip truncates to the shortest row and an empty input yields [].
    """
    return [list(column) for column in zip(*rows_of_columns)]
def assemble_table(
    table_body, v_key_tuples, h_key_tuples, v_level_titles=None, h_level_titles=None, empty=""
):
    """Glue body, stub (row-key) columns, header rows and titles into one grid.

    Args:
        table_body: Rows-of-columns cell values.
        v_key_tuples: One key tuple per body row (becomes the stub columns).
        h_key_tuples: One key tuple per body column (becomes the header rows).
        v_level_titles: Optional titles, one per stub column.
        h_level_titles: Optional titles, one per header row.
        empty: Filler value for structurally empty cells.

    Returns:
        A rectangular list of row lists ready for rendering.

    Raises:
        ValueError: If any of the pieces have inconsistent dimensions.
    """
    if not is_rectangular(table_body):
        raise ValueError("table_body {} is not rectangular".format(table_body))
    if not is_rectangular(v_key_tuples):
        raise ValueError("v_key_tuples {} is not rectangular".format(v_key_tuples))
    if not is_rectangular(h_key_tuples):
        raise ValueError("h_key_tuples {} is not rectangular".format(h_key_tuples))
    if size_v(v_key_tuples) > 0 and (size_v(table_body) != size_v(v_key_tuples)):
        raise ValueError("table body and v_key_tuples have incompatible dimensions")
    # Column keys become header rows once transposed.
    h_key_tuples_transposed = transpose(h_key_tuples)
    if size_h(h_key_tuples_transposed) > 0 and (
        size_h(table_body) != size_h(h_key_tuples_transposed)
    ):
        raise ValueError("table body and h_key_tuples have incompatible dimensions")
    if (v_level_titles is not None) and (len(v_level_titles) != size_h(v_key_tuples)):
        raise ValueError("v_level_titles and v_key_tuples have incompatible dimensions")
    if (h_level_titles is not None) and (len(h_level_titles) != size_v(h_key_tuples_transposed)):
        raise ValueError("h_level_titles and h_key_tuples have incompatible dimensions")
    # Each header row may be preceded by its title; box titles one-per-row.
    boxed_h_level_titles = (
        unchain(h_level_titles)
        if (h_level_titles is not None)
        else repeat(empty_iterable(), size_v(h_key_tuples_transposed))
    )
    num_h_level_title_columns = int(bool(h_level_titles))
    num_stub_columns = max(size_h(v_key_tuples), num_h_level_title_columns)
    table = []
    num_empty_columns = num_stub_columns - num_h_level_title_columns
    # Header rows: padding, optional title, then the column keys.
    for boxed_h_level_title, h_key_row in zip(boxed_h_level_titles, h_key_tuples_transposed):
        row = list(chain(repeat(" ", num_empty_columns), boxed_h_level_title, h_key_row))
        table.append(row)
    # Optional row of titles above the stub columns.
    if v_level_titles is not None:
        v_level_titles_row = v_level_titles + [empty] * size_h(table_body)
        table.append(v_level_titles_row)
    # Body rows: row keys followed by the cell values.
    for v_key_row, table_row in zip(v_key_tuples, table_body):
        row = list(v_key_row)
        row.extend(table_row)
        table.append(row)
    assert is_rectangular(table)
    return table
def tabulate(
    obj,
    v_level_indexes=None,
    h_level_indexes=None,
    v_level_visibility=None,
    h_level_visibility=None,
    v_level_sort_keys=None,
    h_level_sort_keys=None,
    v_level_titles=None,
    h_level_titles=None,
    empty="",
):
    """Render a nested data structure into a two-dimensional table.

    Args:
        obj: The indexable data structure to be rendered, which can
            either be a non-string sequence or a mapping containing other
            sequences and mappings nested to arbitrarily many levels,
            with all the leaf items (which are neither sequences nor
            mappings, excluding strings).

        v_level_indexes: An iterable of the zero-based indexes of
            the levels for which the keys/indexes will be displayed
            along the vertical axis of the table. Taken together
            with the levels in h_levels these must represent the
            complete set of levels in the obj data structure. No
            level index should appear in both v_level_indexes and
            h_level_indexes, but all level indexes must appear in
            either v_level_indexes or h_level_indexes. If None,
            the levels not used in h_level_indexes will be used.
            If both v_level_indexes and h_level_indexes are not
            alternate indexes will be used as v_level and h_level
            indexes.

        h_level_indexes: An iterable of the zero-based indexes of
            the levels for which the keys/indexes will be displayed
            along the horizontal axis of the table. Taken together
            with the levels in v_levels these must represent the
            complete set of levels in the obj data structure. No
            level index should appear in both h_level_indexes and
            v_level_indexes, but all level indexes must appear in
            either h_level_indexes or v_level_indexes. If None,
            the levels not used in v_level_indexes will be used.
            If both v_level_indexes and h_level_indexes are not
            alternate indexes will be used as v_level and h_level
            indexes.

        v_level_visibility: An optional iterable of booleans, where each
            item corresponds to a level in v_level_indexes, and
            controls whether than level of index is included in
            the table stub columns. This iterable must contain
            the same number of items as v_level_indexes.

        h_level_visibility: An optional iterable of booleans, where each
            item corresponds to a level in h_level_indexes, and
            controls whether than level of index is included in
            the table header rows. This iterable must contain
            the same number of items as h_level_indexes.

        v_level_sort_keys: An optional iterable of Keys, where each
            key corresponds to a level in v_level_indexes, and
            controls how that key is sorted. If None, keys are sorted
            as-is.

        h_level_sort_keys: An optional iterable of Keys, where each
            key corresponds to a level in v_level_indexes, and
            controls how that key is sorted. If None, keys are sorted
            as-is.

        v_level_titles: An optional iterable of strings, where each
            string is a title which corresponds to a level in v_level_indexes,
            and which will be displayed against the row keys for that level.
            If None, no titles will be included.

        h_level_titles: An optional iterable of strings, where each
            string is a title which corresponds to a level in h_level_indexes,
            and which will be displayed against the column keys for that level.
            If None, no titles will be included.

        empty: An optional string value to use for empty cells.

    Returns:
        A list of lists representing the rows of cells.

    Example:
        tabulate(dict_of_dicts, [0, 1], [])
    """
    # Discover the keys present at each nesting level of obj.
    level_keys = breadth_first(obj)
    v_level_indexes, h_level_indexes = validate_level_indexes(
        len(level_keys), v_level_indexes, h_level_indexes
    )
    # Default: every level of keys is visible.
    if v_level_visibility is None:
        v_level_visibility = [True] * len(v_level_indexes)
    if h_level_visibility is None:
        h_level_visibility = [True] * len(h_level_indexes)
    table, v_key_tuples, h_key_tuples = tabulate_body(
        obj, level_keys, v_level_indexes, h_level_indexes, v_level_sort_keys, h_level_sort_keys
    )
    # Remove rows/columns that contain no data at all, then hide any
    # key levels the caller asked to suppress.
    table, v_key_tuples = strip_missing_rows(table, v_key_tuples)
    table, h_key_tuples = strip_missing_columns(table, h_key_tuples)
    v_key_tuples = strip_hidden(v_key_tuples, v_level_visibility)
    h_key_tuples = strip_hidden(h_key_tuples, h_level_visibility)
    return assemble_table(
        table, v_key_tuples, h_key_tuples, v_level_titles, h_level_titles, empty=empty
    )
def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):
    """Ensure that v_level_indexes and h_level_indexes are consistent.

    Args:
        num_levels: The number of levels of keys in the data structure being tabulated.

        v_level_indexes: A sequence of level indexes between zero and num_levels for
            the vertical axis, or None.

        h_level_indexes: A sequence of level indexes between zero and num_levels for for
            the horizontal axis, or None.

    Returns:
        A 2-tuple containing v_level_indexes and h_level_indexes sequences.

    Raises:
        ValueError: If v_level_indexes contains duplicate values.
        ValueError: If h_level_indexes contains duplicate values.
        ValueError: If v_level_indexes contains out of range values.
        ValueError: If h_level_indexes contains out of range values.
        ValueError: If taken together v_level_indexes and h_level_indexes
            do not include all levels from zero to up to, but not including
            num_levels.
        ValueError: If v_level_indexes and h_level_indexes have items in
            common.
    """
    if num_levels < 1:
        raise ValueError("num_levels {} is less than one".format(num_levels))
    all_levels = SortedFrozenSet(range(num_levels))
    # Neither axis specified: alternate levels between rows and columns.
    if (h_level_indexes is None) and (v_level_indexes is None):
        v_level_indexes = range(0, num_levels, 2)
        h_level_indexes = range(1, num_levels, 2)
    h_level_set = SortedFrozenSet(h_level_indexes)
    v_level_set = SortedFrozenSet(v_level_indexes)
    # One axis specified: the other gets all remaining levels.
    if h_level_indexes is None:
        h_level_indexes = all_levels - v_level_set
    if v_level_indexes is None:
        v_level_indexes = all_levels - h_level_set
    # A set smaller than its source sequence implies duplicates.
    if len(h_level_indexes) != len(h_level_set):
        raise ValueError("h_level_indexes contains duplicate values")
    if h_level_set and ((h_level_set[0] < 0) or (h_level_set[-1] >= num_levels)):
        raise ValueError("h_level_indexes contains out of range values")
    if len(v_level_indexes) != len(v_level_set):
        raise ValueError("v_level_indexes contains duplicate values")
    if v_level_set and ((v_level_set[0] < 0) or (v_level_set[-1] >= num_levels)):
        raise ValueError("v_level_indexes contains out of range values")
    unmentioned_levels = all_levels - v_level_set - h_level_set
    if len(unmentioned_levels) > 0:
        raise ValueError(
            "v_level_indexes and h_level_indexes do not together include levels {}".format(
                ", ".join(map(str, unmentioned_levels))
            )
        )
    if not h_level_set.isdisjoint(v_level_set):
        raise ValueError("h_level_indexes and v_level_indexes are not disjoint")
    v_level_indexes = list(v_level_indexes)
    h_level_indexes = list(h_level_indexes)
    return v_level_indexes, h_level_indexes
def strip_hidden(key_tuples, visibilities):
    """Filter each key tuple down to the positions whose visibility is True.

    Args:
        key_tuples: A sequence of tuples of equal length (i.e. rectangular)
        visibilities: A sequence of booleans equal in length to the tuples
            contained in key_tuples.

    Returns:
        A sequence equal in length to key_tuples whose items are tuples
        retaining only the items at visible positions.

    Raises:
        ValueError: If any tuple's length differs from len(visibilities).
    """
    filtered = []
    for key_tuple in key_tuples:
        if len(key_tuple) != len(visibilities):
            raise ValueError(
                "length of key tuple {} is not equal to length of visibilities {}".format(
                    key_tuple, visibilities
                )
            )
        filtered.append(
            tuple(item for item, visible in zip(key_tuple, visibilities) if visible)
        )
    return filtered
# TODO: Multidimensional arrays. e.g. ndarray
| true |
4a582c4364f53a3d0844e8aa2a44063f6d4a8577 | Python | sunary/image-process | /preprocess/edge_detect.py | UTF-8 | 3,918 | 2.515625 | 3 | [] | no_license | __author__ = 'sunary'
import cv2
from utils import helper
from preprocess import histogram_equalization
import numpy as np
def basic(pix):
    """Sobel edge detection over a 2-D grayscale pixel grid (list of lists).

    Returns a same-sized grid where edge pixels are 0x000000 (black) and
    non-edge pixels are 0xffffff (white).  The threshold is 3x the mean
    squared gradient magnitude over the whole image.
    """
    temp_x = [[0] * len(pix[0]) for _ in range(len(pix))]
    temp_y = [[0] * len(pix[0]) for _ in range(len(pix))]
    edge_pix = [[0] * len(pix[0]) for _ in range(len(pix))]
    gx, gy = _sobel()
    # Fix: use floor division. Plain '/' only works here under Python 2;
    # under Python 3 it yields floats and breaks range()/indexing.
    half = len(gx) // 2
    sum_gray = 0
    for i in range(half, len(pix) - half):
        for j in range(half, len(pix[0]) - half):
            for d1 in range(len(gx)):
                for d2 in range(len(gx)):
                    temp_x[i][j] += gx[d1][d2] * pix[i + d1 - half][j + d2 - half]
                    temp_y[i][j] += gy[d1][d2] * pix[i + d1 - half][j + d2 - half]
            sum_gray += temp_x[i][j] * temp_x[i][j] + temp_y[i][j] * temp_y[i][j]
    threshold = 3 * sum_gray // (len(pix) * len(pix[0]))
    for i in range(len(pix)):
        for j in range(len(pix[0])):
            edge_pix[i][j] = 0x000000 if (temp_x[i][j] * temp_x[i][j] + temp_y[i][j] * temp_y[i][j] > threshold) else 0xffffff
    return edge_pix
def edge_detect(img):
    """Sobel edge detection on a 2-D numpy image array.

    Returns a uint8 mask: 0 where the squared gradient magnitude is below
    twice its mean, 255 (white) where it is an edge.
    """
    # Fix: accumulate gradients in float64. np.zeros_like(img) inherits the
    # input dtype (typically uint8), so negative Sobel responses would
    # silently wrap around and corrupt the threshold.
    temp_x = np.zeros_like(img, dtype=np.float64)
    temp_y = np.zeros_like(img, dtype=np.float64)
    sobel = _sobel()
    gx = np.array(sobel[0])
    gy = np.array(sobel[1])
    for i in np.arange(1, np.size(img, 0) - 1):
        for j in np.arange(1, np.size(img, 1) - 1):
            temp_x[i][j] = np.sum(gx * img[i - 1: i + 2, j - 1: j + 2])
            temp_y[i][j] = np.sum(gy * img[i - 1: i + 2, j - 1: j + 2])
    sum_square = np.square(temp_x) + np.square(temp_y)
    threshold = 2 * np.mean(sum_square)
    edge_pix = np.zeros_like(img, dtype=np.uint8)
    for (i, j), value in np.ndenumerate(sum_square):
        # Fix: 255 is white in uint8; the original wrote 0xffffff, which a
        # uint8 cell cannot hold (it relied on implicit truncation to 255).
        edge_pix[i][j] = 0 if value < threshold else 255
    return edge_pix
def auto_canny(img, sigma=0.33):
    """Canny edge detection with thresholds derived from the image median.

    The lower/upper thresholds are (1 - sigma) and (1 + sigma) times the
    median intensity, clamped to the valid [0, 255] range.
    """
    median = np.median(img)
    lower_threshold = int(max(0, (1.0 - sigma) * median))
    upper_threshold = int(min(255, (1.0 + sigma) * median))
    return cv2.Canny(img, lower_threshold, upper_threshold)
def full_detect(img, is_binary=True, canny=True):
    """Binarize *img* if needed, then run edge detection on it.

    is_binary: when False, the image is first thresholded with Otsu's method.
    canny: choose between auto_canny (True) and the manual Sobel edge_detect.
    """
    if not is_binary:
        # img = histogram_equalization.clahe(img)
        img = histogram_equalization.ostu_algorithm(img)
    return auto_canny(img) if canny else edge_detect(img)
def extractEdges(input, cannyThreshold1=50, cannyThreshold2=200, borderApertureSize=3):
    """Equalize, blur and Canny-edge a BGR image; returns the edge map."""
    gray = cv2.cvtColor(input, cv2.COLOR_BGR2GRAY)
    # Local contrast enhancement before blurring stabilises Canny thresholds.
    clahe = cv2.createCLAHE(clipLimit=4.5, tileGridSize=(9, 9))
    gray = clahe.apply(gray)
    gray = cv2.GaussianBlur(gray, (3, 3), 1)
    return cv2.Canny(gray, cannyThreshold1, cannyThreshold2,
                     apertureSize=borderApertureSize, L2gradient=False)
def _sobel():
return ([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
[[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
def _robert(self):
return ([[1, 0], [0, -1]],
[[0, 1], [-1, 0]])
def _prewitt(self):
return ([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]],
[[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
def bounding_box(img, range_w=None, range_h=None):
    """Binarize *img*, find contours and return (annotated image, boxes).

    range_w / range_h: optional (min, max) pairs filtering box width/height.
    Boxes passing the filter are drawn onto the returned image.
    """
    img = histogram_equalization.adaptive_mean(img)
    cv2.imshow('binary', img)
    contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        width_ok = not range_w or (range_w[0] <= w <= range_w[1])
        height_ok = not range_h or (range_h[0] <= h <= range_h[1])
        if width_ok and height_ok:
            boxes.append((x, y, w, h))
            cv2.rectangle(img, (x, y), (x + w, y + h), (255), 2)
    return img, boxes
if __name__ == '__main__':
    pix = helper.read_image('../resources/fp01.jpg')
    pix = helper.convert_gray(pix)
    # Fix: save_image was called without the pixel data argument, unlike the
    # matching call below -- pass the grayscale image explicitly.
    helper.save_image(pix, '../resources/gray.png', True)
    pix = basic(pix)
    # noise_removal = NoiseRemoval()
    # pix = noise_removal.opening(pix)
    helper.save_image(pix, '../resources/edge.png')
36c306b33199594ed1d4b1a05ce2758bf99318e8 | Python | Haestad/datatek | /oving5/kpc.py | UTF-8 | 4,324 | 3.171875 | 3 | [] | no_license | """ This module contains the KeyPad Controller (KPC) class. """
from time import sleep
from typing import Callable
from keypad import Keypad
from led_board import LEDBoard
class KPC:
    """ Class that contains all the logic for operating the keypad. """

    def __init__(self):
        # Hardware interfaces: keypad for input, LED board for feedback.
        self.keypad = Keypad()
        self.led_board = LEDBoard()
        # File that persists the current password in plain text.
        self.password_path = "password.txt"
        # Signal injected by agent logic; takes precedence over keypad input.
        self.override_signal = None
        # Most recent symbol read from the keypad.
        self.current_signal = None
        # Digits typed so far for the pending password entry/change.
        self.passcode_buffer = ""
        # LED selection state used by select_pin/append_dur/light_one_led.
        self.led_pin = 0
        self.led_duration = ""

    def reset_passcode_entry(self):
        """ Clear passcode_buffer and initiate 'power up' lighting sequence. """
        self.passcode_buffer = ""
        self.power_up_led()

    def reset_agent(self):
        """ Resets the agent to a neutral state. """
        self.override_signal = None
        self.current_signal = None
        self.passcode_buffer = ""

    def append_next_password_digit(self):
        """ Adds the given digit to the passcode_buffer. """
        self.passcode_buffer += str(self.current_signal)

    def get_next_signal(self):
        """ return override signal if non-blank,
        else query keypad for next pressed key"""
        if self.override_signal is not None:
            override_signal = self.override_signal
            self.override_signal = None  # consume the override so it fires once
            return override_signal
        self.current_signal = self.keypad.get_next_signal()
        return self.current_signal

    def verify_login(self):
        """ Compares the password in the password file to the password in passcode_buffer.
        If it matches, set override_signal to 'Y', else set it to 'N'. """
        with open(self.password_path, 'r') as pw_file:
            correct_pw = pw_file.read()
        if self.passcode_buffer == correct_pw:
            self.override_signal = 'Y'
            self.twinkle_led()  # success feedback
        else:
            self.override_signal = 'N'
            self.flash_led()  # failure feedback

    def validate_password_change(self):
        """ Checks if the new password is legal. A legal password should be at least 4 digits long,
        and should only contain a combination of symbols 0-9.
        If the password is legal, save it to 'password.txt'. The LED board should also flash
        based on failure or success. """
        # NOTE(review): only the length is checked here; the digits-only rule
        # from the docstring is not actually enforced -- confirm intent.
        if len(self.passcode_buffer) >= 4:
            with open(self.password_path, 'w') as pw_file:
                pw_file.write(self.passcode_buffer)
            self.override_signal = 'Y'
            self.twinkle_led()
        else:
            self.override_signal = 'N'
            self.flash_led()

    def fully_activate_agent(self):
        """ Called when agent is activated. """
        self.reset_agent()

    def exit_action(self):
        """ Called when exiting agent. """
        self.reset_agent()
        self.power_down_led()

    def select_pin(self):
        """ Selects a LED pin to light up. """
        self.led_pin = self.current_signal
        self.led_duration = ""

    def append_dur(self):
        """ Selects how long the LED should light up. """
        self.led_duration += str(self.current_signal)

    @staticmethod
    def do_action(action: Callable[[], bool]):
        """ Executes the given action in KPC. """
        return action()

    # LED Board methods
    def light_one_led(self):
        """ Lights the LED with the agents led pin and duration, and then clears the values. """
        self.led_board.turn_on_led(self.led_pin)
        # led_duration is accumulated as a digit string by append_dur.
        sleep(int(self.led_duration))
        self.led_board.turn_off_led(self.led_pin)
        self.led_pin = None
        self.led_duration = ""

    def flash_led(self):
        """ Flashes all LEDs for 1 seconds. """
        self.led_board.flash_all_led(1)

    def twinkle_led(self):
        """ Twinkles all LEDs for 1 seconds """
        self.led_board.twinkle_all_led(1)

    def power_up_led(self):
        """ The LED sequence for powering up. """
        led_list = []
        for i in range(0, 5):
            led_list.append(i)
        self.led_board.flash_all_led(0.2, led_list)

    def power_down_led(self):
        """ The LED sequence for powering down. """
        # Flash a shrinking set of LEDs: 5, then 4, ... down to 1.
        led_list = [0, 1, 2, 3, 4]
        for i in range(0, 5):
            self.led_board.flash_all_led(0.2, led_list)
            led_list.pop()
| true |
e57813e7a7871b3b99db5957b4aac3e31d0cd66a | Python | gistable/gistable | /all-gists/1770447/snippet.py | UTF-8 | 11,267 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# zmqc: a small but powerful command-line interface to ZMQ.
## Usage:
# zmqc [-0] (-r | -w) (-b | -c) SOCK_TYPE [-o SOCK_OPT=VALUE...] address [address ...]
## Examples:
# zmqc -rc SUB 'tcp://127.0.0.1:5000'
#
# Subscribe to 'tcp://127.0.0.1:5000', reading messages from it and printing
# them to the console. This will subscribe to all messages by default.
#
# ls | zmqc -wb PUSH 'tcp://*:4000'
#
# Send the name of every file in the current directory as a message from a
# PUSH socket bound to port 4000 on all interfaces. Don't forget to quote the
# address to avoid glob expansion.
#
# zmqc -rc PULL 'tcp://127.0.0.1:5202' | tee $TTY | zmqc -wc PUSH 'tcp://127.0.0.1:5404'
#
# Read messages coming from a PUSH socket bound to port 5202 (note that we're
# connecting with a PULL socket), echo them to the active console, and
# forward them to a PULL socket bound to port 5404 (so we're connecting with
# a PUSH).
#
# zmqc -n 10 -0rb PULL 'tcp://*:4123' | xargs -0 grep 'pattern'
#
# Bind to a PULL socket on port 4123, receive 10 messages from the socket
# (with each message representing a filename), and grep the files for
# `'pattern'`. The `-0` option means messages will be NULL-delimited rather
# than separated by newlines, so that filenames with spaces in them are not
# considered two separate arguments by xargs.
## License:
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org/>
import argparse
import array
import errno
import itertools
import re
import sys
import zmq
__version__ = '0.0.1'
class ParserError(Exception):
    """Raised when parsing the command-line arguments fails."""
parser = argparse.ArgumentParser(
prog='zmqc', version=__version__,
usage=
"%(prog)s [-h] [-v] [-0] (-r | -w) (-b | -c)\n "
"SOCK_TYPE [-o SOCK_OPT=VALUE...]\n "
"address [address ...]",
description="zmqc is a small but powerful command-line interface to ZMQ. "
"It allows you to create a socket of a given type, bind or connect it to "
"multiple addresses, set options on it, and receive or send messages over "
"it using standard I/O, in the shell or in scripts.",
epilog="This is free and unencumbered software released into the public "
"domain. For more information, please refer to <http://unlicense.org>.",
)
parser.add_argument('-0',
dest='delimiter', action='store_const',
const='\x00', default='\n',
help="Separate messages on input/output should be "
"delimited by NULL characters (instead of newlines). Use "
"this if your messages may contain newlines, and you want "
"to avoid ambiguous message borders.")
parser.add_argument('-n', metavar='NUM',
dest='number', type=int, default=None,
help="Receive/send only NUM messages. By default, zmqc "
"lives forever in 'write' mode, or until the end of input "
"in 'read' mode.")
mode_group = parser.add_argument_group(title='Mode')
mode = mode_group.add_mutually_exclusive_group(required=True)
mode.add_argument('-r', '--read',
dest='mode', action='store_const', const='r',
help="Read messages from the socket onto stdout.")
mode.add_argument('-w', '--write',
dest='mode', action='store_const', const='w',
help="Write messages from stdin to the socket.")
behavior_group = parser.add_argument_group(title='Behavior')
behavior = behavior_group.add_mutually_exclusive_group(required=True)
behavior.add_argument('-b', '--bind',
dest='behavior', action='store_const', const='bind',
help="Bind to the specified address(es).")
behavior.add_argument('-c', '--connect',
dest='behavior', action='store_const', const='connect',
help="Connect to the specified address(es).")
sock_params = parser.add_argument_group(title='Socket parameters')
sock_type = sock_params.add_argument('sock_type', metavar='SOCK_TYPE',
choices=('PUSH', 'PULL', 'PUB', 'SUB', 'PAIR'), type=str.upper,
help="Which type of socket to create. Must be one of 'PUSH', 'PULL', "
"'PUB', 'SUB' or 'PAIR'. See `man zmq_socket` for an explanation of the "
"different types. 'REQ', 'REP', 'DEALER' and 'ROUTER' sockets are "
"currently unsupported. --read mode is unsupported for PUB sockets, and "
"--write mode is unsupported for SUB sockets.")
sock_opts = sock_params.add_argument('-o', '--option',
metavar='SOCK_OPT=VALUE', dest='sock_opts', action='append', default=[],
help="Socket option names and values to set on the created socket. "
"Consult `man zmq_setsockopt` for a comprehensive list of options. Note "
"that you can safely omit the 'ZMQ_' prefix from the option name. If the "
"created socket is of type 'SUB', and no 'SUBSCRIBE' options are given, "
"the socket will automatically be subscribed to everything.")
addresses = sock_params.add_argument('addresses', nargs='+', metavar='address',
help="One or more addresses to bind/connect to. Must be in full ZMQ "
"format (e.g. 'tcp://<host>:<port>')")
def read_until_delimiter(stream, delimiter):
    """
    Read from a stream until a given delimiter or EOF, or raise EOFError.

    >>> io = StringIO("abcXdefgXfoo")
    >>> read_until_delimiter(io, "X")
    'abc'
    >>> read_until_delimiter(io, "X")
    'defg'
    >>> read_until_delimiter(io, "X")
    'foo'
    >>> read_until_delimiter(io, "X")
    Traceback (most recent call last):
        ...
    EOFError
    """
    # Fix: the original accumulated into array.array('c') and called
    # .tostring() -- the 'c' typecode is Python-2-only and tostring() has
    # been removed.  A plain list + ''.join is portable and equivalent.
    chunks = []
    c = stream.read(1)
    while c and c != delimiter:
        chunks.append(c)
        c = stream.read(1)
    # Raise EOFError only when EOF is hit with nothing read at all; a
    # trailing, delimiter-less chunk is still returned.
    if not (c or chunks):
        raise EOFError
    return ''.join(chunks)
def get_sockopts(sock_opts):
    """
    Turn a list of 'OPT=VALUE' strings into a list of (opt_code, value).

    Integer-typed options are coerced to int; byte-typed options stay str.
    The 'ZMQ_' prefix on option names is optional.

    Raises:
        ParserError: On a malformed spec, an unknown option name, or a
            value that cannot be coerced to the option's type.
    """
    # NOTE(review): zmq.core.constants is the old pyzmq module layout --
    # confirm against the pyzmq version actually in use.
    option_coerce = {
        int: set(zmq.core.constants.int_sockopts).union(
            zmq.core.constants.int64_sockopts),
        str: set(zmq.core.constants.bytes_sockopts)
    }
    options = []
    for option in sock_opts:
        match = re.match(r'^([A-Z_]+)\=(.*)$', option)
        if not match:
            # Fix: report the offending input string; formatting `match`
            # here always printed 'None' (match is falsy in this branch).
            raise ParserError("Invalid option spec: %r" % (option,))
        opt_name = match.group(1)
        if opt_name.startswith('ZMQ_'):
            opt_name = opt_name[4:]
        try:
            opt_code = getattr(zmq.core.constants, opt_name.upper())
        except AttributeError:
            raise ParserError("Unrecognised socket option: %r" % (
                match.group(1),))
        opt_value = match.group(2)
        # Fix: .items() works on both Python 2 and 3; .iteritems() is
        # Python-2-only and breaks any future port.
        for converter, opt_codes in option_coerce.items():
            if opt_code in opt_codes:
                try:
                    opt_value = converter(opt_value)
                except (TypeError, ValueError):
                    raise ParserError("Invalid value for option %s: %r" % (
                        opt_name, opt_value))
                break
        options.append((opt_code, opt_value))
    return options
def main():
    """Parse arguments, build the ZMQ socket and run the read/write loop."""
    args = parser.parse_args()
    context = zmq.Context.instance()
    sock = context.socket(getattr(zmq, args.sock_type))
    # Bind or connect to the provided addresses.
    for address in args.addresses:
        getattr(sock, args.behavior)(address)
    # Set any specified socket options.
    try:
        sock_opts = get_sockopts(args.sock_opts)
    except ParserError, exc:  # NOTE(review): 'except E, name' is Python-2-only syntax
        parser.error(str(exc))
    else:
        for opt_code, opt_value in sock_opts:
            sock.setsockopt(opt_code, opt_value)
        # If we have a 'SUB' socket that's not explicitly subscribed to
        # anything, subscribe it to everything.
        if (sock.socket_type == zmq.SUB and
                not any(opt_code == zmq.SUBSCRIBE
                        for (opt_code, _) in sock_opts)):
            sock.setsockopt(zmq.SUBSCRIBE, '')
    # Live forever if no `-n` argument was given, otherwise die after a fixed
    # number of messages.
    if args.number is None:
        iterator = itertools.repeat(None)
    else:
        iterator = itertools.repeat(None, args.number)
    try:
        if args.mode == 'r':
            read_loop(iterator, sock, args.delimiter, sys.stdout)
        elif args.mode == 'w':
            write_loop(iterator, sock, args.delimiter, sys.stdin)
    finally:
        # Always release the socket, even on Ctrl-C inside the loops.
        sock.close()
def read_loop(iterator, sock, delimiter, output):
    """Continuously receive messages from *sock* and print them to *output*.

    One message is received per item yielded by *iterator*, so the iterator
    bounds how many messages are read.  Returns silently on Ctrl-C or when
    the output pipe is closed (EPIPE); any other IOError propagates.
    """
    for _ in iterator:
        try:
            message = sock.recv()
            output.write(message + delimiter)
            output.flush()
        except KeyboardInterrupt:
            return
        # Fix: 'except IOError, exc' is Python-2-only syntax; the 'as' form
        # is valid on Python 2.6+ and Python 3.
        except IOError as exc:
            if exc.errno == errno.EPIPE:
                return
            raise
def write_loop(iterator, sock, delimiter, input):
    """Continuously read delimited messages from *input* and send them on *sock*.

    One message is sent per item yielded by *iterator*; returns silently on
    Ctrl-C or when *input* is exhausted (EOFError).
    """
    for _ in iterator:
        try:
            sock.send(read_until_delimiter(input, delimiter))
        except (KeyboardInterrupt, EOFError):
            return
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
cd90be762ee720db02d613d2c8dacbc2d103514c | Python | P-ppc/leetcode | /algorithms/SpiralMatrixIII/solution.py | UTF-8 | 934 | 3.0625 | 3 | [] | no_license | class Solution(object):
def spiralMatrixIII(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
res = []
directions = [[0, 1], [1, 0], [0, -1], [-1, 0]]
direction_index = 0
max_step = 1
cur_step = 0
while len(res) < R * C:
if 0 <= r0 < R and 0 <= c0 < C:
res.append([r0, c0])
if cur_step == max_step:
direction_index = (direction_index + 1) % 4
elif cur_step == max_step * 2:
direction_index = (direction_index + 1) % 4
max_step += 1
cur_step = 0
r0 += directions[direction_index][0]
c0 += directions[direction_index][1]
cur_step += 1
return res | true |
581e4bc4765822601bb0593ee3c64bee539611e6 | Python | c0indev3l/btccharts-tick2candlestick | /tick2candlestick.py | UTF-8 | 12,641 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: symbol
:platform: Unix, Windows, Mac OS X
:synopsis: Module to download tick data from BitcoinCharts
http://api.bitcoincharts.com/v1/csv/
.. moduleauthor:: Working4coins <working4coins@gmail.com>
Copyright (C) 2013 "Working4coins" <working4coins@gmail.com>
You can donate: https://sites.google.com/site/working4coins/donate
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
import os
import argparse
from symbol import Symbol
from timeframe import TF
import urllib2
from urlparse import urljoin
import datetime
import dateutil.parser
import pandas as pd
class ApiRequestBitcoinchartsGetTicks:
    """Download (or read from a local cache) tick data from BitcoinCharts
    using http://api.bitcoincharts.com/v1/csv/{symbol}.csv
    with {symbol}=mtgoxUSD, and resample it into OHLC candlesticks.
    """
    filename = ''
    data = ''
    dataframe = None
    dataframe_out = None

    def __init__(self, args):
        # Fix: read settings from the *args* parameter instead of silently
        # coupling to the module-level ARGS global (same values for the only
        # existing caller, which passes ARGS).
        self.args = args
        self.basepath = args.basepath
        self.symbol = args.symbol
        self.dt1 = args.dt1
        self.dt2 = args.dt2
        self.timeframe = args.tf
        self.flag_download_overwrite = args.downloadagain
        self.api_url = self.symbol.shortname() + '.csv'
        self.api_base_url = "http://api.bitcoincharts.com/v1/csv/"
        self.url = urljoin(self.api_base_url, self.api_url)

    def update(self):
        """Update data (get, convert to DataFrame, print, resample...)"""
        self.get_data()
        #self.pretty_print_data()
        self.convert_to_dataframe()
        self.pretty_print_dataframe(self.dataframe)
        self.calculate()

    def get_data(self):
        """Get data (download and write, or read from the cached file)."""
        self.filename = os.path.join(self.basepath,
            "data_in/ticks_{api_url}".format(
                api_url=self.api_url,
            )
        )
        file_not_exists = not os.path.exists(self.filename)
        if self.flag_download_overwrite or file_not_exists:
            self.download()
            self.write_data()
        else:
            self.read_data()

    def download(self):
        """Download raw tick data into self.data."""
        print("Downloading using url {url} (please wait)".format(url=self.url))
        # NOTE(review): urllib2 is Python-2-only, consistent with the rest of
        # this file targeting Python 2.
        response = urllib2.urlopen(self.url)
        self.data = response.read()

    def write_data(self):
        """Write downloaded data to the cache file."""
        # Fix: the message previously read "... to (unknown)" while passing
        # an unused 'filename' keyword -- restore the placeholder.
        print("Writing {api_url} to {filename}".format(
            api_url=self.api_url, filename=self.filename))
        # 'with' guarantees the handle is closed even if the write fails.
        with open(self.filename, 'w') as my_file:
            my_file.write(self.data)

    def read_data(self):
        """Display a message saying data is read from the cache file."""
        print("Reading {api_url} from {filename}".format(
            api_url=self.api_url, filename=self.filename))
        print(" instead of downloading using url {url}".format(url=self.url))

    def pretty_print_data(self):
        """Print raw data (CSV here)"""
        print(self.data)

    def pretty_print_dataframe(self, dataframe):
        """Print DataFrame (head, ... tail, dtypes)"""
        print(dataframe.head())
        print(dataframe)
        print(dataframe.tail())
        print(dataframe.dtypes)

    def convert_to_dataframe(self):
        """Convert raw CSV data to a Python Pandas DataFrame."""
        self.dataframe = pd.read_csv(self.filename,
            names=['TIMESTAMP', 'PRICE', 'VOL'])

    def calculate(self):
        """Convert data to appropriate types, filter by date, resample to
        OHLC candlesticks and hand the result to output_file()."""
        print("Convert data type")
        # Unix seconds -> pandas datetime (nanosecond resolution).
        self.dataframe['TIMESTAMP'] = \
            pd.to_datetime(self.dataframe['TIMESTAMP']*int(1e9))
        self.dataframe = \
            self.dataframe.set_index('TIMESTAMP').astype('float64')
        self.dataframe['TICK_VOL'] = 1
        price_digits = 5
        vol_digits = 8
        # Store prices/volumes as scaled integers to avoid float rounding.
        self.dataframe['PRICE'] = self.dataframe['PRICE'].map(
            lambda x: int(x * 10**price_digits))
        self.dataframe['VOL'] = self.dataframe['VOL'].map(
            lambda x: int(x * 10**vol_digits))
        # Optional date-range filtering.
        if self.dt1 is not None:
            self.dataframe = self.dataframe[self.dataframe.index >= self.dt1]
        if self.dt2 is not None:
            self.dataframe = self.dataframe[self.dataframe.index <= self.dt2]
        print("="*100)
        print("Tick data")
        self.pretty_print_dataframe(self.dataframe)
        print("="*100)
        print("Resample ticks data to OHLC candlesticks {tf} (please wait)"\
            .format(tf=self.timeframe))
        timeframes_pandas_names = {
            TF.M1: '1min',
            TF.M5: '5min',
            TF.M15: '15min',
            TF.M30: '30min',
            TF.H1: '1H',
            TF.H2: '2H',
            TF.H4: '4H',
            TF.H6: '6H',
            TF.H12: '12H',
            TF.D1: '1D',
            TF.W1: '1W',
            TF.MN: '1M',
        }
        timeframe_pd = timeframes_pandas_names[self.timeframe]
        self.dataframe_out = self.dataframe['PRICE']\
            .resample(timeframe_pd, how='ohlc')
        self.dataframe_out['VOL'] = self.dataframe['VOL']\
            .resample(timeframe_pd, how='sum')
        self.dataframe_out['TICK_VOL'] = self.dataframe['TICK_VOL']\
            .resample(timeframe_pd, how='sum')
        self.dataframe_out['TICK_VOL'] = self.dataframe_out['TICK_VOL'].fillna(0)
        self.dataframe_out = self.dataframe_out.rename(
            columns={
                'open': 'OPEN',
                'high': 'HIGH',
                'low': 'LOW',
                'close': 'CLOSE',
            }
        )
        print("Fill NaN (VOLUME=0 and OPEN=HIGH=LOW=CLOSE=CLOSE_PREVIOUS)")
        self.dataframe_out['VOL'] = self.dataframe_out['VOL'].fillna(0)
        # Fix: fillna() with no arguments raises ValueError in pandas; the
        # message above documents that empty candles carry the previous
        # close forward, i.e. a forward fill.
        self.dataframe_out['CLOSE'] = self.dataframe_out['CLOSE']\
            .fillna(method='ffill')
        self.dataframe_out['OPEN'] = self.dataframe_out['OPEN']\
            .fillna(self.dataframe_out['CLOSE'])
        self.dataframe_out['LOW'] = self.dataframe_out['LOW']\
            .fillna(self.dataframe_out['CLOSE'])
        self.dataframe_out['HIGH'] = self.dataframe_out['HIGH']\
            .fillna(self.dataframe_out['CLOSE'])
        for col in ['OPEN', 'HIGH', 'LOW', 'CLOSE', 'VOL', 'TICK_VOL']:
            self.dataframe_out[col] = self.dataframe_out[col].map(int)
        # Seconds since the Unix epoch, computed portably (no strftime('%s')).
        epoch = datetime.datetime(1970, 1, 1)
        self.dataframe_out['TIMESTAMP'] = self.dataframe_out.index.map(
            lambda x: int((x - epoch).total_seconds()))
        print("Reorder columns")
        # NOTE(review): reindex_axis was removed in modern pandas -- confirm
        # the pandas version pinned for this project.
        self.dataframe_out = self.dataframe_out.reindex_axis(
            ['TIMESTAMP', 'OPEN', 'HIGH', 'LOW', 'CLOSE', 'VOL', 'TICK_VOL'],
            axis=1)
        print("="*100)
        print("Candlestick data")
        self.pretty_print_dataframe(self.dataframe_out)
        print("="*100)
        self.output_file()

    def output_file(self):
        """Write the candlestick DataFrame in each format listed in --to."""
        if self.args.to is not None:
            for to_format in self.args.to.split(','):
                to_format = to_format.lower()
                if to_format == 'csv':
                    self.to_csv()
                elif to_format == 'xls':
                    self.to_xls()
                elif to_format == 'hdf5':
                    self.to_hdf5()
                else:
                    print("File format '{to_format}' is not supported".format(to_format=to_format))

    def output_filename(self, ext):
        """Build 'data_out/<symbol>-<tf>-<from>-<to>.<ext>' from the output
        DataFrame's first and last timestamps."""
        dt_format = '%Y%m%d%H%M'
        dt1_str = self.dataframe_out.index[0]\
            .to_pydatetime().strftime(dt_format)
        dt2_str = self.dataframe_out.index[-1]\
            .to_pydatetime().strftime(dt_format)
        filename = os.path.join(self.basepath,
            "data_out/{symbol}-{timeframe}-{dt1}-{dt2}.{ext}".format(
                symbol = self.symbol.longname(),
                timeframe = self.timeframe.name(),
                dt1 = dt1_str,
                dt2 = dt2_str,
                ext = ext))
        return(filename)

    def to_xls(self):
        """Output excel file"""
        filename = self.output_filename('xls')
        # Fix: broken "(unknown)" format strings with an unused 'filename'
        # keyword, here and in to_csv/to_hdf5 below.
        print("Save to Excel file as {filename}".format(filename=filename))
        self.dataframe_out.to_excel(filename, index=False)

    def to_csv(self):
        """Output CSV file"""
        filename = self.output_filename('csv')
        print("Save to CSV file as {filename}".format(filename=filename))
        self.dataframe_out.to_csv(filename, index=False)

    def to_hdf5(self):
        """Output HDF5 file"""
        filename = self.output_filename('h5')
        print("Save to HDF5 file as {filename}".format(filename=filename))
        try:
            os.remove(filename)  # remove h5 file to avoid it to inflate
        # Fix: narrow the bare 'except:' -- os.remove only raises OSError.
        except OSError:
            print("Can't remove {filename} (maybe this file doesn't exist)".format(filename=filename))
        store = pd.HDFStore(filename, complevel=9, complib='blosc')
        store.append('df', self.dataframe_out)
        store.close()
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description='Use the following parameters')
PARSER.add_argument('--downloadagain', action="store_true",
help="use this flag to overwrite data (downloading them again...)\n\
please don't use this too often!")
PARSER.add_argument('--dt1', action="store",
help="use this flag to set datetime from (2012-01-01T00:00Z)",
default=None)
PARSER.add_argument('--dt2', action="store",
help="use this flag to set datetime to (2013-12-31T00:00Z)",
default=None)
PARSER.add_argument('--tf', action="store",
help="use this flag to set timeframe \
(M1, M5, M15, M30, H1, H4, D1, W1, MN, YR)",
default='M15')
PARSER.add_argument('--symbol', action="store",
help="use this flag to set market symbol (mtgox|BTC/USD)",
default='mtgox|BTC/USD')
PARSER.add_argument('--to', action="store",
help="use this flag to set output file ('csv', 'xls', 'hdf5', 'csv,hdf5')")
ARGS = PARSER.parse_args()
ARGS.basepath = os.path.dirname(__file__)
ARGS.symbol = Symbol(ARGS.symbol)
ARGS.tf = TF.from_string(ARGS.tf)
if ARGS.dt1 != None:
ARGS.dt1 = dateutil.parser.parse(ARGS.dt1)
if ARGS.dt2 != None:
ARGS.dt2 = dateutil.parser.parse(ARGS.dt2)
if ARGS.dt2 < ARGS.dt1:
raise(Exception('dt2 < dt1 !'))
#print(ARGS.dt1)
DATATICKS = ApiRequestBitcoinchartsGetTicks(ARGS)
DATATICKS.update()
| true |
3609e44572690965cb83d8cb123eaa89b7a23e56 | Python | papadave11/DeepLearnig | /datatime_to_string.py | UTF-8 | 1,447 | 2.9375 | 3 | [] | no_license | from pandas import read_csv
from datetime import datetime
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.layers.core import Dense, Dropout, Activation
from keras.callbacks import EarlyStopping
import pandas as pd, numpy as np
from matplotlib import pyplot
# Python program to convert time
# from 12 hour to 24 hour format
# Function to convert the date format
def convert24(str1):
    """Convert a 12-hour time string like "08:05:45 PM" to 24-hour format.

    Examples: "08:05:45 PM" -> "20:05:45", "12:10:00 AM" -> "00:10:00".
    A string without an AM/PM marker is returned unchanged (assumed to be
    24-hour already).

    Fix: the original compared the SAME character (str1[0]) against both
    "AM" and "12" -- conditions that could never hold -- and returned
    single characters; restored the intended hour conversion.
    """
    time_part, _, period = str1.strip().partition(" ")
    hour_str, _, rest = time_part.partition(":")
    period = period.upper()
    if period == "AM":
        # Midnight hour: 12 AM is 00; other AM hours are unchanged.
        hour24 = 0 if hour_str == "12" else int(hour_str)
    elif period == "PM":
        # Noon hour: 12 PM stays 12; other PM hours get +12.
        hour24 = int(hour_str) if hour_str == "12" else int(hour_str) + 12
    else:
        # No AM/PM marker: presumably already 24-hour -- TODO confirm data.
        return str1
    return "{:02d}:{}".format(hour24, rest) if rest else "{:02d}".format(hour24)
# Load the dataset; expects a 'date' column of 12-hour time strings.
dataset = read_csv('kumdan_2019.csv')
print(dataset.date[0])
# NOTE(review): this converts only the FIRST date and assigns the scalar to
# the whole column -- presumably a column-wise .map/.apply was intended;
# confirm before relying on the result.
dataset.date = convert24(dataset.date[0])
print(dataset.date)
print("ksksksks")
"""
print(dataset.hour[0])
#print(convert24("08:05:45 PM"))
"""
6e06351a7a4fdefe6d608ea87d394fdae8933789 | Python | laolee010126/algorithm-with-python | /problems_solving/baekjoon/acm_craft.py | UTF-8 | 1,242 | 3.21875 | 3 | [] | no_license | """Get the mininum cost of building a wanted building
url: https://www.acmicpc.net/problem/1005
"""
import sys
sys.setrecursionlimit(10 ** 9)
def get_building_time(w, time, rule_cache):
total_time_cache = [-1 for _ in range(len(time))]
for i in rule_cache[0]:
total_time_cache[i] = time[i]
def get_time(w):
if total_time_cache[w] == -1:
ret = -1
for src in rule_cache[w]:
ret = max(ret, get_time(src))
total_time_cache[w] = ret + time[w]
return total_time_cache[w]
return get_time(w)
if __name__ == '__main__':
C = int(input())
ans = []
for _ in range(C):
N, R = (int(n) for n in input().split())
time = [0] + [int(n) for n in input().split()]
rule_cache = [[] for _ in range(len(time))]
start = [1 for _ in range(len(time))]
for _ in range(R):
src, dest = (int(n) for n in input().split())
rule_cache[dest].append(src)
start[dest] = 0
rule_cache[0] = [dest for dest in range(len(time)) if start[dest] == 1]
W = int(input())
v = get_building_time(W, time, rule_cache)
ans.append(v)
for n in ans:
print(n)
| true |
74acff7cf11e5086d5d5edc9eaa3c47acbb27e40 | Python | GarciaJhonLucas/colorise | /src/colorise/__init__.py | UTF-8 | 8,066 | 2.796875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python module for easy, cross-platform colored output to the terminal."""
import atexit
import itertools
import os
import platform
import sys
import colorise.formatter
from colorise.attributes import Attr # noqa: F401
# Lower-cased OS name used to pick the platform backend below.
_SYSTEM_OS = platform.system().lower()

__author__ = 'Alexander Asp Bock'
__version__ = '1.0.1'
__license__ = 'BSD 3-Clause'
__all__ = [
    'can_redefine_colors',
    'redefine_colors',
    'color_names',
    'num_colors',
    'set_color',
    'reset_color',
    'cprint',
    'fprint',
    'highlight'
]

# Determine which platform-specific color manager to import
if _SYSTEM_OS.startswith('win'):
    # Windows backend: console API based implementations, aliased to the
    # underscore-prefixed names the public wrappers below delegate to.
    from colorise.win.color_functions import (
        num_colors as _num_colors,
        redefine_colors as _redefine_colors,
        reset_color as _reset_color,
        set_color as _set_color,
    )
    from colorise.win.win32_functions import (
        can_redefine_colors as _can_redefine_colors,
        restore_console_modes,
    )
    # Ensure that the console mode set before colorise was loaded is restored
    atexit.register(restore_console_modes)
else:
    # POSIX backend: ANSI escape sequence based implementations.
    from colorise.nix.cluts import can_redefine_colors as _can_redefine_colors
    from colorise.nix.color_functions import (
        num_colors as _num_colors,
        redefine_colors as _redefine_colors,
        reset_color as _reset_color,
        set_color as _set_color,
    )
)
def num_colors():
    """Return how many colors the current terminal supports."""
    supported = _num_colors()
    return supported
def can_redefine_colors(file):
    """Tell whether the terminal behind ``file`` supports color redefinition.

    Only returns True for Windows 7/Vista and beyond as of now.
    """
    return _can_redefine_colors(file)
def redefine_colors(color_map, file=sys.stdout):
    """Redefine terminal colors from a map of color indices to RGB tuples.

    .. note::
        Redefining colors on Mac and Linux systems is not currently possible
        via colorise.
    """
    _redefine_colors(color_map, file)
def color_names():
    """Return a list of supported color names."""
    # Names are kept in one string and split, in the exact order callers see.
    names = (
        'black red green yellow blue purple magenta cyan gray grey '
        'lightgrey lightgray lightred lightgreen lightyellow lightblue '
        'lightpurple lightmagenta lightcyan white'
    )
    return names.split()
def set_color(fg=None, bg=None, attributes=None, file=sys.stdout):
    """Set the current colors on ``file``; with no arguments, restore defaults."""
    # None (not a mutable []) is the default; normalize here.
    attrs = [] if attributes is None else attributes
    _set_color(fg, bg, attrs, file)
def reset_color(file=sys.stdout):
    """Reset every color and attribute on the given stream."""
    _reset_color(file)
def cprint(
    string,
    fg=None,
    bg=None,
    attributes=None,
    end=os.linesep,
    file=sys.stdout,
    enabled=True,
):
    """Print ``string`` to ``file`` with the given colors and attributes.

    ``fg`` and ``bg`` name the foreground and background colors and
    ``attributes`` is a list of attributes to apply.  ``end`` and ``file``
    behave like the matching arguments of the built-in ``print``.  When
    ``enabled`` is False the text is written with no styling at all.

    Colors and attributes are reset before the function returns.
    """
    attrs = [] if attributes is None else attributes
    # Push out anything already buffered before toggling colors.
    file.flush()
    if enabled:
        reset_color(file)
        set_color(fg, bg, attrs, file)
        file.write(string)
        file.flush()  # Flush the colored text before resetting colors
        reset_color(file)
        # Emit the terminator after resetting so the next line is not
        # affected by a trailing newline or similar.
        file.write(end)
        file.flush()
    else:
        file.write(string)
        file.write(end)
        file.flush()
# Global color formatter instance, shared by fprint() below; created once so
# the formatter is wired to this module's set_color/reset_color a single time.
_COLOR_FORMATTER = colorise.formatter.ColorFormatter(set_color, reset_color)
def fprint(fmt, autoreset=True, end=os.linesep, file=sys.stdout, enabled=True):
    """Print a color-formatted string.

    ``autoreset`` controls whether colors and attributes are reset before
    each new color format.  For example:

    >>> colorise.fprint('{fg=blue}Hi {bg=red}world', autoreset=False)

    prints 'Hi' in blue foreground colors and 'world' in blue foreground
    colors AND a red background color, whereas:

    >>> colorise.fprint('{fg=blue}Hi {bg=red}world', autoreset=True)

    prints 'Hi' in blue foreground colors but 'world' with only a red
    background color, since colors are reset when '{bg=red}' is encountered.

    ``end`` and ``file`` behave like the matching arguments of the built-in
    ``print`` function.  Colors and attributes are reset before the function
    returns.
    """
    formatter = _COLOR_FORMATTER
    formatter.autoreset = autoreset
    formatter.file = file
    formatter.enabled = enabled
    formatter.format(fmt)
    if enabled:
        # Flush the formatted output before resetting colors.
        file.flush()
        reset_color(file)
    # Emit the terminator after the reset so the next line is unaffected by a
    # trailing newline or similar.
    file.write(end)
def highlight(
    string,
    indices,
    fg=None,
    bg=None,
    attributes=None,
    end=os.linesep,
    file=sys.stdout,
    enabled=True,
):
    """Highlight characters using indices and print to a target stream.

    The indices argument is a list of indices (not necessarily sorted) for
    which to apply the colors and attributes.  NOTE: contrary to what earlier
    docs claimed, out-of-bounds indices are NOT ignored -- an IndexError is
    raised if any index is negative or past the end of the string (see the
    bounds check below).

    fg and bg specify foreground- and background colors while attributes is a
    list of desired attributes. The remaining two keyword arguments are the
    same as Python's built-in print function.

    Colors and attributes are reset before the function returns.

    Raises:
        IndexError: If any index in indices is out of bounds for string.
    """
    if attributes is None:
        attributes = []
    # Nothing to highlight (empty string/indices, no styling requested, or
    # highlighting disabled): emit the raw string and terminator unchanged.
    if not string or not indices or not (fg or bg or attributes)\
            or not enabled:
        file.write(string + end)
        return
    idx = 0
    sorted_indices = sorted(indices)
    # Bounds check: after sorting, only the extremes need checking.
    if sorted_indices[0] < 0 or sorted_indices[-1] >= len(string):
        raise IndexError('Index out of bounds')
    # Group consecutive indices, e.g. [0, 2, 3, 5, 6] -> [(0), (2, 3), (5, 6)]
    groups = itertools.groupby(
        enumerate(sorted_indices),
        lambda x: x[0] - x[1],
    )
    # Flush any remaining stuff before resetting colors
    file.flush()
    reset_color(file)
    for _, group in groups:
        # Get the starting and ending indices of the group
        group = list(group)
        start_idx, end_idx = group[0][1], group[-1][1] + 1
        # Write anything up until the start index of the current group
        file.write(string[idx:start_idx])
        file.flush()
        set_color(fg, bg, attributes, file)
        # Write the range of characters specified by the group
        file.write(string[start_idx:end_idx])
        file.flush()
        reset_color(file)
        # Set current index to end of group
        idx = end_idx
    # Write anything that is left to write
    if idx < len(string):
        file.write(string[idx:])
    file.write(end)
def safe_atexit_reset_colors():
    """Reset colors on stdout/stderr, skipping streams that have been closed.

    This matters when running the tests: pytest redirects stdout and stderr
    and restores the originals after the session, closing the redirected
    streams, so writing to them would fail.  Because this function runs via
    the atexit module, it sees stdout and stderr only after pytest has
    restored them, and the ``closed`` guards protect against dead streams.

    See https://github.com/pytest-dev/pytest/issues/5502
    """
    for stream in (sys.stdout, sys.stderr):
        if not stream.closed:
            reset_color(stream)
# Ensure colors and attributes return to normal when colorise is quit.
# atexit callbacks run LIFO, so this reset executes before the Windows
# console-mode restore registered earlier at import time (when on Windows).
atexit.register(safe_atexit_reset_colors)
| true |
a6e1cadf4e950f07e61e81db37b3e056e61b552f | Python | stbman/cs5228 | /latlong/pairdist_latlong.py | UTF-8 | 1,201 | 2.953125 | 3 | [] | no_license | import pandas as pd
import numpy as np
# Input tables: the port pairs to measure, and the per-port lat/long lookup
# (expected columns, by position: Port, latitude, longitude).
df = pd.read_csv("pairs.csv")
db = pd.read_csv("latlong.csv")
def updateLat(port):
    """Return the latitude for *port*, looked up in the global ``db`` table.

    Asserts that exactly one matching row exists.
    """
    match = db[db["Port"] == port]
    assert match is not None
    assert match.shape == (1, 3)
    # Column 1 holds the latitude.
    return match.iloc[0, 1]
def updateLong(port):
    """Return the longitude for *port*, looked up in the global ``db`` table.

    Asserts that exactly one matching row exists.
    """
    match = db[db["Port"] == port]
    assert match is not None
    assert match.shape == (1, 3)
    # Column 2 holds the longitude.
    return match.iloc[0, 2]
def calcDistance(data):
    """Compute the great-circle (haversine) distance between two points.

    Args:
        data: A sequence of four values in degrees:
            (origin_lat, origin_long, dest_lat, dest_long).

    Returns:
        The distance in kilometres along the surface of a spherical Earth
        of radius 6373 km.
    """
    origin_lat = np.radians(data[0])
    origin_long = np.radians(data[1])
    dest_lat = np.radians(data[2])
    dest_long = np.radians(data[3])
    R = 6373.  # Earth radius in km
    dlon = dest_long - origin_long
    dlat = dest_lat - origin_lat
    # Haversine formula.
    a = (np.sin(dlat/2.))**2. + np.cos(origin_lat) * np.cos(dest_lat) * (np.sin(dlon/2))**2.
    # np.math was deprecated and removed in NumPy >= 1.25; np.arctan2 is the
    # supported equivalent and gives identical results for scalars.
    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1. - a))
    d = R * c  # (where R is the radius of the Earth)
    return d
# Attach coordinates for both endpoints of every pair, then compute the
# great-circle distance row by row and write the augmented table out.
df["Origin_lat"] = df["Origin"].apply(updateLat)
df["Origin_long"] = df["Origin"].apply(updateLong)
df["Dest_lat"] = df["Dest"].apply(updateLat)
df["Dest_long"] = df["Dest"].apply(updateLong)
df["Distance"] = df[["Origin_lat", "Origin_long", "Dest_lat", "Dest_long"]].apply(calcDistance, axis=1)
df.to_csv("pairdistances.latlong.csv")
| true |
9836e64bf7656bf1a2b2d36573c55bb408a24646 | Python | spadwal7039/ttl255.com | /netbox/pynetbox-part4/prep_tags_for_search.py | UTF-8 | 1,030 | 2.6875 | 3 | [
"MIT"
] | permissive | import ipaddress
import itertools
import pynetbox
from config import NETBOX_URL, NETBOX_TOKEN
# Instantiate pynetbox.api class with URL of your NETBOX and your API TOKEN
nb = pynetbox.api(url=NETBOX_URL, token=NETBOX_TOKEN)
# Prepare tags we want to combine
mc_side = ["a_side", "b_side"]
mc_exchange = ["nasdaq", "nyse"]
mc_type = ["prod", "cert", "dr"]
# Create product of the tag families (2 sides x 2 exchanges x 3 types = 12
# combinations).
mc_tag_prod = itertools.product(mc_side, mc_exchange, mc_type)
# Create list with lists of resulting tag combinations, each prefixed with the
# common 'mc_src' tag.
# E.g. ['mc_src', 'a_side', 'nasdaq', 'prod']
mc_tags = sorted([["mc_src"] + (list(t)) for t in mc_tag_prod])
# Container from which we will be assigning prefixes
mc_src_container = ipaddress.IPv4Network("10.255.0.0/16")
# Lazy generator of /28 subnets; zip() below stops after len(mc_tags) of them.
mc_src_pfxs = mc_src_container.subnets(new_prefix=28)
# Create new prefixes in NetBox and attach the tag combination to each one.
for pfx, tag_list in zip(mc_src_pfxs, mc_tags):
    new_pfx = nb.ipam.prefixes.create(prefix=str(pfx), tags=tag_list)
    print("Prefix: {0}, tags: {1}".format(new_pfx.prefix, new_pfx.tags))
| true |
19190c777abe5418ab6a3ac218cfdb7ebfc5d2a6 | Python | mahmoud-taya/OWS-Mastering_python_course | /Files/064.Files_handling_part_three_write_and_append_in_files.py | UTF-8 | 1,184 | 3.734375 | 4 | [] | no_license | # -------------------------------------------------
# --- File handling => write and append in file ---
# -------------------------------------------------
# Write => Replace the new value with the old value
my_file = open("C:\Users\حسن صلاح\Google Drive\1. Projects_\Python course (Osama)\Learning code\hasan.txt", "w")
my_file.write("This is a first line\n")
my_file.write("Second line")
my_file = open(r"C:\Users\حسن صلاح\Google Drive\1. Projects_\Python course (Osama)\Learning code", "w")
my_file.write("Hasan salah\n" * 1000)
my_list = ["Osama\n", "Ahmed\n", "Sayed"]
my_file = open("C:\Users\حسن صلاح\Google Drive\1. Projects_\Python course (Osama)\Learning code", "w")
my_file.writelines(my_list)
# Append => Don't replace the new values with the old values
# Start typing from the last line typed
my_file = open("C:\Users\حسن صلاح\Google Drive\1. Projects_\Python course (Osama)\Learning code", "a")
my_file.write("hasan")
my_file.write("hasansalah\n") # Write after the word hasan not in the new line because there is no \n in the last line
my_file.write("hasansalaheisa") # Write in the new line because there is a \n in the last line
| true |
f4ebb288aa151fcd939f80917040a35c37834565 | Python | slominskir/rfwtools | /rfwtools/example_set.py | UTF-8 | 51,218 | 2.5625 | 3 | [] | no_license | """This package is for managing a collection of Examples.
ExampleSet objects are typically created by a DataSet, but may be created directly.
Basic Usage Examples:
Start by saving this data in my-sample-labels.txt in the Config().label_dir directory (defaults to ./data/labels/).
**THESE FIELDS SHOULD BE TAB SEPARATED. DOCUMENTATION SYSTEM INSISTS ON CONVERTING THEM TO SPACES. PLEASE FIX IF YOU
TRY THIS EXAMPLE ON YOUR OWN**
::
zone cavity cav# fault time
1L25 4 44 Microphonics 2020/03/10 01:08:41
2L24 5 77 Controls Fault 2020/03/10 01:42:03
1L25 5 45 Microphonics 2020/03/10 02:50:07
2L26 8 96 E_Quench 2020/03/10 02:58:13
1L25 5 45 Microphonics 2020/03/10 04:55:21
1L22 4 20 Quench_3ms 2020/03/10 05:06:13
1L25 5 45 Microphonics 2020/03/10 07:35:32
2L22 0 57 Multi Cav turn off 2020/03/10 07:59:49
2L23 0 65 Multi Cav turn off 2020/03/10 07:59:56
2L24 0 73 Multi Cav turn off 2020/03/10 08:00:03
Creating from scratch. This assumes you have label files in Config().label_dir, and will save a CSV file to
Config().output_dir (defaults to ./processed-output/)
::
from rfwtools.example_set import ExampleSet
from rfwtools.example_validator import ExampleValidator
es = ExampleSet()
es.add_label_file_data(label_files=['my-sample-labels.txt'])
es.get_label_file_report()
es.remove_duplicates_and_mismatches()
es.purge_invalid_examples(ExampleValidator())
es.save_csv("my_example_set.csv")
Reporting and Visualization. This assumes that you have created and saved an ExampleSet as in the example above.
::
from rfwtools.example_set import ExampleSet
es = ExampleSet()
es.load_csv("my_example_set.csv")
es.display_frequency_barplot(x='zone', color_by='cavity_label')
es.display_zone_label_heatmap(zones=['1L22', '1L23', '1L24', '1L25', '1L26'])
es.display_summary_label_heatmap(title='2L22 7AM Summary',
query = 'zone=="2L22" & dtime < "2020-03-10 08:00:00" & dtime > "2020-03-10 07:00:00"')
"""
import sys
from datetime import datetime
import warnings
from typing import List, Tuple
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
import tzlocal
from tqdm import tqdm
from rfwtools import utils
from rfwtools.config import Config
from rfwtools.example import ExampleType, Factory, IExample
from rfwtools.example_validator import ExampleValidator
from rfwtools.timestamp import is_datetime_in_range, TimestampMapper
from rfwtools.visualize.timeline import swarm_timeline
from rfwtools.visualize import heatmap
class ExampleSet:
    """A class for managing a collection of examples, including metadata about the collection of examples.

    Each ExampleSet supports having only one type of IExample object, and each only supports one set of kwargs.

    This class has methods for building collections of examples from our standard label files or from the waveform
    browser webservice. It also includes many methods for visualizing and reporting.

    Attributes:
        known_zones:
            A list of strings identifying the minimum set of zone categories to be included in the categorical. The
            class version is the default set. The instance version is the known to that instance.
        known_cavity_labels:
            A list of strings identifying the minimum set of cavity label categories to be included in the categorical.
            The class version is the default set. The instance version is the known to that instance.
        known_fault_labels:
            A list of strings identifying the minimum set of fault label categories to be included in the categorical.
            The class version is the default set. The instance version is the known to that instance.
    """
    #: The expected fault levels as of Dec 2020. New faults may appear over time, but this is a baseline.
    _known_zones = ['0L04', '1L07', '1L22', '1L23', '1L24', '1L25', '1L26', '2L22', '2L23', '2L24', '2L25', '2L26']
    #: Class-level default cavity label categories ('0' through '8').
    _known_cavity_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8']
    #: Class-level default fault label categories.
    _known_fault_labels = ['Single Cav Turn off', 'Multi Cav turn off', 'E_Quench', 'Quench_3ms',
                           'Quench_100ms', 'Microphonics', 'Controls Fault', 'Heat Riser Choke', 'Unknown']
    # Expected column names - others may exist, but these are what are required no matter.
    # (Double leading underscore => name-mangled to _ExampleSet__mandatory_columns.)
    __mandatory_columns = ['zone', 'dtime', 'cavity_label', 'fault_label', 'cavity_conf', 'fault_conf', 'example',
                           'label_source']
def __init__(self, e_type: ExampleType = ExampleType.EXAMPLE, example_kwargs: dict = {},
known_zones: List[str] = None, known_cavity_labels: List[str] = None,
known_fault_labels: List[str] = None, req_columns: List[str] = None):
"""Create an instance of an ExampleSet. Optionally override the default levels for zones and labels.
Arguments:
e_type:
The type of example that should be created within this ExampleSet
example_kwargs:
A dictionary of keyword arguments that will be passed to every IExample object constructed.
known_zones:
A list of strings identifying the minimum set of zone categories to be included in the categorical.
known_cavity_labels:
A list of strings identifying the minimum set of cavity label categories to be included in the
categorical.
known_fault_labels:
A list of strings identifying the minimum set of fault label categories to be included in the
categorical.
req_columns:
A list of column names that are required to be in valid DataFrames used internally. These are in
addition to the class defined list of "zone", "dtime", etc..
"""
# For constructing examples.
self.e_type = e_type
self.example_kwargs = example_kwargs
self.example_factory = Factory(e_type=e_type, **example_kwargs)
# These columns are also required, but the list contents is variable based on use case.
self._req_columns = []
if req_columns is not None:
self._req_columns = req_columns
# Setup the standard default values for zone and label options
#: Instance's customized default list of known zones
self._known_zones = ExampleSet._known_zones
if known_zones is not None:
self._known_zones = known_zones
#: Instance's customized default list of known cavity_labels
self._known_cavity_labels = ExampleSet._known_cavity_labels
if known_cavity_labels is not None:
self._known_cavity_labels = known_cavity_labels
#: Instance's customized default list of known fault_labels
self._known_fault_labels = ExampleSet._known_fault_labels
if known_fault_labels is not None:
self._known_fault_labels = known_fault_labels
#: ExampleSet DataFrame with proper dtypes
self._example_df = pd.DataFrame(
{'zone': pd.Categorical([], categories=self._known_zones),
'dtime': pd.Series([], dtype='datetime64[ns]'),
'cavity_label': pd.Categorical([], categories=self._known_cavity_labels),
'fault_label': pd.Categorical([], categories=self._known_fault_labels),
'cavity_conf': pd.Series([], dtype="float64"),
'fault_conf': pd.Series([], dtype='float64'),
'example': pd.Series([], dtype="object"),
'label_source': pd.Series([], dtype="object")
}
)
# Create a hash for holding on to the label file data. This will preserve the original data after cleaning for
# duplicates, mismatches, etc.
#: A dictionary holding label file contents, keyed on file names
self.label_file_dataframes = {}
def get_required_columns(self) -> List[str]:
"""Generates the list of column names that must appear in a DataFrame for it to be a valid example_df.
Returns:
The list of column names
"""
return ExampleSet.__mandatory_columns + self._req_columns
def has_required_columns(self, df: pd.DataFrame, dtypes: bool = False, skip_example: bool = True) -> bool:
"""Check if the given DataFrame has the required columns.
Arguments:
df:
The DataFrame to check
dtypes:
Check for matching dtypes of "mandatory" columns. Uses existing _example_df's dtypes. Skips if
_examples_df is None.
skip_example:
If True, requires that the 'example' column name is present. Otherwise it is not checked.
Returns:
True if all required column names are present. False otherwise.
"""
req_cols = self.get_required_columns()
man_cols = ExampleSet.__mandatory_columns.copy()
if skip_example:
man_cols.remove("example")
req_cols.remove("example")
# Check that the columns have the same names
for col in req_cols:
if col not in df.columns.to_list():
if Config().debug:
print(f"New DataFrame missing column '{col}'", file=sys.stderr)
return False
# Now that we know the df has an example column. Check that the examples have the same type as defined for this
# ExampleSet.
if not skip_example:
df_e_type = df.example[0].get_example_type()
if self.e_type != df_e_type:
if Config().debug:
print(f"New DataFrame different ExampleType '{df_e_type}' from '{self.e_type}", file=sys.stderr)
return False
# Check that the columns have the same dtype
if dtypes and self._example_df is not None:
# Check that all of the columns have matching dtypes. CategoricalDtypes match only if the have the same
# categories, not only if they are by categorical. Here that's a problem since new categories may arise.
# Just check that the names of the dtypes match.
e_df_dtype_names = [str(x) for x in self._example_df[man_cols].dtypes]
df_dtype_names = [str(x) for x in df[man_cols].dtypes]
if not e_df_dtype_names == df_dtype_names:
print(f"New DataFrame has at least one wrong dtype.", file=sys.stderr)
return False
return True
def save_csv(self, filename: str, out_dir: str = None, sep: str = ',') -> None:
"""Write out the ExampleSet data as a CSV file relative to out_dir. Only writes out example_df equivalent.
Arguments:
filename: The filename to save. Will be relative out_dir
out_dir: The directory to save the file in. Defaults to Config().output_dir
sep: Delimiter string used by Pandas to parse given "csv" file
"""
if out_dir is None:
out_dir = Config().output_dir
self._example_df.drop('example', axis=1).to_csv(os.path.join(out_dir, filename), sep=sep, index=False)
def load_csv(self, filename: str, in_dir: str = None, sep: str = ',') -> None:
"""Read in a CSV file that has ExampleSet data.
Arguments:
filename: The filename to save. Will be relative in_dir
in_dir: The directory to find the file in. Defaults to Config().output_dir
sep: Delimiter string used by Pandas to parse given "csv" file
Raises:
ValueError: If the CSV file does not have the expected column names.
"""
if in_dir is None:
in_dir = Config().output_dir
if type(filename).__name__ == 'str':
df = pd.read_csv(os.path.join(in_dir, filename), sep=sep)
else:
# Allows for tricks with file-like objects
df = pd.read_csv(filename, sep=sep)
if not self.has_required_columns(df):
raise ValueError("Cannot load CSV file. Unexpected column format.")
# Put the DataFrame into a standard structure - categories, column order, etc.
self.__standardize_df_format(df)
# Add the example column
df['example'] = df.apply(self._Example_from_row, axis=1, raw=False)
self._example_df = df
def update_example_set(self, df: pd.DataFrame, keep_label_file_dataframes: bool = False) -> None:
"""Replaces the contents of this ExampleSet with the supplied DataFrame.
Note: A copy of df is used.
Arguments:
df: A DataFrame formatted for ExampleSet that will replace the the contents of this ExampleSet.
keep_label_file_dataframes: Should the dictionary of label file DataFrames be kept. If False, the
dictionary recreated. If True, no action is taken.
Raises:
ValueError: If columns do not match
"""
if not self.has_required_columns(df, dtypes=True):
raise ValueError(f"New df does not have the required columns or column dtypes.")
if not keep_label_file_dataframes:
self.label_file_dataframes = {}
self._example_df = df.copy()
def get_example_df(self) -> pd.DataFrame:
"""Returns the example set as a DataFrame (copy)
Returns:
A copy of the internal ExampleSet DataFrame
"""
return self._example_df.copy()
def add_label_file_data(self, label_files: List[str] = None, exclude_zones: List[str] = None,
exclude_times: List[Tuple[datetime, datetime]] = None) -> None:
"""Process and add label files' data to the ExampleSet's internal collection.
Arguments:
label_files:
List of label files to process. If None, all files in Config().label_dir are read. Relative paths are
resolved relative to Config().label_dir.
exclude_zones:
List of zones to exclude. Defaults to Config().exclude_zones.
exclude_times:
List of 2-tuples of datetime objects. Each 2-tuple specifies a range to exclude. None implie +/-Inf.
"""
# Use the defaults from the config file if None is given
e_zones = exclude_zones if exclude_zones is not None else Config().exclude_zones
e_times = exclude_times if exclude_times is not None else Config().exclude_times
l_files = label_files
if l_files is None:
# Only want to process regular files, not directories, etc.
l_files = [f for f in os.listdir(Config().label_dir) if os.path.isfile(os.path.join(Config().label_dir, f))]
if len(l_files) == 0:
raise RuntimeError(f"No label files specified or discovered in default label_dir '{Config().label_dir}'")
# Iterate through the supplied label files. Non-absolute paths will be assumed to be relative to the configured
# label directory
for label_file in l_files:
if not os.path.isabs(label_file):
label_file = os.path.join(Config().label_dir, label_file)
# Process the label file into a DataFrame
df = self._create_dataframe_from_label_file(filepath=label_file, exclude_zones=e_zones,
exclude_times=e_times)
# Stash the label file DataFrame into a dictionary in case we needed it later
self.label_file_dataframes[label_file] = df.copy()
# Add the DataFrame to the internal collection
self._add_example_df(df)
def add_web_service_data(self, server: str = None, begin: datetime = None, end: datetime = None,
models: List[str] = None) -> None:
"""Add web service data (faults labeled by in-service model) to the ExampleSet.
Note: Should be used exclusive of label data since they will largely overlap
Arguments:
server: The server to query for the data. If None, use the value in Config
begin: The earliest time for which a fault should be included. If None, defaults to Jan 1, 2018
end: The latest time for which a fault should be included. If None defaults to "now"
models: A list of model names that should be included in the results. None means include all
"""
if server is None:
server = Config().data_server
if begin is None:
begin = datetime(year=2018, month=1, day=1)
if end is None:
end = datetime.now()
# Get the data from the web service
df = self._create_dataframe_from_web_query(server=server, begin=begin, end=end, models=models)
# Add it to the existing ExampleSet
self._add_example_df(df)
    def get_label_file_report(self) -> str:
        """Generate a string containing a report on the processed label files.

        An "event" is a unique zone/timestamp pair; a "label" is a single row
        from a label file.  The count_* helpers used below are defined
        elsewhere in this class.

        Returns:
            A formatted string containing the report.
        """
        # Check to see if we have any duplicates and print them out
        num_total_events = self.count_events()
        num_total_labels = len(self._example_df)
        num_events_with_multiple_labels = self.count_duplicated_events()
        num_duplicate_labels = self.count_duplicated_labels()
        num_events_with_mismatched_labels = self.count_duplicated_events_with_mismatched_labels()
        num_mismatched_labels = self.count_mismatched_labels()
        # Only render the (potentially large) mismatch table when needed.
        mismatched_output = "None Found"
        if num_events_with_mismatched_labels != 0:
            mismatched_output = self.get_events_with_mismatched_labels().to_string()
        # The template below is intentionally unindented -- its leading
        # whitespace is part of the returned string.
        out = f"""#### Summary ####
Note: event == unique zone/timestamp, label == row in label_file
Number of events: {num_total_events}
Number of labels: {num_total_labels}
Number of events with multiple labels: {num_events_with_multiple_labels}
Number of duplicate labels: {num_duplicate_labels}
Number of 'extra' labels: {num_duplicate_labels - num_events_with_multiple_labels}
Number of events with mismatched labels: {num_events_with_mismatched_labels}
Number of mismatched labels: {num_mismatched_labels}
#### Events With Mismatched Labels ####
{mismatched_output}
"""
        return out
def remove_duplicates_and_mismatches(self, report: bool = False) -> None:
"""Removes duplicate example entries and removes all instances of examples that have mismatched labels.
Args:
report: Should information about what was removed be included?
"""
# Split into event groups
gb = self._example_df.groupby(['zone', 'dtime'])
# Keep only groups that that have exactly one unique cavity and fault label
df = gb.filter(lambda x: x.cavity_label.nunique() == 1 and x.fault_label.nunique() == 1)
# Print out the entries that were removed
if report:
tmp_df = gb.filter(lambda x: not (x.cavity_label.nunique() == 1 and x.fault_label.nunique() == 1))
print(f"## Removing the following {len(tmp_df)} entries from the ExampleSet as label mismatches ##")
print(tmp_df.sort_values(["zone", "dtime"]).to_string())
print("\n")
# Track the size so we can report if needed
orig_size = len(self._example_df)
# Replace the original example_df with this reduced set.
self._example_df = df.drop_duplicates(["zone", "dtime"])
# Print out how many entries were removed as duplicates
if report:
num_dupes = orig_size - len(self._example_df)
print(f"## Removed {num_dupes} entries from the ExampleSet for being duplicates ##")
    def purge_invalid_examples(self, validator: ExampleValidator, report: bool = True, progress: bool = True) -> None:
        """Remove every example from the ExampleSet that fails validation.

        Args:
            validator: An object that follows the ExampleValidator interface.
            report: Should information about what is purged be printed?
            progress: Should a progress bar be displayed?
        """
        # Variable for report output
        out = "\n## Validation Results ##\n"
        # Count of how many examples were removed
        count = 0
        # Private function that allows for easy reporting
        def __apply_validator(row, _validator):
            """Applies the validator function in the context of DataFrame apply method. Updates out, count from parent

            Args:
                row (DataFrame) - A DataFrame row containing the example to be validated. Should contain an Example
                under a column named 'example'
                _validator (ExampleValidator) - Object doing the validation

            Returns:
                (bool) - True if example passed validation, Otherwise, False.
            """
            # Allow this function to modify out and count from the parent function
            nonlocal out
            nonlocal count
            _validator.set_example(row.example)
            try:
                _validator.validate_data()
            except Exception as exc:
                count += 1
                msg = f"Invalid event - {row.example}\n {exc}\n"
                out += msg
                # If we're debugging, we probably don't want to wait until the end to see what happened.
                if Config().debug:
                    print(msg)
                return False
            return True
        # tqdm provides a progress bar and the pd.DataFrame.progress_apply method. This registers a new instance with
        # pandas
        if progress:
            # tqdm/pandas generate a Future warning about the Panel class. Suppress that since I can't do anything
            # about it. Will fix if it breaks.
            print("## Validating Examples ##")
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", message="The Panel class is removed from pandas.")
                tqdm.pandas()
                # Apply the validator to generate a bool column we can filter on
                valid = self._example_df.progress_apply(func=__apply_validator, axis=1, _validator=validator)
        else:
            # Apply the validator to generate a bool column we can filter on
            valid = self._example_df.apply(func=__apply_validator, axis=1, _validator=validator)
        if report:
            print(out)
            print(f"\nPurging {count} invalid examples")
        # Keep only the events that are valid
        self._example_df = self._example_df[valid]
def _add_example_df(self, df: pd.DataFrame, allow_new_columns: bool = False) -> None:
"""Add a DataFrame of examples to the ExampleSet's internal collection.
Args:
df:
A dataframe of examples to be added to the existing examples
allow_new_columns:
An exception will be raised if df has any columns that do not map to existing attributes in the existing
collection (e.g., you would be adding new columns to an existing DataFrame)
Raises:
ValueError: If new columns are being added and allow_new_columns != True
"""
# Make sure we are adding similar data unless otherwise stated
if not allow_new_columns:
if (len(df.columns.values.tolist()) != len(self._example_df.columns.values.tolist())) and (
sorted(df.columns.values.tolist()) != sorted(self._example_df.columns.values.tolist())):
raise ValueError(
"New DataFrame does not have same columns as example_df and allow_new_columns=False")
if not self.has_required_columns(df, dtypes=True):
raise ValueError("New DataFrame does not have required column dtypes.")
# Union the categories present in the existing examples with those presented in new examples
for col in [col for col in self._example_df.columns if self._example_df[col].dtype.name == 'category']:
uc = pd.api.types.union_categoricals([self._example_df[col], df[col]])
self._example_df[col] = pd.Categorical(self._example_df[col], categories=uc.categories)
df[col] = pd.Categorical(df[col], categories=uc.categories)
# Add the new data to the bottom of the internal DataFrame, and add it to the dict of included label files
self._example_df = pd.concat((self._example_df, df), ignore_index=True)
def _create_dataframe_from_web_query(self, begin: datetime, end: datetime, server: str = None,
models: List[str] = None) -> pd.DataFrame:
"""This creates a ExampleSet consistent DataFrame based on the responses of the web query. Labeled faults only.
Arguments:
server: The server to query for the data. If None, use the value in Config
begin: The earliest time for which a fault should be included. If None, defaults to Jan 1, 2018
end: The latest time for which a fault should be included. If None defaults to "now"
models: A list of model names that are to be included. All other results are excluded.
Returns:
The ExampleSet consistent DataFrame containing the web query response
"""
# Make the web query and get results
web_fmt = "%Y-%m-%d %H:%M:%S"
web_events = utils.get_events_from_web(server, begin=begin.strftime(web_fmt), end=end.strftime(web_fmt))
# Parse the web query. The web service returns fault events with a UTC timestamp. We convert it to the
# localtime zone for simplicity and compatibility with the (untimezoned) label files. Assumption here is
# that we are running this code in the same timezone as CEBAF is in.
# TODO - Is there a better way to handle this? The label files not being TZ'ed throw a wrench in the works.
fmt = "%Y-%m-%d %H:%M:%S.%f%z"
event_list = web_events['events']
extracted_events = list()
for event in event_list:
# Get a timezone aware datetime object of UTC timestamp (manually add GMT offset string) then convert it
# to local time
dt_local = datetime.strptime(event['datetime_utc'] + "-00:00", fmt).astimezone(
tzlocal.get_localzone()).replace(tzinfo=None)
zone = event['location']
# Read in label info
f_label = None
c_label = None
f_conf = None
c_conf = None
l_source = None
# Skip any fault events that were not labeled
if event['labels'] is not None:
for label in event['labels']:
# Check that this was labeled by one of the models we requested
if models is not None:
if label['model-name'] not in models:
continue
# Process the model source
if l_source is None:
l_source = label['model-name']
elif l_source != label['model-name']:
# Make the source a combo with cavity model first
if label['name'] == "cavity":
l_source = f"{label['model-name']}/{l_source}"
elif label['name'] == "fault-type":
l_source = f"{l_source}/{label['model-name']}"
else:
print(f"Skipping {zone} / {dt_local} because of unrecognized label name")
continue
# The operator facing models may present slightly processed label names. Here we convert back to
# names used in the label files. I guess this is a potential error point should future models
# use these names in a different way. Not sure what to do about it here though.
if label['name'] == "cavity":
if label['value'] == 'multiple':
c_label = "0"
else:
c_label = label['value']
c_conf = label['confidence']
elif label['name'] == "fault-type":
if label['value'] == 'Multi Cav Turn off':
f_label = "Multi Cav turn off"
else:
f_label = label['value']
f_conf = label['confidence']
# We only want labeled data
if f_label is None or c_label is None:
continue
# Accumulate the events into a list of dictionaries. Each dictionary is one event
extracted_events.append(
{'zone': zone, 'dtime': dt_local, 'fault_label': f_label, 'cavity_label': c_label,
'fault_conf': f_conf, 'cavity_conf': c_conf, 'label_source': l_source})
# Construct an empty DataFrame
df = pd.DataFrame({
'zone': pd.Categorical([]),
'dtime': pd.Series([], dtype='datetime64[ns]'),
'fault_label': pd.Categorical([]),
'cavity_label': pd.Categorical([]),
'cavity_conf': pd.Series([], dtype='float64'),
'fault_conf': pd.Series([], dtype='float64'),
'label_source': pd.Series([], dtype='object')
})
# Append the fault events to the DataFrame
for event in extracted_events:
df = df.append(event, ignore_index=True)
# Operates in place on DataFrame
self.__standardize_df_format(df)
return df
def _create_dataframe_from_label_file(self, filepath: str, exclude_zones: List[str] = None,
exclude_times: List[List[datetime]] = None) -> pd.DataFrame:
"""This parses the DataSet's specified label files and saves the constructed IExamples.
Arguments:
filepath:
Location of the label file
exclude_zones:
List of zones to exclude. Defaults to Config().exclude_zones.
exclude_times:
List of 2-tuples of datetime objects. Each 2-tuple specifies a range to exclude. None implie +/-Inf.
Returns:
A DataFrame of the Examlpes listed in the label file.
"""
# This is the header we expect in all files - tab separated
exp_header = "zone cavity cav# fault time\n"
# Work through the file and build a dictionary keyed on events
# with an array of labels found for each event. We'll print out summary information,
# and then print label files for each "good" event
if not os.path.isfile(filepath):
raise FileNotFoundError(f"File not found - {filepath}")
if Config().debug:
print(f"Processing {filepath}")
zones = []
dts = []
c_labels = []
f_labels = []
c_confs = []
f_confs = []
l_sources = []
# Read each file line by line and create a new TSV file for each
# labeled example we encounter
with open(filepath, 'r') as fh:
# Toss the header line by reading another one in loop - ignore any trailing whitespace
line = fh.readline().lstrip()
header = line
if Config().debug:
print("Skipping header: {}".format(header))
if header != exp_header:
print("Error: Unexpected header: '{}'".format(header))
# Keep track of how many lines were skipped due to some error
skip_count = 0
# Process each line. At this point we are expected labeled examples or comments
while line:
# Strip off leading and trailing whitespace
line = fh.readline().rstrip().lstrip()
# Check special cases
if not line:
if Config().debug:
print("Found last line.")
break
if line.startswith("#"):
if Config().debug:
print("Skipping: '{}'".format(line))
continue
if len(line) == 0:
continue
# Process the label fields
fields = line.split('\t')
zone = fields[0]
cavity_label = fields[1]
fault_label = fields[3]
# Label files don't provide confidence levels
cavity_conf = None
fault_conf = None
try:
tsm = TimestampMapper()
ts = tsm.get_full_timestamp(zone, datetime.strptime(fields[4], "%Y/%m/%d %H:%M:%S"))
except ValueError as exc:
skip_count += 1
print("Error processing line '{}'.".format(line))
print(f" {exc}")
continue
# Check if the zone should be excluded
if exclude_zones is not None and zone in exclude_zones:
continue
# check if the label should be excluded because of the timestamp
if is_datetime_in_range(ts, exclude_times):
continue
# Add entries to all of the lists for this example.
zones.append(zone)
dts.append(ts)
c_labels.append(cavity_label)
f_labels.append(fault_label)
c_confs.append(cavity_conf)
f_confs.append(fault_conf)
l_sources.append(os.path.basename(filepath))
if Config().debug:
print("Processed: {} {} - {}".format(zone, ts, line))
if Config().debug:
print(f"Skipped {skip_count} events from {filepath} due to processing issues")
# Construct a DataFrame for the new data
df = pd.DataFrame(
{'zone': pd.Categorical(zones),
'dtime': dts,
'cavity_label': pd.Categorical(c_labels),
'fault_label': pd.Categorical(f_labels),
'cavity_conf': c_confs,
'fault_conf': f_confs,
'label_source': l_sources})
# Update the DataFrame to have a standard format (column dtypes, order, etc.) Should add example column.
self.__standardize_df_format(df)
return df
#### Reporting-related methods ####
def count_events(self) -> int:
"""Count the number of unique events (zone/datetime combinations
This would count as two since two unique zone/datetime pairs appeared
4240 2L25 2020-09-21 06:53:16.500 5 E_Quench
4241 2L26 2020-09-22 06:53:17.500 6 E_Quench
4242 2L26 2020-09-22 06:53:17.500 6 E_Quench
Returns:
the number of unique events (zone/datetime combinations
"""
return len(self._example_df.drop_duplicates(subset=['zone', 'dtime']))
def count_labels(self) -> int:
"""Counts the number of labels (rows in label files)
Returns:
the number of labels (rows in label files)"""
return len(self._example_df)
def get_duplicated_labels(self) -> pd.DataFrame:
""""Identify the fault events that appear multiple times in the ExampleSet.
Returns:
A DataFrame containing labels for events that appear multiple times"""
# Split on event. observed=True only includes categorical levels that are seen and improves performance
gb = self._example_df.groupby(["zone", "dtime"], as_index=False, observed=True)
# Keep event groups that have > 1 rows. Return length of the resulting DataFrame
return gb.filter(lambda x: len(x) > 1)
def count_duplicated_events(self) -> int:
"""Count the number of events that appear multiple times, i.e., were labeled more than once.
Returns:
the number of events that appear multiple times, i.e., were labeled more than once.
This would count as one since only one event appeared that did occur multiple times
4240 2L25 2020-09-21 06:53:16.500 5 E_Quench
4241 2L26 2020-09-22 06:53:17.500 6 E_Quench
4242 2L26 2020-09-22 06:53:17.500 6 E_Quench
"""
# Get the duplicated labels, then remove duplicate zone/timestamp pairs
return len(self.get_duplicated_labels().drop_duplicates(["zone", "dtime"]))
def count_duplicated_labels(self) -> int:
"""Count the number of labeling occurrences for events that appear multiple times.
This is basically the number of rows in the label files that are not for unique fault events.
Returns:
The number of labeling occurrences for events that appear multiple times"""
return len(self.get_duplicated_labels())
def get_unduplicated_events(self) -> pd.DataFrame:
"""Identify the fault events that appear exactly once in the ExampleSet.
Returns:
DataFrame of the events that appear exactly once in the ExampleSet"""
# Split on event. observed=True only includes categorical levels that are seen and improves performance
gb = self._example_df.groupby(["zone", "dtime"], as_index=False, observed=True)
# Keep event groups that have exactly one row.
return gb.filter(lambda x: len(x) == 1)
def count_unduplicated_events(self) -> int:
"""Count the number of events that appear exactly once.
Returns:
The number of events that appear exactly once.
This would count as one since only one event appeared that did not occur multiple times
4240 2L25 2020-09-21 06:53:16.500 5 E_Quench
4241 2L26 2020-09-22 06:53:17.500 6 E_Quench
4242 2L26 2020-09-22 06:53:17.500 6 E_Quench
"""
# Return the length of resulting DataFrame
return len(self.get_unduplicated_events())
def get_events_with_mismatched_labels(self) -> pd.DataFrame:
"""Identify fault events that appear multiple times with different labels.
Returns:
A DataFrame containing the events that have mismatched labels"""
# Split on events.
gb = self._example_df.groupby(['zone', 'dtime'], as_index=False, observed=True)
# Keep event groups that have more than one unique fault or cavity label. Return the length of resulting
# DataFrame.
return gb.filter(lambda x: x.cavity_label.nunique() > 1 or x.fault_label.nunique() > 1)
def count_duplicated_events_with_mismatched_labels(self) -> int:
"""Count the number of events that appear multiple times with different labels.
Returns:
The number of events that appear multiple times with different labels
This would count as one since one event appeared that had mismatched labels
4240 2L26 2020-09-21 06:53:16.500 5 E_Quench
4241 2L26 2020-09-21 06:53:16.500 6 E_Quench
4242 2L26 2020-09-21 06:53:16.500 6 E_Quench
"""
# Get events that have mismatched labels
mismatch_df = self.get_events_with_mismatched_labels()
# Drop duplicates so that we have the event count, not the count of mismatched occurrences. Return the length
# of resulting DataFrame
return len(mismatch_df.drop_duplicates(['zone', 'dtime']))
def count_mismatched_labels(self) -> int:
"""Count the number of times an event with mismatched labels appears in the ExampleSet.
Returns:
The number of times an event with mismatched labels appears in the ExampleSet.
This would count as three mismatched labels since one event with mismatched labels appeared three times
4240 2L26 2020-09-21 06:53:16.500 5 E_Quench
4241 2L26 2020-09-21 06:53:16.500 6 E_Quench
4242 2L26 2020-09-21 06:53:16.500 6 E_Quench
"""
return len(self.get_events_with_mismatched_labels())
#### Visualization Methods ####
def display_timeline(self, query: str = None, **kwargs) -> None:
"""Display a timeline of examples as a swarmplot
Arguments:
query: The expr argument to DataFrame.query. Subsets data before plot
kwargs: Other named parameters are passed to swarm_timeline method
"""
df = self._example_df.copy()
if query is not None:
df = df.query(query)
swarm_timeline(df, **kwargs)
def display_summary_label_heatmap(self, title: str = "Label Summary", query: str = None) -> None:
"""Display a heatmap of fault vs cavity labels for all examples in this object
Arguments:
title: The title of the plot
query: The expr argument to DataFrame.query. Subsets data before plot
"""
df = self._example_df.copy()
if query is not None:
df = df.query(query)
heatmap.heatmap_cavity_vs_fault_label_counts(data=df, title=title)
def display_zone_label_heatmap(self, zones: List[str] = None, query: str = None) -> None:
"""Display a heatmap of fault vs cavity labels for all examples in this object for each unique zone category
Arguments:
zones: A list of the zones to display.
query: The expr argument to DataFrame.query. Subsets data before plot
"""
if zones is None:
zones = self._example_df.zone.cat.categories
df = self._example_df.copy()
if query is not None:
df = df.query(query)
heatmap.show_fault_cavity_count_by_zone(df, zones=zones)
def display_examples_by_weekday_barplot(self, color_by: str = None, title: str = None, query: str = None) -> None:
"""Show example counts by the day of the week as a stacked barplot
Arguments:
color_by: The DataFrame column on which the bars will be split/colored.
title: The title to put on the plot. A reasonable default will be generated if None.
query: The expr argument to DataFrame.query. Subsets data before plot
"""
df = self._example_df.copy()
# Query/subset the data
if query is not None:
df = df.query(query)
# Get the day names
day_names = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
df['day'] = df['dtime'].dt.day_name()
df['day'] = pd.Categorical(df['day'], categories=day_names)
if color_by is None:
if title is None:
title = "Example Count by Day of Week"
# Get the counts by day
count_df = df.groupby(['day'])['day'].count().unstack(color_by).loc[day_names, :]
else:
if title is None:
title = f"{color_by} Count by Day of Week"
df[color_by] = self._example_df[color_by]
# Get the counts by the color_by column
count_df = df.groupby(['day', color_by])['day'].count().unstack(color_by).loc[day_names, :]
# Create the plot
ax = count_df.plot(kind="bar", stacked=True, title=title)
# Format the legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='center left', bbox_to_anchor=(1, 0.5), ncol=1)
# Display it
plt.gcf().subplots_adjust(left=0.1, top=0.9, right=0.7, bottom=0.2)
plt.show()
def display_frequency_barplot(self, x: str, color_by: str = None, title: str = None, query: str = None) -> None:
"""Display the example count against one or two different factors, as a (stacked) bar chart.
Arguments:
x: The column name for which each bar will appear. Should probably be categorical.
color_by: The column name by which each bar will be split and colored (for a stacked bar plot). If
None, then a simple bar plot will be displayed.
title: The title to put on the chart. If None, a reasonable default will be generated.
query: The expr argument to DataFrame.query. Subsets data before plot
"""
df = self._example_df.copy()
if query is not None:
df = df.query(query)
# Set a reasonable default
if title is None:
start = df["dtime"].min().strftime("%Y-%m-%d %H:%M:%S.%f")[:-5]
end = df["dtime"].max().strftime("%Y-%m-%d %H:%M:%S.%f")[:-5]
title = f"{x}\n({start} - {end})"
if color_by is None:
# Simple chart if no factor to color by
df[x].value_counts().sort_index().plot(kind="bar", title=title)
else:
# Get the counts
count_df = df.groupby([x, color_by])[x].count()
# Create the plot
ax = count_df.unstack(color_by).fillna(0).plot(kind='bar', stacked=True, title=title)
# Format the legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='center left', bbox_to_anchor=(1, 0.5), ncol=1)
# Display it
plt.subplots_adjust(left=0.1, top=0.9, right=0.7, bottom=0.4)
plt.show()
def get_classification_report(self, other: 'ExampleSet', label: str = "cavity_label", query: str = None,
other_query: str = None):
"""This prints a classification report of this ExampleSet's cavity labels considering other as ground truth.
Only examples from other for which there is an example in this ExampleSet are considered
Arguments:
other (ExampleSet): An ExampleSet that contains cavity labels considered the ground truth.
label (str): The column name of containing the label values to compare.
query (str) - The expr argument to DataFrame.query. Subsets data before comparison.
other_query (str) - The expr argument to DataFrame.query. Subsets 'other' before comparison.
"""
# Subset this ExampleSet if requested
df = self._example_df.copy()
if query is not None:
df = df.query(query)
# Subset the other ExampleSet if requested
o_df = other.get_example_df()
if other_query is not None:
o_df = o_df.query(query)
df = df.merge(o_df[['zone', 'dtime', label]], how="inner", on=['zone', 'dtime'])
print(classification_report(y_true=df[label + "_y"], y_pred=df[label + "_x"]))
def __eq__(self, other: 'ExampleSet') -> bool:
"""Check if this ExampleSet is equivalent to the other."""
# Short circuit check
if self is other:
return True
# Short circuit check
if type(self) != type(other):
return False
eq = True
# Check the example DataFrame - first consider None case
if self._example_df is None and other.get_example_df() is not None:
eq = False
elif not self._example_df.equals(other.get_example_df()):
eq = False
# Check the dict of label file dataframes - first consider None case
elif self.label_file_dataframes is None and other.label_file_dataframes is not None:
eq = False
elif self.label_file_dataframes is not None and other.label_file_dataframes is None:
eq = False
# Now check that the contents/lengths are the same
elif len(self.label_file_dataframes.keys()) != len(other.label_file_dataframes.keys()):
eq = False
elif len(self.label_file_dataframes.keys()) == len(other.label_file_dataframes.keys()):
for k in self.label_file_dataframes.keys():
if k not in other.label_file_dataframes.keys():
eq = False
break
if not self.label_file_dataframes[k].equals(other.label_file_dataframes[k]):
eq = False
break
return eq
    def _Example_from_row(self, x: pd.Series) -> IExample:
        """Creates an Example object from a row of a standard ExampleSet DataFrame.

        Note: x is a single row (as produced by DataFrame.apply with axis=1), accessed
        by attribute; construction is delegated to self.example_factory.
        """
        return self.example_factory.get_example(zone=x.zone, dt=x.dtime, cavity_label=x.cavity_label,
                                                fault_label=x.fault_label, cavity_conf=x.cavity_conf,
                                                fault_conf=x.fault_conf, label_source=x.label_source)
def __standardize_df_format(self, df: pd.DataFrame) -> None:
"""Attempts to put a DataFrame in a 'standard' format.
This affects IN-PLACE variables that should be categorical, datetime, float, etc. and creates the example column
if not already present. Columns are reordered.
Arguments:
df: The DataFrame to reformat
"""
# Seems like the datetime dtype doesn't want to stick
df['dtime'] = df['dtime'].astype('datetime64[ns]')
# Update the dtypes so that we get categories, etc. where it makes sense
df['zone'] = df['zone'].astype('category')
df['fault_label'] = df['fault_label'].astype('category')
df['cavity_label'] = df['cavity_label'].astype('str')
df['cavity_label'] = df['cavity_label'].astype('category')
df.fault_conf = df.fault_conf.astype("float64")
df.cavity_conf = df.cavity_conf.astype("float64")
# Construct the Example objects based on row values if needed
if 'example' not in df.columns.to_list():
df['example'] = df.apply(self._Example_from_row, axis=1, raw=False)
# Ensure a consistent set of category levels and their order.
master = {
'zone': ExampleSet._known_zones,
'fault_label': ExampleSet._known_fault_labels,
'cavity_label': ExampleSet._known_cavity_labels
}
# Add any missing levels and the make sure they are in a predictable order
for factor in master.keys():
for f in master[factor]:
# Add the category if it is not present
if f not in df[factor].cat.categories.values:
df[factor].cat.add_categories(f, inplace=True)
# Enforce a known ordering
df[factor].cat.reorder_categories(sorted(df[factor].cat.categories), inplace=True)
| true |
30e8d065634dd196eea631bda8532f71801958a7 | Python | Matiyaa/kattis | /1.0/heartrate.py | UTF-8 | 185 | 3.15625 | 3 | [] | no_license | n = int(input())
# For each case: b beats were counted over a period of p minutes.  The average rate is
# 60*b/p bpm, and since the count can be off by one beat, the true rate lies within
# 60/p bpm of the average.
for i in range(n):
    b, p = map(float, input().split())
    bpm = (60 * b) / p
    # Hoist the shared uncertainty term (also fixes the original's min_bmp/max_bpm
    # naming inconsistency).
    margin = 60 / p
    min_bpm = bpm - margin
    max_bpm = bpm + margin
    print(min_bpm, bpm, max_bpm)
| true |
dafe2fc9be51dbb54fa5b860d82aed591ca39e23 | Python | nickwu241/coding-problems | /leetcode/1103-distribute-candies-to-people.py | UTF-8 | 524 | 3.265625 | 3 | [] | no_license | # https://leetcode.com/problems/distribute-candies-to-people/
import itertools
from typing import List
class Solution:
    """LeetCode 1103: Distribute Candies to People."""

    def distributeCandies(self, candies: int, num_people: int) -> List[int]:
        """Hand out 1, 2, 3, ... candies round-robin until none remain.

        The k-th hand-out gives k candies; when fewer than k candies are left, the
        current person simply receives everything remaining.

        Args:
            candies: Total number of candies to distribute.
            num_people: Number of people in the circle (must be > 0).

        Returns:
            A list of length num_people with each person's final candy count.
        """
        # BUG FIX: the original annotated the return as List[int] without importing
        # typing.List, which raised NameError when the class body executed; the import
        # is now present at the top of the file.
        result = [0] * num_people
        candies_to_give = 0
        for i in itertools.cycle(range(num_people)):
            candies_to_give += 1
            if candies <= candies_to_give:
                # Not enough left for a full hand-out: give the remainder and stop.
                result[i] += candies
                return result
            candies -= candies_to_give
            result[i] += candies_to_give
| true |
14ae7e311f29b2a70a0a6a757358570d2c37d99a | Python | zjarci/schematic-file-converter | /upconvert.py | UTF-8 | 2,461 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python
""" A universal hardware design file format converter using
Upverter's Open JSON Interchange Format """
# upconvert.py - A universal hardware design file format converter using
# Upverter's Open JSON Interchange Format
# (http://upverter.com/resources/open-json-format/)
#
# Authors:
# Alex Ray ajray@ncsu.edu
# Upverter support@upverter.com
#
# Usage example:
# ./upconvert.py -i test.upv -o test.json
import os, re, copy, json
import parser.openjson, parser.kicad, parser.eaglexml
import writer.openjson, writer.kicad
from argparse import ArgumentParser
# Registry of supported input formats -> parser classes.
PARSERS = {
    'openjson': parser.openjson.JSON,
    'kicad': parser.kicad.KiCAD,
    'eaglexml': parser.eaglexml.EagleXML,
}
# Registry of supported output formats -> writer classes.
# NOTE(review): eaglexml has a parser but no writer, so eaglexml output is unsupported.
WRITERS = {
    'openjson': writer.openjson.JSON,
    'kicad': writer.kicad.KiCAD
}
def parse(in_file, in_format='openjson'):
""" Parse the given input file using the in_format """
try:
p = PARSERS[in_format]()
except KeyError:
print "ERROR: Unsupported input type:", in_format
exit(1)
return p.parse(in_file)
def write(dsgn, out_file, out_format='openjson'):
""" Write the converted input file to the out_format """
try:
w = WRITERS[out_format]()
except KeyError:
print "ERROR: Unsupported output type:", out_format
exit(1)
return w.write(dsgn, out_file)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", "--input", dest="inputfile",
help="read INPUT file in", metavar="INPUT")
parser.add_argument("-f", "--from", dest="inputtype",
help="read input file as TYPE", metavar="TYPE",
default="openjson")
parser.add_argument("-o", "--output", dest="outputfile",
help="write OUTPUT file out", metavar="OUTPUT")
parser.add_argument("-t", "--to", dest="outputtype",
help="write output file as TYPE", metavar="TYPE",
default="openjson")
args = parser.parse_args()
inputtype = args.inputtype
outputtype = args.outputtype
inputfile = args.inputfile
outputfile = args.outputfile
if None == inputfile:
args.print_help()
exit(1)
# parse and export the data
design = parse(inputfile, inputtype)
if design is not None: # we got a good result
write(design, outputfile, outputtype)
else: # parse returned None -> something went wrong
print "Output cancelled due to previous errors."
exit(1)
| true |
ae723561829c3badd2f53ac6b8f498a7814ecf70 | Python | rafaelmsartor/python-studies | /PythonAndBlockchain/assignments/assignment2.py | UTF-8 | 1,100 | 4.84375 | 5 | [] | no_license |
def print_header(header_text):
    """Print header_text followed by a dashed underline of matching length."""
    print(header_text)
    print('-' * len(header_text))


# 1) Create a list of names and use a for loop to output the length of each name (len() ).
names_list = ['Rafael', 'Fernanda', 'Anthony',
              'Maria', 'Ana', 'Joe', 'Nathaly', 'Noah']

print_header('First Task')
for name in names_list:
    print(f'{name}: {len(name)}')
print()

# 2) Add an if check inside the loop to only output names longer than 5 characters.
print_header('Second task')
for name in names_list:
    if len(name) > 5:
        print(f'{name}: {len(name)}')
print()

# 3) Add another if check to see whether a name includes a "n" or "N" character.
print_header('Third task')
for name in names_list:
    if len(name) > 5 and ('n' in name or 'N' in name):
        print(f'{name}: {len(name)}')
print()

# 4) Use a while loop to empty the list of names (via pop() )
print_header('Fourth task')
print(f'List before emptying: {names_list}')
while names_list:
    names_list.pop()
print(f'List after emptying: {names_list}')
| true |
412e026e9e1b85bf1f6dbcb68485e4f05cb9e398 | Python | PurpleBubble123/pp | /basic/list.py | UTF-8 | 511 | 4.40625 | 4 | [] | no_license | # information about using list
# Demonstrations of basic list operations (CRUD: create, read, update, delete).
# BUG FIX: the file previously ended with a dangling "list1." which was a SyntaxError
# and prevented the whole script from running; that fragment has been removed.
list1 = [1, 2, 3, "a", "b", "c", [1, 2, 3]]  # a list may contain a nested list
# print(list1)
# print(type(list1))  # inspect the type
# Access / slicing
# print(list1[0:3])  # slice includes the start index, excludes the end index
# print(list1[1:])
# Append
# list1.append("m")
# list1 = list1 + ["n"]
# print(list1)
# Update
# list1[1] = "9"
# print(list1)
# Delete
# del list1
# del list1[1]
# print(list1)
# Accessing elements of a nested list
print(list1[6])
print(list1[6][0])
# Practice
| true |
8e7828cddcf57767ed8107fd22bfe9933ed524b0 | Python | Christian-Fisher/SYSC3010T6_IRPS | /Python codes/IRpoller.py | UTF-8 | 1,410 | 2.84375 | 3 | [] | no_license | import RPi.GPIO as IO
import time
import socket
IO.setwarnings(False)
IO.setmode(IO.BCM)
IRPins = [12,13,14,15,16,17,18,19,20] # array of GPIO pins
Socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
receiveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
receiveSocket.bind(("", 3001))
port =2001
local = "127.0.0.1"
IO.setup(12,IO.IN) #GPIO 12 -> IR sensorA1 as input
IO.setup(13,IO.IN) #GPIO 13 -> IR sensorA2 as input
IO.setup(14,IO.IN) #GPIO 14 -> IR sensorA3 as input
IO.setup(15,IO.IN) #GPIO 15 -> IR sensorB1 as input
IO.setup(16,IO.IN) #GPIO 16 -> IR sensorB2 as input
IO.setup(17,IO.IN) #GPIO 17 -> IR sensorB3 as input
IO.setup(18,IO.IN) #GPIO 18 -> IR sensorC1 as input
IO.setup(19,IO.IN) #GPIO 19 -> IR sensorC2 as input
IO.setup(20,IO.IN) #GPIO 20 -> IR sensorC3 as input
flag=[1,1,1,1,1,1,1,1,1] # initializing sensor states
def sendToMain(spot, occupancy):
Socket.sendto(("IR:"+str(spot)+","+str(occupancy)).encode('utf-8'),(local, port))
print("IR:"+str(spot)+","+str(occupancy))
while 1:# polling indefinitely
time.sleep(2)
for x in range(0,9): #loop through nine IR sensors
if(IO.input(IRPins[x])!= flag[x]): #comparing current state with previous state
flag[x] = (IO.input(IRPins[x])) # update flag element when state change occurs
sendToMain(x, flag[x])
GPIO.cleanup() # Clearing GPIO setup at exit
| true |
ac238bf130461ca382c2fe277569145a9a2fcaad | Python | uchenna-j-edeh/dailly_problems | /arrays_manipulations_algorithms/run_length_encoding.py | UTF-8 | 1,491 | 3.890625 | 4 | [] | no_license | """
Author: Uchenna Edeh
Run-length encoding is a fast and simple method of encoding strings. The basic idea is to represent repeated successive characters as a single count and character. For example, the string "AAAABBBCCDAA" would be encoded as "4A3B2C1D2A".
Implement run-length encoding and decoding. You can assume the string to be encoded have no digits and consists solely of alphabetic characters. You can assume the string to be decoded is valid.
"""
import sys
def solution1(my_str):
    """Run-length encode my_str as <count><char> pairs, e.g. "AAAABBBCCDAA" -> "4A3B2C1D2A".

    Assumes the input contains no digits.  BUG FIX: the original returned "0" for an
    empty string; it now returns "".  The O(n^2) string concatenation and the unused
    unique_char variable were also removed.
    """
    if not my_str:
        return ''
    pieces = []          # encoded runs, joined once at the end (O(n) instead of O(n^2))
    run_char = my_str[0]
    run_length = 1
    for ch in my_str[1:]:
        if ch == run_char:
            run_length += 1
        else:
            # Run ended: flush it and start a new one.
            pieces.append(str(run_length) + run_char)
            run_char = ch
            run_length = 1
    # Flush the final run.
    pieces.append(str(run_length) + run_char)
    return ''.join(pieces)
def main(args):
    """CLI entry point: expect exactly one argument (the string to encode) and print its encoding."""
    if len(args) == 2:
        print(solution1(args[1]))
        return
    raise AssertionError(
        "Usage:\n\tpython3 {0} '{1}'\n\tExpected Result: {2}\n\tPlease Try Again!\n\t".format(
            __file__, "AAAABBBCCDAA", "4A3B2C1D2A"))
if __name__ == "__main__":
    # Run the CLI; main() raises AssertionError with a usage message on bad arguments.
    try:
        main(sys.argv)
    except AssertionError as e:
        # Show the usage message and exit with a non-zero status.
        print(e)
        sys.exit(1)
| true |
57872fddc9ef4f3724a37856bdcc44674063dd46 | Python | rositahbakken/prosjekt | /MessageParser.py | UTF-8 | 2,789 | 2.921875 | 3 | [] | no_license | __author__ = 'Anna'
import json
class MessageParser():
    """Turns raw JSON payloads from the chat server into printable strings.

    Payloads are JSON objects of the form:
        {'timestamp': <timestamp>, 'sender': <username>,
         'response': <response>, 'content': <content>}
    """

    def __init__(self):
        # Dispatch table: server 'response' field -> handler method.
        self.possible_responses = {
            'error': self.parse_error,
            'info': self.parse_info,
            'message': self.parse_message,
            'history': self.parse_history,
            'login': self.parse_login,
            'logout': self.parse_logout,
            'names': self.parse_names,
            'help': self.parse_help
            # More key:values pairs are needed
        }

    def parse(self, payload):
        """Decode a raw payload (bytes) and dispatch it to the matching parse_* handler.

        Returns the formatted display string, or an error string when the payload's
        'response' field is not recognized.
        """
        payload = json.loads(payload.decode())  # decode the JSON object
        handler = self.possible_responses.get(payload['response'])
        if handler is None:
            # Response not valid
            return 'Not valid server response'
        return handler(payload)

    # -- Shared formatting helpers (the eight handlers previously duplicated these bodies) --

    def _timestamp_content(self, payload):
        """Format '<timestamp> <content>'."""
        return payload['timestamp'] + ' ' + payload['content']

    def _timestamp_sender_content(self, payload):
        """Format '<timestamp> <sender>: <content>'."""
        return payload['timestamp'] + ' ' + payload['sender'] + ': ' + payload['content']

    def parse_error(self, payload):
        return payload['timestamp'] + ' ' + 'Error: ' + payload['content']

    def parse_info(self, payload):
        return self._timestamp_content(payload)

    def parse_message(self, payload):
        return self._timestamp_sender_content(payload)

    def parse_history(self, payload):
        # History entries arrive pre-formatted like ordinary messages.  (A large block of
        # commented-out per-user aggregation code was removed here as dead code.)
        return self._timestamp_sender_content(payload)

    def parse_login(self, payload):
        return self._timestamp_content(payload)

    def parse_logout(self, payload):
        return self._timestamp_content(payload)

    def parse_names(self, payload):
        return self._timestamp_content(payload)

    def parse_help(self, payload):
        return self._timestamp_content(payload)

    # Include more methods for handling the different responses...
| true |
790769dd2837866dd1ea5e3e5d7d2fcb2f6c68c1 | Python | sberbank-ai-lab/embeddings-valid | /embeddings_validation/file_reader.py | UTF-8 | 4,514 | 2.640625 | 3 | [] | no_license | import os
import pickle
import numpy as np
import pandas as pd
# Maps the type names allowed in config 'cols_id_type' to the concrete dtypes used when
# casting id columns ('date' truncates to day precision, 'datetime' to second precision).
ID_TYPE_MAPPING = {
    'str': str,
    'int': np.int32,
    'date': 'datetime64[D]',
    'datetime': 'datetime64[s]',
}
class BaseReader:
    """Base reader for validation dataset files.

    Holds a pandas DataFrame (``self.df``) plus the id/target column schema
    taken from ``conf['target']``.  Subclasses decide which columns survive
    loading by overriding ``keep_columns``.
    """
    def __init__(self, conf):
        self.conf = conf
        self.source_path = []
        self.df = None
        target_conf = conf['target']
        self.cols_id = [x for x in target_conf['cols_id']]
        self.cols_id_type = [ID_TYPE_MAPPING.get(x) for x in target_conf['cols_id_type']]
        self.col_target = target_conf['col_target']
        # Names whose declared type string had no entry in ID_TYPE_MAPPING.
        _unknown = [s for t, s in zip(self.cols_id_type, target_conf['cols_id_type']) if t is None]
        if len(_unknown) > 0:
            raise AttributeError(f'Unknown types: {_unknown}')
    def clone_schema(self):
        """Return a new reader with the same config and source paths, no data."""
        new = self.__class__(self.conf)
        new.source_path = self.source_path
        return new
    def dump(self, path):
        """Pickle this reader (including its DataFrame) to `path`."""
        with open(path, 'wb') as f:
            pickle.dump(self, f)
    @classmethod
    def load(cls, path):
        """Unpickle a reader previously written by `dump`.

        NOTE(review): pickle.load is unsafe on untrusted input; only load
        files produced by this tool.
        """
        with open(path, 'rb') as f:
            file = pickle.load(f)
        return file
    @staticmethod
    def _read_pd(path, read_args):
        """Read one file into a DataFrame, dispatching on file extension."""
        ext = os.path.splitext(path)[1]
        if ext == '.csv':
            return pd.read_csv(path, **read_args)
        if ext == '.pickle':
            return pd.read_pickle(path, **read_args)
        if ext == '.parquet':
            return pd.read_parquet(path, **read_args)
        raise NotImplementedError(f'Not implemented for "{ext}" file type')
    @classmethod
    def read_table(cls, conf, file_name, rename_cols=None, drop_cols=None, read_args=None,
                   drop_duplicated_ids=False, **kwargs):
        """Build a reader from one file or a list of files under conf.root_path.

        Files are concatenated row-wise, columns renamed/dropped/filtered,
        id columns cast, then duplicate ids are either dropped or rejected.
        """
        if read_args is None:
            read_args = {}
        self = cls(conf)
        columns = self.keep_columns()
        if type(file_name) is not list:
            file_name = [file_name]
        self.source_path = []
        df = []
        for f in file_name:
            path = os.path.join(conf.root_path, f)
            self.source_path.append(path)
            df.append(self._read_pd(path, read_args))
        self.df = pd.concat(df, axis=0)
        if rename_cols is not None:
            self.df = self.df.rename(columns=rename_cols)
        if drop_cols is not None:
            self.df = self.df.drop(columns=drop_cols)
        if columns is not None:
            self.df = self.df[columns]
        self.cols_id_type_cast()
        if drop_duplicated_ids:
            self.drop_duplicated_ids()
        else:
            self.check_duplicated_ids()
        return self
    def __len__(self):
        return len(self.df)
    def keep_columns(self):
        """Columns to keep after loading; None means keep everything."""
        raise NotImplementedError()
    def cols_id_type_cast(self):
        """Cast each id column to its configured dtype."""
        for col, dtype in zip(self.cols_id, self.cols_id_type):
            self.df[col] = self.df[col].astype(dtype)
    def drop_duplicated_ids(self):
        """Keep the first row for each id tuple, re-indexing the frame."""
        self.df = self.df.drop_duplicates(subset=self.cols_id, keep='first', ignore_index=True)
    def check_duplicated_ids(self):
        """Raise if any id tuple occurs more than once."""
        duplicated_count = self.df.duplicated(subset=self.cols_id, keep=False).sum()
        if duplicated_count > 0:
            raise IndexError(f'Found {duplicated_count} duplicated rows in "{self.source_path}". '
                             f'Check and fix your dataset or use `drop_duplicated_ids=True`')
    @property
    def target_values(self):
        # Target column as a Series.
        return self.df[self.col_target]
    @property
    def ids_values(self):
        # Id columns as a DataFrame.
        return self.df[self.cols_id]
    def select_ids(self, df_ids):
        """New reader keeping only rows whose ids appear in `df_ids` (inner join)."""
        new_df = self.clone_schema()
        new_df.df = pd.merge(self.df, df_ids.ids_values, left_on=self.cols_id, right_on=df_ids.cols_id, how='inner')
        return new_df
    def exclude_ids(self, df_ids):
        """New reader dropping rows whose ids appear in `df_ids`.

        Returns (new_reader, number_of_excluded_rows); uses merge with
        indicator to classify each row as 'both' vs 'left_only'.
        """
        new_df = self.clone_schema()
        df = pd.merge(self.df, df_ids.ids_values,
                      left_on=self.cols_id, right_on=df_ids.cols_id, how='left', indicator=True)
        excluded_cnt = df['_merge'].eq('both').sum()
        df = df[df['_merge'].eq('left_only')].drop(columns='_merge')
        new_df.df = df
        return new_df, int(excluded_cnt)
    def select_pos(self, df_pos):
        """New reader keeping rows at the given positional indices."""
        new_df = self.clone_schema()
        new_df.df = self.df.iloc[df_pos]
        return new_df
class TargetFile(BaseReader):
    """Reader whose frame keeps the id columns plus the target column."""
    def keep_columns(self):
        # ids first, target last
        return [*self.cols_id, self.col_target]
class IdFile(BaseReader):
    """Reader whose frame keeps only the id columns."""
    def keep_columns(self):
        return self.cols_id
class FeatureFile(BaseReader):
    """Reader that keeps every column of the source file."""
    def keep_columns(self):
        # None means "no column filtering" in read_table.
        return None
| true |
9a5b6520c8c66baeccce1ebdc98bb447c5432f8d | Python | bryceklinker/rasperry-pi-fun | /simple-lights/simple_lights/start.py | UTF-8 | 212 | 2.625 | 3 | [
"MIT"
] | permissive | from gpiozero import LED
from time import sleep
# LEDs wired to GPIO (BCM) pins 18 and 17.
pin_18_led = LED(18)
pin_17_led = LED(17)
# Blink the two LEDs alternately, switching once per second, forever.
while True:
    pin_17_led.off()
    pin_18_led.on()
    sleep(1)
    pin_17_led.on()
    pin_18_led.off()
    sleep(1)
| true |
3e0ac4257eb29fe4aea3231e33d786c74ed8bccc | Python | pseudonym117/Riot-Watcher | /src/riotwatcher/_apis/league_of_legends/LeagueApiV4.py | UTF-8 | 3,875 | 2.71875 | 3 | [
"MIT"
] | permissive | from .. import BaseApi, NamedEndpoint
from .urls import LeagueApiV4Urls
class LeagueApiV4(NamedEndpoint):
    """
    Wraps the League-v4 API calls provided by the Riot API.

    Full route documentation:
    https://developer.riotgames.com/api-methods/#league-v4/
    """

    def __init__(self, base_api: BaseApi):
        """
        Initialize a new LeagueApiV4 which uses the provided base_api.

        :param BaseApi base_api: the root API object to use for making all requests.
        """
        super().__init__(base_api, self.__class__.__name__)

    def challenger_by_queue(self, region: str, queue: str):
        """
        Get the challenger league for a given queue.

        :param string region: the region to execute this request on
        :param string queue: the queue to get the challenger players for

        :returns: LeagueListDTO
        """
        endpoint_url = LeagueApiV4Urls.challenger_by_queue
        return self._request_endpoint("challenger_by_queue", region, endpoint_url, queue=queue)

    def grandmaster_by_queue(self, region: str, queue: str):
        """
        Get the grandmaster league for a given queue.

        :param string region: the region to execute this request on
        :param string queue: the queue to get the grandmaster players for

        :returns: LeagueListDTO
        """
        endpoint_url = LeagueApiV4Urls.grandmaster_by_queue
        return self._request_endpoint("grandmaster_by_queue", region, endpoint_url, queue=queue)

    def masters_by_queue(self, region: str, queue: str):
        """
        Get the master league for a given queue.

        :param string region: the region to execute this request on
        :param string queue: the queue to get the master players for

        :returns: LeagueListDTO
        """
        endpoint_url = LeagueApiV4Urls.master_by_queue
        return self._request_endpoint("masters_by_queue", region, endpoint_url, queue=queue)

    def by_id(self, region: str, league_id: str):
        """
        Get league with given ID, including inactive entries.

        :param string region: the region to execute this request on
        :param string league_id: the league ID to query

        :returns: LeagueListDTO
        """
        return self._request_endpoint("by_id", region, LeagueApiV4Urls.by_id, league_id=league_id)

    def by_summoner(self, region: str, encrypted_summoner_id: str):
        """
        Get league entries in all queues for a given summoner ID.

        :param string region: the region to execute this request on
        :param string encrypted_summoner_id: the summoner ID to query

        :returns: Set[LeagueEntryDTO]
        """
        return self._request_endpoint(
            "by_summoner",
            region,
            LeagueApiV4Urls.by_summoner,
            encrypted_summoner_id=encrypted_summoner_id,
        )

    def entries(self, region: str, queue: str, tier: str, division: str, page: int = 1):
        """
        Get all the league entries.

        :param string region: the region to execute this request on
        :param string queue: the queue to query, i.e. RANKED_SOLO_5x5
        :param string tier: the tier to query, i.e. DIAMOND
        :param string division: the division to query, i.e. III
        :param int page: the page for the query to paginate to. Starts at 1.

        :returns: Set[LeagueEntryDTO]
        """
        return self._request_endpoint(
            "entries",
            region,
            LeagueApiV4Urls.entries,
            queue=queue,
            tier=tier,
            division=division,
            page=page,
        )
| true |
0a33a4f4cc388f29ac47e01f18e8845e45fcb0dd | Python | YnievesDotNet/calysto_lc3 | /calysto_lc3/lc3.py | UTF-8 | 75,203 | 3.09375 | 3 | [
"BSD-2-Clause"
] | permissive | """
This code is based on:
http://www.daniweb.com/software-development/python/code/367871/
assembler-for-little-computer-3-lc-3-in-python
Order of BRanch flags relaxed, BR without flags interpreted as BRnzp
(always).
"""
from array import array
import sys
try:
from IPython.display import HTML
except:
pass
def ascii_str(i):
    """Annotation for a 16-bit word: printable ASCII shows the integer and
    its character, other byte values show just the integer, and anything
    >= 256 yields an empty string."""
    if i >= 256:
        return ""
    if 32 <= i <= 127:
        return "(or %s, %s)" % (i, repr(chr(i)))
    return "(or %s)" % i
class HEX(int):
    """int subclass whose repr prints in LC-3 hex notation (e.g. x3000)."""
    def __repr__(self):
        return lc_hex(self)
def lc_hex(h):
    """Format an integer as an LC-3 hex literal, e.g. 'x3000'.

    Non-integer values (e.g. an already-formatted string) are returned
    unchanged so callers can pass labels straight through.
    """
    try:
        return 'x%04X' % lc_bin(h)
    except TypeError:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only a non-int reaches here.
        return h
def lc_bin(v):
    """Truncate v to its low 16 bits (one unsigned machine word)."""
    return 0xFFFF & v
def is_composed_of(s, letters):
    """Return True when s is non-empty and every character of s occurs in
    `letters`.

    Rewritten from a per-letter count comparison: the old form was
    O(len(s) * len(letters)) and answered wrongly when `letters` contained
    duplicate characters.
    """
    return len(s) > 0 and all(c in letters for c in s)
def is_hex(s):
    """True when s is an LC-3 hex token: 'x' then hex digits, with an
    optional minus sign after the 'x' (e.g. x3000, x-1F)."""
    if len(s) < 2 or s[0] != "x":
        return False
    digits = s[2:] if s[1] == "-" else s[1:]
    return len(digits) > 0 and all(c in "0123456789ABCDEF" for c in digits.upper())
def is_bin(s):
    """True when s is a non-empty string made only of 0s and 1s."""
    return len(s) > 0 and all(c in "01" for c in s)
def sext(binary, bits):
    """Sign-extend a `bits`-wide two's-complement value to 16 bits.

    If the sign bit (bit `bits`-1) is set, bits [bits, 15] are filled with
    ones; otherwise the value is returned unchanged.
    """
    if binary & (1 << (bits - 1)):
        # Build the high-bit fill in one arithmetic step instead of the
        # old bit-by-bit loop.
        mask = ((1 << 16) - 1) & ~((1 << bits) - 1)
        return mask | binary
    return binary
def lc_int(v):
    """Interpret the low 16 bits of v as a signed two's-complement number."""
    word = v & 0xFFFF
    if word & 0x8000:
        # Negative: subtract the 16-bit modulus.
        return word - 0x10000
    return v
def plus(v1, v2):
    """Add two 16-bit machine words as signed values; the result is a plain
    Python int and may need truncation (lc_bin) by the caller."""
    return sum(lc_int(x) for x in (v1, v2))
class LC3(object):
"""
The LC3 Computer. This object can assemble, disassemble, and execute
LC3 programs.
"""
# if RX is used in an instruction, these are the positions:
# 0000111222000333
reg_pos = [9, 6, 0]
special_reg_pos = {"JSRR": [6]} ## JSRR R1
flags = {'n': 1 << 11, 'z': 1 << 10, 'p': 1 << 9}
# All variations of instructions:
# Note that the following are handled in code:
# * two modes of ADD and AND
# * variations of BR (BRn, BRnp, etc)
# as they have the same mnemonic
instruction_info = {
'ADD': 0b1 << 12,
'AND': 0b0101 << 12,
'BR': 0b0,
'GETC': (0b1111 << 12) + 0x20,
'HALT': (0b1111 << 12) + 0x25,
'IN': (0b1111 << 12) + 0x23,
'JMP': 0b1100 << 12,
'JMPT': (0b1100000 << 9) + 1,
'JSR': 0b01001 << 11,
'JSRR': 0b01000 << 11,
'LD': 0b0010 << 12,
'LDI': 0b1010 << 12,
'LDR': 0b0110 << 12,
'LEA': 0b1110 << 12,
'NOT': (0b1001 << 12) + 0b111111,
'OUT': (0b1111 << 12) + 0x21,
'PUTS': (0b1111 << 12) + 0x22,
'PUTSP': (0b1111 << 12) + 0x24,
'RET': 0b1100000111000000,
'RTI': 0b1000 << 12,
'RTT': 0b1100000111000001,
'ST': 0b0011 << 12,
'STI': 0b1011 << 12,
'STR': 0b0111 << 12,
'TRAP': 0b1111 << 12,
'SHIFT': 0b1101 << 12,
}
# bits of immediate mode field:
immediate = {
'ADD': 5,
'AND': 5,
'BR': 9,
'GETC': 0,
'HALT': 0,
'IN': 0,
'JMP': 0,
'JMPT': 0,
'JSR': 11,
'JSRR': 0,
'LD': 9,
'LDI': 9,
'LDR': 6,
'LEA': 9,
'NOT': 9,
'OUT': 0,
'PUTS': 0,
'PUTSP': 0,
'RET': 0,
'RTI': 0,
'RTT': 0,
'ST': 9,
'STI': 9,
'STR': 6,
'TRAP': 8,
'SHIFT': 6, ## SHIFT R2, #1
}
# Based on appendix figure C.2 and C.7 states, and 1 cycle for each memory read
cycles = {
0b0000: 5 + 1, # BR
0b0001: 5 + 1, # ADD
0b0010: 7 + 3, # LD, + 2 memory reads
0b0011: 7 + 2, # ST, + 1 memory read, one store
0b0100: 6 + 1, # JSR
0b0101: 5 + 1, # AND
0b0110: 7 + 2, # LDR
0b0111: 7 + 3, # STR
0b1000: 12 + 3, # RTI
0b1001: 5 + 1, # NOT
0b1010: 9 + 3, # LDI
0b1011: 9 + 3, # STI
0b1100: 5 + 1, # JMP and RET
0b1101: 5 + 1, # SHIFT
0b1110: 5 + 1, # LEA
0b1111: 7 + 2, # TRAP
}
mnemonic = {
0b0000: "BR",
0b0001: "ADD",
0b0010: "LD",
0b0011: "ST",
0b0100: "JSR",
0b0101: "AND",
0b0110: "LDR",
0b0111: "STR",
0b1000: "RTI",
0b1001: "NOT",
0b1010: "LDI",
0b1011: "STI",
0b1100: "JMP",
0b1101: "SHIFT",
0b1110: "LEA",
0b1111: "TRAP",
}
    def __init__(self, kernel=None):
        """Build the simulator: wire the opcode -> handler dispatch tables
        and reset all machine state.

        kernel: optional host object providing console hooks (raw_input,
        Error); when None, errors go to stderr.
        """
        # Functions for interpreting instructions:
        self.kernel = kernel
        self.char_buffer = []
        self.breakpoints = {}
        self.dump_mode = "dis"
        self.apply = {
            0b0000: self.BR,
            0b0001: self.ADD,
            0b0010: self.LD,
            0b0011: self.ST,
            0b0100: self.JSR,
            0b0101: self.AND,
            0b0110: self.LDR,
            0b0111: self.STR,
            0b1000: self.RTI,
            0b1001: self.NOT,
            0b1010: self.LDI,
            0b1011: self.STI,
            0b1100: self.JMP, # and RET
            0b1101: self.SHIFT,
            0b1110: self.LEA,
            0b1111: self.TRAP,
        }
        # Functions for formatting instructions:
        self.format = {
            0b0000: self.BR_format,
            0b0001: self.ADD_format,
            0b0010: self.LD_format,
            0b0011: self.ST_format,
            0b0100: self.JSR_format,
            0b0101: self.AND_format,
            0b0110: self.LDR_format,
            0b0111: self.STR_format,
            0b1000: self.RTI_format,
            0b1001: self.NOT_format,
            0b1010: self.LDI_format,
            0b1011: self.STI_format,
            0b1100: self.JMP_format, # and RET_format
            0b1101: self.SHIFT_format,
            0b1110: self.LEA_format,
            0b1111: self.TRAP_format,
        }
        self.initialize()
#### The following allow different hardware implementations:
#### memory, register, nzp, and pc can be implemented in different
#### means.
    def initialize(self, runit=False):
        """Reset assembler and machine state: PC, counters, symbol tables,
        registers, and memory (which re-assembles the OS image)."""
        self.filename = ""
        self.debug = False
        self.meta = False
        self.warn = True
        self.noop_error = True
        self.source = {}
        self.cycle = 0
        self.orig = 0x3000
        self.line_count = 1
        self.set_pc(HEX(0x3000))
        self.cont = True
        self.suspended = False
        self.instruction_count = 0
        # Per-mnemonic bit masks for the immediate field widths.
        self.immediate_mask = {}
        for im in self.immediate:
            self.immediate_mask[im] = (1 << self.immediate[im]) - 1
        self.instructions = self.instruction_info.keys()
        self.regs = dict(('R%1i' % r, r) for r in range(8))
        self.labels = {}
        self.label_location = {}
        self.register = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0}
        self.reset_memory(runit=runit) # assembles OS
        self.reset_registers()
    def reset_memory(self, filename=None, runit=False):
        """Zero all 64K words, re-assemble the OS image (get_os), optionally
        run it from x0200, then restore PC/ORIG to x3000.

        NOTE(review): `filename` is accepted but unused here.
        """
        text = get_os()
        debug = self.debug
        self.debug = self.meta
        self.memory = array('i', [0] * (1 << 16))
        self.breakpoints = {}
        # We reset these items here and below because of
        # bug (related to hack in interpreter?)
        self.assemble(text)
        self.debug = debug
        if runit:
            self.set_pc(0x0200)
            self.run()
        self.source = {}
        self.labels = {}
        self.label_location = {}
        self.set_pc(0x3000)
        self.orig = HEX(0x3000)
        self.line_count = 1
    def reset_registers(self):
        """Zero R0-R7 and the condition codes, silencing debug tracing
        unless meta-tracing is on."""
        debug = self.debug
        self.debug = self.meta
        for i in range(8):
            self.set_register(i, 0)
        self.set_nzp(0)
        self.debug = debug
    def set_nzp(self, value):
        """Set condition codes (N, Z, P) from a 16-bit result value."""
        self.nzp = (int(value & (1 << 15) > 0),
                    int(value == 0),
                    int((value & (1 << 15) == 0) and value != 0))
        if self.debug:
            self.Print("    NZP <=", self.get_nzp())
    def get_nzp(self, register=None):
        """Return the (N, Z, P) tuple, or one flag when `register` is 0/1/2."""
        if register is not None:
            value = self.nzp[register]
        else:
            value = self.nzp
        if self.meta:
            if register:
                self.Print("    %s => %s" % ("NZP"[register], lc_hex(value)))
            else:
                self.Print("    NZP => %s" % (lc_hex(value), ))
        return value
    def get_pc(self):
        return self.pc
    def set_pc(self, value):
        self.pc = HEX(value)
        if self.debug:
            self.Print("    PC <= %s" % lc_hex(value))
    def increment_pc(self, value=1):
        self.set_pc(self.get_pc() + value)
    def get_register(self, position):
        value = self.register[position]
        if self.meta:
            self.Print("    R%d => %s" % (position, lc_hex(value)))
        return value
    def set_register(self, position, value):
        self.register[position] = value
        if self.debug:
            self.Print("    R%d <= %s" % (position, lc_hex(value)))
    def set_instruction(self, location, n, line):
        """
        Put n into memory[location], truncated to 16 bits so it is
        represented correctly.
        """
        self.set_memory(location, lc_bin(n))
    def get_memory(self, location):
        value = self.memory[location]
        if self.meta:
            self.Print("    memory[%s] => %s" % (lc_hex(location), lc_hex(value)))
        return value
    def set_memory(self, location, value):
        self.memory[location] = value
        if self.debug:
            self.Print("    memory[%s] <= %s" % (lc_hex(location), lc_hex(value)))
    def memory_tofile(self, start, stop, f):
        # Raw dump of a memory slice to an open binary file.
        self.memory[start:stop].tofile(f)
    def memory_byteswap(self):
        # Flip endianness of every word (used for .obj file I/O).
        self.memory.byteswap()
#### End of overridden methods
def make_label(self, label):
return label.replace(":", "").upper()
def in_range(self, n, bits):
"""
Is n in range? -2**bits <= n < 2**bits
"""
return -(1 << (bits-1)) <= n < (1 << (bits-1))
def get_mem_str(self, loc):
return 'x{0:04X}: {1:016b} {1:04x} '.format(loc, self.get_memory(loc))
#def reg(self, s, n=1):
# return self.registers[s.rstrip(', ')] << self.reg_pos[n]
def undefined(self, data):
raise ValueError('Undefined Instruction: "%s"' % data)
def valid_label(self, word):
if word[0] == 'x' and word[1].isdigit():
return False
return (word[0].isalpha() and
all(c.isalpha() or c.isdigit() or c in ['_', ':'] for c in word))
    def bitwise_and(self, value, mask):
        """Truncate an immediate to its field width, warning (via Error)
        when bits outside the field would be lost."""
        if value >= 0:
            if (value & ~mask) and self.warn:
                self.Error("Warning: Possible overflow of immediate: %s at line %s\n" % (value, self.source.get(self.get_pc(), "unknown")))
        else:
            # For negatives, only the magnitude bits below the sign bit count.
            if (-value & ~(mask >> 1)) and self.warn:
                self.Error("Warning: Possible overflow of immediate: %s at line %s\n" % (value, self.source.get(self.get_pc(), "unknown")))
        return (value & mask)
    def get_immediate(self, word, mask=0xFFFF):
        """Parse an immediate token (xHEX, #DECIMAL, or bare decimal),
        truncated to `mask` bits; returns None when the token is not a
        number (i.e. probably a label)."""
        # NOTE(review): the character class below includes 'g', so a token
        # like 'xg' passes this test and then raises ValueError in int() —
        # confirm whether that is intended.
        if (word.startswith('x') and
            all(n in '-0123456789abcdefgABCDEF' for n in word[1:])):
            if word[1] == "-":
                raw = (-int('0x' + word[2:], 0))
                return self.bitwise_and(raw, mask)
            else:
                raw = int('0' + word, 0)
                return self.bitwise_and(raw, mask)
        elif word.startswith('#'):
            if word[1] == "-":
                raw = -int(word[2:])
                return self.bitwise_and(raw, mask)
            else:
                raw = int(word[1:])
                return self.bitwise_and(raw, mask)
        else:
            try:
                if word[0] == "-":
                    raw = -int(word[1:])
                    return self.bitwise_and(raw, mask)
                else:
                    raw = int(word)
                    return self.bitwise_and(raw, mask)
            except ValueError:
                # could be a label
                return
    def set_assembly_mode(self, mode):
        """Repurpose the reserved opcode 1101 for one extension set:
        'SHIFT', 'TERMINAL', or 'GRAPHICS' (selected by the .SET MODE
        directive), rewiring both the assembler tables and the
        interpreter dispatch."""
        # clear out spare instructions
        for key in list(self.instruction_info.keys()):
            if (self.instruction_info[key] >> 12) == 0b1101:
                del self.instruction_info[key]
        for key in list(self.immediate.keys()):
            if (self.immediate[key] >> 12) == 0b1101:
                del self.immediate[key]
        # Add new instructions
        ## SHIFT DST, SRC, immed6
        ## TERMINAL SRC, 0/1=clear
        if mode == "SHIFT":
            # assembler:
            self.instruction_info["SHIFT"] = 0b1101 << 12
            self.immediate["SHIFT"] = 6
            # interpreter:
            self.cycles[0b1101] = 5 + 1
            self.apply[0b1101] = self.SHIFT
            self.format[0b1101] = self.SHIFT_format
        elif mode == "TERMINAL":
            # assembler:
            self.instruction_info["TERMINAL"] = 0b1101 << 12
            self.immediate["TERMINAL"] = 8
            # interpreter:
            self.cycles[0b1101] = 10 + 1
            self.apply[0b1101] = self.TERMINAL
            self.format[0b1101] = self.TERMINAL_format
        elif mode == "GRAPHICS":
            # assembler:
            self.instruction_info["CLEAR"] = (0b1101 << 12) + (0b010 << 3)
            self.instruction_info["GETCUR"] = (0b1101 << 12) + (0b100 << 3)
            self.instruction_info["SETCUR"] = (0b1101 << 12) + (0b101 << 3)
            self.instruction_info["POKE"] = (0b1101 << 12) + (0b001 << 3)
            self.instruction_info["PEEK"] = (0b1101 << 12) + (0b000 << 3)
            self.immediate["SCREEN"] = 0
            self.immediate["CLEAR"] = 0
            self.immediate["GETCUR"] = 0
            self.immediate["SETCUR"] = 0
            self.immediate["POKE"] = 0
            self.immediate["PEEK"] = 0
            # interpreter:
            self.cycles[0b1101] = 5 + 1
            self.apply[0b1101] = self.SCREEN
            self.format[0b1101] = self.SCREEN_format
        else:
            raise ValueError("Invalid .SET MODE, '%s'. Use 'GRAPHICS', 'SHIFT', or 'TERMINAL'" % mode)
        # Rebuild the derived tables to pick up the new mnemonics.
        self.instructions = self.instruction_info.keys()
        self.immediate_mask = {}
        for im in self.immediate:
            self.immediate_mask[im] = (1 << self.immediate[im]) - 1
    def process_instruction(self, words, line_count, line):
        """
        Assemble one source line (already split into `words`) at the
        current PC.

        Handles raw binary/hex words, directives (.FILL, .ORIG, .STRINGZ,
        .STRINGC, .BLKW, .SET), labels, and regular instructions.  Label
        references that cannot be resolved yet are recorded in
        self.label_location for the assembler's second pass.
        """
        self.source[self.get_pc()] = line_count
        found = ''
        alltogether = "".join(words)
        alltogether1 = "".join(words[1:])
        if not words or words[0].startswith(';'):
            # Blank line or pure comment.
            return
        elif is_bin(alltogether):
            ## Allow:
            ## 0001 000 000 0 00000
            ## 0001000000000000
            inst = eval("0b" + alltogether)
            self.set_instruction(self.get_pc(), inst, line_count)
            self.increment_pc()
            self.dump_mode = "dump"
            return
        elif len(words) > 1 and is_bin(alltogether1) and not self.is_keyword(words[0]):
            ## Allow:
            ## LABEL 0001000000000000
            self.labels[self.make_label(words[0])] = self.get_pc()
            inst = eval("0b" + alltogether1)
            self.set_instruction(self.get_pc(), inst, line_count)
            self.increment_pc()
            self.dump_mode = "dump"
            return
        elif len(words) == 1 and is_hex(words[0]):
            ## Allow:
            ## x10F4
            inst = eval("0" + words[0])
            self.set_instruction(self.get_pc(), inst, line_count)
            self.increment_pc()
            self.dump_mode = "dump"
            return
        elif len(words) == 2 and is_hex(words[1]) and not self.is_keyword(words[0]):
            ## Allow:
            ## LABEL x2045
            self.labels[self.make_label(words[0])] = self.get_pc()
            inst = eval("0" + words[1])
            self.set_instruction(self.get_pc(), inst, line_count)
            self.increment_pc()
            self.dump_mode = "dump"
            return
        elif '.FILL' in words:
            # .FILL: store a literal, immediate, or (deferred) label value.
            word = words[words.index('.FILL') + 1]
            try:
                self.set_instruction(self.get_pc(), int(word), line_count)
            except ValueError:
                value = self.get_immediate(word)
                if value is None:
                    label = self.make_label(word)
                    if label in self.label_location:
                        self.label_location[label].append([self.get_pc(), 0xFFFF, -1])
                    else:
                        self.label_location[label] = [[self.get_pc(), 0xFFFF, -1]]
                else:
                    self.set_memory(self.get_pc(), lc_bin(value))
            if words[0] != '.FILL':
                self.labels[self.make_label(words[0])] = self.get_pc()
            self.increment_pc()
            return
        elif '.ORIG' in [word.upper() for word in words]:
            # .ORIG: set the load origin and reset per-program state.
            self.set_pc(int('0' + words[1]
                            if words[1].startswith('x')
                            else words[1], 0))
            self.orig = self.get_pc()
            self.breakpoints = {}
            self.line_count = 0
            self.dump_mode = "dis"
            self.reset_registers()
            return
        elif '.STRINGZ' in words:
            # .STRINGZ: one word per character, zero-terminated.
            if self.valid_label(words[0]):
                self.labels[self.make_label(words[0])] = self.get_pc()
            #else:
            # no label... could be a block of .STRINGZ
            # raise ValueError('No label for .STRINGZ in line for PC = %s: %s, line #%s' % (lc_hex(self.get_pc()), line, line_count))
            s = line.split('"')
            string1 = string = s[1]
            # rejoin if " inside quotes
            for st in s[2:]:
                if string.endswith('\\'):
                    string += '"' + st
            # encode backslash to get special characters
            backslash = False
            for c in string:
                if not backslash:
                    if c == '\\':
                        if not backslash:
                            backslash = True
                            continue
                    m = ord(c)
                else:
                    if c in 'nblr':
                        m = ord(c) - 100
                    else:
                        # easiest to implement:
                        # anything else escaped is itself (unlike Python)
                        m = ord(c)
                    backslash = False
                self.set_instruction(self.get_pc(), m, line_count)
                self.increment_pc()
            self.set_instruction(self.get_pc(), 0, line_count)
            self.increment_pc()
            return
        elif '.STRINGC' in words:
            # .STRINGC: packed string, two characters per word, zero-terminated.
            if self.valid_label(words[0]):
                self.labels[self.make_label(words[0])] = self.get_pc()
            #else:
            # no label... could be a block of .STRINGZ
            # raise ValueError('No label for .STRINGZ in line for PC = %s: %s, line #%s' % (lc_hex(self.get_pc()), line, line_count))
            s = line.split('"')
            string1 = string = s[1]
            # rejoin if " inside quotes
            for st in s[2:]:
                if string.endswith('\\'):
                    string += '"' + st
            # encode backslash to get special characters
            backslash = False
            count = 1
            last_m = None
            for c in string:
                if not backslash:
                    if c == '\\':
                        if not backslash:
                            backslash = True
                            continue
                    m = ord(c)
                else:
                    if c in 'nblr':
                        m = ord(c) - 100
                    else:
                        # easiest to implement:
                        # anything else escaped is itself (unlike Python)
                        m = ord(c)
                    backslash = False
                if count % 2 == 0:
                    self.set_instruction(self.get_pc(), m << 8 | last_m, line_count)
                    self.increment_pc()
                else:
                    last_m = m
                count += 1
            if count % 2 == 0:
                self.set_instruction(self.get_pc(), last_m, line_count)
                self.increment_pc()
            self.set_instruction(self.get_pc(), 0, line_count)
            self.increment_pc()
            return
        elif '.BLKW' in words:
            # .BLKW: reserve `value` words.
            self.labels[self.make_label(words[0])] = self.get_pc()
            value = self.get_immediate(words[-1])
            if value is None or value <= 0:
                raise ValueError('Bad .BLKW immediate: "%s", %r' % (words[-1], value))
            self.increment_pc(value)
            return
        elif '.SET' == words[0]:
            if words[1] == "MODE":
                self.set_assembly_mode(words[2])
            return
        # -------------------------------------------------------------
        # Regular instruction (possibly preceded by a label).
        self.dump_mode = "dis"
        ind = -1
        if words[0].startswith('BR'):
            ind = 0
        elif words[1:] and words[1].startswith('BR'):
            ind = 1
        if ind >= 0 and len(words[ind]) <= 5:
            # Fold the n/z/p suffix of a BR variant into the flag bits.
            if all(c in self.flags for c in words[ind][2:].lower()):
                fl = 0
                # BR means BRnzp
                if words[ind] == 'BR':
                    words[ind] = 'BRnzp'
                for f in words[ind][2:].lower():
                    fl |= self.flags[f]
                words[ind] = 'BR'
        if words[0].upper() in self.instructions:
            found = words[0].upper()
        else:
            if self.valid_label(words[0]):
                self.labels[self.make_label(words[0])] = self.get_pc()
            else:
                raise ValueError('Invalid label "%s" in source line "%s", line #: %s' % (words[0], line, line_count))
            if len(words) < 2:
                return
            found = words[1] if words[1] in self.instructions else ''
        if not found:
            word = words[0]
            if len(words) > 1:
                raise ValueError('Not an instruction: "%s"' % line)
            else:
                if self.valid_label(word):
                    if self.make_label(word) in self.label_location:
                        # FIXME: ? is this same as .FILL?
                        self.label_location[self.make_label(word)].append([self.get_pc(), 0xFFFF, 16])
                    else:
                        self.label_location[self.make_label(word)] = [[self.get_pc(), 0xFFFF, 16]]
                else:
                    raise ValueError('Invalid label: "%r", line: %s' % (word, line))
            return
        try:
            instruction = self.instruction_info[found]
        except KeyError:
            raise ValueError('Unknown: instruction "%s"' % found)
        else:
            if found == 'BR':
                instruction |= fl
        # Fill in operand fields: registers, immediates, or deferred labels.
        r = rc = 0
        rc += found == 'JMPT'
        if found in self.special_reg_pos:
            reg_pos = self.special_reg_pos[found]
        else:
            reg_pos = self.reg_pos
        for word in words[1:]:
            word = word.rstrip(',')
            if word in self.regs:
                if found == "JMP":
                    t = self.regs[word] << 6
                else:
                    t = self.regs[word] << reg_pos[rc]
                r |= t
                rc += 1
            else:
                value = self.get_immediate(word, self.immediate_mask[found])
                if value is not None:
                    instruction |= value
                    # set the immediate bit in ADD and AND instruction:
                    if found in ('ADD', 'AND'):
                        instruction |= 1 << 5
                elif word != found:
                    if self.valid_label(word):
                        if self.make_label(word) in self.label_location:
                            self.label_location[self.make_label(word)].append([self.get_pc(), self.immediate_mask[found], self.immediate[found]])
                        else:
                            self.label_location[self.make_label(word)] = [[self.get_pc(), self.immediate_mask[found], self.immediate[found]]]
                    else:
                        raise ValueError('Invalid label: "%r", line: %s' % (word, line))
            instruction |= r
            if found == 'JMPT':
                break
        self.set_instruction(self.get_pc(), instruction, line_count)
        self.increment_pc()
def is_keyword(self, s):
return (s in self.instruction_info.keys() or
s.startswith(".") or
s in ["GETC", "OUT", "PUTS", "IN", "PUTSP", "HALT"])
    def assemble(self, code):
        """Two-pass assembly of `code` into memory.

        Pass 1 translates each line at the running PC and records
        unresolved label uses; pass 2 patches each recorded reference
        with the label's address (or PC-relative offset for offset fields).
        """
        self.source = {}
        self.labels = {}
        self.label_location = {}
        # processing the lines
        # first pass:
        for line in code.splitlines():
            # remove comments
            ## FIXME: can't do like this! Need a real parser:
            orig_line, line = line, line.split(';')[0]
            # add space after comma to make sure registers are space separated also (not with strings)
            if '"' not in line:
                line = line.replace(',', ', ')
            # drop comments
            words = (line.split()) if ';' in line else line.split()
            if '.END' in words:
                break
            self.process_instruction(words, self.line_count, line)
            self.line_count += 1
        # second pass:
        for label, value in self.label_location.items():
            if label not in self.labels:
                raise ValueError('Bad label: "%s"' % label)
            else:
                for ref, mask, bits in value:
                    # PC-relative offset from the referencing instruction.
                    current = self.labels[label] - ref - 1
                    # kludge for absolute addresses,
                    # but seems correct for some code (lc3os.asm)
                    if self.get_memory(ref) == 0: # not instruction -> absolute
                        self.set_memory(ref, self.labels[label])
                    elif bits != -1 and not self.in_range(current, bits) :
                        raise ValueError(("Not an instruction: \"%s\", mask %s, offset %s, %s, ref %s" %
                                          (label,
                                           bin(mask),
                                           self.labels[label] - ref,
                                           bin(self.labels[label]),
                                           lc_hex(ref))))
                    else:
                        # FIXME:
                        # Sets memory to value of label
                        # ref, mask, bits: [x4000, 511, 9], [x4000, FFFF, -1]
                        # where label was used in instruction
                        # requires init memory first
                        if bits == -1:
                            self.set_memory(ref, self.labels[label])
                        else:
                            self.set_memory(ref,
                                            plus(self.get_memory(ref),
                                                 lc_bin(mask & current)))
    def handleDebug(self, lineno):
        # Hook for debugger integration; intentionally a no-op here.
        pass
    def Print(self, *args, end="\n"):
        # Console output hook; subclasses may redirect this.
        print(*args, end=end)
    def Error(self, string):
        """Route an error message to the host kernel if present, else stderr."""
        if self.kernel:
            self.kernel.Error(string)
        else:
            sys.stderr.write(string)
    def run(self, reset=True):
        """Execute instructions from the current PC until halted (HALT trap,
        breakpoint, or self.cont cleared).

        reset: when True, zero the cycle/instruction counters and mark the
        display (xFE04) and keyboard (xFE00) status registers ready.
        """
        if reset:
            self.cycle = 0
            self.instruction_count = 0
            self.set_memory(0xFE04, 0xFFFF) ## OS_DSR Display Ready
            self.set_memory(0xFE00, 0xFFFF) ## OS_KBSR Keyboard Ready
        self.cont = True
        self.suspended = False
        if self.debug:
            self.Print("Tracing Script! PC* is incremented Program Counter")
            self.Print("(Instr/Cycles Count) INSTR [source line] (PC*: xHEX)")
            self.Print("----------------------------------------------------")
        while self.cont:
            self.step()
    def step(self):
        """Fetch, decode, and execute one instruction; update cycle and
        instruction counters and stop at breakpoints."""
        if self.debug:
            self.Print("=" * 60)
            self.Print("Stepping...  => read, <= write, (Instructions/Cycles):")
            self.Print("=" * 60)
        pc = self.get_pc()
        self.handleDebug(self.source.get(pc, -1))
        instruction = self.get_memory(pc)
        # Opcode is the top 4 bits.
        instr = (instruction >> 12) & 0xF
        self.instruction_count += 1
        self.cycle += self.cycles[instr]
        self.increment_pc()
        if self.debug:
            line = self.source.get(pc, -1)
            line_str = (" [%s]" % line) if (line != -1) else ""
            self.Print("(%s/%s) %s%s (%s*: %s)" % (
                self.instruction_count,
                self.cycle,
                self.format[instr](instruction, pc),
                line_str,
                lc_hex(self.get_pc()),
                lc_hex(instruction)))
        #if (instr in self.apply):
        self.apply[instr](instruction)
        if self.pc in self.breakpoints:
            self.cont = False
            self.suspended = True
            self.Print("...breakpoint hit at", lc_hex(self.pc))
    def dump_registers(self):
        """Print PC, the NZP flags, and R0-R7 (four registers per row)."""
        self.Print()
        self.Print("=" * 60)
        self.Print("Registers:")
        self.Print("=" * 60)
        self.Print("PC:", lc_hex(self.get_pc()))
        for r,v in zip("NZP", self.get_nzp()):
            self.Print("%s: %s" % (r,v), end=" ")
        self.Print()
        count = 1
        for key in range(8):
            self.Print("R%d: %s" % (key, lc_hex(self.get_register(key))), end=" ")
            if count % 4 == 0:
                self.Print()
            count += 1
    def dump(self, orig_start=None, orig_stop=None, raw=False, header=True):
        """Print a memory region, either as raw hex words or disassembled.

        Defaults to [self.orig, last assembled address]; the span is clamped
        to at most 100 words and at least 10.
        """
        if orig_start is None:
            start = self.orig
        else:
            start = orig_start
        if orig_stop is None:
            stop = max(self.source.keys()) + 1
        else:
            stop = orig_stop + 1
        if stop <= start:
            stop = start + 10
        if stop - start > 100:
            stop = start + 100
        if raw or self.dump_mode == "dump":
            if header:
                self.Print("=" * 60)
                self.Print("Memory dump:")
                self.Print("=" * 60)
            for x in range(start, stop):
                self.Print("%-10s %s: %s" % ("", lc_hex(x), lc_hex(self.get_memory(x))))
        else:
            if header:
                self.Print("=" * 60)
                self.Print("Memory disassembled:")
                self.Print("=" * 60)
            for memory in range(start, stop):
                instruction = self.get_memory(memory)
                instr = (instruction >> 12) & 0xF
                label = self.lookup(memory, "")
                if label:
                    label = label + ":"
                instr_str = self.source.get(memory, "")
                if instr_str:
                    self.Print("%-10s %s: %s %-41s [line: %s]" % (
                        label, lc_hex(memory), lc_hex(instruction),
                        self.format[instr](instruction, memory), instr_str))
                else:
                    # Word without source: show it as data (possibly ASCII).
                    if instruction == 0:
                        ascii = "\\0"
                    else:
                        ascii = "%s %s" % (instruction, ascii_str(instruction))
                    self.Print("%-10s %s: %s - %s" % (
                        label, lc_hex(memory), lc_hex(instruction), ascii))
    def disassemble(self):
        """Print the assembled program back out as LC-3 source
        (.ORIG ... .END), resolving addresses to labels where possible."""
        start = min(self.source.keys())
        stop = max(self.source.keys()) + 1
        self.Print(".ORIG %s " % lc_hex(start))
        for memory in range(start, stop):
            instruction = self.get_memory(memory)
            instr = (instruction >> 12) & 0xF
            label = self.lookup(memory, "")
            if label:
                label = label + ":"
            self.Print("%-10s %s" % (label, self.format[instr](instruction, memory)))
        self.Print(".END")
def lookup(self, location, default=None):
for label in self.labels:
if self.labels[label] == location:
return label
if default is None:
return location
else:
return default
    def STR(self, instruction):
        """Store Register: memory[BASE + SEXT(offset6)] <- SRC."""
        src = (instruction & 0b0000111000000000) >> 9
        base = (instruction & 0b0000000111000000) >> 6
        offset6 = instruction & 0b0000000000111111
        self.set_memory(plus(self.get_register(base), sext(offset6, 6)),
                        self.get_register(src))
def STR_format(self, instruction, location):
src = (instruction & 0b0000111000000000) >> 9
base = (instruction & 0b0000000111000000) >> 6
offset6 = instruction & 0b0000000000111111
return "STR R%d, R%d, %s" % (src, base, offset6)
    def RTI(self, instruction):
        """Return from interrupt: pop PC and PSR from the stack pointed to
        by R6; raises in user mode (PSR bit 15 set).

        NOTE(review): self.psr is read here but its initialization is not
        visible in this file section — confirm it is set before RTI runs.
        """
        if (self.psr & 0b1000000000000000):
            raise ValueError("priviledge mode exception")
        else:
            self.set_pc(self.get_memory(self.get_register(6))) # R6 is the SSP
            self.set_register(6, lc_bin(plus(self.get_register(6), 1)))
            temp = self.get_memory(self.get_register(6))
            self.set_register(6, lc_bin(plus(self.get_register(6), 1)))
            self.psr = temp
    def RTI_format(self, instruction, location):
        # RTI has no operands.
        return "RTI"
    def NOT(self, instruction):
        """Bitwise complement: DST <- NOT SRC; sets condition codes."""
        dst = (instruction & 0b0000111000000000) >> 9
        src = (instruction & 0b0000000111000000) >> 6
        self.set_register(dst, lc_bin(~self.get_register(src)))
        self.set_nzp(self.get_register(dst))
def NOT_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
src = (instruction & 0b0000000111000000) >> 6
return "NOT R%d, R%d" % (dst, src)
    def LDI(self, instruction):
        """Load Indirect: DST <- memory[memory[PC + SEXT(offset9)]];
        sets condition codes."""
        dst = (instruction & 0b0000111000000000) >> 9
        pc_offset9 = instruction & 0b0000000111111111
        location = plus(self.get_pc(), sext(pc_offset9, 9))
        memory1 = self.get_memory(location)
        memory2 = self.get_memory(memory1)
        if self.debug:
            self.Print("      Reading memory[x%04x] (x%04x) =>" % (location, memory1))
            self.Print("      Reading memory[x%04x] (x%04x) =>" % (memory1, memory2))
        self.set_register(dst, memory2)
        self.set_nzp(self.get_register(dst))
def LDI_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
return "LDI R%d, %s" % (dst, lc_hex(self.lookup(plus(sext(pc_offset9,9), location) + 1)))
def STI(self, instruction):
src = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
memory = self.get_memory(plus(self.get_pc(), sext(pc_offset9,9)))
self.set_memory(memory, self.get_register(src))
## Hook up, side effect display:
if memory == 0xFE06: ## OS_DDR
try:
self.Print(chr(self.get_register(src)), end="")
except:
raise ValueError("value in R%d (%s) is not in range 0-255 (x00-xFF)" % (src, lc_hex(self.get_register(src))))
def STI_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
return "STI R%d, %s" % (dst, lc_hex(self.lookup(plus(sext(pc_offset9,9), location) + 1)))
def RESERVED(self, instruction):
raise ValueError("attempt to execute reserved instruction")
def RESERVED_format(self, instruction, location):
return ";; RESERVED %s %s" % (lc_hex((instruction >> 12) & 0xF),
lc_hex(instruction & 0b0000111111111111))
def LEA(self, instruction):
dst = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
self.set_register(dst, lc_bin(plus(self.get_pc(), sext(pc_offset9,9))))
self.set_nzp(self.get_register(dst))
def LEA_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
return "LEA R%d, %s" % (dst, lc_hex(self.lookup(plus(sext(pc_offset9,9), location) + 1)))
def getc(self):
### No prompt for input:
if len(self.char_buffer) == 0:
data = self.kernel.raw_input()
data = data.replace("\\n", "\n")
if len(data) == 0:
self.char_buffer = [0] # end of string
elif len(data) == 1:
self.char_buffer = [ord(char) for char in data] # single char mode
else:
self.char_buffer = [ord(char) for char in data] + [0]
return self.char_buffer.pop(0)
    def TRAP(self, instruction):
        """Execute TRAP: save return address in R7 and jump through the
        trap vector table, with simulator-side hooks for the standard
        service routines (GETC fills the keyboard data register, HALT
        stops the run loop; the other vectors run purely in LC-3 code).

        Raises ValueError for a vector outside x20-x25.
        """
        vector = instruction & 0b0000000011111111
        self.set_register(7, self.get_pc())           # return address
        self.set_pc(self.get_memory(vector))          # jump into the OS routine
        if vector == 0x20:
            # GETC hook: place the next input char in OS_KBDR (xFE02)
            self.set_memory(0xFE02, self.getc())
        elif vector == 0x21:
            pass
        elif vector == 0x22: # PUTS
            pass
        elif vector == 0x23:
            pass
        elif vector == 0x24: # PUTSP
            pass
        elif vector == 0x25:
            self.cont = False                          # HALT the run loop
        else:
            raise ValueError("invalid TRAP vector: %s" % lc_hex(vector))
def TRAP_format(self, instruction, location):
vector = instruction & 0b0000000011111111
if vector == 0x20:
return "GETC"
elif vector == 0x21:
return "OUT"
elif vector == 0x22:
return "PUTS"
elif vector == 0x23:
return "IN"
elif vector == 0x24:
return "PUTSP"
elif vector == 0x25:
return "HALT"
else:
return ";; Invalid TRAP vector: %s" % lc_hex(vector)
    def BR(self, instruction):
        """Conditional branch on the NZP condition codes.

        An instruction with all three condition bits clear is a NOOP;
        depending on settings it is raised as an error or reported as a
        warning, since executing one usually means the PC ran into data.
        """
        n = instruction & 0b0000100000000000
        z = instruction & 0b0000010000000000
        p = instruction & 0b0000001000000000
        pc_offset9 = instruction & 0b0000000111111111
        if (not any([n, z, p])):
            # no condition bits: this encodes a NOOP, not a branch
            if self.noop_error:
                raise Exception("Attempting to execute NOOP at %s\n" % lc_hex(self.get_pc() - 1))
            elif self.warn:
                self.Error("Attempting to execute NOOP at %s\n" % lc_hex(self.get_pc() - 1))
        # branch taken when any requested condition code is currently set
        if (n and self.get_nzp(0) or
            z and self.get_nzp(1) or
            p and self.get_nzp(2)):
            self.set_pc(plus(self.get_pc(), sext(pc_offset9,9)))
            if self.debug:
                self.Print("    True - branching to", lc_hex(self.get_pc()))
        else:
            if self.debug:
                self.Print("    False - continuing...")
def BR_format(self, instruction, location):
n = instruction & 0b0000100000000000
z = instruction & 0b0000010000000000
p = instruction & 0b0000001000000000
pc_offset9 = instruction & 0b0000000111111111
instr = "BR"
if n:
instr += "n"
if z:
instr += "z"
if p:
instr += "p"
val = self.lookup(plus(sext(pc_offset9,9), location) + 1)
ascii = ascii_str(pc_offset9)
if not (n or z or p):
return "NOOP - (no BR to %s) %s" % (lc_hex(val), ascii)
else:
return "%s %s %s" % (instr, lc_hex(val), ascii)
def LD(self, instruction):
dst = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
location = plus(self.get_pc(), sext(pc_offset9,9))
memory = self.get_memory(location)
if self.debug:
self.Print(" Reading memory[x%04x] (x%04x) =>" % (location, memory))
self.set_register(dst, memory)
self.set_nzp(self.get_register(dst))
def LD_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
return "LD R%d, %s" % (dst, lc_hex(self.lookup(plus(sext(pc_offset9,9), location) + 1)))
def LDR(self, instruction):
dst = (instruction & 0b0000111000000000) >> 9
base = (instruction & 0b0000000111000000) >> 6
offset6 = instruction & 0b0000000000111111
location = plus(self.get_register(base), sext(offset6,6))
memory = self.get_memory(location)
if self.debug:
self.Print(" Reading memory[x%04x] (x%04x) =>" % (location, memory))
self.set_register(dst, memory)
self.set_nzp(self.get_register(dst))
def LDR_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
base = (instruction & 0b0000000111000000) >> 6
offset6 = instruction & 0b0000000000111111
return "LDR R%d, R%d, %s" % (dst, base, offset6)
def ST(self, instruction):
src = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
self.set_memory(plus(self.get_pc(), sext(pc_offset9,9)), self.get_register(src))
def ST_format(self, instruction, location):
src = (instruction & 0b0000111000000000) >> 9
pc_offset9 = instruction & 0b0000000111111111
return "ST R%d, %s" % (src, lc_hex(self.lookup(plus(sext(pc_offset9,9), location) + 1)))
def JMP(self, instruction):
base = (instruction & 0b0000000111000000) >> 6
self.set_pc(self.get_register(base))
def JMP_format(self, instruction, location):
base = (instruction & 0b0000000111000000) >> 6
if base == 7:
return "RET"
else:
return "JMP R%d" % base
    def JSR(self, instruction):
        """Jump to subroutine: save the return address in R7 and jump,
        either PC-relative (JSR, bit 11 set) or through a base register
        (JSRR).

        The return address is captured *before* the jump and written to R7
        *after* it, so JSRR R7 still jumps through the old R7 value.
        """
        temp = self.get_pc()  # return address
        if (instruction & 0b0000100000000000): # JSR
            pc_offset11 = instruction & 0b0000011111111111
            self.set_pc(plus(self.get_pc(), sext(pc_offset11,11)))
        else: # JSRR
            base = (instruction & 0b0000000111000000) >> 6
            self.set_pc(self.get_register(base))
        self.set_register(7, temp)
def JSR_format(self, instruction, location):
if (instruction & 0b0000100000000000): # JSR
pc_offset11 = instruction & 0b0000011111111111
return "JSR %s" % lc_hex(self.lookup(plus(sext(pc_offset11,11), location) + 1))
else: # JSRR
base = (instruction & 0b0000000111000000) >> 6
return "JSRR R%d" % base
def ADD(self, instruction):
dst = (instruction & 0b0000111000000000) >> 9
sr1 = (instruction & 0b0000000111000000) >> 6
if (instruction & 0b0000000000100000) == 0:
sr2 = instruction & 0b0000000000000111
self.set_register(dst, lc_bin(plus(self.get_register(sr1),
self.get_register(sr2))))
else:
imm5 = instruction & 0b0000000000011111
self.set_register(dst, lc_bin(plus(self.get_register(sr1), sext(imm5, 5))))
self.set_nzp(self.get_register(dst))
def ADD_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
sr1 = (instruction & 0b0000000111000000) >> 6
if (instruction & 0b0000000000100000):
imm5 = instruction & 0b0000000000011111
return "ADD R%d, R%d, #%s" % (dst, sr1, lc_int(sext(imm5, 5)))
else:
sr2 = instruction & 0b0000000000000111
return "ADD R%d, R%d, R%d" % (dst, sr1, sr2)
def AND(self, instruction):
dst = (instruction & 0b0000111000000000) >> 9
sr1 = (instruction & 0b0000000111000000) >> 6
if (instruction & 0b0000000000100000) == 0:
sr2 = instruction & 0b0000000000000111
self.set_register(dst, self.get_register(sr1) & self.get_register(sr2))
else:
imm5 = instruction & 0b0000000000011111
self.set_register(dst, self.get_register(sr1) & sext(imm5, 5))
self.set_nzp(self.get_register(dst))
def AND_format(self, instruction, location):
dst = (instruction & 0b0000111000000000) >> 9
sr1 = (instruction & 0b0000000111000000) >> 6
if (instruction & 0b0000000000100000):
imm5 = instruction & 0b0000000000011111
return "AND R%d, R%d, #%s" % (dst, sr1, lc_int(sext(imm5, 5)))
else:
sr2 = instruction & 0b0000000000000111
return "AND R%d, R%d, R%d" % (dst, sr1, sr2)
    def SHIFT(self, instruction):
        """Execute SHIFT DST, SRC, imm6 (custom extension); sets NZP.

        A negative immediate shifts right; the truncated result is
        re-sign-extended from the remaining width (16 + imm6 bits) so the
        shift is arithmetic.  A non-negative immediate shifts left.
        """
        ## SHIFT DST, SRC, immed6
        dst = (instruction & 0b0000111000000000) >> 9
        src = (instruction & 0b0000000111000000) >> 6
        imm6 = lc_int(sext(instruction & 0b0000000000111111,6))  # signed shift count
        if imm6 < 0: # arithmetic shift right preserves sign
            value = sext(self.get_register(src) >> -imm6, 16 + imm6)
            self.set_register(dst, value)
        else:
            self.set_register(dst, self.get_register(src) << imm6)
        self.set_nzp(self.get_register(dst))
    def TERMINAL(self, instruction):
        """Execute TERMINAL SRC, clear (custom extension).

        Walks a packed string (two characters per word, low byte first)
        starting at the address held in SRC, stopping at a zero word, and
        displays it through the kernel as preformatted HTML, optionally
        clearing the previous output first (bit 0 of the instruction).
        """
        ## TERMINAL SRC, 1
        src = (instruction & 0b0000111000000000) >> 9
        clear = (instruction & 0b0000000000000001)
        string = ""
        location = self.get_register(src)
        memory = self.get_memory(location)
        while memory != 0:
            string += chr(memory & 0b0000000011111111)       # low byte
            if memory & 0b1111111100000000:                  # high byte, if present
                string += chr((memory & 0b1111111100000000) >> 8)
            location += 1
            memory = self.get_memory(location)
        self.kernel.Display(HTML("<pre>" + string + "</pre>"), clear_output=clear)
def TERMINAL_format(self, instruction):
## TERMINAL SRC, 1
src = (instruction & 0b0000111000000000) >> 9
clear = (instruction & 0b0000000100000000)
return "TERMINAL R%d, %d" % (src, clear)
def SHIFT_format(self, instruction, location):
## SHIFT DST, SRC, DIR, immed4
dst = (instruction & 0b0000111000000000) >> 9
src = (instruction & 0b0000000111000000) >> 6
imm6 = instruction & 0b0000000000111111
return "SHIFT R%d, R%d, #%s" % (dst, src, lc_int(sext(imm6, 6)))
    def screen_set_cursor(self, x, y):
        # Stub: subclasses backed by a real display override this.
        pass
    def screen_get_cursor(self):
        # Stub: reports a fixed origin; overridden by real displays.
        return 0,0
    def screen_clear(self):
        # Stub: no-op without a real display.
        pass
    def screen_poke(self, x, y, value):
        # Stub: write a character cell; no-op here.
        pass
    def screen_peek(self, x, y):
        # Stub: read a character cell; always 0 here.
        return 0
    def SCREEN(self, instruction):
        """Execute SCREEN (custom extension): dispatch on bits 5..3 to
        CLEAR, SETCUR/GETCUR, or POKE/PEEK, delegating to the screen_*
        hooks.

        Note: ``&`` binds looser than ``<<``, so ``instruction & 0b010 << 3``
        tests ``instruction & 0b010000`` -- i.e. each check below probes a
        single bit of the 3-bit subopcode field.
        """
        ## SCREEN
        if (instruction & 0b010 << 3): # CLEAR
            #CLEAR:  SCREEN ...,...,010,...
            self.screen_clear()
        elif (instruction & 0b100 << 3): # cursor
            #GETCUR: SCREEN Rx , Ry,100,...
            #SETCUR: SCREEN Rx , Ry,101,...
            if (instruction & 0b001 << 3): # setcur
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                self.screen_set_cursor(self.get_register(rx), self.get_register(ry))
            else:
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                x, y = self.screen_get_cursor()
                self.set_register(rx, x)
                self.set_register(ry, y)
        else: # peek/poke
            #POKE:   SCREEN Rx , Ry,001, RSRC
            #PEEK:   SCREEN Rx , Ry,000, RDST
            if (instruction & 0b001 << 3): # poke
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                src = (instruction & 0b0000000000000111)
                self.screen_poke(self.get_register(rx), self.get_register(ry), self.get_register(src))
            else:
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                dst = (instruction & 0b0000000000000111)
                self.set_register(dst, self.screen_peek(self.get_register(rx), self.get_register(ry)))
    def SCREEN_format(self, instruction, location):
        """Disassemble a SCREEN instruction; mirrors the dispatch in
        SCREEN() (same single-bit subopcode tests, see note there)."""
        ## SCREEN
        if (instruction & 0b010 << 3): # CLEAR
            #CLEAR:  SCREEN ...,...,010,...
            return "CLEAR"
        elif (instruction & 0b100 << 3): # cursor
            #GETCUR: SCREEN Rx , Ry,100,...
            #SETCUR: SCREEN Rx , Ry,101,...
            if (instruction & 0b001 << 3): # setcur
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                return "SETCUR R%d, R%d" % (rx, ry)
            else:
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                return "GETCUR R%d, R%d" % (rx, ry)
        else: # peek/poke
            #POKE:   SCREEN Rx , Ry,001, RSRC
            #PEEK:   SCREEN Rx , Ry,000, RDST
            if (instruction & 0b001 << 3): # poke
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                src = (instruction & 0b0000000000000111)
                return "POKE R%d, R%d, R%d" % (rx, ry, src)
            else:
                rx = (instruction & 0b0000111000000000) >> 9
                ry = (instruction & 0b0000000111000000) >> 6
                dst = (instruction & 0b0000000000000111)
                return "PEEK R%d, R%d, R%d" % (rx, ry, dst)
def load(self, filename):
self.filename = filename
fp = open(filename)
text = "".join(fp.readlines())
fp.close()
return text
    def execute_file(self, filename):
        """Assemble the program in *filename*, run it from its .ORIG, and
        print a completion/suspension banner plus the final registers."""
        text = self.load(filename)
        self.assemble(text)
        self.set_pc(self.orig)          # start at the program's origin
        self.cycle = 0                  # reset the performance counters
        self.instruction_count = 0
        self.run()
        if self.suspended:
            self.Print("=" * 60)
            self.Print("Computation SUSPENDED")
            self.Print("=" * 60)
        else:
            self.Print("=" * 60)
            self.Print("Computation completed")
            self.Print("=" * 60)
        self.Print("Instructions:", self.instruction_count)
        self.Print("Cycles: %s (%f milliseconds)" %
                   (self.cycle, self.cycle * 1./2000000))
        self.dump_registers()
    def save(self, base):
        """Write the assembled program to ``base``.sym / .bin / .obj.

        .sym is a commented symbol table, .bin is one 16-bit binary word
        per line (origin first), and .obj is the raw big-endian object
        image a simulator can load.
        """
        # producing output
        # symbol list for Simulators
        with open(base + '.sym', 'w') as f:
            self.Print('''//Symbol Name        Page Address
            //----------------    ------------
            //''', end='\t', file=f)
            self.Print('\n//\t'.join('\t%-20s%4x' % (name, value)
                                for name, value in self.labels.items()), file=f)
        with open(base + '.bin', 'w') as f:
            self.Print('{0:016b}'.format(self.orig), file=f)     # orig address
            self.Print('\n'.join('{0:016b}'.format(self.get_memory(m)) for m in range(self.orig, self.get_pc())),
                  file=f)
        # object file for running in Simulator
        with open(base + '.obj', 'wb') as f:
            #do slice from right after code and write
            #(byteorder of 0 does not matter)
            # stash the origin word just past the code so it can be emitted first
            self.set_memory(self.get_pc(), self.orig)
            self.memory_byteswap()      # to big-endian for the file...
            self.memory_tofile(self.get_pc(), self.get_pc() + 1, f)
            self.memory_tofile(self.orig,self.get_pc(), f)
            self.memory_byteswap()      # ...and back for the simulator
    def execute(self, text):
        """Handle one cell of interactive input: either a %-prefixed magic
        directive (dump, regs, step, breakpoints, exe/cont, ...) or LC-3
        assembly source to assemble.

        Returns True/False for handled/failed; NOTE(review): the
        ``%bp clear`` branch returns None via a bare ``return``, unlike
        every other directive -- confirm whether that is intentional.
        """
        words = [word.strip() for word in text.split()]
        if words[0].startswith("%"):
            if words[0] == "%dump":
                try:
                    # remaining words are hex addresses ("0"+word tolerates "")
                    self.dump(*[int("0" + word, 16) for word in words[1:]], raw=True)
                except:  # NOTE(review): bare except hides real errors
                    self.Error("Error; did you load code first?")
                return True
            elif words[0] == "%regs":
                try:
                    self.dump_registers()
                except:
                    self.Error("Error; did you load code first?")
                return True
            elif words[0] == "%dis":
                try:
                    self.dump(*[int("0" + word, 16) for word in words[1:]])
                except:
                    self.Error("Error; did you run load first?")
                return True
            elif words[0] == "%d":
                # toggle instruction-trace debugging
                self.debug = not self.debug
                self.Print("Debug is now %s" % ["off", "on"][int(self.debug)])
                return True
            elif words[0] == "%noop":
                # toggle whether executing a NOOP raises or merely warns
                self.noop_error = not self.noop_error
                self.Print("NOOP is now %s" % ["a warning", "an error"][int(self.noop_error)])
                return True
            elif words[0] == "%pc":
                self.cycle = 0
                self.instruction_count = 0
                self.set_pc(int("0" + words[1], 16))
                self.dump_registers()
                return True
            elif words[0] == "%labels":
                print("Label", "Location")
                for key in self.labels:
                    print(key + ":", hex(self.labels[key]))
                print(self.label_location)
                return True
            elif words[0] == "%mem":
                # %mem <hex-address> <hex-value>
                location = int("0" + words[1], 16)
                self.set_memory(location, int("0" + words[2], 16))
                self.dump(location, location)
                return True
            elif words[0] == "%reg":
                # %reg <register-number> <hex-value>
                self.set_register(int(words[1]), int("0" + words[2], 16))
                self.dump_registers()
                return True
            elif words[0] == "%warn":
                self.warn = bool(int(words[1]))
                return True
            elif words[0] == "%reset":
                self.initialize(runit=True)
                self.dump_registers()
                return True
            elif words[0] == "%step":
                # single-step with tracing forced on, then restore
                orig_debug = self.debug
                self.debug = True
                if self.get_pc() in self.source:
                    lineno = self.source[self.get_pc()]
                    ## show trace
                self.step()
                self.debug = orig_debug
                self.dump_registers()
                return True
            elif words[0] == "%bp":
                # %bp [clear | <hex-address>]; with no argument just lists
                if len(words) > 1:
                    if words[1] == "clear":
                        self.breakpoints = {}
                        self.Print("All breakpoints cleared")
                        return
                    location = int("0" + words[1], 16)
                    self.breakpoints[location] = True
                if self.breakpoints:
                    count = 1
                    self.Print("=" * 60)
                    self.Print("Breakpoints")
                    self.Print("=" * 60)
                    for memory in sorted(self.breakpoints.keys()):
                        self.Print("    %d) " % count, end="")
                        self.dump(memory, memory, header=False)
                        count += 1
                else:
                    self.Print("   No breakpoints set")
                return True
            elif words[0] == "%exe" or words[0] == "%cont":
                # %exe restarts from .ORIG with fresh state; %cont resumes
                ok = False
                try:
                    # if .orig in code, then run, otherwise just assemble:
                    self.debug = False
                    if words[0] == "%exe":
                        self.char_buffer = []
                        self.cycle = 0
                        self.instruction_count = 0
                        self.set_pc(self.orig)
                        self.reset_registers()
                        self.run()
                    else:
                        self.run(reset=False)
                    if self.suspended:
                        self.Print("=" * 60)
                        self.Print("Computation SUSPENDED")
                        self.Print("=" * 60)
                    else:
                        self.Print("=" * 60)
                        self.Print("Computation completed")
                        self.Print("=" * 60)
                    self.Print("Instructions:", self.instruction_count)
                    self.Print("Cycles: %s (%f milliseconds)" %
                               (self.cycle, self.cycle * 1./2000000))
                    self.dump_registers()
                    ok = True
                except Exception as exc:
                    # PC has advanced past the faulting instruction, hence -1
                    if self.get_pc() - 1 in self.source:
                        self.Error("\nRuntime error:\n    line %s:\n%s" %
                                   (self.source[self.get_pc() - 1], str(exc)))
                    else:
                        self.Error("\nRuntime error:\n    memory %s\n%s" %
                                   (lc_hex(self.get_pc() - 1), str(exc)))
                    ok = False
                return ok
            else:
                self.Error("Invalid Interactive Magic Directive\nHint: %help")
                return False
        else:
            ### Else, must be code to assemble:
            self.labels = {}
            self.label_location = {}
            ok = False
            try:
                self.assemble(text)
                self.Print("Assembled! Use %dis or %dump to examine; use %exe to run.")
                #self.dump()
                #self.dump_registers()
                ok = True
            except Exception as exc:
                if self.get_pc() - 1 in self.source:
                    self.Error("\nAssemble error\n    line %s\n" %
                               self.source[self.get_pc()])
                else:
                    self.Error("\nAssemble error\n    memory %s\n" %
                               lc_hex(self.get_pc() - 1))
                self.Error(str(exc) + "\n")
                ok = False
            return ok
def get_os():
return """
;##############################################################################
;#
;# lc3os.asm -- the LC-3 operating system
;#
;# "Copyright (c) 2003 by Steven S. Lumetta."
;#
;# Permission to use, copy, modify, and distribute this software and its
;# documentation for any purpose, without fee, and without written
;# agreement is hereby granted, provided that the above copyright notice
;# and the following two paragraphs appear in all copies of this software,
;# that the files COPYING and NO_WARRANTY are included verbatim with
;# any distribution, and that the contents of the file README are included
;# verbatim as part of a file named README with any distribution.
;#
;# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT,
;# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
;# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE AUTHOR
;# HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;#
;# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
;# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;# A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
;# BASIS, AND THE AUTHOR NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT,
;# UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
;#
;# Author: Steve Lumetta
;# Version: 1
;# Creation Date: 18 October 2003
;# Filename: lc3os.asm
;# History:
;# SSL 1 18 October 2003
;# Copyright notices and Gnu Public License marker added.
;#
;##############################################################################
.ORIG x0000
; the TRAP vector table
.FILL BAD_TRAP ; x00
.FILL BAD_TRAP ; x01
.FILL BAD_TRAP ; x02
.FILL BAD_TRAP ; x03
.FILL BAD_TRAP ; x04
.FILL BAD_TRAP ; x05
.FILL BAD_TRAP ; x06
.FILL BAD_TRAP ; x07
.FILL BAD_TRAP ; x08
.FILL BAD_TRAP ; x09
.FILL BAD_TRAP ; x0A
.FILL BAD_TRAP ; x0B
.FILL BAD_TRAP ; x0C
.FILL BAD_TRAP ; x0D
.FILL BAD_TRAP ; x0E
.FILL BAD_TRAP ; x0F
.FILL BAD_TRAP ; x10
.FILL BAD_TRAP ; x11
.FILL BAD_TRAP ; x12
.FILL BAD_TRAP ; x13
.FILL BAD_TRAP ; x14
.FILL BAD_TRAP ; x15
.FILL BAD_TRAP ; x16
.FILL BAD_TRAP ; x17
.FILL BAD_TRAP ; x18
.FILL BAD_TRAP ; x19
.FILL BAD_TRAP ; x1A
.FILL BAD_TRAP ; x1B
.FILL BAD_TRAP ; x1C
.FILL BAD_TRAP ; x1D
.FILL BAD_TRAP ; x1E
.FILL BAD_TRAP ; x1F
.FILL TRAP_GETC ; x20
.FILL TRAP_OUT ; x21
.FILL TRAP_PUTS ; x22
.FILL TRAP_IN ; x23
.FILL TRAP_PUTSP ; x24
.FILL TRAP_HALT ; x25
.FILL BAD_TRAP ; x26
.FILL BAD_TRAP ; x27
.FILL BAD_TRAP ; x28
.FILL BAD_TRAP ; x29
.FILL BAD_TRAP ; x2A
.FILL BAD_TRAP ; x2B
.FILL BAD_TRAP ; x2C
.FILL BAD_TRAP ; x2D
.FILL BAD_TRAP ; x2E
.FILL BAD_TRAP ; x2F
.FILL BAD_TRAP ; x30
.FILL BAD_TRAP ; x31
.FILL BAD_TRAP ; x32
.FILL BAD_TRAP ; x33
.FILL BAD_TRAP ; x34
.FILL BAD_TRAP ; x35
.FILL BAD_TRAP ; x36
.FILL BAD_TRAP ; x37
.FILL BAD_TRAP ; x38
.FILL BAD_TRAP ; x39
.FILL BAD_TRAP ; x3A
.FILL BAD_TRAP ; x3B
.FILL BAD_TRAP ; x3C
.FILL BAD_TRAP ; x3D
.FILL BAD_TRAP ; x3E
.FILL BAD_TRAP ; x3F
.FILL BAD_TRAP ; x40
.FILL BAD_TRAP ; x41
.FILL BAD_TRAP ; x42
.FILL BAD_TRAP ; x43
.FILL BAD_TRAP ; x44
.FILL BAD_TRAP ; x45
.FILL BAD_TRAP ; x46
.FILL BAD_TRAP ; x47
.FILL BAD_TRAP ; x48
.FILL BAD_TRAP ; x49
.FILL BAD_TRAP ; x4A
.FILL BAD_TRAP ; x4B
.FILL BAD_TRAP ; x4C
.FILL BAD_TRAP ; x4D
.FILL BAD_TRAP ; x4E
.FILL BAD_TRAP ; x4F
.FILL BAD_TRAP ; x50
.FILL BAD_TRAP ; x51
.FILL BAD_TRAP ; x52
.FILL BAD_TRAP ; x53
.FILL BAD_TRAP ; x54
.FILL BAD_TRAP ; x55
.FILL BAD_TRAP ; x56
.FILL BAD_TRAP ; x57
.FILL BAD_TRAP ; x58
.FILL BAD_TRAP ; x59
.FILL BAD_TRAP ; x5A
.FILL BAD_TRAP ; x5B
.FILL BAD_TRAP ; x5C
.FILL BAD_TRAP ; x5D
.FILL BAD_TRAP ; x5E
.FILL BAD_TRAP ; x5F
.FILL BAD_TRAP ; x60
.FILL BAD_TRAP ; x61
.FILL BAD_TRAP ; x62
.FILL BAD_TRAP ; x63
.FILL BAD_TRAP ; x64
.FILL BAD_TRAP ; x65
.FILL BAD_TRAP ; x66
.FILL BAD_TRAP ; x67
.FILL BAD_TRAP ; x68
.FILL BAD_TRAP ; x69
.FILL BAD_TRAP ; x6A
.FILL BAD_TRAP ; x6B
.FILL BAD_TRAP ; x6C
.FILL BAD_TRAP ; x6D
.FILL BAD_TRAP ; x6E
.FILL BAD_TRAP ; x6F
.FILL BAD_TRAP ; x70
.FILL BAD_TRAP ; x71
.FILL BAD_TRAP ; x72
.FILL BAD_TRAP ; x73
.FILL BAD_TRAP ; x74
.FILL BAD_TRAP ; x75
.FILL BAD_TRAP ; x76
.FILL BAD_TRAP ; x77
.FILL BAD_TRAP ; x78
.FILL BAD_TRAP ; x79
.FILL BAD_TRAP ; x7A
.FILL BAD_TRAP ; x7B
.FILL BAD_TRAP ; x7C
.FILL BAD_TRAP ; x7D
.FILL BAD_TRAP ; x7E
.FILL BAD_TRAP ; x7F
.FILL BAD_TRAP ; x80
.FILL BAD_TRAP ; x81
.FILL BAD_TRAP ; x82
.FILL BAD_TRAP ; x83
.FILL BAD_TRAP ; x84
.FILL BAD_TRAP ; x85
.FILL BAD_TRAP ; x86
.FILL BAD_TRAP ; x87
.FILL BAD_TRAP ; x88
.FILL BAD_TRAP ; x89
.FILL BAD_TRAP ; x8A
.FILL BAD_TRAP ; x8B
.FILL BAD_TRAP ; x8C
.FILL BAD_TRAP ; x8D
.FILL BAD_TRAP ; x8E
.FILL BAD_TRAP ; x8F
.FILL BAD_TRAP ; x90
.FILL BAD_TRAP ; x91
.FILL BAD_TRAP ; x92
.FILL BAD_TRAP ; x93
.FILL BAD_TRAP ; x94
.FILL BAD_TRAP ; x95
.FILL BAD_TRAP ; x96
.FILL BAD_TRAP ; x97
.FILL BAD_TRAP ; x98
.FILL BAD_TRAP ; x99
.FILL BAD_TRAP ; x9A
.FILL BAD_TRAP ; x9B
.FILL BAD_TRAP ; x9C
.FILL BAD_TRAP ; x9D
.FILL BAD_TRAP ; x9E
.FILL BAD_TRAP ; x9F
.FILL BAD_TRAP ; xA0
.FILL BAD_TRAP ; xA1
.FILL BAD_TRAP ; xA2
.FILL BAD_TRAP ; xA3
.FILL BAD_TRAP ; xA4
.FILL BAD_TRAP ; xA5
.FILL BAD_TRAP ; xA6
.FILL BAD_TRAP ; xA7
.FILL BAD_TRAP ; xA8
.FILL BAD_TRAP ; xA9
.FILL BAD_TRAP ; xAA
.FILL BAD_TRAP ; xAB
.FILL BAD_TRAP ; xAC
.FILL BAD_TRAP ; xAD
.FILL BAD_TRAP ; xAE
.FILL BAD_TRAP ; xAF
.FILL BAD_TRAP ; xB0
.FILL BAD_TRAP ; xB1
.FILL BAD_TRAP ; xB2
.FILL BAD_TRAP ; xB3
.FILL BAD_TRAP ; xB4
.FILL BAD_TRAP ; xB5
.FILL BAD_TRAP ; xB6
.FILL BAD_TRAP ; xB7
.FILL BAD_TRAP ; xB8
.FILL BAD_TRAP ; xB9
.FILL BAD_TRAP ; xBA
.FILL BAD_TRAP ; xBB
.FILL BAD_TRAP ; xBC
.FILL BAD_TRAP ; xBD
.FILL BAD_TRAP ; xBE
.FILL BAD_TRAP ; xBF
.FILL BAD_TRAP ; xC0
.FILL BAD_TRAP ; xC1
.FILL BAD_TRAP ; xC2
.FILL BAD_TRAP ; xC3
.FILL BAD_TRAP ; xC4
.FILL BAD_TRAP ; xC5
.FILL BAD_TRAP ; xC6
.FILL BAD_TRAP ; xC7
.FILL BAD_TRAP ; xC8
.FILL BAD_TRAP ; xC9
.FILL BAD_TRAP ; xCA
.FILL BAD_TRAP ; xCB
.FILL BAD_TRAP ; xCC
.FILL BAD_TRAP ; xCD
.FILL BAD_TRAP ; xCE
.FILL BAD_TRAP ; xCF
.FILL BAD_TRAP ; xD0
.FILL BAD_TRAP ; xD1
.FILL BAD_TRAP ; xD2
.FILL BAD_TRAP ; xD3
.FILL BAD_TRAP ; xD4
.FILL BAD_TRAP ; xD5
.FILL BAD_TRAP ; xD6
.FILL BAD_TRAP ; xD7
.FILL BAD_TRAP ; xD8
.FILL BAD_TRAP ; xD9
.FILL BAD_TRAP ; xDA
.FILL BAD_TRAP ; xDB
.FILL BAD_TRAP ; xDC
.FILL BAD_TRAP ; xDD
.FILL BAD_TRAP ; xDE
.FILL BAD_TRAP ; xDF
.FILL BAD_TRAP ; xE0
.FILL BAD_TRAP ; xE1
.FILL BAD_TRAP ; xE2
.FILL BAD_TRAP ; xE3
.FILL BAD_TRAP ; xE4
.FILL BAD_TRAP ; xE5
.FILL BAD_TRAP ; xE6
.FILL BAD_TRAP ; xE7
.FILL BAD_TRAP ; xE8
.FILL BAD_TRAP ; xE9
.FILL BAD_TRAP ; xEA
.FILL BAD_TRAP ; xEB
.FILL BAD_TRAP ; xEC
.FILL BAD_TRAP ; xED
.FILL BAD_TRAP ; xEE
.FILL BAD_TRAP ; xEF
.FILL BAD_TRAP ; xF0
.FILL BAD_TRAP ; xF1
.FILL BAD_TRAP ; xF2
.FILL BAD_TRAP ; xF3
.FILL BAD_TRAP ; xF4
.FILL BAD_TRAP ; xF5
.FILL BAD_TRAP ; xF6
.FILL BAD_TRAP ; xF7
.FILL BAD_TRAP ; xF8
.FILL BAD_TRAP ; xF9
.FILL BAD_TRAP ; xFA
.FILL BAD_TRAP ; xFB
.FILL BAD_TRAP ; xFC
.FILL BAD_TRAP ; xFD
.FILL BAD_TRAP ; xFE
.FILL BAD_TRAP ; xFF
; the interrupt vector table
.FILL INT_PRIV ; x00
.FILL INT_ILL ; x01
.FILL BAD_INT ; x02
.FILL BAD_INT ; x03
.FILL BAD_INT ; x04
.FILL BAD_INT ; x05
.FILL BAD_INT ; x06
.FILL BAD_INT ; x07
.FILL BAD_INT ; x08
.FILL BAD_INT ; x09
.FILL BAD_INT ; x0A
.FILL BAD_INT ; x0B
.FILL BAD_INT ; x0C
.FILL BAD_INT ; x0D
.FILL BAD_INT ; x0E
.FILL BAD_INT ; x0F
.FILL BAD_INT ; x10
.FILL BAD_INT ; x11
.FILL BAD_INT ; x12
.FILL BAD_INT ; x13
.FILL BAD_INT ; x14
.FILL BAD_INT ; x15
.FILL BAD_INT ; x16
.FILL BAD_INT ; x17
.FILL BAD_INT ; x18
.FILL BAD_INT ; x19
.FILL BAD_INT ; x1A
.FILL BAD_INT ; x1B
.FILL BAD_INT ; x1C
.FILL BAD_INT ; x1D
.FILL BAD_INT ; x1E
.FILL BAD_INT ; x1F
.FILL BAD_INT ; x20
.FILL BAD_INT ; x21
.FILL BAD_INT ; x22
.FILL BAD_INT ; x23
.FILL BAD_INT ; x24
.FILL BAD_INT ; x25
.FILL BAD_INT ; x26
.FILL BAD_INT ; x27
.FILL BAD_INT ; x28
.FILL BAD_INT ; x29
.FILL BAD_INT ; x2A
.FILL BAD_INT ; x2B
.FILL BAD_INT ; x2C
.FILL BAD_INT ; x2D
.FILL BAD_INT ; x2E
.FILL BAD_INT ; x2F
.FILL BAD_INT ; x30
.FILL BAD_INT ; x31
.FILL BAD_INT ; x32
.FILL BAD_INT ; x33
.FILL BAD_INT ; x34
.FILL BAD_INT ; x35
.FILL BAD_INT ; x36
.FILL BAD_INT ; x37
.FILL BAD_INT ; x38
.FILL BAD_INT ; x39
.FILL BAD_INT ; x3A
.FILL BAD_INT ; x3B
.FILL BAD_INT ; x3C
.FILL BAD_INT ; x3D
.FILL BAD_INT ; x3E
.FILL BAD_INT ; x3F
.FILL BAD_INT ; x40
.FILL BAD_INT ; x41
.FILL BAD_INT ; x42
.FILL BAD_INT ; x43
.FILL BAD_INT ; x44
.FILL BAD_INT ; x45
.FILL BAD_INT ; x46
.FILL BAD_INT ; x47
.FILL BAD_INT ; x48
.FILL BAD_INT ; x49
.FILL BAD_INT ; x4A
.FILL BAD_INT ; x4B
.FILL BAD_INT ; x4C
.FILL BAD_INT ; x4D
.FILL BAD_INT ; x4E
.FILL BAD_INT ; x4F
.FILL BAD_INT ; x50
.FILL BAD_INT ; x51
.FILL BAD_INT ; x52
.FILL BAD_INT ; x53
.FILL BAD_INT ; x54
.FILL BAD_INT ; x55
.FILL BAD_INT ; x56
.FILL BAD_INT ; x57
.FILL BAD_INT ; x58
.FILL BAD_INT ; x59
.FILL BAD_INT ; x5A
.FILL BAD_INT ; x5B
.FILL BAD_INT ; x5C
.FILL BAD_INT ; x5D
.FILL BAD_INT ; x5E
.FILL BAD_INT ; x5F
.FILL BAD_INT ; x60
.FILL BAD_INT ; x61
.FILL BAD_INT ; x62
.FILL BAD_INT ; x63
.FILL BAD_INT ; x64
.FILL BAD_INT ; x65
.FILL BAD_INT ; x66
.FILL BAD_INT ; x67
.FILL BAD_INT ; x68
.FILL BAD_INT ; x69
.FILL BAD_INT ; x6A
.FILL BAD_INT ; x6B
.FILL BAD_INT ; x6C
.FILL BAD_INT ; x6D
.FILL BAD_INT ; x6E
.FILL BAD_INT ; x6F
.FILL BAD_INT ; x70
.FILL BAD_INT ; x71
.FILL BAD_INT ; x72
.FILL BAD_INT ; x73
.FILL BAD_INT ; x74
.FILL BAD_INT ; x75
.FILL BAD_INT ; x76
.FILL BAD_INT ; x77
.FILL BAD_INT ; x78
.FILL BAD_INT ; x79
.FILL BAD_INT ; x7A
.FILL BAD_INT ; x7B
.FILL BAD_INT ; x7C
.FILL BAD_INT ; x7D
.FILL BAD_INT ; x7E
.FILL BAD_INT ; x7F
.FILL BAD_INT ; x80
.FILL BAD_INT ; x81
.FILL BAD_INT ; x82
.FILL BAD_INT ; x83
.FILL BAD_INT ; x84
.FILL BAD_INT ; x85
.FILL BAD_INT ; x86
.FILL BAD_INT ; x87
.FILL BAD_INT ; x88
.FILL BAD_INT ; x89
.FILL BAD_INT ; x8A
.FILL BAD_INT ; x8B
.FILL BAD_INT ; x8C
.FILL BAD_INT ; x8D
.FILL BAD_INT ; x8E
.FILL BAD_INT ; x8F
.FILL BAD_INT ; x90
.FILL BAD_INT ; x91
.FILL BAD_INT ; x92
.FILL BAD_INT ; x93
.FILL BAD_INT ; x94
.FILL BAD_INT ; x95
.FILL BAD_INT ; x96
.FILL BAD_INT ; x97
.FILL BAD_INT ; x98
.FILL BAD_INT ; x99
.FILL BAD_INT ; x9A
.FILL BAD_INT ; x9B
.FILL BAD_INT ; x9C
.FILL BAD_INT ; x9D
.FILL BAD_INT ; x9E
.FILL BAD_INT ; x9F
.FILL BAD_INT ; xA0
.FILL BAD_INT ; xA1
.FILL BAD_INT ; xA2
.FILL BAD_INT ; xA3
.FILL BAD_INT ; xA4
.FILL BAD_INT ; xA5
.FILL BAD_INT ; xA6
.FILL BAD_INT ; xA7
.FILL BAD_INT ; xA8
.FILL BAD_INT ; xA9
.FILL BAD_INT ; xAA
.FILL BAD_INT ; xAB
.FILL BAD_INT ; xAC
.FILL BAD_INT ; xAD
.FILL BAD_INT ; xAE
.FILL BAD_INT ; xAF
.FILL BAD_INT ; xB0
.FILL BAD_INT ; xB1
.FILL BAD_INT ; xB2
.FILL BAD_INT ; xB3
.FILL BAD_INT ; xB4
.FILL BAD_INT ; xB5
.FILL BAD_INT ; xB6
.FILL BAD_INT ; xB7
.FILL BAD_INT ; xB8
.FILL BAD_INT ; xB9
.FILL BAD_INT ; xBA
.FILL BAD_INT ; xBB
.FILL BAD_INT ; xBC
.FILL BAD_INT ; xBD
.FILL BAD_INT ; xBE
.FILL BAD_INT ; xBF
.FILL BAD_INT ; xC0
.FILL BAD_INT ; xC1
.FILL BAD_INT ; xC2
.FILL BAD_INT ; xC3
.FILL BAD_INT ; xC4
.FILL BAD_INT ; xC5
.FILL BAD_INT ; xC6
.FILL BAD_INT ; xC7
.FILL BAD_INT ; xC8
.FILL BAD_INT ; xC9
.FILL BAD_INT ; xCA
.FILL BAD_INT ; xCB
.FILL BAD_INT ; xCC
.FILL BAD_INT ; xCD
.FILL BAD_INT ; xCE
.FILL BAD_INT ; xCF
.FILL BAD_INT ; xD0
.FILL BAD_INT ; xD1
.FILL BAD_INT ; xD2
.FILL BAD_INT ; xD3
.FILL BAD_INT ; xD4
.FILL BAD_INT ; xD5
.FILL BAD_INT ; xD6
.FILL BAD_INT ; xD7
.FILL BAD_INT ; xD8
.FILL BAD_INT ; xD9
.FILL BAD_INT ; xDA
.FILL BAD_INT ; xDB
.FILL BAD_INT ; xDC
.FILL BAD_INT ; xDD
.FILL BAD_INT ; xDE
.FILL BAD_INT ; xDF
.FILL BAD_INT ; xE0
.FILL BAD_INT ; xE1
.FILL BAD_INT ; xE2
.FILL BAD_INT ; xE3
.FILL BAD_INT ; xE4
.FILL BAD_INT ; xE5
.FILL BAD_INT ; xE6
.FILL BAD_INT ; xE7
.FILL BAD_INT ; xE8
.FILL BAD_INT ; xE9
.FILL BAD_INT ; xEA
.FILL BAD_INT ; xEB
.FILL BAD_INT ; xEC
.FILL BAD_INT ; xED
.FILL BAD_INT ; xEE
.FILL BAD_INT ; xEF
.FILL BAD_INT ; xF0
.FILL BAD_INT ; xF1
.FILL BAD_INT ; xF2
.FILL BAD_INT ; xF3
.FILL BAD_INT ; xF4
.FILL BAD_INT ; xF5
.FILL BAD_INT ; xF6
.FILL BAD_INT ; xF7
.FILL BAD_INT ; xF8
.FILL BAD_INT ; xF9
.FILL BAD_INT ; xFA
.FILL BAD_INT ; xFB
.FILL BAD_INT ; xFC
.FILL BAD_INT ; xFD
.FILL BAD_INT ; xFE
.FILL BAD_INT ; xFF
OS_START ; machine starts executing at x0200
LEA R0,OS_START_MSG ; print a welcome message
PUTS
HALT
OS_START_MSG .STRINGZ "\\nWelcome to the LC-3 simulator.\\n\\nThe contents of the LC-3 tools distribution, including sources, management\\ntools, and data, are Copyright (c) 2003 Steven S. Lumetta.\\n\\nThe LC-3 tools distribution is free software covered by the GNU General\\nPublic License, and you are welcome to modify it and/or distribute copies\\nof it under certain conditions. The file COPYING (distributed with the\\ntools) specifies those conditions. There is absolutely no warranty for\\nthe LC-3 tools distribution, as described in the file NO_WARRANTY (also\\ndistributed with the tools).\\n\\nHave fun.\\n"
OS_KBSR .FILL xFE00
OS_KBDR .FILL xFE02
OS_DSR .FILL xFE04
OS_DDR .FILL xFE06
OS_MCR .FILL xFFFE
MASK_HI .FILL x7FFF
LOW_8_BITS .FILL x00FF
TOUT_R1 .BLKW 1
TIN_R7 .BLKW 1
OS_R0 .BLKW 1
OS_R1 .BLKW 1
OS_R2 .BLKW 1
OS_R3 .BLKW 1
OS_R7 .BLKW 1
TRAP_GETC
LDI R0,OS_KBSR ; wait for a keystroke
BRzp TRAP_GETC
LDI R0,OS_KBDR ; read it and return
RET
TRAP_OUT
ST R1,TOUT_R1 ; save R1
TRAP_OUT_WAIT
LDI R1,OS_DSR ; wait for the display to be ready
BRzp TRAP_OUT_WAIT
STI R0,OS_DDR ; write the character and return
LD R1,TOUT_R1 ; restore R1
RET
TRAP_PUTS
ST R0,OS_R0 ; save R0, R1, and R7
ST R1,OS_R1
ST R7,OS_R7
ADD R1,R0,#0 ; move string pointer (R0) into R1
TRAP_PUTS_LOOP
LDR R0,R1,#0 ; write characters in string using OUT
BRz TRAP_PUTS_DONE
OUT
ADD R1,R1,#1
BRnzp TRAP_PUTS_LOOP
TRAP_PUTS_DONE
LD R0,OS_R0 ; restore R0, R1, and R7
LD R1,OS_R1
LD R7,OS_R7
RET
TRAP_IN
ST R7,TIN_R7 ; save R7 (no need to save R0, since we
; overwrite later
LEA R0,TRAP_IN_MSG ; prompt for input
PUTS
GETC ; read a character
OUT ; echo back to monitor
ST R0,OS_R0 ; save the character
AND R0,R0,#0 ; write a linefeed, too
ADD R0,R0,#10
OUT
LD R0,OS_R0 ; restore the character
LD R7,TIN_R7 ; restore R7
RET
TRAP_PUTSP
; NOTE: This trap will end when it sees any NUL, even in
; packed form, despite the P&P second edition's requirement
; of a double NUL.
ST R0,OS_R0 ; save R0, R1, R2, R3, and R7
ST R1,OS_R1
ST R2,OS_R2
ST R3,OS_R3
ST R7,OS_R7
ADD R1,R0,#0 ; move string pointer (R0) into R1
TRAP_PUTSP_LOOP
LDR R2,R1,#0 ; read the next two characters
LD R0,LOW_8_BITS ; use mask to get low byte
AND R0,R0,R2 ; if low byte is NUL, quit printing
BRz TRAP_PUTSP_DONE
OUT ; otherwise print the low byte
AND R0,R0,#0 ; shift high byte into R0
ADD R3,R0,#8
TRAP_PUTSP_S_LOOP
ADD R0,R0,R0 ; shift R0 left
ADD R2,R2,#0 ; move MSB from R2 into R0
BRzp TRAP_PUTSP_MSB_0
ADD R0,R0,#1
TRAP_PUTSP_MSB_0
ADD R2,R2,R2 ; shift R2 left
ADD R3,R3,#-1
BRp TRAP_PUTSP_S_LOOP
ADD R0,R0,#0 ; if high byte is NUL, quit printing
BRz TRAP_PUTSP_DONE
OUT ; otherwise print the low byte
ADD R1,R1,#1 ; and keep going
BRnzp TRAP_PUTSP_LOOP
TRAP_PUTSP_DONE
LD R0,OS_R0 ; restore R0, R1, R2, R3, and R7
LD R1,OS_R1
LD R2,OS_R2
LD R3,OS_R3
LD R7,OS_R7
RET
TRAP_HALT
; an infinite loop of lowering OS_MCR's MSB
LEA R0,TRAP_HALT_MSG ; give a warning
PUTS
LDI R0,OS_MCR ; halt the machine
LD R1,MASK_HI
AND R0,R0,R1
STI R0,OS_MCR
HALT ;; BRnzp TRAP_HALT ; HALT again...
BAD_TRAP
; print an error message, then HALT
LEA R0,BAD_TRAP_MSG ; give an error message
PUTS
BRnzp TRAP_HALT ; execute HALT
; interrupts aren't really defined, since privilege doesn't
; quite work
INT_PRIV RTI
INT_ILL RTI
BAD_INT RTI
TRAP_IN_MSG .STRINGZ "\\nInput a character> "
TRAP_HALT_MSG .STRINGZ "\\n\\n--- halting the LC-3 ---\\n\\n"
BAD_TRAP_MSG .STRINGZ "\\n\\n--- undefined trap executed ---\\n\\n"
.END
"""
| true |
c6850d346c0bbf1368263b8514100fb7f064e8c2 | Python | mjepronk/euler-python | /problem20.py | UTF-8 | 172 | 2.84375 | 3 | [] | no_license | # vim: sw=4:ts=4:et:ai
from math import factorial
def main(n=100):
    """Return the sum of the decimal digits of n! (Project Euler problem 20)."""
    digits = str(factorial(n))
    return sum(map(int, digits))
# Bug fix: the guard compared __name__ against the empty string, so the
# script never printed anything when run directly; the conventional check
# is against '__main__'.
if __name__ == '__main__':
    print("Result: %i" % main())
| true |
80ab507197305726b100bd90acee6b8442d5f6a9 | Python | thorwhalen/ut | /ut/daf/struct.py | UTF-8 | 2,866 | 2.734375 | 3 | [
"MIT"
] | permissive | __author__ = 'thor'
import ut as ms
import pandas as pd
import ut.pcoll.order_conserving
from functools import reduce
class SquareMatrix(object):
    """A DataFrame-backed square matrix keyed by a pair of index columns.

    `index_vars` names the two columns acting as (row, column) labels; every
    remaining column is treated as a value column (`value_vars`).
    """
    def __init__(self, df, index_vars=None, sort=False):
        if isinstance(df, SquareMatrix):
            # NOTE(review): assigning to the local name `self` has no effect —
            # on this branch the new instance is left uninitialized.
            self = df.copy()
        elif isinstance(df, pd.DataFrame):
            self.df = df
            self.index_vars = index_vars
            # Value columns are all columns except the index columns,
            # with their original order preserved.
            self.value_vars = ms.pcoll.order_conserving.setdiff(
                list(self.df.columns), self.index_vars
            )
            self.df = self.df[self.index_vars + self.value_vars]
        else:
            raise NotImplementedError("This case hasn't been implemented yet")
        if sort:
            # NOTE(review): DataFrame.sort was removed in pandas >= 0.20;
            # modern pandas requires sort_values(by=...).
            self.df.sort(columns=self.index_vars, inplace=True)
    def copy(self):
        """Return a copy (copies the DataFrame; index_vars list is shared)."""
        return SquareMatrix(df=self.df.copy(), index_vars=self.index_vars)
    def transpose(self):
        """Swap the roles of the two index columns (matrix transpose)."""
        return SquareMatrix(
            df=self.df, index_vars=[self.index_vars[1], self.index_vars[0]]
        )
    def reflexive_mapreduce(self, map_fun, reduce_fun=None, broadcast_functions=True):
        """Self-join on the index columns (matrix-product style pairing),
        apply `map_fun` to each pair of matched values, then aggregate the
        groups with `reduce_fun`.  Scalar functions are broadcast over every
        value column when `broadcast_functions` is true.
        """
        # Pair rows whose second index equals another row's first index.
        df = self.df.merge(
            self.df,
            how='inner',
            left_on=self.index_vars[1],
            right_on=self.index_vars[0],
            suffixes=('', '_y'),
        )
        df[self.index_vars[1]] = df[self.index_vars[1] + '_y']
        df.drop(
            labels=[self.index_vars[0] + '_y', self.index_vars[1] + '_y'],
            axis=1,
            inplace=True,
        )
        if not isinstance(map_fun, dict) and broadcast_functions:
            map_fun = dict(list(zip(self.value_vars, [map_fun] * len(self.value_vars))))
        for k, v in map_fun.items():
            df[k] = v(df[k], df[k + '_y'])
        df.drop(labels=[x + '_y' for x in self.value_vars], axis=1, inplace=True)
        if not reduce_fun:
            reduce_fun = dict()
            # NOTE(review): `v` is bound late in this lambda, so every
            # reduce_fun entry reduces with the LAST map function; capturing
            # with a default argument (lambda x, v=v: ...) would fix it.
            for k, v in map_fun.items():
                reduce_fun[k] = lambda x: reduce(v, x)
        elif not isinstance(reduce_fun, dict) and broadcast_functions:
            reduce_fun = dict(
                list(zip(self.value_vars, [reduce_fun] * len(self.value_vars)))
            )
        df = df.groupby(self.index_vars).agg(reduce_fun).reset_index(drop=False)
        return SquareMatrix(df=df, index_vars=self.index_vars)
    def reverse_indices(self):
        """Return the two index column names in reversed order."""
        return [self.index_vars[1], self.index_vars[0]]
    def sort(self, **kwargs):
        """Return a sorted copy; kwargs may override the sort arguments."""
        kwargs = dict({'columns': self.index_vars}, **kwargs)
        sm = self.copy()
        # NOTE(review): DataFrame.sort was removed in pandas >= 0.20.
        sm.df = sm.df.sort(**kwargs)
        return sm
    def __str__(self):
        return self.df.__str__()
    def __repr__(self):
        return self.df.set_index(self.index_vars).__str__()
    def head(self, num_of_rows=5):
        return self.df.head(num_of_rows)
    def tail(self, num_of_rows=5):
        return self.df.tail(num_of_rows)
| true |
699c0b3aa9af299ab971d6580ef6fcf35e449d7e | Python | Mniharbanu/guvi | /code kata/positive.py | UTF-8 | 112 | 3.25 | 3 | [] | no_license | aaa=int(input())
# Classify the integer read above as Positive, Negative or Zero.
if aaa > 0:
    print("Positive")
elif aaa < 0:
    print("Negative")
else:
    print("Zero")
| true |
26b51fac6d96d5bb239868b5dbce620eefc14b4d | Python | ZoranPandovski/al-go-rithms | /math/Matrix multiplication/python/matrixmul.py | UTF-8 | 1,521 | 4.1875 | 4 | [
"CC0-1.0"
] | permissive | """ SQUARE Matrix Multiplication
Matrix multiplication is a binary operation that produces a matrix from two matrices.
For matrix multiplication, the number of columns in the first matrix must be
equal to the number of rows in the second matrix.
The resulting matrix, known as the matrix product, has the number of rows of the
first and the number of columns of the second matrix.
Let A be an m x n matrix and B an n x p matrix; then the matrix C, where
C = A.B,
is defined as c_ij = A_i1.B_1j + A_i2.B_2j + ... + A_in.B_nj
"""
import numpy as np #Importing Numpy Library
# Read the matrix dimension, then n comma-separated rows for A and for B.
n = int(input())
A = [[int(num) for num in input().split(',')] for _ in range(n)]
B = [[int(num) for num in input().split(',')] for _ in range(n)]

# np.mat / np.matrix is deprecated in modern NumPy; plain ndarrays combined
# with the @ operator compute the same matrix product.  The unused
# `from array import *` wildcard import was removed.
z = np.array(A) @ np.array(B)

def array_list(z):
    """Print one result row (a 1-D array) as comma-separated values."""
    print(*z.tolist(), sep=",")

for i in range(n):
    array_list(z[i])
"""
TEST CASE 1:
>>>
2
1,2
3,4
5,6
7,8
OUTPUT:
19,22
43,50
TEST CASE 2:
>>>
3
1,2,3
4,5,6
7,8,9
10,11,12
13,14,15
16,17,18
OUTPUT:
84,90,96
201,216,231
318,342,366
"""
| true |
341707b940695b2069d93e4c3e3fa346159db254 | Python | astrozot/imks | /imks/units_mcerp.py | UTF-8 | 4,075 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import mcerp
import mcerp.umath as umath
import math
import uncertainties
from .units import Value
from . import units
def umathdoc(f):
    "Decorator to copy the uncertainties.umath __doc__ string."
    # Bug fix: the old fallback `getattr(umath, name, {"__doc__": ""}).__doc__`
    # returned the *dict class* docstring when the name was missing (instance
    # attribute lookup falls through to the type), not the intended "".
    src = getattr(umath, f.__name__, None)
    f.__doc__ = src.__doc__ if src is not None else ""
    return f
@umathdoc
def ceil(x):
    # Unit-aware ceil: unwrap a Value, apply umath.ceil, re-wrap with its unit.
    if not isinstance(x, Value):
        return umath.ceil(x)
    return Value(umath.ceil(x.value), x.unit)
@umathdoc
def fabs(x):
    # Unit-aware absolute value: the unit is unchanged by fabs.
    if not isinstance(x, Value):
        return umath.fabs(x)
    return Value(umath.fabs(x.value), x.unit)
@umathdoc
def floor(x):
    # Unit-aware floor: unwrap a Value, apply umath.floor, re-wrap with its unit.
    if not isinstance(x, Value):
        return umath.floor(x)
    return Value(umath.floor(x.value), x.unit)
@umathdoc
def hypot(x, y):
    # Implemented via the module-level sqrt so that Value arguments keep
    # their units (x*x + y*y has squared units; sqrt halves them again).
    return sqrt(x*x + y*y)
@umathdoc
def pow(x, y):
    # Deliberately shadows the builtin pow; `**` dispatches to the operands'
    # own operator overloads, so unit-carrying operands handle themselves.
    return x**y
@umathdoc
def sqrt(x):
    # Unit-aware square root: the unit exponents are halved (unit / 2).
    if not isinstance(x, Value):
        return umath.sqrt(x)
    return Value(umath.sqrt(x.value), x.unit / 2)
def fraction(q, p):
    """Given Python integers `(p, q)`, return the fraction p/q."""
    # Plain numbers: build a dimensionless Value.
    if not (isinstance(q, Value) or isinstance(p, Value)):
        return Value(float(q) / float(p))
    # At least one operand carries units: divide values, subtract units.
    q1 = Value(q)
    p1 = Value(p)
    return Value(float(q1.value) / float(p1.value), q1.unit - p1.unit)
def mconvert(f):
    """Decorator for generic one-argument functions."""
    # Wrap f so that Value arguments are first checked/stripped of units via
    # check_pure(); plain numbers pass straight through.  The wrapped
    # function inherits f's docstring.
    g = lambda x: f(x.check_pure(f.__name__)) \
        if isinstance(x, Value) else f(x)
    g.__doc__ = f.__doc__
    return g
def ufloat(s):
    """Convert a number in the format 12.2+/-0.3 into a Normal distribution."""
    # Strings without any uncertainty notation are plain floats.
    has_error = "+/-" in s or "(" in s or u"±" in s
    if not has_error:
        return float(s)
    u = uncertainties.ufloat_fromstr(s)
    return mcerp.N(u.nominal_value, u.std_dev)
def ufloat_repr(self):
    # Render an uncertain variable according to the global units.showerrors
    # setting: 0 = bare mean; 1 = mean formatted to the error's precision but
    # with the "+/-..." part stripped; anything else = full mean+/-stddev.
    if units.showerrors == 0:
        return str(self.mean)
    elif units.showerrors == 1:
        u = uncertainties.ufloat(self.mean, math.sqrt(self.var))
        s = str(u)
        i = s.find(r"+/-")
        if i >= 0:
            return s[0:i]
        else:
            return s
    else:
        u = uncertainties.ufloat(self.mean, math.sqrt(self.var))
        return str(u)
def ufloat_repr_latex(self):
    """Return a LaTeX rendering of ufloat_repr(self) ("a +/- b" -> "$a \\pm b$")."""
    # Fix: use a raw string for "} \pm {" — in a plain literal "\p" is an
    # invalid escape sequence (SyntaxWarning on modern Python).  The runtime
    # text is unchanged.
    s = "${" + ufloat_repr(self).replace("+/-", r"} \pm {") + "}$"
    # Rewrite scientific notation: "1.5e10" becomes "1.5} \times 10^{10".
    # NOTE(review): this replaces every "e" in the string, which assumes the
    # representation contains no other letter "e".
    return s.replace("e", r"} \times 10^{")
######################################################################
# Load and unload functions
def load(namespace):
    """Load all math defined functions, using when appropriate modified versions."""
    globs = globals()
    # Distribution constructors exported from mcerp.
    names = ["Beta", "Bradford", "Burr", "ChiSquared", "Chi2", "Erf",
             "Erlang", "Exponential", "Exp", "ExtValueMax", "EVMax",
             "ExtValueMin", "EVMin", "Fisher", "F", "Gamma", "LogNormal",
             "LogN", "Normal", "N", "Pareto", "Pareto2", "PERT",
             "StudentT", "Triangular", "Tri", "Uniform", "Weibull", "Weib",
             "Bernoulli", "Bern", "Binomial", "B", "Geometric", "G",
             "Hypergeometric", "H", "Poisson", "Pois"]
    # Prefer this module's own override when one exists; otherwise wrap the
    # mcerp/umath original with mconvert so Value arguments are accepted.
    for name in names:
        if name[0] != '_':
            namespace[name] = globs.get(name, mconvert(getattr(mcerp, name)))
    names = dir(umath)
    for name in names:
        if name[0] != '_':
            namespace[name] = globs.get(name, mconvert(getattr(umath, name)))
    # Patch mcerp's display hooks so uncertain values honour units.showerrors.
    mcerp.UncertainVariable.__repr__ = mcerp.UncertainFunction.__repr__ = \
        ufloat_repr
    mcerp.UncertainVariable.__str__ = mcerp.UncertainFunction.__str__ = \
        ufloat_repr
    mcerp.UncertainVariable._repr_latex_ = \
        mcerp.UncertainFunction._repr_latex_ = ufloat_repr_latex
    namespace["fraction"] = fraction
    namespace["ufloat"] = ufloat
    namespace["pi"] = math.pi
    namespace["e"] = math.e
def unload(namespace):
    """Unload all math defined functions."""
    # Remove every public name that load() may have installed; names that
    # were never loaded are simply skipped.
    names = dir(mcerp) + dir(umath) + ["fraction", "ufloat", "pi", "e"]
    for name in names:
        if name.startswith('_'):
            continue
        try:
            del namespace[name]
        except KeyError:
            pass
| true |
74a58a86128dc0b4884dc38d002d2dcc34ab4e76 | Python | vdmklchv/simple_weather_retriever | /main.py | UTF-8 | 768 | 3.453125 | 3 | [] | no_license | import requests
import config
# The OpenWeatherMap API key is kept out of source control in config.py.
api_key = config.API_KEY
# Prompt for city names until the user types "quit".
while True:
    city = input("Enter a city. Enter quit to exit: ").lower()
    if city == "quit":
        break
    try:
        response_data = requests.get(f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}&units=metric").json()
        # The API reports unknown cities with cod == '404' plus a message field.
        if response_data['cod'] == '404':
            print(response_data['message'])
        else:
            print(f"Weather in {city.capitalize()} - {response_data['sys']['country']} now:")
            print(f"Current temperature is {response_data['main']['temp']:.0f} C")
            print(f"Feels like {response_data['main']['feels_like']:.0f} C")
    except TypeError:
        # NOTE(review): TypeError presumably catches malformed response
        # payloads; network errors (requests exceptions) are not handled here.
        print("Couldn't get weather data from remote server.")
print("Goodbye")
| true |
bdea9cf7dbe7c417d424012892e784b5a5a15c76 | Python | ParkJiSu28/Python_cote | /1419.py | UTF-8 | 224 | 3.203125 | 3 | [] | no_license | s = input()
# Count occurrences of the substring "love" in the input line read above.
tmp = list(s)
answer = 0
for i in range(len(tmp) - 3):
    # Compare the 4-character window directly instead of char by char.
    if tmp[i:i + 4] == ['l', 'o', 'v', 'e']:
        answer += 1
print(answer)
| true |
08659868f0e78ee296306397e563686d41f4dbb9 | Python | heiwushi/MyFlow | /myflow/optimizer.py | UTF-8 | 7,112 | 2.796875 | 3 | [] | no_license | import abc
import numpy as np
import functools
from myflow.ops import Tensor, Op, ones, zeros, add
from myflow.common import _GradientMode
from myflow.graph import Graph
class ApplyGradient(Op):
    '''
    Op that updates Variables using already-computed gradients.
    '''
    def __init__(self, compute_var_delta):
        '''
        :param compute_var_delta: maps gradient values to Variable update
            deltas; each concrete optimizer supplies its own implementation.
        '''
        self.op_name = "ApplyGradient"
        self.compute_var_delta = compute_var_delta
    def __call__(self, vars_gradients: dict, tensor_name=""):
        '''
        Return the "apply gradients" operation as a graph tensor node whose
        input nodes are the trainable Variables followed by their gradients.
        :param vars_gradients: dict mapping each trainable Variable node to
            that Variable's gradient node.
        :param tensor_name: optional name for the new tensor.
        :return: the new graph tensor node.
        '''
        new_tensor = super().__call__(tensor_name)
        vars = list(vars_gradients.keys())
        gradients = [vars_gradients[var] for var in vars]
        # Inputs are laid out as [var_1..var_n, grad_1..grad_n].
        new_tensor.input_tensors.extend(vars)
        new_tensor.input_tensors.extend(gradients)
        return new_tensor
    def compute(self, tensor, input_vals):
        '''
        Called during Session.run(); updates the actual values of the
        Variable nodes in place.
        :param tensor: the ApplyGradient tensor node.
        :param input_vals: variable values followed by gradient values.
        :return: None
        '''
        # First half of the inputs are variable values, second half gradients.
        var_vals = input_vals[0:int(len(input_vals) / 2)]
        gradient_vals = input_vals[int(len(input_vals) / 2):]
        var_delta = self.compute_var_delta(gradient_vals)
        for i, var in enumerate(tensor.input_tensors[0:int(len(input_vals) / 2)]):
            var.value = np.add(var_vals[i], var_delta[i])
    def gradient(self, tensor: Tensor, output_grad: Tensor):
        # ApplyGradient is a sink node; it contributes no gradient.
        return None
class Optimizer(abc.ABC):
    '''
    Base class for all optimizers.
    An optimizer implements three graph operations:
    compute_gradient: build the graph nodes that compute the gradients
    apply_gradient:   build the graph node that applies gradient updates
    minimize:         equivalent to compute_gradient followed by apply_gradient
    Subclasses must implement compute_var_delta, which decides how each
    optimizer turns gradient values into Variable deltas at run time.
    '''
    def __init__(self):
        self.apply_gradient = ApplyGradient(self.compute_var_delta)
    @abc.abstractmethod
    def compute_var_delta(self, grad_vals_list):
        '''
        Subclasses must implement this; it decides how the optimizer derives
        the Variable update deltas from the computed gradient values.
        :param grad_vals_list: list of computed gradient values
        :return:
        '''
        pass
    def compute_gradient(self, loss: Tensor, var_list=None, cache_dict: dict = None, name=None):
        '''
        Build the graph that computes the gradient of *loss* w.r.t. *var_list*.
        :param loss: the dependent (output) node the gradient is taken of
        :param var_list: list of independent nodes to differentiate w.r.t.
        :param cache_dict: memo of already-built gradient nodes (recursion cache)
        :param name: node name
        :return: dict mapping each variable node to its gradient node
        '''
        # loss must have shape [] (a scalar); ML losses generally are scalars.
        assert loss.shape == []
        if var_list is None:
            var_list = Graph.get_default_graph().TRAIN_VARS_COLLECTIONS
        if cache_dict is None:
            cache_dict = dict()
        vars_gradients = {}
        with _GradientMode():
            for var in var_list:
                result = cache_dict.get(var)
                if result is None:
                    # The gradient of a node w.r.t. itself is an all-ones
                    # tensor of the same shape.
                    if var == loss:
                        result = ones(var.shape)
                    else:
                        # By the multivariate chain rule, d(loss)/d(var) is
                        # the sum of derivatives along every output path of var.
                        result = zeros(var.shape)
                        for output_n in var.out_tensors:
                            # For each output path, differentiate loss w.r.t.
                            # the output node first,
                            output_grad = self.compute_gradient(loss, [output_n], cache_dict)[output_n]
                            # then use that node's op.gradient() to get this
                            # path's derivative w.r.t. var,
                            order = output_n.input_tensors.index(var)
                            var_g = output_n.op.gradient(output_n, output_grad)[order]
                            # and accumulate it with the other paths.
                            result = add(result, var_g)
                    cache_dict[var] = result
                vars_gradients[var] = result
        return vars_gradients
    def minimize(self, loss, var_list=None):
        '''
        :param loss: the dependent node whose gradient is taken
        :param var_list: list of independent nodes to differentiate w.r.t.
        :return: the apply-gradient graph node
        '''
        assert loss.shape == []
        vars_gradients = self.compute_gradient(loss, var_list)
        return self.apply_gradient(vars_gradients)
class SGD(Optimizer):
    '''
    Plain stochastic gradient descent.
    '''
    def __init__(self, learn_rate=0.001):
        '''
        :param learn_rate: step size applied to each gradient
        '''
        super().__init__()
        self.learn_rate = learn_rate
    def compute_var_delta(self, grad_vals_list):
        # Scale each gradient by -learn_rate (in place) and return the list.
        for idx, grad_val in enumerate(grad_vals_list):
            grad_vals_list[idx] = -self.learn_rate * grad_val
        return grad_vals_list
class RMSProp(Optimizer):
    '''
    Root Mean Square Prop optimizer.
    Adapts each parameter's effective learning rate to be inversely
    proportional to the square root of its accumulated squared gradients.
    Unlike AdaGrad, the accumulated sum decays at rate `decay`.
    '''
    def __init__(self, learn_rate=0.001, decay=0.9):
        '''
        :param learn_rate: initial learning rate
        :param decay: decay rate of the squared-gradient accumulator
        '''
        super().__init__()
        self.learn_rate = learn_rate
        self.decay = decay
        # r accumulates the decayed element-wise squared gradients.
        self.r = 0
    def compute_var_delta(self, grad_vals_list):
        # Flatten all gradients into one vector so a single accumulator r is
        # kept across every variable, then update r and scale the step.
        grad_vals_shape_list = list(map(lambda x: x.shape, grad_vals_list))
        grad_vals_flatten_list = list(map(lambda x: x.flatten(), grad_vals_list))
        grad_vals_concat = np.concatenate(grad_vals_flatten_list)
        self.r = self.r * self.decay + (1 - self.decay) * grad_vals_concat * grad_vals_concat
        # 1e-8 guards against division by zero.
        var_delta_concat = - self.learn_rate * (1 / np.sqrt(self.r + 1e-8)) * grad_vals_concat
        var_delta = []
        tmp_pos = 0
        # Split the flat delta vector back into the original shapes.
        for i in range(len(grad_vals_list)):
            shape = grad_vals_shape_list[i]
            length = functools.reduce(lambda x, y: x * y, shape)
            var_delta.append(np.reshape(var_delta_concat[tmp_pos:tmp_pos + length], shape))
            tmp_pos += length
        return var_delta
class Adam(Optimizer):
    """Adam optimizer — skeleton only; compute_var_delta is not implemented."""
    def __init__(self):
        super().__init__()
        # TODO
    def compute_var_delta(self, grad_vals_list):
        # TODO: implement Adam's first/second moment estimates.  Currently
        # returns None, so this optimizer must not be used yet.
        pass
| true |
4328d49dcbf8c4386078f9833526075cdb36dedb | Python | ideoforms/isobar | /tests/test_pattern_sequence.py | UTF-8 | 5,571 | 2.71875 | 3 | [
"MIT"
] | permissive | import pytest
import isobar as iso
def test_psequence_ints():
    """PSequence over ints yields the list once when repeats=1."""
    a = iso.PSequence([1, 2, 3], 1)
    assert list(a) == [1, 2, 3]
def test_psequence_tuples():
    """PSequence yields tuple items verbatim, looping twice with repeats=2."""
    a = iso.PSequence([(1, 2), (3, 4), (5, 6)], 2)
    assert list(a) == [(1, 2), (3, 4), (5, 6), (1, 2), (3, 4), (5, 6)]
def test_psequence_keys():
    """PSequence can hold rich objects such as iso.Key instances."""
    a = iso.PSequence([iso.Key("C", "major"), iso.Key("F", "minor")], 1)
    assert list(a) == [iso.Key("C", "major"), iso.Key("F", "minor")]
def test_pseries():
    """PSeries accepts patterns for step and length arguments."""
    a = iso.PSeries(2, iso.PSequence([1, 2]), iso.PConstant(5))
    assert list(a) == [2, 3, 5, 6, 8]
def test_prange():
    """PRange accepts patterns for its end and step arguments."""
    a = iso.PRange(0, iso.PConstant(10), iso.PSequence([1, 2]))
    assert list(a) == [0, 1, 3, 4, 6, 7, 9]
def test_pgeom():
    """PGeom multiplies by a (patterned) ratio up to the limit value."""
    a = iso.PGeom(1, iso.PSequence([1, 2]), 8)
    assert list(a) == [1, 1, 2, 2, 4, 4, 8, 8]
def test_pimpulse():
    """PImpulse emits a 1 every N steps and 0 otherwise."""
    a = iso.PImpulse(4)
    b = iso.PSubsequence(a, 0, 8)
    assert list(b) == [1, 0, 0, 0, 1, 0, 0, 0]
def test_ploop():
    """PLoop repeats a finite pattern the requested number of times."""
    a = iso.PSequence([1, 2, 3], 1)
    b = iso.PLoop(a, 3)
    assert list(b) == [1, 2, 3, 1, 2, 3, 1, 2, 3]
def test_ploop_bang():
    """Placeholder for a PLoop bang/restart test (not yet implemented)."""
    pass
def test_ppingpong():
    """PPingPong plays the pattern forwards then backwards, N round trips."""
    a = iso.PSequence([1, 2, 3], 1)
    b = iso.PPingPong(a, 2)
    assert list(b) == [1, 2, 3, 2, 1, 2, 3, 2, 1]
def test_pcreep():
    """PCreep repeats a sliding window over the source pattern."""
    a = iso.PSequence([1, 2, 3, 4, 5], 1)
    b = iso.PCreep(a, 3, 1, 2)
    assert list(b) == [1, 2, 3, 1, 2, 3, 2, 3, 4, 2, 3, 4, 3, 4, 5, 3, 4, 5]
def test_pstutter():
    """PStutter repeats each value a (patterned) number of times."""
    a = iso.PSequence([1, 2, 3, 4], 1)
    b = iso.PStutter(a, iso.PSequence([2, 3]))
    assert b.nextn(16) == [1, 1, 2, 2, 2, 3, 3, 4, 4, 4]
def test_psubsequence():
    """PSubsequence extracts offset/length slices of a pattern."""
    a = iso.PSeries()
    b = iso.PSubsequence(a, 4, 4)
    assert list(b) == [4, 5, 6, 7]
def test_pinterpolate():
    """PInterpolate: NONE holds values, LINEAR ramps, unknown mode raises."""
    a = iso.PSequence([0, 1, 2], 1)
    steps = iso.PSequence([4, 2], 1)
    b = iso.PInterpolate(a, steps, iso.INTERPOLATION_NONE)
    assert list(b) == [0, 0, 0, 0, 1, 1, 2]
    a = iso.PSequence([0, 1, 2], 1)
    steps = iso.PSequence([4, 2], 1)
    b = iso.PInterpolate(a, steps, iso.INTERPOLATION_LINEAR)
    assert list(b) == [0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0]
    b = iso.PInterpolate(iso.PSequence([0, 1, 2]), iso.PSequence([2, 2]), 99)
    # Advance once (the first value is emitted before the invalid mode is
    # used); the stray debug print() that was here has been removed.
    next(b)
    with pytest.raises(ValueError):
        next(b)
def test_preverse():
    """PReverse yields a finite pattern in reverse order."""
    a = iso.PSequence([1, 2, 3, 4, 5], 1)
    b = iso.PReverse(a)
    assert list(b) == [5, 4, 3, 2, 1]
def test_preset():
    """PReset restarts the source pattern each time the trigger fires."""
    a = iso.PSeries(0, 1)
    b = iso.PReset(a, iso.PImpulse(4))
    c = iso.PSubsequence(b, 0, 10)
    assert list(c) == [0, 1, 2, 3, 0, 1, 2, 3, 0, 1]
def test_pcounter():
    """PCounter counts the impulses seen so far."""
    a = iso.PCounter(iso.PImpulse(4))
    b = iso.PSubsequence(a, 0, 10)
    assert list(b) == [1, 1, 1, 1, 2, 2, 2, 2, 3, 3]
def test_pcollapse():
    """PCollapse drops None (rest) values from the stream."""
    a = iso.PSequence([1, 2, None, 3, 4, None, None, 5, 6, None], 1)
    b = iso.PCollapse(a)
    assert list(b) == [1, 2, 3, 4, 5, 6]
def test_pnorepeats():
    """PNoRepeats suppresses consecutive duplicate values."""
    a = iso.PSequence([1, 2, 2, 3, 3.5, 3.5, 4, None, None, 5], 1)
    b = iso.PNoRepeats(a)
    assert list(b) == [1, 2, 3, 3.5, 4, None, 5]
def test_ppad():
    """PPad extends a pattern with None up to a fixed length (no-op if longer)."""
    a = iso.PSequence([1, None, 2], 1)
    b = iso.PPad(a, 6)
    assert list(b) == [1, None, 2, None, None, None]
    a = iso.PSequence([1, None, 2], 1)
    b = iso.PPad(a, 3)
    assert list(b) == [1, None, 2]
def test_ppadtomultiple():
    """PPadToMultiple pads with None up to the next multiple of N."""
    a = iso.PSequence([1, 2, 3, 4, None, 6], 1)
    b = iso.PPadToMultiple(a, 4)
    assert list(b) == [1, 2, 3, 4, None, 6, None, None]
    a = iso.PSequence([1, None, 2], 1)
    b = iso.PPadToMultiple(a, 3)
    assert list(b) == [1, None, 2]
def test_parpeggiator():
    """PArpeggiator orderings: UP, DOWN, CONVERGE, DIVERGE and seeded RANDOM."""
    a = iso.PArpeggiator([0, 1, 2, 3], iso.PArpeggiator.UP)
    assert a.nextn(16) == [0, 1, 2, 3]
    a = iso.PArpeggiator([0, 1, 2, 3], iso.PArpeggiator.DOWN)
    assert a.nextn(16) == [3, 2, 1, 0]
    a = iso.PArpeggiator([0, 1, 2, 3], iso.PArpeggiator.CONVERGE)
    assert a.nextn(16) == [0, 3, 1, 2]
    a = iso.PArpeggiator([0, 1, 2, 3, 4], iso.PArpeggiator.CONVERGE)
    assert a.nextn(16) == [0, 4, 1, 3, 2]
    a = iso.PArpeggiator([0, 1, 2, 3], iso.PArpeggiator.CONVERGE)
    assert a.nextn(16) == [0, 3, 1, 2]
    a = iso.PArpeggiator([0, 1, 2, 3, 4], iso.PArpeggiator.DIVERGE)
    assert a.nextn(16) == [2, 1, 3, 0, 4]
    a = iso.PArpeggiator([0, 1, 2, 3], iso.PArpeggiator.DIVERGE)
    assert a.nextn(16) == [1, 2, 0, 3]
    a = iso.PArpeggiator([0, 1, 2, 3, 4], iso.PArpeggiator.RANDOM)
    a.seed(0)
    a.reset()
    assert a.nextn(16) == [2, 1, 0, 4, 3]
def test_peuclidean():
    """PEuclidean produces Euclidean rhythms, length may itself be a pattern."""
    a = iso.PEuclidean(4, 7, 0)
    assert a.nextn(16) == [1, None, 1, None, 1, None, 1, 1, None, 1, None, 1, None, 1, 1, None]
    a = iso.PEuclidean(4, iso.PSequence([7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4], 1))
    assert a.nextn(19) == [1, None, 1, None, 1, None, 1, None, 1, None, 1, None, 1, None, 1, 1, 1, 1, 1]
def test_ppermut():
    """PPermut cycles through all permutations of N items from the source."""
    a = iso.PPermut(iso.PSequence([1, 11, 111]), 3)
    assert list(a) == [1, 11, 111, 1, 111, 11, 11, 1, 111, 11, 111, 1, 111, 1, 11, 111, 11, 1]
def test_ppatterngeneratoraction():
    """PPatternGeneratorAction chains generated patterns, stopping on None."""
    n = 0
    def generate():
        nonlocal n
        n += 1
        if n == 1:
            return iso.PSequence([0], 2)
        elif n == 2:
            return iso.PSequence([1, 2], 1)
        else:
            return None
    a = iso.PPatternGeneratorAction(generate)
    assert next(a) == 0
    assert next(a) == 0
    assert next(a) == 1
    assert next(a) == 2
    with pytest.raises(StopIteration):
        next(a)
def test_psequenceaction():
    """PSequenceAction applies the action to the list after each full pass."""
    a = iso.PSequenceAction([1, 2, 3], lambda a: list(reversed(a)), 4)
    assert list(a) == [1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1]
| true |
f85d766ababb30ac625661420851dea2f4810277 | Python | sujiny-tech/preparing-for-coding-test | /leetcode/Palindrome Linked List.py | UTF-8 | 593 | 3.796875 | 4 | [] | no_license | class ListNode:
    def __init__(self, val=0, next=None):
        # Node payload and pointer to the following node (None ends the list).
        self.val=val
        self.next=next
def isPalindrome(head: "ListNode") -> bool:
    """Return True if the values of the linked list starting at *head* read
    the same forwards and backwards.  An empty list counts as a palindrome.
    """
    # Collect the node values into a Python list.
    vals = []
    while head is not None:
        vals.append(head.val)
        head = head.next
    # Equivalent to the original left/right two-pointer scan; the stray
    # debug print(list_) has been removed.
    return vals == vals[::-1]
# Ad-hoc check: build the list 1 -> 2 -> 3 (not a palindrome) and test it.
a=ListNode(1)
b=ListNode(2)
c=ListNode(3)
a.next=b
b.next=c
ans=isPalindrome(a)
print(ans) | true |
48e4c539554a7155433b336524d5f72e32e1a03c | Python | lizhaodong/PrunedYOLO | /spar_v3.py | UTF-8 | 2,552 | 2.9375 | 3 | [] | no_license | #This is the library for weights sparsification
import tensorflow as tf
import numpy as np
#masks is a list of tuple, each tuple is (var_name, mask)
masks = []
#name_tfv is a dictionary, where key is the variable name,
#and value is tf.variable
name_tfv = {}
#name_ph, key variable name and value is tf.placeholder (with same dtype and shape)
name_ph = {}
def get_masks(sess, percent):
    """Build pruning masks for all multi-dimensional trainable variables.

    The magnitudes of every weight matrix are pooled and the `percent`-th
    percentile of |w| becomes the pruning threshold: mask entries are True
    (keep) where |w| >= threshold and False (prune) below it, so `percent`
    is the fraction of weights that get zeroed by apply_masks().

    :param sess: TensorFlow session holding the current variable values.
    :param percent: percentile (0-100) of weights to prune away.
    :return: the module-level `masks` list of (variable_name, mask) tuples.
    """
    tf_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    # name_vals: (variable_name, current_value) for each weight matrix.
    name_vals = []
    data = []
    # old_vals only contains values (numpy arrays), aligned with tf_variables.
    old_vals = sess.run(tf_variables)
    for val in old_vals:
        # Only multi-dimensional tensors (weight matrices/kernels) are
        # pruned; 1-D variables such as biases are left untouched.
        if len(val.shape) > 1:
            data.extend(np.absolute(val).flatten().tolist())
    CUTOFF = np.percentile(data, percent)
    for i in range(len(tf_variables)):
        if len(old_vals[i].shape) > 1:
            name_tfv[tf_variables[i].name] = tf_variables[i]
            cur_ph = tf.placeholder(tf_variables[i].dtype, shape=tf_variables[i].get_shape())
            name_ph[tf_variables[i].name] = cur_ph
            name_vals.append((tf_variables[i].name, old_vals[i]))
    global masks
    for name, weight_val in name_vals:
        # BUG FIX: the mask must be 1 where a weight is KEPT.  The previous
        # `np.abs(weight_val) < CUTOFF` kept the small weights and zeroed
        # the large ones — the opposite of the (removed) reference loop,
        # which set mask entries to 0 exactly where |w| < CUTOFF.
        mask_cur = np.abs(weight_val) >= CUTOFF
        masks.append((name, mask_cur))
    return masks
'''
def apply_masks(sess):
ops = []
for mask_tuple in masks:
mask = mask_tuple[1]
#mask_tuple[0] is the variable name,
#name_tfv[var_name] gives the corresponding tf.var
variable = name_tfv[mask_tuple[0]]
new_var = tf.multiply(variable, mask)
ops.append(tf.assign(variable, new_var))
sess.run(ops)
'''
def apply_masks(sess):
    """Zero out masked weights by assigning `variable * mask` back into each variable."""
    for mask_tuple in masks:
        mask = mask_tuple[1]
        variable = name_tfv[mask_tuple[0]]
        new_var = variable * mask
        # Evaluate the masked tensor and feed it back through the pre-built
        # placeholder; note this performs one sess.run pair per variable.
        sess.run(variable.assign(name_ph[mask_tuple[0]]), {name_ph[mask_tuple[0]]: sess.run(new_var)})
5d2a37897de4a1e1444c39f2b735d65a4cc79fcf | Python | gxsgxs/1808 | /13day/06-列表遍历的坑.py | UTF-8 | 297 | 4 | 4 | [] | no_license | list = [1,2,3,4,5,6,7,8,9]
# Note: never delete items from a list while looping forward over it —
# the indices shift underneath the loop (the two commented-out attempts
# below either skip elements or pop the wrong ones).
'''
for i in range(len(list)):#0 1 2 3 4 5 6 7 8
    list.pop(i)
    print(list)
'''
'''
for i in list:
    print(i)
    list.pop()
    #list = [1,2,3,4,5,6,7,8]
'''
# Safe variant: iterate the indices backwards so each pop never shifts the
# positions still to be visited.  (NB: the name `list` shadows the builtin.)
for i in range(len(list)-1,-1,-1):
    list.pop(i)
    print(list)
| true |
367a1fd5dfd3ebec60cfe31572944291e44adc45 | Python | zingzheng/LeetCode_py | /117Populating Next Right Pointers in Each Node II.py | UTF-8 | 1,861 | 3.328125 | 3 | [] | no_license | ##Populating Next Right Pointers in Each Node II
##Follow up for problem "Populating Next Right Pointers in Each Node".
##What if the given tree could be any binary tree? Would your previous solution still work?
##2015年8月26日 18:05:48 AC
##zss
# Definition for binary tree with next pointer.
class TreeLinkNode(object):
    """Binary tree node with an extra `next` pointer to its right neighbour."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
        self.next = None
class Solution(object):
    def connect(self, root):
        """
        :type root: TreeLinkNode
        :rtype: nothing
        """
        # Link each level left-to-right, walking the `next` pointers already
        # established on the level above (O(1) extra space).
        if not root:return
        node=root
        while node:
            down = None  # first child seen on this level = start of next level
            while node:
                # Remember the leftmost child of the current level.
                if not down:
                    if node.left: down = node.left
                    elif node.right: down = node.right
                # Skip ahead to the next node on THIS level that has a child.
                nex = node.next
                while nex:
                    if nex.left or nex.right:
                        break
                    nex=nex.next
                # Wire node's children to each other and/or to the first
                # child of the next parent that has any.
                if node.left and node.right:
                    node.left.next = node.right
                if node.left and not node.right and nex:
                    if nex.left:
                        node.left.next=nex.left
                    else:
                        node.left.next=nex.right
                if node.right and nex:
                    if nex.left:
                        node.right.next=nex.left
                    else:
                        node.right.next=nex.right
                node=nex
            node=down
class Test(object):
    def t(self):
        """Build and return the sample tree 1(2(.,4), 3(5,.)) for manual checks."""
        t1= TreeLinkNode(1)
        t2= TreeLinkNode(2)
        t3= TreeLinkNode(3)
        t4= TreeLinkNode(4)
        t5= TreeLinkNode(5)
        t1.left=t2
        t1.right=t3
        t2.right=t4
        t3.left=t5
        return t1
| true |
f32ba6b90c374c6076d66420b138c27d1c8020a3 | Python | marwahaha/studio_xkcd | /app/model.py | UTF-8 | 2,939 | 2.75 | 3 | [] | no_license | import logging
import functools

from peewee import *

from utils.settings import Settings
settings = Settings.get_instance()
mysql_db = MySQLDatabase(
settings['mysql']['database'],
user=settings['mysql']['username'],
password=settings['mysql']['password'],
host=settings['mysql']['hostname'],
port=settings['mysql']['hostport']
)
def mysql_query(func):
    """Decorator for peewee queries: open the shared MySQL connection if it
    is closed, run *func*, and always close the connection afterwards.

    Fixes two defects in the original:
    - the connection was leaked when *func* raised (no finally block);
    - the wrapper did not preserve *func*'s metadata (functools.wraps).
    """
    @functools.wraps(func)
    def function_wrapper(*args, **kwargs):
        if mysql_db.is_closed():
            logging.info('Opening MySQL Connection')
            mysql_db.connect()
        try:
            return func(*args, **kwargs)
        finally:
            if not mysql_db.is_closed():
                logging.info('Closing MySQL Connection')
                mysql_db.close()
    return function_wrapper
class BaseModel(Model):
    """Base peewee model binding every subclass to the shared MySQL database."""
    class Meta:
        database = mysql_db
class Favorite(BaseModel):
    """Peewee model for a user's favorited xkcd comic (table `favorites`)."""
    # id = IntegerField(null=False)
    user_id = CharField(max_length=36, null=False)
    xkcd_id = IntegerField(null=False)
    class Meta:
        db_table = 'favorites'
    def to_dict(self):
        """Serialize this row to a plain dict."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'xkcd_id': self.xkcd_id
        }
    @classmethod
    def from_dict(cls, favorite_dict):
        """Build an (unsaved) Favorite from a dict; missing keys become None."""
        favorite = Favorite(
            id=favorite_dict.get('id'),
            user_id=favorite_dict.get('user_id'),
            xkcd_id=favorite_dict.get('xkcd_id')
        )
        return favorite
    @classmethod
    @mysql_query
    def delete_favorite(cls, user_id, xkcd_id):
        """Delete one favorite; return (rows_deleted, matched_row_or_None)."""
        try:
            delete_count = 0
            item = cls.select().where(cls.user_id == user_id, cls.xkcd_id == xkcd_id).get()
            if item:
                delete_count = item.delete_instance()
        except cls.DoesNotExist:
            item = None
            delete_count = 0
        return delete_count, item
    @classmethod
    @mysql_query
    def save_favorite(cls, favorite_object):
        """Persist a favorite given as a dict; return the stored row as a dict."""
        favorite = Favorite.from_dict(favorite_object)
        favorite.save()
        # Re-read the row to confirm the write actually landed.
        favorite_response = cls.select().where(cls.user_id == favorite.user_id, cls.xkcd_id == favorite.xkcd_id).get()
        if favorite_response:
            return favorite_response.to_dict()
        raise Exception('there were issues saving the data')
    @classmethod
    @mysql_query
    def get_user_favorites(cls, user_id):
        """Return the user's favorites as a list of dicts, ordered by id."""
        if not user_id:
            return []
        try:
            favorites_json = []
            favorites = cls.select().where(cls.user_id == user_id).order_by(cls.id)
            for item in favorites:
                favorites_json.append(Favorite.to_dict(item))
        except cls.DoesNotExist:
            favorites_json = []
        return favorites_json
    @classmethod
    def get_user_favorites_array(cls, user_id):
        """Return only the xkcd_id values of the user's favorites."""
        favorites_data = Favorite.get_user_favorites(user_id)
        favorites = []
        for f in favorites_data:
            favorites.append(f["xkcd_id"])
        return favorites
| true |
f53e11fa4c8e3a568308f3f3f64c62be824b1f82 | Python | dtdannen/LUiGi-hierarchical-GDA | /experiments/GraphScripts/src/TestSetup.py | UTF-8 | 602 | 2.859375 | 3 | [
"MIT"
] | permissive | '''
Created on Apr 7, 2014
@author: dustin
'''
"""
You can use the proper typesetting unicode minus (see
http://en.wikipedia.org/wiki/Plus_sign#Plus_sign) or the ASCII hyphen
for minus, which some people prefer. The matplotlibrc param
axes.unicode_minus controls the default behavior.
The default is to use the unicode minus
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Use the ASCII hyphen-minus instead of the unicode minus on tick labels.
matplotlib.rcParams['axes.unicode_minus'] = False
fig, ax = plt.subplots()
# Scatter 100 random points spanning negative and positive values so the
# axis tick labels actually show minus signs.
ax.plot(10*np.random.randn(100), 10*np.random.randn(100), 'o')
ax.set_title('Using hypen instead of unicode minus')  # sic: "hypen" typo appears in the displayed title
plt.show() | true |
6f8af1a97b71b8c7c88737cf1993b9de7913cf47 | Python | gregoritoo/UI_Anomaly_Detection | /Alertes/Alert_Prediction.py | UTF-8 | 4,461 | 2.609375 | 3 | [] | no_license |
import os
path_to_kap = os.environ['kapacitor']
path_to_script = os.environ['script']
class Alert_Prediction():
    """Builds, saves and enables a Kapacitor TICK alert that compares
    realtime predictions against forecast values for one host/measurement.

    NOTE(review): `self.path` is only set in save_alert(), so define_alert()
    must not be called before it; each of save/define/enable re-mangles
    `self.form` in place (replace "="->".", ","->"_", strip ":").
    """
    def __init__(self, host, measurement):
        self.host = host
        self.measurement = measurement
        self.texte = ""
    def create(self, message, form, period):
        self.form = form
        '''
        Create the tick alert
        Note : NEED TO DEFINE the path of the script, which will be launched when an alert is trigged, as a variable environnement
        Parameters
        ----------
        message : str
            Message to be shown as an alert on slack etc ; need to be written with kapacitor syntax
        Returns
        -------
        None.
        '''
        # Build the WHERE clause and the "var tag='value'" declarations from
        # the comma-separated tag=value pairs in `form` (first char skipped).
        where_condition = ""
        where = [[element.split("=") for element in form[1:].split(",")][i][0] for i in range(len(form[1:].split(",")))]
        for ele in where:
            where_condition = where_condition + ele + "=" + ele + " AND "
        texte = ""
        cond = ["var " + (form[1:].replace(",", " AND").split("AND")[i]).replace("=", "='") + "'" for i in
                range(len(form[1:].replace(",", " AND").split("AND")))]
        for element in cond:
            texte = texte + element + "\n"
        # Assemble the TICK script: realtime vs predicted batch queries,
        # joined, with an alert that posts to Slack and executes the hook
        # script.  NOTE(review): the predicted query reads measurement
        # "pred_3<measurement>" vs realtime's "pred_<measurement>" — confirm
        # this is intentional.
        texte = texte + "\n\n" + """var realtime = batch
    |query('SELECT mean(yhat) as real_value FROM "telegraf"."autogen".pred_""" + self.measurement + """ WHERE """ + where_condition[
                                                                                                                   : -5] + """')
        .period(5m)
        .every(5m)
        .align()
    |last('real_value')
        .as('real_value')
    |log()
        .prefix('P0-1')
        .level('DEBUG')
var predicted = batch
    |query('SELECT mean(yhat) as prediction FROM "telegraf"."autogen".pred_3""" + self.measurement + """ WHERE """ + where_condition[
                                                                                                                    : -5] + """')
        .period(5m)
        .every(1h)
        .align()
    |last('prediction')
        .as('prediction')
    |log()
        .prefix('P0-2')
        .level('DEBUG')
var joined_data = realtime
    |join(predicted)
        .as('realtime', 'predicted')
        .tolerance(20m)
var performance_error = joined_data
    |eval(lambda: abs("realtime.real_value" - "predicted.prediction"))
        .as('performance_error')
    |alert()
        .crit(lambda: "performance_error" > 0 )
        .message('""" + message + """')
        .slack()
        .exec('""" + path_to_script + """', '\"""" + self.host + "\"'" + """, '\"""" + self.measurement + "\"'" + """, '\"""" + str(
            form[1:]) + "\"'" + """, '\"""" + period + "\"'" + """)
    |log()
        .prefix('P0-3')
        .level('DEBUG')"""
        self.texte = texte
    def save_alert(self):
        """Write the generated TICK script to Alerte/alerte_<measurement>_<form>.tick."""
        self.form = self.form[1:].replace("=", ".")
        self.form = self.form.replace(",", "_")
        self.form = self.form.replace(":", "")
        self.path = r"Alerte/alerte_" + self.measurement + "_" + self.form + ".tick"
        print(self.path)
        with open(self.path, "w") as f:
            f.write(self.texte)
            f.close()
    def define_alert(self):
        """Register the saved TICK script with kapacitor (`kapacitor define`)."""
        self.form = self.form.replace("=", ".")
        self.form = self.form.replace(",", "_")
        self.form = self.form.replace(":", "")
        cmd_define_alert = path_to_kap + " define " + "alerte_" + self.measurement + "_" + self.form + " -type batch -tick " + self.path + " -dbrp telegraf.autogen"
        print(cmd_define_alert)
        os.system('cmd /c ' + cmd_define_alert)
    def enable_alert(self):
        """Enable the previously defined alert (`kapacitor enable`)."""
        self.form = self.form.replace("=", ".")
        self.form = self.form.replace(",", "_")
        self.form = self.form.replace(":", "")
        cmd_enable_alert = path_to_kap + " enable " + "alerte_" + self.measurement + "_" + self.form
        os.system('cmd /c ' + cmd_enable_alert)
    def launch(self):
        """Define and then enable the alert in one call."""
        self.define_alert()
        self.enable_alert()
| true |
1a9c665d367165827a3b803197573f65af5c9e56 | Python | gtieng/web-scraping-challenge | /app.py | UTF-8 | 729 | 2.515625 | 3 | [] | no_license | #import dependencies
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
# Set route
@app.route('/')
def index():
    """Render the home page with the latest scraped Mars data."""
    # Find one record of data from the mongo database
    mars_data = mongo.db.mars.find_one()
    # Return the template with the Mars data passed in
    return render_template('index.html', content=mars_data)
# Set route
@app.route('/scrape')
def action():
    """Run the scraper, upsert its results into Mongo, then redirect home."""
    scrape = scrape_mars.scrape()
    # NOTE(review): pymongo's Collection.update is deprecated in favour of
    # update_one; left unchanged here.
    mongo.db.mars.update({}, scrape, upsert=True)
    return redirect("/")
if __name__ == "__main__":
app.run(debug=True) | true |
b8338fb2ac6972260f90e061ef0f0a99c7b603aa | Python | ramki123456/newrepository | /python/pythonclass/pratice/dictionaries.py | UTF-8 | 1,817 | 3.75 | 4 | [] | no_license | '''#dictionaries:- the group of items which are enclosed by two {} are known as dictionaries.
in dictionaries items are separated by comma.
an item is a combination of a key and a value pair. key and value are separated by :
dictionaries are mutable datastructures so we can modify a dictionary.
syntax: dictionary_name={item1,item2,------} item: key+value
dictionary_name={key1:val1, key2:val2}
in dictionaries keys are unique and should accept immutable data only.
immutable-(int, float, )
in dictionaries values can be of any type and they do not contain
dictionaries are unordered datastructures which uses heap as memory unit.
note: the datastructures from which we can not access data by using indexing process are known as unordered.
unordered datastructures like dictionaries perform data manipulation operations much faster compared to ordered datastructures.
dictionaries use a hash-key mechanism (each key paired with a value) which makes data operations very fast.
in dictionaries we can use a process named as keying which is used to access the data.
use below syntax to perform keying.
syntax: dictionary_name[key]
d = {'a':1, 'b':2, 'c':3}
print d[a]
#updating an item: to update an item the key should exist in the dictionary.
follow the below syntax to update an element
dictionary_name[key]=value
d['c']=15
print d
to insert an item the key must not already exist.
dictionary_name[key]=value
d['x']=20
print d
deleting an item:
syntax: del dictionary_name[key]
note: if you are inserting multiple val with same name dictionary crate an item with one key mapped with latest val.
d = {'a':1, 'b':4, c:'23', 'a':43}
print d
employee={1179:{'name':'hari', 'age':26, 'comp':['ibm, hcl, tcs'], 'loc':'banglore'},
1180:{'name':'ravi', 'age':23, 'comp':['ibm, hcl'], 'loc':'chennai'}}
| true |
406700c61cad11c1eedb31d49f8e83ee37020e79 | Python | karwootang-gft/tb-houston-service | /solution.py | UTF-8 | 11,318 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | """
This is the deployments module and supports all the ReST actions for the
solutions collection
"""
# 3rd party modules
from flask import make_response, jsonify, abort
from config import db, app
from models import Solution, SolutionSchema
from models import ModelTools
from extendedSchemas import ExtendedSolutionSchema
from extendedSchemas import SolutionDeploymentSchema
from extendedSchemas import SolutionNamesOnlySchema
import solution_extension
from pprint import pformat
import requests
import os
import json
from sqlalchemy import literal_column
def read_all(active=None, namesonly=None, page=None, page_size=None, sort=None):
    """
    This function responds to a request for /api/solutions
    with the complete lists of solutions.

    :param active: optional filter on the Solution.active flag
    :param namesonly: when truthy, serialize only the solution names/ids
    :param page: zero-based page index, used together with page_size
    :param page_size: number of rows per page
    :param sort: list of "column[:asc|desc]" sort instructions
    :return: json string of list of solutions
    """
    app.logger.debug("solution.read_all")
    app.logger.debug(f"Active: {active}, namesonly: {namesonly}")

    # Pre-process sort instructions; fall back to ordering by id when the
    # instructions are absent or malformed.
    if sort is None:
        solution_query = Solution.query.order_by(Solution.id)
    else:
        try:
            sort_inst = [si.split(":") for si in sort]
            orderby_arr = []
            for si in sort_inst:
                column = si[0]
                direction = si[1] if len(si) > 1 else "asc"
                orderby_arr.append(f"{column} {direction}")
            app.logger.debug("orderby: %s", orderby_arr)
            solution_query = Solution.query.order_by(
                literal_column(", ".join(orderby_arr)))
        except Exception as e:
            # A malformed sort parameter must not break the listing.
            app.logger.warning("invalid sort instructions: %s", pformat(e))
            solution_query = Solution.query.order_by(Solution.id)

    # Optionally restrict to (in)active solutions.
    if active is not None:
        solution_query = solution_query.filter(Solution.active == active)

    # Apply limit and offset last so they act on the filtered, sorted query.
    if page is None or page_size is None:
        solutions = solution_query.all()
    else:
        solutions = solution_query.limit(page_size).offset(page * page_size)

    if namesonly:
        # Lightweight response: only names/ids are serialized.
        schema = SolutionNamesOnlySchema(many=True)
        data = schema.dump(solutions)
    else:
        # Build the fully extended representation of every solution.
        solutions_arr = [solution_extension.build_solution(sol) for sol in solutions]
        app.logger.debug("solutions array:")
        app.logger.debug(pformat(solutions_arr))
        schema = ExtendedSolutionSchema(many=True)
        data = schema.dump(solutions_arr)

    app.logger.debug("solutions data:")
    app.logger.debug(data)
    return data, 200
def read_one(oid):
    """
    This function responds to a request for /api/solution/{oid}
    with one matching solution from solutions.

    :param oid: id of solution to find
    :return: serialized solution matching the id, or a 404 error
    """
    sol = Solution.query.filter(Solution.id == oid).one_or_none()
    if sol is not None:
        solution = solution_extension.build_solution(sol)
        # Serialize the data for the response
        solution_schema = ExtendedSolutionSchema()
        data = solution_schema.dump(solution)
        return data, 200
    else:
        # BUG FIX: the original chained a redundant .format(id=oid) onto an
        # already-interpolated f-string.
        abort(404, f"Solution with id {oid} not found")
def create(solutionDetails):
    """
    This function creates a new solution in the solutions list
    based on the passed-in solution data.

    :param solutionDetails: dict describing the solution to create
    :return: serialized new solution and 201 on success
    """
    # Apply defaults for optional fields (also replacing explicit None values,
    # matching the original behaviour of the repeated `get(...) == None` checks).
    defaults = {
        'active': True,
        'favourite': True,
        'teams': 0,
        'deployed': False,
        'deploymentState': "",
        'statusId': 0,
        'statusCode': "",
        'statusMessage': "",
    }
    for field, value in defaults.items():
        if solutionDetails.get(field) is None:
            solutionDetails[field] = value

    # Remove applications because Solutions don't have
    # any applications when they are first created.
    solutionDetails.pop('applications', None)

    # We don't need the id; it is generated automatically by the database.
    solutionDetails.pop('id', None)

    solutionDetails['lastUpdated'] = ModelTools.get_utc_timestamp()
    # Environments are stored as a JSON-encoded list.
    solutionDetails['environments'] = json.dumps(solutionDetails.get('environments') or [])

    schema = SolutionSchema(many=False)
    new_solution = schema.load(solutionDetails, session=db.session)
    db.session.add(new_solution)
    db.session.commit()
    app.logger.debug("created solution: %s", pformat(new_solution))

    # Serialize and return the newly created solution in the response.
    schema = ExtendedSolutionSchema()
    data = schema.dump(new_solution)
    return data, 201
def update(oid, solutionDetails):
    """
    Updates an existing solution in the solutions list.

    :param oid: id of the solution to update
    :param solutionDetails: dict with the fields to update
    :return: the updated solution serialized, or a 404 error
    """
    app.logger.debug(solutionDetails)

    # Does the solutions exist in solutions list?
    existing_solution = Solution.query.filter(
        Solution.id == oid
    ).one_or_none()

    # Does solutions exist?
    if existing_solution is not None:
        # Keep the stored environments when the update does not carry any
        # (environments are persisted as a JSON-encoded string).
        solutionDetails['environments'] = json.dumps(solutionDetails.get('environments') or existing_solution.environments)
        schema = SolutionSchema()
        update_solution = schema.load(solutionDetails, session=db.session)
        # Fall back to the path id when the payload does not carry one.
        update_solution.key = solutionDetails.get('id', oid)
        update_solution.lastUpdated = ModelTools.get_utc_timestamp()
        db.session.merge(update_solution)
        db.session.commit()

        # Return the updated solution in the response.
        schema = ExtendedSolutionSchema(many=False)
        print(">>>>> " + pformat(solutionDetails))
        # Decode environments back into a list before serializing the response.
        solutionDetails['environments'] = json.loads(solutionDetails['environments'])
        data = schema.dump(solutionDetails)
        return data, 200
    # otherwise, nope, the solution doesn't exist, so that's an error
    else:
        abort(404, f"Solution not found")
def delete(oid):
    """
    Remove one solution, identified by *oid*, from the solutions list.

    :param oid: id of the solution to delete
    :return: 200 on successful delete, 404 if not found
    """
    doomed = Solution.query.filter(Solution.id == oid).one_or_none()
    if doomed is None:
        # Nothing to remove -- report the missing solution.
        abort(404, f"Solution {oid} not found")
    db.session.delete(doomed)
    db.session.commit()
    return make_response(f"Solution {oid} successfully deleted", 200)
def deployment_read_all():
    """
    This function responds to a request for /api/solutiondeployments
    with the complete list of deployed solutions.

    :return: json list of the deployment fields (id and deployed) of
        every solution
    """
    app.logger.debug("solution.deployment_read_all")
    all_solutions = Solution.query.all()
    serializer = SolutionDeploymentSchema(many=True)
    payload = serializer.dump(all_solutions)
    app.logger.debug("solutions data:")
    app.logger.debug(payload)
    return payload, 200
def deployment_read_one(oid):
    """
    This function responds to a request for /api/solutiondeployment/{oid}
    with one matching solution deployment from solutions.

    :param oid: id of solution to find
    :return: deployment fields of the solution matching oid, or a 404 error
    """
    sol = Solution.query.filter(Solution.id == oid).one_or_none()
    if sol is not None:
        # Serialize the data for the response
        solution_schema = SolutionDeploymentSchema(many=False)
        data = solution_schema.dump(sol)
        return data, 200
    else:
        # BUG FIX: the original chained a redundant .format(id=oid) onto an
        # already-interpolated f-string.
        abort(404, f"Solution with id {oid} not found")
def deployment_create(solutionDeploymentDetails):
    """
    This function queries a solution and forwards the request to the DaC.

    :param solutionDeploymentDetails: dict carrying the id of the solution
        to deploy
    :return: DaC response body and 201 on success; read_one aborts with a
        404 when the solution doesn't exist
    """
    app.logger.debug(pformat(solutionDeploymentDetails))
    oid = solutionDeploymentDetails['id']
    # read_one returns (payload, status) and aborts on a missing solution.
    payload, _status = read_one(oid)
    app.logger.debug(pformat(payload))
    data = send_deployment_request_to_the_dac(payload)
    resp_json = data.json()
    app.logger.debug(pformat(resp_json))
    return resp_json, 201
def deployment_update(oid, solutionDeploymentDetails):
    """
    Updates an existing solution in the solutions list with the deployed status.

    :param oid: id of the solution
    :param solutionDeploymentDetails: solution details to update
    :return: updated solution
    """
    app.logger.debug(solutionDeploymentDetails)

    # Does the solutions exist in solutions list?
    existing_solution = Solution.query.filter(
        Solution.id == oid
    ).one_or_none()

    # Does solutions exist?
    if existing_solution is not None:
        schema = SolutionSchema(many=False)
        update_solution = schema.load(solutionDeploymentDetails, session=db.session)
        update_solution.id = oid
        update_solution.lastUpdated = ModelTools.get_utc_timestamp()
        # Only overwrite the deployment fields that were actually supplied;
        # absent keys keep the freshly-loaded values.
        update_solution.deployed = solutionDeploymentDetails.get('deployed', update_solution.deployed)
        update_solution.deploymentState = solutionDeploymentDetails.get('deploymentState', update_solution.deploymentState)
        update_solution.statusId = solutionDeploymentDetails.get('statusId', update_solution.statusId)
        update_solution.statusCode = solutionDeploymentDetails.get('statusCode', update_solution.statusCode)
        update_solution.statusMessage = solutionDeploymentDetails.get('statusMessage', update_solution.statusMessage)
        db.session.merge(update_solution)
        db.session.commit()

        # Return the updated solution in the response.
        schema = SolutionDeploymentSchema(many=False)
        data = schema.dump(update_solution)
        return data, 200
    # otherwise, nope, deployment doesn't exist, so that's an error
    else:
        abort(404, f"Solution {oid} not found")
def send_deployment_request_to_the_dac(sol_json_payload):
    """
    POST the serialized solution to the Deployment-as-Code (DaC) service.

    :param sol_json_payload: serialized solution dict to send
    :return: the requests.Response object from the DaC service
    :raises requests.exceptions.Timeout: when the DaC does not answer in time
    """
    url = f"http://{os.environ['GCP_DAC_URL']}/api/solution/"
    app.logger.debug(f"url: {url}")
    app.logger.debug(f"data: {sol_json_payload}")
    headers = {'Content-Type': "application/json"}
    # FIX: requests has no default timeout; without one a stalled DaC
    # service would hang this request (and its worker) forever.
    response = requests.post(url, data=json.dumps(sol_json_payload),
                             headers=headers, timeout=30)
    app.logger.debug(pformat(response))
    return response
| true |
de7f3d5ab6bb2f29f7951974483170f9f0144848 | Python | alanespinozaz/S1-TAREA_1 | /14.py | UTF-8 | 780 | 3.953125 | 4 | [] | no_license |
# """ Determinar si un número entero proporcionado por el usuario es primo.
# Un número primo es un entero que no tiene más divisores que él mismo y la unidad. """
class Ejemplo14:
    """Determine whether an integer supplied by the user is prime.

    A prime number is an integer with no divisors other than itself and 1.
    """

    def __init__(self):
        pass

    @staticmethod
    def es_primo(num):
        """Return True when *num* is prime, False otherwise.

        Trial division up to sqrt(num); numbers below 2 are not prime.
        """
        if num < 2:
            return False
        divisor = 2
        while divisor * divisor <= num:
            if num % divisor == 0:
                return False
            divisor += 1
        return True

    def evaluarprimo(self):
        """Read a number from the user and report whether it is prime.

        BUG FIX: the original loop (`res = num/2; if res < 0`) never
        advanced and both branches printed "no es primo"; the check is now
        delegated to es_primo().
        """
        num = int(input("ingrese el numero:"))
        if self.es_primo(num):
            print("numero:", num, "es primo")
        else:
            print("numero:", num, "no es primo")
# Script entry point: build the helper object and run one interactive check.
eje14 = Ejemplo14()
eje14.evaluarprimo()
| true |
def bubble_sort(array):
    """Sort *array* in place (ascending) and return it.

    FIX: the original swapped non-adjacent elements (a selection-sort
    variant); this is a classic bubble sort with early exit -- repeatedly
    swap adjacent out-of-order pairs and stop as soon as a full pass makes
    no swap.
    """
    length = len(array)
    if length <= 1:
        return array
    for unsorted_end in range(length - 1, 0, -1):
        swapped = False
        for j in range(unsorted_end):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:
            # Already sorted -- no need for further passes.
            break
    return array
def selection_sort(arr):
    """Sort *arr* in place (ascending) via selection sort and return it.

    FIX: the original was a stub that only handled len <= 1 and fell
    through returning None for anything longer; this completes the
    algorithm (find the minimum of the unsorted tail and swap it forward).
    """
    if len(arr) <= 1:
        return arr
    for i in range(len(arr) - 1):
        min_idx = i
        for j in range(i + 1, len(arr)):
            if arr[j] < arr[min_idx]:
                min_idx = j
        if min_idx != i:
            arr[i], arr[min_idx] = arr[min_idx], arr[i]
    return arr
# Quick manual check: sort a reversed list and print the result.
arr = [4, 3, 2, 1]
print(bubble_sort(arr))
d552ee40a25fda6a17f3416a6bebf26dde74dcf6 | Python | alegarpa/warmupp2 | /logincounter/tests.py | UTF-8 | 5,554 | 2.734375 | 3 | [] | no_license | from django.test import TestCase, Client
from logincounter.models import User
import json
class TestUsers(TestCase):
    """Exercise the /users/add/ and /users/login/ JSON endpoints."""

    # Input longer than the model's maximum field length, used to trigger
    # the username/password length-validation error codes.
    MAX_LENGTH_INPUT = "abcdefghjiklmnopqrstuvwxyzabcdefghjiklmnopqrstuvwxyzabcdefghjiklmnopqrstuvwxyzabcdefghjiklmnopqrstuvwxyzabcdefghjiklmnopqrstuvwxyz"

    def setUp(self):
        self.client = Client()
        self.user1 = User.objects.create(user="user1", password="password")
        self.user2 = User.objects.create(user="user2", password="password")

    def _post(self, path, user, password):
        """POST JSON credentials to *path*; return (response, decoded body)."""
        response = self.client.post(
            path,
            data=json.dumps({'user': user, 'password': password}),
            content_type="application/json",
        )
        return response, json.loads(response.content)

    def _get_or_none(self, username):
        """Return the User named *username*, or None when it does not exist."""
        try:
            return User.objects.get(user=username)
        except User.DoesNotExist:
            return None

    def testDefault(self):
        # Newly created users start with a login count of 1.
        self.assertEqual(self.user1.login_count, 1)
        self.assertEqual(self.user2.login_count, 1)

    def testAdd(self):
        response, result = self._post('/users/add/', 'user3', 'password3')
        user3 = User.objects.get(user='user3')
        self.assertEqual(200, response.status_code)
        self.assertEqual(user3.login_count, result['count'])
        self.assertEqual(User.SUCCESS, result['errCode'])

    def testLogin(self):
        response, result = self._post('/users/login/', self.user1.user, self.user1.password)
        user1 = User.objects.get(user=self.user1.user)
        self.assertEqual(200, response.status_code)
        self.assertEqual(user1.login_count, result['count'])
        self.assertEqual(User.SUCCESS, result['errCode'])

    def testDuplicateUser(self):
        response, result = self._post('/users/add/', self.user1.user, 'wrong password')
        user1 = User.objects.get(user=self.user1.user)
        self.assertEqual(200, response.status_code)
        # The stored password must not have been overwritten.
        self.assertEqual(self.user1.password, user1.password)
        self.assertEqual(User.ERR_USER_EXISTS, result['errCode'])

    def testInvalidUser(self):
        response, result = self._post('/users/login/', 'bad user', 'wrong password')
        self.assertEqual(200, response.status_code)
        self.assertIsNone(self._get_or_none('bad user'))
        self.assertEqual(User.ERR_BAD_CREDENTIALS, result['errCode'])

    def testInvalidPassword(self):
        response, result = self._post('/users/login/', self.user1.user, 'wrong password')
        user1 = User.objects.get(user=self.user1.user)
        self.assertEqual(200, response.status_code)
        # A failed login must not bump the login counter.
        self.assertEqual(self.user1.login_count, user1.login_count)
        self.assertEqual(User.ERR_BAD_CREDENTIALS, result['errCode'])

    def testBlankUser(self):
        response, result = self._post('/users/add/', '', 'wrong password')
        self.assertEqual(200, response.status_code)
        self.assertIsNone(self._get_or_none(''))
        self.assertEqual(User.ERR_BAD_USERNAME, result['errCode'])

    def testBlankPassword(self):
        # A blank password is allowed; only the username has constraints.
        response, result = self._post('/users/add/', 'asdf', '')
        self.assertEqual(200, response.status_code)
        self.assertIsNotNone(self._get_or_none('asdf'))
        self.assertEqual(User.SUCCESS, result['errCode'])

    def testLongUser(self):
        response, result = self._post('/users/add/', self.MAX_LENGTH_INPUT, 'wrong password')
        self.assertEqual(200, response.status_code)
        self.assertIsNone(self._get_or_none(self.MAX_LENGTH_INPUT))
        self.assertEqual(User.ERR_BAD_USERNAME, result['errCode'])

    def testLongPassword(self):
        response, result = self._post('/users/add/', 'bad user', self.MAX_LENGTH_INPUT)
        self.assertEqual(200, response.status_code)
        self.assertIsNone(self._get_or_none('bad user'))
        self.assertEqual(User.ERR_BAD_PASSWORD, result['errCode'])

    def testResetFixture(self):
        response, result = self._post('/users/add/', 'user323', 'password')
        self.assertEqual(200, response.status_code)
        self.assertEqual(User.SUCCESS, result['errCode'])
        User.resetFixture()
        # BUG FIX: the original looked up '323' (a name that was never
        # created), making the post-reset check vacuous.
        self.assertIsNone(self._get_or_none('user323'))
import math

from pyspark import SparkContext

sc = SparkContext()

yeet = [2.3, 3.4, 4.4, 2.4, 3.3, 4.0]

# Parallelize the list, yeet, into 2 partitions
parallel_yeet = sc.parallelize(yeet, 2)

# Collect all of the yeets back to the driver
print(parallel_yeet.collect())

# Take two elements from the list
print(parallel_yeet.take(2))

# Get the number of partitions
print(parallel_yeet.getNumPartitions())

temp_data = [59, 58, 34, 43, 23, 42]
parallel_temp_data = sc.parallelize(temp_data, 2)
print(parallel_temp_data.collect())


def f(ls):
    """Sum the doubled values of *ls* whose double is divisible by 4,
    then return the square root of that sum.

    BUG FIX: the original file defined this exact function twice in a
    row; the byte-identical duplicate has been removed.
    """
    s = 0
    for i in ls:
        if (i * 2) % 4 == 0:
            s += i * 2
    return math.sqrt(s)


# Local (driver-side) computation for comparison.
print(f(range(100000)))

# The same computation expressed with Spark transformations.
values = sc.parallelize(range(100000))
new = lambda x, y: x + (y * 2)
print(math.sqrt(values.filter(lambda x: (x * 2) % 4 == 0).reduce(new)))

print(
    math.sqrt(
        values.map(lambda n: n * 2)
        .filter(lambda n: n % 4 == 0)
        .reduce(lambda a, b: a + b if b % 4 == 0 else a)
    )
)
| true |
aadabbcad1d951c1333eaca0b102bc3ab21e72ac | Python | pytest-dev/pytest-xdist | /src/xdist/scheduler/loadfile.py | UTF-8 | 2,172 | 2.84375 | 3 | [
"MIT"
] | permissive | from .loadscope import LoadScopeScheduling
from xdist.remote import Producer
class LoadFileScheduling(LoadScopeScheduling):
"""Implement load scheduling across nodes, but grouping test test file.
This distributes the tests collected across all nodes so each test is run
just once. All nodes collect and submit the list of tests and when all
collections are received it is verified they are identical collections.
Then the collection gets divided up in work units, grouped by test file,
and those work units get submitted to nodes. Whenever a node finishes an
item, it calls ``.mark_test_complete()`` which will trigger the scheduler
to assign more work units if the number of pending tests for the node falls
below a low-watermark.
When created, ``numnodes`` defines how many nodes are expected to submit a
collection. This is used to know when all nodes have finished collection.
This class behaves very much like LoadScopeScheduling, but with a file-level scope.
"""
def __init__(self, config, log=None):
super().__init__(config, log)
if log is None:
self.log = Producer("loadfilesched")
else:
self.log = log.loadfilesched
def _split_scope(self, nodeid):
"""Determine the scope (grouping) of a nodeid.
There are usually 3 cases for a nodeid::
example/loadsuite/test/test_beta.py::test_beta0
example/loadsuite/test/test_delta.py::Delta1::test_delta0
example/loadsuite/epsilon/__init__.py::epsilon.epsilon
#. Function in a test module.
#. Method of a class in a test module.
#. Doctest in a function in a package.
This function will group tests with the scope determined by splitting
the first ``::`` from the left. That is, test will be grouped in a
single work unit when they reside in the same file.
In the above example, scopes will be::
example/loadsuite/test/test_beta.py
example/loadsuite/test/test_delta.py
example/loadsuite/epsilon/__init__.py
"""
return nodeid.split("::", 1)[0]
| true |
66a9c733e91025c7d548c3d3515595b448978dc0 | Python | JeyFernandez/proyecto-de-pytohn- | /este es mi codigo/main.py | UTF-8 | 1,379 | 3.65625 | 4 | [] | no_license | from Registro import Registro
from Matricula import Matricula
if __name__ == '__main__':
    registro = None  # created once the user assigns a career (option 1)
    run = True
    while run:
        print("\n|REGISTRO DE ESTUDIENTES|\n")
        # Robustness: a non-numeric choice no longer crashes the program.
        try:
            select = int(input("Seleccione la opcio que hara:\n1-Asignar Carrera\n2-Matricular estudiantes\n3-Revisar la matricula\n4-Salir\n:"))
        except ValueError:
            print("\nOpcion invalida.")
            continue
        if select == 1:
            nombre = input("Dijite el nombre de la carrera: ")
            registro = Registro(nombre)
            print("\nSe asigno la carrera ~{}~ correctamente!".format(registro.Asisnar_carrera(nombre)))
        elif select == 2:
            # BUG FIX: the original raised NameError when option 2 was used
            # before option 1 ever created `registro`.
            if registro is None:
                print("\nPrimero debe asignar una carrera (opcion 1).")
            else:
                nombre_y_apellido = input("Nombre y Apellido: ")
                edad = input("Edad: ")
                cedula = input("Numero de cedula: ")
                sexo = input("Sexo: ")
                estado_civil = input("Estado Civil: ")
                direccion = input("Direccion: ")
                numero_telefono = input("Numero de Celular: ")
                correo_electronico = input("Correo Electronico: ")
                matricula = Matricula(nombre_y_apellido, edad, cedula, sexo, estado_civil, direccion, numero_telefono, correo_electronico)
                registro.matricular_estudiante(matricula)
        elif select == 3:
            # Same guard as option 2: the registry must exist first.
            if registro is None:
                print("\nPrimero debe asignar una carrera (opcion 1).")
            else:
                print("|MATRICULA ACTUAL|")
                for i in registro.ver_matricula():
                    print(i)
        elif select == 4:
            run = False
def direction_repr_str(direction_class, direction):
    """
    Converts a direction to string.
    :param direction: The direction to convert to string. Should be one of the
    constants of the Direction class ([UP, DOWN, LEFT, RIGHT, NOT_MOVING)
    :return: A string representation for a valid direction input or the string
    'UNKNOWN' if the input direction is not valid.
    """
    # Compare against each known constant in turn (same precedence as the
    # original if/elif chain) and return its label on the first match.
    labels = (
        (direction_class.NOT_MOVING, 'NOT MOVING'),
        (direction_class.UP, 'UP'),
        (direction_class.DOWN, 'DOWN'),
        (direction_class.LEFT, 'LEFT'),
        (direction_class.RIGHT, 'RIGHT'),
    )
    for constant, label in labels:
        if direction == constant:
            return label
    return 'UNKNOWN'
| true |
38d42ce3da88f3ced4649a5b033405f4e23834b7 | Python | ZoranPandovski/al-go-rithms | /data_structures/Tree/Binary-tree/left-view.py | UTF-8 | 2,623 | 3.59375 | 4 | [
"CC0-1.0"
] | permissive |
#Initial Template for Python 3
import atexit
import io
import sys
import queue
from collections import defaultdict # default dict used as a map, to store node-value mapping.
# Competitive-programming fast-I/O pattern: read ALL of stdin up front and
# replace the builtin input() with an iterator over the buffered lines.
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
# Collect everything printed into an in-memory buffer instead of writing
# to stdout line by line.
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER


@atexit.register
def write():
    # Flush the collected output to the real stdout once, at interpreter exit.
    sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
def LeftView(root):
    '''
    Print the left view of the tree rooted at *root* -- the first node of
    every level, top to bottom -- space separated, without a newline.

    :param root: root of given tree (may be None).
    :return: None; output goes to stdout.
    '''
    # FIX: the original crashed on an empty tree (None root).
    if root is None:
        return
    leftmost = []            # value of the first node seen on each level
    frontier = [(root, 0)]   # BFS queue of (node, level) pairs
    head = 0                 # index pointer: O(1) dequeue vs O(n) list.pop(0)
    while head < len(frontier):
        node, level = frontier[head]
        head += 1
        if level == len(leftmost):
            # BFS visits each level left-to-right, so the first node seen on
            # a new level is its leftmost node.
            leftmost.append(node.data)
        if node.left is not None:
            frontier.append((node.left, level + 1))
        if node.right is not None:
            frontier.append((node.right, level + 1))
    for value in leftmost:
        print(value, end=" ")
class Node:
    """A single binary-tree node: one payload value plus two child links."""

    def __init__(self, val):
        self.data = val    # payload stored at this node
        self.left = None   # left child, None until attached
        self.right = None  # right child, None until attached
# Tree Class
class Tree:
    """Binary tree built incrementally from (parent, child, side) triples."""

    def __init__(self):
        self.root = None
        # value -> Node lookup so a parent can be found in O(1) on insert
        self.map_nodes = defaultdict(Node)

    def Insert(self, parent, child, dir):
        """Attach *child* under *parent* on side *dir* ('L' or anything else
        for right). The very first call also creates the root node.
        """
        if self.root is None:
            # First edge: materialise both the root and its first child.
            root_node = Node(parent)
            child_node = Node(child)
            if dir == 'L':
                root_node.left = child_node
            else:
                root_node.right = child_node
            self.root = root_node
            self.map_nodes[parent] = root_node
            self.map_nodes[child] = child_node
            return
        # Subsequent edges: the parent is expected to already be in the map.
        parent_node = self.map_nodes[parent]
        child_node = Node(child)
        self.map_nodes[child] = child_node
        if dir == 'L':
            parent_node.left = child_node
        else:
            parent_node.right = child_node
        return
if __name__ == '__main__':
    # One integer T, then T test cases of: node count, followed by
    # space-separated (parent child L|R) triples on one line.
    test_cases = int(input())
    for cases in range(test_cases):
        n = int(input()) # number of nodes in tree
        a = list(map(str, input().strip().split())) # parent child info in list
        # construct the tree according to given list
        tree = Tree()
        i = 0
        while (i < len(a)):
            parent = int(a[i])
            child = int(a[i + 1])
            dir = a[i + 2]
            i += 3
            tree.Insert(parent, child, dir) # Insert the nodes in tree.
        LeftView(tree.root)
        print()
'''
# Node Class:
class Node:
def __init__(self,val):
self.data = val
self.left = None
self.right = None
''' | true |
3a0ff29ee1ef3c21586885d3190e3e537b0b347b | Python | geolee1/Covid-Entry-Log | /src/console/SearchConsole.py | UTF-8 | 1,634 | 3.359375 | 3 | [
"MIT"
] | permissive | from core.database import find_db, time_search_db, get_all_db
from core.person import print_person
from core.setting import get_search_time
from core.tools import menu_input, yes_or_no, clear
def type_input() -> str:
    """Prompt until the user picks a search field.

    Accepts either the Korean menu word or its English equivalent
    (case-insensitive) and returns the canonical field name:
    'name', 'phone', 'date' or 'all'.
    """
    while True:
        user_input = input("검색할 항목을 선택하세요. (이름/전화번호/날짜/모두)\n>> ")
        if user_input == "이름" or user_input.lower() == "name":
            return "name"
        elif user_input == "전화번호" or user_input.lower() == "phone":
            return "phone"
        elif user_input == "날짜" or user_input.lower() == "date":
            return "date"
        elif user_input == "모두" or user_input.lower() == "all":
            return "all"
        else:
            # Unknown choice: tell the user ("no such item") and ask again.
            print("없는 항목입니다.\n")
def search_person():
    """Interactive search loop.

    Looks people up by the chosen field (or lists everyone), then finds
    everybody who entered within ``get_search_time()`` hours of a selected
    person. Repeats until the user declines to search again.
    """
    while True:
        clear()
        type = type_input()
        if type == "all":
            persons = get_all_db()
        else:
            print()
            # Prompt: "enter the value to search for"
            value = input("검색할 내용을 입력하세요.\n>> ")
            persons = find_db(type, value)
        print_person(persons, index=True)
        if persons != []:
            # Let the user pick one result by its printed index, then show
            # everyone whose entry time falls inside the configured window.
            select = menu_input(
                f"선택한 사람으로부터 {get_search_time()} 시간 동안 출입한 사람을 검색합니다.\n검색할 사람의 번호를 입력해주세요.", 0, len(persons)-1)
            time_persons = time_search_db(persons[select])
            print_person(time_persons)
        if yes_or_no("다시 검색하시겠습니까?"):
            continue
        else:
            return
| true |
b383ea732e760828c027104f515a2ebdc9b2af94 | Python | sasazlat/UdacitySolution-ITSDC | /data_structure/other_data_structures.py | UTF-8 | 5,563 | 4.34375 | 4 | [] | no_license |
# coding: utf-8
# # Other Data Structures [optional]
#
# The purpose of this notebook is to show you some of the many other data
# structures you can use without going into too much detail. You can learn
# more by reading [documentation from Python's collections
# library](https://docs.python.org/3.3/library/collections.html).
# ## 1. Tuples
#
# The only standard library data structure that we haven't discussed. The
# tuple is an immutable (unchangeable) sequence of Python objects.
#
# The tuple is very similar to a list. You can read more about it in the
# [Python tuple
# documentation](https://docs.python.org/3/tutorial/datastructures.html#tuples-and-sequences)
# In[ ]:
# tuples are created with (parentheses)
my_tuple = (1, 2, 3)
print(my_tuple)
print(type(my_tuple))

# elements can be accessed just like they are with lists.
print(my_tuple[0])
print(my_tuple[1])
print(my_tuple[2])

# there are some things you can't do with tuples due to them being
# immutable.  BUG FIX: as a plain script the bare assignment below raised
# a TypeError and aborted the rest of the file; catch the expected error
# so the whole demo still runs end to end.
try:
    my_tuple[1] = 4
except TypeError as err:
    print(err)

# but there are also some things you CAN do with tuples
# that you can't do with lists...
t1 = ('a', 'b', 'c')
t2 = (1, 2, 3)
set_of_tuples = set()
set_of_tuples.add(t1)
set_of_tuples.add(t2)
print(set_of_tuples)

# lists are unhashable, so they cannot be set members; catch the expected
# TypeError instead of crashing the script.
L1 = ['a', 'b', 'c']
L2 = [1, 2, 3]
set_of_lists = set()
try:
    set_of_lists.add(L1)
    set_of_lists.add(L2)
except TypeError as err:
    print(err)
print(set_of_lists)
# ## 2. Namedtuple
#
# Very similar to a tuple except the fields can be named as well! I use
# namedtuples when I want to use `object.property` notation but don't want to
# define a full class.
# In[ ]:
# namedtuple lives in the collections library
from collections import namedtuple

# Point is a brand-new record type with two named fields, x and y
# (a space-separated field string is equivalent to a list of names).
Point = namedtuple("Point", "x y")

# build one concrete point
p1 = Point(5, -3)
print(p1)

# the fields can be reached positionally, like a plain tuple...
for position in (0, 1):
    print(p1[position])

# ...or through the field names
for field_value in (p1.x, p1.y):
    print(field_value)
# ## 3. Counter
# Often we want to count how many times something occurs. The code below
# demonstrates how to use a `Counter` to count the number of occurrences of
# various characters in a string.
# In[ ]:
from collections import Counter

string = "the quick brown fox jumped over the lazy dog"
# Counter can tally an iterable directly: feeding it the string counts
# every character in one shot (same result as a manual `+= 1` loop).
character_counter = Counter(string)
character_counter.most_common()

# It looks like this string had 8 spaces, 4 e's, 4 o's, etc...

# Counters never raise KeyError -- a missing key simply counts as 0.
# how many capital A's are in the string above?
print(character_counter["A"])

# but how many lowercase a's?
print(character_counter["a"])
# ## 4. defaultdict
#
# A default dict is best explained by example. Let's go back to the "three
# boxes of tickets" example from earlier.
# In[ ]:
TICKET_BOXES = {
    "low": [],
    "medium": [],
    "high": []
}

unfiled_tickets = [
    {
        "priority": "high",
        "description": "slammed on brakes"
    },
    {
        "priority": "low",
        "description": "windshield chipped"
    },
    {
        "priority": "low",
        "description": "failed to use turn signal"
    },
    {
        "priority": "medium",
        "description": "did not come to complete stop at stop sign"
    }
]


def file_ticket(ticket):
    """File *ticket* into the box matching its priority (KeyError if unknown)."""
    priority = ticket['priority']
    TICKET_BOXES[priority].append(ticket)


for ticket in unfiled_tickets:
    file_ticket(ticket)
print(TICKET_BOXES)

# so far so good! But what if we try to file a ticket
# with a priority "highest" (as we saw in Jira)?
new_ticket = {
    "priority": "highest",
    "description": "vehicle crashed!"
}
# BUG FIX: as a plain script the KeyError below aborted the whole file;
# catch it so the rest of the demo (the fixes) still executes.
try:
    file_ticket(new_ticket)
except KeyError as err:
    print("KeyError:", err)


# as expected, we get a key error... one way to fix this is as follows
def file_ticket_fixed(ticket):
    """Like file_ticket, but creates the box on demand."""
    priority = ticket['priority']
    # new code
    if priority not in TICKET_BOXES:
        TICKET_BOXES[priority] = []
    TICKET_BOXES[priority].append(ticket)


file_ticket_fixed(new_ticket)
print(TICKET_BOXES)

# OR we can use a "defaultdict"
from collections import defaultdict

TICKET_BOXES = defaultdict(list)  # missing keys get a fresh empty list


def file_ticket(ticket):
    priority = ticket['priority']
    TICKET_BOXES[priority].append(ticket)


for ticket in unfiled_tickets:
    file_ticket(ticket)
file_ticket(new_ticket)
print(TICKET_BOXES)
# When you try to access a key that doesn't exist, defaultdict adds that key to
# the dictionary and associates a **default** value with it (in this case a
# list).
#
# If you want to learn more you can read the [documentation on
# defaultdict](https://docs.python.org/3.3/library/collections.html#collections.defaultdict)
# ## 5. Other data structures from `collections`
# In[ ]:
from collections import deque, OrderedDict

# a deque supports cheap pushes and pops at BOTH ends
d = deque([4, 5, 6])
print(d)

d.append(7)          # grow on the right
print(d)

d.appendleft(3)      # grow on the left
print(d)

last = d.pop()       # shrink on the right
print(f"last element was {last}")
print(f"now d is {d}")

first = d.popleft()  # shrink on the left
print(f"first element was {first}")
print(f"now d is {d}")

# an OrderedDict remembers the order in which its keys were inserted
od = OrderedDict()
for key, value in (('a', 1), ('b', 2), ('c', 3)):
    od[key] = value
print(od)
| true |
92df7e86f102356fa8dc4d1f29c47f3398dac423 | Python | andreu-gonzalez/socio | /sociocontroller.py | UTF-8 | 1,905 | 2.984375 | 3 | [] | no_license | from socio import socio
class sociocontroller:
    """Controller keeping the registry of socios and the product prices."""

    def __init__(self):
        self.listasocios = {}  # id_socio -> socio object
        # price per kilo of each product
        self.productos = {'naranja': 5, 'platano': 10, 'manzana': 3}

    def addsocio(self, socio):
        """Register *socio* unless its id or DNI is already registered.

        BUG FIX: the original compared the DNI against the dict KEYS
        (which are ids); it now compares against the stored socios' DNIs.
        """
        if socio.getIdsocio() in self.listasocios:
            return False
        if any(s.getDni() == socio.getDni() for s in self.listasocios.values()):
            return False
        self.listasocios[socio.getIdsocio()] = socio
        return True

    def borrarsocio(self, id_socio):
        """Remove the socio with *id_socio*; return True when removed.

        BUG FIX: the original tested ``if id in self.listasocios`` -- the
        builtin ``id`` function, never the parameter -- so the guard was
        always False and nothing was ever deleted.
        """
        if id_socio in self.listasocios:
            del self.listasocios[id_socio]
            return True
        return False

    def listar(self):
        """Return the whole id -> socio registry."""
        return self.listasocios

    def registrarsocio(self, id_socio, nombre, kilos):
        """Record *kilos* of product *nombre* for socio *id_socio*."""
        if id_socio in self.listasocios and nombre in self.productos:
            self.listasocios[id_socio].setRegistro(nombre, kilos)
            return True
        return False

    def lisrarproductos(self):
        # Name kept for backward compatibility (original typo for
        # "listarproductos"). Prints every product with its price.
        for key in self.productos:
            print(key, ":", self.productos[key])

    def actualizasaldo(self, id_socio):
        """Turn the socio's pending registrations into balance, then clear them."""
        if id_socio not in self.listasocios:
            return False
        saldo = 0.0
        for clave, valor in self.listasocios[id_socio].getregistro().items():
            saldo += self.productos[clave] * float(valor)
        self.listasocios[id_socio].actualizaSaldo(saldo)
        self.listasocios[id_socio].delRegistros()
        return True

    def fichaSocio(self, id_socio):
        """Return a printable record for one socio, or '' when unknown.

        BUG FIX: the original looped over the whole registry and returned
        the LAST socio's data regardless of *id_socio*.
        """
        socio = ""
        if id_socio in self.listasocios:
            valor = self.listasocios[id_socio]
            socio = ("Id_socio: " + valor.getIdsocio() + "tDni: " + valor.getDni()
                     + "Nombre: " + valor.getNombre() + "Apellidos: " + valor.getApellido()
                     + "fecha: " + str(valor.getFecha())
                     + "Saldo: " + str("{:10.2f}".format(valor.getSaldo()))
                     + "Registros Pendientes: " + str(valor.getregistro()))
        return socio
85235c00e1aedc41ff781a44dc38b26cb82476c8 | Python | Jozkings/wag | /degrees.py | UTF-8 | 2,124 | 3.578125 | 4 | [] | no_license | class Degrees(object):
"""object for saving degree informatians about graph"""
def __init__(self, graph):
self.max_degree_node = None
self.max_degree_value = float("-inf")
self.min_degree_node = None
self.min_degree_value = float("inf")
self.avg_degree = 0
self.degrees = {}
self.graph_ref = graph
self.compute_degrees()
def get_degree(self, node):
"""returns degree of node"""
""":param node: node of the graph"""
return self.degrees[node]
def get_degrees(self):
"""returns all nodes degrees as dictionary"""
return self.degrees
def get_min_degree(self):
"""returns minimal degree as pair: node, node degree value"""
return self.min_degree_node, self.min_degree_value
def get_max_degree(self):
"""returns maximum degree as pair: node, node degree value"""
return self.max_degree_node, self.max_degree_value
def get_avg_degree_value(self):
"""returns average degree value of nodes"""
return self.avg_degree
def get_stats_degrees(self):
"""returns maximum and minimal degree as pair: node, node degree value and average degree value"""
return (self.max_degree_node, self.max_degree_value), \
(self.min_degree_node, self.min_degree_value), self.avg_degree
def compute_degrees(self):
"""computes degrees of nodes in graph and saves them (together with minimal, maximum and avarage degree)"""
degree_sum = 0
for key in self.graph_ref:
degree = len(self.graph_ref[key])
self.degrees[key] = degree
if degree < self.min_degree_value:
self.min_degree_value = degree
self.min_degree_node = key
if degree > self.max_degree_value:
self.max_degree_value = degree
self.max_degree_node = key
degree_sum += degree
self.avg_degree = round(degree_sum / len(self.degrees), 5) if len(self.degrees) > 0 else 0 | true |
64f42544224f6681c30780e7e21b6b0d56d3d7c2 | Python | davidliii/Automatic-INO-Deploy | /arduino_command_line.py | UTF-8 | 2,200 | 2.875 | 3 | [] | no_license | #===================================================================================================
# Developped by: David Li
# Email: davidli2881@gmail.com
# Date: 1/14/2020
#
# This is a lightweight arduino command line interface, created with the purpose of
# selecting LED strip patterns more easily (as each pattern uses a different .ino file)
# Goal of this project is to eventually develop a GUI that allows a user to select a
# folder containing all of the LED patterns they wish to cycle through, adjust upload
# parameters, and run any sketch without needing to open the IDE. A naive saving method
# will be implemented by creating a save.txt file containing information about previous
# upload preferences and previously loaded folders. The save.txt will be compiled using json
#objects and updated everytime a new folder is uploaded/deleted or preferences change.
# Important setup:
# Arduino program folder must be set to environment PATH
# Com port number needs to be predetermined (automatic detection in the future)
#
#===================================================================================================
import subprocess
import os
def open_ide(): #opens the arduino IDE
    """Launch the Arduino IDE with no sketch; blocks until the IDE exits.

    Requires the ``arduino`` executable to be on the system PATH.
    """
    os.system("arduino")
def open_ino(*filepaths): #open input files in arduino IDE
    """Open each given sketch path in the Arduino IDE, one after another.

    Each call blocks until that IDE instance exits.
    """
    for sketch_path in filepaths:
        os.system("arduino " + sketch_path)
def upload_ino(board, chip, port, filepath): #compiles and uploads sketch according to user parameters
    """Compile and upload ``filepath`` for the given board, chip and port."""
    flags = "".join([
        parse_board_flag(board, chip),
        parse_port_flag(port),
        parse_upload_flag(filepath),
    ])
    os.system("arduino" + flags)
def parse_board_flag(board, chip): #generates board selection command
    """Return the ``--board`` flag fragment for the Arduino CLI.

    Bug fix: the original concatenated the literal string ``"chip"`` instead
    of the ``chip`` argument, producing ``:cpu=chip`` for every board.
    """
    command = " --board arduino:avr:" + board + ":cpu=" + chip
    return command
def parse_port_flag(port): #generates port selection command
    """Return the ``--port <port>`` flag fragment for the Arduino CLI."""
    return "".join((" --port ", port))
def parse_upload_flag(filepath): #generates upload selection command
    """Return the ``--upload <sketch>`` flag fragment for the Arduino CLI.

    Bug fix: the original assigned to the misspelled name ``commmand`` and
    then returned the undefined name ``command``, raising NameError on
    every call.
    """
    command = " --upload " + filepath
    return command
if __name__ == "__main__":
    # Smoke test when run as a script: print the installed Arduino version.
    os.system("arduino --version")
| true |
fd1622a3b7788afd4d3675257b60519f7d66842d | Python | Scarygami/aoc2019 | /19/19.py | UTF-8 | 1,686 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | import os
import sys
# Make the repository root importable so the shared Intcode VM can be found.
currentdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
try:
    from lib.intcode import IntcodeVM
except ImportError:
    # Fail fast with a readable message instead of a raw traceback.
    print("Intcode library could not be found")
    exit(1)
def check_square(machine, startx, starty, size=100):
    """Return True when the size x size square anchored at (startx, starty)
    fits inside the tractor beam, judged by its top and bottom rows."""
    return (check_row(machine, startx, starty, size)
            and check_row(machine, startx, starty + size - 1, size))
def check_row(machine, startx, y, size=100):
    """Return True when both endpoints of the length-``size`` row starting at
    (startx, y) are inside the beam (the beam is contiguous within a row)."""
    return (check_field(machine, startx, y)
            and check_field(machine, startx + size - 1, y))
def check_field(machine, x, y):
    """Query the drone program: True when cell (x, y) is inside the beam."""
    return machine.run([x, y]) == [1]
def find_square(machine, size=100):
    """Scan the beam row by row and return the (x, y) of the top-left corner
    of the first size x size square that fits entirely inside the beam.

    Relies on the beam being contiguous within each row and on its left
    edge moving monotonically rightwards as y grows (tracked via ``minx``).
    """
    y = -1
    minx = 0
    while True:
        y = y + 1
        startx = None
        # Resume at minx (left edge never moves left).  NOTE(review): only
        # 5 columns are probed, i.e. this assumes the left edge advances by
        # at most 4 cells per row -- confirm for the given input program.
        for x in range(minx, minx + 5):
            if check_field(machine, x, y):
                startx = x
                minx = x
                break
        if startx is None:
            # No tractor beam in this row
            continue
        x = startx
        # Slide the candidate square right while its top row stays in the
        # beam; accept the first position where the whole square fits.
        while check_row(machine, x, y, size):
            if check_square(machine, x, y, size):
                return (x, y)
            x = x + 1
# Load the drone-control Intcode program next to this script.
machine = IntcodeVM(IntcodeVM.read_intcode(os.path.join(currentdir, "input.txt")), silent=True)
# Part 1: count beam-affected cells in the 50x50 area closest to the emitter.
affected = []
for y in range(50):
    for x in range(50):
        if check_field(machine, x, y):
            affected.append((x, y))
print("Part 1: %s" % len(affected))
# Part 2: encode the top-left corner of the first 100x100 square that fits.
x, y = find_square(machine, 100)
print("Part 2: %s" % (x * 10000 + y))
| true |
25fd0539a07bcf835d5a104adba05848f89e20fb | Python | littlesnell/learngit | /python/ceshi_little.py | UTF-8 | 2,182 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
#encoding=utf-8
# Import the MySQL driver (Python 2 / MySQLdb)
import MySQLdb
# Connect to the database
conn = MySQLdb.connect(host="localhost",user="root",passwd="111111",db="littledog",port=3306,charset="utf8")
cursor = conn.cursor()
# Create the user table (one-off setup, deliberately commented out)
#cursor.execute('create table ceshi (id varchar(20) primary key, name varchar(20),age int,class varchar(50))')
# Menu loop: repeats until option 5 (quit) is chosen
a = 1#seed value so the while condition can be evaluated on the first pass
while a!=5:
    print "1-查询数据"
    print "2-修改数据"
    print "3-删除数据"
    print "4-增添数据"
    print "5-退出"
    # NOTE(review): Python 2 input() eval's whatever the user types;
    # raw_input() plus int() would be safer here.
    a = input("请输入你的选择:")
    if a==1:#NOTE(review): original author flagged this branch as buggy
        # Run a query:
        print "请选择查询的方式"
        print "1-按ID查询"
        print "2-按姓名查询"
        print "3-按年龄查询"
        print "4-按班级查询"
        b =input( "请输入你的选择")
        if b==1:
            id = raw_input('请输入你要查询的ID:')
            cursor.execute('select * from ceshi where id = %s',id)
            values = cursor.fetchall()
            print values
        # NOTE(review): the b==2..4 branches run their query but never fetch
        # or print the results, and the unconditional id='1' query below
        # overwrites whatever result set was produced.
        if b==2:
            name = raw_input('请输入你要查询的姓名')
            cursor.execute('select * from ceshi where name = %s',name)
        if b==3:
            age = raw_input('请输入你要查询的年龄')
            cursor.execute('select * from ceshi where age = %s',age)
        if b==4:
            classes = raw_input('请输入你要查询的班级')
            cursor.execute('select * from ceshi where class = %s',classes)
        cursor = conn.cursor()
        cursor.execute('select * from ceshi where id = %s','1' )
        values = cursor.fetchall()
    #if a==2:
    if a==3:
        # Delete a row by id
        id = raw_input('请输入你要删除数据的ID:')
        # NOTE(review): 'delete *' is not valid SQL; this should read
        # 'delete from ceshi where id = %s'.
        cursor.execute('delete * from ceshi where id = %s',id)
    if a==4:
        # Insert a row (parameterized query, safe from SQL injection)
        id = raw_input('id:')
        name = raw_input('name:')
        age = raw_input('age:')
        classes = raw_input('class:')
        cursor.execute('insert into ceshi (id,name,age,class) values (%s,%s,%s,%s)',[id,name,age,classes])
    #commit the transaction
    conn.commit()
    if a==5:
        # NOTE(review): bare 'exit' only references the builtin without
        # calling it (a no-op); the loop actually ends via the while test.
        exit
| true |
4e2204630faf0c48b5157bfce5672d0ce3498a82 | Python | roni-kemp/python_programming_curricula | /CS1/0200_turtles/project_turtle_funny_face/student_code/funny_face_BCS.py | UTF-8 | 2,041 | 3.734375 | 4 | [
"MIT"
] | permissive | #Brendan Clark-Slakey
#9/27/2017
# Funny Face Project
import turtle, time

# Create and name the turtle that draws the whole face.
tommy = turtle.Turtle()
tommy.shape("turtle")

# Radius used for both eye circles.
size = 30

# Draw the right eye (filled red circle)
tommy.penup()
tommy.color("red")
tommy.fillcolor("red")
tommy.goto(200,70)
tommy.begin_fill()
tommy.pendown()
tommy.circle(size)
tommy.penup()
tommy.end_fill()

# Draw the right eyebrow
tommy.penup()
tommy.color("black")
tommy.goto(175,125)
tommy.setheading(-45)
tommy.pensize(10)
tommy.pendown()
tommy.left(75)
tommy.fd(75)

# Draw the left eye (filled red circle)
tommy.penup()
tommy.color("red")
tommy.fillcolor("red")
tommy.goto(0,70)
tommy.pensize(1)
tommy.begin_fill()
tommy.pendown()
tommy.circle(size)
tommy.penup()
tommy.end_fill()

# Draw the left eyebrow
tommy.penup()
tommy.color("black")
tommy.goto(0,125)
tommy.setheading(-45)
tommy.pensize(10)
tommy.pendown()
tommy.right(155)
tommy.fd(75)

# Draw the smile as a shallow arc: 20 short segments, turning 5 degrees
# left after each (replaces 40 copy-pasted fd(5)/left(5) lines).
tommy.penup()
tommy.color("black")
tommy.goto(50,0)
tommy.setheading(-45)
tommy.pensize(10)
tommy.pendown()
for _ in range(20):
    tommy.fd(5)
    tommy.left(5)

# Draw one of the horns
tommy.penup()
tommy.color("red")
tommy.goto(175,200)
tommy.setheading(80)
tommy.pensize(10)
tommy.pendown()
tommy.fd(50)
tommy.right(130)
tommy.fd(50)

# Draw the other horn
tommy.penup()
tommy.color("red")
tommy.goto(-50,200)
tommy.setheading(60)
tommy.pensize(10)
tommy.pendown()
tommy.fd(50)
tommy.right(130)
tommy.fd(50)

# Shrink the cursor so the turtle is effectively invisible in the result.
tommy.shape("circle")
tommy.shapesize(.01)

time.sleep(4)  # Pause so the finished drawing stays on screen briefly.
e5e5105913dea8c99b217f5759f92190e1c8c7d3 | Python | peterk87/blast2xl | /blast2xl/util.py | UTF-8 | 233 | 2.875 | 3 | [
"MIT"
] | permissive | from typing import Mapping, Dict, Any, Set
def invert_dict(d: Mapping) -> Dict[Any, Set]:
    """Invert *d*, grouping all keys that share a value into a set per value."""
    inverted: Dict[Any, Set] = {}
    for key, value in d.items():
        inverted.setdefault(value, set()).add(key)
    return inverted
| true |
7be5aa5b585cc292f8af5679a5fdbd87f8f6c562 | Python | arita37/d-script | /recurnets/basic_recurrent.py | UTF-8 | 5,622 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive |
# coding: utf-8
# # Basic Recurrent Neural Network
#
# Testing out original code for a simple LSTM to understand the sequential writing of an author from left to right. (To do: bi-directional recurrent LSTMs.)
#
# Details:
# We require two additional layers that I've written to make the dimensions of the input to other layers consistent.
# ### Imports
# In[1]:
import pickle
import numpy
import keras
import time
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils.np_utils import to_categorical
from keras.layers.normalization import BatchNormalization as BN
from keras.layers.core import Layer
from keras.layers.recurrent import LSTM
import theano.tensor as T
import h5py
import random
import numpy as np
from collections import defaultdict
from minibatcher import MiniBatcher
import matplotlib.pylab as plt
# get_ipython().magic(u'matplotlib inline')
# ### New Keras layers for use in the recurrent network
# In[2]:
class Squeeze(Layer):
    '''
    Get rid of any dimensions of size 1.
    First dimension is assumed to be nb_samples.

    Legacy Keras (0.x) custom layer: static shape inference goes through
    the output_shape property, the symbolic op through get_output.
    '''
    def __init__(self, **kwargs):
        super(Squeeze, self).__init__(**kwargs)
    @property
    def output_shape(self):
        input_shape = self.input_shape
        # Keep only dimensions larger than 1, then re-prepend the batch dim.
        # NOTE(review): a symbolic/None batch size would break the numpy
        # comparison below -- confirm input shapes are concrete ints.
        data_shape = tuple( np.array(input_shape)[ np.array(input_shape) > 1 ] )
        return (input_shape[0],)+ data_shape
    def get_output(self, train=False):
        X = self.get_input(train)
        # size = T.prod(X.shape) // X.shape[0]
        # nshape = (X.shape[0], size)
        # return T.reshape(X, output_shape)
        # Theano's squeeze() drops all broadcastable (size-1) axes.
        return X.squeeze()
class Transpose3(Layer):
    '''
    Permute the axes of the input according to transpose_order, e.g.
    (0, 2, 1) swaps the last two axes of a 3D tensor.

    (The original docstring was copied verbatim from Squeeze; this layer
    transposes rather than squeezes.)
    '''
    def __init__(self, transpose_order, **kwargs):
        # transpose_order: tuple of axis indices, e.g. (0, 2, 1)
        self.transpose_order = transpose_order
        super(Transpose3, self).__init__(**kwargs)
    @property
    def output_shape(self):
        input_shape = self.input_shape
        # Permute the static shape with the same axis order as the tensor.
        data_shape = ()
        for j in self.transpose_order:
            data_shape+=(input_shape[j],)
        return data_shape
    def get_output(self, train=False):
        X = self.get_input(train)
        # size = T.prod(X.shape) // X.shape[0]
        # nshape = (X.shape[0], size)
        # return T.reshape(X, output_shape)
        return X.transpose(self.transpose_order)
# ### Data (40 authors, 15 forms per author)
# In[3]:
# Experiment configuration: how many authors and forms per author to use.
num_authors=40
num_forms_per_author=15
hdf5_file = '/work/data/output_shingles_sparse.hdf5'
fIn = h5py.File(hdf5_file, 'r')
authors = []
# Filter on number of forms per author
for author in fIn.keys():
    if len(fIn[author]) > num_forms_per_author:
        authors.append(author)
if len(authors) < num_authors:
    raise ValueError("There are only %d authors with more than %d forms"%(len(authors), num_forms_per_author))
keys = []
# Get all the keys from our hdf5 file: one entry per (author, form, line)
# shingle index, used by MiniBatcher to address individual samples.
for author in authors[:num_authors]: # Limit us to num_authors
    forms = list(fIn[author])
    for form in forms[:num_forms_per_author]: # Limit us to num_form_per_author
        for line_name in fIn[author][form].keys():
            for shingle in range(fIn[author][form][line_name].shape[0]):
                keys.append([(author,form,line_name), shingle])
# Normalization function which scales values from 0 (white) to 1 (black)
normalize = lambda x: 1.0 - x.astype(np.float32)/255.0
m = MiniBatcher(fIn, keys,normalize=normalize, batch_size=32, min_shingles=20*7*num_forms_per_author)
# Draw one fixed test batch; expand_dims adds a singleton channel axis for
# the 2D-convolution input, and labels become one-hot over the authors.
m.batch_size = 32*20
m.set_mode(MiniBatcher.TEST)
[X_test, Y_test] = m.get_batch()
X_test = np.expand_dims(X_test, 1)
Y_test = to_categorical(Y_test, num_authors)
print 'test_size:', X_test.shape, Y_test.shape
m.batch_size = 32*100
m.set_mode(MiniBatcher.TRAIN)
# ### Define the neural network
#
# #### Current architecture
# 1. Convolution2D (48, 12, 12) + Relu + MaxPool (2,2)
# 2. Convolution2D (48, 6, 6 ) + Relu + MaxPool (2,2)
# 3. Convolution2D->1D (48, 6, 35) + Relu
# In[4]:
# Convolutional front-end over 1x120x120 shingle images, followed by an
# LSTM over the resulting feature sequence and a 40-way author softmax.
model = Sequential()
model.add(Convolution2D(48, 12, 12,
                        border_mode='full',
                        input_shape=(1, 120, 120),
                        activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Convolution2D(48, 6, 6,
                        border_mode='full',
                        activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(MaxPooling2D(pool_size=(70,2)))
model.add(Convolution2D(48, 6, 35, activation='relu'))
# Squeeze drops the size-1 axes; Transpose3 swaps the remaining two
# non-batch axes -- presumably yielding (batch, time, features) for the
# LSTM; confirm against the actual conv output shape.
model.add(Squeeze())
model.add(Transpose3((0,2,1)))
model.add(LSTM(output_dim=48, activation='sigmoid', inner_activation='hard_sigmoid'))
model.add(Dense(40, activation='softmax'))
sgd = SGD(lr=0.015, decay=1e-6, momentum=0.5, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
print "Finished compilation with optimization set to SGD"
# Resume from previously saved weights (file must already exist).
model.load_weights('basic_recurrent.hd5')
m.batch_size = 32*100
m.set_mode(MiniBatcher.TRAIN)
# Manual training loop: one freshly drawn batch per "epoch", with a
# weight checkpoint every 50 iterations.
for i in range(500):
    print 'Starting Epoch: ', i
    start_time = time.time()
    (X_train, Y_train) = m.get_batch()
    X_train = np.expand_dims(X_train, 1)
    Y_train = to_categorical(Y_train, num_authors)
    print X_train.shape, Y_train.shape
    model.fit(X_train, Y_train, batch_size=32, nb_epoch=1, show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
    print 'Elapsed Time: ', time.time() - start_time
    if numpy.mod(i,50)==0:
        print "Checkpoint at"+str(i)
        model.save_weights('basic_recurrent'+str(i)+'.hd5')
| true |
e715d9b161daafa82f24e07e3cb1ff9996609e03 | Python | TeamKun/MineTexTool | /main.py | UTF-8 | 1,922 | 2.703125 | 3 | [] | no_license | import glob
import os
import shutil as ut
import numpy as np
from PIL import Image
def md(path):
if not os.path.isdir(path):
os.makedirs(path)
print("makedir: " + path)
size = float(input())
for name in glob.glob('assets/**/*.png', recursive=True):
if size < 1.0:
img = Image.open(name)
height = int(img.height*size)
width = int(img.width*size)
if height < 1:
height = 1
if width < 1:
width = 1
resize = img.resize((width, height), Image.LANCZOS)
# resize = img.resize((width, height), Image.NEAREST)
out = "out\\" + str(size) + "\\" + name
pathlist = out.split('\\')
del pathlist[-1]
mdpath = ""
for st in pathlist:
mdpath = mdpath + st + "\\"
md(mdpath)
resize.save(out)
print("resize Pillow->" + str(size) + "x" + str(size) + " : " + name)
else:
image = np.asarray(Image.open(name).convert("RGB"), dtype=np.uint8)
zoomed_image = image.repeat(size, axis=0).repeat(size, axis=1)
resize = Image.fromarray(zoomed_image)
out = "out\\" + str(size) + "\\" + name
pathlist = out.split('\\')
del pathlist[-1]
mdpath = ""
for st in pathlist:
mdpath = mdpath + st + "\\"
md(mdpath)
resize.save(out)
print("resize Numpy->" + str(size) + "x" + str(size) + " : " + name)
# mcmeta
for name in glob.glob('assets/**/*.mcmeta', recursive=True):
out = "out\\" + str(size) + "\\" + name
pathlist = out.split('\\')
del pathlist[-1]
mdpath = ""
for st in pathlist:
mdpath = mdpath + st + "\\"
md(mdpath)
ut.copyfile(name, out)
print("mcmeta copy->" + str(out))
| true |
3da103b85a9f322bea8709b48a3ff00551eff581 | Python | leejz/misc-scripts | /randomize_fasta.py | UTF-8 | 2,786 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 12/4/12
This script reads in a fasta file and randomizes the lines
Input fasta file format:
4098968.combined_unique.fa
>Sequence0000000001
GCGCCCCTACGGGGAACGTTTTACTTCCAGTTTTAAAGCAGCTTTTACCCATCCAAACTCTGCGGTAACTTTATCATAAATTGTGGTAATATCTTCTGAT
Output file format:
same
--------------------------------------------------------------------------------
usage: randomize_fasta.py -i input.fa -o output_fasta
"""
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from random import shuffle
#-------------------------------------------------------------------------------
#function declarations
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "randomize_fasta.py -i input.fa -o output.fa",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_fasta", action="store",
dest="inputfilename",
help="fasta file of input sequences")
parser.add_argument("-o", "--output_fasta",
action="store", dest="outputfilename",
help="output fasta file name")
options = parser.parse_args()
mandatories = ["inputfilename", "outputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
fastafilename = options.inputfilename
outputfilename = options.outputfilename
print "Reading Fasta file..."
with open(fastafilename,'U') as fastainfile:
fasta_lines = [line.strip() for line in fastainfile]
large_fasta = []
header = ''
fasta = ''
for iter, line in enumerate(fasta_lines):
if line[0] == '>':
if iter > 0:
large_fasta.append([header,fasta])
header = line
fasta = ''
else:
fasta = fasta + line
if iter == len(fasta_lines)-1:
large_fasta.append([header,fasta])
print 'last line!'
print "Writing randomized Fasta file: " + outputfilename
shuffle(large_fasta)
with open(outputfilename, 'w') as outfile:
for fastaline in large_fasta:
outfile.write(fastaline[0]+'\n')
outfile.write(fastaline[1]+'\n')
print "Done!"
| true |
85ce3bb57a6072a065f82a2f089d28040170cdb3 | Python | phlax/aio.signals | /aio/signals/tests/test_signals.py | UTF-8 | 3,398 | 2.703125 | 3 | [] | no_license | import unittest
import asyncio
import aio.testing
from aio.signals import Signals
class AioSignalsTestCase(unittest.TestCase):
def test_listen(self):
def signal_called(signal):
pass
signals = Signals()
signals.listen('test-signal', signal_called)
self.assertEqual(
signals._signals,
{"test-signal": set([signal_called])})
def test_listen_again(self):
def signal_called(signal):
pass
signals = Signals()
signals.listen('test-signal', signal_called)
signals.listen('test-signal', signal_called)
self.assertEqual(
signals._signals,
{"test-signal": set([signal_called])})
def test_unlisten(self):
def signal_called(signal):
pass
def signal_called2(signal):
pass
signals = Signals()
signals.listen('test-signal', signal_called)
signals.listen('test-signal', signal_called2)
signals.unlisten('test-signal', signal_called)
self.assertEqual(
signals._signals,
{"test-signal": set([signal_called2])})
def test_unlisten_again(self):
"""
calling signals.unlisten twice does nothing
"""
def signal_called():
pass
def signal_called2():
pass
signals = Signals()
signals.listen('test-signal', signal_called)
signals.listen('test-signal', signal_called2)
signals.unlisten('test-signal', signal_called)
signals.unlisten('test-signal', signal_called)
self.assertEqual(
signals._signals,
{"test-signal": set([signal_called2])})
def test_unlisten_missing_signal(self):
"""
if signals.unlisten is called with non-existent signal
silently ignore
"""
def signal_called():
pass
signals = Signals()
signals.listen('test-signal', signal_called)
signals.unlisten('FOO-SIGNAL', signal_called)
self.assertEqual(
signals._signals,
{"test-signal": set([signal_called])})
def test_unlisten_missing_func(self):
"""
if signals.unlisten is called with non-existent callback func
silently ignore
"""
def signal_called():
pass
def signal_called2():
pass
signals = Signals()
signals.listen('test-signal', signal_called)
signals.unlisten('test-signal', signal_called2)
self.assertEqual(
signals._signals,
{"test-signal": set([signal_called])})
@aio.testing.run_until_complete
def test_emit(self):
"""
"""
class Checker:
signal = None
args = None
checker = Checker()
@asyncio.coroutine
def signal_called(signal):
yield from asyncio.sleep(2)
checker.signal = signal.name
checker.args = signal.data
return "done"
signals = Signals()
signals.listen('test-signal', signal_called)
result = yield from signals.emit('test-signal', "EXPECTED RESULT")
self.assertEqual(result, ["done"])
self.assertEqual(checker.signal, "test-signal")
self.assertEqual(checker.args, "EXPECTED RESULT")
| true |
27955c81c418f80de63fcff5d29cc09590996d50 | Python | clockworksspheres/ramdisk | /src/ramdisk/lib/libHelperExceptions.py | UTF-8 | 1,515 | 2.5625 | 3 | [] | no_license | """
Class for ramdisk management specific creations
Should be OS agnostic
@author: Roy Nielsen
"""
class UnsupportedOSError(Exception):
"""
Meant for being thrown when an action/class being run/instanciated is not
applicable for the running operating system.
@author: Roy Nielsen
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class NotValidForThisOS(Exception):
"""
Meant for being thrown when an action/class being run/instanciated is not
applicable for the running operating system.
@author: Roy Nielsen
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class SystemToolNotAvailable(Exception):
"""
Meant for being thrown when a system command is not available for
use by the library.
@author: Roy Nielsen
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class NotEnoughMemoryError(Exception):
"""
Thrown when there is not enough memory for this operation.
@author: Roy Nielsen
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class NotACyLoggerError(Exception):
"""
Custom Exception
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class UserMustBeRootError(Exception):
"""
Custom Exception
"""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
| true |
a891507491a0dd3ffb583adb51caf021734d79dc | Python | israelferrazaraujo/dcsp | /quantum classifier/cin/pennylane/templates/dc_hqc.py | UTF-8 | 1,175 | 2.59375 | 3 | [] | no_license | import pennylane as qml
from pennylane import numpy as np
from cin.pennylane.qml.hierarchical_classifier import circuit as circuit_hierarchical_classifier
from cin.pennylane.encoding.divide_and_conquer import Encoding
def config(X):
n = int(np.ceil(np.log2(len(X[0])))) # pylint: disable=no-member
n = 2**int(np.ceil(np.log2(n))) # n tem que ser potência de 2. # pylint: disable=no-member
N = 2**n-1 # len(X[0])-1 # N precisa ser tal que n seja potência de 2 mais próxima de log_2(X[0]). O hierarchical exige que n seja dessa maneira.
w = 2*n - 1 # número de parâmetros do circuito (weights)
X = np.c_[X, np.zeros((len(X), 2**n-len(X[0])))] # o número de qubits necessários para codificar os dados (log_2(N)) precisa ser uma potencia de 2. # pylint: disable=no-member
return n, N, w, X
def circuit(weights, state_vector=None, n=4):
encode = Encoding(state_vector, 'dc_amplitude_encoding', entangle=True)
q = encode.output_qubits # o comprimento de q deve ser igual a n.
return circuit_hierarchical_classifier(n , q, weights) | true |
bd9ea365241d35234622b0938ed9229256051be8 | Python | gdamjan/Scalable | /Echo/client-old.py | UTF-8 | 918 | 3.015625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python
import asyncore, socket
class Client(asyncore.dispatcher_with_send):
def __init__(self, host, port, message, n):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
self.out_buffer = message
self.N = n
def handle_close(self):
self.N = self.N - 1
self.close()
def handle_read(self):
self.N = self.N - 1
buf = self.recv(1024)
print 'Received: %s' % buf
if self.N == 0:
raise "STOP"
# self.close() # so I'm not closing it
def main(n):
host, port = 'localhost', 9901
print "Opening %d connections to %s:%d" % (n, host, port)
for i in xrange(n):
Client(host, port, 'Hello, world!', n)
asyncore.loop()
import sys
try:
main(int(sys.argv[1]))
except KeyboardInterrupt:
sys.exit(0)
| true |
4dea064d0ba3a8536ada5cbcedcc567be4c0fe8c | Python | RomanYatsuniak/physics-projects | /4/Project.py | UTF-8 | 4,615 | 2.765625 | 3 | [] | no_license | import numpy as np
import io
from sympy import *
import sympy
import matplotlib.pyplot as plt
import math
s = io.BytesIO(open('input.txt', 'rb').read().replace(b',', b';').replace(b')', b' ').replace(b'(', b' ').replace(b'[',b' ').replace(b']', b' '))
data1 = np.genfromtxt(s, dtype=(float, float,float, float, float, float, float, float, float, float, float, float, float, float, float, float),delimiter=";")
Array=[]
g=9.81
G=6.7*(10**(-11))
x0 = 10
y0 = 10
V0x = 0
V0y = 0
m1 = 100
x1 = 20
y1 = 10
V1x = 0
V1y = 0
m2 = 300
x3 = 15
y3 = 0
V2x = 0
V2y = 0
m3 = 150
T=80
res_x=[]
res_y=[]
res_x2=[]
res_y2=[]
res_x3=[]
res_y3=[]
i=0.001
V00=sqrt((V0x**2)+(V0y**2))
V01=sqrt((V1x**2)+(V1y**2))
V02=sqrt((V2x**2)+(V2y**2))
a0=V00/T
a1=V01/T
a2=V02/T
x1c=0
y1c=0
x2c=0
y2c=0
Bit1=False
Bit2=False
Bit3=False
while i<=T:
if(Bit1==False):
x_r_1=x0+V0x*i+(a0*(i**2))/2
y_r_1=y0+V0y*i+(a0*(i**2))/2
res_x.append(x_r_1)
res_y.append(y_r_1)
if(Bit2==False):
x_r_2=x1+V1x*i+(a1*(i**2))/2
y_r_2=y1+V1y*i+(a1*(i**2))/2
res_x2.append(x_r_2)
res_y2.append(y_r_2)
if(Bit3==False):
x_r_3=x3+V2x*i+(a2*(i**2))/2
y_r_3=y3+V2y*i+(a2*(i**2))/2
res_x3.append(x_r_3)
res_y3.append(y_r_3)
if(x_r_1>=x_r_2 and y_r_1>=y_r_2 or x_r_1>=x_r_2 and y_r_1<=y_r_2 or x_r_1<=x_r_2 and y_r_1>=y_r_2 or x_r_1<=x_r_2 and y_r_1<=y_r_2 ):
Beta=(x_r_1+x_r_2)/(y_r_1+y_r_2)
Beta=np.arctan(int(Beta))
V00=V00*np.cos(Beta)
Bit2==True
x1c=x_r_1
y1c=y_r_1
X1=True
if(x_r_1>=x_r_3 and y_r_1>=y_r_3 or x_r_1>=x_r_3 and y_r_1<=y_r_3 or x_r_1<=x_r_3 and y_r_1>=y_r_3 or x_r_1<=x_r_3 and y_r_1<=y_r_3 ):
Beta=(x_r_1+x_r_3)/(y_r_1+y_r_3)
Beta=np.arctan(int(Beta))
V00=V00*np.cos(Beta)
Bit3==True
x1c=x_r_1
y1c=y_r_1
X1=True
if(x_r_2>=x_r_3 and y_r_2>=y_r_3 or x_r_2>=x_r_3 and y_r_2<=y_r_3 or x_r_2<=x_r_3 and y_r_2>=y_r_3 or x_r_2<=x_r_3 and y_r_2<=y_r_3 ):
Beta=(x_r_2+x_r_3)/(y_r_2+y_r_3)
Beta=np.arctan(int(Beta))
V01=V01*np.cos(Beta)
Bit3==True
x1c=x_r_2
y1c=y_r_2
X2=True
if(x_r_2>=x_r_1 and y_r_2>=y_r_1 or x_r_2>=x_r_1 and y_r_2<=y_r_1 or x_r_2<=x_r_1 and y_r_2>=y_r_1 or x_r_2<=x_r_1 and y_r_2<=y_r_1 ):
Beta=(x_r_2+x_r_1)/(y_r_2+y_r_1)
Beta=np.arctan(int(Beta))
V01=V01*np.cos(Beta)
Bit1==True
x1c=x_r_2
y1c=y_r_2
X2=True
if(x_r_3>=x_r_1 and y_r_3>=y_r_1 or x_r_3>=x_r_1 and y_r_3<=y_r_1 or x_r_3<=x_r_1 and y_r_3>=y_r_1 or x_r_3<=x_r_1 and y_r_3<=y_r_1 ):
Beta=(x_r_3+x_r_1)/(y_r_3+y_r_1)
Beta=np.arctan(int(Beta))
V02=V02*np.cos(Beta)
Bit3==True
x1c=x_r_3
y1c=y_r_3
X3=True
if(x_r_3>=x_r_2 and y_r_3>=y_r_2 or x_r_3>=x_r_2 and y_r_3<=y_r_2 or x_r_3<=x_r_2 and y_r_3>=y_r_2 or x_r_3<=x_r_2 and y_r_3<=y_r_2 ):
Beta=(x_r_3+x_r_2)/(y_r_3+y_r_2)
Beta=np.arctan(int(Beta))
V02=V02*np.cos(Beta)
Bit2==True
x1c=x_r_3
y1c=y_r_3
X3=True
if(Bit2==True and Bit1==True or Bit2==True and Bit3==True or Bit3==True and Bit1==True or Bit3==True and Bit2==True or Bit1==True and Bit2==True or Bit1==True and Bit3==True):
if(X3==True):
x2c=x_r_3
y2c=y_r_3
if(X2==True):
x2c=x_r_2
y2c=y_r_2
if(X1==True):
x2c=x_r_1
y2c=y_r_1
T_k = i
break
i+=0.005
plt.figure(figsize=(15,15))
plt.scatter(res_x,res_y, 5, color='red')
plt.scatter(res_x2,res_y2, 5, color='orange')
plt.scatter(res_x3,res_y3, 5, color='blue')
plt.savefig('1.png')
array12 = np.array(['(' +str(round(x_r_1,2))+'('+str(round(T,2))+')'+', '+str(round(y_r_1,2))+'('+str(round(T,2))+')'+'); '+'('+str(round(V0x,2))+'('+str(round(T,2))+'), '+str(round(V0y,2))+'('+str(round(T,2))+')); '+'(' +str(round(x_r_2,2))+'('+str(round(T,2))+')'+','+str(round(y_r_2,2))+'('+str(round(T,2))+')); '+'('+str(round(V1x,2))+'('+str(round(T,2))+'), '+str(round(V1y,2))+'('+str(round(T,2))+')); '+'(' +str(round(x_r_3,2))+'('+str(round(T,2))+')'+', '+str(round(y_r_3,2))+'('+str(round(T,2))+')); '+'('+str(round(V2x,2))+'('+str(round(T,2))+'), '+str(round(V2y,2))+'('+str(round(T,2))+')); '+'('+str(round(x1c,2))+', '+str(round(y1c,2))+'); ('+str(round(x2c,2))+', '+str(round(y2c,2))+');'])
print(array12)
np.savetxt('output.txt', array12, delimiter='', fmt="%s")#
| true |
8981e87b88ac1ef036c1d88e80c017870be53590 | Python | unblest/python | /ex15.py | UTF-8 | 635 | 4 | 4 | [] | no_license | # exercise 15: reading files
# import the argument variable per "normal"
from sys import argv
# define the input for the file name we're going to read
script, filename = argv
# define and new variable with the open verb which is new
txt = open(filename)
# print the contents of the file
print "Here's your file %r:" % filename
print txt.read()
# now asks for the file name again and assigns to a new variable
print "Type the file name again:"
file_again = raw_input("> ")
# define another variable to open the filename given
txt_again = open(file_again)
# print the results
print txt_again.read()
txt.close()
txt_again.close()
| true |
0bcf8020288a8b32849d3debd40e22da7b705781 | Python | CheKey30/leetcode | /0114/114.py | UTF-8 | 943 | 4.15625 | 4 | [] | no_license | ```
Given a binary tree, flatten it to a linked list in-place.
For example, given the following tree:
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
```
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
def preorder(root,res):
if root:
res.append(root)
preorder(root.left,res)
preorder(root.right,res)
tmp = []
if not root:
return
preorder(root,tmp)
n = len(tmp)
for i in range(n-1):
tmp[i].left = None
tmp[i].right = tmp[i+1] | true |
c8e606c100e756dcdad5b315f4ee3a6a7a95db80 | Python | rsk2327/Blog | /MisclassCost/ClassExtractor.py | UTF-8 | 1,677 | 2.78125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 20:14:18 2016
@author: rsk
"""
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from collections import Counter
from LogLayer import *
# Path to the gzip-compressed, cPickle-serialized MNIST dataset.
dataset = "/home/rsk/Documents/MNIST/Misclassification/mnist.pkl.gz"
# Use a context manager so the handle is closed even if unpickling fails
# (the original open/close pair leaked the handle on error).
with gzip.open(dataset, 'rb') as f:
    [(trainx,trainy),(validx,validy),(testx,testy)] = cPickle.load(f)
#%%
#train_set, valid_set, test_set
def imbalance(data, label, digits=None):
    """
    Create a class imbalance in ``data`` by selectively downsampling the
    classes listed in ``digits``.

    Parameters
    ----------
    data : indexable with a list of row indices (e.g. a numpy array)
    label : sequence of int class ids in range(10), aligned with data rows
    digits : dict mapping class id -> keep ratio; classes not listed are
        kept in full. Defaults to {3: 0.5, 7: 0.7}.

    Returns
    -------
    [downsampled_data, downsampled_labels]
    """
    # BUG FIX: avoid the mutable default argument; bind the default here.
    if digits is None:
        digits = {3: 0.5, 7: 0.7}
    # Bucket the row indices of each of the 10 classes.
    classIndices = [[] for _ in range(10)]
    for i in range(len(data)):
        classIndices[label[i]].append(i)
    # Keep only the leading ratio-sized slice of each downsampled class.
    finalIndices = []
    for cls in range(10):
        if cls in digits:
            n = int(len(classIndices[cls]) * digits[cls])
            finalIndices += classIndices[cls][:n]
        else:
            finalIndices += classIndices[cls]
    return [data[finalIndices], label[finalIndices]]
#%%
if __name__=="__main__":
    # Downsample with the default ratios (classes 3 and 7) and compare the
    # class frequency counts before and after.
    a = imbalance(trainx,trainy)
    # Checking if imbalance has been created or not
    balanced = Counter(trainy)
    imbalanced = Counter(a[1])
    print "Balanced :"
    print balanced
    print "Imbalanced :"
print imbalanced
| true |
687b18a9a0dd8bcb0c7b785313582760642c210d | Python | Aasthaengg/IBMdataset | /Python_codes/p02848/s105798212.py | UTF-8 | 138 | 3.203125 | 3 | [] | no_license | n=int(input())
s = list(input())
# Caesar shift: advance each uppercase letter (codes 65-90) by n,
# wrapping past 'Z' (90) back to 'A' (65).
for i in s:
    shifted = ord(i) + n
    if shifted > 90:
        z = chr(shifted - 26)
    else:
        z = chr(shifted)
print(z,end='') | true |
b9b6daf6eb6d535c1a76a9f0d79b96b47c73389d | Python | darkangelcraft/SPLI-RSA | /Bruteforce attack/test.factor.py | UTF-8 | 191 | 3 | 3 | [] | no_license | n = 100123
# Trial division to find the smallest factor of n (printed by the line below).
n = 100123  # restated so this block is self-contained; same value as above
flag = True
# BUG FIX: the original started the scan at i=2 and stepped by 2, testing
# only even candidates, so for any odd n the while-loop never terminated.
if n % 2 == 0:
    i = 2
    flag = False
else:
    i = 3
while flag:
    print("Numero sotto test %i" % i)
    if n % i == 0:
        flag = False
    elif i * i > n:
        # No divisor up to sqrt(n): n itself is prime.
        i = n
        flag = False
    else:
        i = i + 2
print("i %d" %i) | true |
56369b5e1b657f5ba1a66167bb44eee995a38071 | Python | diogobaeder/moneypertime | /moneypertime/stores/models.py | UTF-8 | 1,502 | 2.96875 | 3 | [
"BSD-2-Clause"
] | permissive | from django.db import models
# (stored value, human-readable label) pairs for Store.price_type.
PRICE_TYPE_CHOICES = (
    ('C', 'Cash'),
    ('G', 'Gold'),
)
class Store(models.Model):
    """Django model for an in-game store.

    ``performance`` is derived, not user-editable: it is recomputed on every
    save() as amount / (interval * size1 * size2), rounded to 2 decimals.
    """
    name = models.CharField(max_length=200, unique=True)
    price = models.IntegerField(help_text='Price of the store itself')
    # Currency of ``price``; defaults to the first choice ('C', Cash).
    price_type = models.CharField(choices=PRICE_TYPE_CHOICES, max_length=1, default=PRICE_TYPE_CHOICES[0][0])
    employees = models.IntegerField(help_text='Number of employees accepted initially')
    experience = models.IntegerField(help_text='Experience earned when built')
    size1 = models.SmallIntegerField(help_text='Size of one of the sides of the store')
    size2 = models.SmallIntegerField(help_text='Size the other side of the store')
    build_time = models.IntegerField(help_text='Time spent to build this store')
    should_create_on_water = models.BooleanField(help_text='Should the store be created on water?', default=False)
    amount = models.IntegerField(help_text='Amount earned per interval')
    interval = models.IntegerField(help_text='Interval, in seconds, in which the amount is earned')
    performance = models.FloatField(help_text='Calculated performance (amount / (interval * size1 * size2))', editable=False, null=True, blank=True, default=0)

    # Python 2 Django string representation (would be __str__ on Python 3).
    def __unicode__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Recompute the derived performance metric before persisting.
        performance = float(self.amount) / (self.interval * self.size1 * self.size2)
        self.performance = round(performance, 2)
super(Store, self).save(*args, **kwargs)
| true |
0ae912b847faaeeeba975a4d01b98684014f427f | Python | Subash45/Multiplication_cipher | /multiplicative_cipher.py | UTF-8 | 1,045 | 3.953125 | 4 | [
"CC0-1.0"
] | permissive | # get word and the key from the user
word = input("\nEnter Your Password : ")
key = int(input("Enter The Key Value : "))

# Multiplicative cipher: each letter index (a/A = 0 ... z/Z = 25) is
# multiplied by the key modulo 26; case is preserved and any non-letter
# character passes through unchanged.
pieces = []
for ch in word:
    if 'a' <= ch <= 'z':
        pieces.append(chr((ord(ch) - ord('a')) * key % 26 + ord('a')))
    elif 'A' <= ch <= 'Z':
        pieces.append(chr((ord(ch) - ord('A')) * key % 26 + ord('A')))
    else:
        pieces.append(ch)
crypted = ''.join(pieces)
print('\nEncrypted text is : ', crypted, "\n")
| true |
944e1047d077fffbacc92a4fd7fe70373c77c2c2 | Python | 49257620/reboot | /studysrc/example/exam089.py | UTF-8 | 992 | 3.953125 | 4 | [] | no_license | # encoding: utf-8
"""
【程序89】
题目:某个公司采用公用电话传递数据,数据是四位的整数,在传递过程中是加密的,加密规则如下:
每位数字都加上5,然后用和除以10的余数代替该数字,再将第一位和第四位交换,第二位和第三位交换。
1.程序分析:
2.程序源代码:
"""
# The four-digit code to transmit, one digit per list element.
code_li = [8, 4, 5, 6]
print('code:', code_li)
def encode(code_li):
    """Encrypt the 4-digit code in place: shift each digit by +5 (mod 10),
    then swap positions 0<->3 and 1<->2. Returns the same (mutated) list."""
    code_li[:] = [(digit + 5) % 10 for digit in code_li]
    code_li[0], code_li[3] = code_li[3], code_li[0]
    code_li[1], code_li[2] = code_li[2], code_li[1]
    return code_li
# encode() mutates code_li in place and returns it, so decode_li aliases code_li.
decode_li = encode(code_li)
print('encode:', decode_li)
def decode(decode_li):
    """Invert encode(): undo the position swaps, then undo the +5 (mod 10)
    digit shift, in place. Returns the same (mutated) list."""
    # Swapping is its own inverse.
    # BUG FIX: operate on the parameter instead of reaching for the global
    # ``code_li`` (the original only worked because the caller aliased them).
    decode_li[0], decode_li[3] = decode_li[3], decode_li[0]
    decode_li[1], decode_li[2] = decode_li[2], decode_li[1]
    for i in range(len(decode_li)):
        # Inverse of (d + 5) % 10.
        # BUG FIX: digits encoded from 0 arrive as exactly 5, so the test
        # must be >= 5 (the original ``> 5`` mapped 5 back to 10).
        decode_li[i] = decode_li[i] - 5 if decode_li[i] >= 5 else decode_li[i] + 5
    return decode_li
print('decode:', decode(decode_li))
| true |
da80964eac1ac1f0e8d227d5d2458ca338d99fde | Python | eshandinesh/gis_based_crime_mapping | /mapping/gis/yoink/feed.py | UTF-8 | 2,349 | 2.796875 | 3 | [
"MIT"
] | permissive | import ConfigParser, datetime, logging, os, time
import feedparser
from .download import download
from .util import catching
logger = logging.getLogger('yoink.feed')
class Feed(object):
'''Represents a single feed from which files are downloaded.
'''
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
def __init__(self,
name,
url,
folder,
timestamp,
config):
self.name = name
self.url = url
self.folder = folder
self.timestamp = timestamp
self.config = config
@catching(handler=lambda e: logger.error('Error processing feed: {0}'.format(e)))
def update(self):
'''Read latest feed data and schedule new files for download.
'''
logger.info('reading feed: {0}'.format(self.url))
# read the feed
parsed = feedparser.parse(self.url)
# grab the entries
entries = parsed['entries']
# filter out entries older than the feed timestamp
entries = filter(lambda e: e['updated_parsed'][:8] > self.timestamp.timetuple()[:8],
entries)
# process each entry (in timestamp order)
map(self.process_entry, entries)
@catching(exception_type=KeyError, handler=lambda e: logger.error('Entry is missing a field: {0}'.format(e)))
def process_entry(self, entry):
'''Look for all enclosures in `entry` and download anything
that meets the Feed's criteria.
'''
# Download the entry if it's timestamp is greater than the
# feed's
for enc in entry['enclosures']:
if enc['type'].startswith('audio/'):
url = enc['href']
if self.config.preview:
logger.info('[PREVIEW] Downloading {0}'.format(url))
else:
# Schedule the URL for download
download(url, self.folder)
# Update the feed's timestamp
self.timestamp = max(
self.timestamp,
datetime.datetime.strptime(
time.strftime(
Feed.TIME_FORMAT,
entry['updated_parsed']),
Feed.TIME_FORMAT)) | true |
a67df7ec61ff33f67c48659d771be02bfca87c86 | Python | CharlesLaforte/learning | /python/Minecraft/Classes/GhostCastle.py | UTF-8 | 1,048 | 2.65625 | 3 | [] | no_license | from mcpi.minecraft import Minecraft
# Open a connection to the Minecraft server at host "smalldell1".
mc = Minecraft.create("smalldell1")
import time
class NamedBuilding(object):
    """A named rectangular building rendered in the Minecraft world via the
    module-level ``mc`` connection.

    The building is a hollow stone box (block id 4) anchored at (x, y, z)
    and extending width x height x depth blocks.
    """
    def __init__(self, x, y, z, width, height, depth, name):
        # Anchor corner of the building in world coordinates.
        self.x = x
        self.y = y
        self.z = z
        # Outer dimensions of the box.
        self.width = width
        self.height = height
        self.depth = depth
        self.name = name

    def build(self):
        """Place the stone shell, then hollow out the interior with air."""
        mc.setBlocks(self.x, self.y, self.z,
                     self.x + self.width, self.y + self.height,
                     self.z + self.depth, 4)
        mc.setBlocks(self.x + 1, self.y + 1, self.z + 1,
                     self.x + self.width - 1, self.y + self.height - 1,
                     self.z + self.depth - 1, 0)

    def clear(self):
        """Remove the building by filling its bounding box with air (id 0)."""
        mc.setBlocks(self.x, self.y, self.z,
                     self.x + self.width, self.y + self.height,
                     self.z + self.depth, 0)

    def getInfo(self):
        """Return a human-readable description of the building.

        BUG FIX: the original was declared without ``self`` and had an empty
        body, so ``instance.getInfo()`` raised TypeError and the chat message
        was never produced.
        """
        return "{} at ({}, {}, {}), size {}x{}x{}".format(
            self.name, self.x, self.y, self.z,
            self.width, self.height, self.depth)
# Demo: build a named castle at the player's current tile position,
# announce it in chat, leave it standing for 30 seconds, then remove it.
pos = mc.player.getTilePos()
x = pos.x
y = pos.y
z = pos.z
ghostCastle = NamedBuilding(x, y, z, 10, 16, 16, "Ghost Castle")
ghostCastle.build()
# NOTE(review): this call requires NamedBuilding.getInfo to accept the
# instance (``self``) and return a chat string -- verify the definition.
mc.postToChat(ghostCastle.getInfo())
time.sleep(30)
ghostCastle.clear() | true |
2ca91efd9c7ebf25d33aa2d948df64a020defb64 | Python | mfsuve/DeepReinforcementLearning | /blg604ehw2/dqn/model.py | UTF-8 | 12,154 | 3.109375 | 3 | [] | no_license | """
Deep Q network implementations.
Vanilla DQN and DQN with Duelling architecture,
Prioritized ReplayBuffer and Double Q learning.
"""
import torch
import numpy as np
import random
from copy import deepcopy
from collections import namedtuple
from blg604ehw2.dqn.replaybuffer import UniformBuffer
from blg604ehw2.dqn.replaybuffer import PrioirtyBuffer
from blg604ehw2.dqn.replaybuffer import Transition
from blg604ehw2.atari_wrapper import LazyFrames
from blg604ehw2.utils import process_state
from blg604ehw2.utils import normalize
class BaseDqn:
    """
    Base class for DQN implementations.

    Provides the greedy and epsilon-greedy policies; ``greedy_policy`` is a
    preprocessing wrapper around the subclass hook ``_greedy_policy``.

    Arguments:
        - nact: Number of possible actions in the (discrete) action space
        - buffer_capacity: Maximum capacity of the replay buffer
    """

    def __init__(self, nact, buffer_capacity):
        super().__init__()
        self.nact = nact
        self.buffer_capacity = buffer_capacity
        # Device name string; kept private so the ``device`` property setter
        # can also move the underlying torch module (see the setter below).
        self._device = "cpu"

    def greedy_policy(self, state):
        """ Wrapper for the _greedy_policy of the inherited class.

        Normalizes the state if it is a LazyFrames (stack of gray images),
        casts numpy arrays to float torch tensors on the current device and
        adds a batch dimension so the input fits the value network.
        """
        ### Optional, You many not use this ###
        if isinstance(state, LazyFrames):
            # Atari wrapper frames arrive HWC; transpose to CHW first.
            state = np.array(state, dtype="float32")
            state = state.transpose(2, 0, 1)
            state = normalize(state)
        if isinstance(state, np.ndarray):
            state = torch.from_numpy(state).float().to(self.device)
        # NOTE(review): assumes an unbatched state never has a leading
        # dimension of exactly 1 -- confirm for single-channel inputs.
        if state.shape[0] != 1:
            state.unsqueeze_(0)
        with torch.no_grad():
            return self._greedy_policy(state)

    def e_greedy_policy(self, state, epsilon):
        """ Return a uniformly random action with probability epsilon,
        otherwise the greedy action for the state.
        """
        if random.uniform(0, 1) < epsilon:
            return random.randint(0, self.nact - 1)
        else:
            return self.greedy_policy(state)

    def push_transition(self, transition):
        """ Push transition to the replay buffer (subclass hook). """
        raise NotImplementedError

    def update(self, batch_size):
        """ Update the model (subclass hook). """
        raise NotImplementedError

    def _greedy_policy(self, state):
        """ Return the greedy action for the preprocessed state (hook). """
        raise NotImplementedError

    @property
    def buffer_size(self):
        """ Current number of transitions stored in the replay buffer. """
        return self.buffer.size

    @property
    def device(self):
        """ Device name string (e.g. "cpu" or "cuda"). """
        return self._device

    @device.setter
    def device(self, value):
        """ Set the device name and move the module's parameters there.

        Subclasses also inherit torch.nn.Module, so ``super().to`` resolves
        to ``torch.nn.Module.to`` via the MRO.
        """
        super().to(value)
        self._device = value
class DQN(BaseDqn, torch.nn.Module):
    """ Vanilla DQN with a target network and a uniform replay buffer
    (implementation of DeepMind's Nature paper setup).

    Arguments:
        - valuenet: Neural network representing the value function
        - nact: Number of possible actions in the action space
        - lr: Learning rate of the optimizer (default=0.001)
        - buffer_capacity: Maximum capacity of the replay buffer
          (default=10000)
        - target_update_period: Number of update() calls between target
          network synchronizations; the counter resets after each sync
          (default=100)
    """

    def __init__(self, valuenet, nact, lr=0.001, buffer_capacity=10000,
                 target_update_period=100):
        super().__init__(nact, buffer_capacity)
        self.valuenet = valuenet
        # Frozen copy of the value network used for bootstrap targets.
        self.target_net = deepcopy(valuenet)
        self.target_update_period = target_update_period
        self.target_update_counter = 0

        self.buffer = UniformBuffer(capacity=buffer_capacity)
        self.opt = torch.optim.Adam(self.valuenet.parameters(), lr=lr)

    def _greedy_policy(self, state):
        """ Return the argmax action of the value network for the state. """
        ### YOUR CODE HERE ###
        # You may skip this and override greedy_policy directly
        return self.valuenet(state).argmax().item()
        ### END ###

    def push_transition(self, transition, *args):
        """ Push a transition to the replay buffer.

        Arguments:
            - transition: Named tuple of (state, action, reward,
              next_state, terminal)
        Extra positional args are accepted (and ignored) so calls stay
        compatible with the prioritized variant's signature.
        """
        self.buffer.push(transition)

    def update(self, batch_size, gamma):
        """ One Q-learning step on a uniformly sampled batch; also syncs the
        target network every ``target_update_period`` calls. Returns the
        mean absolute TD error. Raises AssertionError when the buffer holds
        fewer than ``batch_size`` transitions.
        """
        assert batch_size < self.buffer.size, "Buffer is not large enough!"

        ### YOUR CODE HERE ###
        batch = self.buffer.sample(batch_size)
        # Move every field of the sampled Transition to the current device.
        batch = Transition(*map(lambda e: e.to(self.device), batch))

        # Q(s, a) of the actions actually taken, shape (batch, 1).
        state_action_values = self.valuenet(batch.state).gather(1, batch.action.long().unsqueeze(-1))

        # Bootstrap values: zero for terminal states, target-net max otherwise.
        y = torch.zeros(batch_size, device=self.device)

        non_terminal_idx = batch.terminal == 0
        non_terminal_next_states = batch.next_state.index_select(0, non_terminal_idx.nonzero().squeeze())

        y[non_terminal_idx] = self.target_net(non_terminal_next_states).max(1)[0].detach()
        expected_state_action_values = (y * gamma) + batch.reward
        expected_state_action_values = expected_state_action_values.unsqueeze(-1)

        # L1 only for reporting the TD error; L2 is the training loss.
        MAE = torch.nn.L1Loss()
        MSE = torch.nn.MSELoss()
        with torch.no_grad():
            td_error = MAE(state_action_values, expected_state_action_values).item()
        loss = MSE(state_action_values, expected_state_action_values)

        self.opt.zero_grad()
        loss.backward()
        # Clip each gradient element to [-1, 1] before stepping.
        for param in self.valuenet.parameters():
            param.grad.data.clamp_(-1, 1)
        self.opt.step()

        self.target_update_counter += 1
        if self.target_update_counter % self.target_update_period == 0:
            self.target_net.load_state_dict(self.valuenet.state_dict())
            self.target_update_counter = 0
        ### END ###
        return td_error  # mean absolute td error
class DuelingDoublePrioritizedDQN(BaseDqn, torch.nn.Module):
    """ DQN with a Dueling architecture, a Prioritized Replay Buffer and
    Double Q-learning. The Double-Q idea is implemented with a target
    network that is overwritten by the main network every Nth update.

    Arguments:
        - valuenet: Neural network representing the value function
        - nact: Number of possible actions in the action space
        - lr: Learning rate of the optimizer (default=0.001)
        - buffer_capacity: Maximum capacity of the replay buffer
          (default=10000)
        - target_replace_period: Number of update() calls between target
          network replacements (default=50)
    """

    def __init__(self, valuenet, nact, lr=0.001, buffer_capacity=10000,
                 target_replace_period=50):
        super().__init__(nact, buffer_capacity)
        ### YOUR CODE HERE ###
        self.valuenet = valuenet
        # Frozen copy used for the Double-Q bootstrap targets.
        self.target_net = deepcopy(valuenet)
        self.target_update_period = target_replace_period
        self.target_update_counter = 0
        self.buffer = PrioirtyBuffer(capacity=buffer_capacity)
        self.opt = torch.optim.Adam(self.valuenet.parameters(), lr=lr)
        ### END ###

    def _greedy_policy(self, state):
        """ Return the argmax action of the value network for the state. """
        ### YOUR CODE HERE ###
        return self.valuenet(state).argmax().item()
        ### END ###

    def td_error(self, trans, gamma):
        """ Return the mean absolute TD error (a float) for one transition.

        The Double-Q target is used: the online net picks the argmax action,
        the target net evaluates it.
        NOTE(review): despite the original docstring, only the scalar error
        is returned, not the predicted/target values.
        """
        # Optional but convenient
        ### YOUR CODE HERE ###
        s, a, r, ns, t = trans
        if isinstance(s, LazyFrames):
            # Image observations: HWC -> CHW and normalize, like the policy.
            s = normalize(np.array(s, dtype='float32').transpose(2, 0, 1))
            ns = normalize(np.array(ns, dtype='float32').transpose(2, 0, 1))
        s = torch.from_numpy(s).to(self.device).unsqueeze(0)
        a = torch.from_numpy(np.array(a, dtype='float32')).to(self.device).unsqueeze(0)
        r = torch.from_numpy(np.array(r, dtype='float32')).to(self.device).unsqueeze(0)
        ns = torch.from_numpy(ns).to(self.device).unsqueeze(0)
        t = torch.from_numpy(np.array(t, dtype='float32')).to(self.device).unsqueeze(0)
        with torch.no_grad():
            values = self.valuenet(s).gather(1, a.long().unsqueeze(-1)).squeeze()
            argmax = self.valuenet(ns).argmax(1, keepdim=True)
            target = self.target_net(ns).gather(1, argmax).squeeze()
            expected = r + (gamma * target * (1 - t))
            td_error = (values - expected.squeeze()).abs()
            # Drop intermediates eagerly to keep memory pressure low when
            # this is called once per environment step.
            del values
            del expected
            del target
            del argmax
            return td_error.detach().mean().item()
        ### END ###

    def push_transition(self, transition, gamma):
        """ Push a transition and its TD error (used as the priority)
        into the prioritized replay buffer.
        """
        ### YOUR CODE HERE ###
        # The prioritized buffer requires the TD error of the transition,
        # so compute it here before pushing.
        self.buffer.push(transition, self.td_error(transition, gamma))
        ### END ###

    def update(self, batch_size, gamma):
        """ One Double-Q learning step on a prioritized batch.

        Updates the value net (loss weighted by the importance-sampling
        weights), replaces the target net every ``target_update_period``
        calls, refreshes the sampled transitions' priorities with their new
        TD errors, and returns the mean absolute TD error.
        """
        assert batch_size < self.buffer.size, "Buffer is not large enough!"
        ### YOUR CODE HERE ###
        # This time it is double q learning: the online net selects the
        # next-state action, the target net evaluates it.
        idxs, batch, ws = self.buffer.sample(batch_size)
        state, action, reward, next_state, done = batch
        # ndim > 2 means batched image observations: NHWC -> NCHW + normalize.
        if state.ndim > 2:
            state = state.transpose(0, 3, 1, 2)
            state = normalize(state)
            next_state = next_state.transpose(0, 3, 1, 2)
            next_state = normalize(next_state)
        state = torch.from_numpy(state).to(self.device)
        action = torch.LongTensor(action).to(self.device).unsqueeze(-1)
        reward = torch.FloatTensor(reward).to(self.device)
        next_state = torch.from_numpy(next_state).to(self.device)
        done = torch.FloatTensor(done).to(self.device)

        def td_error_sub(s, a, r, ns, t):
            # Shared Double-Q computation: |Q(s,a) - (r + g*Q'(ns, argmax))|.
            values = self.valuenet(s).gather(1, a).squeeze()
            argmax = self.valuenet(ns).argmax(1, keepdim=True)
            target = self.target_net(ns).gather(1, argmax).squeeze()
            expected = r + (gamma * target * (1 - t))
            td_error = (values - expected).abs()
            return td_error.detach(), values, expected

        td_error, values, expected = td_error_sub(state, action, reward, next_state, done)
        td_error = td_error.mean().item()

        # Squared error scaled by the importance-sampling weights.
        loss = ((values - expected).pow(2) * torch.FloatTensor(ws).to(self.device)).mean()
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()

        self.target_update_counter += 1
        if self.target_update_counter % self.target_update_period == 0:
            self.target_net.load_state_dict(self.valuenet.state_dict())
            self.target_update_counter = 0

        # Recompute the TD errors after the step and refresh priorities.
        new_vals = td_error_sub(state, action, reward, next_state, done)[0]
        self.buffer.update_priority(idxs, new_vals.detach().cpu().squeeze().numpy())
        ### END ###
return td_error # mean absolute td error | true |
8c0554c6e2ab73333b430a59538f930013c4b425 | Python | manasa0917/python_code | /Class 8/maths_module.py | UTF-8 | 409 | 3.46875 | 3 | [] | no_license | def add(a,b):
    # Sum the two operands (relies on '+', so it also concatenates sequences).
    return a+b
def factorial(n):
    """Return n! computed iteratively.

    Returns 0 when n > 10000 (guard against huge results) and 1 for any
    n < 1 (empty product, matching 0! == 1).
    """
    if n > 10000:
        return 0
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def driverFucntion():
    # Interactive demo: read two ints, print their sum and the factorial of
    # the sum (factorial() returns 0 when the sum exceeds 10000).
    num1 = int(input("num 1: "))
    num2 = int(input("num 2: "))
    print("sum = {}".format(add(num1,num2)))
    print("Factorial of sum = {}".format(factorial(add(num1,num2))))
# Run the interactive demo only when executed directly, not on import.
if __name__ == "__main__":
driverFucntion()
| true |
fccf01d4dccc1083afa4a1d034f22a127eb76214 | Python | mkvenkatesh/Random-Programming-Exercises | /max_positive_sub_array.py | UTF-8 | 2,847 | 4.125 | 4 | [] | no_license | """
Problem Description: Given an array of integers, A of length N, find out the
maximum sum sub-array of non negative numbers from A.
The sub-array should be contiguous i.e., a sub-array created by choosing the
second and fourth element and skipping the third element is invalid.
Maximum sub-array is defined in terms of the sum of the elements in the
sub-array.
Find and return the required subarray.
NOTE: If there is a tie, then compare with segment's length and return segment
which has maximum length. If there is still a tie, then return the segment with
minimum starting index.
Problem Constraints: 1 <= N <= 10^5 -10^9 <= A[i] <= 10^9
Example Input Input 1: A = [1, 2, 5, -7, 2, 3] Input 2: A = [10, -1, 2, 3, -4,
100]
Example Output Output 1: [1, 2, 5] Output 2: [100]
# Algorithm
Find the first non-negative integer and do a running sum until the next
non-negative integer. Put the running sum and start/end index of this subarray
in a tuple.
Loop through and do this until end of array. If new rs > oldrs, replace rs with
new rs and new start/end index. if rs is same, store the one with longest
length. If rs is same and length is same, continue.
"""
class Solution:
    # @param A : list of integers
    # @return a list of integers
    def maxset(self, A):
        """Return the maximum-sum contiguous subarray of non-negative values.

        Ties on sum are broken by longer length; a remaining tie keeps the
        earlier start index (guaranteed by the left-to-right scan with a
        strict length comparison).
        """
        best = {"sum": -1, "start": 0, "end": 0}
        run_start = None
        run_sum = 0
        for idx, value in enumerate(A):
            if value < 0:
                # A negative element terminates the current run.
                run_start = None
                run_sum = 0
                continue
            if run_start is None:
                run_start = idx
            run_sum += value
            longer = idx - run_start > best["end"] - best["start"]
            if run_sum > best["sum"] or (run_sum == best["sum"] and longer):
                best["sum"], best["start"], best["end"] = run_sum, run_start, idx
        if best["sum"] == -1:
            return []
        return A[best["start"]: best["end"] + 1]
# Quick smoke test: an all-negative array has no non-negative subarray.
A = [-1, -1, -1, -1]
s = Solution()
print(s.maxset(A)) | true |
90ba7c97636889461b2ee8d0c69f2abd209a3841 | Python | mhcrnl/pygtk | /book/02-App_and_AppWindow/Application1.py | UTF-8 | 1,255 | 2.6875 | 3 | [] | no_license | class Application(Gtk.Application):
    def __init__(self, *args, **kwargs):
        # HANDLES_COMMAND_LINE routes command-line invocations through
        # do_command_line() instead of activating directly.
        super().__init__(*args, application_id="org.example.myapp",
                         flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,
                         **kwargs)
        # Single-window application: the window is created lazily in
        # do_activate().
        self.window = None

        # Register the boolean "--test" / "-t" command-line option.
        self.add_main_option("test", ord("t"), GLib.OptionFlags.NONE,
                             GLib.OptionArg.NONE, "Command line test", None)
    def do_startup(self):
        # Chain up to GtkApplication's startup first.
        Gtk.Application.do_startup(self)

        # Register an application-level "quit" action.
        # NOTE(review): self.on_quit is not defined anywhere in this file as
        # shown -- confirm it exists (e.g. added later or in a subclass).
        action = Gio.SimpleAction.new("quit", None)
        action.connect("activate", self.on_quit)
        self.add_action(action)
    def do_activate(self):
        # We only allow a single window and raise any existing ones
        if not self.window:
            # Windows are associated with the application;
            # when the last one is closed the application shuts down.
            self.window = AppWindow(application=self, title="Main Window")

        self.window.present()
    def do_command_line(self, command_line):
        # Parse the options registered in __init__ from the invocation.
        options = command_line.get_options_dict()

        if options.contains("test"):
            # This is printed on the main (primary) instance.
            print("Test argument recieved")

        # Every command-line invocation also activates (shows) the window.
        self.activate()
return 0
| true |
cb3e4054fd5bf34156955b2c99dd091e01bb59d3 | Python | s-rachmaninoff/Algorithm-1 | /Python_Algorithm/solve/section_4/solution_8.py | UTF-8 | 476 | 3.203125 | 3 | [] | no_license | # 침몰하는 타이타닉 (그리디)
def solution(n, m, people):
    """Return the minimum number of lifeboats (greedy pairing).

    Each boat carries at most two people with combined weight <= m: pair the
    lightest remaining person with the heaviest when they fit, otherwise the
    heaviest rides alone.

    n is the declared person count (unused; len(people) is authoritative).
    """
    # Work on a sorted copy: the original sorted and destructively emptied
    # the caller's list, and people.pop(0) made the loop O(n^2).
    weights = sorted(people)
    light, heavy = 0, len(weights) - 1
    boats = 0
    while light <= heavy:
        # Take the lightest along only when the pair fits in one boat.
        if light < heavy and weights[light] + weights[heavy] <= m:
            light += 1
        heavy -= 1
        boats += 1
    return boats
# Read "N M" (person count and boat weight limit), then the N weights.
n, m = map(int, input().split())
people = list(map(int, input().split()))
print(solution(n, m, people)) | true |
875b714c1ad0dd93b044e6d4c078ae3cd35184f8 | Python | 6306022610113/INE_Problem | /exam/comm.py | UTF-8 | 304 | 3.515625 | 4 | [] | no_license | Sales = int(input("ENTER YOUR SALES : "))
# Tiered commission rate keyed on sales volume:
#   <= 2000: 2%;  2001-4000: 4%;  4001-6000: 7%;  > 6000: 10%
Commission = 0
if Sales > 6000:
    Commission = 0.1
elif Sales > 4000:
    Commission = 0.07
elif Sales > 2000:
    Commission = 0.04
else:
    Commission = 0.02
print("ํYOUR COMMISSION : ",Commission)
| true |
8e0685bb3f8eeb35bd71f19d11e45a7be7c04aff | Python | Vinceeee/mypy | /py3.6/async_guides/async_http.py | UTF-8 | 903 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
import asyncio
from random import randint
from urllib import request
"""
Non-blocking url-open by asyncio
"""
async def openurl(url):
    """Fetch *url*, print its body, sleep 2-3 s, and return {url: status}.

    NOTE(review): urllib.request.urlopen is synchronous, so this coroutine
    blocks the event loop for the duration of the HTTP request despite the
    module's "non-blocking" claim; only the sleep below actually yields.
    """
    print("opening {} ".format(url))
    u = request.urlopen(url, timeout=10)
    print(u.read())
    # Random 2-3 second pause to simulate post-download work.
    st = randint(2, 3)
    await asyncio.sleep(st)
    print("task completed.")
    return {url:u.status}
def normal_run():
    """Endlessly fetch two local URLs per pass and print their results.

    Each iteration schedules both coroutines on the loop, waits for both to
    finish, then prints the returned {url: status} dicts. Runs forever.
    """
    # Coroutines are wrapped into Task instances which are then driven by
    # the event loop.
    loop = asyncio.get_event_loop()
    while True:
        task1 = openurl("http://localhost:12345")
        task2 = openurl("http://localhost:12345/home")
        tasks = [asyncio.ensure_future(task1), asyncio.ensure_future(task2)]
        loop.run_until_complete(asyncio.wait(tasks))
        # loop.run_until_complete(task1)
        for task in tasks:
            print(task.result())
if __name__ == '__main__':
normal_run()
| true |
8bc2cb156c3eb31266fac9f1b12499e8193d4daa | Python | BABIN2D/Health-Management-System | /Health Management System.py | UTF-8 | 2,737 | 3.71875 | 4 | [] | no_license | import datetime
def gettime():
    """Return the current local date and time as a datetime object."""
    now = datetime.datetime.now()
    return now
choice = input('Enter R to read\nEnter L to log\t') # Decide: read the log file or append to it.

if choice in('L','l'): # Log/write branch.
    user_name = input('Enter Your Name\t') # Ask the user for their name.
    with open('Health Management System.txt','a') as file: # Append mode: never overwrites earlier entries.
        file.write('Time-\t')
        file.write(str(gettime())) # Timestamp from the helper above.
        file.write('\n')
        file.write('Name- ')
        file.write(user_name +'\n') # Record the user name.

        diet_Exercise = input('Enter D for Diet\nEnter E for Exercise\t') # Choose the entry type.

        if diet_Exercise in ('D','d'):
            file.write('Diet- ')
            while True: # Keep prompting until the user declines to add
                        # more diet items.
                file.write(input('Enter Your Diet\t')+'\t') # Tab-separated diet items on one line.
                choice1 = input('Do You Want to Log More Diet?\t') # Continue prompt.
                if choice1 in ('Yes','yes','YES','y','Y'): # Any yes-variant keeps the loop
                    continue # going; everything else stops it.
                else:
                    file.write('\n')
                    print('Good Bye')
                    break # Finish the diet entry.

        elif diet_Exercise in('E','e'): # Exercise branch, mirrors the diet loop.
            file.write('Exercise- ')
            while True: # Keep prompting for exercises.
                file.write(input('Enter Your Exercise\t')+'\t') # Tab-separated exercises on one line.
                choice2 = input('Do You Want to Log More Exercise?\t') # Continue prompt.
                if choice2 in ('Yes','yes','YES','y','Y'): # Same yes-variants as above.
                    continue
                else:
                    file.write('\n')
                    print('Good Bye')
                    break # Finish the exercise entry.

else:
    # Any other choice (including 'R'/'r') dumps the whole log to stdout.
    with open('Health Management System.txt') as fileR:
print(fileR.read())
| true |
50ffb52975944c90498abfa29667b3ef5cfbecc2 | Python | jditlee/tmdSurprise_leetcode_hot100 | /[124]二叉树中的最大路径和.py | UTF-8 | 1,656 | 3.46875 | 3 | [] | no_license | # 路径 被定义为一条从树中任意节点出发,沿父节点-子节点连接,达到任意节点的序列。同一个节点在一条路径序列中 至多出现一次 。该路径 至少包含一个 节点,且不
# 一定经过根节点。
#
# 路径和 是路径中各节点值的总和。
#
# 给你一个二叉树的根节点 root ,返回其 最大路径和 。
#
#
#
# 示例 1:
#
#
# 输入:root = [1,2,3]
# 输出:6
# 解释:最优路径是 2 -> 1 -> 3 ,路径和为 2 + 1 + 3 = 6
#
# 示例 2:
#
#
# 输入:root = [-10,9,20,null,null,15,7]
# 输出:42
# 解释:最优路径是 15 -> 20 -> 7 ,路径和为 15 + 20 + 7 = 42
#
#
#
#
# 提示:
#
#
# 树中节点数目范围是 [1, 3 * 104]
# -1000 <= Node.val <= 1000
#
# Related Topics 树 深度优先搜索 动态规划 二叉树
# 👍 1112 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
class TreeNode:
    # Minimal binary-tree node: a value plus optional left/right children.
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def __init__(self):
        # Best path sum found anywhere in the tree so far.
        self.res = float("-inf")
    def maxPathSum(self, root: TreeNode) -> int:
        """Return the maximum path sum over all node-to-node paths."""
        def best_branch(node):
            # Best downward single-branch sum starting at node; negative
            # branches contribute nothing.
            if node is None:
                return 0
            gain_left = best_branch(node.left)
            gain_right = best_branch(node.right)
            gain_left = gain_left if gain_left > 0 else 0
            gain_right = gain_right if gain_right > 0 else 0
            # A path may "bend" at this node and use both branches.
            self.res = max(self.res, node.val + gain_left + gain_right)
            # A parent can extend only one branch.
            return node.val + max(gain_left, gain_right)
        best_branch(root)
        return self.res
# leetcode submit region end(Prohibit modification and deletion)
| true |
8a25b4f460b990576fb0e14eabe56cfa30d77960 | Python | nickcoats/kattis-assignments | /get_shorty/get_shorty.py | UTF-8 | 1,752 | 3.078125 | 3 | [] | no_license | import sys
#
# Code Sample from Kattis Get Shorty Assignment
# URL: https://open.kattis.com/problems/getshorty
# Execute: pyhton get_shorty.py < get_shorty.in
#
# Parse the stdin stream into test cases. Each case starts with an "n m"
# header (2 <= n <= 10000 intersections, 1 <= m <= 15000 corridors) and is
# followed by m "u v factor" edges with factor in [0, 1]; "0 0" terminates.
tests = []
testSet = []
i = 0
valid = False
validData = False

for row in sys.stdin:
    row = row.replace('\r', '').replace('\n', '')
    row = row.split()

    # "0 0" sentinel: flush the last test case and stop reading.
    if len(row) == 2 and int(row[0]) == 0 and row[0] == row[1]:
        tests.append(testSet)
        break

    # Two fields: a case header. Validate the ranges before accepting it.
    if len(row) == 2:
        if (int(row[0]) >= 2 and int(row[0]) <= 10000) and (int(row[1]) >= 1 and int(row[1]) <= 15000):
            row[0] = int(row[0])
            row[1] = int(row[1])
            if i == 0:
                # Very first header: just start the first test set.
                valid = True
                testSet.append(row)
                i += 1
                continue
            valid = True
            # A new header closes the previous test set (if it had data).
            if validData == True:
                tests.append(testSet)
                testSet = []
            testSet.append(row)
        else:
            valid = False
            testSet = []

    # Three fields: an edge line; only accepted inside a valid case and
    # with a shrink factor in [0, 1].
    if len(row) == 3 and valid == True:
        passed = False
        row[0] = int(row[0])
        row[1] = int(row[1])
        if float(row[2]) >= 0.00 and float(row[2]) <= 1.00:
            passed = True
            row[2] = float(row[2])
        if passed:
            validData = True
            testSet.append(row)
        else:
            validData = False

# For each test: multiply factors along runs of edges and keep the best
# product among runs that touch the target intersection (n - 1).
# NOTE(review): this scans the edge list in input order rather than doing a
# shortest-path search over the graph (Kattis "getshorty" is normally solved
# with Dijkstra on -log(factor)); verify against the expected algorithm.
for test in tests:
    mikaelSize = 0.00
    startSize = 1.00
    stop = test[0][0] - 1

    for i in range(1,len(test)):
        startSize = startSize * test[i][2]
        if test[i][0] == stop or test[i][1] == stop:
            if startSize > mikaelSize:
                mikaelSize = startSize
            startSize = 1.00
            continue
print '{0:.4f}'.format(mikaelSize)
| true |
e883ffa06f8f4589c6cf35ff4cf4a2b769816f45 | Python | ISIS2503/Grupo7 | /Experimento2/Persistencia/consumerMedidas.py | UTF-8 | 1,505 | 2.640625 | 3 | [
"MIT"
] | permissive | import json
import requests
from kafka import KafkaConsumer
def post(p_sensetime, p_type, p_dataValue, p_unit):
    """POST a single measurement to the local measurements REST endpoint.

    NOTE(review): ``message`` is not a parameter -- it is the loop variable
    of the Kafka consumer loop below, reached as a global. Calling post()
    outside that loop raises NameError; consider passing the topic in.
    """
    payload = {
        "sensetime": p_sensetime,
        "type": p_type,
        "dataValue": p_dataValue,
        "unit": p_unit
    }
    url = 'http://localhost:8080/measurements'
    response = requests.post(url, data=json.dumps(payload), headers={'Content-type': 'application/json'})
    print(message.topic)
    print("Response Status Code: " + str(response.status_code))
# Consume raw sensor readings from the 'rawdata' Kafka topic and forward
# each measurement type to the REST service via post().
consumer = KafkaConsumer('rawdata', group_id='my-group', bootstrap_servers=['localhost:8090'])
for message in consumer:
    json_data = json.loads(message.value.decode('utf-8'))
    sensetime = json_data['sensetime']
    temp = json_data['temperature']
    senseTemp = temp['data']
    tempUnit = temp['unit']
    gas = json_data['gas']
    senseGas = gas['data']
    gasUnit = gas['unit']
    # Noise and ilumination are optional; forward them only when present.
    if ((json_data['noise'] != None) and (json_data['ilumination'] != None)):
        noise = json_data['noise']
        senseNoise = noise['data']
        noiseUnit = noise['unit']
        ilumination = json_data['ilumination']
        senseIlumination = ilumination['data']
        iluminationUnit = ilumination['unit']
        post(sensetime, "temperature", float(senseTemp), str(tempUnit))
        post(sensetime, "gas", float(senseGas), str(gasUnit))
        post(sensetime, "noise", float(senseNoise), str(noiseUnit))
        # BUG FIX: the original sent float() (always 0.0) instead of the
        # parsed reading, and misspelled the type as "iluniation".
        post(sensetime, "ilumination", float(senseIlumination), str(iluminationUnit))
    else:
        post(sensetime, "temperature", float(senseTemp), str(tempUnit))
post(sensetime, "gas", float(senseGas), str(gasUnit)) | true |
d0c6c97c71faaa21f45e40125255f088eb15ed7d | Python | andreafresco/Project-Euler | /p003.py | UTF-8 | 1,097 | 3.765625 | 4 | [] | no_license | #
# Solution to Project Euler problem 3
# Copyright (c) Andrea Fresco. All rights reserved.
#
# https://projecteuler.net/problem=3
# https://github.com/andreafresco/Project-Euler
#
def smallest_prime(n):
    """Return the smallest prime factor of n, or n itself when n is prime
    (n == 1 returns 1). Asserts n > 0."""
    assert n > 0
    # Only candidates up to sqrt(n) need checking: a composite n always has
    # a factor no larger than its square root.
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            # The first divisor found is necessarily prime.
            return divisor
        divisor += 1
    return n
def Largest_Prime_Factor(P):
    """Return the largest prime factor of P.

    Repeatedly divides P by its smallest prime factor; once no factor up to
    sqrt(P) remains, P itself is prime and is the answer.
    """
    def _least_factor(m):
        # Smallest prime factor of m, or m itself if m is prime.
        assert m > 0
        d = 2
        while d * d <= m:
            if m % d == 0:
                return d
            d += 1
        return m

    factor = _least_factor(P)
    while factor < P:
        # Strip the smallest prime; integer division keeps P an int.
        P //= factor
        factor = _least_factor(P)
    return P
if __name__ == "__main__":
print(Largest_Prime_Factor(600851475143))
| true |
2e908bc075d4955811ebf0360f8270f622d4d305 | Python | alina-pavaluc/extract_pe_features | /classifier.py | UTF-8 | 4,532 | 2.703125 | 3 | [] | no_license | import csv
import pickle
import numpy as np
import pandas as pd
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from extract_features import extract_features_from_file, extract_features_from_folder
class Classifier:
    """Label Portable Executable files as 'clean' or 'malware'.

    Wraps a pickled scikit-learn style model (anything exposing
    ``predict_proba``); feature extraction is delegated to the
    ``extract_features`` helpers imported at module level.
    """

    def __init__(self, filename):
        """Load the trained (pickled) model stored at *filename*."""
        # Context manager closes the file handle promptly; the original
        # `pickle.load(open(...))` left it to the garbage collector.
        with open(filename, 'rb') as model_file:
            self.loaded_model = pickle.load(model_file)
        self.target_names = ['clean', 'malware']

    def label_file(self, extracted_features):
        """Return [(label, confidence), ...] for each feature row."""
        predicted = self.loaded_model.predict_proba(extracted_features)
        # argmax picks the most likely class, max is its probability.
        return [(self.target_names[p.argmax()], p.max()) for p in predicted]

    def classify_file(self, filename):
        """Classify one file; returns (filename, (label, confidence))."""
        extracted_features = extract_features_from_file(filename)
        if extracted_features is not None:
            return filename, self.label_file([extracted_features])[0]
        # Not a PE file at all, hence nothing to classify.
        # TODO: return a structured result instead of a sentinel string.
        return 'this is not an application, so it\'s not a threat', '1.0'

    def scan_folder(self, folder_name):
        """Classify every file in *folder_name*; returns (name, (label, conf)) pairs."""
        file_names, features = extract_features_from_folder(folder_name)
        classification = self.label_file(features)
        return zip(file_names, classification)
def train_using_decision_tree_classifier():
    """Train a decision tree on all_features.csv and pickle it to disk.

    Each CSV row is: filename, feature_1..feature_n, label; the filename
    column is dropped and the last column is used as the target.
    """
    features = []
    labels = []
    with open('C:\\Users\\Alina\\PycharmProjects\\licenta2\\all_features.csv') as feature_file:
        for row in csv.reader(feature_file, delimiter=','):
            features.append(row[1:-1])
            labels.append(row[-1])
    # fit() returns the estimator itself, so train and bind in one step.
    clf = tree.DecisionTreeClassifier().fit(features, labels)
    # Persist the fitted model for later use by Classifier; the context
    # manager guarantees the pickle is flushed and the handle closed
    # (the original `pickle.dump(clf, open(...))` leaked the handle).
    filename = 'C:\\Users\\Alina\\PycharmProjects\\licenta2\\finalized_model.sav'
    with open(filename, 'wb') as model_file:
        pickle.dump(clf, model_file)
def train_using_mlp():
    """Train an MLP classifier on all_features.csv and pickle it to disk."""
    features = []
    labels = []
    with open('C:\\Users\\Alina\\PycharmProjects\\licenta2\\all_features.csv') as feature_file:
        for row in csv.reader(feature_file, delimiter=','):
            # Drop the filename column; the remaining feature columns are numeric.
            features.append(list(map(float, row[1:-1])))
            labels.append(row[-1])
    # Encode the string labels ('clean'/'malware') as integers.
    y = pd.factorize(labels)[0]
    clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(100, 50), random_state=1, max_iter=200)
    clf.fit(features, y)
    # Persist the fitted model; `with` closes the handle (the original
    # `pickle.dump(clf, open(...))` leaked it).
    filename = 'C:\\Users\\Alina\\PycharmProjects\\licenta2\\MLP_model.sav'
    with open(filename, 'wb') as model_file:
        pickle.dump(clf, model_file)
def train_using_random_forest_classifier():
    """Train a random forest on a hand-picked subset of the features.

    Uses CSV columns 1, 2, 5, 6, 8, 9, 10, 11: DebugSize, ImageVersion,
    ResourceSize, VirtualSize2, CheckSum, DLLCharacteristics,
    SizeOfInitializedData, SizeOfStackReserve.  (Removed the unused
    `feature_names` local and the commented-out experiment code.)
    """
    features = []
    labels = []
    with open('C:\\Users\\Alina\\PycharmProjects\\licenta2\\all_features.csv') as feature_file:
        for row in csv.reader(feature_file, delimiter=','):
            # Pick only the subset of columns listed in the docstring.
            features.append(list(map(float, [row[1], row[2], row[5], row[6], row[8], row[9], row[10], row[11]])))
            labels.append(row[-1])
    # Encode the string labels ('clean'/'malware') as integers.
    y = pd.factorize(labels)[0]
    clf = RandomForestClassifier(n_jobs=2, random_state=0, n_estimators=100)
    clf.fit(features, y)
    # Persist the fitted model; `with` closes the handle (the original
    # `pickle.dump(clf, open(...))` leaked it).
    filename = 'C:\\Users\\Alina\\PycharmProjects\\licenta2\\finalized_model_random_forest.sav'
    with open(filename, 'wb') as model_file:
        pickle.dump(clf, model_file)
def use_classifier():
    """Smoke-test the pickled random forest on one hard-coded sample."""
    filename = 'C:\\Users\\Alina\\PycharmProjects\\licenta2\\finalized_model_random_forest.sav'
    # `with` closes the handle (the original `pickle.load(open(...))` leaked it).
    with open(filename, 'rb') as model_file:
        loaded_model = pickle.load(model_file)
    # One sample, previously duplicated as two identical literals.
    sample = [[0, 0.0, 360448, 0, 292, 2176, 5, 0, 0, 301568, 1048576]]
    result = loaded_model.predict(sample)
    prob = loaded_model.predict_proba(sample)
    print(result, prob)
# Entry point: retrain the random forest model when run as a script.
if __name__ == "__main__":
    train_using_random_forest_classifier()
    # use_classifier()
| true |
dc760cee1fc366f930e2494d232d30b9995c6870 | Python | VilenShvedov/Myprojects | /005_001_2.py | UTF-8 | 113 | 2.71875 | 3 | [] | no_license | import 005_001_Functions
print(functions.doubles(10))
print(functions.triples(10))
print(functions.squares(10)) | true |
2ab955390f3bbf5f9bd123814a26831b6805d087 | Python | ngvanryneveld/CapstoneProject-L1Task15 | /own_game.py | UTF-8 | 12,080 | 3.703125 | 4 | [] | no_license |
# import pygame allows for the game library functions to be included in the program
# import random allows for the program to generate random numbers
import pygame
import random
# Initialise all pygame modules before any display/image calls.
pygame.init()

# Display surface dimensions (pixels).
screen_width = 800
screen_height = 600

# Create the game window using the dimensions above.
screen = pygame.display.set_mode((screen_width, screen_height))

# Load the sprite images for the player, the three enemies and the prize.
# NOTE(review): assumes the .png files sit next to this script — confirm
# the working directory when launching.
player = pygame.image.load("player.png")
enemy1 = pygame.image.load("monster.png")
enemy2 = pygame.image.load("monster.png")
enemy3 = pygame.image.load("monster.png")
prize = pygame.image.load("prize.png")

# Player sprite size, used for boundary clamping and collision boxes.
player_height = player.get_height()
player_width = player.get_width()

# Enemy sprite sizes, used for boundary clamping and collision boxes.
enemy1_height = enemy1.get_height()
enemy1_width = enemy1.get_width()
enemy2_height = enemy2.get_height()
enemy2_width = enemy2.get_width()
enemy3_height = enemy3.get_height()
enemy3_width = enemy3.get_width()

# Prize sprite size, used for boundary clamping and collision boxes.
prize_height = prize.get_height()
prize_width = prize.get_width()

# Purely informational: show the player sprite's size in the console.
print("This is the height of the player " + str(player_height))
print("This is the width of the player " + str(player_width))
# Ask for a difficulty level (1-10).  Level 10 scrolls fastest but grants
# the player a small movement-speed bonus in the game loop below.
difficulty = input("""Enter a level you would like to play, ranging from 1 - 10,
10 can be discomforting but you will receive a slight speed bonus,
anything under 3 is for newbies: """)
# Re-prompt until the answer is exactly one of the ten accepted strings
# (replaces the original 10-clause boolean chain with a membership test).
valid_levels = {str(level) for level in range(1, 11)}
while difficulty not in valid_levels:
    difficulty = input("Enter a level you would like to play, ranging from 1 - 10: ")
# Starting position: the player begins at x=100, at a random height that
# keeps the whole sprite on screen.
playerXPosition = 100
playerYPosition = random.randint(0, screen_height - player_height)

# Enemies start off the right edge of the screen, staggered horizontally so
# they never overlap each other; their heights are randomised on-screen.
enemy1XPosition = screen_width
enemy1YPosition = random.randint(0, screen_height - enemy1_height)
enemy2XPosition = screen_width + (2 * enemy1_width)
enemy2YPosition = random.randint(0, screen_height - enemy2_height)
enemy3XPosition = screen_width + (4 * enemy2_width)
enemy3YPosition = random.randint(0, screen_height - enemy3_height)

# The prize enters last, only after all three enemies have scrolled in.
prizeXPosition = screen_width + (7 * enemy3_width)
prizeYPosition = random.randint(0, screen_height - prize_height)

# Arrow-key state; True while the corresponding key is held down.
keyUp = False
keyDown = False
keyRight = False
keyLeft = False
def _collision_box(x, y, width, height):
    """Return a pygame.Rect of the given size positioned at (x, y)."""
    box = pygame.Rect(player.get_rect())
    box.top = y
    box.left = x
    box.height = height
    box.width = width
    return box


# Main game loop: each iteration draws one frame, processes input, moves the
# player within the screen bounds, resolves win/lose conditions, then scrolls
# the enemies and the prize leftwards at a difficulty-derived speed.  The loop
# only ends via pygame.quit()/exit() on quit, win, lose or miss.
while 1:
    # Redraw the frame: clear, draw every sprite at its current position,
    # then push the finished frame to the display.
    screen.fill(0)
    screen.blit(player, (playerXPosition, playerYPosition))
    screen.blit(enemy1, (enemy1XPosition, enemy1YPosition))
    screen.blit(enemy2, (enemy2XPosition, enemy2YPosition))
    screen.blit(enemy3, (enemy3XPosition, enemy3YPosition))
    screen.blit(prize, (prizeXPosition, prizeYPosition))
    pygame.display.flip()

    # Input handling: track which arrow keys are currently held down.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit(0)
        if event.type in (pygame.KEYDOWN, pygame.KEYUP):
            # One flag per arrow key; KEYDOWN sets it, KEYUP clears it.
            pressed = event.type == pygame.KEYDOWN
            if event.key == pygame.K_UP:
                keyUp = pressed
            elif event.key == pygame.K_DOWN:
                keyDown = pressed
            elif event.key == pygame.K_RIGHT:
                keyRight = pressed
            elif event.key == pygame.K_LEFT:
                keyLeft = pressed

    # Move the player, clamped to the screen.  Level 10 grants a 1.5x speed
    # bonus — the same values as the original duplicated branches.
    playerSpeed = 1.5 if difficulty == "10" else 1
    if keyUp and playerYPosition > 0:
        playerYPosition -= playerSpeed
    if keyDown and playerYPosition < (screen_height - player_height):
        playerYPosition += playerSpeed
    if keyRight and playerXPosition < (screen_width - player_width):
        playerXPosition += playerSpeed
    if keyLeft and playerXPosition > 0:
        playerXPosition -= playerSpeed

    # Axis-aligned bounding boxes used for the collision tests below.
    playerBox = _collision_box(playerXPosition, playerYPosition, player_width, player_height)
    enemyBoxes = [
        _collision_box(enemy1XPosition, enemy1YPosition, enemy1_width, enemy1_height),
        _collision_box(enemy2XPosition, enemy2YPosition, enemy2_width, enemy2_height),
        _collision_box(enemy3XPosition, enemy3YPosition, enemy3_width, enemy3_height),
    ]
    prizeBox = _collision_box(prizeXPosition, prizeYPosition, prize_width, prize_height)

    # Touching any enemy loses the game.
    if any(playerBox.colliderect(enemyBox) for enemyBox in enemyBoxes):
        print("You lose!")
        pygame.quit()
        exit(0)
    # Touching the prize wins the game.
    if playerBox.colliderect(prizeBox):
        print("You win!")
        pygame.quit()
        exit(0)
    # Everything (enemies and prize) has scrolled past the left edge: the
    # prize was missed, so the game ends without a win.
    if (enemy1XPosition < (0 - enemy1_width)
            and enemy2XPosition < (0 - enemy2_width)
            and enemy3XPosition < (0 - enemy3_width)
            and prizeXPosition < (0 - prize_width)):
        print("You almost won, but you missed the prize!")
        pygame.quit()
        exit(0)

    # Scroll speed scales linearly with difficulty: level n moves everything
    # n * 0.5 pixels per frame — identical values to the original 50-line
    # per-level table ("10" -> 5.0, "9" -> 4.5, ..., "1" -> 0.5).
    scrollSpeed = int(difficulty) * 0.5
    enemy1XPosition -= scrollSpeed
    enemy2XPosition -= scrollSpeed
    enemy3XPosition -= scrollSpeed
    prizeXPosition -= scrollSpeed
025bc17d7f2d4ff41468d8ec02e77a429ca54410 | Python | DockerNAS/yamlscript-formula | /_utils/voluptuous.py | UTF-8 | 35,512 | 2.703125 | 3 | [
"MIT"
] | permissive | # encoding: utf-8
#
# Copyright (C) 2010-2013 Alec Thomas <alec@swapoff.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Alec Thomas <alec@swapoff.org>
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
import os
import re
import sys
from contextlib import contextmanager
from functools import wraps
# Python 2/3 compatibility aliases.  Use sys.version_info rather than a
# lexicographic comparison of the sys.version string, which is fragile.
if sys.version_info[0] >= 3:
    import urllib.parse as urlparse
    long = int
    unicode = str
    basestring = str
    ifilter = filter
    iteritems = dict.items
else:
    from itertools import ifilter
    import urlparse
    iteritems = dict.iteritems


__author__ = 'Alec Thomas <alec@swapoff.org>'
__version__ = '0.8.5'
@contextmanager
def raises(exc, msg=None):
    """Assert that the with-block raises *exc*, optionally with message *msg*.

    The original version passed silently when no exception was raised at
    all; that case now fails loudly (like pytest.raises).
    """
    try:
        yield
    except exc as e:
        if msg is not None:
            assert str(e) == msg, '%r != %r' % (str(e), msg)
    else:
        # The protected block completed without raising.
        raise AssertionError('%s not raised' % getattr(exc, '__name__', exc))
class Undefined(object):
    """Falsy singleton sentinel meaning "no value given" (see UNDEFINED).

    Distinct from None so that None remains a legal schema/default value.
    """

    def __nonzero__(self):
        # Python 2 truthiness hook: the sentinel is always falsy.
        return False

    # Python 3 uses __bool__; without this alias bool(UNDEFINED) was True
    # on Python 3, contradicting the Python 2 behaviour above.
    __bool__ = __nonzero__

    def __repr__(self):
        return '...'


UNDEFINED = Undefined()
class Error(Exception):
    """Base validation exception.

    Root of the exception hierarchy: both SchemaError (bad schema) and
    Invalid (bad data) derive from it.
    """
class SchemaError(Error):
    """An error was encountered in the schema.

    Raised when the schema itself is malformed (e.g. an unsupported node
    type), as opposed to Invalid, which reports bad data.
    """
class Invalid(Error):
    """Raised when data fails validation.

    :attr msg: The error message.
    :attr path: Path to the offending value, as a list of keys into the
        source data.
    :attr error_message: The underlying error message, as a string.
    """

    def __init__(self, message, path=None, error_message=None, error_type=None):
        Error.__init__(self, message)
        self.path = path or []
        self.error_message = error_message or message
        self.error_type = error_type

    @property
    def msg(self):
        # The message is the first positional argument passed to Exception.
        return self.args[0]

    def __str__(self):
        # Render as "<message>[ for <type>][ @ data[k1][k2]...]".
        text = Exception.__str__(self)
        if self.error_type:
            text += ' for ' + self.error_type
        if self.path:
            text += ' @ data[%s]' % ']['.join(repr(node) for node in self.path)
        return text
class MultipleInvalid(Invalid):
    """Aggregate of several Invalid errors.

    msg/path/error_message delegate to the first collected error; note
    that Invalid.__init__ is deliberately not called.
    """

    def __init__(self, errors=None):
        # Copy so later mutation of the caller's list cannot affect us.
        self.errors = errors[:] if errors else []

    def add(self, error):
        """Append another Invalid to the collection."""
        self.errors.append(error)

    @property
    def msg(self):
        return self.errors[0].msg

    @property
    def path(self):
        return self.errors[0].path

    @property
    def error_message(self):
        return self.errors[0].error_message

    def __repr__(self):
        return 'MultipleInvalid(%r)' % self.errors

    def __str__(self):
        return str(self.errors[0])
class Schema(object):
    """A validation schema.

    The schema is a Python tree-like structure where nodes are pattern
    matched against corresponding trees of values.

    Nodes can be values, in which case a direct comparison is used, types,
    in which case an isinstance() check is performed, or callables, which will
    validate and optionally convert the value.
    """

    def __init__(self, schema, required=False, extra=False):
        """Create a new Schema.

        :param schema: Validation schema. See :module:`voluptuous` for details.
        :param required: Keys defined in the schema must be in the data.
        :param extra: Keys in the data need not have keys in the schema.
        """
        self.schema = schema
        self.required = required
        self.extra = extra
        # The schema is compiled once, up front, into a tree of
        # `validator(path, data)` closures so repeated validation calls
        # avoid re-inspecting the schema.
        self._compiled = self._compile(schema)

    def __call__(self, data):
        """Validate data against this schema."""
        try:
            return self._compiled([], data)
        except MultipleInvalid:
            raise
        except Invalid as e:
            # Normalise: callers always receive a MultipleInvalid.
            raise MultipleInvalid([e])
        # return self.validate([], self.schema, data)

    def _compile(self, schema):
        # Dispatch on the schema node type and return the matching
        # compiled validator closure.
        if schema is Extra:
            return lambda _, v: v
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, dict):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        type_ = type(schema)
        if type_ is type:
            type_ = schema
        if type_ in (int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise SchemaError('unsupported schema data type %r' %
                          type(schema).__name__)

    def _compile_mapping(self, schema, invalid_msg=None):
        """Create validator for given mapping."""
        invalid_msg = invalid_msg or 'mapping value'
        # Keys that must be present: every key when required=True (except
        # Optional ones), plus any key explicitly marked Required.
        default_required_keys = set(key for key in schema
                                    if
                                    (self.required and not isinstance(key, Optional))
                                    or
                                    isinstance(key, Required))
        _compiled_schema = {}
        for skey, svalue in iteritems(schema):
            new_key = self._compile(skey)
            new_value = self._compile(svalue)
            _compiled_schema[skey] = (new_key, new_value)

        def validate_mapping(path, iterable, out):
            required_keys = default_required_keys.copy()
            error = None
            errors = []
            for key, value in iterable:
                key_path = path + [key]
                # Try each schema key in turn until one accepts this data key.
                candidates = _iterate_mapping_candidates(_compiled_schema)
                for skey, (ckey, cvalue) in candidates:
                    try:
                        new_key = ckey(key_path, key)
                    except Invalid as e:
                        if len(e.path) > len(key_path):
                            raise
                        if not error or len(e.path) > len(error.path):
                            error = e
                        continue
                    # Backtracking is not performed once a key is selected, so if
                    # the value is invalid we immediately throw an exception.
                    exception_errors = []
                    try:
                        out[new_key] = cvalue(key_path, value)
                    except MultipleInvalid as e:
                        exception_errors.extend(e.errors)
                    except Invalid as e:
                        exception_errors.append(e)

                    if exception_errors:
                        for err in exception_errors:
                            if len(err.path) > len(key_path):
                                errors.append(err)
                            else:
                                err.error_type = invalid_msg
                                errors.append(err)
                        # If there is a validation error for a required
                        # key, this means that the key was provided.
                        # Discard the required key so it does not
                        # create an additional, noisy exception.
                        required_keys.discard(skey)
                        break

                    # Key and value okay, mark any Required() fields as found.
                    required_keys.discard(skey)
                    break
                else:
                    # No schema key accepted this data key.
                    if self.extra:
                        out[key] = value
                    else:
                        errors.append(Invalid('extra keys not allowed', key_path))

            # Required keys still unaccounted for either get their declared
            # default or produce a "required key not provided" error.
            for key in required_keys:
                if getattr(key, 'default', UNDEFINED) is not UNDEFINED:
                    out[key.schema] = key.default
                else:
                    msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
                    errors.append(Invalid(msg, path + [key]))
            if errors:
                raise MultipleInvalid(errors)
            return out

        return validate_mapping

    def _compile_object(self, schema):
        """Validate an object.

        Has the same behavior as dictionary validator but work with object
        attributes.

        For example:

            >>> class Structure(object):
            ...     def __init__(self, one=None, three=None):
            ...         self.one = one
            ...         self.three = three
            ...
            >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
            >>> with raises(MultipleInvalid, "not a valid value for object value @ data['one']"):
            ...   validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')

        def validate_object(path, data):
            if (schema.cls is not UNDEFINED
                    and not isinstance(data, schema.cls)):
                raise Invalid('expected a {0!r}'.format(schema.cls), path)
            # Validate the object's attributes as mapping items, skipping
            # attributes whose value is None.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            return type(data)(**out)

        return validate_object

    def _compile_dict(self, schema):
        """Validate a dictionary.

        A dictionary schema can contain a set of values, or at most one
        validator function/type.

        A dictionary schema will only validate a dictionary:

            >>> validate = Schema({})
            >>> with raises(MultipleInvalid, 'expected a dictionary'):
            ...   validate([])

        An invalid dictionary value:

            >>> validate = Schema({'one': 'two', 'three': 'four'})
            >>> with raises(MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
            ...   validate({'one': 'three'})

        An invalid key:

            >>> with raises(MultipleInvalid, "extra keys not allowed @ data['two']"):
            ...   validate({'two': 'three'})

        Validation function, in this case the "int" type:

            >>> validate = Schema({'one': 'two', 'three': 'four', int: str})

        Valid integer input:

            >>> validate({10: 'twenty'})
            {10: 'twenty'}

        By default, a "type" in the schema (in this case "int") will be used
        purely to validate that the corresponding value is of that type. It
        will not Coerce the value:

            >>> with raises(MultipleInvalid, "extra keys not allowed @ data['10']"):
            ...   validate({'10': 'twenty'})

        Wrap them in the Coerce() function to achieve this:

            >>> validate = Schema({'one': 'two', 'three': 'four',
            ...                    Coerce(int): str})
            >>> validate({'10': 'twenty'})
            {10: 'twenty'}

        Custom message for required key

            >>> validate = Schema({Required('one', 'required'): 'two'})
            >>> with raises(MultipleInvalid, "required @ data['one']"):
            ...   validate({})

        (This is to avoid unexpected surprises.)

        Multiple errors for nested field in a dict:

            >>> validate = Schema({
            ...     'adict': {
            ...         'strfield': str,
            ...         'intfield': int
            ...     }
            ... })
            >>> try:
            ...     validate({
            ...         'adict': {
            ...             'strfield': 123,
            ...             'intfield': 'one'
            ...         }
            ...     })
            ... except MultipleInvalid as e:
            ...     print(sorted(str(i) for i in e.errors))  # doctest: +NORMALIZE_WHITESPACE
            ["expected int for dictionary value @ data['adict']['intfield']",
             "expected str for dictionary value @ data['adict']['strfield']"]
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='dictionary value')

        # Group Exclusive markers by their exclusion-group label so mutual
        # exclusion can be checked before normal mapping validation.
        groups_of_exclusion = {}
        for node in schema:
            if isinstance(node, Exclusive):
                if node.group_of_exclusion not in groups_of_exclusion.keys():
                    groups_of_exclusion[node.group_of_exclusion] = []
                groups_of_exclusion[node.group_of_exclusion].append(node)

        def validate_dict(path, data):
            if not isinstance(data, dict):
                raise Invalid('expected a dictionary', path)

            # Reject mappings containing two or more keys from the same
            # exclusion group.
            errors = []
            for label, group in groups_of_exclusion.items():
                exists = False
                for exclusive in group:
                    if exclusive.schema in data:
                        if exists:
                            msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
                                "two or more values in the same group of exclusion '%s'" % label
                            errors.append(Invalid(msg, path))
                            break
                        exists = True
            if errors:
                raise MultipleInvalid(errors)

            out = type(data)()
            return base_validate(path, iteritems(data), out)

        return validate_dict

    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.

        This is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(MultipleInvalid, 'invalid list value @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__

        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise Invalid('expected a %s' % seq_type_name, path)

            # Empty seq schema, allow any data.
            if not schema:
                return data

            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                # Each element may match any of the alternative validators.
                for validate in _compiled:
                    try:
                        out.append(validate(index_path, value))
                        break
                    except Invalid as e:
                        if len(e.path) > len(index_path):
                            raise
                        invalid = e
                else:
                    # No alternative accepted this element.
                    if len(invalid.path) <= len(index_path):
                        invalid = Invalid('invalid %s value' % seq_type_name, index_path)
                    errors.append(invalid)
            if errors:
                raise MultipleInvalid(errors)
            # Preserve the input's concrete sequence type.
            return type(data)(out)
        return validate_sequence

    def _compile_tuple(self, schema):
        """Validate a tuple.

        A tuple is a sequence of valid values or validators tried in order.

        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(MultipleInvalid, 'invalid tuple value @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        return self._compile_sequence(schema, tuple)

    def _compile_list(self, schema):
        """Validate a list.

        A list is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(MultipleInvalid, 'invalid list value @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        return self._compile_sequence(schema, list)
def _compile_scalar(schema):
"""A scalar value.
The schema can either be a value or a type.
>>> _compile_scalar(int)([], 1)
1
>>> with raises(Invalid, 'expected float'):
... _compile_scalar(float)([], '1')
Callables have
>>> _compile_scalar(lambda v: float(v))([], '1')
1.0
As a convenience, ValueError's are trapped:
>>> with raises(Invalid, 'not a valid value'):
... _compile_scalar(lambda v: float(v))([], 'a')
"""
if isinstance(schema, type):
def validate_instance(path, data):
if isinstance(data, schema):
return data
else:
msg = 'expected %s' % schema.__name__
raise Invalid(msg, path)
return validate_instance
if callable(schema):
def validate_callable(path, data):
try:
return schema(data)
except ValueError as e:
raise Invalid('not a valid value', path)
except MultipleInvalid as e:
for error in e.errors:
error.path = path + error.path
raise
except Invalid as e:
e.path = path + e.path
raise
return validate_callable
def validate_value(path, data):
if data != schema:
raise Invalid('not a valid value', path)
return data
return validate_value
def _iterate_mapping_candidates(schema):
    """Yield (key, value) schema pairs with the catch-all Extra last."""
    # Extra must be tried after every specific key: it matches anything, so
    # if it came first it could shadow a Required key's own validation and
    # produce a false positive.
    def _extra_last(item):
        return item[0] == Extra

    return sorted(iteritems(schema), key=_extra_last)
def _iterate_object(obj):
"""Return iterator over object attributes. Respect objects with
defined __slots__.
"""
d = {}
try:
d = vars(obj)
except TypeError:
# maybe we have named tuple here?
if hasattr(obj, '_asdict'):
d = obj._asdict()
for item in iteritems(d):
yield item
try:
slots = obj.__slots__
except AttributeError:
pass
else:
for key in slots:
if key != '__dict__':
yield (key, getattr(obj, key))
raise StopIteration()
class Object(dict):
    """Dict-like schema node: validate object attributes instead of keys."""

    def __init__(self, schema, cls=UNDEFINED):
        # Optional class constraint checked by Schema._compile_object.
        dict.__init__(self, schema)
        self.cls = cls
class Marker(object):
    """Base wrapper that marks a schema node for special treatment."""

    def __init__(self, schema, msg=None):
        self.schema = schema
        self._schema = Schema(schema)
        self.msg = msg

    def __call__(self, v):
        """Validate v, substituting our custom message for shallow errors."""
        try:
            return self._schema(v)
        except Invalid as e:
            # Deep errors (path longer than one key) keep their own,
            # more precise message.
            if self.msg and len(e.path) <= 1:
                raise Invalid(self.msg)
            raise

    def __str__(self):
        return str(self.schema)

    def __repr__(self):
        return repr(self.schema)
class Optional(Marker):
    """Mark a node in the schema as optional.

    Optional keys are exempt from the Schema(required=True) presence check.
    """
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.

    Behaves like Optional, except that at most one key of any given
    exclusion group may appear in the validated mapping (only meaningful
    for dictionaries):

        >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
        >>> schema({'alpha': 30})
        {'alpha': 30}
        >>> with raises(MultipleInvalid, "two or more values in the same group of exclusion 'angles'"):
        ...   schema({'alpha': 30, 'beta': 45})

    A custom ``msg`` can be supplied, e.g. when an API offers several
    authentication methods but only one may be used at a time:

        >>> msg = 'Please, use only one type of authentication at the same time.'
        >>> schema = Schema({
        ...     Exclusive('classic', 'auth', msg=msg): {
        ...         Required('email'): basestring,
        ...         Required('password'): basestring
        ...     },
        ...     Exclusive('internal', 'auth', msg=msg): {
        ...         Required('secret_key'): basestring
        ...     },
        ... })
        >>> with raises(MultipleInvalid, 'Please, use only one type of authentication at the same time.'):
        ...   schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
        ...           'internal': {'secret_key': 'tEMp'}})
    """

    def __init__(self, schema, group_of_exclusion, msg=None):
        # Remember the group label, then defer to Optional/Marker setup.
        self.group_of_exclusion = group_of_exclusion
        super(Exclusive, self).__init__(schema, msg=msg)
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.
    >>> schema = Schema({Required('key'): str})
    >>> with raises(MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    """

    def __init__(self, schema, msg=None, default=UNDEFINED):
        # Stash the default first; Marker.__init__ only needs schema/msg.
        self.default = default
        super(Required, self).__init__(schema, msg=msg)
def Extra(_):
    """Allow keys in the data that are not present in the schema."""
    # Extra is a sentinel recognised by Schema itself and must never be
    # invoked as a validator.
    raise SchemaError('"Extra" should never be called')


# Since Extra is never actually called, there is no way to warn callers
# about the deprecated lowercase name at runtime; keep a plain alias.
extra = Extra
def Msg(schema, msg):
    """Report a user-friendly message if a schema fails to validate.
    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])
    Messages are only applied to invalid direct descendants of the schema:
    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(MultipleInvalid, 'invalid list value @ data[0][0]'):
    ...   validate([['three']])
    """
    schema = Schema(schema)

    @wraps(Msg)
    def f(v):
        try:
            return schema(v)
        except Invalid as e:
            # Failures nested deeper than one level keep their own message.
            if len(e.path) > 1:
                raise
            raise Invalid(msg)
    return f
def message(default=None):
    """Convenience decorator to allow functions to provide a message.
    Set a default message:
    >>> @message('not an integer')
    ... def isint(v):
    ...   return int(v)
    >>> validate = Schema(isint())
    >>> with raises(MultipleInvalid, 'not an integer'):
    ...   validate('a')
    The message can be overridden on a per validator basis:
    >>> validate = Schema(isint('bad'))
    >>> with raises(MultipleInvalid, 'bad'):
    ...   validate('a')
    """
    def decorator(func):
        # The decorated name becomes a *factory*: calling it (optionally
        # with a message) produces the actual validator.
        @wraps(func)
        def make_validator(msg=None):
            @wraps(func)
            def validator(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except ValueError:
                    raise Invalid(msg or default or 'invalid value')
            return validator
        return make_validator
    return decorator
def truth(f):
    """Convenience decorator to convert truth functions into validators.
    >>> @truth
    ... def isdir(v):
    ...   return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...   validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        # A falsy predicate result is signalled as ValueError so the
        # `message` decorator (or Schema) can translate it.
        if not f(v):
            raise ValueError
        return v
    return check
def Coerce(type, msg=None):
    """Coerce a value to a type.
    If the type constructor throws a ValueError or TypeError, the value
    will be marked as Invalid.
    Default behavior:
    >>> validate = Schema(Coerce(int))
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate(None)
    >>> with raises(MultipleInvalid, 'expected int'):
    ...   validate('foo')
    With custom message:
    >>> validate = Schema(Coerce(int, "moo"))
    >>> with raises(MultipleInvalid, 'moo'):
    ...   validate('foo')
    """
    @wraps(Coerce)
    def f(v):
        try:
            return type(v)
        except (ValueError, TypeError):
            # Fall back to a generic message built from the type name.
            error_text = msg if msg else 'expected %s' % type.__name__
            raise Invalid(error_text)
    return f
# Validator factory built from two decorators: `truth` turns the identity
# predicate below into a check that raises ValueError on falsy input, and
# `message` converts that ValueError into an Invalid with this message.
@message('value was not true')
@truth
def IsTrue(v):
    """Assert that a value is true, in the Python sense.
    >>> validate = Schema(IsTrue())
    "In the Python sense" means that implicitly false values, such as empty
    lists, dictionaries, etc. are treated as "false":
    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate([])
    >>> validate([1])
    [1]
    >>> with raises(MultipleInvalid, "value was not true"):
    ...   validate(False)
    ...and so on.
    """
    # `truth` raises when this returns a falsy value, so returning v
    # unchanged is exactly the "is it truthy?" predicate.
    return v
@message('value was not false')
def IsFalse(v):
    """Assert that a value is false, in the Python sense.
    (see :func:`IsTrue` for more detail)
    >>> validate = Schema(IsFalse())
    >>> validate([])
    []
    """
    # Falsy values pass through untouched; anything truthy is rejected
    # via ValueError, which `message` converts to Invalid.
    if not v:
        return v
    raise ValueError
@message('expected boolean')
def Boolean(v):
    """Convert human-readable boolean values to a bool.
    Accepted values are 1, true, yes, on, enable, and their negatives.
    Non-string values are cast to bool.
    >>> validate = Schema(Boolean())
    >>> validate(True)
    True
    >>> with raises(MultipleInvalid, "expected boolean"):
    ...   validate('moo')
    """
    if not isinstance(v, basestring):
        # Non-strings fall back to ordinary Python truthiness.
        return bool(v)
    lowered = v.lower()
    if lowered in ('1', 'true', 'yes', 'on', 'enable'):
        return True
    if lowered in ('0', 'false', 'no', 'off', 'disable'):
        return False
    # Unrecognised strings are rejected (message decorator reports it).
    raise ValueError
def Any(*validators, **kwargs):
    """Use the first validated value.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    :returns: Return value of the first validator that passes.
    >>> validate = Schema(Any('true', 'false',
    ...                       All(Any(int, bool), Coerce(bool))))
    >>> validate('true')
    'true'
    >>> validate(1)
    True
    >>> with raises(MultipleInvalid, "not a valid value"):
    ...   validate('moo')
    msg argument is used
    >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
    >>> validate(1)
    1
    >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
    ...   validate(4)
    """
    msg = kwargs.pop('msg', None)
    schemas = [Schema(val, **kwargs) for val in validators]

    @wraps(Any)
    def f(v):
        best_error = None
        for schema in schemas:
            try:
                return schema(v)
            except Invalid as e:
                # Keep the deepest failure: it gives the best diagnostics.
                if best_error is None or len(e.path) > len(best_error.path):
                    best_error = e
        if best_error is not None:
            raise best_error if msg is None else Invalid(msg)
        raise Invalid(msg or 'no valid value found')
    return f
def All(*validators, **kwargs):
    """Value must pass all validators.
    The output of each validator is passed as input to the next.
    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema constructors.
    >>> validate = Schema(All('10', Coerce(int)))
    >>> validate('10')
    10
    """
    msg = kwargs.pop('msg', None)
    schemas = [Schema(val, **kwargs) for val in validators]

    def f(v):
        result = v
        try:
            # Thread the value through every schema in declaration order.
            for schema in schemas:
                result = schema(result)
        except Invalid as e:
            raise e if msg is None else Invalid(msg)
        return result
    return f
def Match(pattern, msg=None):
    """Value must be a string that matches the regular expression.
    >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
    >>> validate('0x123EF4')
    '0x123EF4'
    >>> with raises(MultipleInvalid, "does not match regular expression"):
    ...   validate('123EF4')
    >>> with raises(MultipleInvalid, 'expected string or buffer'):
    ...   validate(123)
    Pattern may also be a _compiled regular expression:
    >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
    >>> validate('0x123ef4')
    '0x123ef4'
    """
    # Compile lazily-supplied string patterns once, up front.
    if isinstance(pattern, basestring):
        pattern = re.compile(pattern)

    def f(v):
        try:
            matched = pattern.match(v)
        except TypeError:
            # re raises TypeError for non-string input.
            raise Invalid("expected string or buffer")
        if matched:
            return v
        raise Invalid(msg or 'does not match regular expression')
    return f
def Replace(pattern, substitution, msg=None):
    """Regex substitution.
    >>> validate = Schema(All(Replace('you', 'I'),
    ...                       Replace('hello', 'goodbye')))
    >>> validate('you say hello')
    'I say goodbye'
    """
    # Accept either a pattern string or a pre-compiled regex object.
    regexp = re.compile(pattern) if isinstance(pattern, basestring) else pattern

    def f(v):
        return regexp.sub(substitution, v)
    return f
@message('expected a URL')
def Url(v):
    """Verify that the value is a URL.

    Any value that ``urlparse`` can parse is accepted; parsing failures
    are signalled as ValueError so the ``message`` decorator reports
    'expected a URL'.
    """
    try:
        urlparse.urlparse(v)
        return v
    except Exception:
        # Previously a bare ``except:``, which would also swallow
        # SystemExit and KeyboardInterrupt raised during parsing; only
        # genuine runtime errors should become validation failures.
        raise ValueError
# `truth` converts the boolean predicate into a validator; `message`
# supplies the Invalid text when the predicate is false.
@message('not a file')
@truth
def IsFile(v):
    """Verify the file exists."""
    return os.path.isfile(v)
# Same decorator pattern as IsFile: predicate -> validator with message.
@message('not a directory')
@truth
def IsDir(v):
    """Verify the directory exists.
    >>> IsDir()('/')
    '/'
    """
    return os.path.isdir(v)
# Accepts files, directories, symlinks, etc. — anything os.path.exists
# reports as present.
@message('path does not exist')
@truth
def PathExists(v):
    """Verify the path exists, regardless of its type."""
    return os.path.exists(v)
def Range(min=None, max=None, min_included=True, max_included=True, msg=None):
    """Limit a value to a range.
    Either min or max may be omitted.
    Either min or max can be excluded from the range of accepted values.
    :raises Invalid: If the value is outside the range.
    >>> s = Schema(Range(min=1, max=10, min_included=False))
    >>> s(5)
    5
    >>> s(10)
    10
    >>> with raises(MultipleInvalid, 'value must be at most 10'):
    ...   s(20)
    >>> with raises(MultipleInvalid, 'value must be higher than 1'):
    ...   s(1)
    """
    @wraps(Range)
    def f(v):
        # Lower-bound checks (inclusive vs. exclusive).
        if min is not None:
            if min_included and v < min:
                raise Invalid(msg or 'value must be at least %s' % min)
            if not min_included and v <= min:
                raise Invalid(msg or 'value must be higher than %s' % min)
        # Upper-bound checks (inclusive vs. exclusive).
        if max is not None:
            if max_included and v > max:
                raise Invalid(msg or 'value must be at most %s' % max)
            if not max_included and v >= max:
                raise Invalid(msg or 'value must be lower than %s' % max)
        return v
    return f
def Clamp(min=None, max=None, msg=None):
    """Clamp a value to a range.
    Either min or max may be omitted.
    """
    # NOTE: the parameters shadow the builtins min/max, so the clamping
    # is done with explicit comparisons rather than the builtin helpers.
    @wraps(Clamp)
    def f(v):
        clamped = v
        if min is not None and clamped < min:
            clamped = min
        if max is not None and clamped > max:
            clamped = max
        return clamped
    return f
def Length(min=None, max=None, msg=None):
    """The length of a value must be in a certain range."""
    @wraps(Length)
    def f(v):
        # Compute len once; reused for both bound checks.
        size = len(v)
        if min is not None and size < min:
            raise Invalid(msg or 'length of value must be at least %s' % min)
        if max is not None and size > max:
            raise Invalid(msg or 'length of value must be at most %s' % max)
        return v
    return f
def In(container, msg=None):
    """Validate that a value is in a collection."""
    @wraps(In)
    def validator(value):
        # Membership test via the container's own __contains__.
        if value not in container:
            raise Invalid(msg or 'value is not allowed')
        return value
    return validator
def Lower(v):
    """Transform a string to lower case.
    >>> s = Schema(Lower)
    >>> s('HI')
    'hi'
    """
    # Non-string input is stringified first, matching the other casing
    # helpers in this module.
    text = str(v)
    return text.lower()
def Upper(v):
    """Transform a string to upper case.
    >>> s = Schema(Upper)
    >>> s('hi')
    'HI'
    """
    text = str(v)
    return text.upper()
def Capitalize(v):
    """Capitalise a string.
    >>> s = Schema(Capitalize)
    >>> s('hello world')
    'Hello world'
    """
    text = str(v)
    return text.capitalize()
def Title(v):
    """Title case a string.
    >>> s = Schema(Title)
    >>> s('hello world')
    'Hello World'
    """
    text = str(v)
    return text.title()
def DefaultTo(default_value, msg=None):
    """Sets a value to default_value if none provided.
    >>> s = Schema(DefaultTo(42))
    >>> s(None)
    42
    """
    @wraps(DefaultTo)
    def f(v):
        # Only None triggers substitution; falsy values like 0 pass through.
        return default_value if v is None else v
    return f
def ExactSequence(validators, **kwargs):
    """Matches each element in a sequence against the corresponding element in
    the validators.

    :param msg: Message to deliver to user if validation fails.
    :param kwargs: All other keyword arguments are passed to the sub-Schema
        constructors.

    >>> from voluptuous import *
    >>> validate = Schema(ExactSequence([str, int, list, list]))
    >>> validate(['hourly_report', 10, [], []])
    ['hourly_report', 10, [], []]
    """
    msg = kwargs.pop('msg', None)
    schemas = [Schema(val, **kwargs) for val in validators]

    def f(v):
        if not isinstance(v, (list, tuple)):
            raise Invalid(msg)
        # BUG FIX: the previous implementation assigned v[i] in place,
        # which raised TypeError for tuple inputs (tuples are immutable)
        # even though the guard above explicitly accepts tuples. Validate
        # a mutable copy instead.
        values = list(v)
        try:
            for i, schema in enumerate(schemas):
                values[i] = schema(values[i])
        except Invalid as e:
            raise e if msg is None else Invalid(msg)
        # Preserve the input's sequence type in the result.
        return tuple(values) if isinstance(v, tuple) else values
    return f
# When executed directly, run every doctest embedded in this module's
# docstrings as a lightweight regression suite.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| true |
7383f5100571ba71f80321c14faa4fcf6bd2b317 | Python | xunathan96/CSC420 | /Assignment 1/code/boundary.py | UTF-8 | 1,337 | 3.0625 | 3 | [] | no_license | import numpy as np
def crop_filter(filter):
    """Trim a 2-D filter so that both of its dimensions are odd.

    An even-sized dimension loses its last row/column; odd dimensions are
    left untouched. Returns a view of the top-left odd-by-odd region.
    """
    rows, cols = filter.shape
    # Subtract 1 exactly when the size is even (1 - size % 2 is 1 for
    # even sizes and 0 for odd ones).
    rows -= 1 - rows % 2
    cols -= 1 - cols % 2
    return filter[:rows, :cols]
def zero_pad(image, filter):
    """Zero-pad a 2-D image by the half-extents of an odd-sized filter.

    Returns the padded frame together with the vertical (j) and
    horizontal (k) half-sizes of the filter.
    """
    # Half-extents of the (assumed odd x odd) filter.
    half_h = (filter.shape[0] - 1) // 2
    half_w = (filter.shape[1] - 1) // 2
    frame = np.pad(image,
                   ((half_h, half_h), (half_w, half_w)),
                   mode='constant', constant_values=0)
    return frame, half_h, half_w
def zero_pad_3D(image, filter):
    """Zero-pad a 3-D volume by the half-extents of an odd-sized filter.

    Returns the padded frame together with the per-axis half-sizes
    (j, k, l) of the filter.
    """
    # Half-extents along each of the three axes.
    half_h = (filter.shape[0] - 1) // 2
    half_w = (filter.shape[1] - 1) // 2
    half_d = (filter.shape[2] - 1) // 2
    frame = np.pad(image,
                   ((half_h, half_h), (half_w, half_w), (half_d, half_d)),
                   mode='constant', constant_values=0)
    return frame, half_h, half_w, half_d
def zero_pad_extend(image, filter):
    """Zero-pad a 2-D image by the *full* filter size on every side.

    Returns the padded frame along with the filter's height and width
    (the amount of padding applied per side on each axis).
    """
    pad_h, pad_w = filter.shape
    frame = np.pad(image,
                   ((pad_h, pad_h), (pad_w, pad_w)),
                   mode='constant', constant_values=0)
    return frame, pad_h, pad_w
| true |