blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
905e5ad9d40829a09ffd8ae82ec1f3d48323b05e | 9c3cbc02b62b19fa9cca1a034b8512d659470010 | /qlearning_maze/Robot.py | 92024a6063264759f9ca7de112d3b7f1d5d559af | [] | no_license | SongShawn/MachineLearning | b3da75e68416d3257bce634b930b568ae781076d | b7330eff9ce511dc3e99f41bff8a4c702620d454 | refs/heads/master | 2022-12-05T14:37:52.851134 | 2019-07-09T09:11:18 | 2019-07-09T09:11:18 | 157,948,562 | 0 | 0 | null | 2022-11-22T01:39:22 | 2018-11-17T03:53:15 | Jupyter Notebook | UTF-8 | Python | false | false | 5,454 | py | import random
import numpy as np
import math
import os
import copy
class Robot(object):
    """Tabular Q-learning agent for the maze environment.

    The robot stores a Q-table mapping (row, col) states to per-action
    values and acts epsilon-greedily while learning; epsilon decays with
    the number of interaction steps taken.
    """

    def __init__(self, maze, alpha=0.5, gamma=0.9, epsilon0=0.5):
        """
        maze:     environment exposing valid_actions, height, width,
                  robot['loc'] and move_robot().
        alpha:    learning rate of the Q update.
        gamma:    discount factor for future rewards.
        epsilon0: initial exploration rate.
        """
        self.maze = maze
        self.valid_actions = self.maze.valid_actions
        self.state = None
        self.action = None

        # Set Parameters of the Learning Robot
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon0 = epsilon0
        self.epsilon = epsilon0
        self.t = 0  # number of update() calls so far; drives epsilon decay

        # Total number of grid states in the maze.
        self.nS = self.maze.height * self.maze.width
        self.Qtable = {}
        self.reset()

    def reset(self):
        """Re-sense the current state and make sure it has a Q-table row."""
        self.state = self.sense_state()
        self.create_Qtable_line(self.state)

    def set_status(self, learning=False, testing=False):
        """Select whether the robot updates its Q-table (learning) or only
        executes the learned greedy policy (testing)."""
        self.learning = learning
        self.testing = testing

    def update_parameter(self, step_times=None):
        """Decay epsilon and return its new value.

        step_times, when given, is the recent average episode length and
        selects between a fast (1/sqrt(t)) and a slow (1/log(t)) schedule.
        """
        if self.testing:
            # choose_action() never consults epsilon in testing mode; this
            # value only mirrors the original behaviour.
            self.epsilon = 1.0
        else:
            # FIX: identity comparison (`is not None`) instead of `!= None`.
            if step_times is not None:
                if step_times <= self.maze.height * self.maze.width:
                    self.epsilon = 1.0 / math.sqrt(self.t)
                else:
                    self.epsilon = 1.0 / math.log(self.t + 1000, 1000)
            else:
                if self.t < 10 * 1000:
                    self.epsilon = 1.0 / math.log(self.t + 1000, 1000)
                else:
                    self.epsilon = 1.0 / math.sqrt(self.t)
        return self.epsilon

    def sense_state(self):
        """Return the robot's current maze location as a (row, col) tuple."""
        r, c = self.maze.robot['loc']
        return r, c

    def create_Qtable_line(self, state):
        """Create a zero-initialised Q-table row for `state` if missing.

        An existing row is left untouched.
        """
        if state not in self.Qtable:
            self.Qtable[state] = {a: 0.0 for a in self.maze.valid_actions}

    def choose_action(self):
        """Pick an action: epsilon-greedy while learning, greedy while
        testing, uniformly random otherwise."""

        def is_random_exploration():
            # Explore with probability epsilon.
            return random.uniform(0, 1) < self.epsilon

        if self.learning:
            if is_random_exploration():
                action = np.random.choice(self.maze.valid_actions)
            else:
                # Exploit: action with the highest Q value for this state.
                action = max(self.Qtable[self.state], key=self.Qtable[self.state].get)
        elif self.testing:
            action = max(self.Qtable[self.state], key=self.Qtable[self.state].get)
        else:
            # BUG FIX: the original called an undefined helper
            # get_epsilon_greedy_probs() here, raising NameError; fall back
            # to a uniform random action instead.
            action = np.random.choice(self.maze.valid_actions)
        return action

    def update_Qtable(self, r, action, next_state):
        """Standard Q-learning update for the transition just taken:
        Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))."""
        if self.learning:
            best_next = max(self.Qtable[next_state].values())
            self.Qtable[self.state][action] = \
                (1 - self.alpha) * self.Qtable[self.state][action] + \
                self.alpha * (r + self.gamma * best_next)

    def update(self, avg_step_times_last_10=None):
        """Run one interaction step: sense, act, observe, learn.

        Called every time in every epoch in training or testing.
        Returns (action, reward) for the step.
        """
        self.t += 1
        self.state = self.sense_state()          # get the current state
        self.create_Qtable_line(self.state)      # ensure it has a Q row

        action = self.choose_action()            # choose action for this state
        reward = self.maze.move_robot(action)    # move robot for given action

        next_state = self.sense_state()          # get next state
        self.create_Qtable_line(next_state)      # ensure the next state has a row

        if self.learning and not self.testing:
            self.update_Qtable(reward, action, next_state)   # update q table
            self.update_parameter(avg_step_times_last_10)    # update parameters

        return action, reward

    def Qstate_to_file(self, file_name):
        """Dump the Q-table row of every grid cell to `file_name`.

        Cells that were never visited are written as 'NA'.
        """
        with open(file_name, 'w') as f:
            for r in range(self.maze.height):
                for c in range(self.maze.width):
                    if (r, c) in self.Qtable:
                        f.write('({},{}): {}\n'.format(r, c, self.Qtable[(r, c)]))
                    else:
                        f.write('({},{}): {}\n'.format(r, c, 'NA'))
| [
"SongShawn@github.com"
] | SongShawn@github.com |
fdfac35692f100cf285576edc13121121d0f7283 | f2174a48badf14fbedf8da3c8a5f83f0c8e4ae16 | /SPConvNets/trainer_3dmatch.py | a377a87ac475049f2b9e6d381967245da8998f4a | [
"MIT"
] | permissive | XYZ-99/EPN_PointCloud | e766aa9c4f9fce378492c660ef7f702278c591eb | db91de7e537fae480077aff8d9fd0df9bb45903c | refs/heads/main | 2023-05-07T09:10:19.626723 | 2021-05-18T02:23:53 | 2021-05-18T11:56:00 | 367,233,494 | 0 | 0 | MIT | 2021-05-14T02:58:30 | 2021-05-14T02:58:30 | null | UTF-8 | Python | false | false | 9,546 | py | from importlib import import_module
from SPConvNets import FragmentLoader, FragmentTestLoader, PointCloudPairSampler, Dataloader_3dmatch_eval
from tqdm import tqdm
import torch
import vgtk
import vgtk.pc as pctk
import numpy as np
import os
import os.path as osp
class Trainer(vgtk.Trainer):
    """Training/evaluation driver for SO(3)-equivariant 3DMatch descriptors.

    NOTE(review): relies on vgtk.Trainer internals (self.optimizer,
    self.logger, self.summary, self.root_dir) that are not visible here.
    """

    def __init__(self, opt):
        super(Trainer, self).__init__(opt)
        # Register the scalar statistics to log; the equivariant loss terms
        # are tracked only when equi_alpha is enabled.
        if self.opt.train_loss.equi_alpha > 0:
            self.summary.register(['Loss', 'InvLoss', 'Pos', 'Neg', 'Acc', \
                                   'EquiLoss', 'EquiPos', 'EquiNeg', 'EquiAcc' ])
        else:
            self.summary.register(['Loss', 'Pos', 'Neg', 'Acc'])
        self.epoch_counter = 0  # completed passes over the training set
        self.iter_counter = 0   # optimisation steps taken so far

    def _setup_datasets(self):
        """Build the paired-fragment training dataloader (train mode only)."""
        if self.opt.mode == 'train':
            dataset = FragmentLoader(self.opt, self.opt.model.search_radius, kptname=self.opt.dataset, \
                                     use_normals=self.opt.model.normals, npt=self.opt.npt)
            # The sampler yields fragment pairs for the triplet-style loss.
            sampler = PointCloudPairSampler(len(dataset))
            self.dataset_train = torch.utils.data.DataLoader(dataset, \
                                                             batch_size=self.opt.batch_size, \
                                                             shuffle=False, \
                                                             sampler=sampler,
                                                             num_workers=self.opt.num_thread)
            self.dataset_iter = iter(self.dataset_train)

        if self.opt.mode == 'eval':
            self.dataset_train = None

    def _setup_eval_datasets(self, scene):
        """Build a single-scene dataloader for 3DMatch evaluation."""
        dataset_eval = Dataloader_3dmatch_eval(self.opt, scene)
        self.dataset_eval = torch.utils.data.DataLoader(dataset_eval, \
                                                        batch_size=1, \
                                                        shuffle=False, \
                                                        num_workers=1)

    def _setup_model(self):
        """Instantiate the network named in the options and dump its params."""
        param_outfile = osp.join(self.root_dir, "params.json")
        module = import_module('SPConvNets.models')
        self.model = getattr(module, self.opt.model.model).build_model_from(self.opt, param_outfile)

        # flag for whether the model requires an input fragment (besides patches)
        self.smooth_model = type(self.model) == import_module('SPConvNets.models').inv_so3net_smooth.InvSO3ConvSmoothModel

    def _setup_metric(self):
        """Create the triplet loss over the model's rotation anchors."""
        self.anchors = self.model.get_anchor().to(self.opt.device)
        self.metric = vgtk.loss.TripletBatchLoss(self.opt,\
                                                 self.anchors,
                                                 alpha = self.opt.train_loss.equi_alpha) \

    # For epoch-based training
    def epoch_step(self):
        for it, data in tqdm(enumerate(self.dataset_train)):
            self._optimize(data)

    # For iter-based training
    def step(self):
        """Run one optimisation step, restarting the loader at epoch end."""
        try:
            data = next(self.dataset_iter)
        except StopIteration:
            # New epoch
            self.epoch_counter += 1
            print("[DataLoader]: At Epoch %d!"%self.epoch_counter)
            self.dataset_iter = iter(self.dataset_train)
            data = next(self.dataset_iter)

        self._optimize(data)

    def _prepare_input(self, data):
        """Move the source/target patch batches to the device and flatten
        them to (batch*npt, input_num, channels); smooth models additionally
        receive the whole fragment as a tuple element."""
        in_tensor_src = data['src'].to(self.opt.device)
        in_tensor_tgt = data['tgt'].to(self.opt.device)
        nchannel = in_tensor_src.shape[-1]
        in_tensor_src = in_tensor_src.view(-1, self.opt.model.input_num, nchannel)
        in_tensor_tgt = in_tensor_tgt.view(-1, self.opt.model.input_num, nchannel)
        if self.smooth_model:
            fragment_src = data['frag_src'].to(self.opt.device).squeeze()
            in_tensor_src = (in_tensor_src, fragment_src)
            fragment_tgt = data['frag_tgt'].to(self.opt.device).squeeze()
            in_tensor_tgt = (in_tensor_tgt, fragment_tgt)
        return in_tensor_src, in_tensor_tgt

    def _optimize(self, data):
        """Forward both fragments, compute the loss, backprop and log."""
        gt_T = data['T'].to(self.opt.device)  # ground-truth relative transform
        in_tensor_src, in_tensor_tgt = self._prepare_input(data)

        y_src, yw_src = self.model(in_tensor_src)
        y_tgt, yw_tgt = self.model(in_tensor_tgt)

        self.optimizer.zero_grad()

        # With equi_alpha > 0 the metric also returns equivariant-loss terms.
        if self.opt.train_loss.equi_alpha > 0:
            self.loss, inv_info, equi_info = self.metric(y_src, y_tgt, gt_T, yw_src, yw_tgt)
            invloss, pos_loss, neg_loss, accuracy = inv_info
            equiloss, equi_accuracy, equi_pos_loss, equi_neg_loss = equi_info
        else:
            self.loss, accuracy, pos_loss, neg_loss = self.metric(y_src, y_tgt, gt_T)

        self.loss.backward()
        self.optimizer.step()

        # Log training stats
        if self.opt.train_loss.equi_alpha > 0:
            log_info = {
                'Loss': self.loss.item(),
                'InvLoss': invloss.item(),
                'Pos': pos_loss.item(),
                'Neg': neg_loss.item(),
                'Acc': 100 * accuracy.item(),
                'EquiLoss': equiloss.item(),
                'EquiPos': equi_pos_loss.item(),
                'EquiNeg': equi_neg_loss.item(),
                'EquiAcc': 100 * equi_accuracy.item(),
            }
        else:
            log_info = {
                'Loss': self.loss.item(),
                'Pos': pos_loss.item(),
                'Neg': neg_loss.item(),
                'Acc': 100 * accuracy.item(),
            }

        self.summary.update(log_info)
        self.iter_counter += 1

    def _print_running_stats(self, step):
        """Log the accumulated summary statistics for this step."""
        stats = self.summary.get()
        self.logger.log('Training', f'{step}: {stats}')
        # self.summary.reset(['Loss', 'Pos', 'Neg', 'Acc', 'InvAcc'])

    def test(self):
        # Unused; evaluation is driven through eval() instead.
        pass

    def eval(self, select):
        '''
        3D Match evaluation. Only works for invariant setting
        '''
        from SPConvNets.datasets import evaluation_3dmatch as eval3dmatch

        # set up where to store the output feature
        all_results = dict()

        for scene in select:
            assert osp.isdir(osp.join(self.opt.dataset_path, scene))
            print(f"Working on scene {scene}...")
            target_folder = osp.join('data/evaluate/3DMatch/', self.opt.experiment_id, scene, f'{self.opt.model.output_num}_dim')
            self._setup_eval_datasets(scene)
            self._generate(target_folder)
            # recalls: [tau, ratio]
            results = eval3dmatch.evaluate_scene(self.opt.dataset_path, target_folder, scene)
            all_results[scene] = results

        self._write_csv(all_results)
        print("Done!")

    def _generate(self, target_folder):
        """Run the network over every keypoint patch of every fragment in
        the current eval scene and save one feature matrix per fragment."""
        with torch.no_grad():
            self.model.eval()
            bs = self.opt.batch_size
            print("\n---------- Evaluating the network! ------------------")
            from tqdm import tqdm
            for it, data in enumerate(self.dataset_eval):
                sid = data['sid'].item()
                # scene = data['scene']
                checknan = lambda tensor: torch.sum(torch.isnan(tensor))
                print("\nWorking on fragment id", sid)
                n_keypoints = data['clouds'].shape[0]
                # 5000 x N x 3
                clouds = data['clouds'].to(self.opt.device).squeeze()
                npt = clouds.shape[0]
                if self.smooth_model:
                    frag = data['frag'].to(self.opt.device).squeeze()
                feature_buffer = []
                # Process keypoint patches in mini-batches of size bs.
                for bi in tqdm(range(0, npt, bs)):
                    in_tensor_test = clouds[bi : min(npt,bi+bs)]
                    if self.smooth_model:
                        in_tensor_test = (in_tensor_test, frag)
                    feature, _ = self.model(in_tensor_test)
                    feature_np = feature.detach().cpu().numpy()
                    # Replace NaNs so downstream matching does not break.
                    if checknan(feature).item() > 0:
                        feature_np = np.nan_to_num(feature_np)
                    feature_buffer.append(feature_np)
                    # print("Batch counter at %d/%d"%(bi, npt), end='\r')

                # target_folder = osp.join('data/evaluate/3DMatch/', self.opt.experiment_id, scene, f'{self.opt.model.output_num}_dim')
                os.makedirs(target_folder, exist_ok=True)
                feature_out = np.vstack(feature_buffer)
                out_path = osp.join(target_folder, "feature%d.npy"%sid)
                print(f"\nSaving features to {out_path}")
                np.save(out_path, feature_out)

    def _write_csv(self, results):
        """Write per-scene recall-vs-tau rows to recall.csv and print the
        average recall at the first tau threshold."""
        import csv
        from SPConvNets.datasets import evaluation_3dmatch as eval3dmatch
        csvpath = osp.join('data/evaluate/3DMatch/', self.opt.experiment_id, 'recall.csv')
        with open(csvpath, 'w', newline='') as csvfile:
            fieldnames = ['Scene'] + ['tau_%.2f'%tau for tau in eval3dmatch.TAU_RANGE]
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for scene in results.keys():
                recalls = results[scene]
                row = dict()
                row['Scene'] = scene
                for tup in recalls:
                    tau, ratio = tup
                    row['tau_%.2f'%tau] = "%.2f"%ratio
                writer.writerow(row)

        ### print out the stats
        all_recall = []
        for scene in results.keys():
            tau, ratio = results[scene][0]
            print("%s recall is %.2f at tau %.2f"%(scene, ratio, tau))
            all_recall.append(ratio)
        avg = np.array(all_recall).mean()
        print("Average recall is %.2f !" % avg)
| [
"chw9308@hotmail.com"
] | chw9308@hotmail.com |
70e6f1310dd58ca95bbcd6e0f5213a0df17dcfba | 4b9014cf4ed3bb512e7de1fccd04b8945fcb19a6 | /enemy.py | 3f968e71899624f89fce0ad4601f6281e293ae1d | [] | no_license | supreme1902071/1902071.github.io | e640ffe0ec099353f091be35dfef50f199a9fa60 | ac33263fa37c469165c8175fc9aefe2a3c176fd5 | refs/heads/main | 2023-06-07T22:53:02.961934 | 2021-07-01T09:21:40 | 2021-07-01T09:21:40 | 381,974,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,691 | py | import pygame
from random import * # 创建随机数模块
class SmallEnemy(pygame.sprite.Sprite):
    """Small enemy plane: the fastest type, destroyed by a single hit."""

    def __init__(self, bg_size):
        # bg_size: (width, height) of the game window in pixels.
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class

        self.image = pygame.image.load("images/enemy1.png").convert_alpha()
        # Frames of the destruction animation.
        self.destroy_images = []
        self.destroy_images.extend([\
            pygame.image.load("images/enemy1_down1.png").convert_alpha(), \
            pygame.image.load("images/enemy1_down2.png").convert_alpha(), \
            pygame.image.load("images/enemy1_down3.png").convert_alpha(), \
            pygame.image.load("images/enemy1_down4.png").convert_alpha() \
            ])
        self.rect = self.image.get_rect()  # rect sized to the sprite image
        self.width, self.height = bg_size[0], bg_size[1]  # playfield size
        self.speed = 3  # downward speed in pixels per frame
        self.active = True  # True while alive, False once destroyed
        # Random spawn: anywhere across the width, and up to 5 screen heights
        # above the top edge so enemies drift into view over time.
        self.rect.left, self.rect.top = \
                        randint(0, self.width - self.rect.width), \
                        randint(-5 * self.height, 0)
        self.mask = pygame.mask.from_surface(self.image)  # non-transparent pixels, for precise collisions

    def move(self):
        """Advance one frame: slide down; respawn after leaving the screen."""
        if self.rect.top < self.height:
            self.rect.top += self.speed
        else :
            self.reset()

    def reset(self):
        """Respawn alive at a fresh random position above the screen."""
        self.active = True
        self.rect.left, self.rect.top = \
                        randint(0, self.width - self.rect.width), \
                        randint(-5 * self.height, 0)
class MidEnemy(pygame.sprite.Sprite):
    """Mid-size enemy plane: slower than SmallEnemy, takes several hits."""

    # Hit points shared as the default for every mid-size enemy.
    energy = 8

    def __init__(self, bg_size):
        # bg_size: (width, height) of the game window in pixels.
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class

        self.image = pygame.image.load("images/enemy2.png").convert_alpha()
        # Image flashed while the plane is being hit.
        self.image_hit = pygame.image.load("images/enemy2_hit.png").convert_alpha()
        # Frames of the destruction animation.
        self.destroy_images = []
        self.destroy_images.extend([\
            pygame.image.load("images/enemy2_down1.png").convert_alpha(), \
            pygame.image.load("images/enemy2_down2.png").convert_alpha(), \
            pygame.image.load("images/enemy2_down3.png").convert_alpha(), \
            pygame.image.load("images/enemy2_down4.png").convert_alpha() \
            ])
        self.rect = self.image.get_rect()  # rect sized to the sprite image
        self.width, self.height = bg_size[0], bg_size[1]  # playfield size
        self.speed = 2  # downward speed in pixels per frame
        self.active = True  # True while alive, False once destroyed
        # Spawn between 1 and 10 screen heights above the top edge, so a
        # mid-size enemy never appears immediately at game start.
        self.rect.left, self.rect.top = \
                        randint(0, self.width - self.rect.width), \
                        randint(-10 * self.height, -self.height)
        self.mask = pygame.mask.from_surface(self.image)  # non-transparent pixels, for precise collisions
        self.energy = MidEnemy.energy  # per-instance hit points
        self.hit = False  # True while showing the "hit" flash image

    def move(self):
        """Advance one frame: slide down; respawn after leaving the screen."""
        if self.rect.top < self.height:
            self.rect.top += self.speed
        else :
            self.reset()

    def reset(self):
        """Respawn alive, at full energy, above the screen."""
        self.active = True
        self.energy = MidEnemy.energy  # restore full hit points
        self.rect.left, self.rect.top = \
                        randint(0, self.width - self.rect.width), \
                        randint(-10 * self.height, -self.height)
class BigEnemy(pygame.sprite.Sprite):
    """Large enemy plane: slowest, toughest, with a two-frame animation."""

    # Hit points shared as the default for every large enemy.
    energy = 20

    def __init__(self, bg_size):
        # bg_size: (width, height) of the game window in pixels.
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class

        # Two alternating animation frames for the engine/propeller effect.
        self.image1 = pygame.image.load("images/enemy3_n1.png").convert_alpha()
        self.image2 = pygame.image.load("images/enemy3_n2.png").convert_alpha()
        # Image flashed while the plane is being hit.
        self.image_hit = pygame.image.load("images/enemy3_hit.png").convert_alpha()
        # Frames of the destruction animation.
        self.destroy_images = []
        self.destroy_images.extend([\
            pygame.image.load("images/enemy3_down1.png").convert_alpha(), \
            pygame.image.load("images/enemy3_down2.png").convert_alpha(), \
            pygame.image.load("images/enemy3_down3.png").convert_alpha(), \
            pygame.image.load("images/enemy3_down4.png").convert_alpha(), \
            pygame.image.load("images/enemy3_down5.png").convert_alpha(), \
            pygame.image.load("images/enemy3_down6.png").convert_alpha() \
            ])
        self.rect = self.image1.get_rect()  # rect sized to the first frame
        self.width, self.height = bg_size[0], bg_size[1]  # playfield size
        self.speed = 1  # downward speed in pixels per frame
        self.active = True  # True while alive, False once destroyed
        # Spawn between 5 and 15 screen heights above the top edge, so a
        # large enemy appears only well into the game.
        self.rect.left, self.rect.top = \
                        randint(0, self.width - self.rect.width), \
                        randint(-15 * self.height, -5 * self.height)
        self.mask = pygame.mask.from_surface(self.image1)  # non-transparent pixels, for precise collisions
        self.energy = BigEnemy.energy  # per-instance hit points
        self.hit = False  # True while showing the "hit" flash image

    def move(self):
        """Advance one frame: slide down; respawn after leaving the screen."""
        if self.rect.top < self.height:
            self.rect.top += self.speed
        else :
            self.reset()

    def reset(self):
        """Respawn alive, at full energy, above the screen."""
        self.active = True
        self.energy = BigEnemy.energy  # restore full hit points
        self.rect.left, self.rect.top = \
                        randint(0, self.width - self.rect.width), \
                        randint(-15 * self.height, -5 * self.height)
"1017110347@qq.com"
] | 1017110347@qq.com |
714954d08ba02518407e7f3e2aed040adb3d741f | 53a48e74508e615173493436eb34b7f8f6eefd96 | /web_api/project/schema.py | eb8bf79a81e863204db9ed39ca6fcb98781c1675 | [] | no_license | williamstrong/ForestArtBackEnd | 4f82cb3b663c586238b3f9d6421ece8227f4755a | f2d69547d948e731b1b87b5acd17ae52156614e6 | refs/heads/master | 2020-03-22T17:10:41.809137 | 2018-08-23T18:30:57 | 2018-08-23T18:30:57 | 140,378,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | import graphene
import image_api.schema
class Query(image_api.schema.Query, graphene.ObjectType):
    """Project-level root query, composed from the image_api app's Query."""
    pass


# Schema object served by the GraphQL endpoint.
schema = graphene.Schema(query=Query)
| [
"william.strong@me.com"
] | william.strong@me.com |
ecfe49b03baa1334ccc75a2b3bdbf0eb1e4e241a | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_0163_Missing_Ranges.py | b5c8d5b3ad79b18657c10fbcb233bf4e9f0f2ccd | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | class Solution(object):
def findMissingRanges(self, nums, lower, upper):
"""
:type nums: List[int]
:type lower: int
:type upper: int
:rtype: List[str]
"""
res = []
for x in nums:
if x == lower:
lower += 1
elif lower < x:
if lower + 1 == x:
res.append(str(lower))
else:
res.append('%s->%s' % (lower, x-1))
lower = x + 1
if lower == upper:
res.append(str(upper))
elif lower < upper:
res.append('%s->%s' % (lower, upper))
return res
| [
"10962421@qq.com"
] | 10962421@qq.com |
4aa90e4762ebc9bc01901de23e573ec8e5b9bca2 | da9942c175c7289ff9ad1e8de0fb817ff2103292 | /62.py | 3a9dc97ed2467f894184da448ff2fe60116df59a | [] | no_license | db2398/set7 | 325acf2415642a82b6c0efb48142ed65208f6049 | fa5a2e4b75344368225e60da7a1acf27c522c692 | refs/heads/master | 2020-06-14T14:33:04.014545 | 2019-07-03T11:18:53 | 2019-07-03T11:18:53 | 195,027,788 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | t=input()
# Decide whether the input string is a binary string.
sd=set(t)
# NOTE(review): this prints "yes" only when BOTH digits occur; a string of
# only '0's or only '1's yields {"0"} or {"1"} and prints "no" -- confirm
# that strictness is what the task requires.
if(sd=={"0","1"}):
    print("yes")
else:
    print("no")
| [
"noreply@github.com"
] | db2398.noreply@github.com |
df297e6f7f59c6368f32e5ccd5de786138cb5f86 | 71a3616c900f1b2e385b37904c279ed63b09a150 | /code/function.py | b96e804efb5d73b6d159c1e4606a9472bab57ce6 | [] | no_license | svoss/masters-thesis | 21c7fd88e685310c74af8f9dce0357c71f1fe30f | 23c86a87aa1ee3376c3b825a5ebc9fe6b3b662f8 | refs/heads/master | 2021-03-27T20:41:15.310349 | 2017-09-05T11:26:18 | 2017-09-05T11:26:18 | 87,936,350 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class MultiTaskLoss(function.Function):
    """Chainer function computing a weighted sum of per-task losses.

    Given a 1-D vector of task losses (one entry per factor), forward()
    returns the scalar dot product with `factors`; backward() distributes
    the incoming scalar gradient back to each task scaled by its factor.
    """

    def __init__(self, factors):
        self.factors = factors   # Python sequence of per-task weights
        self.factors_xp = None   # lazily created numpy/cupy copy of factors

    def check_type_forward(self, in_types):
        # Depending on the arguments, pad_width and keywords, the input value
        # may be inappropriate. In that case, numpy.pad or cupy.pad will raise
        # errors, so that only check the size and the dtype in this function.
        type_check.expect(in_types.size() == 1)
        x_type = in_types[0]
        type_check.expect(x_type.dtype.kind == 'f')
        # The loss vector must have exactly one entry per task factor.
        type_check.expect(x_type.shape[0] == len(self.factors))

    def forward(self, inputs):
        xp = cuda.get_array_module(*inputs)
        # Materialise the factor array on the same backend (numpy/cupy)
        # and with the same dtype as the input, once.
        if self.factors_xp is None:
            self.factors_xp = xp.array(self.factors,dtype=inputs[0].dtype)
        x = inputs[0].dot(self.factors_xp)
        # Return a 0-d array: the combined scalar loss.
        return xp.array(x,dtype=inputs[0].dtype).reshape(()),

    def backward(self, inputs, grads):
        xp = cuda.get_array_module(*inputs)
        x = inputs[0]
        gy = grads[0]
        # d(sum_i f_i * x_i)/dx_i = f_i, scaled by the incoming scalar grad
        # (a 0-d dot broadcasts to elementwise multiplication).
        gx = gy.dot(self.factors_xp).astype(x.dtype, copy=False)
        return gx,
if __name__ == "__main__":
    # Smoke test: combine three task losses with equal weights.
    import numpy as np
    MTL = MultiTaskLoss([.33, .33, .33])
    # BUG FIX: check_type_forward requires a 1-D loss vector whose length
    # equals len(factors); the original passed a 2x3 matrix (shape[0]==2),
    # which fails the type check and cannot reshape to a scalar.
    L = np.array([1., 2., 3.], dtype=np.float64)
    X = MTL(L)
    print(X.data)  # also fixed: py3-compatible print call
    X.backward()
"svoss@i-sti.nl"
] | svoss@i-sti.nl |
843082097c900a540cea7686d7ee120b302d66b2 | 7a9cfd7b5a0047b58c0db1ab2d72af7edd714691 | /Regular Expressions/Regularexpression.py | ef1008ada9136935c2bd344570d0aaa2b6906751 | [] | no_license | nareshkodimala/Python | 230571d3603b55c109a5ca06c1bacf7a31283d88 | 0d340003c613692ecbefa64252858793407e6f87 | refs/heads/master | 2022-02-25T06:34:36.299009 | 2019-07-22T09:33:33 | 2019-07-22T09:33:33 | 197,910,128 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #this re.match() which finds only first matched string otherwise it gives None
# import re
# st="this is regular expresstion"
# result=re.match(r"this",st)
# print(result)
#
# which is gives matched name as it is by using group
# import re
# st="example of print name only"
# result=re.match(r"example",st)
# print(result.group(0))
# example of start and end functions in regular expression
# Demo of re.match() position accessors on a match object.
import re
st="example for start position of matching pattern in the string"
res=re.match(r"example",st)
# start() returns the index where the match begins (0 here).
print(res.start())
# end() returns the index just past the last matched character (7 here) --
# the original comment wrongly said "first position".
print(res.end())
| [
"nareshkodimala111@gmail.com"
] | nareshkodimala111@gmail.com |
52bf7e4630283924775a27166d4d67d3905e5f2f | 9496842de3b1e530a25369a3beb360674346dd8e | /ex075.py | c326147534347373331e5c5d98864f7a52b4bb03 | [] | no_license | zuko56/Projetos_Python | 3e8a16080d5c158ff5f46d874653db462e8daa5f | 4ee3b609c0749b23facc35e48104d4ed272d4103 | refs/heads/master | 2020-05-15T06:03:16.224070 | 2019-04-23T14:18:48 | 2019-04-23T14:18:48 | 182,115,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | num = (int(input('Digite um número: ')), int(input('Digite outro número: ')), int(input('Digite mais um número: ')), int(input('Digite o último número: ')))
print(f'Vc digitou os valores {num}')
print(f'O valor 9 apareceu {num.count(9)} vezes')
if 3 in num:
print(f'O valor 3 apareceu {num.index(3) + 1} posição')
else:
print('O valor 3 não foi encontrado')
print('Os valores pares digitados foram: ', end='')
for n in num:
if n % 2 == 0:
print(n, end=' ')
| [
"noreply@github.com"
] | zuko56.noreply@github.com |
8b778933bd82b1063c33bf1c3d6bfb6f56edd624 | ba3f38205d104afc33895b9a95a0e4603aaad79b | /faceborder/1.py | 9b0df2f0cac0b767a133e1b4dbf46b69d6fe1f24 | [
"MIT"
] | permissive | Jegan-Novitech/SkyLark-Novitech | 6705644facd013b2e902fefad3dde789ba738f3e | a7a9abdabf6567d28a4a2d926c42a45703d7f3d1 | refs/heads/master | 2023-01-19T09:10:47.873619 | 2020-11-24T05:21:42 | 2020-11-24T05:21:42 | 315,524,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,835 | py | from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os
# Overlay images, resized to the portrait working resolution (750x1200):
# 11.jpg is blended lightly as a decorative border, 12.jpg heavily for the
# frame fed to face detection.
# NOTE(review): cv2.imread returns None for a missing path and there is no
# guard before resize, so a bad path crashes here -- verify the paths.
img2 = cv2.imread('/home/pi/Desktop/face_new/faceborder/11.jpg' )
print(img2)
img2=cv2.resize(img2, (750, 1200))
img1 = cv2.imread('/home/pi/Desktop/face_new/faceborder/12.jpg' )
img1=cv2.resize(img1, (750, 1200))
#img_rgb = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
def detect_and_predict_mask(frame, faceNet, maskNet):
    """Detect faces in `frame` and classify each one as mask / no-mask.

    frame:   BGR image (numpy array) to scan.
    faceNet: OpenCV DNN face detector.
    maskNet: Keras classifier producing (mask, withoutMask) scores.

    Returns (locs, preds): parallel lists of bounding boxes
    (startX, startY, endX, endY) and prediction tuples.

    NOTE(review): reads the module-level `args["confidence"]` threshold.
    """
    # grab the dimensions of the frame and then construct a blob from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()

    faces = []  # preprocessed face crops
    locs = []   # bounding boxes, parallel to faces
    preds = []  # classifier outputs, parallel to faces

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) of this detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections
        if confidence > args["confidence"]:
            # map the normalised box back to pixel coordinates
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # clamp the bounding box to the frame bounds
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            try:
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)
                face = np.expand_dims(face, axis=0)

                # add the face and bounding box to their respective lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))
            except Exception:
                # BUG FIX: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit.  Degenerate (e.g. empty)
                # ROIs are simply skipped.
                pass

    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # batch all faces through the classifier at once, which is faster
        # than one-by-one predictions
        preds = maskNet.predict(faces)

    # return a 2-tuple of the face locations and their predictions
    return (locs, preds)
import temp1  # hardware temperature-sensor helper (its read is disabled below)

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", type=str,
    default="face_detector",
    help="path to face detector model directory")
ap.add_argument("-m", "--model", type=str,
    default="mask_detector.model",
    help="path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
    "res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(args["model"])

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
#cap = VideoStream(src=0).start()
cap = VideoStream(src=0,usePiCamera=1).start()
time.sleep(2)
# Main capture loop: read a frame, detect faces, classify mask usage, and
# display the annotated frame with the decorative border.
while 1:
    img = cap.read()
    # BUG FIX: the original had `print=img` here, which rebound the built-in
    # print function to the camera frame (almost certainly a typo for
    # print(img)); the binding has been removed.
    img = cv2.resize(img, (750, 1200))
    img = cv2.flip(img, 1)  # mirror for a selfie-style view
    # Blend the border lightly onto the display frame, and the overlay
    # heavily onto the frame used for detection.
    frame = cv2.addWeighted(src1=img, alpha=1, src2=img2, beta=0.4, gamma=0)
    blended1 = cv2.addWeighted(src1=img, alpha=1, src2=img1, beta=0.9, gamma=0)
    (locs, preds) = detect_and_predict_mask(blended1, faceNet, maskNet)

    # loop over the detected face locations and their corresponding predictions
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred

        # determine the class label and color for the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        # temperature = temp1.read_temp()  # hardware probe disabled
        temperature = '97.5'  # placeholder reading; re-enable temp1 for the sensor

        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

        # display the temperature, label and bounding box on the output frame
        cv2.putText(frame, temperature, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

    # show the output frames
    cv2.imshow("Frame", frame)
    cv2.imshow("Frame1", blended1)
    key = cv2.waitKey(1) & 0xFF
    # FIX: `key` was computed but never used, leaving no way to exit the
    # loop; press 'q' to quit cleanly.
    if key == ord("q"):
        break
"jegan@coitor.com"
] | jegan@coitor.com |
78f3e15ccac39d903ff575b24f5014d82f7f72b0 | 5ae5a01e71c6c82daf329df34ebdf4713986e588 | /djangoproject2/helloworld_project/settings.py | c570de6bad441bc83613e85c6a8fca1486ea6d48 | [] | no_license | rupeennaik/DjangoLearnings | aac9cbfc937c722eaa600209130ff91d82e9b6b1 | c84ca5df2ed3baf0686ba7c6c5f09663b9f4c404 | refs/heads/master | 2023-05-01T05:57:11.653928 | 2019-11-29T17:19:10 | 2019-11-29T17:19:10 | 222,110,470 | 0 | 0 | null | 2023-04-21T20:41:57 | 2019-11-16T14:23:40 | Python | UTF-8 | Python | false | false | 3,234 | py | """
Django settings for helloworld_project project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from an environment variable before any production deployment.
SECRET_KEY = 'zoywqmo46(f4_06ym7%q=^4-=sw$*fczj=z=*-1dv!0s9ohg)b'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# '*' accepts any Host header -- acceptable only for local development.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'pages.apps.PagesConfig' # new
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'helloworld_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): absolute Windows path makes the project non-portable;
        # os.path.join(BASE_DIR, 'templates') would work on any machine.
        'DIRS': [r"D:\DjangoLearnings\djangoproject2\helloworld_project\templates"], # new
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'helloworld_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
| [
"rupeennaik85@gmail.com"
] | rupeennaik85@gmail.com |
eec8fe6e2f22fd18ff0f380dd0c780a1162a9cc0 | 3721d5d0c6f041323db876f1fa167b0f1f7878cf | /polls/views.py | 0ec3cc718dc4174aff399eefed5159d22f9563b5 | [] | no_license | SemieZX/mysite | e76e7964dbed8728f6efea161a3c3b83b6361964 | 06e6b1d577b2da1c91e717dea82a2462309e660d | refs/heads/master | 2023-06-09T19:45:50.204551 | 2021-06-29T15:51:04 | 2021-06-29T15:51:04 | 381,083,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Landing page for the polls app; returns a static greeting."""
    return HttpResponse("This is paul's poll web")

def detail(request, question_id):
    """Placeholder detail page for the question with id *question_id*."""
    return HttpResponse("You're looking at question %s." % question_id)

def results(request, question_id):
    """Placeholder results page for the question with id *question_id*."""
    response = "You're looking at the results of question %s."
    return HttpResponse(response % question_id)

def vote(request, question_id):
    """Placeholder vote handler for the question with id *question_id*."""
    return HttpResponse("You're voting on question %s." % question_id)
| [
"18846031359@163.com"
] | 18846031359@163.com |
a149aaf98e52f7341e3dcc68f0efb14590b43c19 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02700/s274282920.py | 922c558418997e6ed17552a908b8b8cb32996882 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | A,B,C,D = (int(x) for x in input().split())
while True:
C -= B
if C <= 0:
print('Yes')
break
else:
A -= D
if A <= 0:
print('No')
break | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d850054b9c83525bc9a607e92ae0868b85845841 | 7d23614beeb6d9cdd9b1b7fe74d8919043b00985 | /hadoop/mapreduce/distinct_values/map.py | dd9bdd3085534eecdf577d5f10ff9fea7afa7723 | [] | no_license | AnastasiiaNovikova/stepic | 427c9cb97d944be16878015007bc4108946f74f4 | 5d311ea0e4d71c81161564a634d0011f9f2fc671 | refs/heads/master | 2021-05-07T16:18:24.346156 | 2016-04-08T19:08:35 | 2016-04-08T19:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import sys
for line in sys.stdin:
(key, values_str) = line.strip().split("\t")
values = values_str.split(",")
for value in values:
print(key, ",", value, "\t", 1, sep="")
| [
"mike.plekhanov@gmail.com"
] | mike.plekhanov@gmail.com |
76986032d06fbe9028e167dcf91e9552021d58ec | f9951087552808bdb5b045f627c7b3eb0a7019cc | /homeworks/hw03/src/widgets.py | 5c99146ce06a2685dff092dc9838d7ffb85631c3 | [] | no_license | ZaydH/cmps242 | 6933f7ba2b3609669b898d2afcbea17a4aa85c19 | d618d0a7b06998a4b8a43fc41c36e1b3094d6153 | refs/heads/master | 2022-03-02T11:31:54.598754 | 2019-09-11T00:36:05 | 2019-09-11T00:36:05 | 105,210,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | from IPython.display import Javascript, display
import ipywidgets
import const
def run_all(_):
    """Jupyter callback: re-execute every notebook cell below this one.

    The unused parameter absorbs the widget instance that ipywidgets
    passes to button click handlers.
    """
    # noinspection PyTypeChecker
    display(Javascript('IPython.notebook.execute_cells_below()'))
# --- cross-validation fold count ------------------------------------------
k_slider = ipywidgets.IntSlider(
    value=10,
    min=2,
    max=20,
    step=1,
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d',
    width=100,
)
k_hbox = ipywidgets.HBox([ipywidgets.Label('Number of Folds: '), k_slider])

# --- learning-algorithm selection (GD / EG / SGD from const) --------------
learning_alg_radio = ipywidgets.RadioButtons(
    options=[const.ALG_GD, const.ALG_EG, const.ALG_SGD],
    description="",
    disabled=False
)
learning_alg_radio.value = const.ALG_GD
learning_alg_hbox = ipywidgets.HBox([ipywidgets.Label("Select the Learning Algorithm: "),
                                     learning_alg_radio])

# --- regularizer selection (L1 / L2) --------------------------------------
regularizer_radio = ipywidgets.RadioButtons(
    options=[const.REGULARIZER_L1_NORM, const.REGULARIZER_L2_NORM],
    description="",
    disabled=False
)
regularizer_radio.value = const.REGULARIZER_L2_NORM
regularizer_hbox = ipywidgets.HBox([ipywidgets.Label("Select the Regularizer: "),
                                    regularizer_radio])

# --- validation-error metric selection (accuracy / RMS) -------------------
error_type_radio = ipywidgets.RadioButtons(
    options=[const.ERROR_ACCURACY, const.ERROR_RMS],
    description="",
    disabled=False
)
error_type_radio.value = const.ERROR_ACCURACY
error_type_hbox = ipywidgets.HBox([ipywidgets.Label("Select the Validation Error Calculation: "),
                                   error_type_radio])

# --- maximum training epochs ----------------------------------------------
epoch_slider = ipywidgets.IntSlider(
    value=25,
    min=1,
    max=100,
    step=1,
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d',
    width=100,
)
epoch_hbox = ipywidgets.HBox([ipywidgets.Label('Max. Number of Epochs: '), epoch_slider])

# --- action buttons: both re-run all cells below via run_all --------------
run_button = ipywidgets.Button(
    description='Run Learner',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Run Learning Algorithm with the specified paramters',
    icon='check'
)
run_button.on_click(run_all)

# --- hyperparameters: learning rate and lambda sweep range ----------------
learning_rate_slider = ipywidgets.FloatSlider(
    value=20,
    min=0.1,
    max=100,
    step=0.1,
    orientation='horizontal',
    readout=True,
    readout_format='.2f',
)
learning_rate_hbox = ipywidgets.HBox([ipywidgets.Label("Learning Rate ($\eta$): "),
                                      learning_rate_slider])

# Exponent range: the lambdas tried are 2**x for x in this interval.
lambdas_range_slider = ipywidgets.IntRangeSlider(
    value=[-10, 10],
    min=-10,
    max=10,
    step=1,
    orientation='horizontal',
    readout=True,
    readout_format='d',
)
lambdas_range_hbox = ipywidgets.HBox([ipywidgets.Label("Range of $\lambda$ in Form $2^{x}$: "),
                                      lambdas_range_slider])

update_results_button = ipywidgets.Button(
    description='Update Results',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Update the table and graph',
    icon='check'
)
update_results_button.on_click(run_all)
| [
"zhammoud@ucsc.edu"
] | zhammoud@ucsc.edu |
b56a9efa0e36bccedee67abd895b45d54be4114e | a259b8e6fbac1bc4ff26e18e6709829ca1eb1f3d | /a02_TextCNN/word_cnn.py | 4abe6257a679f75f96fc73a93cde539a3dddd251 | [] | no_license | godkillok/daguan | 37fef2fcb3b47b9a73dfc137e148bc90ba083bfc | c28c4bf8bc4cae22fc1b6491e5e46e5de7a5caf6 | refs/heads/master | 2020-03-25T00:20:35.446593 | 2018-12-02T16:24:53 | 2018-12-02T16:24:53 | 143,181,110 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,922 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import re
import os
import json
# for python 2.x
# import sys
# reload(sys)
# sys.setdefaultencoding("utf-8")
# Command-line configuration for the TextCNN classifier (tf.app.flags).
flags = tf.app.flags

flags.DEFINE_string("model_dir", "./model_dir", "Base directory for the model.")
flags.DEFINE_float("dropout_rate", 0.25, "Drop out rate")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_integer("embedding_size", 128, "embedding size")
flags.DEFINE_integer("num_filters", 100, "number of filters")
flags.DEFINE_integer("num_classes", 14, "number of classes")
flags.DEFINE_integer("shuffle_buffer_size", 1000000, "dataset shuffle buffer size")
flags.DEFINE_integer("sentence_max_len", 100, "max length of sentences")
flags.DEFINE_integer("batch_size", 64, "number of instances in a batch")
flags.DEFINE_integer("save_checkpoints_steps", 5000, "Save checkpoints every this many steps")
flags.DEFINE_integer("train_steps", 2000,
                     "Number of (global) training steps to perform")
flags.DEFINE_integer("train_epoch", 1,
                     "Number of (global) training steps to perform")
flags.DEFINE_string("data_dir", "./dbpedia_csv", "Directory containing the dataset")
flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated list of number of window size in each filter")
flags.DEFINE_string("pad_word", "<pad>", "used for pad sentence")

FLAGS = flags.FLAGS
def parse_line(line, vocab):
    """Parse one CSV record into ({"sentence": ids}, label) tensors.

    Expects rows with at least three comma-separated fields: the label
    first and the text third (the second field is ignored -- presumably a
    title).  The text is cleaned, truncated or padded to
    FLAGS.sentence_max_len tokens, and looked up in *vocab*.
    NOTE(review): the naive split(',') also splits commas *inside* the
    text, silently discarding everything after the first comma of the
    body -- confirm the dataset guarantees comma-free text.
    """
    def get_content(record):
        # Runs as plain Python inside tf.py_func on the raw bytes.
        fields = record.decode().split(",")
        if len(fields) < 3:
            raise ValueError("invalid record %s" % record)
        # Keep alphanumerics/apostrophes/backticks, collapse whitespace,
        # normalize backticks to apostrophes, lowercase.
        text = re.sub(r"[^A-Za-z0-9\'\`]", " ", fields[2])
        text = re.sub(r"\s{2,}", " ", text)
        text = re.sub(r"\`", "\'", text)
        text = text.strip().lower()
        tokens = text.split()
        tokens = [w.strip("'") for w in tokens if len(w.strip("'")) > 0]
        n = len(tokens)  # type: int
        # Truncate long sentences, pad short ones to the fixed length.
        if n > FLAGS.sentence_max_len:
            tokens = tokens[:FLAGS.sentence_max_len]
        if n < FLAGS.sentence_max_len:
            tokens += [FLAGS.pad_word] * (FLAGS.sentence_max_len - n)
        return [tokens, np.int32(fields[0])]

    result = tf.py_func(get_content, [line], [tf.string, tf.int32])
    # py_func loses static shapes; restore them for downstream layers.
    result[0].set_shape([FLAGS.sentence_max_len])
    result[1].set_shape([])
    # Lookup tokens to return their ids
    ids = vocab.lookup(result[0])
    # Labels in the CSV are 1-based; the estimator head expects 0-based.
    return {"sentence": ids}, result[1] - 1
def input_fn(path_csv, path_vocab, shuffle_buffer_size, num_oov_buckets):
    """Create tf.data Instance from csv file

    Args:
        path_csv: (string) path containing one example per line
        path_vocab: (string) path to the vocabulary file, one token per line
        shuffle_buffer_size: (int) shuffle buffer; 0 disables shuffling
            (used for eval/predict)
        num_oov_buckets: (int) hash buckets for out-of-vocabulary tokens

    Returns:
        dataset: (tf.Dataset) yielding list of ids of tokens and labels for each example
    """
    vocab = tf.contrib.lookup.index_table_from_file(path_vocab, num_oov_buckets=num_oov_buckets)
    # Load txt file, one example per line
    dataset = tf.data.TextLineDataset(path_csv)
    # Convert line into list of tokens, splitting by white space
    dataset = dataset.map(lambda line: parse_line(line, vocab))
    # Repeat for the configured number of epochs before shuffling/batching.
    dataset = dataset.repeat(FLAGS.train_epoch)
    if shuffle_buffer_size > 0:
        dataset = dataset.shuffle(shuffle_buffer_size)
    # prefetch(1) overlaps input preprocessing with model execution.
    dataset = dataset.batch(FLAGS.batch_size).prefetch(1)
    # Debug output of the dataset structure.
    print(dataset.output_types)
    print(dataset.output_shapes)
    return dataset
def my_model(features, labels, mode, params):
    """Estimator model_fn implementing a TextCNN-style classifier.

    Embeds the token ids, applies parallel conv + max-pool branches (one
    per filter size), concatenates them, applies dropout while training,
    and classifies with a multi-class head.
    """
    sentence = features['sentence']
    # Get word embeddings for each token in the sentence
    embeddings = tf.get_variable(name="embeddings", dtype=tf.float32,
                                 shape=[params["vocab_size"], FLAGS.embedding_size])
    sentence = tf.nn.embedding_lookup(embeddings, sentence)  # shape:(batch, sentence_len, embedding_size)
    # add a channel dim, required by the conv2d and max_pooling2d method
    sentence = tf.expand_dims(sentence, -1)  # shape:(batch, sentence_len/height, embedding_size/width, channels=1)

    pooled_outputs = []
    for filter_size in params["filter_sizes"]:
        # Kernel spans the full embedding width, sliding over tokens only.
        conv = tf.layers.conv2d(
            sentence,
            filters=FLAGS.num_filters,
            kernel_size=[filter_size, FLAGS.embedding_size],
            strides=(1, 1),
            padding="VALID",
            activation=tf.nn.relu)
        # Max over every valid position -> one scalar per filter.
        pool = tf.layers.max_pooling2d(
            conv,
            pool_size=[FLAGS.sentence_max_len - filter_size + 1, 1],
            strides=(1, 1),
            padding="VALID")
        pooled_outputs.append(pool)
    h_pool = tf.concat(pooled_outputs, 3)  # shape: (batch, 1, len(filter_size) * embedding_size, 1)
    h_pool_flat = tf.reshape(h_pool, [-1, FLAGS.num_filters * len(
        params["filter_sizes"])])  # shape: (batch, len(filter_size) * embedding_size)
    # Dropout is active only in TRAIN mode.
    if 'dropout_rate' in params and params['dropout_rate'] > 0.0:
        h_pool_flat = tf.layers.dropout(h_pool_flat, params['dropout_rate'],
                                        training=(mode == tf.estimator.ModeKeys.TRAIN))
    logits = tf.layers.dense(h_pool_flat, FLAGS.num_classes, activation=None)

    optimizer = tf.train.AdagradOptimizer(learning_rate=params['learning_rate'])

    def _train_op_fn(loss):
        # Minimize while advancing the global step counter.
        return optimizer.minimize(loss, global_step=tf.train.get_global_step())

    my_head = tf.contrib.estimator.multi_class_head(n_classes=FLAGS.num_classes)
    return my_head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        logits=logits,
        train_op_fn=_train_op_fn
    )
def main(unused_argv):
    """Train and evaluate the TextCNN, then re-evaluate on both splits.

    Expects build_vocab.py to have produced dataset_params.json and
    words.txt inside FLAGS.data_dir.
    """
    # Load the parameters from the dataset, that gives the size etc. into params
    json_path = os.path.join(FLAGS.data_dir, 'dataset_params.json')
    assert os.path.isfile(json_path), "No json file found at {}, run build_vocab.py".format(json_path)

    # Loads parameters from json file
    with open(json_path) as f:
        config = json.load(f)
    FLAGS.pad_word = config["pad_word"]
    # Never shuffle with a buffer larger than the dataset itself.
    if config["train_size"] < FLAGS.shuffle_buffer_size:
        FLAGS.shuffle_buffer_size = config["train_size"]
    print("shuffle_buffer_size:", FLAGS.shuffle_buffer_size)

    # Get paths for vocabularies and dataset
    path_words = os.path.join(FLAGS.data_dir, 'words.txt')
    assert os.path.isfile(path_words), "No vocab file found at {}, run build_vocab.py first".format(path_words)
    # words = tf.contrib.lookup.index_table_from_file(path_words, num_oov_buckets=config["num_oov_buckets"])

    path_train = os.path.join(FLAGS.data_dir, 'train.csv')
    path_eval = os.path.join(FLAGS.data_dir, 'test.csv')

    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'vocab_size': config["vocab_size"],
            'filter_sizes': list(map(int, FLAGS.filter_sizes.split(','))),
            'learning_rate': FLAGS.learning_rate,
            'dropout_rate': FLAGS.dropout_rate
        },
        config=tf.estimator.RunConfig(model_dir=FLAGS.model_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps)
    )

    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input_fn(path_train, path_words, FLAGS.shuffle_buffer_size, config["num_oov_buckets"]),
        max_steps=FLAGS.train_steps
    )
    # Eval input is never shuffled (buffer size 0).
    input_fn_for_eval = lambda: input_fn(path_eval, path_words, 0, config["num_oov_buckets"])
    eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_for_eval, steps=600, throttle_secs=30000)

    print("before train and evaluate")
    tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)

    # Final pass: evaluate the trained model on both splits.
    input_fn_for_pred = lambda: input_fn(path_train, path_words, 0, config["num_oov_buckets"])
    print("evalue train set")
    ge1 = classifier.evaluate(input_fn=input_fn_for_pred)
    print("evalue test set")
    ge2 = classifier.evaluate(input_fn=input_fn_for_eval)
    print("after train and evaluate")
def pred(unused_argv):
    """Run inference on the test split with the checkpointed model.

    Prints the predicted class of each example and the example count.
    NOTE(review): passes a hard-coded 100 as num_oov_buckets instead of
    config["num_oov_buckets"] -- results differ from training if the two
    values disagree.
    """
    path_eval = os.path.join(FLAGS.data_dir, 'test.csv')
    path_words = os.path.join(FLAGS.data_dir, 'words.txt')
    input_fn_for_pred = lambda: input_fn(path_eval, path_words, 0, 100)
    json_path = os.path.join(FLAGS.data_dir, 'dataset_params.json')
    with open(json_path) as f:
        config = json.load(f)
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'vocab_size': config["vocab_size"],
            'filter_sizes': list(map(int, FLAGS.filter_sizes.split(','))),
            'learning_rate': FLAGS.learning_rate,
            'dropout_rate': FLAGS.dropout_rate
        },
        config=tf.estimator.RunConfig(model_dir=FLAGS.model_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps)
    )
    eval_spec = classifier.predict(input_fn=input_fn_for_pred)
    count = 0
    for e in eval_spec:
        count += 1
        print(e.get('classes', ''))
    print(count)
if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    # Runs the full train/evaluate pipeline; swap in `pred` for inference.
    tf.app.run(main=main)
| [
"fujianhaowawa@163.com"
] | fujianhaowawa@163.com |
6b854b39440765b0f5c80e3c3f73c5fdf6d4f8b8 | 4d10250b7ce80730414468e5e0060a207253a6d0 | /jplephem/test.py | bc8ec152f0e375d2117b0930f489d0e20a305d78 | [] | no_license | NatalieP-J/python | c68fdb84a6c9c432b34e57ae4e376f652451578a | c74bcfabde4704939550875bc42fc3e8a5dbc5bf | refs/heads/master | 2021-01-23T03:08:06.448979 | 2013-08-21T04:04:11 | 2013-08-21T04:04:11 | 10,916,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | """Tests for ``jplephem``.
See the accompanying ``jpltest`` module for a more intense numerical
test suite that can verify that ``jplephem`` delivers, in a large number
of cases, the same results as when the ephemerides are run at JPL. This
smaller and more feature-oriented suite can be run with::
python -m unittest discover jplephem
"""
import numpy as np
from functools import partial
from jplephem import Ephemeris, DateError
from unittest import TestCase
class Tests(TestCase):
    """Feature-level checks of jplephem against recorded DE421 values.

    Expected Earth-Moon barycenter states were captured from reference
    runs; comparisons allow an absolute tolerance (delta) of 1.0.
    """

    def check0(self, x, y, z, dx, dy, dz):
        # Expected state at JD 2414994.0 (position then velocity).
        eq = partial(self.assertAlmostEqual, delta=1.0)
        eq(x, 39705023.28)
        eq(y, 131195345.65)
        eq(z, 56898495.41)
        eq(dx, -2524248.19)
        eq(dy, 619970.11)
        eq(dz, 268928.26)

    def check1(self, x, y, z, dx, dy, dz):
        # Expected state at JD 2415112.5.
        eq = partial(self.assertAlmostEqual, delta=1.0)
        eq(x, -144692624.00)
        eq(y, -32707965.14)
        eq(z, -14207167.26)
        eq(dx, 587334.38)
        eq(dy, -2297419.36)
        eq(dz, -996628.74)

    def test_scalar_input(self):
        """A single float date yields one state vector."""
        import de421
        e = Ephemeris(de421)
        self.check0(*e.compute('earthmoon', 2414994.0))
        self.check1(*e.compute('earthmoon', 2415112.5))

    def test_array_input(self):
        """An ndarray of dates yields one state column per date."""
        import de421
        e = Ephemeris(de421)
        v = e.compute('earthmoon', np.array([2414994.0, 2415112.5]))
        v = np.array(v)
        self.check0(*v[:,0])
        self.check1(*v[:,1])

    def test_ephemeris_end_date(self):
        """The very last covered date (jomega) is still computable."""
        import de421
        e = Ephemeris(de421)
        x, y, z = e.position('earthmoon', e.jomega)
        self.assertAlmostEqual(x, -2.81196460e+07, delta=1.0)
        self.assertAlmostEqual(y, 1.32000379e+08, delta=1.0)
        self.assertAlmostEqual(z, 5.72139011e+07, delta=1.0)

    def test_too_early_date(self):
        """Dates before jalpha raise DateError."""
        import de421
        e = Ephemeris(de421)
        self.assertRaises(DateError, e.compute, 'earthmoon', e.jalpha - 0.01)

    def test_too_late_date(self):
        """Dates past jomega (plus slack) raise DateError."""
        import de421
        e = Ephemeris(de421)
        self.assertRaises(DateError, e.compute, 'earthmoon', e.jomega + 16.01)
| [
"natalie.price.jones@mail.utoronto.ca"
] | natalie.price.jones@mail.utoronto.ca |
c3b4cea182ae3b16c34f84f478d79d361164906d | 05a194d887c0ab9f1bc0041ac2287f1b8dff2bd6 | /src/sms/schemas/send.py | 5584fe7ca7f3f1fcd221c25cb8d821aa3cdc8dc1 | [] | no_license | athletictools/sms-gateway | d70842890b5e7a471e48eaa13a4ff90d855e725a | ed89ccc88579b3260db8f010a3606952afe55080 | refs/heads/master | 2023-05-03T01:22:55.027935 | 2021-05-26T11:01:07 | 2021-05-26T11:01:07 | 366,818,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from typing import List, Optional
from pydantic import BaseModel
class Message(BaseModel):
    """A single outbound SMS message."""

    # Fix: the field may be None, so it must be typed Optional[str]
    # (Optional was imported above but never used).
    id: Optional[str] = None
    phone_no: str
    text: str
class SendRequest(BaseModel):
    """Request payload for the send endpoint: a batch of messages."""

    messages: List[Message]
| [
"evtatarintsev@ya.ru"
] | evtatarintsev@ya.ru |
5b08c298bbe51fb0bf56a0b0df1fd5e0e280b3cd | ab741694f394a50b6085102fbbbcd7fff336a604 | /5/part_two.py | 4192b1cc0a778f56ef325f24195b079d568f6287 | [] | no_license | KDercksen/adventofcode18 | aa7259fdb2ba153c6d6586986e21cd9e3a316468 | 54feeb5c5b82b7b117c887a9a459b50ad0726efe | refs/heads/master | 2020-04-09T02:09:29.840314 | 2018-12-10T10:09:59 | 2018-12-10T10:09:59 | 159,930,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from string import ascii_lowercase
def react(text):
    """Return the length of the polymer after all reactions complete.

    Adjacent units of the same letter but opposite case (ASCII codes
    differing by 32) annihilate each other, and removals can cascade.
    A stack turns the original O(n^2) delete-and-backtrack scan into a
    single O(n) pass.  Unlike the original, the input is not mutated.
    """
    stack = []
    for unit in text:
        if stack and abs(ord(stack[-1]) - ord(unit)) == 32:
            stack.pop()  # new unit annihilates the previous one
        else:
            stack.append(unit)
    return len(stack)
if __name__ == "__main__":
    with open("input.txt") as f:
        text = list(f.read().strip())
    # Part two: remove every occurrence of one unit type (both cases),
    # fully react the remainder, and report the shortest possible polymer.
    print(
        min(
            [
                react([unit for unit in text if unit.lower() != letter])
                for letter in ascii_lowercase
            ]
        )
    )
| [
"mail@koendercksen.com"
] | mail@koendercksen.com |
d5a140b811fe5a8c23c9eb3d8e15902576a1fd05 | cde24904de0830ee9f58c16969683f6eed76883b | /Chapter 7/number-analysis-program.py | 4d19979f969ecfc0b5ce2efdd957f9c991d438e1 | [] | no_license | stephenmoye/Python-Programming-Projects | d205ee34a99c4a4ee4555fe7b751e8576e665d40 | 5e8a6562ad86d4647af9deda7fb1386f1a100b0f | refs/heads/main | 2023-01-22T15:40:20.378504 | 2020-12-10T05:24:53 | 2020-12-10T05:24:53 | 320,168,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # Number Analysis Program
# Design a program that asks the user to enter a series of 20 numbers. The program should store
# the numbers in a list then display the following data:
# The lowest number in the list
# The highest number in the list
# The total of the numbers in the list
# The average of the numbers in the list
# Collected inputs; module level so the collector and reporter share it.
numbersList = []

def main():
    """Prompt the user for 20 integers, then report their statistics."""
    for month in range(0, 20):  # NOTE(review): 'month' is just a counter here
        print("Enter any number")
        numbers = int(input())
        numbersList.append(numbers)
    analyze(numbersList)
def analyze(numbers):
    """Print the lowest, highest, total, and average of *numbers*.

    Generalized: works for any non-empty list instead of exactly 20
    items (an empty list raises ZeroDivisionError when averaging).
    """
    total = sum(numbers)            # idiomatic replacement for the manual loop
    average = total / len(numbers)  # no longer assumes exactly 20 entries
    print("Lowest number:", min(numbers))
    print("Highest number:", max(numbers))
    print("Total:", total)
    print("Average:", average)
main()
| [
"stephenmoye@gmail.com"
] | stephenmoye@gmail.com |
a72dbf230d91deba8ed46f91784cde7bfef572d9 | ced1774c423247543413cb2d8bf2183bfe90b5a8 | /markovly.py | e6a7da219e30949b3837ea1948b2bd9ad2738a40 | [] | no_license | leegenes/markovly | 39eb5b8fdf0b580215ac2b272bd8d2deaeac1356 | 05f8c85c175e212c91f1f094686f12bdf80eddb3 | refs/heads/master | 2022-12-20T00:45:28.920488 | 2017-11-10T19:54:35 | 2017-11-10T19:54:35 | 99,361,418 | 0 | 0 | null | 2022-12-08T00:42:16 | 2017-08-04T16:32:19 | Python | UTF-8 | Python | false | false | 2,801 | py | from random import choice
class Markovly:
    """Markov-chain text generator over words or characters.

    Build with a corpus, call tokenize() to populate the n-gram table,
    then generate_verse()/generate_song() to sample text.
    """

    def __init__(self, text=None, n=None, token_type="word"):
        # text: the corpus string; n: n-gram prefix length;
        # token_type: "word" (whitespace tokens) or "char" (characters).
        self.text = text
        self.ngram = n
        self.token_type = token_type
        self.tokens = None

    def tokenize(self):
        """Build {ngram-prefix-tuple: [successor, ...]} from self.text."""
        if self.token_type == "char":
            text_pieces = list(self.text)
        else:
            text_pieces = self.text.split()
        tokens = {}
        for n, tp in enumerate(text_pieces):
            # ensures enough indices remaining
            # in text_pieces list for key and next word/char
            # NOTE(review): list slicing never raises IndexError, so this
            # except is dead code; the length check below does the work.
            try:
                next_tp = text_pieces[n:n + self.ngram + 1]
            except IndexError:
                break
            # key: tuple of the first self.ngram pieces of the window
            k = tuple(next_tp[:- 1])
            if len(k) != self.ngram:
                break
            # add key if not in token dict
            if k not in tokens:
                tokens[k] = []
            # record the piece that followed this prefix
            last_tp = next_tp[-1]
            tokens[k].append(last_tp)
        self.tokens = tokens
        return self.tokens

    def generate_verse(self):
        """Sample one verse by walking the chain from a capitalized key.

        Length cap is 5 pieces in word mode, 280 in char mode; line
        breaks are inserted heuristically before capitalized pieces.
        """
        # determines if a line should break
        # will in all cases with more than
        # 1 word on previous line - unless
        # previous line includes an !
        def insert_break(since_last_break):
            if '!' in since_last_break:
                return True
            elif since_last_break.count(' ') <= 1 or ',' in since_last_break[-4:]:
                return False
            return True

        # Index of the last '\n' in the verse so far (0 if none).
        def get_last_break(line):
            if '\n' in line:
                last_break = len(line) - line[-1::-1].index('\n') -1
            else:
                last_break = 0
            return last_break

        max_len = 5 if self.token_type == "word" else 280
        # Start only from prefixes that begin with an uppercase piece.
        start_keys = [k for k in self.tokens.keys() if k[0].isupper()]
        k = choice(start_keys)
        verse = list(k)
        while len(verse) < max_len:
            # KeyError means the chain reached a prefix with no successor.
            try:
                next_piece = choice(self.tokens[k])
            except KeyError:
                break
            if next_piece[0].isupper():
                last_break = get_last_break(verse)
                since_break = verse[last_break:]
                if insert_break(since_break):
                    verse.append('\n')
            verse.append(next_piece)
            # Slide the prefix window forward by one piece.
            k = k[1:] + (next_piece,)
        # Unless we stopped on a space, drop the trailing partial line.
        if verse[-1] != ' ':
            verse = verse[:get_last_break(verse)]
        return ''.join(verse)

    def generate_song(self, verse_count):
        """Return *verse_count* verses joined by blank lines."""
        verses = []
        for i in range(verse_count):
            verses.append(self.generate_verse())
        return '\n\n'.join(verses)
if __name__ == '__main__':
    # Read the corpus from one line of stdin and print a single verse
    # generated by a character-level chain with 8-character prefixes.
    words = input()
    m = Markovly(text=words, n=8, token_type="char")
    print(m.generate_verse())
| [
"haugenlee@leegenes.local"
] | haugenlee@leegenes.local |
c345de227b11ef2439e018bdd985a7e7f822347e | 66ad57b680fb7cb1b9835319477510709ed9f9de | /Ejercicio3.py | 97807574241307f01d7b11063ec1383057c0d9c6 | [] | no_license | ayanez16/Tarea-1-Estructura | d919ee9ec69dcb6afd404a643e4aa87b9619bb43 | c61cbb6e195f3aeeb41db296212bb740a28fc619 | refs/heads/main | 2023-06-08T03:13:45.011628 | 2021-06-30T02:41:23 | 2021-06-30T02:41:23 | 381,538,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | #Un vendedor recibe u sueldo base mas un 10% extra por comision de sus ventas.
#El vendedor desea saber cuanto dinero obtendra por concepto de comisiones por las tres ventas que realiza en el mes y el total que recibira
#en el mes tomando en cuenta su sueldo base y sus comosiones.
class Ejercicio3:
    """Exercise 3: base salary plus a 10% commission on three sales.

    NOTE(review): run() is invoked from the class body, so the prompts
    and prints execute as a side effect of *defining* the class.
    """
    def run():
        # Read the base salary and the value of each of the three sales
        # (prompts are in Spanish and are part of program output).
        S=float(input("Ingrese el salario base: "))
        V1=float(input("Ingrese el valor de la primera venta: "))
        V2=float(input("Ingrese el valor de la segunda venta: "))
        V3=float(input("Ingrese el valor de la tercera venta: "))
        T=V1+V2+V3   # total sales
        B=T*0.10     # 10% commission
        R=S+B        # base salary plus commission
        print("El total del salario a recibir es: $")
        print(R,"El sueldo a recibir: $")
    run()
| [
"noreply@github.com"
] | ayanez16.noreply@github.com |
f59468a5e4143f0ff367cdc65d13c1808e6f1233 | 09f6c19a9b5717ba50f3cdefa914b0399939a58e | /1_introduction/fraction.py | 54a4090a92adc00772a998e3e6214998fcc53b90 | [] | no_license | prestidigitation/algo_and_data_struc | ca5f16fec8553290d8e8e4136397569eebb8a1fd | 2ec6e178dee47e46e1be181fc66ddb6998c02df7 | refs/heads/master | 2021-01-10T04:38:36.071922 | 2016-10-25T19:10:51 | 2016-10-25T19:10:51 | 46,324,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | def gcd(m, n):
while m % n != 0:
old_m = m
old_n = n
m = old_n
n = old_m % old_n
return n
class Fraction:
    """A rational number kept in lowest terms.

    Every constructor call reduces by the greatest common divisor, so the
    arithmetic operators simply build the raw numerator/denominator and
    let __init__ normalize the result.
    """

    def __init__(self, top, bottom):
        """Store top/bottom reduced to lowest terms."""
        common = gcd(top, bottom)
        self.num = top // common
        self.den = bottom // common

    def __str__(self):
        return str(self.num) + "/" + str(self.den)

    def show(self):
        """Print the fraction (note: spaced differently from __str__)."""
        print(self.num, "/", self.den)

    def __add__(self, other_fraction):
        new_num = self.num * other_fraction.den + \
                  self.den * other_fraction.num
        new_den = self.den * other_fraction.den
        return Fraction(new_num, new_den)

    def __sub__(self, other_fraction):
        new_num = self.num * other_fraction.den - self.den * other_fraction.num
        new_den = self.den * other_fraction.den
        return Fraction(new_num, new_den)

    def __mul__(self, other_fraction):
        # Consistency fix: the extra gcd reduction that used to live here
        # was redundant -- __init__ already reduces, exactly as relied on
        # by __add__, __sub__, and __truediv__.
        new_num = self.num * other_fraction.num
        new_den = self.den * other_fraction.den
        return Fraction(new_num, new_den)

    def __truediv__(self, other_fraction):
        new_num = self.num * other_fraction.den
        new_den = self.den * other_fraction.num
        return Fraction(new_num, new_den)

    def __eq__(self, other_fraction):
        # Cross-multiplication avoids building reduced intermediates.
        first_num = self.num * other_fraction.den
        second_num = other_fraction.num * self.den
        return first_num == second_num

    def get_num(self):
        """Return the (reduced) numerator."""
        return self.num

    def get_den(self):
        """Return the (reduced) denominator."""
        return self.den
| [
"alexander.c.rowland@gmail.com"
] | alexander.c.rowland@gmail.com |
3671b258e8174b8e885023dbabd7a1416204cfd8 | 1cbc5c6771901edc3a304d312cbf5a4fd2c931a6 | /pandora-ckz/pandora/paypal/standard/pdt/admin.py | d7f16cb27ac0cf62d8174d023ef13ae549fc6858 | [
"MIT"
] | permissive | williamlagos/django-coding | ed2a3dff2bf0bf36c1d799ad5746a2a1bd633726 | 246dc1aba32eae0b035c407de3e8fe954606b776 | refs/heads/master | 2023-03-29T05:39:15.311890 | 2020-12-11T01:13:22 | 2020-12-11T01:13:22 | 17,301,344 | 0 | 0 | MIT | 2021-03-31T20:18:54 | 2014-02-28T22:07:36 | Python | UTF-8 | Python | false | false | 2,270 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from string import split as L
from django.contrib import admin
from paypal.standard.pdt.models import PayPalPDT
# ToDo: How similiar is this to PayPalIPNAdmin? Could we just inherit off one common admin model?
class PayPalPDTAdmin(admin.ModelAdmin):
    """Django admin configuration for PayPal PDT transaction records.

    NOTE(review): `L` is `string.split` (imported at the top of this
    module), a Python 2-only alias used to expand each space-separated
    string into a list of field names; Python 3 would need `str.split`.
    """
    date_hierarchy = 'payment_date'
    fieldsets = (
        # Core transaction state and amounts.
        (None, {
            "fields": L("flag txn_id txn_type payment_status payment_date transaction_entity reason_code pending_reason mc_gross mc_fee auth_status auth_amount auth_exp auth_id")
        }),
        ("Address", {
            "description": "The address of the Buyer.",
            'classes': ('collapse',),
            "fields": L("address_city address_country address_country_code address_name address_state address_status address_street address_zip")
        }),
        ("Buyer", {
            "description": "The information about the Buyer.",
            'classes': ('collapse',),
            "fields": L("first_name last_name payer_business_name payer_email payer_id payer_status contact_phone residence_country")
        }),
        ("Seller", {
            "description": "The information about the Seller.",
            'classes': ('collapse',),
            "fields": L("business item_name item_number quantity receiver_email receiver_id custom invoice memo")
        }),
        ("Subscriber", {
            "description": "The information about the Subscription.",
            'classes': ('collapse',),
            "fields": L("subscr_id subscr_date subscr_effective")
        }),
        ("Recurring", {
            "description": "Information about recurring Payments.",
            "classes": ("collapse",),
            "fields": L("profile_status initial_payment_amount amount_per_cycle outstanding_balance period_type product_name product_type recurring_payment_id receipt_id next_payment_date")
        }),
        ("Admin", {
            "description": "Additional Info.",
            "classes": ('collapse',),
            "fields": L("test_ipn ipaddress query flag_code flag_info")
        }),
    )
    list_display = L("__unicode__ flag invoice custom payment_status created_at")
    search_fields = L("txn_id recurring_payment_id")
search_fields = L("txn_id recurring_payment_id")
admin.site.register(PayPalPDT, PayPalPDTAdmin) | [
"william.lagos@icloud.com"
] | william.lagos@icloud.com |
de309f144a2f38cb7e70445e21b178ee03526f31 | 3a7eeceb14859c4cf9f612f0c04e9c5efafb7191 | /solutions/using-loops/exercise_15a.py | cf4f9637333d72259bafe93146565a8d85ebef05 | [] | no_license | cs50puyo/check50ap | ebd998fcbca8489a89dc71e40ca3fcec014c8011 | 6fd220c5e8434e84e2cd90af178fb6ca5b705659 | refs/heads/master | 2020-03-10T02:00:58.819373 | 2018-05-02T18:31:58 | 2018-05-02T18:31:58 | 129,125,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | SIZE = 7
i = 0
while i < SIZE:
j = 0
while j < SIZE:
if i >= j:
print('T', end='')
j += 1
i += 1
print()
| [
"luisvf@bandofcoders.com"
] | luisvf@bandofcoders.com |
b3ba9461e231113f9206accdb8553163b1022ef2 | 7ca659eefd6eeddb4f2f1f40d6cd99a5a9a99504 | /AUInference.py | a0765ada710622e392e9c33b2ca001c59f4f94c6 | [] | no_license | JaineBudke/fake_emotion_analysis | 519071893dc1cbad6a77135e73955c5568827271 | 53d5c3dc7adeac280d95eafbb2b39ee9da6466c2 | refs/heads/master | 2020-07-29T11:42:40.871359 | 2019-10-15T22:11:14 | 2019-10-15T22:11:14 | 209,786,749 | 0 | 0 | null | 2019-10-15T22:11:15 | 2019-09-20T12:35:00 | Python | UTF-8 | Python | false | false | 4,404 | py | import numpy as np
class AUInference:
    """Detects FACS Action Units (AUs) by comparing facial-landmark features
    of the current frame against the same person's neutral expression.

    Each ``AU*`` method returns True when that facial movement is detected.
    The numeric offsets (0.075, 0.03, ...) are empirically tuned thresholds
    on the feature values; feature keys such as 'ieb_height' (inner-eyebrow
    height) or 'm_openness' (mouth openness) are produced elsewhere --
    presumably by the feature-extraction stage that builds ``features``.
    """

    # Class-level placeholders; real per-face values are set in __init__.
    # NOTE(review): np.empty(68) suggests 68 landmarks, but indexing like
    # self.shape[22][1] implies (68, 2) (x, y) points -- confirm upstream.
    shape = np.empty(68)           # current-frame facial landmarks
    neutral_shape = np.empty(68)   # neutral-expression facial landmarks
    features = {}                  # derived features of the current frame
    neutral_features = {}          # derived features of the neutral frame

    def __init__(self, shape, features, neutral_shape, neutral_features):
        """Store the current and neutral landmark shapes and feature dicts."""
        self.shape = shape
        self.features = features
        self.neutral_shape = neutral_shape
        self.neutral_features = neutral_features

    def AU1(self):
        # AU1 -- inner brow raiser: inner-eyebrow height rises above baseline.
        return ((self.features['ieb_height'] > (self.neutral_features['ieb_height'] + 0.075)) and (self.shape[22][1] >= self.neutral_shape[22][1]) and (self.shape[21][1] >= self.neutral_shape[21][1]))

    def AU2(self):
        # AU2 -- outer brow raiser.
        return ( (self.features['oeb_height'] > (self.neutral_features['oeb_height'] + 0.08)) and (self.shape[19][1] >= self.neutral_shape[19][1]) and (self.shape[24][1] >= self.neutral_shape[24][1]) )

    def AU4(self):
        # AU4 -- brow lowerer: brows drop and move closer together.
        return ((self.features['ieb_height'] < (self.neutral_features['ieb_height'] - 0.03)) and (self.features['eb_distance'] < (self.neutral_features['eb_distance'] - 0.03)))

    def AU5(self):
        # AU5 -- upper lid raiser: eyes open wider than baseline.
        return ( (self.features['e_slanting'] >= (self.neutral_features['e_slanting'] - 0.05)) and (self.features['e_openness'] > (self.neutral_features['e_openness'] + 0.055)))

    def AU6(self):
        # AU6 -- cheek raiser: mouth corners up while eyes narrow.
        return ((self.features['m_mos'] >= (self.neutral_features['m_mos'] + 0.045)) and (self.features['e_openness'] < (self.neutral_features['e_openness'] - 0.05)))

    def AU7(self):
        # AU7 -- lid tightener: eye opening shrinks noticeably.
        return (self.features['e_openness'] < (self.neutral_features['e_openness'] - 0.07))

    def AU9(self):
        # AU9 -- nose wrinkler: narrowed mouth, narrowed eyes, lowered brows.
        return ( (self.features['m_width'] < (self.neutral_features['m_width'] - 0.1)) and (self.features['e_openness'] < (self.neutral_features['e_openness'] - 0.05)) and (self.features['ieb_height'] < (self.neutral_features['ieb_height'] - 0.06)) )

    def AU10(self):
        # AU10 -- upper lip raiser.
        return ((self.features['mul_height'] > (self.neutral_features['mul_height'] + 0.03)) and (self.features['m_width'] <= self.neutral_features['m_width']) and (self.features['m_openness'] > (self.neutral_features['m_openness'] + 0.15)))

    def AU12(self):
        # AU12 -- lip corner puller (smile): corners up and mouth widened.
        return ((self.features['m_mos'] >= (self.neutral_features['m_mos'] + 0.05)) and (self.features['m_width'] > (self.neutral_features['m_width'] + 0.12)))

    def AU15(self):
        # AU15 -- lip corner depressor.
        return (((self.features['m_mos'] + 0.03) <= self.neutral_features['m_mos']) and (self.features['lc_height'] < (self.neutral_features['lc_height'] + 0.055)))

    def AU16(self):
        # AU16 -- lower lip depressor.
        return ((self.features['mll_height'] <= (self.neutral_features['mll_height'] - 0.01)) and (self.features['m_openness'] > (self.neutral_features['m_openness'] + 0.1)) and (self.features['lc_height'] < self.neutral_features['lc_height']))

    def AU17(self):
        # AU17 -- chin raiser: mouth less open, lower lip pushed up.
        return ( (self.features['m_openness'] < self.neutral_features['m_openness']) and ((abs(self.neutral_features['m_openness']) - abs(self.features['m_openness'])) >= 0.08) and (-self.features['mll_height'] < (-self.neutral_features['mll_height'] - 0.08)) )

    def AU20(self):
        # AU20 -- lip stretcher: mouth widened without a smile's corner lift.
        return ((self.features['m_width'] > self.neutral_features['m_width']) and ((abs(self.features['m_width']) - abs(self.neutral_features['m_width'])) >= 0.15) and (self.features['m_mos'] < (self.neutral_features['m_mos'] + 0.075)) and (self.features['lc_height'] < self.neutral_features['lc_height']))

    def AU23(self):
        # AU23 -- lip tightener.
        return (self.features['m_openness'] < (self.neutral_features['m_openness'] - 0.1))

    def AU24(self):
        # AU24 -- lip pressor: lips pressed together.
        return ((self.features['mul_height'] < self.neutral_features['mul_height']) and (self.features['mll_height'] > self.neutral_features['mll_height'] + 0.075) and (self.features['m_openness'] < (self.neutral_features['m_openness'] - 0.1)))

    def AU25(self):
        # AU25 -- lips part; AU25/26/27 form increasing bands of mouth opening.
        return (self.features['m_openness'] >= (self.neutral_features['m_openness'] + 0.13))

    def AU26(self):
        # AU26 -- jaw drop: opening inside the [0.55, 0.63] band.
        return ((self.features['m_openness'] >= (self.neutral_features['m_openness'] + 0.55)) and (self.features['m_openness'] <= (self.neutral_features['m_openness'] + 0.63)))

    def AU27(self):
        # AU27 -- mouth stretch: widest opening band.
        return (self.features['m_openness'] >= (self.neutral_features['m_openness'] + 0.63))

    # get action units (AUs) and set in a dictionary
    def getAllActionUnits(self):
        """Evaluate every detector and return a {'AU1': bool, ...} mapping."""
        AUs = {
            "AU1": self.AU1(), "AU2": self.AU2(), "AU4": self.AU4(), "AU5": self.AU5(), "AU6": self.AU6(), "AU7": self.AU7(),
            "AU9": self.AU9(), "AU10": self.AU10(), "AU12": self.AU12(), "AU15": self.AU15(), "AU16": self.AU16(), "AU17": self.AU17(),
            "AU20": self.AU20(), "AU23": self.AU23(), "AU24": self.AU24(), "AU25": self.AU25(), "AU26": self.AU26(), "AU27": self.AU27()
        }
        return AUs
| [
"jainebudke@hotmail.com"
] | jainebudke@hotmail.com |
b416e000c05055c966ef50e7bead35df903c7b05 | 8b8a06abf18410e08f654fb8f2a9efda17dc4f8f | /app/request_session.py | f6a0cb38f59f5353b537a1d430baac107a5c80f0 | [] | no_license | corporacionrst/software_RST | d903dfadf87c97c692a821a9dd3b79b343d8d485 | 7a621c4f939b5c01fd222434deea920e2447c214 | refs/heads/master | 2021-04-26T23:23:27.241893 | 2018-10-05T23:21:34 | 2018-10-05T23:21:34 | 123,985,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | from sistema.usuarios.models import Perfil
def getPerfil(request):
    """Return the Perfil row linked to the request's authenticated user."""
    current_user = request.user
    return Perfil.objects.get(usuario=current_user)
# def getStore(request):
# return Perfil.objects.get(usuario=request.user).tienda
def OKadmin(request):
    """True when the logged-in user's job title contains 'ADMIN'."""
    if not request.user.is_authenticated():
        return False
    return "ADMIN" in Perfil.objects.get(usuario=request.user).puesto.nombre
def OKbodega(request):
    """True for warehouse (BODEGA) staff; ADMINs are implicitly allowed."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "BODEGA" in puesto or "ADMIN" in puesto
def OKconta(request):
    """True for accounting (CONTA) staff; ADMINs are implicitly allowed."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "CONTA" in puesto or "ADMIN" in puesto
def OKmultitienda(request):
    """Return the user's multi-store flag, or False for anonymous visitors."""
    if not request.user.is_authenticated():
        return False
    return Perfil.objects.get(usuario=request.user).multitienda
def OKcobros(request):
    """True for collections (COBROS) staff or ADMINs."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "COBROS" in puesto or "ADMIN" in puesto
def OKventas(request):
    """True for sales (VENTA) staff; ADMINs are implicitly allowed."""
    if not request.user.is_authenticated():
        return False
    puesto = Perfil.objects.get(usuario=request.user).puesto.nombre
    return "VENTA" in puesto or "ADMIN" in puesto
def OKpeople(request):
    """True when the request comes from any authenticated user.

    Collapses the original if/return-True/return-False ladder into a single
    expression; bool() normalizes whatever is_authenticated() returns
    (Django historically returns a CallableBool) to a plain True/False.
    """
    return bool(request.user.is_authenticated())
def sumar_DATO(request, numero):
    """Increment the trailing counter of the user's documento4 field.

    documento4 is stored as "prefix~series~counter"; the counter segment
    is bumped by one, persisted, and the updated string returned.
    Only numero == "4" is handled; any other value falls through and
    implicitly returns None (preserved behavior).
    """
    perfil = Perfil.objects.get(usuario=request.user)
    if numero == "4":
        parts = perfil.documento4.split("~")
        updated = "~".join([parts[0], parts[1], str(int(parts[2]) + 1)])
        perfil.documento4 = updated
        perfil.save()
        return updated
def obtenerPlantilla(request):
    """Pick the dashboard template for the user's most privileged role.

    Checks run in priority order (admin > conta > bodega > cobros);
    anyone else falls through to the sales template.
    """
    role_templates = (
        (OKadmin, "admin.html"),
        (OKconta, "conta.html"),
        (OKbodega, "bodega.html"),
        (OKcobros, "cobros.html"),
    )
    for check, template in role_templates:
        if check(request):
            return template
    return "ventas.html"
| [
"admin@corporacionrst.com"
] | admin@corporacionrst.com |
d93b089cdb366b97b6e9780bb893d5dff97c7b9e | 73f5b5b136ac4cda1a55489e1757366dea916867 | /run_seq2seq.py | 9e651c3421a832b0e019f9efefe89a73f3dee965 | [] | no_license | dowobeha/pytorch_examples | 9c4b4a808be9864c34cf5fd805f495a8592a9eff | b13886e23503862cbab90db75879ec79f0844ef7 | refs/heads/master | 2020-07-26T00:05:52.752334 | 2019-10-04T19:33:44 | 2019-10-04T19:33:44 | 208,463,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,522 | py | from typing import List, Tuple
import torch
from torch.utils.data import DataLoader
from data import PigLatin
from seq2seq import EncoderWithEmbedding, DecoderWithAttention, verify_shape
def run_model(*, path: str, saved_encoder: str, saved_decoder: str, batch_size: int, device_name: str) -> None:
    """Greedy-decode a Pig Latin dataset with a saved encoder/decoder pair.

    Loads both halves of the seq2seq model from disk, encodes each batch,
    then unrolls the attention decoder one timestep at a time, feeding its
    own argmax prediction back in. Prints the predicted string next to the
    reference label for every example.

    Args:
        path: path to the word list to decode.
        saved_encoder: torch.save()d EncoderWithEmbedding checkpoint.
        saved_decoder: torch.save()d DecoderWithAttention checkpoint.
        batch_size: examples per batch; the final batch may be smaller.
        device_name: torch device string, e.g. "cpu" or "cuda:0".
    """
    from torch.nn.functional import softmax
    import numpy
    encoder: EncoderWithEmbedding = torch.load(saved_encoder)
    decoder: DecoderWithAttention = torch.load(saved_decoder)
    print(type(decoder))
    device = torch.device(device_name)
    # Reuse the decoder's vocabulary so indices line up with the checkpoint.
    words: PigLatin = PigLatin(path=path, vocab=decoder.vocab)
    data: DataLoader = DataLoader(dataset=words, batch_size=batch_size)
    encoder.eval()
    decoder.eval()
    with torch.no_grad():
        for batch in data:  # each batch maps field name -> tensor
            examples: torch.Tensor = batch["data"].to(device)
            labels: torch.Tensor = batch["labels"].to(device)
            # Initial decoder input is the start-of-sequence symbol.
            decoder_previous_output: torch.LongTensor = batch["start-of-sequence"].squeeze(dim=1).to(device)
            # At the end of the data set, the actual batch size may be smaller than batch_size, and that's OK
            actual_batch_size: int = min(batch_size, examples.shape[0])
            # Decoder hidden state starts at zero for every new batch.
            decoder_hidden_state: torch.Tensor = torch.zeros(actual_batch_size, 1, decoder.hidden_size).to(device)
            verify_shape(tensor=decoder_previous_output, expected=[actual_batch_size])
            verify_shape(tensor=examples, expected=[actual_batch_size, words.max_len])
            verify_shape(tensor=labels, expected=[actual_batch_size, words.max_len])
            encoder_states: torch.Tensor = encoder(batch_size=actual_batch_size,
                                                   seq_len=words.max_len,
                                                   input_tensor=examples)
            decoder_output_list: List[torch.Tensor] = list()
            # Unroll the decoder one step at a time for the full output length.
            for _ in range(words.max_len):
                decoder_results: Tuple[torch.Tensor, torch.Tensor] = decoder(batch_size=actual_batch_size,
                                                                             input_seq_len=words.max_len,
                                                                             previous_decoder_output=decoder_previous_output,
                                                                             previous_decoder_hidden_state=decoder_hidden_state,
                                                                             encoder_states=encoder_states)
                decoder_raw_output: torch.Tensor = decoder_results[0]
                decoder_hidden_state: torch.Tensor = decoder_results[1]
                verify_shape(tensor=decoder_raw_output, expected=[actual_batch_size, 1, len(decoder.vocab)])
                verify_shape(tensor=decoder_hidden_state, expected=[actual_batch_size, 1, decoder.hidden_size])
                # Greedy decoding: take the argmax symbol and feed it back in.
                decoder_previous_output: torch.LongTensor = softmax(decoder_raw_output, dim=2).squeeze(dim=1).topk(
                    k=1).indices.squeeze(dim=1)
                verify_shape(tensor=decoder_previous_output, expected=[actual_batch_size])
                decoder_output_list.append(decoder_previous_output)
            print(len(decoder_output_list))
            # Stack timesteps, then flip to (batch, seq_len) order.
            predictions: torch.Tensor = torch.stack(tensors=decoder_output_list).permute(1, 0)
            verify_shape(tensor=predictions, expected=[actual_batch_size, words.max_len])
            for b in range(actual_batch_size):
                int_tensor: torch.LongTensor = predictions[b]
                verify_shape(tensor=int_tensor, expected=[words.max_len])
                # Map symbol indices back to characters for display.
                word: str = "".join([decoder.vocab.i2s[i] for i in int_tensor.tolist()])
                label: str = "".join([decoder.vocab.i2s[i] for i in labels[b].tolist()])
                print(f"{b}\t{words.max_len}\t{word}\t{label}\t{labels[b]}")
if __name__ == "__main__":
    import sys

    # Expect exactly three arguments: data path, encoder file, decoder file.
    if len(sys.argv) == 4:
        run_model(path=sys.argv[1],
                  saved_encoder=sys.argv[2],
                  saved_decoder=sys.argv[3],
                  batch_size=10,
                  device_name="cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        # Previously a wrong argument count exited silently with status 0;
        # report the expected usage instead so mistakes are visible.
        print(f"usage: {sys.argv[0]} DATA_PATH SAVED_ENCODER SAVED_DECODER",
              file=sys.stderr)
        sys.exit(1)
| [
"dowobeha@gmail.com"
] | dowobeha@gmail.com |
28baac5a621d65ae8bfeae46ed657209afc3d95a | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/shared/utils/requesters/tokenrequester.py | 1ace65ad86b0304adeff25edcc9173651083c9f2 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,878 | py | # 2015.11.10 21:29:45 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/utils/requesters/TokenRequester.py
import cPickle
from functools import partial
import BigWorld
from adisp import async
from constants import REQUEST_COOLDOWN, TOKEN_TYPE
from debug_utils import LOG_CURRENT_EXCEPTION
from TokenResponse import TokenResponse
from ids_generators import SequenceIDGenerator
def _getAccountRepository():
    """Return the client's global account repository (None before login).

    The Account module is imported lazily inside the function, presumably
    to avoid a circular import at module load time.
    """
    import Account
    return Account.g_accountRepository
class TokenRequester(object):
    """Requests a server token of one TOKEN_TYPE via the BigWorld client.

    Flow: request() subscribes to the account repository's onTokenReceived
    event, sends the request through BigWorld.player().requestToken, and
    delivers the unpickled, wrapper-converted response to the caller's
    callback. Optionally caches the last still-valid response and can
    synthesize a TIMEOUT error if the server never answers.
    (Decompiled Python 2 client code -- note the old raise/except syntax.)
    """
    # Shared across all requester instances so every request id is unique.
    __idsGen = SequenceIDGenerator()

    def __init__(self, tokenType, wrapper = TokenResponse, cache = True):
        """tokenType: TOKEN_TYPE member; wrapper: callable that builds the
        response object from the unpickled payload kwargs; cache: reuse the
        last valid response instead of hitting the server again."""
        super(TokenRequester, self).__init__()
        if callable(wrapper):
            self.__wrapper = wrapper
        else:
            raise ValueError, 'Wrapper is invalid: {0}'.format(wrapper)
        self.__tokenType = tokenType
        self.__callback = None
        self.__lastResponse = None
        self.__requestID = 0
        self.__cache = cache
        self.__timeoutCbID = None
        return

    def isInProcess(self):
        """True while a request is in flight (callback not yet fired)."""
        return self.__callback is not None

    def clear(self):
        """Drop pending state, unsubscribe from events, cancel the timeout."""
        self.__callback = None
        repository = _getAccountRepository()
        if repository:
            repository.onTokenReceived -= self.__onTokenReceived
        self.__lastResponse = None
        self.__requestID = 0
        self.__clearTimeoutCb()
        return

    def getReqCoolDown(self):
        """Cooldown (seconds) for this token type; 10.0 if not configured."""
        return getattr(REQUEST_COOLDOWN, TOKEN_TYPE.COOLDOWNS[self.__tokenType], 10.0)

    @async
    def request(self, timeout = None, callback = None):
        """Ask the server for a token; result arrives via *callback*.

        callback(None) is invoked immediately when the player entity has no
        requestToken method; a cached valid response short-circuits the
        round trip. (adisp @async style -- presumably the decorator supplies
        the callback when used with adisp's process/async machinery.)
        """
        requester = getattr(BigWorld.player(), 'requestToken', None)
        if not requester or not callable(requester):
            if callback:
                callback(None)
            return
        elif self.__cache and self.__lastResponse and self.__lastResponse.isValid():
            if callback:
                callback(self.__lastResponse)
            return
        else:
            self.__callback = callback
            self.__requestID = self.__idsGen.next()
            if timeout:
                self.__loadTimeout(self.__requestID, self.__tokenType, max(timeout, 0.0))
            repository = _getAccountRepository()
            if repository:
                repository.onTokenReceived += self.__onTokenReceived
            requester(self.__requestID, self.__tokenType)
            return

    def __onTokenReceived(self, requestID, tokenType, data):
        """Event handler: unpickle the payload and deliver it to the caller.

        Responses for other requests/token types are ignored; a payload that
        does not match the wrapper's signature is logged and delivered as
        whatever __lastResponse previously held (possibly None).
        """
        if self.__requestID != requestID or tokenType != self.__tokenType:
            return
        else:
            repository = _getAccountRepository()
            if repository:
                repository.onTokenReceived -= self.__onTokenReceived
            try:
                self.__lastResponse = self.__wrapper(**cPickle.loads(data))
            except TypeError:
                LOG_CURRENT_EXCEPTION()
            self.__requestID = 0
            if self.__callback is not None:
                self.__callback(self.__lastResponse)
                self.__callback = None
            return

    def __clearTimeoutCb(self):
        """Cancel the scheduled timeout callback, if any."""
        if self.__timeoutCbID is not None:
            BigWorld.cancelCallback(self.__timeoutCbID)
            self.__timeoutCbID = None
        return

    def __loadTimeout(self, requestID, tokenType, timeout):
        """Schedule __onTimeout to fire after *timeout* seconds."""
        self.__clearTimeoutCb()
        self.__timeoutCbID = BigWorld.callback(timeout, partial(self.__onTimeout, requestID, tokenType))

    def __onTimeout(self, requestID, tokenType):
        # Synthesize a pickled {'error': 'TIMEOUT'} payload so the normal
        # receive path handles timeouts like any other response.
        self.__clearTimeoutCb()
        self.__onTokenReceived(requestID, tokenType, cPickle.dumps({'error': 'TIMEOUT'}, -1))
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\utils\requesters\tokenrequester.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:29:46 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
08aaabf095c1e32c30f69a14352a7fc8688d018d | 62e5ad135fb48354a7ba3c9c58f61e156a228c18 | /XianyuCrawler/asyxianyu.py | 6967f507ec248e4d419c150975ba4e8bbe3aa26d | [
"MIT"
] | permissive | mn3711698/ECommerceCrawlers | 70abba3baebb389b0ab4d3e007f207bc82bd94c4 | c6964ba058e3a194bc37fe1674c9f71840f95d63 | refs/heads/master | 2020-11-24T12:30:42.004909 | 2019-12-03T03:35:10 | 2019-12-03T03:35:10 | 228,143,883 | 3 | 1 | MIT | 2019-12-15T07:13:16 | 2019-12-15T07:13:16 | null | UTF-8 | Python | false | false | 6,746 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/4/26 0026'
"""
import asyncio
import aiohttp
import time
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/4/24 0024'
"""
#-*- coding:utf-8 -*-
import time
import requests
import os
import json
import re
from pyquery import PyQuery as pq
import datetime
import threading
import random
from dingding import DingMsg
from db import MongoConfig,MongoProduct,MongoKeyword,MongoTime
class XianYu():
    """Async crawler for Xianyu (Taobao second-hand) keyword searches.

    Searches every enabled keyword, keeps only freshly published items,
    inserts new ones into MongoDB, and pushes one DingTalk markdown digest
    per run. Progress and errors are reported through the two queues
    handed to __init__ (the GUI reads them).
    """
    def __init__(self,logMessage,errMessage):
        self.page = 1
        self.dbconf = MongoConfig()
        self.dbprod = MongoProduct()
        self.dbkey = MongoKeyword()
        self.dmes = DingMsg()
        self.finsh = False
        self.paginator_next = False
        self.data_list = []
        # self.base_path = os.path.abspath(os.path.dirname(__file__))
        self.logMessage=logMessage
        self.errMessage=errMessage

    def range_webhook(self):
        """Pick one configured DingTalk webhook at random (spreads rate limits)."""
        webhook ='https://oapi.dingtalk.com/robot/send?access_token='+self.dbconf.select_all()[random.randint(0,self.dbconf.count()-1)].get('webhook')
        return webhook

    async def parse_html(self, html,keyword):
        """Parse one search-result page; queue fresh, unseen items for pushing."""
        print('开始解析')
        doc = pq(html)
        num = doc.find('.cur-num').text() # commented-out values below are kept for future requirement changes
        # print('总数',num)
        paginator_count = doc.find('.paginator-count').text()
        # paginator_pre = doc.find('.paginator-pre').text()
        self.paginator_next = doc.find('.paginator-next').text() # "next page" control
        now_page = doc.find('.paginator-curr').text()
        # print('现在在第几页',now_page)
        # P = '共([0-9]+)页'
        # all_pagenum = re.findall(P, paginator_count, re.S)
        # if all_pagenum:
        #     print(int(all_pagenum[0]))
        itmes = doc('#J_ItemListsContainer .ks-waterfall').items()
        for item in itmes:
            data = {}
            data['keyword'] =keyword
            data['seller_nick'] = item.find('.seller-nick a').text() # nick
            data['pic_href'] = 'https:'+str(item.find('.item-pic a').attr('href')) # details_ulr
            data['title'] = item.find('.item-pic a').attr('title') # title
            data['img_src'] = 'https:'+str(item.find('.item-pic a img').attr('data-ks-lazyload-custom')) # img
            data['price'] = item.find('.item-attributes .item-price span em').text() # price
            data['location'] = item.find('.item-attributes .item-location').text()
            data['desc'] = item.find('.item-brief-desc').text()
            data['pub_time'] = item.find('.item-pub-info .item-pub-time').text()
            data['add_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M-%S')
            #TODO: only collect items published within the last 10 minutes
            try:
                if int(data.get('pub_time').replace('分钟前','')) > 10: # older than the freshness window -> skip
                    continue
            except Exception as e: # pub_time not in "N分钟前" form means the item is older still; drop it
                continue
            # Insert and push only when this keyword/link pair is not stored yet.
            if not self.dbprod.select({"keyword":keyword,"pic_href":data.get('pic_href')}):
                self.dbprod.insert(data)
                # NOTE(review): the trailing '[' looks like a typo for ']' -- confirm the log format.
                self.logMessage.put('['+keyword+']['+data['pub_time']+']['+data['title']+'[')
                # print(data)
                # TODO: plain per-item push (superseded by the markdown digest)
                # def send_message():
                #     if not getDingMes(webhook_url=self.range_webhook(), data=data,type=1):
                #         send_message()
                # send_message()
                # TODO: link-style messages -- not usable
                #TODO: markdown digest push
                self.markdown_list.append(data)
            else:
                print('数据已经存在mpass')
                # NOTE(review): indentation recovered from a flattened dump --
                # these two debug prints may originally sit at loop level.
                print(data)
                print('#' * 50)

    async def get(self,url,payload):
        """GET *url* with query *payload*; return the response body text."""
        async with aiohttp.ClientSession() as session:
            async with session.get(url,params=payload) as resp:
                print(resp.status)
                print(resp.url)
                result = await resp.text()
                return result

    async def request(self,keyword_obj):
        """Run one keyword search (newest first, price-filtered) and parse it."""
        keyword = keyword_obj.get('keyword')
        minPrice=keyword_obj.get('minPrice')
        maxPrice=keyword_obj.get('maxPrice')
        payload = {
            "st_edtime":1, # sort: newest published first
            "_input_charset": "utf8",
            "search_type": "item",
            "q": keyword,
            "page": self.page,
            "start": minPrice, # price range bounds
            "end": maxPrice,
        }
        url = 'https://s.2.taobao.com/list/'
        print('Waiting for', url)
        result = await self.get(url,payload)
        await self.parse_html(result,keyword)

    def run(self,type):
        """Crawl every enabled keyword concurrently, then push one digest."""
        self.markdown_list = []
        keywords = self.dbkey.select_all({'start':1})
        tasks = []
        for key_obj in keywords:
            tasks.append(asyncio.ensure_future(self.request(key_obj)))
        print(len(tasks))
        if tasks :
            loop = asyncio.get_event_loop()
            loop.run_until_complete(asyncio.wait(tasks))
        # Send a single markdown digest to conserve the DingTalk API quota;
        # retries recursively until a webhook accepts the message.
        def send_message():
            if not self.dmes.send_msg(webhook_url=self.range_webhook(), data=self.markdown_list,type=type):
                send_message()
                self.errMessage.put('钉钉消息发送失败,发送数据太过于频繁')
            else:
                self.errMessage.put('钉钉消息发送,使用类型{}'.format(type))
        if self.markdown_list:
            send_message()
def _run(logMessage,errMessage):
    """Crawl forever: one XianYu pass per loop, then sleep per DB config.

    Intended to run inside the GUI's worker (tk) thread; progress and
    errors are reported back through the two queues.
    """
    print('启动')
    dbtime = MongoTime()
    while True:
        # Re-read the active schedule record each round so config changes
        # take effect without restarting the crawler.
        time_config = dbtime.select_one({"flag":1})
        type =time_config.get('type')
        padding_time =time_config.get('time')
        start_time = time.time()
        xy = XianYu(logMessage,errMessage)
        xy.run(type)
        print('异步爬取用时:',time.time() - start_time )
        # TODO: take the pause length from the configuration record
        errMessage.put('爬取耗时{}秒'.format(int(time.time() - start_time)))
        if not padding_time:
            padding_time = 10
        time.sleep(padding_time)
if __name__ == '__main__':
    # Standalone smoke test: run the crawler loop on a background thread,
    # wiring the two message queues it expects from the GUI.
    from multiprocessing import Process,JoinableQueue
    logMessage = JoinableQueue()
    errMessage = JoinableQueue()
    TProcess_crawler = threading.Thread(target=_run,args=(logMessage, errMessage))
    # TProcess_crawler.daemon = True
    TProcess_crawler.start()
    # TProcess_crawler.join()
    print('继续运行')
"1599121712@qq.com"
] | 1599121712@qq.com |
773e5d4335160199bf2a416ed4a155016341821d | d90a9cdfb8feb3a5e12f689aefcd39727ab3a05b | /wordlist/create_sqlite_db.py | f57e4d5d89b41ce3ccc479d9c9f804d5373d445c | [] | no_license | etuardu/mnemofon | b40034aac800a67c2220a415bb274d3686c3aa4c | d153015d4011465e96dcf4049576d4abc2340dee | refs/heads/master | 2021-08-08T21:23:30.926939 | 2021-01-30T00:42:38 | 2021-01-30T00:42:38 | 82,075,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | import word_2_digits
import sqlite3
WORDLIST_FILENAME = "diz"
def words_gen():
    """Yield each word from the word-list file, stripped of whitespace."""
    with open(WORDLIST_FILENAME) as wordlist_file:
        for raw_line in wordlist_file:
            yield raw_line.strip()
# Build the word -> digit-encoding lookup table as a SQLite database.
conn = sqlite3.connect('diz_ita.db')
c = conn.cursor()
c.execute("CREATE TABLE words (word text, digits text)")
for word in words_gen():
    digits = word_2_digits.word_2_digits(word)
    # Parameterized placeholders instead of str.format: the original broke
    # (and was SQL-injectable) for any word containing a single quote.
    c.execute("INSERT INTO words VALUES (?, ?)", (word, digits))
conn.commit()
conn.close()
| [
"edonan@gmail.com"
] | edonan@gmail.com |
0b85630a9123b498e5f50e15d65fb027b4057127 | 1c6b5d41cc84c103ddb2db3689f61f47eaa2c13b | /CV_ToolBox-master/VOC_2_COCO/xml_helper.py | c97bb05d81b946aa96ae1e1ee0c4209f0f9cc9a7 | [] | no_license | Asher-1/DataAugmentation | e543a93912239939ccf77c98d9156c8ed15e1090 | c9c143e7cccf771341d2f18aa11daf8b9f817670 | refs/heads/main | 2023-07-01T22:49:10.908175 | 2021-08-13T10:01:56 | 2021-08-13T10:01:56 | 395,602,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,300 | py | # -*- coding=utf-8 -*-
import os
import xml.etree.ElementTree as ET
import xml.dom.minidom as DOC
# 从xml文件中提取bounding box信息, 格式为[[x_min, y_min, x_max, y_max, name]]
def parse_xml(xml_path):
    """Extract bounding boxes from a Pascal-VOC style annotation file.

    Args:
        xml_path: path to the annotation XML file.

    Returns:
        One entry per <object>, each formatted as
        [x_min, y_min, x_max, y_max, name].
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()
    objs = root.findall('object')
    coords = list()
    for ix, obj in enumerate(objs):
        name = obj.find('name').text
        box = obj.find('bndbox')
        # Look the corners up by tag name instead of by child position, so
        # the result stays correct if the XML lists them in another order.
        # int(float(...)) also tolerates float coordinates such as "12.0".
        x_min = int(float(box.find('xmin').text))
        y_min = int(float(box.find('ymin').text))
        x_max = int(float(box.find('xmax').text))
        y_max = int(float(box.find('ymax').text))
        coords.append([x_min, y_min, x_max, y_max, name])
    return coords
# 将bounding box信息写入xml文件中, bouding box格式为[[x_min, y_min, x_max, y_max, name]]
def generate_xml(img_name, coords, img_size, out_root_path):
    """Write a Pascal-VOC style XML annotation file for one image.

    Args:
        img_name: image file name such as 'a.jpg'; the output file is the
            same stem with a '.xml' extension.
        coords: list of boxes, each [x_min, y_min, x_max, y_max, name].
        img_size: image dimensions as [height, width, channels].
        out_root_path: directory the XML file is written into.
    """
    doc = DOC.Document()

    def text_element(tag, text):
        # Helper: build "<tag>text</tag>" (replaces ~20 copies of the same
        # createElement/createTextNode/appendChild boilerplate).
        node = doc.createElement(tag)
        node.appendChild(doc.createTextNode(str(text)))
        return node

    annotation = doc.createElement('annotation')
    doc.appendChild(annotation)
    annotation.appendChild(text_element('folder', 'Tianchi'))
    annotation.appendChild(text_element('filename', img_name))

    source = doc.createElement('source')
    annotation.appendChild(source)
    source.appendChild(text_element('database', 'The Tianchi Database'))
    source.appendChild(text_element('annotation', 'Tianchi'))

    size = doc.createElement('size')
    annotation.appendChild(size)
    size.appendChild(text_element('width', img_size[1]))
    size.appendChild(text_element('height', img_size[0]))
    size.appendChild(text_element('depth', img_size[2]))

    for coord in coords:
        object = doc.createElement('object')
        annotation.appendChild(object)
        object.appendChild(text_element('name', coord[4]))
        object.appendChild(text_element('pose', 'Unspecified'))
        object.appendChild(text_element('truncated', '1'))
        object.appendChild(text_element('difficult', '0'))
        bndbox = doc.createElement('bndbox')
        object.appendChild(bndbox)
        bndbox.appendChild(text_element('xmin', int(float(coord[0]))))
        bndbox.appendChild(text_element('ymin', int(float(coord[1]))))
        bndbox.appendChild(text_element('xmax', int(float(coord[2]))))
        bndbox.appendChild(text_element('ymax', int(float(coord[3]))))

    # BUG FIX: the original called the non-existent os.path.jpin(), which
    # raised AttributeError on every invocation; os.path.join is intended.
    # The with-statement also guarantees the file handle is closed on error.
    out_path = os.path.join(out_root_path, img_name[:-4] + '.xml')
    with open(out_path, 'w') as f:
        f.write(doc.toprettyxml(indent=''))
| [
"ludahai19@163.com"
] | ludahai19@163.com |
9537234e130166034003d3a6fedd1c33e3c0ffc5 | 4dd786e85f2feee1024e20560a018924784abc11 | /bwt/lyndon/lyndon.py | 8e81e3d16003b691d9f88dbb4723603f88b4aa5d | [] | no_license | ushitora/bwt | c3c74b5da416d37df61a80be209df25322be84ce | 9a2b339f7af4ca16501acf46d1e4958eda5c69c1 | refs/heads/master | 2020-06-27T02:32:43.781286 | 2019-08-01T07:00:30 | 2019-08-01T07:00:30 | 199,820,940 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | def longest_lyndon_prefix(w):
"""
Returns the tuple of the longest lyndon prefix length and the number of repitition for the string w.
Examples:
abbaa -> Returns (3, 1)
abbabb -> Returns (3, 2)
"""
i = 0
j = 1
while j < len(w) and w[i] <= w[j]:
if w[i] == w[j]:
i += 1
j += 1
else:
i = 0
j += 1
return j - i, j // (j - i)
def is_lyndon(w):
    """True iff w's longest Lyndon prefix is the whole string."""
    prefix_len, _ = longest_lyndon_prefix(w)
    return prefix_len == len(w)
def lyndon_break_points(w):
    """Yield the end position of each successive Lyndon factor of w."""
    pos = 0
    while pos < len(w):
        length, repetitions = longest_lyndon_prefix(w[pos:])
        end = pos + length * repetitions
        # A prefix repeated k times contributes k equally spaced breakpoints.
        yield from range(pos + length, end + 1, length)
        pos = end
def lyndon_factorize(w):
    """Yield the Lyndon factorization of w, factor by factor.

    Example:
        abbaba -> abb, ab, a
    """
    previous = 0
    for breakpoint in lyndon_break_points(w):
        yield w[previous:breakpoint]
        previous = breakpoint
| [
"sidebook37@gmail.com"
] | sidebook37@gmail.com |
0f24fe0e1003f8b0742368eac725fc370ae0b63b | aba6c403aa4b8fe1ba19d14669062172c413e29a | /Sem 4/ps/3.py | 7bd99c4fc5c2e88eda79317c6e645e3ebf5bcb73 | [] | no_license | rsreenivas2001/LabMain | 3aa38526a40eb0496ab27dbdbfefde966077a6a6 | 249adc5f3189169cf9ed696e0234b11f7d2c028d | refs/heads/master | 2021-07-06T19:21:30.819464 | 2021-04-05T17:07:49 | 2021-04-05T17:07:49 | 232,073,123 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | def frequency(str):
def frequency(str):
    """Return character frequencies as a sorted list of (char, count) pairs.

    Note: the parameter name shadows the built-in ``str``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # Local import: this script has no top-of-file import section.
    from collections import Counter
    # Counter replaces the hand-rolled dict-counting loop.
    return sorted(Counter(str).items())
if __name__ == '__main__':
    # Interactive driver: read a string and print its character frequencies.
    user_text = input("Enter String : ")
    print(frequency(user_text))
"rsreenivas2001@gmail.com"
] | rsreenivas2001@gmail.com |
c6a869682bd9bb5ff7d2e5ab7409e52b380e3514 | 748be6cf181d029d7616ce70b2ffcbb5524c578d | /mapreduce/runner.py | 12b7077b8aa2823ae8c2cf3ca56d04c730069f86 | [] | no_license | insomniacdoll/personal-python-utils | 343fc739ea31130f968a543e5ee03fa5467c4113 | 80661f53ceac00b29de40c45cdc62b274f4a5d98 | refs/heads/master | 2021-01-01T15:24:59.224129 | 2015-09-27T08:44:21 | 2015-09-27T08:45:12 | 27,001,964 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 1,384 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Author : insomniacdoll@gmail.com
import sys
import os
from utils import *
"""适用于streaming的 简单mapreduce框架
如果需要join请参见join_runner.py
"""
def run_mapper(mapper):
    """Feed every stdin line to *mapper* and print each key/value it yields.

    mapper(line) must return an iterable of pre-formatted "key\\tvalue"
    strings. Failures are logged per line so one bad record does not abort
    the whole Hadoop-streaming job. (Python 2 module -- note the print
    statement and old except syntax.)
    """
    for line in sys.stdin:
        line = line.strip('\n')
        keyvalue = None
        try:
            for keyvalue in mapper(line):
                print keyvalue
        except Exception, e:
            Error(e)
            Error('Invalid input [%s]' %keyvalue)
Error('Invalid input [%s]' %keyvalue)
def combine_bykeys(stdin):
    """Group a key-sorted "key\\tvalue" line stream into (key, [values]).

    Aggregates the reducer's input stream by key, so reducers can be written
    against <key, [v1, v2, v3]> instead of raw lines. The stream must
    already be sorted/grouped by key (as Hadoop streaming guarantees).

    Yields:
        (key, values) tuples, one per run of consecutive equal keys.
    """
    current_key = None
    current_values = []
    for line in stdin:
        (key, value) = line.strip('\n').split('\t', 1)
        if key == current_key:
            current_values.append(value)
        else:
            # BUG FIX: the original tested truthiness ("if current_key"),
            # which silently dropped a group whose key is the empty string;
            # an explicit None check emits every group.
            if current_key is not None:
                yield current_key, current_values
            current_key = key
            current_values = [value]
    # Flush the final group (no-op on an empty stream).
    if current_key is not None:
        yield current_key, current_values
def run_reducer(reducer):
    """Run *reducer* over each (key, values) group from stdin, printing output.

    reducer(key, values) must return an iterable of pre-formatted output
    lines. Errors are logged and the remaining groups keep processing.
    (Python 2 module -- note the print statement and old except syntax.)
    """
    for key, values in combine_bykeys(sys.stdin):
        try:
            for keyvalue in reducer(key, values):
                print keyvalue
        except Exception, e:
            Error(e)
| [
"insomniacdoll@gmail.com"
] | insomniacdoll@gmail.com |
32fa5d654b8d77496ae5bd3378511c8de453eb12 | f39538277bbfa7d27c50134e7fc30a8d0d8d6249 | /TIPA_library/utils/pca_basic.py | 9a069779319bd38d83ed6ea40056734a4abcd231 | [] | no_license | deepneuroscience/TIPA | e835683117cbe5468649cde071f07f1e83e74071 | c14bbd3355be38b5f8e48f9ba1497ac8aea62032 | refs/heads/master | 2023-06-12T23:04:39.981220 | 2023-05-31T18:47:16 | 2023-05-31T18:47:16 | 204,941,025 | 14 | 4 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | import numpy as np
from sklearn.decomposition import PCA
''' This is an example code for PCA projection
'''
def pca_basic(t2d_data_sequence, n_components=0.95):
    """Fit PCA "eigenfaces" to a thermal-image sequence.

    Args:
        t2d_data_sequence: array of shape (height, width, n_frames) --
            e.g. 240x320 images over 2184 frames.
        n_components: forwarded to sklearn's PCA. The default 0.95 keeps
            enough components to explain 95% of the variance (new optional
            parameter; the default preserves the original behavior).

    Returns:
        (eigen_faces, var_percent): the PCA components (one flattened
        eigenface per row) and the per-component explained-variance ratios.
    """
    height = t2d_data_sequence.shape[0]
    width = t2d_data_sequence.shape[1]
    # (H, W, T) -> (T, H*W): one flattened image per row, as PCA expects.
    flattened = t2d_data_sequence.reshape((height * width, np.size(t2d_data_sequence, 2)))
    flattened = np.transpose(flattened)
    output_pca = PCA(n_components=n_components)
    output_pca.fit(flattened)
    # NOTE: the original also called transform() and discarded the result;
    # that dead projection is removed -- components_ is set by fit() alone.
    eigen_faces = output_pca.components_
    var_percent = output_pca.explained_variance_ratio_
    return eigen_faces, var_percent
"noreply@github.com"
] | deepneuroscience.noreply@github.com |
af6411894235babd02c92b06e8b27ff6dbf8d1af | 986e50caf0b48b9423114e56c18b6edbf50c0aaa | /mp3scrap/spiders/mp3crawl.py | 5d4ea622654e2da8426d5a1a4ebe4cc0eeb4558d | [] | no_license | arun-shaw/songsmp3-scrap | 6b7906331893ff3d91da0703c6624b10b25db110 | 5e5e5a09f1a81f7f28ffbd0de69cc0440d0f46ae | refs/heads/main | 2023-01-05T14:34:15.706912 | 2020-11-03T06:18:24 | 2020-11-03T06:18:24 | 309,592,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,891 | py | # -*- coding: utf-8 -*-
import scrapy
from ..items import Mp3MovieItem,Mp3SongsItem
class Mp3crawlSpider(scrapy.Spider):
    """Crawls songs-mp3.net: category index -> movie pages -> song listings.

    parse() walks the alphabetical category index, pdata() follows each
    movie link, and sdata() yields one Mp3MovieItem per movie plus one
    Mp3SongsItem per track for the pipelines to store.
    """
    name = 'mp3'
    #allowed_domains = ['https://www.songs-mp3.net/']
    start_urls = ['https://www.songs-mp3.net/5/indipop-mp3-songs.html']
    # Site root used to turn relative hrefs into absolute request URLs.
    domain = 'https://www.songs-mp3.net'

    def parse(self, response):
        """Follow every alphabetical category link on the index page."""
        sel=scrapy.Selector(response)
        #all_Movies_List,url = zip(sel.css('div#movie_cats ul li a::text').extract(),sel.css('div#movie_cats ul li a::attr(href)').extract())
        # getting the list in alphabetical order
        text=sel.css('div#movie_cats ul li a::text').extract()
        url=sel.css('div#movie_cats ul li a::attr(href)').extract()
        for (t,u) in zip(text,url):
            hit=self.domain+u
            print(t,hit)
            yield scrapy.Request(hit,callback=self.pdata)

    # getting the movie list of each alphabetical order
    def pdata(self,response):
        """Extract movie name/year from each listing; follow its songs page."""
        sel=scrapy.Selector(response)
        Movie_Songs=sel.css('div.list_inside_box ul li a *::attr(href)').extract()
        Movie_Name=sel.css('div.list_inside_box ul li a *::text').extract()
        for (name,url) in zip(Movie_Name,Movie_Songs):
            # Strip the "Mp3 Songs" suffix and the "(year)" fragment to get
            # the bare movie title.
            n=name.replace('Movie Mp3 Songs','').replace('Mp3 Songs','').replace(name[name.find('('):name.find(')')+1],'').replace('.','').strip()
            year = name[name.find('(') + 1:name.find(')')]
            Song_URL = self.domain + url
            # print('Movie Name : ',n,'Released On : ',year,'Songs URL : ',Song_URL)
            yield scrapy.Request(Song_URL, callback=self.sdata, meta={'M_Name': n, 'M_Year': year, 'M_URL': Song_URL})

    # getting songs details of each movie.
    def sdata(self, response):
        """Yield the movie item plus one item per song on the movie page."""
        sel = scrapy.Selector(response)
        MovieItem=Mp3MovieItem()
        # Credits table rows are read by fixed position: 0 stars, 1 director,
        # 2 music director, 3 composer, 4 singer
        # (assumed from the indexing below -- confirm against the site markup).
        Mov_Details = sel.css('div.movie_details table tbody tr td.m_d_title3')
        Stars = ','.join([a for a in Mov_Details[0].css('a::text').extract()])
        Director = ','.join([a for a in Mov_Details[1].css('a::text').extract()])
        M_Director = ','.join([a for a in Mov_Details[2].css('a::text').extract()])
        Composer = ','.join([a for a in Mov_Details[3].css('a::text').extract()])
        Singer = ','.join([a for a in Mov_Details[4].css('a::text').extract()])
        name = response.meta['M_Name']
        year = response.meta['M_Year']
        url = response.meta['M_URL']
        # print('Movie Name :', name, 'Released On : ', year, 'Songs URL : ', url, 'Stars :', Stars, 'Director(s) :',
        #       Director, 'Music Director(s) :', M_Director, 'Composer :', Composer, 'Singer(s) :', Singer)
        MovieItem['name']=name
        MovieItem['year']=year
        MovieItem['url']=url
        MovieItem['Stars']=Stars
        MovieItem['Director']=Director
        MovieItem['M_Director']=M_Director
        MovieItem['Composer']=Composer
        MovieItem['Singer']=Singer
        yield MovieItem # Storing movie details into database

        # Songs details item
        song_details = sel.css('div.items')
        SongItem=Mp3SongsItem()
        for song in song_details.css('div.link-item'):
            Song_name = song.css('div.link::text').extract_first()
            Song_URL = self.domain + song.css('a::attr(href)').extract_first()
            Song_Artist = ','.join([a for a in song.css('div.item-artist a::text').extract()])
            # "Size: 4.2 MB, ..." -> keep only the value of the first field.
            Song_Size = song.css('div.item-artist::text').extract_first().split(',')[0].split(':')[1].strip()
            #print('Song Name :', Song_name, 'Song URL :', Song_URL, 'Artist(s)', Song_Artist, 'Size :', Song_Size)
            # print(song.css('*').extract())
            SongItem['Mov_Name']=name
            SongItem['Title']=Song_name
            SongItem['url']=Song_URL
            SongItem['Artist']=Song_Artist
            SongItem['Size']=Song_Size
            yield SongItem # Storing songs details into database
| [
"noreply@github.com"
] | arun-shaw.noreply@github.com |
a504526e7afcb6817c2878fa279d32e1dfc65ac6 | 72f5adc4b6f79dd40e975c86abcdbd3d0ccada86 | /venv/bin/pip3.7 | 3786525abb997c921a0c0979436550edefdc7960 | [] | no_license | katrek/flask_vacancy_parser | 77101604ec5bfeb47c009b9d8329b42d9d30bf4a | bbea4ae860bb78f7264b05e92c6664f8e4c4b3cf | refs/heads/master | 2023-01-11T11:58:09.275448 | 2019-08-29T06:36:53 | 2019-08-29T06:36:53 | 204,666,913 | 1 | 1 | null | 2023-01-03T12:19:03 | 2019-08-27T09:22:35 | Python | UTF-8 | Python | false | false | 420 | 7 | #!/Users/artemtkachev/PycharmProjects/flask_parser2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"akatrek@gmail.com"
] | akatrek@gmail.com |
61725cec442d9ec8f82600d63aa62acb08937b54 | 21a58adb30eb54a344ae185891df7cf235396b6d | /Proyecto1/urls.py | e6070fd51d182c9d11ecf5ee9e9967df6398c1ce | [] | no_license | Erisjimver/django_proyecto1_practica | a460800a6439ccb7981eeb7415fd798d94625c5a | 1d236510c074a03f42a7d35922ff973d0da2f532 | refs/heads/master | 2020-08-06T16:27:24.992895 | 2019-10-06T05:05:44 | 2019-10-06T05:05:44 | 213,073,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | """Proyecto1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Proyecto1.views import cursoC,cursoCSS,saludo,despedida,dame_fecha,calcularEdad,calcularEdad1,saludo2
urlpatterns = [
path('admin/', admin.site.urls),
path('saludo/', saludo),
path('saludo2/', saludo2),
path('despedida/', despedida),
path('fecha/', dame_fecha),
path('edad/<int:anio>/', calcularEdad),
path('edad1/<int:edad>/<int:anio>/', calcularEdad1),
path('cursoC/', cursoC),
path('cursoCSS/', cursoCSS),
]
| [
"erisjinver@gmail.com"
] | erisjinver@gmail.com |
03771c28af243c41e09a09630addbf700d35abaa | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /tools/android/native_lib_memory/parse_smaps.py | c167a327bfe8764e6dce320d671e151267c9aba7 | [
"Zlib",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"APSL-2.0",
"MIT",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 9,429 | py | #!/usr/bin/python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses /proc/[pid]/smaps on a device and shows the total amount of swap used.
"""
from __future__ import print_function
import argparse
import collections
import logging
import os
import re
import sys
_SRC_PATH = os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
class Mapping(object):
"""A single entry (mapping) in /proc/[pid]/smaps."""
def __init__(self, start, end, permissions, offset, pathname):
"""Initializes an instance.
Args:
start: (str) Start address of the mapping.
end: (str) End address of the mapping.
permissions: (str) Permission string, e.g. r-wp.
offset: (str) Offset into the file or 0 if this is not a file mapping.
pathname: (str) Path name, or pseudo-path, e.g. [stack]
"""
self.start = int(start, 16)
self.end = int(end, 16)
self.permissions = permissions
self.offset = int(offset, 16)
self.pathname = pathname.strip()
self.fields = collections.OrderedDict()
def AddField(self, line):
"""Adds a field to an entry.
Args:
line: (str) As it appears in /proc/[pid]/smaps.
"""
assert ':' in line
split_index = line.index(':')
k, v = line[:split_index].strip(), line[split_index + 1:].strip()
assert k not in self.fields
if v.endswith('kB'):
v = int(v[:-2])
self.fields[k] = v
def ToString(self):
"""Returns a string representation of a mapping.
The returned string is similar (but not identical) to the /proc/[pid]/smaps
entry it was generated from.
"""
lines = []
lines.append('%x-%x %s %x %s' % (
self.start, self.end, self.permissions, self.offset, self.pathname))
for name in self.fields:
format_str = None
if isinstance(self.fields[name], int):
format_str = '%s: %d kB'
else:
format_str = '%s: %s'
lines.append(format_str % (name, self.fields[name]))
return '\n'.join(lines)
def _ParseProcSmapsLines(lines):
SMAPS_ENTRY_START_RE = (
# start-end
'^([0-9a-f]{1,16})-([0-9a-f]{1,16}) '
# Permissions
'([r\-][w\-][x\-][ps]) '
# Offset
'([0-9a-f]{1,16}) '
# Device
'([0-9a-f]{2,3}:[0-9a-f]{2,3}) '
# Inode
'([0-9]*) '
# Pathname
'(.*)')
assert re.search(SMAPS_ENTRY_START_RE,
'35b1800000-35b1820000 r-xp 00000000 08:02 135522 '
'/usr/lib64/ld-2.15.so')
entry_re = re.compile(SMAPS_ENTRY_START_RE)
mappings = []
for line in lines:
match = entry_re.search(line)
if match:
(start, end, perms, offset, _, _, pathname) = match.groups()
mappings.append(Mapping(start, end, perms, offset, pathname))
else:
mappings[-1].AddField(line)
return mappings
def ParseProcSmaps(device, pid, store_file=False):
"""Parses /proc/[pid]/smaps on a device, and returns a list of Mapping.
Args:
device: (device_utils.DeviceUtils) device to parse the file from.
pid: (int) PID of the process.
store_file: (bool) Whether to also write the file to disk.
Returns:
[Mapping] all the mappings in /proc/[pid]/smaps.
"""
command = ['cat', '/proc/%d/smaps' % pid]
lines = device.RunShellCommand(command, check_return=True)
if store_file:
with open('smaps-%d' % pid, 'w') as f:
f.write('\n'.join(lines))
return _ParseProcSmapsLines(lines)
def _GetPageTableFootprint(device, pid):
"""Returns the page table footprint for a process in kiB."""
command = ['cat', '/proc/%d/status' % pid]
lines = device.RunShellCommand(command, check_return=True)
for line in lines:
if line.startswith('VmPTE:'):
value = int(line[len('VmPTE: '):line.index('kB')])
return value
def _SummarizeMapping(mapping, metric):
return '%s %s %s: %d kB (Total Size: %d kB)' % (
hex(mapping.start),
mapping.pathname, mapping.permissions, metric,
(mapping.end - mapping.start) / 1024)
def _PrintMappingsMetric(mappings, field_name):
"""Shows a summary of mappings for a given metric.
For the given field, compute its aggregate value over all mappings, and
prints the mappings sorted by decreasing metric value.
Args:
mappings: ([Mapping]) all process mappings.
field_name: (str) Mapping field to process.
"""
total_kb = sum(m.fields[field_name] for m in mappings)
print('Total Size (kB) = %d' % total_kb)
sorted_by_metric = sorted(mappings,
key=lambda m: m.fields[field_name], reverse=True)
for mapping in sorted_by_metric:
metric = mapping.fields[field_name]
if not metric:
break
print(_SummarizeMapping(mapping, metric))
def _PrintSwapStats(mappings):
print('SWAP:')
_PrintMappingsMetric(mappings, 'Swap')
def _FootprintForAnonymousMapping(mapping):
assert mapping.pathname.startswith('[anon:')
if (mapping.pathname == '[anon:libc_malloc]'
and mapping.fields['Shared_Dirty'] != 0):
# libc_malloc mappings can come from the zygote. In this case, the shared
# dirty memory is likely dirty in the zygote, don't count it.
return mapping.fields['Rss']
else:
return mapping.fields['Private_Dirty']
def _PrintEstimatedFootprintStats(mappings, page_table_kb):
print('Private Dirty:')
_PrintMappingsMetric(mappings, 'Private_Dirty')
print('\n\nShared Dirty:')
_PrintMappingsMetric(mappings, 'Shared_Dirty')
print('\n\nPrivate Clean:')
_PrintMappingsMetric(mappings, 'Private_Clean')
print('\n\nShared Clean:')
_PrintMappingsMetric(mappings, 'Shared_Clean')
print('\n\nSwap PSS:')
_PrintMappingsMetric(mappings, 'SwapPss')
print('\n\nPage table = %d kiB' % page_table_kb)
def _ComputeEstimatedFootprint(mappings, page_table_kb):
"""Returns the estimated footprint in kiB.
Args:
mappings: ([Mapping]) all process mappings.
page_table_kb: (int) Sizeof the page tables in kiB.
"""
footprint = page_table_kb
for mapping in mappings:
# Chrome shared memory.
#
# Even though it is shared memory, it exists because the process exists, so
# account for its entirety.
if mapping.pathname.startswith('/dev/ashmem/shared_memory'):
footprint += mapping.fields['Rss']
elif mapping.pathname.startswith('[anon'):
footprint += _FootprintForAnonymousMapping(mapping)
# Mappings without a name are most likely Chrome's native memory allocators:
# v8, PartitionAlloc, Oilpan.
# All of it should be charged to our process.
elif mapping.pathname.strip() == '':
footprint += mapping.fields['Rss']
# Often inherited from the zygote, only count the private dirty part,
# especially as the swap part likely comes from the zygote.
elif mapping.pathname.startswith('['):
footprint += mapping.fields['Private_Dirty']
# File mappings. Can be a real file, and/or Dalvik/ART.
else:
footprint += mapping.fields['Private_Dirty']
return footprint
def _ShowAllocatorFootprint(mappings, allocator):
"""Shows the total footprint from a specific allocator.
Args:
mappings: ([Mapping]) all process mappings.
allocator: (str) Allocator name.
"""
total_footprint = 0
pathname = '[anon:%s]' % allocator
for mapping in mappings:
if mapping.pathname == pathname:
total_footprint += _FootprintForAnonymousMapping(mapping)
print('\tFootprint from %s: %d kB' % (allocator, total_footprint))
def _CreateArgumentParser():
parser = argparse.ArgumentParser()
parser.add_argument('--pid', help='PID.', required=True, type=int)
parser.add_argument('--estimate-footprint',
help='Show the estimated memory foootprint',
action='store_true')
parser.add_argument('--store-smaps', help='Store the smaps file locally',
action='store_true')
parser.add_argument('--show-allocator-footprint',
help='Show the footprint from a given allocator',
choices=['v8', 'libc_malloc', 'partition_alloc'],
nargs='+')
parser.add_argument(
'--device', help='Device to use', type=str, default='default')
return parser
def main():
parser = _CreateArgumentParser()
args = parser.parse_args()
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=args.device)
if not devices:
logging.error('No connected devices')
return
device = devices[0]
if not device.HasRoot():
device.EnableRoot()
# Enable logging after device handling as devil is noisy at INFO level.
logging.basicConfig(level=logging.INFO)
mappings = ParseProcSmaps(device, args.pid, args.store_smaps)
if args.estimate_footprint:
page_table_kb = _GetPageTableFootprint(device, args.pid)
_PrintEstimatedFootprintStats(mappings, page_table_kb)
footprint = _ComputeEstimatedFootprint(mappings, page_table_kb)
print('\n\nEstimated Footprint = %d kiB' % footprint)
else:
_PrintSwapStats(mappings)
if args.show_allocator_footprint:
print('\n\nMemory Allocators footprint:')
for allocator in args.show_allocator_footprint:
_ShowAllocatorFootprint(mappings, allocator)
if __name__ == '__main__':
main()
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
b73a8cf022db3964618300301bbb49bcdb1c282b | 10e3055ddf9c4e05b6f86fd1f0ee35309b3869be | /venv/bin/django-admin | 58c83d104f78ae24243d363e36bd3794f1295ae1 | [] | no_license | JKangel/xinpianchang | b1cc4963a4c7cee781a9364d41ce51a913fb7536 | e032375fc0fe978968f182555e36fa1d702cf27f | refs/heads/master | 2020-04-07T08:59:32.606515 | 2018-03-07T07:25:32 | 2018-03-07T07:25:32 | 124,197,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | #!/home/kangel/Learn/python/spider/xpc/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"j_juyongkang@163.com"
] | j_juyongkang@163.com | |
3b0564723412a2e743aabfd1ee3406838a99e044 | 4762906f4e2465026975a009665295c806f5a887 | /src/utils.py | a9eba8d6b8706e0569f151032508a8f52bd0477e | [] | no_license | sahil-m/dvc_session | 7c9e5c19f883da7be62f6f3c2d067e371d4af886 | ac44e1a160d35c8f5f0399a195dbea13a0b5da6f | refs/heads/master | 2022-05-30T00:26:43.735740 | 2020-05-04T08:02:27 | 2020-05-04T08:02:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import os
import matplotlib.pyplot as plt
import pandas as pd
import yaml
from dagshub import dagshub_logger
from joblib import dump, load
from sklearn.metrics import plot_confusion_matrix
from yaml import CLoader as Loader
def log_experiment(out_path, params: dict, metrics: dict):
with dagshub_logger(metrics_path=f'{out_path}metrics.csv', hparams_path=f'{out_path}params.yml') as logger:
logger.log_hyperparams(params=params)
logger.log_metrics(metrics=metrics)
def print_results(accuracy, c_matrix, model_name=''):
print(f'Finished Training {model_name}:\nStats:')
print(f'\tConfusion Matrix:\n{c_matrix}')
print(f'\tModel Accuracy: {accuracy}')
def evaluate_model(model, X_test, y_test):
cmd = plot_confusion_matrix(model, X_test, y_test, cmap=plt.cm.Reds)
c_matrix = cmd.confusion_matrix
accuracy = model.score(X_test, y_test)
return accuracy, c_matrix, cmd.figure_
def save_results(out_path, model, fig):
if not os.path.isdir(out_path):
os.makedirs(out_path)
dump(model, f'{out_path}model.gz')
if fig:
fig.savefig(f'{out_path}confusion_matrix.svg', format='svg')
def read_data(data_path: str) -> (pd.DataFrame, pd.DataFrame, pd.Series, pd.Series):
train = pd.read_csv(f'{data_path}train.csv')
test = pd.read_csv(f'{data_path}test.csv')
X_train, y_train = train.drop(columns=['class']), train['class']
X_test, y_test = test.drop(columns=['class']), test['class']
return X_train, X_test, y_train, y_test
def load_model(path):
return load(f'{path}/model.gz')
def read_params(file='params.yaml', model='pca'):
with open(file, 'r') as fp:
params = yaml.load(fp, Loader)
return params[model] | [
"Puneetha_Pai@external.mckinsey.com"
] | Puneetha_Pai@external.mckinsey.com |
6978dadae7ce2b51d7b567621e71a595e4bfc48f | da8d3c7a115768e796a0521d5400471ccb74cd1b | /photo/migrations/0012_auto_20200609_1002.py | 67b4a934743851b8729ceea082ea58cb91601ca5 | [] | no_license | malindu97/CS50Final | e52d386f0e15e310d5300550b27f0356dc9aed3f | d73d262e91f381a0bb719656f4760221d9aee499 | refs/heads/master | 2022-10-15T13:19:57.354559 | 2020-06-10T10:48:15 | 2020-06-10T10:48:15 | 271,025,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | # Generated by Django 3.0.6 on 2020-06-09 04:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('photo', '0011_auto_20200608_1615'),
]
operations = [
migrations.AddField(
model_name='images',
name='num_vote_down',
field=models.PositiveIntegerField(db_index=True, default=0),
),
migrations.AddField(
model_name='images',
name='num_vote_up',
field=models.PositiveIntegerField(db_index=True, default=0),
),
migrations.AddField(
model_name='images',
name='vote_score',
field=models.IntegerField(db_index=True, default=0),
),
]
| [
"malindu.wick97@gmail.com"
] | malindu.wick97@gmail.com |
b1bd40fc3759540023329a1f40f62d02ebafc9e5 | f46df32e8e38370dc540952b9ab0a98477c65bc4 | /controllers.py | c5b03166bbc26d890b1c03f5b0132f6198b0229e | [] | no_license | suejungshin/zesty-api-backup | 95cac13da4b238e4f7d7b860f44372d3e1ef3547 | 835000fc2772aa22462f60dbe24c97675f9770d0 | refs/heads/master | 2022-12-07T09:21:07.758774 | 2020-03-25T06:50:53 | 2020-03-25T06:50:53 | 249,881,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | import models
import io
import geojson
import json
import urllib.request
from PIL import Image, ImageDraw
from pathlib import Path
path = Path.cwd()
def translate_coords(arr, bounds, size):
lon0 = bounds[0]
lat0 = bounds[1]
lon_scale = size[0] / (bounds[2] - bounds[0])
lat_scale = size[1] / (bounds[3] - bounds[1])
res = []
for coord in arr:
lon = coord[0]
lat = coord[1]
res.append(((lon - lon0) * lon_scale, size[1] - (lat - lat0) * lat_scale))
return res
def draw_overlay(id):
result = models.get_property_details(id)
bldg_latlongs = geojson.loads(result[0]).coordinates[0]
parcel_latlongs = geojson.loads(result[1]).coordinates[0]
image_bounds = result[2]
im = Image.open(f"./images/{id}.jpeg")
size = im.size
parcel_pxls = translate_coords(parcel_latlongs, image_bounds, size)
bldg_pxls = translate_coords(bldg_latlongs, image_bounds, size)
polyg = Image.new('RGBA', size)
pdraw = ImageDraw.Draw(polyg)
pdraw.polygon(parcel_pxls, fill=(255,255,0,128))
pdraw.polygon(bldg_pxls, fill=(255,0,0,128))
im.paste(polyg, mask=polyg)
image_path = f"{path}/images/{id}-overlayed.jpeg"
im.save(image_path, format="JPEG")
return
def save_image_from_url(id):
image_url = models.get_image_url(id)
if image_url is None:
return False
bytes_obj = urllib.request.urlopen(image_url).read()
image = Image.open(io.BytesIO(bytes_obj))
image_path = f"{path}/images/{id}.jpeg"
image.save(image_path, format="JPEG")
return True
def get_image(id, overlay):
if overlay:
image_path = f"{path}/images/{id}-overlayed.jpeg"
else:
image_path = f"{path}/images/{id}.jpeg"
try:
return open(image_path, "rb")
except FileNotFoundError:
success = save_image_from_url(id)
if not success:
return None
else:
if overlay:
draw_overlay(id)
return open(image_path, "rb")
else:
return open(image_path, "rb")
def get_nearby_properties(geojson_obj):
return models.find_nearby_property_ids(geojson_obj)
def get_stats(id, distance):
total_parcel_area_in_radius = models.get_total_parcels_area(id, distance)
buildings_detail = models.get_nearby_buildings_details(id, distance)
areas = buildings_detail[0]
distances = buildings_detail[1]
zone_density = models.get_zone_density(id, distance)
if not zone_density:
zone_density = "Search distance was too large"
return json.dumps({
"total_parcel_area_in_radius": total_parcel_area_in_radius,
"buildings_areas": areas,
"buildings_dists_to_center": distances,
"zone_density": zone_density
}) | [
"suejungshin@Suejungs-MBP.attlocal.net"
] | suejungshin@Suejungs-MBP.attlocal.net |
aeb43f727bef3568d5d61348436f69e824c7c388 | e08caa6d3db87282ff88e31885be602f5a8f7607 | /week5/mytree.py | 83783646560af7261c07927238ed17b97891e87a | [] | no_license | alice-yang94/GetAheadProgram | c66ac732c6f467251a73246f906f9e774d8b783b | 9cc6a8904020d99ec648943b51051f4261e03ddf | refs/heads/master | 2022-04-04T08:45:36.212499 | 2020-02-17T12:12:01 | 2020-02-17T12:12:01 | 232,677,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | class Node:
def __init__(self, val, ifTerminate, children = None):
self.val = val
self.ifTerminate = ifTerminate
self.children = {}
self.depth = 1
if children is not None:
for child in children:
self.add_child(child)
self.depth = max(child.depth+1, self.depth)
def __repr__(self):
return f'{self.val} {self.ifTerminate}'
def print_children(self):
if self.children:
print(f'children of {self.val}, {self.ifTerminate}: ', end ='')
print(self.children)
for child in self.children.values():
child.print_children()
def add_child(self, child):
assert isinstance(child, Node)
if child.val not in self.children:
self.children[child.val] = child
return child
if child.ifTerminate:
self.children[child.val].ifTerminate = True
return self.children[child.val]
def dfs(self):
stack = [self]
while stack:
curr = stack.pop()
# in-place
print(curr)
for c in curr.children:
stack.append(c)
| [
"aliceyang94@hotmail.com"
] | aliceyang94@hotmail.com |
9d274b04439cb4d82b16c15b0a53226f81f8972c | 94d7214427c44eb42a5202421cb4b1a11d8a9a07 | /travelism/agencies/models.py | eda988a32d4836422cb1be2cbb92d90282a4b0da | [] | no_license | alisoliman/travelism | 1357c8b3ad21fe00c9cd0007c9f45c0d5be5e8c7 | 4de747d430758c5cf6d586ce994c093e42c5341e | refs/heads/master | 2020-11-26T20:54:34.124100 | 2020-02-13T10:27:05 | 2020-02-13T10:27:05 | 229,203,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.db import models
# Create your models here.
class Agency(models.Model):
name = models.CharField(max_length=128)
brief = models.TextField()
cover_picture = models.ImageField()
rating = models.FloatField(default=0)
def __str__(self):
return self.name
| [
"ali.soliman95@gmail.com"
] | ali.soliman95@gmail.com |
bfee922efb85a81a17dfa95c624fc265aa22db08 | 3f0a5787dc4a417648ae4297724aed5a8fa0d318 | /005-ray/10-ray-serving-tutorial/app.py | a20835200875abc2f60d5877f8c486428cea8957 | [] | no_license | AndersonJo/code-snippet | d6d55574ff77e1c9d68f6f2e735096264ddac3ad | 856d97ac617ea2ae45b301a14a999647abec1a0c | refs/heads/master | 2023-05-10T19:44:21.953081 | 2023-02-16T13:12:51 | 2023-02-16T13:12:51 | 177,695,307 | 2 | 1 | null | 2023-05-01T21:46:50 | 2019-03-26T01:51:17 | Jupyter Notebook | UTF-8 | Python | false | false | 1,413 | py | from ray import serve
from starlette.requests import Request
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
@serve.deployment(num_replicas=2, ray_actor_options={"num_cpus": 1, "num_gpus": 0})
class Translator:
def __init__(self):
# Load model
self.tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_1.2B")
self.tokenizer.src_lang = 'en'
self.model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_1.2B")
self.model.eval()
def translate(self, text: str) -> str:
dest_lang_id = self.tokenizer.get_lang_id('ko')
encoded_src = self.tokenizer(text, return_tensors="pt")
generated_tokens = self.model.generate(**encoded_src,
forced_bos_token_id=dest_lang_id,
max_length=200,
use_cache=True)
result = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
return result
async def __call__(self, http_request: Request) -> str:
korean_text: str = await http_request.json()
return self.translate(korean_text)
translator = Translator.bind()
# if __name__ == '__main__':
# translator = Translator()
# print(translator.translate('self-belief and hard work will always earn you success'))
| [
"a141890@gmail.com"
] | a141890@gmail.com |
64d649aba8a3b3ae9ccf92ddc4d8ba2c1699a86b | d72d43d32f4f191a2e292b95decfa3979c585b64 | /bubbleSortIterationCount.py | f1e4a1d7b126c4d5e20418f7ed19dcb9bc9859a3 | [] | no_license | Maxyee/Python_Basic_Codes | 38db3cf9bf35f217c5c485f77e561d355b7cad5b | a14b17ad622f2bbfa774823b3400c263717a0e6f | refs/heads/master | 2020-03-22T01:42:03.614491 | 2019-06-17T08:28:54 | 2019-06-17T08:28:54 | 139,323,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | def print_swaps(arr):
swaps = 0
length = len(arr)
for i in range(length - 1):
for k in range(length - i - 1):
if arr[k] > arr[k + 1]:
arr[k], arr[k + 1] = arr[k + 1], arr[k]
swaps += 1
print(swaps)
N = int(input())
a = [int(x) for x in input().split()]
print_swaps(a) | [
"eyaminkhan00@gmail.com"
] | eyaminkhan00@gmail.com |
f6dffe88f3099ec4366c017e88d66f5268532180 | 4029d6780b91725bd14592ce07e1f7e4f069c8f2 | /Main.py | 7f7a1e1130609541fef8d28a7b26074c288f03bf | [] | no_license | AdamJSoftware/Convert_CSV | c31b5d79bc6265fb55a09384d95af5357879ce57 | 576fe307035f4a999e3cb18b492a0ba56f834588 | refs/heads/master | 2020-05-30T23:10:06.705308 | 2019-06-03T19:18:04 | 2019-06-03T19:18:04 | 190,009,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | import GUI
if __name__ == '__main__':
GUI.main()
| [
"42972871+ViteloSoftware@users.noreply.github.com"
] | 42972871+ViteloSoftware@users.noreply.github.com |
8ecf7bb0305c592d0b6011e7db8ef79bcf6f8662 | 7844d638111af81f9c8ff77b3808b27a130454e0 | /kws_test/feature/dct.py | 4a58bca56dce4b9644ff979a0dc33c79cdd177c0 | [] | no_license | tu1258/kws_test | 1bb9d664f967744aab2b8537dd6fe4bebb50f17d | b39bda2dc7a7ab0ce1c2d1c8e6a069159a44e9da | refs/heads/master | 2022-12-23T14:21:12.550151 | 2020-09-30T08:35:40 | 2020-09-30T08:35:40 | 299,853,532 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""compute direct forward DCT II on input speech signal."""
import numpy as np
import tensorflow.compat.v2 as tf
class DCT(tf.keras.layers.Layer):
"""Computes forward DCT transofmation.
It is based on direct implementation described at
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
This is useful for speech feature extraction.
"""
def __init__(self, num_features=None, **kwargs):
super(DCT, self).__init__(**kwargs)
self.num_features = num_features
def build(self, input_shape):
super(DCT, self).build(input_shape)
# dct is computed on last dim
feature_size = int(input_shape[-1])
if self.num_features is None:
self.num_features = int(input_shape[-1])
if self.num_features > feature_size:
raise ValueError('num_features: %d can not be > feature_size: %d' %
(self.num_features, feature_size))
# precompute forward dct transformation
self.dct = 2.0 * np.cos(np.pi * np.outer(
np.arange(feature_size) * 2.0 + 1.0, np.arange(feature_size)) /
(2.0 * feature_size))
# DCT normalization
norm = 1.0 / np.sqrt(2.0 * feature_size)
# reduce dims, so that DCT is computed only on returned features
# with size num_features
self.dct = (self.dct[:, :self.num_features] * norm).astype(np.float32)
def call(self, inputs):
# compute DCT
return tf.matmul(inputs, self.dct)
def get_config(self):
config = {
'num_features': self.num_features,
}
base_config = super(DCT, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
"noreply@github.com"
] | tu1258.noreply@github.com |
d01e43573608281fc31233bbcdb6aa0b8621b9ca | 63d722cfc5749049c72e1f3c075ce95b99baa257 | /lessonOne.py | ae73b87186208ddbb0df14ce55cb9c7f546b46aa | [] | no_license | kdkimmer/hello-google-app-engine | d90b415dee0eeb78dc6e6c1086b667c104615358 | a9f70a1e8ff1b4104d6269b4f2c035210b20e0fb | refs/heads/master | 2021-01-20T17:20:16.908502 | 2016-08-21T19:56:29 | 2016-08-21T19:56:29 | 65,671,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import webapp2
form="""
<form method = "post" action= "/testform">
<input name="q">
<input type="submit">
</form>
"""
class TestHandler(webapp2.RequestHandler):
def get(self):
q = self.request.get("q")
self.response.write(q)
#self.response.headers['Content-Type'] = 'plain/text'
#self.response.write(self.request)
class MainHandler(webapp2.RequestHandler):
def post(self):
self.response.write(form)
app = webapp2.WSGIApplication([
('/', MainHandler),
('/testform', TestHandler)
], debug=True) | [
"sageykat-123@yahoo.com"
] | sageykat-123@yahoo.com |
f0d8fc5a6739e6510b5819ce8a9f6155c79f922b | f8b5aafac15f408a48fabf853a918015c927e6fe | /backup/virtualenv/venv27/lib/python2.7/site-packages/openstackclient/identity/v3/role.py | 0376070907d96274184e4a7d75690462833415c3 | [] | no_license | to30/tmp | bda1ac0ca3fc61e96c2a1c491367b698d7e97937 | ec809683970af6787728c2c41f161f416155982a | refs/heads/master | 2021-01-01T04:25:52.040770 | 2016-05-13T16:34:59 | 2016-05-13T16:34:59 | 58,756,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,740 | py | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Role action implementations"""
import logging
import six
import sys
from cliff import command
from cliff import lister
from cliff import show
from keystoneclient import exceptions as ksc_exc
from openstackclient.common import utils
from openstackclient.i18n import _ # noqa
class AddRole(command.Command):
    """Adds a role to a user or group on a domain or project"""

    log = logging.getLogger(__name__ + '.AddRole')

    def get_parser(self, prog_name):
        parser = super(AddRole, self).get_parser(prog_name)
        parser.add_argument(
            'role',
            metavar='<role>',
            help='Role to add to <user> (name or ID)',
        )
        domain_or_project = parser.add_mutually_exclusive_group()
        domain_or_project.add_argument(
            '--domain',
            metavar='<domain>',
            help='Include <domain> (name or ID)',
        )
        domain_or_project.add_argument(
            '--project',
            metavar='<project>',
            help='Include `<project>` (name or ID)',
        )
        user_or_group = parser.add_mutually_exclusive_group()
        user_or_group.add_argument(
            '--user',
            metavar='<user>',
            help='Include <user> (name or ID)',
        )
        user_or_group.add_argument(
            '--group',
            metavar='<group>',
            help='Include <group> (name or ID)',
        )
        return parser

    def _resolve_grant_kwargs(self, identity_client, parsed_args):
        """Resolve name/ID arguments into keyword args for roles.grant().

        Returns a dict containing exactly one of 'user'/'group' and one of
        'domain'/'project' (as IDs), or None when the combination supplied
        on the command line is incomplete.
        """
        kwargs = {}
        if parsed_args.user:
            kwargs['user'] = utils.find_resource(
                identity_client.users,
                parsed_args.user,
            ).id
        elif parsed_args.group:
            kwargs['group'] = utils.find_resource(
                identity_client.groups,
                parsed_args.group,
            ).id
        if parsed_args.domain:
            kwargs['domain'] = utils.find_resource(
                identity_client.domains,
                parsed_args.domain,
            ).id
        elif parsed_args.project:
            kwargs['project'] = utils.find_resource(
                identity_client.projects,
                parsed_args.project,
            ).id
        has_actor = 'user' in kwargs or 'group' in kwargs
        has_target = 'domain' in kwargs or 'project' in kwargs
        if not (has_actor and has_target):
            return None
        return kwargs

    def take_action(self, parsed_args):
        """Grant <role> to the selected user/group on a domain/project.

        Refactored: the four near-identical user/group x domain/project
        branches are collapsed into a single helper + grant() call.
        """
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        # No target arguments at all: silently do nothing (legacy behavior).
        if (not parsed_args.user and not parsed_args.domain
                and not parsed_args.group and not parsed_args.project):
            return
        role = utils.find_resource(
            identity_client.roles,
            parsed_args.role,
        )

        kwargs = self._resolve_grant_kwargs(identity_client, parsed_args)
        if kwargs is None:
            # Incomplete combination, e.g. --user without --domain/--project.
            sys.stderr.write("Role not added, incorrect set of arguments \
provided. See openstack --help for more details\n")
            return
        identity_client.roles.grant(role.id, **kwargs)
        return
class CreateRole(show.ShowOne):
    """Create new role"""

    log = logging.getLogger(__name__ + '.CreateRole')

    def get_parser(self, prog_name):
        parser = super(CreateRole, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<role-name>',
            help='New role name',
        )
        parser.add_argument(
            '--or-show',
            action='store_true',
            help=_('Return existing role'),
        )
        return parser

    def take_action(self, parsed_args):
        """Create the role, optionally returning an existing one on conflict."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity
        try:
            role = identity_client.roles.create(name=parsed_args.name)
        except ksc_exc.Conflict as e:
            # Name already taken: either surface it, or look the role up.
            if not parsed_args.or_show:
                raise e
            role = utils.find_resource(identity_client.roles,
                                       parsed_args.name)
            self.log.info('Returning existing role %s', role.name)
        # 'links' is API plumbing, not useful display output.
        role._info.pop('links')
        return zip(*sorted(six.iteritems(role._info)))
class DeleteRole(command.Command):
    """Delete role(s)"""

    log = logging.getLogger(__name__ + '.DeleteRole')

    def get_parser(self, prog_name):
        parser = super(DeleteRole, self).get_parser(prog_name)
        parser.add_argument(
            'roles',
            metavar='<role>',
            nargs="+",
            help='Role(s) to delete (name or ID)',
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each role named on the command line (name or ID)."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity
        roles_manager = identity_client.roles
        for name_or_id in parsed_args.roles:
            matched = utils.find_resource(roles_manager, name_or_id)
            roles_manager.delete(matched.id)
        return
class ListRole(lister.Lister):
    """List roles"""
    log = logging.getLogger(__name__ + '.ListRole')
    def get_parser(self, prog_name):
        """Add --domain/--project and --user/--group filter options."""
        parser = super(ListRole, self).get_parser(prog_name)
        domain_or_project = parser.add_mutually_exclusive_group()
        domain_or_project.add_argument(
            '--domain',
            metavar='<domain>',
            help='Filter roles by <domain> (name or ID)',
        )
        domain_or_project.add_argument(
            '--project',
            metavar='<project>',
            help='Filter roles by <project> (name or ID)',
        )
        user_or_group = parser.add_mutually_exclusive_group()
        user_or_group.add_argument(
            '--user',
            metavar='<user>',
            help='Filter roles by <user> (name or ID)',
        )
        user_or_group.add_argument(
            '--group',
            metavar='<group>',
            help='Filter roles by <group> (name or ID)',
        )
        return parser
    def take_action(self, parsed_args):
        """List roles, optionally restricted to a user/group on a domain/project.

        The locals user/group/domain/project below are bound only when the
        matching option was supplied; each later branch is guarded by the
        same conditions, so it only reads names that were actually bound.
        """
        self.log.debug('take_action(%s)' % parsed_args)
        identity_client = self.app.client_manager.identity
        # Resolve whichever actor filter (user or group) was supplied.
        if parsed_args.user:
            user = utils.find_resource(
                identity_client.users,
                parsed_args.user,
            )
        elif parsed_args.group:
            group = utils.find_resource(
                identity_client.groups,
                parsed_args.group,
            )
        # Resolve whichever scope filter (domain or project) was supplied.
        if parsed_args.domain:
            domain = utils.find_resource(
                identity_client.domains,
                parsed_args.domain,
            )
        elif parsed_args.project:
            project = utils.find_resource(
                identity_client.projects,
                parsed_args.project,
            )
        # no user or group specified, list all roles in the system
        if not parsed_args.user and not parsed_args.group:
            columns = ('ID', 'Name')
            data = identity_client.roles.list()
        elif parsed_args.user and parsed_args.domain:
            columns = ('ID', 'Name', 'Domain', 'User')
            data = identity_client.roles.list(
                user=user,
                domain=domain,
            )
            # Decorate each assignment with display names for the listing.
            for user_role in data:
                user_role.user = user.name
                user_role.domain = domain.name
        elif parsed_args.user and parsed_args.project:
            columns = ('ID', 'Name', 'Project', 'User')
            data = identity_client.roles.list(
                user=user,
                project=project,
            )
            for user_role in data:
                user_role.user = user.name
                user_role.project = project.name
        elif parsed_args.user:
            columns = ('ID', 'Name')
            # NOTE(review): passes the literal string 'default' as the domain
            # rather than a resolved domain resource -- confirm this matches
            # the keystoneclient API's expectation for a bare --user listing.
            data = identity_client.roles.list(
                user=user,
                domain='default',
            )
        elif parsed_args.group and parsed_args.domain:
            columns = ('ID', 'Name', 'Domain', 'Group')
            data = identity_client.roles.list(
                group=group,
                domain=domain,
            )
            for group_role in data:
                group_role.group = group.name
                group_role.domain = domain.name
        elif parsed_args.group and parsed_args.project:
            columns = ('ID', 'Name', 'Project', 'Group')
            data = identity_client.roles.list(
                group=group,
                project=project,
            )
            for group_role in data:
                group_role.group = group.name
                group_role.project = project.name
        else:
            # Reached only for --group without --domain/--project; a bare
            # --user is accepted above, so the two actors are asymmetric.
            sys.stderr.write("Error: If a user or group is specified, either "
                             "--domain or --project must also be specified to "
                             "list role grants.\n")
            return ([], [])
        return (columns,
                (utils.get_item_properties(
                    s, columns,
                    formatters={},
                ) for s in data))
class RemoveRole(command.Command):
    """Remove role from domain/project : user/group"""

    log = logging.getLogger(__name__ + '.RemoveRole')

    def get_parser(self, prog_name):
        parser = super(RemoveRole, self).get_parser(prog_name)
        parser.add_argument(
            'role',
            metavar='<role>',
            help='Role to remove (name or ID)',
        )
        domain_or_project = parser.add_mutually_exclusive_group()
        domain_or_project.add_argument(
            '--domain',
            metavar='<domain>',
            help='Include <domain> (name or ID)',
        )
        domain_or_project.add_argument(
            '--project',
            metavar='<project>',
            help='Include <project> (name or ID)',
        )
        user_or_group = parser.add_mutually_exclusive_group()
        user_or_group.add_argument(
            '--user',
            metavar='<user>',
            help='Include <user> (name or ID)',
        )
        user_or_group.add_argument(
            '--group',
            metavar='<group>',
            help='Include <group> (name or ID)',
        )
        return parser

    def _resolve_revoke_kwargs(self, identity_client, parsed_args):
        """Resolve name/ID arguments into keyword args for roles.revoke().

        Returns a dict containing exactly one of 'user'/'group' and one of
        'domain'/'project' (as IDs), or None when the combination supplied
        on the command line is incomplete.
        """
        kwargs = {}
        if parsed_args.user:
            kwargs['user'] = utils.find_resource(
                identity_client.users,
                parsed_args.user,
            ).id
        elif parsed_args.group:
            kwargs['group'] = utils.find_resource(
                identity_client.groups,
                parsed_args.group,
            ).id
        if parsed_args.domain:
            kwargs['domain'] = utils.find_resource(
                identity_client.domains,
                parsed_args.domain,
            ).id
        elif parsed_args.project:
            kwargs['project'] = utils.find_resource(
                identity_client.projects,
                parsed_args.project,
            ).id
        has_actor = 'user' in kwargs or 'group' in kwargs
        has_target = 'domain' in kwargs or 'project' in kwargs
        if not (has_actor and has_target):
            return None
        return kwargs

    def take_action(self, parsed_args):
        """Revoke <role> from the selected user/group on a domain/project.

        Refactored: the four near-identical user/group x domain/project
        branches are collapsed into a single helper + revoke() call.
        """
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity

        # No target arguments at all: silently do nothing (legacy behavior).
        if (not parsed_args.user and not parsed_args.domain
                and not parsed_args.group and not parsed_args.project):
            return
        role = utils.find_resource(
            identity_client.roles,
            parsed_args.role,
        )

        kwargs = self._resolve_revoke_kwargs(identity_client, parsed_args)
        if kwargs is None:
            # Incomplete combination, e.g. --user without --domain/--project.
            sys.stderr.write("Role not removed, incorrect set of arguments \
provided. See openstack --help for more details\n")
            return
        identity_client.roles.revoke(role.id, **kwargs)
        return
class SetRole(command.Command):
    """Set role properties"""

    log = logging.getLogger(__name__ + '.SetRole')

    def get_parser(self, prog_name):
        parser = super(SetRole, self).get_parser(prog_name)
        parser.add_argument(
            'role',
            metavar='<role>',
            help='Role to modify (name or ID)',
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help='Set role name',
        )
        return parser

    def take_action(self, parsed_args):
        """Rename the role; a missing --name makes this a no-op."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity
        new_name = parsed_args.name
        if not new_name:
            return
        role = utils.find_resource(identity_client.roles, parsed_args.role)
        identity_client.roles.update(role.id, name=new_name)
        return
class ShowRole(show.ShowOne):
    """Display role details"""

    log = logging.getLogger(__name__ + '.ShowRole')

    def get_parser(self, prog_name):
        parser = super(ShowRole, self).get_parser(prog_name)
        parser.add_argument(
            'role',
            metavar='<role>',
            help='Role to display (name or ID)',
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the role by name or ID and return its properties."""
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity
        role = utils.find_resource(identity_client.roles, parsed_args.role)
        info = role._info
        # 'links' is API plumbing, not useful display output.
        info.pop('links')
        return zip(*sorted(six.iteritems(info)))
| [
"tomonaga@mx2.mesh.ne.jp"
] | tomonaga@mx2.mesh.ne.jp |
be480facdbf7f43c654461fa2321b830698119e3 | 25112bd560d94eb34d1b11c5e074435e08e521d5 | /space_invaders.pyde | 365f9cde2b07576a917211e2aa9985fde344bd10 | [] | no_license | villa-version/space_invaders | 586bdb09b3604f6cd09c37f0e06012aa2d0221d4 | 3a66f72182230ee8535b6ec37b28a26a84a8e328 | refs/heads/main | 2023-03-20T06:03:43.573906 | 2021-03-07T11:36:22 | 2021-03-07T11:36:22 | 341,692,041 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | pyde | from MainConstructor import MainConstructor
main_constructor = None
add_library('minim')
def setup():
    """Processing.py sketch initialization: runs once at startup.

    Sets up the 800x600 canvas, centered drawing modes, loads every sound
    effect through the Minim library, and builds the game's main object.
    """
    size(800,600)
    imageMode(CENTER)
    rectMode(CENTER)
    ellipseMode(CENTER)
    textSize(24)
    # Minim needs the sketch's PApplet ('this' is provided by Processing).
    minim_music = Minim(this)
    mainMenuSound = minim_music.loadFile('sound/soundForSpaceInvMainMenu.wav')
    soundMouse = minim_music.loadFile('sound/spaceInvClick.wav')
    soundSpace = minim_music.loadFile('sound/soundForSpaceInvSpace.wav')
    soundGun = minim_music.loadFile('sound/soundForSpaceInvGun.wav')
    soundClick = minim_music.loadFile('sound/soundMouse.mp3')
    takeSomething = minim_music.loadFile('sound/NumberTakeSome/number1/takeSomething_1.wav')
    soundCoin = minim_music.loadFile('sound/coin.wav')
    soundSpaceShip = minim_music.loadFile('sound/soundSpaceShip.mp3')
    global main_constructor
    main_constructor = MainConstructor(mainMenuSound, soundMouse, soundSpace, soundGun, soundClick, takeSomething, soundCoin, soundSpaceShip)
def draw():
    """Frame loop: clear the background, advance the game, handle held mouse."""
    background(70,174,183)
    main_constructor.game(mousePressed)
    # While the button is held down, keep firing the secondary weapon.
    if mousePressed:
        main_constructor.shootWithSecondWeapon()
def mousePressed():
    """Mouse-press event: fire the primary weapon once per click."""
    global main_constructor
    main_constructor.shootWithFirstWeapon()
| [
"volinetsilia@gmail.com"
] | volinetsilia@gmail.com |
03b4afb727f1ffbae815e5573669e969ac331f09 | bf164105f07c4412160cc76eccef106344b36fe8 | /code/setup.py | 82056f7917168b45959aa945008a721faa189685 | [] | no_license | adhuri/DICYRT | 67d36da2f8f76ce1db6bee75f415642f3b9d68ea | cd456129f9b9645aec04dd61272a67735cb50585 | refs/heads/master | 2020-04-18T01:31:07.261263 | 2017-03-19T06:12:24 | 2017-03-19T06:12:24 | 67,916,708 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | __author__ = 'ANIKETDHURI'
class DICYRT():
"""
Contains config details for :
MongoDB
"""
| [
"aniket.dhuri@gmail.com"
] | aniket.dhuri@gmail.com |
26d15922d0ecfedcaf2173747381f0a5c15f707c | 12bbad5d5f81f0c9b11fc9db3dee69b901f44469 | /api/models.py | b9eb84a065c44cc4f0ff0d856770f641198a8578 | [] | no_license | youflox/news | d5e0432e57093d6f4c49015d361ffe01cbd1c48e | e0e829727ac046f36fdeaf9335002c1c2c83684d | refs/heads/master | 2023-01-08T21:07:11.295421 | 2020-11-06T07:47:57 | 2020-11-06T07:47:57 | 309,968,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length=200)
description = models.CharField(max_length=200, blank=True)
paragraph = models.TextField(null=False)
author = models.ForeignKey(User, on_delete=models.DO_NOTHING)
date = models.DateTimeField(auto_now=True)
slug = models.SlugField(max_length=100, unique=True)
tags = models.CharField(max_length=200)
image = models.ImageField(upload_to='images', null=False, blank=False)
def __str__(self):
return self.title
| [
"youflox@gmail.com"
] | youflox@gmail.com |
48dd32f18373f2b389e32630ded0044734fd4b19 | 4d44674625100e62be2bb5033339fb641bd454ac | /snippet/example/python/project/project/db/sqlalchemy/models.py | 782d92b417a09747274a173923da7001f80a4da4 | [
"MIT",
"Apache-2.0"
] | permissive | xgfone/snippet | 8b9004a649d2575b493a376c4b4f3d4a7c56a4b0 | b0b734dd35478b7ef3e6193623981f4f29b6748c | refs/heads/master | 2022-03-18T12:41:09.033144 | 2022-02-20T15:26:35 | 2022-02-20T15:26:35 | 41,615,643 | 158 | 61 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | # coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals, division

import logging

from oslo_db.sqlalchemy import models
from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
# Bug fix: the module attribute is `func` (SQL function factory, used as
# func.now() by the models below), not `fun` -- importing `fun` raised
# ImportError and made this module unimportable.
from sqlalchemy.sql import func

LOG = logging.getLogger(__name__)

# Declarative base class shared by all models in this module.
BASE = declarative_base()
class TestData(models.ModelBase, BASE):
    """Simple demo model: an auto-incremented row holding a data string."""
    __tablename__ = 'test_data'
    id = Column(Integer, primary_key=True, autoincrement=True)
    data = Column(String(256), nullable=False)
    # Timestamp is filled in by the database (NOW()) on insert.
    create_time = Column(DateTime, server_default=func.now(), nullable=False)
    def __init__(self, *args, **kwargs):
        """Accept column values as keyword arguments; *args is ignored."""
        super(TestData, self).__init__()
        for k, v in kwargs.items():
            setattr(self, k, v)
def create_tables(engine=None):
    """Create all tables defined on BASE.

    :param engine: a SQLAlchemy Engine, a database URL string, or None.
        When None, the URL is taken from sys.argv[1], falling back to an
        in-memory SQLite database.
    """
    if not engine:
        try:
            import sys
            engine = sys.argv[1]
        except IndexError:
            engine = "sqlite:///:memory:"
    # Bug fix: previously create_engine() was only called for the default
    # branch, so a URL passed as an argument (including the __main__ call
    # below) was handed to create_all() as a plain string.  Duck-type on
    # 'connect' so both Engine objects and URL strings are accepted.
    if not hasattr(engine, 'connect'):
        engine = create_engine(engine, echo=True)
    BASE.metadata.create_all(engine)
if __name__ == '__main__':
    create_tables("sqlite:///:memory:")
| [
"xgfone@126.com"
] | xgfone@126.com |
d20d839db4208ddee9e1c7dd031a34f7f551e9a3 | 709dd4adba6ab990162660bf426ebb4cf8e35dca | /app/controller.py | 78ae4136b5d540cae15fd2cc3072443274f4c5f2 | [] | no_license | johnbomba/giz-tteller | ffefab0fa06bb99d91f6e6815d36d1c460679b05 | 441f6d10eefe15362cd0727165e4fd2e59ba393a | refs/heads/master | 2020-09-13T13:01:51.821679 | 2019-11-20T19:53:41 | 2019-11-20T19:53:41 | 222,789,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | from app import view # imports from module folders are relative to the file being executed by python3 since main.py is at the top level, imports need to come from app. even for the files inside of app
from app import model
def run():
model.load()
while True:
user_account = login_menu() # returns the logged in user or None for quit
if user_account == None: # login menu returns None if 'quit' is selected
break
else:
main_menu(user_account) # when the user exits the main menu they will go back to login
def login_menu():
while True:
view.print_login_menu()
choice = view.login_prompt().strip()
if choice not in ("1", "2", "3"):
view.bad_login_input()
elif choice == "3":
view.goodbye()
return None # return None for quit
elif choice == "1":
""" TODO: prompt for firstname, lastname, and pin, and confirm pin
create the account and then tell the user what their new account number is """
pass
elif choice == "2":
""" TODO: prompt functions to ask for account and PIN, use try, except
to check for bad login """
return model.login("012345", "1234")
def create_account():
""" call this from the main login loop """
pass
def login_attempt():
""" call this from the main login loop """
pass
def main_menu(user):
while True:
view.print_main_menu(user)
choice = view.main_prompt()
""" TODO: add bad input message """
if choice == "4":
user["balance"] += 1.0 # delete this, just demonstrating model.save()
model.save(user)
return
""" TODO: implement the various options """
| [
"bomba.john@gmail.com"
] | bomba.john@gmail.com |
a1f9f2880c5805d0642099f67fac1e61760b9185 | c342d39a064441d7c83b94e896dfbac1dc155666 | /setup.py | cc22030282c6d003af194c2c298389e898f5d44d | [
"MIT"
] | permissive | arsho/generator | a67d876bf9dded9bacdbd50a9ab3999f90c81731 | 5dc346850ec99a47ca7c074e3e5dec0b5fff30e2 | refs/heads/master | 2021-01-01T16:54:41.955771 | 2017-07-21T14:37:34 | 2017-07-21T14:37:34 | 97,951,569 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # -*- coding: utf-8 -*-
from setuptools import setup
def readme():
    """Return the contents of README.rst (used as the long description)."""
    with open('README.rst', encoding='utf8') as readme_file:
        return readme_file.read()
# Package metadata for PyPI; long_description is pulled from README.rst.
setup(name='generator',
      version='0.0.1',
      description='Generator is a package for generating strong password and check strength of user defined password.',
      long_description=readme(),
      install_requires=[],
      classifiers=[
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Development Status :: 5 - Production/Stable',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ],
      keywords='password generator strength pass',
      url='http://github.com/arsho/generator',
      author='Ahmedur Rahman Shovon',
      author_email='shovon.sylhet@gmail.com',
      license='MIT',
      packages=['generator'],
      include_package_data=True,
      zip_safe=False
      )
| [
"shovon.sylhet@gmail.com"
] | shovon.sylhet@gmail.com |
8f09ee1c175eaa67db58c061ed1f27c69414af94 | 20ade86a0c0f0ca6be3fae251488f985c2a26241 | /exp/analyze_5.py | d038d5fa9c073324d036a898b7df5cf86f573c6a | [] | no_license | adysonmaia/phd-sp-static | 69344fdd4edb4c216e4b88b0193308b33a30e72c | 79038d165c19f90e1f54597f7049553720f34c74 | refs/heads/master | 2023-04-14T15:59:07.414873 | 2019-10-24T07:56:37 | 2019-10-24T07:56:37 | 355,110,847 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,099 | py | import csv
import numpy as np
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
# Figure export resolution (dots per inch) used by plt.savefig.
DPI = 100
# Per-metric y-axis configuration: display label and fixed [min, max] range.
Y_PARAM = {
    'max_dv': {
        'label': 'Deadline Violation - ms',
        'limit': [0.0, 10.0]
    },
    'dsr': {
        'label': 'Deadline Satisfaction - %',
        'limit': [40.0, 100.0]
    },
    'avg_rt': {
        'label': 'Response Time - ms',
        'limit': [0.0, 18.0]
    },
    'cost': {
        'label': 'Cost',
        'limit': [1000.0, 1500.0]
    },
    'max_unavail': {
        'label': 'Availability - %',
        'limit': [70.0, 100.0]
    },
    'avg_unavail': {
        'label': 'Unavailability - %',
        'limit': [0.0, 10.0]
    },
    'avg_avail': {
        'label': 'Availability - %',
        'limit': [0.0, 100.0]
    },
    'time': {
        'label': 'Execution Time - s',
        'limit': [0.0, 300.0]
    },
}
# Per-field x-axis configuration (limits currently unused; xlim is commented
# out in gen_figure).
X_PARAM = {
    'probability': {
        'label': 'Elite Probability',
        'limit': [10, 90],
    },
    'stop_threshold': {
        'label': 'Stop Threshold',
        'limit': [0, 1],
    }
}
def get_data_from_file(filename):
    """Load the experiment CSV as a list of per-row dicts.

    NOTE(review): the ``line_count > 0`` guard skips the FIRST data row
    (DictReader has already consumed the header line) -- presumably the
    file carries an extra header/summary row; confirm against exp_5.csv.
    """
    results = []
    with open(filename) as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count > 0:
                results.append(row)
            line_count += 1
    return results
def filter_data(data, **kwargs):
    """Return the rows of *data* matching every filter in **kwargs.

    Each keyword maps a column name to an accepted value or list of
    values; comparisons are done on str() of the filter values, since
    CSV rows hold strings.  Columns absent from the filters are ignored.
    """
    def as_str_list(value):
        # Normalize a scalar or list filter value into a list of strings.
        items = value if isinstance(value, list) else [value]
        return [str(item) for item in items]

    criteria = {key: as_str_list(value) for key, value in kwargs.items()}

    def matches(row):
        return all(cell in criteria[col]
                   for col, cell in row.items() if col in criteria)

    return [row for row in data if matches(row)]
def format_metric(value, metric):
    """Convert a raw metric value to its display scale (float).

    Ratio metrics are rescaled to percentages; 'max_unavail' is also
    flipped from unavailability to availability.  Anything else passes
    through unchanged apart from the float() coercion.
    """
    raw = float(value)
    if metric == 'max_unavail':
        return 100.0 * (1.0 - raw)
    if metric in ('avg_unavail', 'avg_avail', 'dsr'):
        return 100.0 * raw
    return raw
def format_field(value, field):
    """Coerce an x-axis value to float, rounding thresholds to 2 decimals."""
    number = float(value)
    return round(number, 2) if field == 'stop_threshold' else number
def calc_stats(values):
    """Return (mean, half-width of the 95% Student-t confidence interval).

    The error term is 0.0 whenever the standard error is not strictly
    positive (constant samples, or NaN from a single sample).
    """
    sample_count = len(values)
    mean = np.mean(values)
    std_err = st.sem(values)
    if not std_err > 0.0:
        return mean, 0.0
    # The t interval is symmetric around the mean: keep the upper half-width.
    _, upper = st.t.interval(0.95, sample_count - 1, loc=mean, scale=std_err)
    return mean, upper - mean
def gen_figure(data, metric, x, x_field, data_filter, filename=None):
    """Plot mean +/- 95% CI of *metric* against the values in *x*.

    *data_filter* pre-selects the rows (e.g. one solution/version); for
    each x value the rows matching ``{x_field: x_value}`` are aggregated
    with calc_stats.  Shows the figure interactively when *filename* is
    falsy, otherwise saves it as a PNG.
    """
    plt.clf()
    matplotlib.rcParams.update({'font.size': 20})
    filtered = filter_data(data, **data_filter)
    y = []
    y_errors = []
    for x_value in x:
        x_filter = {x_field: x_value}
        x_data = filter_data(filtered, **x_filter)
        values = list(map(lambda r: format_metric(r[metric], metric), x_data))
        mean, error = calc_stats(values)
        y.append(mean)
        y_errors.append(error)
        print("{} x={:.1f}, y={:.1f}".format(metric, x_value, mean))
    # Round/coerce the x values only for display, after filtering.
    x = [format_field(i, x_field) for i in x]
    plt.errorbar(x, y, yerr=y_errors, markersize=10, fmt='-o')
    plt.subplots_adjust(bottom=0.2, top=0.97, left=0.12, right=0.96)
    x_param = X_PARAM[x_field]
    y_param = Y_PARAM[metric]
    plt.xlabel(x_param['label'])
    plt.ylabel(y_param['label'])
    plt.ylim(*y_param['limit'])
    # plt.xlim(*x_param['limit'])
    plt.xticks(x)
    plt.grid(True)
    if not filename:
        plt.show()
    else:
        plt.savefig(filename, dpi=DPI, bbox_inches='tight', pad_inches=0.05)
def run():
    """Generate one figure per (metric, solution) pair for experiment 5."""
    data = get_data_from_file('exp/output/exp_5.csv')
    all_solutions = [
        ('moga', 'preferred'),
    ]
    # Metrics to plot; commented entries are intentionally disabled.
    metric_solutions = {
        'max_dv': all_solutions,
        # 'dsr': all_solutions,
        # 'avg_rt': all_solutions,
        'cost': all_solutions,
        'avg_unavail': all_solutions,
        'time': all_solutions
    }
    params = [
        {
            'title': 'st',
            'filter': {},
            'x_field': 'stop_threshold',
            'x_values': np.arange(0.0, 0.6, 0.1)
        },
    ]
    for param in params:
        for metric, solutions in metric_solutions.items():
            for solution, sol_version in solutions:
                fig_title = param['title']
                # NOTE(review): 'filter' shadows the builtin and mutates
                # the shared param['filter'] dict across iterations; it
                # works here because the same keys are overwritten.
                filter = param['filter']
                filter['solution'] = solution
                filter['version'] = sol_version
                x = param['x_values']
                x_field = param['x_field']
                filename = "exp/figs/exp_5/fig_{}_{}_{}_{}.png".format(
                    fig_title, metric, solution, sol_version
                )
                gen_figure(data, metric, x, x_field, filter, filename)
if __name__ == '__main__':
    print("Execute as 'python3 analyze.py exp_5'")
| [
"adyson.maia@gmail.com"
] | adyson.maia@gmail.com |
b6a9c5e223340c1cbc33d718e31f775d955b1a8d | f0d9477c3079ff955b826c8ba2f4d043c17c012c | /home/models.py | c9c0352949dd5d3c6c652faf271cd3fd4fb9b767 | [] | no_license | embshao/junkfund | f19859b4f561da79c47b3dbc0dfea98c923fa25b | e32d47fe0d248b9b47cfb1dc32602b57869df81b | refs/heads/master | 2020-04-06T17:30:57.855468 | 2018-12-06T08:07:10 | 2018-12-06T08:07:10 | 157,661,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
class HomePage(Page):
body = RichTextField(blank=True)
origin_statement = RichTextField(blank=True)
mission_statement = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('body', classname="full"),
FieldPanel('origin_statement'),
FieldPanel('mission_statement')
] | [
"embshao@Emilys-MacBook-Pro.local"
] | embshao@Emilys-MacBook-Pro.local |
17d235e0928561692d73711efe48d58fd5d371fa | 06aa3ec3262f6dd6866ea194ed6385f8e53509bf | /manuscript_codes/AML211DiffALL/remove_nonexistent_fromAnnotatedcsv.py | 409adfbaa7c37d20329ae26f43f38331d13472ce | [] | no_license | KuehLabUW/UPSIDE | 95ce078382792d1beb0574c3b19c04e467befa58 | 3c90de9677f24e258800cb95bce6cb528f4ad4ac | refs/heads/master | 2023-07-13T15:58:07.963672 | 2021-08-30T21:14:48 | 2021-08-30T21:14:48 | 329,134,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 16:21:25 2019
this script concatenates fluorescent data and subim data from separate csv files
and return a merged csv file for all positions
@author: phnguyen
"""
import pandas as pd
import os
csvs_dirname = '/media/phnguyen/Data2/Imaging/CellMorph/data/AML211DiffALL/csvs/'
os.chdir(csvs_dirname)
filename = 'AML211DiffALL_LargeMask_Annotated.csv'
df = pd.read_csv(filename)
print(len(df))
pos = [];
for i in range(len(df.index)):
if os.path.isfile(df.dirname[i]) == False:
pos.append(i)
print(i)
df.drop(df.index[pos], inplace=True)
#save the combined dataframe
df.to_csv(csvs_dirname+'AML211DiffALL_LargeMask_Annotated_trimmed.csv', sep=',')
| [
"kuehlab@uw.edu"
] | kuehlab@uw.edu |
1ab07df2d4cec989d96642020c6ad40a27c362f8 | 20931bf9e3f24b4acd5fcbf5e50cfa77d61b25e5 | /msds510/src/write_csv.py | 781e934c9c33dcd19dea2bd7420addb247c003b2 | [] | no_license | Yasa-Mufasa/School-Work-DSC510 | db273878c367038e4dd541c8365ee0c1ec577a7f | 7f8334a830e3347bfafa408f8d901afd6e083c13 | refs/heads/master | 2020-03-30T18:23:07.401211 | 2018-10-14T06:33:05 | 2018-10-14T06:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | import sys
import csv
def argumentExists(index):
    """Return sys.argv[index] if it exists, otherwise an empty string."""
    try:
        return sys.argv[index]
    except IndexError:
        return ''
def fixHeader(headerToFix):
    """Normalize a CSV header name.

    Lowercases, strips surrounding newlines/'?'/whitespace, and replaces
    '/' with '_' so the name is usable as a dict key.
    """
    corr_category = headerToFix.lower()
    # Bug fix: the result of this strip chain was previously discarded
    # (strings are immutable), leaving trailing '?', newlines and spaces
    # in the header -- assign it back.
    corr_category = corr_category.strip('\n').strip('?').rstrip().lstrip()
    corr_category = corr_category.replace('/', '_')
    return corr_category
def processedCSV(output, headers, input):
    """Write *input* (an iterable of row dicts) to *output* as CSV.

    The header row is written first, followed by each record in the
    given *headers* column order.
    """
    with open(output, 'w', newline='') as written:
        writer = csv.DictWriter(written, fieldnames=headers)
        # Header row: each column maps to its own name.
        writer.writerow({header: header for header in headers})
        for record in input:
            writer.writerow(record)
def readRows(inputCSV):
    """Read *inputCSV*, normalize its header names via fixHeader, and
    return (fieldnames, rows) where rows is a list of row dicts."""
    with open(inputCSV, 'r') as handle:
        reader = csv.DictReader(handle)
        # Rewrite the field names before iterating so every row dict
        # is keyed by the normalized header names.
        reader.fieldnames = [fixHeader(name) for name in reader.fieldnames]
        rows = list(reader)
        return reader.fieldnames, rows
if __name__ == '__main__':
    # Usage: write_csv.py <input.csv> <output.csv>; silently does nothing
    # when either argument is missing.
    csvToRead = argumentExists(1)
    csvToCreate = argumentExists(2)
    if csvToRead and csvToCreate:
        getCSV = readRows(csvToRead)
        processedCSV(csvToCreate, getCSV[0], getCSV[1])
| [
"noreply@github.com"
] | Yasa-Mufasa.noreply@github.com |
aed8838692f1449833f1bc080c28906f17883d7e | 807d1039976331aaed823d9afda8e29ee4232694 | /app/api_github.py | 30baea0e8e8b0ff4a981adec46cc2dd2a13a68ae | [
"MIT"
] | permissive | bemanuel/PUGMA-bot | 0b31165cb2b08c5f38dc01fc2d4a598b0398212c | e2cb11d57ec7409c62a4c61d2b1250e4e8b8e9a9 | refs/heads/master | 2020-03-30T04:36:59.962128 | 2018-08-25T08:43:24 | 2018-08-25T08:43:24 | 150,753,080 | 0 | 0 | MIT | 2018-09-28T14:32:27 | 2018-09-28T14:32:26 | null | UTF-8 | Python | false | false | 885 | py | import requests
import json
class Github:
    """Thin client for the GitHub contents API of the pug-ma/meetups repo."""

    def __init__(self):
        self.user = 'pug-ma'
        self.repository = 'meetups'
        self.base_url = 'https://api.github.com/repos'

    def _name_encontro(self, index):
        # Meetup photos are stored as 'PUG-MA #NN.jpg'; pad single digits.
        padded = '0' + index if len(index) == 1 else index
        return requests.utils.quote(f'PUG-MA #{padded}.jpg')

    def photo_encontro(self, index):
        """Return the download URL of meetup *index*'s photo."""
        url = (f'{self.base_url}/{self.user}/{self.repository}'
               f'/contents/palestras/{self._name_encontro(index)}')
        payload = json.loads(requests.get(url).content)
        return payload.get('download_url')

    def photo_last_encontro(self):
        """Return the download URL of the most recent meetup photo."""
        url = (f'{self.base_url}/{self.user}/{self.repository}'
               f'/contents/palestras/')
        payload = json.loads(requests.get(url).content)
        # Directory listings come back ordered; the last entry is newest.
        return payload[-1].get('download_url')
| [
"lucasinfologos@gmail.com"
] | lucasinfologos@gmail.com |
7b20eeb2fb379e209c3fb4674150a090949fec81 | 8398073ca4e7b4e9894f037ff87c5c2a2c2d7658 | /sorting/quick_sort.py | a4352435158fd2ae6b27e8c59f6f03df82c5e54c | [] | no_license | mtvillwock/cs | 9c78262e29c23eabdecd4778c163140c0a1c0061 | 7f4f0236ecc1769ac84bbebdaf5cd3fcbd65f8f7 | refs/heads/master | 2020-03-31T19:16:33.000451 | 2015-12-29T22:47:53 | 2015-12-29T22:47:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #!/usr/bin/python
# Quick Sort class that allows for O(lg 2 n) to O(n^2) sorting of items,
# depending on where pivot picked happens to be within context of all sorted items
# Low memory overhead as it simply swaps items during comparisons
class QuickSort:
    """In-place quicksort over ``items``.

    Average O(n log n); degrades to O(n^2) when the pivot (always the
    last element of the range) lands at an extreme of the sorted order.
    Low memory overhead: elements are swapped in place.
    """

    def __init__(self, items):
        self.items = items

    def sort(self, low=None, high=None):
        """Recursively sort items[low..high] (inclusive); defaults to all."""
        lo = 0 if low is None else low
        hi = len(self.items) - 1 if high is None else high
        if hi > lo:
            split = self.partition(lo, hi)
            self.sort(lo, split - 1)
            self.sort(split + 1, hi)

    def partition(self, low, high):
        """Partition around items[high]; return the pivot's final index."""
        data = self.items
        boundary = low  # first index holding a value >= the pivot
        for scan in range(low, high):
            if data[scan] < data[high]:
                data[scan], data[boundary] = data[boundary], data[scan]
                boundary += 1
        # Drop the pivot into its sorted position.
        data[high], data[boundary] = data[boundary], data[high]
        return boundary
import random
# Demo (Python 2 syntax): shuffle a fixed list, sort it, print both views.
print "Initialize QuickSort with list"
items = [3, 45, 89, 1, 7, 34940, 222222, 18, 2342, 344, 34, 233]
# Shuffle items randomly
random.shuffle(items)
print "Unsorted items: " + ", ".join(str(i) for i in items)
ms = QuickSort(items)
ms.sort()
print "Sorted items: " + ", ".join(str(i) for i in ms.items)
| [
"geeosh@gmail.com"
] | geeosh@gmail.com |
b39f7d7bc5979960cc3a326e3a5e41d319fc3636 | 16c5a7c5f45a6faa5f66f71e043ce8999cb85d80 | /app/honor/student/listen_everyday/object_page/history_page.py | 014714a71b529b852af33e51e693c88f7b3b6757 | [] | no_license | vectorhuztt/test_android_copy | ca497301b27f49b2aa18870cfb0fd8b4640973e5 | f70ab6b1bc2f69d40299760f91870b61e012992e | refs/heads/master | 2021-04-03T19:26:48.009105 | 2020-06-05T01:29:51 | 2020-06-05T01:29:51 | 248,389,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | # coding: utf-8
# -------------------------------------------
# Author: Vector
# Date: 2018/12/17 16:11
# -------------------------------------------
from selenium.webdriver.common.by import By
from app.honor.student.login.object_page.home_page import HomePage
from conf.base_page import BasePage
from conf.decorator import teststep
from utils.wait_element import WaitElement
class HistoryPage(BasePage):
    """Page object for the student's "listen every day" history screen.

    Wraps the Selenium/Appium locators for the history list and the
    per-game detail pages (the Chinese literals below are on-screen
    text/resource values and must match the app exactly).
    """
    wait = WaitElement()
    home = HomePage()
    @teststep
    def wait_check_history_page(self):
        # History title text: '历史推荐' ("history recommendations").
        locator = (By.XPATH, "//android.widget.TextView[@text='历史推荐']")
        return self.wait.wait_check_element(locator)
    @teststep
    def wait_check_clear_button_page(self):
        locator = (By.ID, self.id_type() + 'clear')
        return self.wait.wait_check_element(locator, timeout=5)
    @teststep
    def wait_check_red_hint_page(self):
        locator = (By.ID, self.id_type() + 'tv_hint')
        return self.wait.wait_check_element(locator, timeout=5)
    @teststep
    def wait_check_img_page(self):
        locator = (By.ID, self.id_type() + 'img')
        return self.wait.wait_check_element(locator, timeout=5)
    @teststep
    def wait_check_tips_page(self):
        locator = (By.ID, self.id_type() + 'md_content')
        return self.wait.wait_check_element(locator, timeout=5)
    @teststep
    def game_name(self):
        """All game-name elements in the history list."""
        locator = (By.ID, self.id_type() + 'game_name')
        return self.wait.wait_find_elements(locator)
    @teststep
    def right_rate(self, game_name):
        """Correct-rate element that is a sibling of the named game entry."""
        locator = (By.XPATH, '//android.widget.TextView[contains(@text,"{0}")]/../following-sibling::android.widget.'
                             'TextView[contains(@resource-id, "{1}right_rate")]'.format(game_name, self.id_type()))
        return self.wait.wait_find_element(locator)
    @teststep
    def game_date(self, game_name):
        """Date element that is a sibling of the named game entry."""
        locator = (By.XPATH, '//android.widget.TextView[contains(@text,"{0}")]/../following-sibling::'
                             'android.widget.TextView[contains(@resource-id,"time")]'.format(game_name))
        return self.wait.wait_find_element(locator)
    @teststep
    def tips_operate_commit(self):
        """Dismiss the friendly-tips dialog via its confirm button."""
        if self.wait_check_tips_page():  # tips dialog is showing
            self.home.tips_content()
            self.home.commit_button()  # confirm button
    @teststep
    def history_page_operate(self):
        """Walk the history list, spot-checking three game detail pages.

        Entries 3/5/7 are opened when their name matches one of the three
        known game types; each detail page is verified for a distinctive
        element, then backed out of.
        """
        print('听力历史处理页面')
        game_names = self.game_name()
        # With 10+ visible entries the last one may be clipped; skip it.
        game_num = len(game_names) if len(game_names) < 10 else len(game_names) - 1
        print('游戏个数:', game_num)
        for i in range(game_num):
            if self.wait_check_history_page():
                name = game_names[i].text
                right_rate = self.right_rate(name).text
                game_date = self.game_date(name).text
                print(name)
                print(right_rate)
                print(game_date)
                if i == 3 or i == 5 or i == 7:
                    # '听音连句' = listen-and-link-sentence game
                    if name == '听音连句':
                        game_names[i].click()
                        if not self.wait_check_clear_button_page():
                            self.base_assert.except_error('Error-- 未发现听音连句的清除按钮')
                        else:
                            print('进入听音连句游戏页面')
                            self.home.click_back_up_button()
                        self.tips_operate_commit()
                    # '听后选择' = listen-then-choose game
                    if name == '听后选择':
                        game_names[i].click()
                        if not self.wait_check_red_hint_page():
                            self.base_assert.except_error('Error-- 未发现听后选择的红色提示')
                        else:
                            print('进入听后选择游戏页面')
                            self.home.click_back_up_button()
                        self.tips_operate_commit()
                    # '听音选图' = listen-and-pick-picture game
                    if name == '听音选图':
                        game_names[i].click()
                        if not self.wait_check_img_page():
                            self.base_assert.except_error('Error-- 未发现听音选图的图片')
                        else:
                            print('进入听音选图游戏页面')
                            self.home.click_back_up_button()
                        self.tips_operate_commit()
            print('-'*30, '\n')
        self.home.click_back_up_button()
| [
"vectorztt@163.com"
] | vectorztt@163.com |
94cf848c42259d154818b2ce9d25437a2bfb5767 | 1b8fea84247058843b1a21aa148a5dcfd5c2ba2b | /app/tournament.py | 9813ec5c540609bcf04275950486ca4bac23bde9 | [] | no_license | aguijarro/tournament_web | 251735dedbc34f65d7d99d0db12060c0e71197c6 | 3b0179a87c21d5556d43f40cca0da494f6c09ee9 | refs/heads/master | 2021-01-10T14:58:47.620453 | 2016-01-28T13:56:08 | 2016-01-28T13:56:08 | 50,531,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,168 | py | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
    """Open and return a new connection to the tournament database."""
    connection = psycopg2.connect("dbname=tournament")
    return connection
def reportTournaments():
    """Return the list of tournaments registered in the database.

    Returns:
      A list of row tuples, one per tournament.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT * FROM tournament;'''
    c.execute(sql)
    # fetchall() already returns a list of row tuples; the original
    # copied it element by element for no benefit.
    tournaments = c.fetchall()
    DB.close()
    return tournaments
def registerRound(name, id_tournament):
    """Insert one round row for a tournament and return its generated id.

    Args:
      name: the round's full name (needs be unique for each tournament).
      id_tournament: id of the owning tournament.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''INSERT INTO round(name, id_tournament)
          VALUES (%s, %s) RETURNING id_round;'''
    cur.execute(sql, (name, id_tournament,))
    conn.commit()
    # RETURNING gives us the freshly generated primary key
    new_round_id = cur.fetchone()[0]
    conn.close()
    return new_round_id
def registerStandings(id_round, id_player, matches, win, lost, tied, action):
    """Insert or update one scoreboard row for a player in a round.

    Args:
      id_round: id of the round the scoreboard row belongs to.
      id_player: id of the player (player_tournament id).
      matches: number of matches the player has played.
      win: number of wins.
      lost: number of losses.
      tied: number of ties.
      action: 'Insert' to create the row, anything else to update it.
    """
    # Pick statement and parameter order up-front so a single
    # connect/execute/commit path serves both actions.
    if action == 'Insert':
        sql = '''INSERT INTO scoreboard(id_round, id_player, matches, win, lost, tied)
                 VALUES (%s, %s, %s, %s, %s, %s);'''
        params = (id_round, id_player, matches, win, lost, tied,)
    else:
        sql = '''UPDATE scoreboard SET matches = %s, win = %s, lost = %s, tied = %s WHERE id_round = %s AND id_player = %s;'''
        params = (matches, win, lost, tied, id_round, id_player,)
    conn = connect()
    cur = conn.cursor()
    cur.execute(sql, params)
    conn.commit()
    conn.close()
def registerTournament(name, description, tournamentPlayers, rounds):
    """Create a tournament and pre-create one round row per round.

    Args:
      name: the tournament's full name (need not be unique).
      description: the tournament's description.
      tournamentPlayers: how many players the tournament will have.
      rounds: how many rounds the tournament will have.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''INSERT INTO tournament(name, description, tournamentPlayers, numberOfRounds)
          VALUES (%s, %s, %s, %s) RETURNING id_tournament;'''
    c.execute(sql, (name, description, tournamentPlayers, rounds,))
    DB.commit()
    id_tournament = c.fetchone()[0]
    DB.close()
    for x in range(0, rounds):
        # The original shadowed the 'name' parameter and stored the round id
        # in an unused local; neither was needed.
        registerRound('Round %d' % x, id_tournament)
def reportRoundsTournament(id_tournament):
    """Return the (name, id_round) rows of all rounds of a tournament.

    Args:
      id_tournament: id of the owning tournament.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT r.name, r.id_round FROM round r WHERE r.id_tournament = %s;'''
    c.execute(sql, (id_tournament,))
    # fetchall() already returns a list; no copy loop needed.
    rounds = c.fetchall()
    DB.close()
    return rounds
def numberPlayerByTournament(id_tournament):
    """Return the configured number of players for a tournament.

    Args:
      id_tournament: the tournament's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT name, tournamentPlayers FROM tournament WHERE id_tournament = %s;'''
    cur.execute(sql, (id_tournament,))
    row = cur.fetchone()
    conn.close()
    # column 1 of the row is tournamentPlayers
    return row[1]
def numberPlayerInTournament(id_tournament):
    """Return how many players are already registered in a tournament.

    Args:
      id_tournament: the tournament's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT count(*) FROM player_tournament WHERE id_tournament = %s;'''
    cur.execute(sql, (id_tournament,))
    registered_count = cur.fetchone()[0]
    conn.close()
    return registered_count
def registerPlayer(name):
    """Add a player to the tournament database.

    Args:
      name: the player's full name (need not be unique).
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''INSERT INTO player(name) VALUES (%s);'''
    cur.execute(sql, (name,))
    conn.commit()
    conn.close()
def reportPlayersTournaments(id_tournament):
    """Return the players registered in a tournament.

    Args:
      id_tournament: the tournament's unique id.

    Returns:
      A list of (name, id_player, id_player_tournament) row tuples.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT p.name, p.id_player, pt.id_player_tournament FROM player_tournament pt, player p
             WHERE p.id_player = pt.id_player
             AND pt.id_tournament = %s;
          '''
    c.execute(sql, (id_tournament,))
    # fetchall() already returns a list; no copy loop needed.
    players = c.fetchall()
    DB.close()
    return players
def assignPlayerTournament(id_tournament, id_player):
    """Assign an existing player to a tournament.

    Args:
      id_tournament: the tournament's unique id.
      id_player: the player's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''INSERT INTO player_tournament(id_tournament, id_player) VALUES (%s,%s);'''
    cur.execute(sql, (id_tournament, id_player,))
    conn.commit()
    conn.close()
def reportPlayers(id_tournament):
    """Return the players NOT yet assigned to the given tournament.

    Args:
      id_tournament: the tournament's unique id.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT * FROM player p WHERE p.id_player NOT IN
             (SELECT pt.id_player FROM player_tournament pt where pt.id_tournament = %s);
          '''
    # BUG FIX: parameters must be passed as a sequence; the original passed
    # the bare value '(id_tournament)' (parentheses alone do not make a
    # tuple), which psycopg2 rejects / misinterprets.
    c.execute(sql, (id_tournament,))
    players = c.fetchall()
    DB.close()
    return players
def firstRoundTournament(id_tournament):
    """Return the id of the first round ('Round 0') of a tournament.

    Args:
      id_tournament: the tournament's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT id_round FROM round WHERE name = 'Round 0' AND id_tournament = %s;'''
    cur.execute(sql, (id_tournament,))
    first_round_id = cur.fetchone()[0]
    conn.close()
    return first_round_id
def initTournament(id_tournament):
    """Start a tournament: mark it active and create the empty scoreboard
    rows of the first round, from which the first matches are drawn.

    Args:
      id_tournament: the tournament's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''UPDATE tournament SET stateTournament = True WHERE id_tournament = %s;'''
    cur.execute(sql, (id_tournament,))
    conn.commit()
    conn.close()
    # players already registered in this tournament
    registered_players = reportPlayersTournaments(id_tournament)
    # id of 'Round 0'
    first_round_id = firstRoundTournament(id_tournament)
    # every player starts the first round with an all-zero scoreboard row
    for player in registered_players:
        registerStandings(first_round_id, player[2], 0, 0, 0, 0, 'Insert')
def reportIdTournament(id_round):
    """Return the id of the tournament that owns the given round.

    Args:
      id_round: the round's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT id_tournament FROM round WHERE id_round = %s;'''
    cur.execute(sql, (id_round,))
    # (the original stored this in a local confusingly named 'id_round')
    tournament_id = cur.fetchone()[0]
    conn.close()
    return tournament_id
def roundStandings(id_round):
    """Return the players of a round and their records, sorted by wins.

    Args:
      id_round: the round's unique id.

    Returns:
      A list of (id_player, name, matches, win, lost, tied) tuples:
        id_player: the player's id in the tournament (player_tournament id)
        name: the player's full name (as registered)
        matches: number of matches the player has played
        win / lost / tied: the respective result counters
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT s.id_player, p.name, s.matches, s.win, s.lost, s.tied
             FROM scoreboard s, player_tournament pt, player p
             WHERE s.id_player = pt.id_player_tournament
             AND pt.id_player = p.id_player
             AND s.id_round = %s
             ORDER BY 4 DESC;
          '''
    c.execute(sql, (id_round,))
    # fetchall() already returns a list; no copy loop needed.
    standings = c.fetchall()
    DB.close()
    return standings
def reportMatchByRound(id_round):
    """Return True when NO match has been registered yet for the round.

    Args:
      id_round: the round's unique id.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT count(*) FROM match WHERE id_round = %s;'''
    c.execute(sql, (id_round,))
    numberMatch = c.fetchone()[0]
    DB.close()
    # Direct boolean return instead of the original if/else that returned
    # True/False explicitly.
    return numberMatch == 0
def reportResultByMatch(id_round):
    """Return True when at least one match of the round still has no result.

    Args:
      id_round: the round's unique id.
    """
    # (leftover debug 'print' statements from the original were removed)
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT result from match where id_round = %s;'''
    c.execute(sql, (id_round,))
    results = c.fetchall()
    DB.close()
    # a NULL result column means the match has not been resolved yet
    return any(result[0] is None for result in results)
def roundMatches(id_round):
    """Return the matches of a round with both players' names and ids.

    Args:
      id_round: the round's unique id.

    Returns:
      A list of (id_match, player_1_name, player_2_name,
      player_1_tournament_id, player_2_tournament_id) tuples.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT m.id_match, p1.name, p2.name, pt1.id_player_tournament, pt2.id_player_tournament
             FROM match m, player_tournament pt1, player_tournament pt2, player p1, player p2
             WHERE m.player_1 = pt1.id_player_tournament
             AND m.player_2 = pt2.id_player_tournament
             AND pt1.id_player = p1.id_player
             AND pt2.id_player = p2.id_player
             AND m.id_round = %s;
          '''
    c.execute(sql, (id_round,))
    # fetchall() already returns a list; no copy loop needed.
    matches = c.fetchall()
    DB.close()
    return matches
def saveMatch(id_round, player_1, player_2):
    """Record one match pairing for a round.

    Args:
      id_round: the round's unique id.
      player_1: tournament id of the first player.
      player_2: tournament id of the second player.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''INSERT INTO match(id_round, player_1, player_2) VALUES (%s,%s,%s);'''
    cur.execute(sql, (id_round, player_1, player_2,))
    conn.commit()
    conn.close()
def updateScoreboardBye(id_round, id_player):
    """Credit a bye to a player: one extra match and one extra win.

    Args:
      id_round: the round's unique id.
      id_player: the player's tournament id.
    """
    conn = connect()
    cur = conn.cursor()
    # current number of matches for the player
    cur.execute('''SELECT get_num_match(%s,%s);''', (id_round, id_player))
    matches_so_far = cur.fetchone()[0]
    # current number of wins for the player
    cur.execute('''SELECT get_num_win(%s,%s);''', (id_round, id_player))
    wins_so_far = cur.fetchone()[0]
    conn.close()
    # a bye counts as a played, won match; lost/tied are written as 0
    registerStandings(id_round, id_player, matches_so_far + 1,
                      wins_so_far + 1, 0, 0, 'Update')
def saveBye(id_round, id_player):
    """Record that a player received a bye in the given round.

    Args:
      id_round: the round's unique id.
      id_player: the player's tournament id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''INSERT INTO bye(id_round, id_player) VALUES (%s,%s);'''
    cur.execute(sql, (id_round, id_player,))
    conn.commit()
    conn.close()
def reportByePlayer(id_round):
    """Return the name of the player who has a bye in the given round.

    Args:
        id_round: the round's unique id (assigned by the database)

    Returns:
        A one-element row tuple ``(name,)`` or ``None`` when no bye exists.
        NOTE(review): unlike reportIdByePlayer this does NOT unpack
        ``fetchone()`` with ``[0]`` -- confirm callers expect the raw tuple.
    """
    DB = connect()
    c = DB.cursor()
    sql = '''SELECT p.name FROM bye b, player_tournament pt, player p
             WHERE b.id_player = pt.id_player_tournament
             AND pt.id_player = p.id_player
             AND b.id_round = %s;
          '''
    c.execute(sql,(id_round,))
    namePlayerBye = c.fetchone()
    DB.close()
    return namePlayerBye
def reportIdByePlayer(id_round):
    """Return the tournament id of the player who has a bye in this round.

    Args:
      id_round: the round's unique id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT pt.id_player_tournament FROM bye b, player_tournament pt, player p
             WHERE b.id_player = pt.id_player_tournament
             AND pt.id_player = p.id_player
             AND b.id_round = %s;
          '''
    cur.execute(sql, (id_round,))
    bye_player_id = cur.fetchone()[0]
    conn.close()
    return bye_player_id
def reportBye(id_round, id_player):
    """Return how many bye rows exist for a player in a round (0 or more).

    Used to ensure a player is never given a bye twice.

    Args:
      id_round: the round's unique id.
      id_player: the player's tournament id.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT count(*) FROM bye WHERE id_round = %s AND id_player = %s;'''
    cur.execute(sql, (id_round, id_player,))
    bye_count = cur.fetchone()[0]
    conn.close()
    return bye_count
def returnPairStandings(id_round, standings):
    """Make an odd standings list even by removing one bye player.

    Picks the first player who has not yet had a bye this round, records
    the bye, removes that player from the list and returns the (mutated)
    list.

    Args:
      id_round: the round's unique id.
      standings: scoreboard rows belonging to the round (mutated in place).
    """
    for position, entry in enumerate(standings):
        player_id = entry[0]
        if int(reportBye(id_round, player_id)) == 0:
            # record the bye and drop this player from the pairing pool
            saveBye(id_round, player_id)
            standings.pop(position)
            break
    return standings
def validPairs(first_player, second_player):
    """Return True when the two players have never been matched before.

    Args:
      first_player: tournament id of the first player.
      second_player: tournament id of the second player.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT count(*) as num_match
             FROM match m
             WHERE (m.player_1 = %s and m.player_2 = %s)
             OR (m.player_1 = %s and m.player_2 = %s);
          '''
    # check both player orderings of a previous match
    cur.execute(sql, (first_player, second_player, second_player, first_player))
    previous_meetings = cur.fetchone()[0]
    conn.close()
    return previous_meetings == 0
def makePairs(index_first_player, first_player, possiblePairs):
    """Pick an opponent for *first_player* from the candidate list.

    Args:
        index_first_player: array position of first_player in the caller's
            standings list (the candidates start one position after it)
        first_player: the standings row of the player to be paired
        possiblePairs: candidate standings rows (a slice of the caller's list)

    Returns:
        (index_in_caller_list, opponent_row) for the first candidate that
        has never played first_player before.
        NOTE(review): falls off the end and implicitly returns None when no
        unseen opponent exists; the caller unpacks the result, which would
        raise TypeError -- confirm this cannot happen in practice.
    """
    for index_second_player, second_player in enumerate(possiblePairs):
        if validPairs(first_player[0],second_player[0]):
            return index_second_player + (index_first_player + 1), second_player
def swissPairings(id_round):
    """Build and record the swiss pairings for the next round of matches.

    Players are taken in standings order; each is paired with the best
    ranked opponent he has not met yet. With an odd player count, one
    player is first removed with a bye (see returnPairStandings).

    Args:
        id_round: the round's unique id (assigned by the database)
    """
    pairs = []
    # standings for the round, best win record first
    standings = roundStandings(id_round)
    # owning tournament, needed to look up the configured player count
    id_tournament = reportIdTournament(id_round)
    playersByTournament = numberPlayerByTournament(id_tournament)
    if (int(playersByTournament) % 2 != 0):
        # odd count: remove one bye player so the list can be paired evenly
        standings = returnPairStandings(id_round,standings)
    while len(standings) > 1:
        index_first_player = 0
        first_player = standings[0]
        # find the first opponent this player has never met
        index_second_player, second_player = makePairs(index_first_player, first_player, standings[1:])
        # remove both paired players; pop the higher index first so the
        # lower one stays valid
        standings.pop(index_second_player)
        standings.pop(index_first_player)
        pairs.append((first_player[0],second_player[0]))
    for pair in pairs:
        # persist each pairing as a match row
        saveMatch(id_round,pair[0],pair[1])
def saveResultMatch(id_round, id_match, winner, loser, tied):
    """Store the result of a match and update both players' standings.

    Args:
      id_round: the round's unique id.
      id_match: the match's unique id.
      winner: id of the winning player (or player 1 on a tie).
      loser: id of the losing player (or player 2 on a tie).
      tied: non-None when the match ended in a tie.
    """
    # The two original branches were identical except for the value written
    # into match.result; choose it once and run a single update path.
    # (Also: 'tied is None' instead of 'tied == None'.)
    result = winner if tied is None else tied
    DB = connect()
    c = DB.cursor()
    sql = '''UPDATE match SET result = %s WHERE id_match = %s;'''
    c.execute(sql, (result, id_match,))
    DB.commit()
    DB.close()
    # propagate the outcome into the scoreboard
    reportMatch(id_round, winner, loser, tied)
def reportMatch(id_round, winner, loser, tied):
    """Record the outcome of a single match in the round's scoreboard.

    Args:
      id_round: the round the match belongs to.
      winner: id of the player who won (player 1 on a tie).
      loser: id of the player who lost (player 2 on a tie).
      tied: non-None when the match ended in a tie.
    """
    DB = connect()
    c = DB.cursor()

    def get_stats(id_player):
        # Read (matches, wins, losses, ties) for a player through the
        # stored get_num_* SQL functions; the original repeated this
        # SELECT boilerplate eight times per branch.
        counters = []
        for function in ('get_num_match', 'get_num_win',
                         'get_num_lost', 'get_num_tied'):
            c.execute('''SELECT %s(%%s,%%s);''' % function,
                      (id_round, id_player))
            counters.append(c.fetchone()[0])
        return counters

    winner_matches, winner_wins, winner_losses, winner_ties = get_stats(winner)
    loser_matches, loser_wins, loser_losses, loser_ties = get_stats(loser)
    # BUG FIX: the original never closed this connection.
    DB.close()
    if tied is None:
        # normal result: winner gains a win, loser gains a loss
        registerStandings(id_round, winner, winner_matches + 1,
                          winner_wins + 1, winner_losses, winner_ties,
                          'Update')
        registerStandings(id_round, loser, loser_matches + 1,
                          loser_wins, loser_losses + 1, loser_ties,
                          'Update')
    else:
        # tie: both players gain only a played match and a tie
        registerStandings(id_round, winner, winner_matches + 1,
                          winner_wins, winner_losses, winner_ties + 1,
                          'Update')
        registerStandings(id_round, loser, loser_matches + 1,
                          loser_wins, loser_losses, loser_ties + 1,
                          'Update')
def getNextRound(id_round, id_tournament):
    """Return the round that follows *id_round* in a tournament.

    Args:
      id_round: the current round's unique id.
      id_tournament: the tournament's unique id.

    Returns:
      A one-element row tuple with the next round id, or None when the
      given round was the last one.
    """
    conn = connect()
    cur = conn.cursor()
    sql = '''SELECT id_round
             FROM round
             WHERE id_tournament = %s
             AND id_round > %s LIMIT 1;'''
    cur.execute(sql, (id_tournament, id_round,))
    next_round = cur.fetchone()
    conn.close()
    return next_round
| [
"aguijarro@vic-data.com"
] | aguijarro@vic-data.com |
ad7f555c1cf2c1b37ed92c22ebdd1a203ddce1bc | 477800f35c1f7eb2134a1219a4b289bc23831398 | /qcache/qframe/pandas_filter.py | dbe68782ad62183cfdea360ada6132d96177ff98 | [
"MIT"
] | permissive | tobgu/qcache | c10686027a4bff07ae07d64731bfcd780b3d43e3 | 331cd23f69a44824f86e0912f796cbc54e03b037 | refs/heads/master | 2023-08-26T10:58:54.569583 | 2019-01-05T22:30:03 | 2019-01-05T22:30:03 | 43,605,709 | 44 | 3 | MIT | 2023-08-14T21:53:01 | 2015-10-03T16:31:22 | Python | UTF-8 | Python | false | false | 4,697 | py | from __future__ import unicode_literals
import operator
import numpy
from qcache.qframe.common import assert_list, raise_malformed, is_quoted, unquote, assert_len
from qcache.qframe.constants import COMPARISON_OPERATORS
from qcache.qframe.context import get_current_qframe
# Boolean-join operator symbols mapped to their element-wise equivalents,
# used by _join_filter to AND/OR pandas boolean series together.
JOINING_OPERATORS = {'&': operator.and_,
                     '|': operator.or_}
def _leaf_node(df, q):
    """Resolve a leaf of the query expression tree.

    A string is either a quoted literal (returned unquoted as a UTF-8
    byte string -- Python 2 code path) or a column name (returned as the
    corresponding series). Any other value is returned unchanged.
    """
    if isinstance(q, basestring):  # Python 2: matches both str and unicode
        if is_quoted(q):
            # strip the quotes; encoded to bytes for py2 string comparisons
            return q[1:-1].encode('utf-8')
        try:
            return df[q]
        except KeyError:
            raise_malformed("Unknown column", q)
    return q
def _bitwise_filter(df, q):
    """Apply an 'any_bits'/'all_bits' bitmask test against a column.

    q is (op, column, mask): 'any_bits' is true where the column shares at
    least one set bit with the mask, 'all_bits' where it contains all of
    the mask's bits.
    """
    assert_len(q, 3)
    op, column, arg = q
    if not isinstance(arg, (int, long)):
        # BUG FIX: the original called .format(t=...) on a string without a
        # {t} placeholder, so the offending type never appeared in the error.
        raise_malformed('Invalid argument type, must be an integer: {t}'.format(t=type(arg)), q)
    try:
        series = df[column] & arg
        if op == "any_bits":
            return series > 0
        return series == arg
    except TypeError:
        raise_malformed("Invalid column type, must be an integer", q)
def _not_filter(df, q):
    """Element-wise negation of the single nested clause in q[1]."""
    assert_len(q, 2, "! is a single arity operator, invalid number of arguments")
    inner = _do_pandas_filter(df, q[1])
    return ~inner
def _isnull_filter(df, q):
    """Boolean mask marking rows where column q[1] is null/NaN."""
    assert_len(q, 2, "isnull is a single arity operator, invalid number of arguments")
    # Slightly hacky but the only way I've come up with so far:
    # NaN is the only value that compares unequal to itself, so
    # 'col != col' is true exactly on the null rows.
    return df[q[1]] != df[q[1]]
def _comparison_filter(df, q):
    """Apply a binary comparison (==, <, >=, ...) of a column against a
    resolved right-hand side expression."""
    assert_len(q, 3)
    op, col_name, arg = q
    lhs = df[col_name]
    rhs = _do_pandas_filter(df, arg)
    return COMPARISON_OPERATORS[op](lhs, rhs)
def _join_filter(df, q):
    """Combine the clauses q[1:] with the &/| operator named by q[0].

    Raises (via raise_malformed) when no clause is present; a single
    clause is allowed and returned as-is.
    """
    # The original initialized 'result = None' and threaded it through the
    # branches; every reachable branch assigns, so return directly instead.
    if len(q) < 2:
        raise_malformed("Invalid number of arguments", q)
    if len(q) == 2:
        # Conjunctions and disjunctions with only one clause are OK
        return _do_pandas_filter(df, q[1])
    return reduce(lambda l, r: JOINING_OPERATORS[q[0]](l, _do_pandas_filter(df, r)),
                  q[2:], _do_pandas_filter(df, q[1]))
def prepare_in_clause(q):
    """Normalize the arguments of an 'in' expression.

    The arguments to an in expression may be either a list of values or
    a sub query (a dict) which is then executed against the currently
    active qframe to produce the list of values.

    Returns:
        (col_name, values) where values is a list or numpy array.
    """
    assert_len(q, 3)
    _, col_name, args = q
    if isinstance(args, dict):
        # Sub query; import locally -- qcache.qframe.query depends on this
        # module, so a top-level import would be circular.
        from qcache.qframe import query
        current_qframe = get_current_qframe()
        sub_df, _ = query(current_qframe.df, args)
        try:
            # the sub query must expose the same column we filter on
            args = sub_df[col_name].values
        except KeyError:
            raise_malformed('Unknown column "{}"'.format(col_name), q)
    if not isinstance(args, (list, numpy.ndarray)):
        raise_malformed("Second argument must be a list", q)
    return col_name, args
def _in_filter(df, q):
    """Boolean mask for membership of a column's values in a value list
    (or sub-query result)."""
    column, values = prepare_in_clause(q)
    return df[column].isin(values)
def _like_filter(df, q):
    """SQL-style (i)like match of a column against a quoted pattern.

    Leading/trailing '%' act as wildcards; without them the pattern is
    anchored with '^'/'$'. 'like' is case sensitive, 'ilike' is not.
    """
    assert_len(q, 3)
    op, column, raw_expr = q
    if not is_quoted(raw_expr):
        raise_malformed("like expects a quoted string as second argument", q)
    pattern = unquote(raw_expr)
    # translate the % wildcards at either end into regex anchoring
    if pattern.startswith('%'):
        pattern = pattern[1:]
    else:
        pattern = '^' + pattern
    if pattern.endswith('%'):
        pattern = pattern[:-1]
    else:
        pattern = pattern + '$'
    case_sensitive = (op == 'like')
    try:
        return df[column].str.contains(pattern, case=case_sensitive, na=False)
    except AttributeError:
        raise_malformed("Invalid column type for (i)like", q)
def _do_pandas_filter(df, q):
    """Recursively evaluate a filter expression tree against df.

    Non-list nodes are leaves (literals/column refs); a list is an
    (operator, args...) node dispatched to the matching handler. KeyError
    and TypeError from the handlers are rewrapped as malformed-query
    errors so callers see a uniform failure mode.
    """
    if not isinstance(q, list):
        return _leaf_node(df, q)
    if not q:
        raise_malformed("Empty expression not allowed", q)
    result = None
    op = q[0]
    try:
        if op in ('any_bits', 'all_bits'):
            result = _bitwise_filter(df, q)
        elif op == "!":
            result = _not_filter(df, q)
        elif op == "isnull":
            result = _isnull_filter(df, q)
        elif op in COMPARISON_OPERATORS:
            result = _comparison_filter(df, q)
        elif op in JOINING_OPERATORS:
            result = _join_filter(df, q)
        elif op == 'in':
            result = _in_filter(df, q)
        elif op in ('like', 'ilike'):
            result = _like_filter(df, q)
        else:
            raise_malformed("Unknown operator", q)
    except KeyError:
        raise_malformed("Column is not defined", q)
    except TypeError:
        raise_malformed("Invalid type in argument", q)
    return result
def pandas_filter(df, filter_q):
    """Apply a 'where' filter expression to df; an empty/missing filter
    returns df unchanged."""
    if not filter_q:
        return df
    assert_list('where', filter_q)
    mask = _do_pandas_filter(df, filter_q)
    return df[mask]
| [
"tobias.l.gustafsson@gmail.com"
] | tobias.l.gustafsson@gmail.com |
7345f22226c66c1e13ad3c5e6f25e48b5eab1585 | 9f42533560a924b702a951255156eabe14db67bf | /frontend/services/connect_service.py | ce2ce4954eab98e0ac0873e64b923b1d6f5a48f3 | [] | no_license | rJunx/RAID5 | 82c011072c72d50293720ff5669bde9df0721ce2 | 5c20c57d593c941d3409956461331996ec8e637d | refs/heads/master | 2020-05-19T07:50:19.594666 | 2017-06-03T21:44:53 | 2017-06-03T21:44:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,669 | py | #!/usr/bin/python
## @package RAID5.frontend.services.connect_service
## Module that implements the ConnectService class. Service brings a disk
## back online to a volume.
#
import errno
import logging
import os
import socket
import time
import traceback
from block_device.services import update_level_service
from common.services import base_service
from common.utilities import constants
from common.utilities import html_util
from common.utilities import util
from frontend.pollables import bds_client_socket
from frontend.services import display_disks_service
from frontend.utilities import cache
from frontend.utilities import disk_manager
from frontend.utilities import disk_util
from frontend.utilities import service_util
from common.utilities.state_util import state
from common.utilities.state_util import state_machine
## Frontend ConnectService. This service adds the wanted disk back to the disk
## array. Rebuilding of the disk is done after terminate, since it has to be
## done in the background after socket has been closed, as a callable
class ConnectService(base_service.BaseService):
    ## Constructor for ConnectService
    # @param entry (pollable) the entry (probably @ref
    # common.pollables.service_socket) using the service.
    # NOTE(review): entry is not used in this constructor body; it appears
    # to be kept only for the common service constructor signature -- confirm.
    # @param pollables (dict) All the pollables currently in the server
    # @param args (dict) Arguments for this service; must contain
    # "disk_UUID" and "volume_UUID" (declared as required args below)
    def __init__(self, entry, pollables, args):
        super(ConnectService, self).__init__(
            [],
            ["disk_UUID", "volume_UUID"],
            args
        )
        ## Volume we're dealing with (set in initial_setup)
        self._volume = None
        ## Disks of that volume (set in initial_setup)
        self._disks = None
        ## Disk UUID of the disk being reconnected
        self._disk_UUID = None
        ## Mode of adding a new disk
        self._new_disk_mode = False
        ## Disk already built boolean
        self._disk_built = False
        ## StateMachine object
        self._state_machine = None
        ## Current block num (for rebuilding)
        self._current_block_num = ""
        ## Current data (for rebuilding)
        self._current_data = ""
        ## pollables of the Frontend server
        self._pollables = pollables
        ## Disk Manager that manages the block-device clients
        self._disk_manager = None
    ## Name of the service (the URL path it is registered under)
    # needed for Frontend purposes, creating clients
    # required by common.services.base_service.BaseService
    # @returns (str) service name
    @staticmethod
    def get_name():
        return "/connect"
    ## Validates the requested volume/disk and marks the disk for rebuild.
    ## Reads disk_UUID and volume_UUID from the service args, checks the
    ## volume is initialized and the disk belongs to it, sanity-checks the
    ## disk's level against the others, then sets the disk state to REBUILD.
    ## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
    ## to
    ## @raises RuntimeError on an uninitialized volume, unknown disk or a
    ## level inconsistency (no return value, despite what the old doc said)
    def initial_setup(self, entry):
        self._disk_UUID = self._args["disk_UUID"][0]
        self._volume_UUID = self._args["volume_UUID"][0]
        # first check validity of volume
        if (
            self._volume_UUID not in entry.application_context["volumes"].keys(
            ) or
            (
                entry.application_context["volumes"][self._volume_UUID][
                    "volume_state"
                ] != constants.INITIALIZED
            )
        ):
            raise RuntimeError("%s:\t Need to initialize volume" % (
                entry,
            ))
        self._volume = entry.application_context["volumes"][self._volume_UUID]
        self._disks = self._volume["disks"]
        # now check validity of disk_UUID
        if self._disk_UUID not in self._disks.keys():
            raise RuntimeError("%s:\t Disk not part of volume" % (
                entry,
            ))
        # sanity check that this level is no more than all the others:
        for disk_UUID, disk in self._disks.items():
            if (
                disk_UUID != self._disk_UUID and
                (
                    disk["level"] <
                    self._disks[self._disk_UUID]["level"]
                )
            ):
                raise RuntimeError("Error in levels")
        # mark the disk so the rebuild states pick it up
        self._disks[self._disk_UUID]["state"] = constants.REBUILD
    ## Before pollable sends response status service function.
    ## Runs initial_setup (validation + marking the disk REBUILD) and
    ## prepares an HTML redirect page back to the disk display; the actual
    ## rebuild happens after the response, in the background.
    ## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
    ## to
    ## @returns finished (bool) returns true if finished
    def before_response_status(self, entry):
        # initial_setup, also check if we need to add thid disk out of no-where
        self.initial_setup(entry)
        # Re-send the management part
        self._response_content = html_util.create_html_page(
            "",
            constants.HTML_DISPLAY_HEADER,
            0,
            display_disks_service.DisplayDisksService.get_name(),
        )
        self._response_headers = {
            "Content-Length": "%s" % len(self._response_content),
        }
        return True
# REBULD PART, DONE BEFORE TERMINATE (AFTER CLOSE)
## Rebuilding States
(
GET_DATA_STATE,
SET_DATA_STATE,
UPDATE_LEVEL_STATE,
FINAL_STATE
) = range(4)
# STATE FUNCTIONS:
    ## Before we get the rebuilding data.
    ## Pops the next block from the disk's cache; if the cache holds the
    ## data this is an epsilon path, otherwise GET requests are sent to all
    ## other disks so the missing block can be XOR-reconstructed.
    ## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
    ## to
    ## @returns epsilon_path (bool) if there is no need for input
    def before_get_data(self, entry):
        self._current_block_num, self._current_data = (
            self._disks[self._disk_UUID]["cache"].next_block()
        )
        if self._current_data is not None:
            # got data stored in cache, no need for hard rebuild
            # ==> This is an epsilon_path
            return True
        else:
            # need to retrieve data from XOR of all the disks besides the
            # current one in order to rebuild it
            request_info = {}
            for disk_UUID in self._disks.keys():
                if disk_UUID != self._disk_UUID:
                    request_info[disk_UUID] = {
                        "block_num" : self._current_block_num,
                        "password" : self._volume["long_password"]
                    }
            self._disk_manager = disk_manager.DiskManager(
                self._disks,
                self._pollables,
                entry,
                service_util.create_get_block_contexts(
                    self._disks,
                    request_info
                )
            )
            # sleep until all the block device responses arrive
            entry.state = constants.SLEEPING_STATE
            return False # need input, not an epsilon path
## After we get the rebulding data
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
## @returns next_state (int) next state of StateMachine. None if not
## ready to move on to next state.
def after_get_data(self, entry):
    """Pick the next state once rebuild data for a block is available.

    Returns None while peer-disk reads are still in flight; raises
    RuntimeError on a bad status code from the Block Device Server.
    """
    if self._current_data is not None:
        # the data came straight from the cache
        return ConnectService.SET_DATA_STATE
    # the data must come from the other disks: wait for all of them
    if not self._disk_manager.check_if_finished():
        return None
    if not self._disk_manager.check_common_status_code("200"):
        raise RuntimeError(
            "Block Device Server sent a bad status code"
        )
    # XOR all the peer blocks to reconstruct the missing one
    blocks = [
        response["content"]
        for disk_num, response in self._disk_manager.get_responses().items()
    ]
    in_scratch_mode = (
        self._disks[self._disk_UUID]["cache"].mode == cache.Cache.SCRATCH_MODE
    )
    if in_scratch_mode and disk_util.all_empty(blocks):
        # every peer block is empty: from here on the cache alone suffices
        self._disks[self._disk_UUID]["cache"].mode = cache.Cache.CACHE_MODE
        # nothing to set now; stay in GET_DATA_STATE and work from cache
        return ConnectService.GET_DATA_STATE
    self._current_data = disk_util.compute_missing_block(blocks)
    return ConnectService.SET_DATA_STATE
## Before we set the rebulding data
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
## @returns epsilon_path (bool) if there is no need for input
def before_set_data(self, entry):
    """Queue the write of the rebuilt block to the recovering disk.

    Always returns False: the state machine must wait for the write.
    """
    write_context = {
        self._disk_UUID: {
            "block_num": self._current_block_num,
            "content": self._current_data,
            "password": self._volume["long_password"],
        }
    }
    self._disk_manager = disk_manager.DiskManager(
        self._disks,
        self._pollables,
        entry,
        service_util.create_set_block_contexts(self._disks, write_context),
    )
    entry.state = constants.SLEEPING_STATE
    return False  # waiting on the disk write, not an epsilon path
## After we set the rebulding data
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
## @returns next_state (int) next state of StateMachine. None if not
## ready to move on to next state.
def after_set_data(self, entry):
    """After a block write: loop for more blocks or move to the level update.

    Returns None while the write is pending; raises RuntimeError on a bad
    status code from the Block Device Server.
    """
    if not self._disk_manager.check_if_finished():
        return None
    if not self._disk_manager.check_common_status_code("200"):
        raise RuntimeError(
            "Block Device Server sent a bad status code"
        )
    next_state = (
        ConnectService.UPDATE_LEVEL_STATE
        if self.check_if_built()
        else ConnectService.GET_DATA_STATE
    )
    return next_state
## Before we update the level of the updated disk
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
## @returns epsilon_path (bool) if there is no need for input
def before_update_level(self, entry):
    """Queue the +1 level update for the freshly rebuilt disk.

    Always returns False: the state machine must wait for the request.
    """
    update_context = {
        self._disk_UUID: {
            "addition": "1",
            "password": self._volume["long_password"],
        }
    }
    self._disk_manager = disk_manager.DiskManager(
        self._disks,
        self._pollables,
        entry,
        service_util.create_update_level_contexts(self._disks, update_context),
    )
    entry.state = constants.SLEEPING_STATE
    return False  # waiting on the level update, not an epsilon path
## Before we have updated the level of the updated disk
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
## @returns next_state (int) next state of StateMachine. None if not
## ready to move on to next state.
def after_update_level(self, entry):
    """Finalize the rebuild once the level update completes.

    Returns None while pending; otherwise marks the disk online, moves the
    entry to the closing state and the machine to its final state.
    """
    if not self._disk_manager.check_if_finished():
        return None
    if not self._disk_manager.check_common_status_code("200"):
        raise RuntimeError(
            "Block Device Server sent a bad status code"
        )
    rebuilt_disk = self._disks[self._disk_UUID]
    rebuilt_disk["level"] += 1
    rebuilt_disk["state"] = constants.ONLINE
    entry.state = constants.CLOSING_STATE
    return ConnectService.FINAL_STATE
## Rebuilding states for StateMachine
# NOTE: list position matters -- STATES[<index>] must be the State whose
# first argument is that same index (before_terminate indexes this list).
STATES = [
    # read the next block (from cache or by XOR-ing the peers)
    state.State(
        GET_DATA_STATE,
        [SET_DATA_STATE],
        before_get_data,
        after_get_data,
    ),
    # write the reconstructed block; loop back or advance when built
    state.State(
        SET_DATA_STATE,
        [GET_DATA_STATE, UPDATE_LEVEL_STATE],
        before_set_data,
        after_set_data,
    ),
    # bump the disk's level and mark it online
    state.State(
        UPDATE_LEVEL_STATE,
        [FINAL_STATE],
        before_update_level,
        after_update_level,
    ),
    # terminal state (self-loop, no callbacks)
    state.State(
        FINAL_STATE,
        [FINAL_STATE],
    ),
]
## Before pollable terminates service function
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
## @returns finished (bool) returns true if finished
def before_terminate(self, entry):
    """Build the rebuild StateMachine and start running it."""
    # create the state machine for rebuilding disk
    first_state_index = ConnectService.GET_DATA_STATE
    if self._new_disk_mode:
        # NOTE(review): NEW_DISK_SETUP_STATE is not among the four state
        # indices declared above -- presumably defined elsewhere in this
        # class; confirm it is a valid index into STATES.
        first_state_index = ConnectService.NEW_DISK_SETUP_STATE
    elif self.check_if_built():
        # nothing to rebuild: jump straight to the level update
        first_state_index = ConnectService.UPDATE_LEVEL_STATE
    # create rebuild state machine
    self._state_machine = state_machine.StateMachine(
        ConnectService.STATES,
        ConnectService.STATES[first_state_index],
        ConnectService.STATES[ConnectService.FINAL_STATE]
    )
    # pass args to the machine, will use *args to pass them on
    self._state_machine.run_machine((self, entry))
## Called when BDSClientSocket invoke the on_finsh method to wake up
## the ServiceSocket. Let StateMachine handle the wake up call.
## @param entry (@ref common.pollables.pollable.Pollable) entry we belong
## to
def on_finish(self, entry):
    """Resume the rebuild StateMachine after a disk request completes."""
    # pass args to the machine, will use *args to pass them on
    self._state_machine.run_machine((self, entry))
## Checks if self._disk_UUID is built
## @returns built (bool) if disk needs to be rebuilt
def check_if_built(self):
    """Return True when the disk needs no further rebuilding work.

    The disk still needs work only while it is in REBUILD state AND its
    cache still holds blocks to flush.
    """
    disk = self._disks[self._disk_UUID]
    still_rebuilding = (
        disk["state"] == constants.REBUILD and not disk["cache"].is_empty()
    )
    return not still_rebuilding
| [
"royzohar25@gmail.com"
] | royzohar25@gmail.com |
ead62ea3fed82a2e6b2323190722d00bd0d01a78 | d8438d98a1103b4f7fc8fa84c0ee5521fef8a5d1 | /jingdong/pipelines.py | a434947aca050d4eacdd7b596c52b38bca8667c1 | [] | no_license | dgh0707/items | 6c171c2bf1485c27ea6b6c14178a255c6fcfd026 | 9b109bc4197fb1518110d8a5fb4d839615b35171 | refs/heads/master | 2020-04-30T06:02:20.243072 | 2019-06-18T04:26:12 | 2019-06-18T04:26:12 | 176,640,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class JingdongPipeline(object):
    """Scrapy pipeline storing JD goods into the `qw_goods` MySQL table.

    Items whose `goods_number` already exists in the table are skipped.
    """

    def __init__(self):
        # NOTE(review): database credentials are hard-coded; move them into
        # Scrapy settings / environment variables before shipping.
        self.conn = pymysql.connect(user='root', passwd='2018@Amber123', db='apst_share',
                                    host='47.93.244.121', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert the item unless a row with its goods_number exists."""
        # dedup on goods_number (parameterized, as in the original)
        found = self.cursor.execute(
            'select id from qw_goods where goods_number=%s', (item['comm_id'],))
        if not found:
            # Parameterized INSERT: the driver escapes every value, replacing
            # the original's string-formatting + escape_string mix and
            # closing the SQL-injection hole for scraped (untrusted) text.
            self.cursor.execute(
                "INSERT INTO qw_goods (cid, goods_number, brand, goodsname,"
                " auctionurl, goodsprice, goods_attribute, goodsthumb,"
                " details, discount_price)"
                " VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                (
                    item['cid'],
                    item['comm_id'],
                    item['brand'],
                    item['title'],
                    item['auctionUrl'],
                    item['price'],
                    item['comm_attr'],
                    item['oss_imgurl'],
                    item['oss_imageurl'],
                    item['discount_price'],
                ),
            )
            self.conn.commit()
        return item
| [
"815060473@qq.com"
] | 815060473@qq.com |
035067ece4ae3373879cbb19730d1349f123b2b5 | c61616a2d328cbe8f1980160d6769d7b13059de4 | /Python/analisador.py | ac85733a3a1d23748a2466a7bd426cb48df8bd07 | [] | no_license | jms05/BioLab | 01ef2b2f6983752525dacf37595ef2152848ed35 | eb05e56ec301b28f09df72b79964b58a6de99f84 | refs/heads/master | 2021-01-12T09:29:50.940398 | 2017-01-20T11:43:33 | 2017-01-20T11:43:33 | 76,168,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,341 | py | from Bio import SeqIO
import requests
from io import StringIO
import re
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
from Bio import ExPASy
import urllib
from Bio import SwissProt
from Bio.SwissProt import KeyWList
from Bio import ExPASy
import os
import random
from urllib.request import urlopen
# Nucleotide alphabet
DNA_BASES = ["A","T","C","G"]
# One-letter amino-acid codes used below ('_' marks a stop codon)
PROTAIN_BASES= ['I', 'M', 'T', 'N', 'K', 'S', 'R',
                'P', 'H', 'Q', 'V', 'A', 'D', 'E',
                'G', 'F', 'L', 'Y', 'C', '_', 'W',]
# Translation start (Met) and the stop sentinel
START_BASE = 'M'
END_BASE = '_'
# Watson-Crick base pairing; used by reverseComplement()
DNA_COMPLEMENT = {"A":"T","T":"A","C":"G","G":"C"}
# Standard genetic code: DNA codon -> amino acid ('_' = stop)
CODONS = {
    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
    'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
    'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',
    }
def parseFile(filename):
    """Parse a single-record GenBank file and return its SeqRecord."""
    return SeqIO.read(filename, "genbank")
def reverseComplement(dnaSeq):
    """Return the reverse complement of a DNA string (A<->T, C<->G)."""
    complemented = [DNA_COMPLEMENT[base] for base in dnaSeq]
    return "".join(reversed(complemented))
def parseXML(textxml):
    """Scrape a UniProt XML entry with a line-by-line text scan.

    Returns (functions, name, domain_text) where:
      - functions: list of the text of each <comment type="function"> block
      - name: the primary gene name, or "-" when absent
      - domain_text: '|'-joined domain descriptions (trailing '|'), or "-"

    NOTE(review): this is not a real XML parse -- it depends on UniProt's
    line layout and attribute quoting staying exactly as observed.
    """
    # state flags for the three sections being tracked
    # (original misspellings kept: starf = in <comment type="function">,
    #  satrtG = in <gene>, startFeat = in <feature>)
    starf= False
    satrtG = False
    startFeat = False
    functions = []
    function = ""
    name =""
    domains = []
    for line in textxml.splitlines():
        if "<comment type=\"function\">" in line:
            starf =True
            function=""
        if(("</comment>" in line) and starf):
            starf = False
            functions.append(function)
        if(starf):
            # accumulate every line inside the function comment block
            function+=(line.strip())
        if "<feature" in line:
            startFeat=True
        if "</feature>" in line:
            startFeat=False
        if(startFeat):
            if "type=\"domain\"" in line:
                # pull description="..." out of the feature tag, token by token
                campos = line.split()
                for campo in campos:
                    if "description" in campo:
                        interesse = campo.split("=")[1].replace("\"","")
                        domains.append(interesse)
        if "<gene>" in line:
            satrtG=True
        if "</gene>" in line:
            satrtG=False
        if(satrtG):
            if "primary" in line:
                # e.g. <name type="primary">xyz</name> -> 'xyz'
                name = line.split(">")[1].split("<")[0]
    ret = []
    for fun in functions:
        # strip the accumulated markup down to the text of the last element
        ret.append(fun.split("<")[-2].split(">")[1])
    if(name ==""):
        name = "-"
    domianTex = ""
    for domain in domains:
        domianTex+=(domain+"|")
    if domianTex=="":
        domianTex="-"
    return(ret,name,domianTex)
def parseTXT(record):
    """Extract GO annotations from a SwissProt flat-file record.

    Returns (status, location, molecular_functions, biological_processes):
    GO terms prefixed 'F:' go to molecular functions, 'P:' to biological
    processes, and 'C:' sets the (last-seen) cellular location.
    """
    try:
        status = str(record.data_class)
    except Exception:
        status = "-"
    local = "-"
    molecular_functions = []
    biological_processes = []
    for reference in record.cross_references:
        if reference[0] != "GO":
            continue
        tipo, ids, annotation, pis = reference
        parts = str(annotation).split(":")
        category = parts[0]
        if category == 'F':
            molecular_functions.append(parts[1])
        if category == 'P':
            biological_processes.append(parts[1])
        if category == 'C':
            local = parts[1]
    return (status, local, molecular_functions, biological_processes)
def downloadSwiss(idfasta, ext):
    """Download the UniProt entry `idfasta` in the given format ('txt'/'xml').

    Returns the raw response bytes.
    """
    target_url = "http://www.uniprot.org/uniprot/%s.%s" % (idfasta, ext)
    return urlopen(target_url).read()
def parseSwissProt(idswiss):
    """Fetch a UniProt entry in txt and xml form and parse both.

    Returns (status, location, molecular-function GO terms,
    biological-process GO terms, function comments, gene name, domain text).
    """
    txt = downloadSwiss(idswiss, "txt")
    xml = downloadSwiss(idswiss, "xml")
    # SwissProt.read wants a file handle, so the flat file goes to disk
    # first. Context managers close both handles (the original leaked the
    # read handle and used bare open/close for the write).
    with open("tmp.dat", "w") as f:
        f.write(txt.decode('utf-8'))
    with open("tmp.dat") as handle:
        (status, local, funcMol, bioPro) = parseTXT(SwissProt.read(handle))
    # parse of the flat file done; now the XML side
    (functions, name, domianTex) = parseXML(xml.decode('utf-8'))
    return (status, local, funcMol, bioPro, functions, name, domianTex)
def getinfosfromgem(genbank):
    """Collect (gene_info, protein_info, ec_number) for every CDS feature.

    gene_info    = (geneID, accession, locus, gene name, strand, CDS DNA)
    protein_info = (placeholder, protein id, product name, length,
                    placeholder, function, translation)

    Bug fix: the original iterated the module-level `rec` instead of the
    `genbank` parameter, silently ignoring its argument.
    """
    # NOTE(review): accession is hard-coded; presumably it should come from
    # the record itself (genbank.id) -- confirm before changing the output.
    ACC = "NC_002942"
    ret = []
    dna = genbank.seq
    for feat in genbank.features:
        if feat.type == 'CDS':
            # feat.location looks like '[start:end](+)' / '[start:end](-)'
            strand = str(feat.location).split("]")[1]
            inter = str(feat.location).split("]")[0].replace("[", "")
            start = int(inter.split(":")[0])
            end = int(inter.split(":")[1])
            if strand == "(-)":
                seqdnaprot = reverseComplement(dna[start:end])
            else:
                strand = "(+)"
                seqdnaprot = str(dna[start:end])
            geneID = feat.qualifiers["db_xref"][0]
            ID_prot = feat.qualifiers["protein_id"][0]
            try:
                function = feat.qualifiers["function"][0]
            except Exception:
                function = "Unknown"
            try:
                genName = feat.qualifiers["gene"][0]
            except Exception:
                genName = "-"
            tradu = feat.qualifiers["translation"][0]
            locus = feat.qualifiers["locus_tag"][0]
            prorainNAme = feat.qualifiers["product"][0]
            try:
                ecNumber = feat.qualifiers["EC_number"][0]
            except Exception:
                ecNumber = "-"
            geninfo = (geneID, ACC, locus, genName, strand, seqdnaprot)
            protinfo = ("uniorotID_rev", ID_prot, prorainNAme, len(tradu),
                        "local", function, tradu)
            ret.append((geninfo, protinfo, ecNumber))
    return ret
def getInfouniprot(protainID):
    """Resolve a protein ID to its UniProt accession and parsed entry.

    Queries UniProt for the FASTA records matching `protainID`, keeps the
    accession of the last record (the field between the first two '|'),
    then parses the full entry with parseSwissProt().

    Raises ValueError when UniProt returns no FASTA record (the original
    crashed with an unrelated NameError in that case).
    """
    params = {"query": protainID, "format": "fasta"}
    r = requests.get("http://www.uniprot.org/uniprot/", params)
    idfasta = None
    for record in SeqIO.parse(StringIO(r.text), "fasta"):
        # FASTA header looks like 'db|ACCESSION|entry' -> keep the accession
        idfasta = str(record.id).split("|")[1]
    if idfasta is None:
        raise ValueError("no UniProt FASTA record found for %r" % protainID)
    parst = parseSwissProt(idfasta)
    return (idfasta, parst)
def shortSeq(sequence,tam):
    """Abbreviate a sequence as '<first tam chars>...<last tam chars>'.

    Currently disabled on purpose: the first return passes the sequence
    through unchanged, leaving the shortening code below unreachable.
    """
    return sequence # remove this line to enable shortening
    start = sequence[:tam]
    end = sequence[(-1*tam):]
    return(start+"..."+end)
def juntaFuncoes(listaF, fungb, sep):
    """Return `fungb` followed by every entry of `listaF`, each preceded by `sep`."""
    return fungb + "".join(sep + funcao for funcao in listaF)
def juntaLista(lista, sep):
    """Join the strings in `lista` with separator `sep`.

    Bug fix: the original appended `sep` after every element and chopped
    exactly ONE trailing character, which corrupted the result whenever
    len(sep) > 1. str.join handles every separator length correctly and
    returns '' for an empty list, matching the old behavior.
    """
    return sep.join(lista)
def createCVSRecord(gbData, swissData, sep):
    """Assemble one sep-separated CSV row from GenBank + SwissProt data.

    `gbData` is one (gene_info, protein_info, EC) tuple from
    getinfosfromgem(); `swissData` is the (accession, parsed) pair from
    getInfouniprot().
    """
    grauRev = "---"  # review grade placeholder
    genInfo, protInfo, EC = gbData
    geneID, NCIgual, locusTag, geneName, strand, dnaSeq = genInfo
    lixo, assNCBI, protName, protLen, lixo2, protFungb, protSeq = protInfo
    idSwiss, parse = swissData
    (protStatus, protLocal, funcaoMolec, processBiol,
     funcoes, geneNameSwiss, domianTex) = parse
    # '-' marks empty GO-term lists
    funcaoMolec = juntaLista(funcaoMolec, "_") or "-"
    processBiol = juntaLista(processBiol, "_") or "-"
    funcoes = juntaFuncoes(funcoes, protFungb, "_")
    geneID = geneID.split(":")[1]  # db_xref looks like 'GeneID:12345'
    if geneName == "-":
        # fall back to the gene name SwissProt reported
        geneName = geneNameSwiss
    fields = [
        geneID, geneName, NCIgual, locusTag, strand, shortSeq(dnaSeq, 7),
        assNCBI, idSwiss, protName, shortSeq(protSeq, 7),
        str(protLen), protStatus, grauRev, protLocal,
        EC, funcaoMolec, processBiol, funcoes, domianTex,
    ]
    return sep.join(fields)
# Build the full CSV: one row per CDS, enriched with UniProt data.
sep =";"
rec = parseFile("grupo6.txt")
datas = getinfosfromgem(rec)
filecsv = open("tabela_com_seq_Completa.csv","w")
# Excel hint line declaring the separator
filecsv.write("sep="+sep+"\n")
# header row
cabeca="geneID"+sep+"GeneName"+sep+"GeneAccessNumber"+sep+"locusTag"+sep+"strand"+sep+"DNA_SEQ"
cabeca+=sep+"AccessNumberNCBI"+sep+"idSwiss"+sep+"protName"+sep+"PROT_SEQ"
cabeca+=sep+"PROT_Tamanho"+sep+"protStatus"+sep+"grauRev"+sep+"protLocal"
cabeca+=sep+"EC"+sep+"(GO)funcaoMolec"+sep+"(GO)processBiol"+sep+"funcoes"+sep+"Domain"
filecsv.write(cabeca+"\n")
i=1
for data in datas:
    (gene,prot,ec)=data
    (rev,ID_prot,prorainNAme,tam,local,function,tradu)=prot
    # one UniProt round-trip per protein -- this loop is network-bound
    swissinfo = getInfouniprot(ID_prot)
    dataCS = createCVSRecord(data,swissinfo,sep)
    filecsv.write(dataCS+"\n");
    print("mais UMA: " + str(i))
    i=i+1
filecsv.close()
"joaomsilva@outlook.com"
] | joaomsilva@outlook.com |
c1d6c1b5976bb6b865327038010437c766ab107a | 0ac0387f701e10a3d5d1fd42287ae8ab4b76be11 | /MAN_CNS/CNS_Dumy.py | 85836a3282ecf9d7837abaa5496211a87d6505d3 | [
"Apache-2.0"
] | permissive | LeeDaeil/CNS_Autonomous | 676e6f091c4e25d4f9b52683d119bae1ea4289a5 | 2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f | refs/heads/master | 2021-06-19T11:09:38.550032 | 2021-01-06T07:45:29 | 2021-01-06T07:45:29 | 144,431,774 | 2 | 0 | null | 2018-11-10T15:38:05 | 2018-08-12T02:24:15 | Python | UTF-8 | Python | false | false | 7,812 | py | from CNS_UDP_FAST import CNS
import numpy as np
import time
import random
class ENVCNS(CNS):
    """Gym-style environment wrapper around a CNS (nuclear simulator) link.

    Exposes reset()/step() plus reward/done logic for RL agents; all
    simulator I/O goes through the CNS base class over UDP.
    """
    def __init__(self, Name, IP, PORT, Monitoring_ENV=None):
        # NOTE(review): Remote_IP is hard-coded; confirm it matches the
        # deployment network.
        super(ENVCNS, self).__init__(threrad_name=Name,
                                     CNS_IP=IP, CNS_Port=PORT,
                                     Remote_IP='192.168.0.29', Remote_Port=PORT, Max_len=10)
        self.Monitoring_ENV = Monitoring_ENV
        self.Name = Name  # = id
        self.AcumulatedReward = 0
        self.ENVStep = 0
        self.LoggerPath = 'DB'
        self.want_tick = 5  # 1sec
        self.Loger_txt = ''
        # Parameters composing the observation vector.
        self.input_info = [
            # (para, x_round, x_min, x_max), (x_min=0, x_max=0 is not normalized.)
            ('BHV142', 1, 0, 0),  # Letdown(HV142)
        ]
        self.action_space = 1
        self.observation_space = len(self.input_info)
    # ENV Logger
    def ENVlogging(self, s):
        """Append the accumulated log text to '<Name>.txt' (header on step 0)."""
        cr_time = time.strftime('%c', time.localtime(time.time()))
        if self.ENVStep == 0:
            # first step of an episode: write a dated separator banner
            with open(f'{self.Name}.txt', 'a') as f:
                f.write('==' * 20 + '\n')
                f.write(f'[{cr_time}]\n')
                f.write('==' * 20 + '\n')
        else:
            with open(f'{self.Name}.txt', 'a') as f:
                f.write(f'[{cr_time}] {self.Loger_txt}\n')
    def normalize(self, x, x_round, x_min, x_max):
        """Scale x into [0, 1] using (x_min, x_max); divide by x_round when both bounds are 0."""
        if x_max == 0 and x_min == 0:
            # It means X value is not normalized.
            x = x / x_round
        else:
            # clamp to [x_min, x_max] before min-max scaling
            x = x_max if x >= x_max else x
            x = x_min if x <= x_min else x
            x = (x - x_min) / (x_max - x_min)
        return x
    def get_state(self):
        """Build the normalized observation vector from simulator memory."""
        state = []
        for para, x_round, x_min, x_max in self.input_info:
            if para in self.mem.keys():
                state.append(self.normalize(self.mem[para]['Val'], x_round, x_min, x_max))
            else:
                # ADD logic ----- hook for derived (computed) values
                pass
        # state = [self.mem[para]['Val'] / Round_val for para, Round_val in self.input_info]
        self.Loger_txt += f'{state}\t'
        return np.array(state)
    def get_reward(self):
        """Return the step reward (currently a constant 0 placeholder).

        R => _
        :return: reward value
        """
        r = 0
        self.Loger_txt += f'R:{r}\t'
        return r
    def get_done(self, r):
        """Return (done, normalized reward); done is currently always False."""
        V = {
            'Dumy': 0
        }
        r = self.normalize(r, 1, 0, 2)
        d = False
        self.Loger_txt += f'{d}\t'
        return d, self.normalize(r, 1, 0, 2)
    def _send_control_save(self, zipParaVal):
        # unpack (parameters, values) and defer to the CNS base class
        super(ENVCNS, self)._send_control_save(para=zipParaVal[0], val=zipParaVal[1])
    def send_act(self, A):
        """Send the control action corresponding to A; the rest is automatic.

        E.g.)
            self._send_control_save(['KSWO115'], [0])
            ...
            self._send_control_to_cns()
        :param A: action, e.g. [0, 0, 0] (shape depends on the action space)
        :return: AMod: the (possibly modified) action actually applied
        """
        AMod = A
        # snapshot of the simulator values relevant to control decisions
        V = {
            'CNSTime': self.mem['KCNTOMS']['Val'],
            'Delta_T': self.mem['TDELTA']['Val'],
            'ChargingVV': self.mem['BFV122']['Val'],
            'ChargingVM': self.mem['KLAMPO95']['Val'],  # 1 m 0 a
            'LetDownSet': self.mem['ZINST36']['Val'],
        }
        # catalogue of available control commands: name -> (parameters, values)
        ActOrderBook = {
            'ChargingValveOpen': (['KSWO101', 'KSWO102'], [0, 1]),
            'ChargingValveStay': (['KSWO101', 'KSWO102'], [0, 0]),
            'ChargingValveClase': (['KSWO101', 'KSWO102'], [1, 0]),
            'ChargingEdit': (['BFV122'], [0.12]),
            'LetdownValveOpen': (['KSWO231', 'KSWO232'], [0, 1]),
            'LetdownValveStay': (['KSWO231', 'KSWO232'], [0, 0]),
            'LetdownValveClose': (['KSWO231', 'KSWO232'], [1, 0]),
            'PZRBackHeaterOff': (['KSWO125'], [0]), 'PZRBackHeaterOn': (['KSWO125'], [1]),
            'PZRProHeaterMan': (['KSWO120'], [1]), 'PZRProHeaterAuto': (['KSWO120'], [0]),
            'PZRProHeaterDown': (['KSWO121', 'KSWO122'], [1, 0]),
            'PZRProHeaterStay': (['KSWO121', 'KSWO122'], [0, 0]),
            'PZRProHeaterUp': (['KSWO121', 'KSWO122'], [0, 1]),
            'LetDownSetDown': (['KSWO90', 'KSWO91'], [1, 0]),
            'LetDownSetStay': (['KSWO90', 'KSWO91'], [0, 0]),
            'LetDownSetUP': (['KSWO90', 'KSWO91'], [0, 1]),
            'ChangeDelta': (['TDELTA'], [1.0]),
            'ChargingAuto': (['KSWO100'], [0])
        }
        # Delta
        # if V['Delta_T'] != 1: self._send_control_save(ActOrderBook['ChangeDelta'])
        # Done Act
        self._send_control_to_cns()
        return AMod
    def SkipAct(self):
        """Flush a no-op control step (placeholder dummy order book)."""
        ActOrderBook = {
            'Dumy': (['Dumy_para'], [0]),
        }
        # Skip or Reset Act
        # self._send_control_save(ActOrderBook['LetdownValveStay'])
        # Done Act
        self._send_control_to_cns()
        return 0
    def step(self, A, mean_, std_):
        """Apply action A and advance the simulator one step.

        :param A: [Act], numpy.ndarray of numpy.float32
        :param mean_, std_: action-distribution stats forwarded to monitoring
        :return: (next_state, reward, done, AMod)
        """
        # Old Data (time t) ---------------------------------------
        AMod = self.send_act(A)
        self.want_tick = int(10)
        if self.Monitoring_ENV is not None:
            self.Monitoring_ENV.push_ENV_val(i=self.Name,
                                             Dict_val={f'{Para}': self.mem[f'{Para}']['Val'] for Para in
                                                       ['BHV142', 'BFV122', 'ZINST65', 'ZINST63']}
                                             )
            self.Monitoring_ENV.push_ENV_ActDis(i=self.Name,
                                                Dict_val={'Mean': mean_, 'Std': std_}
                                                )
        # New Data (time t+1) -------------------------------------
        super(ENVCNS, self).step()
        self._append_val_to_list()
        self.ENVStep += 1
        reward = self.get_reward()
        done, reward = self.get_done(reward)
        if self.Monitoring_ENV is not None:
            self.Monitoring_ENV.push_ENV_reward(i=self.Name,
                                                Dict_val={'R': reward, 'AcuR': self.AcumulatedReward, 'Done': done})
        next_state = self.get_state()
        # ----------------------------------------------------------
        self.ENVlogging(s=self.Loger_txt)
        # self.Loger_txt = f'{next_state}\t'
        self.Loger_txt = ''
        return next_state, reward, done, AMod
    def reset(self, file_name):
        """Reset the simulator (with the fixed malfunction case) and return the initial state."""
        # 1] Reset the CNS state and load the initial values into memory
        super(ENVCNS, self).reset(initial_nub=1, mal=True, mal_case=35, mal_opt=1, mal_time=450, file_name=file_name)
        # 2] Append the refreshed 'Val's to 'List' and restart ENV logging
        self._append_val_to_list()
        self.ENVlogging('')
        # 3] Build the state vector from 'Val' and return it below
        state = self.get_state()
        # 4] Reset the accumulated reward and the step counter
        self.AcumulatedReward = 0
        self.ENVStep = 0
        if self.Monitoring_ENV is not None: self.Monitoring_ENV.init_ENV_val(self.Name)
        # 5] FIX RADVAL
        self.FixedRad = random.randint(0, 20) * 5
        self.FixedTime = 0
        self.FixedTemp = 0
        return state
if __name__ == '__main__':
    # ENVCNS TEST
    env = ENVCNS(Name='Env1', IP='192.168.0.101', PORT=int(f'7101'))
    # Run
    # NOTE(review): the inner loop reuses '_' and thereby shadows the
    # episode index used by env.reset(file_name=f'Ep{_}').
    for _ in range(1, 2):
        env.reset(file_name=f'Ep{_}')
        start = time.time()
        for _ in range(0, 300):
            A = 0
            next_state, reward, done, AMod = env.step(A, std_=1, mean_=0)
            if done:
                print(f'END--{start}->{time.time()} [{time.time()-start}]')
                break
"dleodfl1004@naver.com"
] | dleodfl1004@naver.com |
8976b9d0a19b96ac46f5c6b8646da091f81333ad | 4ddcec92fd322d2ecd89aaf323a59de5b628dc7d | /main.py | ceed23be1d12732750ed64d4d668d38383d8bacf | [] | no_license | ldev/ldevirc | cbf63a16df7f8417fa4a32597ef10153ef1195b5 | 776ab1732027c98be3c668b07e37b20bbfb20517 | refs/heads/master | 2016-09-11T02:09:02.500477 | 2014-02-19T09:12:09 | 2014-02-19T09:12:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,502 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This is a python bot written by Jonas Lindstad (LDEV)
# You are free to use this code, which is licensed under the "WTFPL license".
#
# ---------------------------------------------------------------------
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
# Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
#
# ---------------------------------------------------------------------
#
#
# TODO:
# * http://stackoverflow.com/questions/930700/python-parsing-irc-messages
# * Få nick-greia inn i en while-løkke, som sikrer at den får et unikt brukernavn
# * Append users to auto-op file from IRC channel
# * Skille ut config i egen fil
#
# MODULES
#
import socket
import sys
from random import choice
import time
import datetime
import logging
import logging.handlers # why?
from subprocess import Popen, PIPE # For calling traceroute
import signal # for ctrl-c catching
import sys # for ctrl-c catching
from platform import platform # Finne linuxversjon etc.
from os import path # finding the current script path
# import urllib2 # URL grabbing
import simplejson as json # Config parsing
import re # URL grabbing
from urllib import request # URL grabbing
#
# VARIABLES
#
client_name = 'ldevirc'
version = '0.1a'
working_dir = path.dirname(path.realpath(__file__)) # no trailing slash
# load configuration file
with open('%s/config.json' % working_dir) as data_file:
    config = json.load(data_file)
#
# LOGGING
# Good article about logging: http://victorlin.me/posts/2012/08/26/good-logging-practice-in-python
# logger.<level> = debug, info, warn, error
#
# Rotating per-network/per-channel log file, level and sizes from config.
logger_file = '%s/logs/%s_%s' % (working_dir, config['network'].lower(), config['channel'].lower())
logger = logging.getLogger(version)
logger_level = logging.getLevelName(config['logging']['level'])
logger.setLevel(logger_level)
handler = logging.handlers.RotatingFileHandler(logger_file, 'a', maxBytes=config['logging']['max_file_size'], backupCount=config['logging']['max_number_of_files'], encoding=config['server_encoding'])
handler.setLevel(logger_level)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') # left "%(name)" out on purpose
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('%s started' % version)
#
# FUNCTIONS
#
def find_user(str):
    """Extract the nick from an IRC prefix like ':nick!user@host ...'."""
    nick_part = str.partition('!')[0]
    return nick_part[1:]  # drop the leading ':'
def traceroute(dest, user):
    """Run `tracepath` towards *dest*, streaming each output line to *user*.

    Blocks the (single-threaded) bot until the trace finishes. Popen gets
    an argument list (no shell), so the character stripping below is
    belt-and-braces rather than the primary injection defence.
    """
    dest = dest.replace('|', '').replace(';', '').replace('&', '') # somewhat securing against malicious input..
    irc_cmd('PRIVMSG %s :Performing trace towards %s\r\n' % (user, dest))
    trace_log_text = "Performing traceroute towards %s (requested by %s)" % (dest, user)
    logger.info(trace_log_text)
    p = Popen(['tracepath', dest], stdout=PIPE)
    while True:
        line = p.stdout.readline() # return bytes
        if not line:
            break
        line = line.decode("utf-8")
        # NOTE(review): sends via irc.send with default .encode() instead of
        # irc_cmd / config['server_encoding'] -- confirm this is intended.
        irc.send(('PRIVMSG %s :%s\r\n' % (user, line)).encode()) # to remove b'' from line
# send string to IRC server
def irc_cmd(text):
    """Send one raw IRC line, CRLF-terminated, in the configured encoding."""
    payload = ("%s\r\n" % text).encode(config['server_encoding'])
    irc.send(payload)
def timestamp():
    """Current local time as 'YYYY-MM-DDTHH:MM:SS' (ISO 8601, seconds precision)."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%dT%H:%M:%S')
# handle ctrl+c gracefully
def signal_handler(signal, frame):
print ('\nctrl+c detected. Disconnecting from %s' % config['server'])
logger.warn(text.strip())
irc_cmd("QUIT :%s" % config['quit_message'])
time.sleep(1)
sys.exit(0)
def grab_title(url):
    """Fetch `url`, announce its <title> on the channel, and return it.

    Returns None when the page has no <title>. Bug fix: the original
    passed get_content_charset() straight to bytes.decode(), which raises
    TypeError whenever the server omits the charset header -- fall back to
    UTF-8 in that case.
    """
    resource = request.urlopen(url)
    charset = resource.headers.get_content_charset() or 'utf-8'
    source = resource.read().decode(charset)
    match = re.findall(r'<title>(.*?)</title>', source, re.S)
    if match:
        title = match[0].replace('\n', '').strip()
        logger.info('titlegrabber: Grabbed title "%s"' % title)
        irc_cmd('PRIVMSG %s :(%s)' % (config['channel'], title))
        return title
    else:
        logger.info('titlegrabber: No match for <title></title>')
#
# PERFORM IRC CONNECTION
#
# Install the Ctrl+C handler, connect, register the nick and join the channel.
signal.signal(signal.SIGINT, signal_handler)
print('\r\npress ctrl+c to close the connection to the server')
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.info("ldevirc connecting to %s (%s)" % (config['network'], config['server']))
irc.connect((config['server'], config['port']))
irc_cmd('USER %s Testing 0 * :...' % (config['botnick']))
time.sleep(1)  # Prevent Excess flood at a couple of networks (EFnet for instance)
irc_cmd('NICK %s' % config['botnick'])
time.sleep(1)  # Prevent Excess flood at a couple of networks (EFnet for instance)
#
# TODO: Replace with while-loop of some sort
#
text = irc.recv(4096).decode(config['server_encoding'])
# NOTE(review): servers normally reject a taken nick with numeric 433
# (ERR_NICKNAMEINUSE); confirm 'ERR_ALREADYREGISTRED' ever appears verbatim.
if text.find('ERR_ALREADYREGISTRED') != -1:
    logger.info('Nick %s taken. Attempting to use %s-' % (config['botnick'], config['botnick']))
    irc_cmd('NICK %s-' % config['botnick'])
else:
    logger.info('Registered nick %s' % config['botnick'])
# prevents loop if the server closed the connection
if text.find('ERROR') != -1:
    # bug fix: 'log' was undefined here (NameError); use the module logger
    logger.error('%s' % text)
    sys.exit(1)
# join channel
irc_cmd("JOIN %s" % config['channel'])
logger.info("Joined channel %s" % config['channel'])
#
# MAIN LOOP - PROCESSING
#
while 1:
    # receive data from IRC server
    text = irc.recv(4096).decode(config['server_encoding'])
    if len(text.strip()) == 0:
        logger.error('No data from IRC server - Either IRC server disconnected client or server crashed')
        sys.exit(1)
    # print text to terminal (lines mentioning the bot at INFO, rest at DEBUG)
    for line in text.split('\n'):
        if len(line.strip()) > 0:
            if text.find(config['botnick']) != -1:
                print('[%s] %s' % (timestamp(), line.strip()))
                logger.info(line.strip())
            else:
                logger.debug(line.strip())
    #
    # BOT CONTROL
    #
    if text.find('PING') != -1:
        logger.debug('PONG %s' % (text.split()[1]))
        irc_cmd(('PONG %s' % (text.split()[1])))
    # Join on kick
    # NOTE(review): str.find() returns -1 (truthy!) when absent, so this
    # outer test is effectively always true; only the split-based inner
    # check actually guards. Should be "... != -1".
    if text.find('KICK %s %s' % (config['channel'], config['botnick'])):
        if text.split(' ')[1] == 'KICK' and text.split(' ')[2] == config['channel'] and text.split(' ')[3] == config['botnick']:
            logger.warn(text)
            time.sleep(1)
            irc_cmd("JOIN %s" % config['channel'])
            time.sleep(0.5)
            irc_cmd('PRIVMSG %s :I\'m back, bitches!' % config['channel'])
    #
    # CTCP
    # http://www.irchelp.org/irchelp/rfc/ctcpspec.html
    # http://www.kvirc.net/doc/doc_ctcp_handling.html
    # Skipped CTCP commands: SOURCE, USERINFO, ERRMSG
    #
    # FINGER - Returns the user's full name, and idle time
    if text.find('PRIVMSG %s :\u0001FINGER\u0001' % (config['botnick'])) != -1:
        logger.debug('NOTICE %s :\u0001FINGER %s by ldev.no\u0001' % (find_user(text), config['botnick']))
        irc_cmd('NOTICE %s :\u0001FINGER %s by ldev.no\u0001' % (find_user(text), config['botnick']))
    # VERSION - The version and type of the client
    if text.find('PRIVMSG %s :\u0001VERSION\u0001' % (config['botnick'])) != -1:
        # logger.debug('NOTICE %s :\u0001VERSION %s:%s\u0001' % (find_user(text), version, platform()))
        # irc_cmd('NOTICE %s :\u0001VERSION %s:%s\u0001' % (find_user(text), version, platform()))
        logger.info('NOTICE %s :\u0001VERSION %s:%s:%s\u0001' % (find_user(text), client_name, version, platform()))
        irc_cmd('NOTICE %s :\u0001VERSION %s:%s:%s\u0001' % (find_user(text), client_name, version, platform()))
    # CLIENTINFO - Dynamic master index of what a client knows
    # NOTE(review): format string below has two %s but only one argument
    # (version) -- raises TypeError if a CTCP CLIENTINFO ever arrives.
    if text.find('PRIVMSG %s :\u0001CLIENTINFO\u0001' % (config['botnick'])) != -1:
        logger.debug('NOTICE %s :\u0001VERSION %s under development by Jonas Lindstad\u0001' % version)
        irc_cmd('NOTICE %s :\u0001VERSION %s under development by Jonas Lindstad\u0001' % version)
    # PING - Used to measure the delay of the IRC network between clients.
    # NOTE(review): two %s placeholders, one argument -- TypeError when hit.
    if text.find('PRIVMSG %s :\u0001PING\u0001' % (config['botnick'])) != -1:
        logger.debug('NOTICE %s :\u0001PING %s\u0001' % (text.split()[2]))
        irc_cmd('NOTICE %s :\u0001PING %s\u0001' % (text.split()[2]))
    # TIME - Gets the local date and time from other clients.
    # NOTE(review): two %s placeholders, one argument -- TypeError when hit.
    if text.find('PRIVMSG %s :\u0001TIME\u0001' % (config['botnick'])) != -1:
        logger.debug('NOTICE %s :\u0001TIME %s\u0001' % timestamp())
        irc_cmd('NOTICE %s :\u0001TIME %s\u0001' % timestamp())
    #
    # CAP - capabilities
    # http://www.leeh.co.uk/draft-mitchell-irc-capabilities-02.html
    #
    #
    # All the extra shit
    #
    # help
    if text.find('!help') !=-1 and text.find(config['botnick']) == -1:
        logger.info('PRIVMSG %s :This is %s. Commands: !help, !bully <user>, !trace <host>' % (config['channel'], version))
        irc_cmd('PRIVMSG %s :This is %s. Commands: !help, !bully <user>, !trace <host>' % (config['channel'], version))
    # bully: send a random phrase from bully.txt at the named user
    if text.find(':!bully') != -1:
        with open('%s/bully.txt' % working_dir) as f:
            lines = f.read().splitlines()
        to = text.split(':!bully')[1].strip()
        phrase = choice(lines)
        logger.info('PRIVMSG %s :%s %s' % (config['channel'], to, phrase))
        irc_cmd('PRIVMSG %s :%s %s' % (config['channel'], to, phrase))
    # gay
    if text.find('gay') !=-1 and text.find(config['botnick']) == -1:
        logger.info('PRIVMSG %s :%s er gay!' % (config['channel'], find_user(text)))
        irc_cmd('PRIVMSG %s :%s er gay!' % (config['channel'], find_user(text)))
    # auto OP: +o users listed in the per-network/channel auto-op file
    if text.find("JOIN :%s" % config['channel']) !=-1 and text.find(config['botnick']) == -1:
        auto_op_file = '%s/auto-op/%s_%s' % (working_dir, config['network'].lower(), config['channel'].lower())
        logger.debug('Checking auto-op file "%s"' % auto_op_file)
        if path.isfile(auto_op_file):
            with open(auto_op_file) as f:
                operators = f.read().splitlines()
            if any(ext in text for ext in operators):
                to = text.split('!')[0][1:]
                irc_cmd(('PRIVMSG %s :Hei, deg kjenner jeg!' % config['channel']))
                logger.info(('MODE %s +o %s' % (config['channel'], to)))
                irc_cmd(('MODE %s +o %s' % (config['channel'], to)))
            else:
                logger.debug('User not in auto-op file')
        else:
            logger.debug('No auto-op file for this network/channel')
    # trace
    if text.find(':!trace') != -1:
        dest = text.split(':!trace')[1].strip()
        traceroute(dest, find_user(text))
    # URL title grabber
    if text.find(config['botnick']) == -1 and (text.find('http://') != -1 or text.find('https://') != -1):
        urls = re.findall(r'(https?://\S+)', text)
        try:
            logger.info('titlegrabber: Fetching title from %s' % urls[0])
            grab_title(urls[0])
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt/SystemExit -- narrow to Exception.
        except:
            logger.info('titlegrabber: Failed to fetch title from %s' % urls[0])
            # e = sys.exc_info()[0]
            logger.error(sys.exc_info())
            # print(sys.exc_info())
            pass
| [
"jonaslindstad@gmail.com"
] | jonaslindstad@gmail.com |
9981c922ae16ac4a813895e536e8827db3022216 | 4f9d0b10aad4adeefaa76c73a6f9cc5b45a26ec6 | /Ecom/migrations/0004_remove_orders_customer.py | 7c2fbed9d40dfd990a84ba0a703f0f5b0a8738c2 | [] | no_license | Bhawan-Sharma/ABshopEcommerce | 967213b11f3766386680c4e01053163c884353cf | 47a86795f7b2ba95f05d8cff6469a8377275eda1 | refs/heads/main | 2023-07-27T01:42:28.479598 | 2021-08-28T13:50:22 | 2021-08-28T13:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Generated by Django 3.0.8 on 2021-08-27 05:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Ecom', '0003_orders_product'),
]
operations = [
migrations.RemoveField(
model_name='orders',
name='customer',
),
]
| [
"83630093+Alfacito@users.noreply.github.com"
] | 83630093+Alfacito@users.noreply.github.com |
091a06320d8b36532cf76359942de1eec908b433 | 4a5aa5683a039a06a8af1fd21055eb4702e4dfdd | /same-tree.py | f30e147bd5d369bbd50ac46b75185312e13a5caa | [] | no_license | Fliv/my-leetcode | 8ed03461f492d874c90dc820909bec9ad659a963 | a45002fb06954fbff790317a9fbb6f79adebf367 | refs/heads/master | 2021-01-11T04:33:35.909269 | 2017-07-05T11:10:04 | 2017-07-05T11:10:04 | 71,137,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
if not p and not q:
return True
if not p and q or not q and p:
return False
if p.val != q.val:
return False
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
| [
"415071280@qq.com"
] | 415071280@qq.com |
da830d452062851dbe34843842810de0095e6f06 | 496ea91f6bcbcf0b1468f447107e5dc56bf571ef | /lesson/02_blog2/1_python/2_dictionary/dic2.py | a920d6c8e3a8bc3e8fe0df19589efc4a576b55fa | [] | no_license | remopro-pro/webbasic | 8e9d7d32f2df2b275b1012cc6e6d54f433c13186 | 1acf0f0db5861b9103475cdce0f8b7db39e9a97f | refs/heads/master | 2022-12-11T13:41:57.519862 | 2020-09-10T10:55:13 | 2020-09-10T10:55:13 | 279,002,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | user = {"name": "磯野", "age": 18}
user["name"] = "フグ田"
print(user["name"])
user["job"] = "会社員"
print(user)
| [
"hirfujit@yahoo-corp.jp"
] | hirfujit@yahoo-corp.jp |
737f7c4f3db32fbbc32c0d5f8ed335fc3f63e82b | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200504/example_egoist/walker.py | 519c662fb68e00489ebc5b0bbaa8f85170fd985e | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 42 | py | ../../20200503/example_metashape/walker.py | [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
a9f60f3ed1fe3f516a90a7101d86cf5d08986545 | 3b80ec0a14124c4e9a53985d1fa0099f7fd8ad72 | /realestate/urls.py | 11e290ebf235d7ae4d3ce6986f61c81f4176ded0 | [] | no_license | aayushgupta97/RealEstate_Django_TTN | ec4dde7aa3a1bcfa4d88adb5ea7ebb20127e7489 | 9af7c26c85c46ac5b0e3b3fad4a7b1067df20c47 | refs/heads/master | 2020-05-04T08:09:03.917026 | 2019-04-18T08:30:05 | 2019-04-18T08:30:05 | 179,041,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from pages import views as page_views
urlpatterns = [
path('properties/', include('properties.urls')),
path('', include('pages.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('contacts/', include('contacts.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
handler404 = page_views.handler404
# handler500 = page_views.handler500
| [
"aayushgupta2097@gmail.com"
] | aayushgupta2097@gmail.com |
fc2f5b4eaf1d9c7e2539b1ef43e5b12ba9fbe924 | 38fecea29fa82eb203fd964acd54ffacc7e4c388 | /chapter03/page048_colored_grid.py | 9a62621c8c535c213b8b8c6e2da4ef4c1286ade9 | [] | no_license | mjgpy3/mfp-python3-examples | 3c74f09c6155e9fbf35bd8ec104bdfe4429b9f4b | 09547141d25859fe93a6a0e70c828877ee93f736 | refs/heads/master | 2020-12-03T18:38:30.411800 | 2020-01-18T20:42:20 | 2020-01-18T20:42:20 | 231,431,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python3
from page040_grid import Grid
class ColoredGrid(Grid):
# Cannot do specific property setters in Python
def set_distances(self, distances):
self.distances = distances
farthest, self.maximum = distances.max()
def background_color_for(self, cell):
distance = self.distances[cell]
if not distance:
return (255, 255, 255)
intensity = float(self.maximum - distance) / self.maximum
dark = round(255 * intensity)
bright = 128 + round(127 * intensity)
return (dark, bright, dark)
| [
"mjg.py3@gmail.com"
] | mjg.py3@gmail.com |
c70e8a10944cccf8e5bdfde937bf2c8263a1b00e | 85f065d6eff8b04f412aab0a5c0615ced0efb408 | /third/mosesdecoder-RELEASE-3.0/scripts/training/wrappers/conll2mosesxml.py | 69ee4f73713f3d45fdc14e82c10b72cec238a4ed | [] | no_license | antot/posteditese_mtsummit19 | 976a099fe6ade6c0a57672c6f82d6f490fe18afa | b344d9134c749fab8681928da49f4894cf1fb696 | refs/heads/master | 2020-06-08T12:50:42.308562 | 2019-06-22T12:31:09 | 2019-06-22T12:31:09 | 193,231,110 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,589 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
# takes a file in the CoNLL dependency format (from the CoNLL-X shared task on dependency parsing; http://ilk.uvt.nl/conll/#dataformat )
# and produces Moses XML format. Note that the structure is built based on fields 9 and 10 (projective HEAD and RELATION),
# which not all parsers produce.
# usage: conll2mosesxml.py [--brackets] < input_file > output_file
from __future__ import print_function, unicode_literals
import sys
import re
import codecs
from collections import namedtuple,defaultdict
from lxml import etree as ET
Word = namedtuple('Word', ['pos','word','lemma','tag','head','func', 'proj_head', 'proj_func'])
def main(output_format='xml'):
sentence = []
for line in sys.stdin:
# process sentence
if line == "\n":
sentence.insert(0,[])
if is_projective(sentence):
write(sentence,output_format)
else:
sys.stderr.write(' '.join(w.word for w in sentence[1:]) + '\n')
sys.stdout.write('\n')
sentence = []
continue
try:
pos, word, lemma, tag, tag2, morph, head, func, proj_head, proj_func = line.split()
except ValueError: # word may be unicode whitespace
pos, word, lemma, tag, tag2, morph, head, func, proj_head, proj_func = re.split(' *\t*',line.strip())
word = escape_special_chars(word)
lemma = escape_special_chars(lemma)
if proj_head == '_':
proj_head = head
proj_func = func
sentence.append(Word(int(pos), word, lemma, tag2,int(head), func, int(proj_head), proj_func))
# this script performs the same escaping as escape-special-chars.perl in Moses.
# most of it is done in function write(), but quotation marks need to be processed first
def escape_special_chars(line):
line = line.replace('\'',''') # xml
line = line.replace('"','"') # xml
line = line.replace('[','[') # syntax non-terminal
line = line.replace(']',']') # syntax non-terminal
return line
# make a check if structure is projective
def is_projective(sentence):
dominates = defaultdict(set)
for i,w in enumerate(sentence):
dominates[i].add(i)
if not i:
continue
head = int(w.proj_head)
while head != 0:
if i in dominates[head]:
break
dominates[head].add(i)
head = int(sentence[head].proj_head)
for i in dominates:
dependents = dominates[i]
if max(dependents) - min(dependents) != len(dependents)-1:
sys.stderr.write("error: non-projective structure.\n")
return False
return True
def write(sentence, output_format='xml'):
if output_format == 'xml':
tree = create_subtree(0,sentence)
out = ET.tostring(tree, encoding = 'UTF-8').decode('UTF-8')
if output_format == 'brackets':
out = create_brackets(0,sentence)
out = out.replace('|','|') # factor separator
out = out.replace('&apos;',''') # lxml is buggy if input is escaped
out = out.replace('&quot;','"') # lxml is buggy if input is escaped
out = out.replace('&#91;','[') # lxml is buggy if input is escaped
out = out.replace('&#93;',']') # lxml is buggy if input is escaped
print(out)
# write node in Moses XML format
def create_subtree(position, sentence):
element = ET.Element('tree')
if position:
element.set('label', sentence[position].proj_func)
else:
element.set('label', 'sent')
for i in range(1,position):
if sentence[i].proj_head == position:
element.append(create_subtree(i, sentence))
if position:
if preterminals:
head = ET.Element('tree')
head.set('label', sentence[position].tag)
head.text = sentence[position].word
element.append(head)
else:
if len(element):
element[-1].tail = sentence[position].word
else:
element.text = sentence[position].word
for i in range(position, len(sentence)):
if i and sentence[i].proj_head == position:
element.append(create_subtree(i, sentence))
return element
# write node in bracket format (Penn treebank style)
def create_brackets(position, sentence):
if position:
element = "[ " + sentence[position].proj_func + ' '
else:
element = "[ sent "
for i in range(1,position):
if sentence[i].proj_head == position:
element += create_brackets(i, sentence)
if position:
word = sentence[position].word
tag = sentence[position].tag
if preterminals:
element += '[ ' + tag + ' ' + word + ' ] '
else:
element += word + ' ] '
for i in range(position, len(sentence)):
if i and sentence[i].proj_head == position:
element += create_brackets(i, sentence)
if preterminals or not position:
element += '] '
return element
if __name__ == '__main__':
if sys.version_info < (3,0,0):
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
if '--no_preterminals' in sys.argv:
preterminals = False
else:
preterminals = True
if '--brackets' in sys.argv:
main('brackets')
else:
main('xml')
| [
"a.toral.ruiz@rug.nl"
] | a.toral.ruiz@rug.nl |
ae3daa75051224632560f48bca4435780cfa44ae | 25f31909afa432e49b0a77fc469cd9d6e6d72d70 | /lab assingnments/ex3.py | ac8385051f315bd51e2ce999da6107490ff67fd6 | [] | no_license | VenkySVR/Python-DataStructures | 7d13e044b705fd232db9f0997981ee11f9fb88ad | bac0e1697f4da00b93c9fc879f027ddb7371e252 | refs/heads/master | 2021-04-19T19:52:05.154734 | 2020-05-09T10:50:57 | 2020-05-09T10:50:57 | 249,631,604 | 1 | 0 | null | 2020-05-06T17:14:54 | 2020-03-24T06:37:05 | Python | UTF-8 | Python | false | false | 1,363 | py | # problem 1
s = 'Beautiful palace'
print(s[:])
print(s[::])
first_five_chars = s[:5]
print(first_five_chars)
third_to_fifth_chars = s[2:5]
print(third_to_fifth_chars)
# problem 2
py_string = 'learn python'
slice_object1 = slice(-1, -6, -1)
slice_object2 = slice(1, 6, -1)
slice_object3 = slice(1, 6, 1)
print(py_string[slice_object1], py_string[slice_object2],py_string[slice_object3])
# Problem 3
s = 'Learning is FUN'
reverseString = s[::-1]
print(reverseString)
s1 = s[2:8:2]
print(s1)
s1 = s[8:1:-1]
print(s1)
s1 = s[-4:-2]
s1 = s[8:1:-2]
print(s1)
s1 = s[-4:-2]
print(s1)
s = 'Python'
s1=s[100:]
print(s1)
s1= s[2:50]
print(s1)
# Problem 4
py_string = 'Python book'
slice_object = slice(3)
print(py_string[slice_object])
slice_object = slice(1, 6, 2)
print(py_string[slice_object])
# Problem 5
py_list = ['P', 'y', 't', 'h', 'o', 'n']
py_tuple = ('P', 'y', 't', 'h', 'o', 'n')
slice_object = slice(3)
print(py_list[slice_object])
slice_object = slice(1, 5, 2)
print(py_tuple[slice_object])
slice_object = slice(-1, -4, -1)
print(py_list[slice_object])
slice_object = slice(-1, -5, -2)
print(py_tuple[slice_object])
# """
# output
# Beautiful palace
# Beautiful palace
# Beaut
# aut
# nohty earn
# NUF si gninraeL
# ann
# gninra
# nna
# F
# thon
# Pyt
# yhn
# ['P', 'y', 't']
# ('y', 'h')
# ['n', 'o', 'h']
# ('n', 'h')
# """ | [
"venky.s.vr13@gmail.com"
] | venky.s.vr13@gmail.com |
55d82652cc45328f27233924c8696a251ed8e11d | dc0f89b49e01a1885660779ad169fcb65889854a | /dictionaries.py | 48fe83757b1d1b5e121f083229520d8382063b7e | [] | no_license | quhao0994/python_work | 9fc99f14dd7a706903cf1172b3b881ce283c620d | bc6fbebcda8ca1542571787ea9e96a0490dc6919 | refs/heads/master | 2020-04-08T22:23:43.011660 | 2020-01-12T08:00:24 | 2020-01-12T08:00:24 | 159,785,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | monthConversions = {
"Jan":"January",
"Feb":"February",
"Mar":"March",
0:"zero"
}
print(monthConversions["Jan"])
print(monthConversions.get("Luv","not a valid key")) | [
"noreply@github.com"
] | quhao0994.noreply@github.com |
dc570a665616fbf6bb0a3acf033b6e2d00c998c3 | 0275a3c5b7477be0095999ff53c67e7eb9b1be47 | /separate_process.py | 8ff50f0bfa53473f67239ef68781b76a3455df60 | [] | no_license | amiecorso/scripts_simblock | 6800a91e717e86cba459235250f3d4f475b38081 | 975d954287d791e6a3125533c77b9ba0bae8392b | refs/heads/master | 2020-09-05T09:33:26.312161 | 2019-12-06T02:03:22 | 2019-12-06T02:03:22 | 220,058,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | import driver_simblock
RESULTS_DIR = "/Users/amiecorso/scripts/results/"
driver_simblock.process_results(RESULTS_DIR, "patch.csv")
| [
"acorso@cs.uoregon.edu"
] | acorso@cs.uoregon.edu |
fc7241d3c097b1d4ec4a17558bbeb948d4440761 | ccc8cd0979095de2d2644be06c596f33ed1b4072 | /integration_tests/suite/test_dird_documentation.py | bb9d3888040bc28b3f6d92bed419dd1c635abdbe | [] | no_license | TinxHQ/wazo-google | 13373b3b5357ceb0a1d62c129818176265a4acc9 | d8a115ddd7bd76595adaf834c87a043c96dc53a4 | refs/heads/master | 2021-03-22T05:19:33.535426 | 2019-08-07T14:51:38 | 2019-08-07T14:51:38 | 113,173,511 | 0 | 0 | null | 2019-08-07T14:51:39 | 2017-12-05T11:28:08 | Python | UTF-8 | Python | false | false | 736 | py | # Copyright 2019 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0-or-later
import pprint
import requests
from hamcrest import assert_that, empty
from .helpers.base import BaseTestCase
class TestDirdDocumentation(BaseTestCase):
asset = 'documentation'
def test_documentation_errors(self):
api_url = 'https://dird:9489/0.1/api/api.yml'
self._validate_api(api_url)
def _validate_api(self, url):
port = self.service_port(8080, 'swagger-validator')
validator_url = 'http://localhost:{port}/debug'.format(port=port)
response = requests.get(validator_url, params={'url': url})
assert_that(response.json(), empty(), pprint.pformat(response.json()))
| [
"pcm@wazo.io"
] | pcm@wazo.io |
4aaf7f9daeeb93706d4bbb8c3bd8d49f690c0c93 | d9b3289354d8f75ae8dd9988a89b08596bd4cae9 | /pgadmin/pgadmin/browser/server_groups/servers/resource_groups/__init__.py | 336fe7d01d25a73d9bfd68a2da083098d2be10c2 | [] | no_license | DataCraft-AI/pgdevops | 8827ab8fb2f60d97a22c03317903b71a12a49611 | f489bfb22b5b17255f85517cb1443846133dc378 | refs/heads/master | 2023-02-10T05:44:00.117387 | 2020-01-22T13:40:58 | 2020-01-22T13:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,471 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements Resource Groups for PPAS 9.4 and above"""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers as servers
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.utils import NodeView
from pgadmin.utils.ajax import make_json_response, \
make_response as ajax_response, internal_server_error, gone
from pgadmin.utils.ajax import precondition_required
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
from pgadmin.utils import IS_PY2
# If we are in Python3
if not IS_PY2:
unicode = str
class ResourceGroupModule(CollectionNodeModule):
"""
class ResourceGroupModule(CollectionNodeModule)
A module class for Resource Group node derived from CollectionNodeModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the ResourceGroupModule and it's base module.
* BackendSupported(manager, **kwargs)
- This function is used to check the database server type and version.
Resource Group only supported in PPAS 9.4 and above.
* get_nodes(gid, sid, did)
- Method is used to generate the browser collection node.
* node_inode()
- Method is overridden from its base class to make the node as leaf node.
* script_load()
- Load the module script for resource group, when any of the server node is
initialized.
"""
NODE_TYPE = 'resource_group'
COLLECTION_LABEL = gettext("Resource Groups")
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the ResourceGroupModule and it's base module.
Args:
*args:
**kwargs:
"""
super(ResourceGroupModule, self).__init__(*args, **kwargs)
self.min_ver = 90400
self.max_ver = None
self.server_type = ['ppas']
def get_nodes(self, gid, sid):
"""
Method is used to generate the browser collection node
Args:
gid: Server Group ID
sid: Server ID
"""
yield self.generate_browser_collection_node(sid)
@property
def node_inode(self):
"""
Override this property to make the node as leaf node.
Returns: False as this is the leaf node
"""
return False
@property
def script_load(self):
"""
Load the module script for resource group, when any of the server node is initialized.
Returns: node type of the server module.
"""
return servers.ServerModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
blueprint = ResourceGroupModule(__name__)
class ResourceGroupView(NodeView):
"""
class ResourceGroupView(NodeView)
A view class for resource group node derived from NodeView. This class is
responsible for all the stuff related to view like create/update/delete resource group,
showing properties of resource group node, showing sql in sql pane.
Methods:
-------
* __init__(**kwargs)
- Method is used to initialize the ResourceGroupView and it's base view.
* module_js()
- This property defines (if javascript) exists for this node.
Override this property for your own logic
* check_precondition()
- This function will behave as a decorator which will checks
database connection before running view, it will also attaches
manager,conn & template_path properties to self
* list()
- This function is used to list all the resource group nodes within that collection.
* nodes()
- This function will used to create all the child node within that collection.
Here it will create all the resource group node.
* properties(gid, sid, did, rg_id)
- This function will show the properties of the selected resource group node
* create(gid, sid, did, rg_id)
- This function will create the new resource group object
* update(gid, sid, did, rg_id)
- This function will update the data for the selected resource group node
* delete(self, gid, sid, rg_id):
- This function will drop the resource group object
* msql(gid, sid, did, rg_id)
- This function is used to return modified SQL for the selected resource group node
* get_sql(data, rg_id)
- This function will generate sql from model data
* sql(gid, sid, did, rg_id):
- This function will generate sql to show it in sql pane for the selected resource group node.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'}
]
ids = [
{'type': 'int', 'id': 'rg_id'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}]
})
def __init__(self, **kwargs):
"""
Method is used to initialize the ResourceGroupView and it's base view.
Also initialize all the variables create/used dynamically like conn, template_path.
Args:
**kwargs:
"""
self.conn = None
self.template_path = None
super(ResourceGroupView, self).__init__(**kwargs)
def module_js(self):
"""
This property defines (if javascript) exists for this node.
Override this property for your own logic.
"""
return make_response(
render_template(
"resource_groups/js/resource_groups.js",
_=gettext
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
This function will behave as a decorator which will checks
database connection before running view, it will also attaches
manager,conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = self.driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection()
# If DB not connected then return error to browser
if not self.conn.connected():
return precondition_required(
gettext(
"Connection to the server has been lost."
)
)
self.template_path = 'resource_groups/sql'
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid):
"""
This function is used to list all the resource group nodes within that collection.
Args:
gid: Server Group ID
sid: Server ID
"""
sql = render_template("/".join([self.template_path, 'properties.sql']))
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def node(self, gid, sid, rg_id):
"""
This function will used to create all the child node within that collection.
Here it will create all the resource group node.
Args:
gid: Server Group ID
sid: Server ID
"""
sql = render_template("/".join([self.template_path, 'nodes.sql']),
rgid=rg_id)
status, result = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=result)
if len(result['rows']) == 0:
return gone(gettext("""Could not find the resource group."""))
res = self.blueprint.generate_browser_node(
result['rows'][0]['oid'],
sid,
result['rows'][0]['name'],
icon="icon-resource_group"
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid):
"""
This function will used to create all the child node within that collection.
Here it will create all the resource group node.
Args:
gid: Server Group ID
sid: Server ID
"""
res = []
sql = render_template("/".join([self.template_path, 'nodes.sql']))
status, result = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=result)
for row in result['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
sid,
row['name'],
icon="icon-resource_group"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def properties(self, gid, sid, rg_id):
"""
This function will show the properties of the selected resource group node.
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the resource group."""))
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def create(self, gid, sid):
"""
This function will create the new resource group object
Args:
gid: Server Group ID
sid: Server ID
"""
required_args = [
'name'
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
"Could not find the required parameter (%s)." % arg
)
)
try:
# Below logic will create new resource group
sql = render_template("/".join([self.template_path, 'create.sql']), rgname=data['name'], conn=self.conn)
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
# Below logic will update the cpu_rate_limit and dirty_rate_limit for resource group
# we need to add this logic because in resource group you can't run multiple commands in one transaction.
sql = render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
# Checking if we are not executing empty query
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
# Below logic is used to fetch the oid of the newly created resource group
sql = render_template("/".join([self.template_path, 'getoid.sql']), rgname=data['name'])
# Checking if we are not executing empty query
rg_id = 0
if sql and sql.strip('\n') and sql.strip(' '):
status, rg_id = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=rg_id)
return jsonify(
node=self.blueprint.generate_browser_node(
rg_id,
sid,
data['name'],
icon="icon-resource_group"
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, rg_id):
"""
This function will update the data for the selected resource group node
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
required_args = [
'name', 'cpu_rate_limit', 'dirty_rate_limit'
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
old_data = res['rows'][0]
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
if data['name'] != old_data['name']:
sql = render_template("/".join([self.template_path, 'update.sql']),
oldname=old_data['name'], newname=data['name'], conn=self.conn)
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
# Below logic will update the cpu_rate_limit and dirty_rate_limit for resource group
# we need to add this logic because in resource group you can't run multiple commands
# in one transaction.
if (data['cpu_rate_limit'] != old_data['cpu_rate_limit']) \
or (data['dirty_rate_limit'] != old_data['dirty_rate_limit']):
sql = render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
if sql and sql.strip('\n') and sql.strip(' '):
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
return jsonify(
node=self.blueprint.generate_browser_node(
rg_id,
sid,
data['name'],
icon="icon-%s" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, rg_id):
"""
This function will drop the resource group object
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
try:
# Get name for resource group from rg_id
sql = render_template("/".join([self.template_path, 'delete.sql']), rgid=rg_id, conn=self.conn)
status, rgname = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=rgname)
if rgname is None:
return make_json_response(
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified resource group could not be found.\n'
)
)
# drop resource group
sql = render_template("/".join([self.template_path, 'delete.sql']), rgname=rgname, conn=self.conn)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Resource Group dropped"),
data={
'id': rg_id,
'sid': sid,
'gid': gid,
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, rg_id=None):
"""
This function is used to return modified SQL for the selected resource group node
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
data = dict()
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
sql, name = self.get_sql(data, rg_id)
# Most probably this is due to error
if not isinstance(sql, (str, unicode)):
return sql
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
def get_sql(self, data, rg_id=None):
"""
This function will generate sql from model data
Args:
data: Contains the value of name, cpu_rate_limit, dirty_rate_limit
rg_id: Resource Group Id
"""
required_args = [
'name', 'cpu_rate_limit', 'dirty_rate_limit'
]
if rg_id is not None:
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("The specified resource group could not be found.")
)
old_data = res['rows'][0]
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
sql = ''
name_changed = False
if data['name'] != old_data['name']:
name_changed = True
sql = render_template("/".join([self.template_path, 'update.sql']),
oldname=old_data['name'], newname=data['name'], conn=self.conn)
if (data['cpu_rate_limit'] != old_data['cpu_rate_limit']) \
or data['dirty_rate_limit'] != old_data['dirty_rate_limit']:
if name_changed:
sql += "\n-- Following query will be executed in a separate transaction\n"
sql += render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
else:
sql = render_template("/".join([self.template_path, 'create.sql']), rgname=data['name'], conn=self.conn)
if ('cpu_rate_limit' in data and data['cpu_rate_limit'] > 0) \
or ('dirty_rate_limit' in data and data['dirty_rate_limit'] > 0):
sql += "\n-- Following query will be executed in a separate transaction\n"
sql += render_template("/".join([self.template_path, 'update.sql']), data=data, conn=self.conn)
return sql, data['name'] if 'name' in data else old_data['name']
@check_precondition
def sql(self, gid, sid, rg_id):
"""
This function will generate sql for sql pane
Args:
gid: Server Group ID
sid: Server ID
rg_id: Resource Group ID
"""
sql = render_template("/".join([self.template_path, 'properties.sql']), rgid=rg_id)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("The specified resource group could not be found.")
)
# Making copy of output for future use
old_data = dict(res['rows'][0])
sql = render_template("/".join([self.template_path, 'create.sql']), display_comments=True,
rgname=old_data['name'], conn=self.conn)
sql += "\n"
sql += render_template("/".join([self.template_path, 'update.sql']), data=old_data, conn=self.conn)
return ajax_response(response=sql)
ResourceGroupView.register_node_view(blueprint)
| [
"denis@lussier.io"
] | denis@lussier.io |
00aae4ffd01e0585a48b1869dd2c4e3f247a73a2 | 13911dd9a588439ba96fe810ffb64e1a20bfb3d1 | /part2/.~c9_invoke_Cz2UO2.py | ef2992c5635c3941c2e10d3bf9e5f5f83ff0ec42 | [] | no_license | derrickeckardt/tweet-classification | e1997e0f2c4c743c8054c5c7424e686910673ee6 | 558663eb2aeccc14551ffcb1af5f341a56c60759 | refs/heads/master | 2020-10-01T02:21:25.035216 | 2018-11-27T05:11:16 | 2018-11-27T05:11:16 | 227,432,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | #!/usr/bin/env python
#
# CS B551 - Elements of AI
# Indiana University, Fall 2018
# Assignment 2, Part 2 - Tweet classification
#
# Completed by Derrick Eckardt
# derrick@iu.edu
# import libraries
# NOTE(review): this is a Python 2 work-in-progress file (a Cloud9 autosave,
# per the ".~c9_invoke_" filename); most of it is commented-out experiments.
import sys
import pandas as pd
from collections import Counter
# Command-line contract: <training file> <testing file> <output file>.
training_file, testing_file, output_file = [sys.argv[1],sys.argv[2],sys.argv[3]]
# open import file
training_data, testing_data =[], []
training_locations, items = [],[]
training_dict = {}
# Each testing row becomes a flat token list: [location, word, word, ...].
with open(testing_file, 'r') as file:
    for line in file:
        testing_data.append([ str(i) for i in line.split() ])
with open(training_file, 'r') as file:
    for line in file:
        # Loads as list of lists of lists
        if any(line.split()[0] == city[0] for city in training_data):
            # NOTE(review): training_data holds [city, tokens] pairs, so
            # .index(line.split()[0]) searches for a bare string and will
            # raise ValueError whenever this branch runs; `test` below is
            # also never used. TODO: merge tokens into the existing entry.
            print training_data[training_data.index(line.split()[0])]
            test = [line.split()[0], training_data[training_data.index(line.split()[0])][1]+[ str(i) for i in line.split()[1:] ]]
        else:
            training_data.append([line.split()[0], [str(i) for i in line.split()[1:]]])
print len(training_data)
print training_data
# Loads as dictiorary - presorted, takes a lot of time to do so. it takes
# almost two minutes to do so. not erribly efficient
# if line.split()[0] in training_dict.keys():
#     training_dict[line.split()[0]]['tweet_count'] += 1
# else:
#     training_dict[line.split()[0]] = {}
#     training_dict[line.split()[0]]['tweet_count'] = 1
# for token in line.split()[1:]:
#     # print token
#     if token in training_dict[line.split()[0]].keys():
#         training_dict[line.split()[0]][token] += 1
#     else:
#         training_dict[line.split()[0]][token] = 1
# Debug peek at the first five parsed training entries.
for i in range(5):
    print len(training_data[i])," ",training_data[i]
# print training_dict['Boston,_MA']['tweet_count']
# print training_dict['Boston,_MA']['Boston']
# print len(training_data)
# print len(testing_data)
# Get unique locations and unique words
# Generate look-up tables for each word.
# frequency_table = pd.DataFrame({'token':[]})
# print frequency_table
# Originally I was planning to build a frequency table by counting all the cities
# and all of the words
# now, i think I rather just generate those on the fly, perhaps
# locations = []
# tokens = []
# for tweet in training_data[0:10000]:
#     locations = list(set(locations) | set([tweet[0]]))
#     tokens = list(set(tokens) | set(tweet[1:]))
#     # if tweet[0] not in locations
#     #     locations.append(tweet[0])
#     # for token in tweet[1:]:
#     #     if token not in tokens:
#     #         tokens.append(token)
# print locations
# print len(locations)
# print len(tokens)
# Filter out stop words
# may be best to do it during file-read in
# Generate look-up tables:
| [
"derrick.eckardt@gmail.com"
] | derrick.eckardt@gmail.com |
de0beb1610545ee78ac1dcc707d7fc40e2c1a0fb | 748bbab674d1a5ae6a59bfd4ac22efcb4355e82a | /Prog-II/Back_Front/back/modelo.py | 500e048c4dda6a3d2bb759c389dc9ab5b947b11b | [] | no_license | Lima001/Tecnico-IFC | 8819114a35080eb914a2d836a0accbf79d3268d8 | 771fa39dd6046a9d92860fbde70c10dcecd975a3 | refs/heads/master | 2021-02-07T01:31:04.929420 | 2020-06-24T02:09:38 | 2020-06-24T02:09:38 | 243,967,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from peewee import *
# SQLite database file shared by every model below ("arq" = file, pt-BR).
arq = "dados.db"
db = SqliteDatabase(arq)
class BaseModel(Model):
    """Base peewee model binding all subclasses to the SQLite database."""
    class Meta:
        database = db
class Cachorro(BaseModel):
    """A dog record ("cachorro" = dog): name, age and breed."""
    nome = CharField()  # dog's name
    idade = IntegerField()  # age in years
    raca = CharField()  # breed
if __name__ == "__main__":
    # Demo: connect, ensure the table exists, insert one row and echo it.
    db.connect()
    db.create_tables([Cachorro])
    rex = Cachorro.create(nome="Rex", idade=1, raca="Pastor Alemao")
    print("|".join([rex.nome, str(rex.idade), rex.raca]))
"limaedugabriel@gmail.com"
] | limaedugabriel@gmail.com |
0dabd218576ed96dbe4a021fce762f03727b90ae | b4948c322401435a02370dd96708399fda4a48fc | /demo/simple_code/test_pass.py | 453fa756db68194cdd14c29692c9fa5fb24807be | [] | no_license | fengbingchun/Python_Test | 413e2c9bb844a5b3641e6e6daf37df277589006e | eaedcd55dbc156b685fa891538e1120ea68fa343 | refs/heads/master | 2023-06-21T02:28:07.310364 | 2023-06-11T04:46:29 | 2023-06-11T04:46:29 | 99,814,791 | 7 | 6 | null | 2022-09-30T00:38:06 | 2017-08-09T14:01:48 | C++ | UTF-8 | Python | false | false | 428 | py | # Blog: https://blog.csdn.net/fengbingchun/article/details/125242357
# Demonstrates Python's `pass` placeholder statement in four contexts:
# empty function, empty class, loop body and conditional branch.
# 1. empty function
def func():
    pass # remember to implement this
func()
# 2. empty class
class fbc:
    pass
fbc()
# 3. loop
num = 5
for i in range(num):
    pass
# 4. conditional statement
a = 5
b = 10
if (a < b):
    pass
else:
    print("b<=a")
# `pass` skips the letter "h"; every other letter is printed on one line.
for letter in "Python3":
    if letter == "h":
        pass
    else:
        print("", letter, end="")
print("\ntest finish")
| [
"fengbingchun@163.com"
] | fengbingchun@163.com |
032d7f9ebb27b22f4c6751ff173017cf28bac592 | 8c74fad72d58787895d5397737990e37e3a4756d | /test/test.py | 63a3dbcc48c48169972a8a722b3aed5f2a572888 | [] | no_license | xiaoye-hua/kaggle-TalkingData-AdTracking-Fraud-Detection-Challenge | b36ebb98af6d25cd7110fe0f2e508818c713a78f | 1d54384fbd1ef3badf649f016ffa89a1901c0e04 | refs/heads/master | 2020-03-14T22:42:38.083160 | 2018-05-06T15:27:06 | 2018-05-06T15:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | print('haha')
| [
"huag@kth.se"
] | huag@kth.se |
fa791cbb653d2472098d0a3b028680e2bc9b6414 | 61432a6d3b25e5b3142fe1f154acf5764bc2d596 | /custom_report/controllers/controllers.py | 0d654793e8486cc3dde196ee71832650723dcae7 | [] | no_license | halltech-ci/tfc_agro | 8c2c7911901e8c7bcf548fb05ca8f7891ab4ef51 | a737dfdccfca51136cb01894a00f21f5365a771a | refs/heads/master_1 | 2020-12-22T08:59:40.507801 | 2020-08-17T21:20:18 | 2020-08-17T21:20:18 | 236,734,216 | 0 | 3 | null | 2020-05-09T23:19:24 | 2020-01-28T12:50:00 | Python | UTF-8 | Python | false | false | 788 | py | # -*- coding: utf-8 -*-
from odoo import http
# class CustomReport(http.Controller):
# @http.route('/custom_report/custom_report/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/custom_report/custom_report/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('custom_report.listing', {
# 'root': '/custom_report/custom_report',
# 'objects': http.request.env['custom_report.custom_report'].search([]),
# })
# @http.route('/custom_report/custom_report/objects/<model("custom_report.custom_report"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('custom_report.object', {
# 'object': obj
# }) | [
"maurice.atche@halltech-africa.com"
] | maurice.atche@halltech-africa.com |
23e561b64b11252fe6b05b45353e2c3062ea8061 | 5d542e06668faebe3c391661215227a39e303f23 | /COPM1405/comp1405_f17_101071063_a3.py | 4bdb83008a31c13b3ac9661fe21235f2d61ddcb7 | [] | no_license | zivvvvvwang/COMP1405_Assignment | a565b345cdad54166f7810dc97d3f92d0ec57cc1 | 70902a838b85122bd05afd420344102d0ab80857 | refs/heads/master | 2021-01-03T02:58:18.972703 | 2020-02-12T00:25:29 | 2020-02-12T00:25:29 | 239,892,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | # ============================================================
#
# Student Name (as it appears on cuLearn): ziwen wang
# Student ID (9 digits in angle brackets): <101071063>
# Course Code (for this current semester): COMP1405A
#
# ============================================================
from comp1405_f17_assistant_a3 import *
def decision_making_function(e): # 'e' IS THE SHAPE ARGUMENT YOU MUST PASS TO YOUR PERMITTED FUNCTIONS
    """Decide how shape ``e`` is routed.

    Returns a (send_down, send_left, send_right) tuple of booleans.
    """
    # Down: blue squares, or purple crosses.
    condition_for_sending_down = color_is_blue(e) and wrapped_in_a_square(e) or (color_is_purple(e) and wrapped_in_a_cross(e))
    # Left: red triangles/circles or orange circles, whose value divides by 3.
    # BUG FIX: `wrapped_in_a_circle` was missing its `(e)` call, so the bare
    # function object was always truthy and ANY orange shape divisible by 3
    # was sent left.
    condition_for_sending_left = ((color_is_red(e) and (wrapped_in_a_triangle(e) or wrapped_in_a_circle(e))) or (color_is_orange(e) and wrapped_in_a_circle(e))) and divides_evenly_by(e, 3)
    # Right: red crosses, or orange squares.
    condition_for_sending_right = color_is_red(e) and wrapped_in_a_cross(e) or (color_is_orange(e) and wrapped_in_a_square(e))
    return (condition_for_sending_down, condition_for_sending_left, condition_for_sending_right)
# Hand the decision function to the course harness, which runs the simulation.
run_the_program(decision_making_function)
| [
"noreply@github.com"
] | zivvvvvwang.noreply@github.com |
79610c15babdc1b86af4beba5ba9479ee4ae7927 | dffd4899b346fb48fbb1cfe529e6e17d3196676a | /select.py | af726ff92c19f8ed56376e2df0ab37c645372994 | [] | no_license | MySonIsZhaGou/sorting | 3166c03cf2f983905b5a88dd1146a4fcf01940ea | c6600f1479b6f76b7010561a1de07d496c8b0e06 | refs/heads/master | 2021-01-23T03:59:56.510511 | 2017-04-06T01:36:14 | 2017-04-06T01:36:14 | 86,145,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from numpy import random
# Ten random integers in [0, 100) to demonstrate the sort on (Python 2 script).
array=random.randint(0,100,10)
print array
def select(array):
    """Sort ``array`` in place via selection sort and return it.

    Works on any mutable, indexable sequence (list, numpy array, ...).

    BUG FIX: the original only tracked the minimum *value* and set the swap
    index ``temp`` inside the inner ``if``; when position ``i`` already held
    the minimum, the ``if`` never fired and the swap used a stale (or, on
    the first pass, undefined) index -- e.g. a NameError on already-sorted
    input.
    """
    for i in range(len(array)):
        # Locate the index of the smallest remaining element.
        min_idx = i
        for j in range(i + 1, len(array)):
            if array[j] < array[min_idx]:
                min_idx = j
        array[i], array[min_idx] = array[min_idx], array[i]
    return array
# Sort the demo array in place and echo the result (Python 2 print).
print select(array)
"noreply@github.com"
] | MySonIsZhaGou.noreply@github.com |
211b498888127f1226c08e00b409a8281cbe53d4 | 27953af4264bf68ccfeef3eac7b31f7b401e6345 | /DeepSwapPackages/image_augmentation.py | 89a78ad2727d943d4a586015e25a1e6d3bab158d | [] | no_license | rezabonyadi/deep_end_to_end_face_swap | ad0d931060fc51da0b99ffc51bffc69b1b62baa0 | 11fbb66c83e9fb41f22f56bad35e27fe8f8a3c42 | refs/heads/master | 2021-07-07T11:06:18.341839 | 2020-08-23T03:15:46 | 2020-08-23T03:15:46 | 174,415,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | import cv2
import numpy
from DeepSwapPackages.umeyama import umeyama
# from skimage.transform import warp, AffineTransform
#
# def randRange(a, b):
# '''
# a utility functio to generate random float values in desired range
# '''
# return numpy.random.rand() * (b - a) + a
#
#
# def randomAffine(im, rotation_range, zoom_range, shift_range, random_flip):
# '''
# wrapper of Affine transformation with random scale, rotation, shear and translation parameters
# '''
# sc = randRange(1-zoom_range, 1+zoom_range)
# tform = AffineTransform(scale=(sc, sc),
# rotation=randRange(-rotation_range, rotation_range),
# # shear=randRange(-0.2, 0.2),
# translation=(randRange(-im.shape[0]*shift_range, im.shape[0]*shift_range),
# randRange(-im.shape[1]*shift_range, im.shape[1]*shift_range)))
# warped_image = cv2.resize(warp(im, tform.inverse, mode='reflect'), (64, 64))
# target_image = cv2.resize(im, (64, 64))
#
# return warped_image, target_image
def random_transform(image, rotation_range, zoom_range, shift_range, random_flip):
    """Apply a random rotation/zoom/shift (and maybe a horizontal flip).

    The RNG draws happen in a fixed order (rotation, zoom, tx, ty, flip),
    so for a given numpy seed the output matches the original routine.
    """
    height, width = image.shape[:2]
    # Draw the affine parameters.
    angle = numpy.random.uniform(-rotation_range, rotation_range)
    zoom = numpy.random.uniform(1 - zoom_range, 1 + zoom_range)
    shift_x = numpy.random.uniform(-shift_range, shift_range) * width
    shift_y = numpy.random.uniform(-shift_range, shift_range) * height
    # Rotation + scale about the image centre, then add the translation.
    warp_mat = cv2.getRotationMatrix2D((width // 2, height // 2), angle, zoom)
    warp_mat[:, 2] += (shift_x, shift_y)
    warped = cv2.warpAffine(image, warp_mat, (width, height), borderMode=cv2.BORDER_REPLICATE)
    # Mirror horizontally with probability `random_flip`.
    if numpy.random.random() < random_flip:
        warped = warped[:, ::-1]
    return warped
# get pair of random warped images from aligened face image
def random_warp( image ):
    """Return (warped_image, target_image) crops for a 256x256x3 aligned face.

    A coarse 5x5 grid of control points is jittered with Gaussian noise to
    produce a randomly warped 64x64 crop, plus the 64x64 affine-aligned
    "ground truth" crop derived from the same control points.
    """
    assert image.shape == (256,256,3)
    # 5x5 grid of control points spanning the central 160px of the image.
    range_ = numpy.linspace( 128-80, 128+80, 5 )
    mapx = numpy.broadcast_to( range_, (5,5) )
    mapy = mapx.T
    # Jitter each control point (sigma = 5 px).
    mapx = mapx + numpy.random.normal( size=(5,5), scale=5 )
    mapy = mapy + numpy.random.normal( size=(5,5), scale=5 )
    # Upsample the coarse maps and keep the interior to get per-pixel lookups.
    interp_mapx = cv2.resize( mapx, (80,80) )[8:72,8:72].astype('float32')
    interp_mapy = cv2.resize( mapy, (80,80) )[8:72,8:72].astype('float32')
    warped_image = cv2.remap( image, interp_mapx, interp_mapy, cv2.INTER_LINEAR )
    # Best-fit similarity transform (umeyama) from the jittered points back to
    # a regular 5x5 grid on [0, 64]; used to cut the un-warped target crop.
    src_points = numpy.stack( [ mapx.ravel(), mapy.ravel() ], axis=-1 )
    dst_points = numpy.mgrid[0:65:16,0:65:16].T.reshape(-1,2)
    mat = umeyama( src_points, dst_points, True )[0:2]
    target_image = cv2.warpAffine( image, mat, (64,64) )
    return warped_image, target_image
| [
"Rezabny@gmail.com"
] | Rezabny@gmail.com |
3771885ffc07cb6353782e4ba2a5ce9c33480434 | d2af44cdecdf48f9edb052fdc29d406edb2ccfad | /utils/metrics.py | e82dd9cda80f23613143a4e8ab4e3d258bf361d2 | [] | no_license | zx3Leonoardo/3d-medical | e3122f9e1f4602add26f97d9c258c5ff63a5b54c | 9ec045fbbb0980679fb624db769cf096ed1ab8f8 | refs/heads/main | 2023-07-16T20:03:42.947066 | 2021-08-26T02:17:15 | 2021-08-26T02:17:15 | 390,895,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class LossAverage(object):
def __init__(self) -> None:
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = round(self.sum / self.count, 4)
class DiceAverage(object):
    """Per-class running Dice score for 5-D (N, C, D, H, W) segmentations."""

    def __init__(self, class_num) -> None:
        self.class_num = class_num
        self.reset()

    def reset(self):
        """Zero out the per-class statistics."""
        self.value = np.asarray([0] * self.class_num, dtype='float64')
        self.avg = np.asarray([0] * self.class_num, dtype='float64')
        self.sum = np.asarray([0] * self.class_num, dtype='float64')
        self.count = 0

    def update(self, logits, targets):
        """Accumulate one batch's Dice and refresh the running mean."""
        self.value = DiceAverage.get_dices(logits, targets)
        self.sum += self.value
        self.count += 1
        self.avg = np.around(self.sum / self.count, 4)

    @staticmethod
    def get_dices(logits, targets):
        """Per-class Dice between argmax(logits) and one-hot `targets`.

        logits, targets: (N, C, D, H, W) tensors; `targets` one-hot along C.

        BUG FIX: the original computed the overlap of the prediction with
        *itself* (`res * res`, `res + res`) and never used `targets`, so
        every Dice score came out as ~1 regardless of accuracy. The overlap
        is now computed against `targets`.
        """
        dices = []
        # Binarise: one-hot of the per-voxel argmax over the class dimension.
        max_element, _ = logits.max(dim=1)
        max_element = max_element.unsqueeze(1).repeat(1, logits.size()[1], 1, 1, 1)
        ge = torch.ge(logits, max_element)
        one = torch.ones_like(logits)
        zero = torch.zeros_like(logits)
        res = torch.where(ge, one, zero)
        for class_id in range(res.size()[1]):
            pred = res[:, class_id, :, :, :]
            truth = targets[:, class_id, :, :, :]
            inter = torch.sum(pred * truth)
            union = torch.sum(pred + truth)
            # +1 smoothing keeps the score defined when a class is empty.
            dice = (2. * inter + 1) / (union + 1)
            dices.append(dice.item())
        return np.asarray(dices)
"57-qinzixin@163.com"
] | 57-qinzixin@163.com |
37a4e1551a9674af83ec7b85906f69f911224ef7 | 1ffaa803ef08794a25cd0f74f738222bcd9f7e76 | /Python_Prog/Largest_number.py | 42dceb426a78415087d0a426a6f78233eb6beed5 | [] | no_license | vysagh00/Python-programs | 75d188809eb1ae78eeb59f377ef24a06d13a9337 | c1b2b64115ebd46e91133109eb90dea60bd3a5b0 | refs/heads/main | 2023-08-26T12:34:00.783796 | 2021-10-28T16:59:40 | 2021-10-28T16:59:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
num3 = int(input("Enter third number: "))
# Pick the largest of the three (ties resolve to an equal value, so the
# printed result is always the maximum).
largest = num1
if num2 >= num1 and num2 >= num3:
    largest = num2
elif num3 >= num1 and num3 >= num2:
    largest = num3
print("The largest number is", largest)
| [
"noreply@github.com"
] | vysagh00.noreply@github.com |
a1607a63fe213a683271e64ccc48df84cac8cdf2 | 1a7dee4f8328a6be57e867d4bb165d176b4c2e1a | /laplace_v2.py | 8d7f5ff176d3e1694a31d55bb2a41bceb8a77906 | [] | no_license | Atroxon/laplace | 9c2513ac16c6d0aa888e0ab6acdd1508303be334 | 26302cb2ca176e86bf7dbb32fbe93ce17fceba15 | refs/heads/main | 2023-01-14T08:42:16.242661 | 2020-11-10T23:30:06 | 2020-11-10T23:30:06 | 311,795,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,102 | py | import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import copy
# Input image(s) to process.
test_files = ['images/3.jpeg']
# 3x3 box (smoothing) kernel -- NOTE: unnormalised, sums to 9.
AVG_kernel = np.array([[1,1,1],[1,1,1],[1,1,1]])
# 4-neighbour Laplacian kernel.
L_kernel = np.array([[0,1,0],[1,-4,1],[0,1,0]])
# 8-neighbour Laplacian (edge) kernel.
E_kernel = np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]])
def main():
    """Smooth each test image, apply both Laplacian kernels, plot results."""
    open_imgs = import_images(paths=test_files)
    derivatives = list()
    # First kernel
    for img in open_imgs:
        average_matrix = convolution_2D(img, AVG_kernel)
        laplace_matrix = convolution_2D(average_matrix, L_kernel)
        absolute_matrix = absolute(laplace_matrix)
        normalized_matrix = min_max(absolute_matrix,(0,255))
        derivatives.append(normalized_matrix)
    # for i in range(len(open_imgs)):
    #     plot_derivatives(open_imgs[i],derivatives[i])
    # Second kernel
    for img in open_imgs:
        average_matrix = convolution_2D(img, AVG_kernel)
        laplace_matrix = convolution_2D(average_matrix, E_kernel)
        absolute_matrix = absolute(laplace_matrix)
        normalized_matrix = min_max(absolute_matrix,(0,255))
        derivatives.append(normalized_matrix)
    # NOTE(review): the [0]/[1] indexing below assumes exactly one input
    # image (both kernels append into the same `derivatives` list).
    plot_derivatives(open_imgs[0],derivatives[0])
    plot_derivatives2(open_imgs[0],derivatives[1])
    plt.show()
def import_images(paths=None):
    """
    Import images from input paths
    inputs:
        paths: list containing paths to images
    output:
        imgs: list containing cv2 objects (grayscale arrays)
    """
    imgs = list()
    # NOTE(review): dummy() is not defined anywhere in this module, so
    # calling import_images() without paths raises NameError -- TODO confirm
    # whether a placeholder image generator was intended here.
    if paths == None:
        out = dummy()
        imgs.append(out)
        test = copy.deepcopy(imgs)
        return test
    for path in paths:
        imgs.append(cv.imread(path, cv.IMREAD_GRAYSCALE))
    return imgs
def convolution_2D(image, kernel, padding=0, strides=1):
    """Valid 2-D filtering of `image` with `kernel` (cross-correlation).

    Args:
        image: 2-D array of intensities.
        kernel: 2-D filter, applied without flipping -- fine for the
            symmetric kernels used in this module.
        padding: rows/columns of zeros added on every side before filtering.
        strides: accepted for API compatibility; only stride 1 is
            implemented (as in the original).

    Returns:
        int16 array of shape (H + 2*padding - kH + 1, W + 2*padding - kW + 1).

    Fixes vs the original: debug prints removed, and the output size uses
    `k - 1` instead of `2 * (k // 2)` so an even-sized kernel no longer
    reads past the padded image (results for odd kernels are unchanged).
    """
    k_rows, k_cols = kernel.shape
    # Zero-pad the image on all four sides.
    padded = np.zeros((image.shape[0] + 2 * padding, image.shape[1] + 2 * padding), np.int16)
    padded[padding:padding + image.shape[0], padding:padding + image.shape[1]] = image
    out_rows = padded.shape[0] - (k_rows - 1)
    out_cols = padded.shape[1] - (k_cols - 1)
    out = np.zeros((out_rows, out_cols), np.int16)
    for row in range(out_rows):
        for col in range(out_cols):
            window = padded[row:row + k_rows, col:col + k_cols]
            out[row, col] = np.sum(kernel * window)
    return out
def plot_derivatives(image, matrix):
    """Show `image` and its filtered `matrix` side by side in grayscale."""
    popFig,popAxs=plt.subplots(1,2)
    popFig.suptitle("Grayscale ")
    popAxs[0].imshow(image, cmap='gray')
    popAxs[1].imshow(matrix, cmap='gray')
    return
def plot_derivatives2(image, matrix):
    """Side-by-side grayscale plot of `image` and `matrix`.

    The original body was a line-for-line copy of plot_derivatives (only
    the local figure/axes names differed), so it now simply delegates.
    """
    plot_derivatives(image, matrix)
    return
def absolute(matrix):
    """Return the element-wise absolute value of `matrix` as int16.

    Replaces the original per-pixel Python loop with the vectorised numpy
    equivalent; the truncating cast into int16 matches the original's
    assignment into an int16 output buffer.
    """
    return np.abs(matrix).astype(np.int16)
def min_max(matrix, boundaries):
    """Min-max rescale `matrix` into [lower, upper] and cast to uint8.

    Fixes vs the original: the formula now subtracts the data minimum and
    adds the lower bound -- (x - min) * span / (max - min) + lower. The
    original computed (x - lower) * span / (max - min), which does not map
    values into the requested range (its own "# todo eso +LBound" comment
    acknowledged the missing term) and could wrap around inside the uint8
    output. A flat input (max == min) no longer divides by zero, and the
    debug print is gone.
    """
    lower, upper = boundaries
    m = matrix.min()
    M = matrix.max()
    if M == m:
        # Degenerate (constant) input: map everything to the lower bound.
        return np.full(matrix.shape, lower, np.uint8)
    scaled = (matrix - m) * (upper - lower) / (M - m) + lower
    # Truncating cast, matching assignment into a uint8 buffer.
    return scaled.astype(np.uint8)
# Run the demo pipeline only when executed as a script.
if __name__ == '__main__':
    main()
"danielsalinas.t@gmail.com"
] | danielsalinas.t@gmail.com |
1c76c0d73c6d00dda9f771fd4eb96c5024ac5792 | 0ab40aa11442ef5868438844ca193a88cc2ab0af | /Crosstalk/analyze_cross_talk.py | 10c923b7c414428f9d01a510ade08e2d6b0559f8 | [] | no_license | nischalmishra/TEMPO_python | 2d85b0a401e776e4a1ae65920bd7553a3896170a | 643a9577fd6686ec32d85205b5988ec757eec4c8 | refs/heads/master | 2020-07-20T10:20:40.333931 | 2019-09-05T14:26:36 | 2019-09-05T14:26:36 | 206,623,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,238 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 10:38:50 2017
@author: nmishra
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.io.idl import readsav
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
def read_outlier_mask():
    """Read the 2048x2048 outlier-mask CSV and split it into the four quads.

    Returns:
        [quad_a, quad_b, quad_c, quad_d] -- four 1024x1024 arrays.

    Fix vs the original: the one-line description sat *after* the function
    as a stray module-level string; it has been folded into this docstring.
    """
    outlier_mask = np.genfromtxt(r'C:\Users\nmishra\Workspace\TEMPO\outlier_mask\final_outlier_mask_2_sigma.csv', delimiter=',')
    quad_a = outlier_mask[0:1024, 0:1024]
    quad_b = outlier_mask[1024:, 0:1024]
    quad_c = outlier_mask[1024:, 1024:]
    quad_d = outlier_mask[0:1024, 1024:]
    return [quad_a, quad_b, quad_c, quad_d]
def filter_outlier_median(quads):
    """Drop outliers using a median-absolute-deviation style test.

    Flattens `quads` (any dimensionality), scores each sample by
    |x - median| / median(|x - median|), and keeps samples scoring below 6.
    Returns the kept samples as a 1-D array, in input order.
    """
    # One reshape handles 1-D/2-D/3-D inputs alike, replacing the original's
    # explicit ndim bookkeeping.
    hist_data = np.asarray(quads).reshape(-1, 1)
    diff = abs(hist_data - np.median(hist_data))  # distance to the median
    median_diff = np.median(diff)
    # Guard kept from the original: a zero MAD (all-identical data) would
    # otherwise divide by zero.
    measured_threshold = diff / median_diff if median_diff else 0.
    outlier_filtered_data = hist_data[measured_threshold < 6.]
    return outlier_filtered_data
def perform_bias_subtraction_ave(active_quad, trailing_overclocks):
    """Subtract the per-row serial-overclock offset from an active quad.

    Even and odd columns (the "ping"/"pong" video chains) are corrected
    independently: for each parity the first 4 overclock samples (hot
    lines) are dropped -- plus the last odd sample -- and the remaining
    samples are averaged per row and subtracted from the matching columns.

    FIX vs the original: the output buffer was built from Python ints
    (integer dtype), silently truncating the fractional part of every
    bias-subtracted value; it is now float, consistent with
    perform_bias_subtraction_ave_sto.
    """
    nx_quad, ny_quad = active_quad.shape
    # Per-row offset for the even ("ping") chain; skip the 4 hot samples.
    even_detector_bias = trailing_overclocks[:, ::2][:, 4:]
    avg_bias_even = np.mean(even_detector_bias, axis=1)
    # Per-row offset for the odd ("pong") chain; skip the 4 hot samples and
    # the last sample.
    odd_samples = trailing_overclocks[:, 1::2][:, 4:]
    odd_detector_bias = odd_samples[:, 0:odd_samples.shape[1] - 1]
    avg_bias_odd = np.mean(odd_detector_bias, axis=1)
    bias_subtracted_quad = np.zeros((nx_quad, ny_quad), dtype=float)
    bias_subtracted_quad[:, ::2] = active_quad[:, ::2] - avg_bias_even[:, None]
    bias_subtracted_quad[:, 1::2] = active_quad[:, 1::2] - avg_bias_odd[:, None]
    return bias_subtracted_quad
def perform_bias_subtraction_ave_sto (active_quad, trailing_overclocks):
    # sepearate out even and odd detectors for both the active quads and trailing overclocks
    # The trailing overclocks are averaged and the average offset is subtracted
    # from the active quad. This is done for both, ping and pong
    """ Remove offset from active quads. Take care of ping-pong by breaking
    Quads and overclocks into even and odd
    """
    # NOTE(review): the output width is hard-coded to 1024, so `active_quad`
    # must be a 1-D vector of exactly 1024 samples -- TODO confirm.
    bias_subtracted_quad = np.zeros((1,1024))
    even_detector_bias = trailing_overclocks[:, ::2]
    # Even chain drops only the first sample ...
    even_detector_bias = even_detector_bias[:, 1:]
    avg_bias_even = np.mean(even_detector_bias)
    #print(np.mean(avg_bias_even))
    odd_detector_bias = trailing_overclocks[:, 1::2]
    # ... while the odd chain keeps only samples 1..9. NOTE(review): this
    # asymmetry ([1:] vs [1:10]) looks deliberate but is undocumented --
    # verify against the instrument's overclock layout. Unlike the 2-D
    # variant above, a single scalar offset (not per-row) is used here.
    odd_detector_bias = odd_detector_bias[:, 1:10 ]
    avg_bias_odd = np.mean(odd_detector_bias)
#    plt.plot(np.mean(even_detector_bias, axis=0).T,'.', color='blue')
#    plt.plot(np.mean(odd_detector_bias, axis=0).T,'.', color='black')
#    plt.show()
#    cc
#
    #print(np.mean(avg_bias_odd))
    even_detector_active_quad = active_quad[::2]
    odd_detector_active_quad = active_quad[1::2]
    bias_subtracted_quad_even = even_detector_active_quad - avg_bias_even
    bias_subtracted_quad_odd = odd_detector_active_quad - avg_bias_odd
    bias_subtracted_quad[:, ::2] = np.array(bias_subtracted_quad_even)
    bias_subtracted_quad[:, 1::2] = np.array(bias_subtracted_quad_odd)
    #print(avg_bias_even, avg_bias_odd, np.mean(bias_subtracted_quad_even), np.mean(bias_subtracted_quad_odd))
    return bias_subtracted_quad
def perform_smear_subtraction(active_quad, int_time):
    """Remove frame-transfer smear from an active quad.

    Assumes the storage-region dark current is negligible, so the smear in
    each column is Csmear = tFT / (ti + tFT) * AVG[C(w)], where tFT is the
    8.3333 ms frame-transfer time (expressed here as 8.3333e3, apparently
    in microseconds) and ti the integration time in the same units.
    """
    frame_transfer_time = 8.3333 * 10**3
    weight = frame_transfer_time / (int_time + frame_transfer_time)
    column_means = np.mean(active_quad, axis=0)
    smear_per_column = weight * column_means
    # Subtract the same smear estimate from every row of the column.
    return active_quad - smear_per_column[None, :]
def perform_Dark_removal(data_file, i):
    """Return the offset-corrected dark-current image for quad ``i``.

    data_file: path to an IDL ``.sav`` file whose variable ``q`` is shaped
               (frames, quads, rows, cols).
    i: quad index (0..3).
    """
    # calculate dark current
    IDL_variable = readsav(data_file)
    all_full_frame = IDL_variable.q
    quad_full_frame = all_full_frame[:, i , :, :]
    # Average over all frames, then split active pixels from the trailing
    # serial overclocks (same window as used elsewhere in this module).
    avg_quad = np.mean(quad_full_frame[:, :, :], axis=0)
    active_quad = avg_quad[4:1028, 10:1034]
    tsoc = avg_quad[4:1028, 1034:1056]
    dark_current = perform_bias_subtraction_ave(active_quad, tsoc)
    return dark_current
def create_image(image_data, title, figure_name, spot):
    """Save a false-colour crop of `image_data` around spot 1 or spot 2.

    NOTE(review): if `spot` is neither 1 nor 2 the local `image` is never
    bound and plt.colorbar() below raises NameError -- confirm callers only
    ever pass 1 or 2.
    """
    plt.figure()
    ax = plt.gca()
    if spot==2:
        image = ax.imshow(image_data[720:860, 720:860], cmap='nipy_spectral', origin='lower')
    elif spot==1:
        image = ax.imshow(image_data[185:325, 170:310], cmap='nipy_spectral', origin='lower')
    plt.title(title)
    # Attach the colorbar in its own axis so it doesn't shrink the image.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(image, cax= cax)
    plt.grid(False)
    plt.savefig(figure_name,dpi=95,bbox_inches="tight")
    #plt.show()
    plt.close('all')
def create_hist(image, title, figure_name, COLOR) :
    """Build a 10-bin histogram figure of `image` values.

    NOTE(review): both plt.savefig and plt.show are commented out, so this
    currently builds the figure and immediately closes it -- `figure_name`
    is unused until the savefig line is restored.
    """
    # Flatten 2-D input; anything else is treated as a 1-D sequence.
    if np.array(image).ndim ==2:
        nx_quad, ny_quad = image.shape
    else:
        nx_quad= 1
        ny_quad = len(image)
    #print(ny_quad)
    #cc
    label = 'Mean = '+ str(round(np.mean(image), 2))
    plt.figure(figsize=(8, 5))
    plt.hist(np.reshape(image, (nx_quad* ny_quad, 1)),10, facecolor=COLOR, label=label)
    plt.grid(True, linestyle=':')
    legend = plt.legend(loc='best', ncol=3, shadow=True,
                        prop={'size':10}, numpoints=1)
    legend.get_frame().set_edgecolor('wheat')
    legend.get_frame().set_linewidth(2.0)
    #plt.xlim(-10, 10)
    #plt.ylim(0, 40000)
    plt.ylabel('Frequency (# of pixels)', fontsize=12,
               fontweight="bold")
    plt.xlabel(' Dark current (DN) ', fontsize=12,
               fontweight="bold")
    plt.title(title)
    #plt.savefig(figure_name, dpi=100, bbox_inches="tight")
    #plt.show()
    plt.close('all')
def plot_row_avg(row_avg, title, figure_name, COLOR, xlabel):
    """Scatter-plot a 1-D (or per-row) signal profile and show it.

    NOTE(review): plt.savefig is commented out, so `figure_name` is
    currently unused and the figure is only displayed interactively.
    """
    # let's take the mean tsoc for 100 frames
    nrows = 1
    ncols = 1
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7,5))
    fig.subplots_adjust(left=0.125, right=0.95, bottom=0.1, top=0.9,
                        wspace=0.3, hspace=.25)
    ax.plot(row_avg, '.', color=COLOR)
    ax.grid(True, linestyle=':')
    ax.set_title(title, fontsize=12, fontweight='bold')
    ax.set_ylabel('Signal - Offset (DN)', fontsize=12, fontweight='bold')
    ax.set_xlabel(xlabel, fontsize=12, fontweight='bold')
    #ax.set_ylim(ylim[0], ylim[1])
    #plt.savefig(figure_name, dpi=100, bbox_inches="tight")
    plt.show()
    plt.close('all')
def main():
    """
    The main function: walk the cross-talk ground-test directories, save
    per-quad images/profiles, and dump the spot-region pixels to CSV.
    """
    # NOTE(review): all input/output paths are hard-coded to one analyst's
    # machine; `outlier_mask` and `int_time` below are computed but unused.
    #nx_quad = 1056 # For Tempo
    #ny_quad = 1046 # For Tempo
    #nlat = nx_quad*2
    #nspec = ny_quad*2
    file_path = r'F:\TEMPO\Data\GroundTest\FPS\Crosstalk'
    file_path_dark = r'F:\TEMPO\Data\GroundTest\FPS\Crosstalk'
    save_file_path = r'C:\Users\nmishra\Workspace\TEMPO\Cross_Talk_Test'
    outlier_mask = read_outlier_mask()
    temp_files = os.listdir(file_path)
    # One pass per channel directory (Channel_A .. Channel_D).
    for files in range(0, 4):
        dframe1 = []
        dframe2 = []
        rows_max_A = [ ]
        cols_max_A = [ ]
        rows_max_B = [ ]
        cols_max_B = [ ]
        rows_max_C = [ ]
        cols_max_C = [ ]
        rows_max_D = [ ]
        cols_max_D = [ ]
        save_dir = os.path.join(save_file_path, temp_files[files])
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        saved_data_files = os.path.join(file_path, temp_files[files],'Script_Data','saved_quads')
        saved_dark_files = os.path.join(file_path_dark, temp_files[files],'Script_Data','saved_quads','Dark')
        all_int_files = [each for each in os.listdir(saved_data_files) \
                         if each.endswith('dat.sav')]
#        all_dark_files = [each for each in os.listdir(saved_dark_files) \
#                         if each.endswith('dat.sav')]
        for data_files in all_int_files:
            data_file = os.path.join(saved_data_files, data_files)
            print(data_file)
            IDL_variable = readsav(data_file)
            # Acquisition metadata is encoded in '_'-separated filename parts.
            data_path_name_split = data_files.split('_')
            int_time = int(data_path_name_split[-1].split('.')[0])
            quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']
            color = ['Blue','Green','Red','Orange']
            all_full_frame = IDL_variable.q
            ylim1= [0, 16000]
            ylim2 = [-6, 6]
            for i in range(0, 4):
                # Average all frames of this quad.
                quad_full_frame = all_full_frame[:, i, :, :]
                avg_quad = np.mean(quad_full_frame[:, :, :], axis=0)
                active_quad = avg_quad[4:1028, 10:1034]
                tsoc = avg_quad[4:1028, 1034:1056]
                #------perform bias subtraction using trailing overclocks and save the dark current image----------
                #bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)
                # mask out the outliers
                cross_talk_array = avg_quad
                nx1, ny1 = cross_talk_array.shape
                # let's reshape the array to 1-D so we can work with single loop
                #cross_talk_array = np.reshape(cross_talk_array, (nx1*ny1, 1))
                if(temp_files[files] in("Channel_A", "Channel_C")) :
                    if len(data_path_name_split)>9:
                        spot = 2
                        input_signal = (data_path_name_split[-5])
                        quad_illuminated = data_path_name_split[-6]
                    else:
                        spot = 1
                        input_signal = (data_path_name_split[-4])
                        quad_illuminated = data_path_name_split[-5]
                    # NOTE(review): all_dark_files is only defined in the
                    # commented-out listing above -- this branch raises
                    # NameError as written.
                    if spot==1:
                        dark_data_file= os.path.join(saved_dark_files, all_dark_files[0])
                    elif spot==2:
                        dark_data_file= os.path.join(saved_dark_files, all_dark_files[1])
                    # subtract off the dark current
                    cross_talk_array = cross_talk_array  # NOTE(review): no-op; dark subtraction never applied
                    row_average = np.mean(cross_talk_array, axis=1)
                    column_average = np.mean(cross_talk_array, axis=0)
                    string1 = quad_illuminated[0:4]+' '+quad_illuminated[4]+ ' Illuminated'
                    string2 = 'Input Signal = '+ input_signal
                    string3 = 'spot'+ str(spot)
                    title1 = quads[i]+' Image\n ' + string1+' @'+string3+', '+string2
                    title2 = quads[i]+' Row Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
                    title3 = quads[i]+' Column Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
                    title4 = quads[i]+' Image Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
                elif(temp_files[files] in("Channel_D")) :
                    if len(data_path_name_split)>9:
                        spot = 2
                        input_signal = (data_path_name_split[-5])
                        quad_illuminated = data_path_name_split[-6]
                    else:
                        spot = 1
                        quad_illuminated = data_path_name_split[-4]
                        input_signal ='Not Given'
                    string1 = quad_illuminated[0:4]+' '+quad_illuminated[4]+ ' Illuminated'
                    string2 = 'Input Signal = '+ input_signal
                    string3 = 'spot'+ str(spot)
                    print(string1)
                    print(string2)
                    print(string3)
                    title1 = quads[i]+' Image\n ' + '('+ string1+' @'+string3+', '+string2+')'
                    title2 = quads[i]+' Row Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
                    title3 = quads[i]+' Column Average Profile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
                    title4 = quads[i]+' ImageProfile \n ' +'('+ string1+' @'+string3+', '+string2 +')'
                else:
                    if len(data_path_name_split)>8:
                        spot = 2
                        quad_illuminated = data_path_name_split[-5]
                    else:
                        spot=1
                        quad_illuminated = data_path_name_split[-4]
                    string1 = quad_illuminated[0:4]+' '+quad_illuminated[4]+ ' Illuminated'
                    string3 = 'Spot'+ str(spot)
                    title1 = quads[i]+' Image\n ' + string1+' @'+string3
                    title2 = quads[i]+' Row Average Profile \n ' + string1+' @'+string3
                    title3 = quads[i]+' Column Average Profile \n ' + string1+' @'+string3
                    title4 = quads[i]+' Image Profile \n ' + string1+' @'+string3
                if quad_illuminated.lower() == quads[i].replace(" ","").lower():
                    ylim = ylim1
                    #print(ylim)
                else:
                    # NOTE(review): smear_subtracted_quad is never defined in
                    # this function -- this branch raises NameError as written.
                    smear_subtracted_quad[(smear_subtracted_quad>1500)] = np.mean(smear_subtracted_quad)
                    ylim = ylim2
                # Collect the fixed spot-region window for this quad.
                if spot == 1:
                    #rows, cols = np.reshape()
                    if i == 0:
                        rows_max_A.append(cross_talk_array[185:325, 170:310])
                    elif i == 1:
                        rows_max_B.append(cross_talk_array[185:325, 170:310])
                    elif i == 2:
                        rows_max_C.append(cross_talk_array[185:325, 170:310])
                    elif i == 3:
                        rows_max_D.append(cross_talk_array[185:325, 170:310])
                elif spot==2:
                    if i==0:
                        rows_max_A.append(cross_talk_array[720:860, 720:860])
                    elif i==1:
                        rows_max_B.append(cross_talk_array[720:860, 720:860])
                    elif i==2:
                        rows_max_C.append(cross_talk_array[720:860, 720:860])
                    elif i==3:
                        rows_max_D.append(cross_talk_array[720:860, 720:860])
                quad_save = 'Cross_Talk_Image_Ghost'
                save_dir_image = os.path.join(save_dir, quads[i], quad_save)
                if not os.path.exists(save_dir_image):
                    os.makedirs(save_dir_image)
                figure_name = save_dir_image + '/'+ data_files + '.png'
                create_image(cross_talk_array, title1, figure_name, spot)
                #save_plot = 'plot_row_average'
                save_plot = 'plot_all_data'
                save_dir_plot = os.path.join(save_dir, quads[i], save_plot)
                if not os.path.exists(save_dir_plot):
                    os.makedirs(save_dir_plot)
                figure_name = save_dir_plot + '/'+ data_files + '.png'
                xlabel = 'Pixel Indices (#)'
                plot_row_avg(cross_talk_array, title4, figure_name, color[i], xlabel)
                save_plot = 'plot_column_average'
                save_dir_plot = os.path.join(save_dir, quads[i], save_plot)
                if not os.path.exists(save_dir_plot):
                    os.makedirs(save_dir_plot)
                figure_name = save_dir_plot + '/'+ data_files + '.png'
                xlabel = 'Spatial Pixel Indices (#)'
                #plot_row_avg(column_average, title3, figure_name, color[i], xlabel)
                save_plot = 'plot_row_average'
                save_dir_plot = os.path.join(save_dir, quads[i], save_plot)
                if not os.path.exists(save_dir_plot):
                    os.makedirs(save_dir_plot)
                figure_name = save_dir_plot + '/'+ data_files + '.png'
                xlabel = 'Spectral Pixel Indices (#)'
                #plot_row_avg(row_average, title2, figure_name, color[i], xlabel)
                #cc
#        dframe1 = pd.DataFrame(
#                              {'Quad_A_rows' : rows_max_A,
#                               'Quad_B_rows' : rows_max_B,
#                               'Quad_C_rows' : rows_max_C,
#                               'Quad_D_rows': rows_max_D,
#                               })
#        dframe2 = pd.DataFrame(
#                              {'Quad_A_cols' : cols_max_A,
#                               'Quad_B_cols' : cols_max_B,
#                               'Quad_C_cols' : cols_max_C,
#                               'Quad_D_cols': cols_max_D,
#                               })
        # Flatten the collected spot windows and dump one CSV per quad.
        ndims, row_s,col_s = np.array(rows_max_A).shape
        rows_max_A = np.reshape(np.array(rows_max_A), (ndims*row_s*col_s, 1))
        rows_max_B = np.reshape(np.array(rows_max_B), (ndims* row_s*col_s,1 ))
        rows_max_C = np.reshape(np.array(rows_max_C), (ndims*row_s*col_s, 1))
        rows_max_D = np.reshape(np.array(rows_max_D), (ndims*row_s*col_s, 1))
        csv_name_A = save_dir+'/'+temp_files[files]+'_cross_talk_A.csv'
        csv_name_B = save_dir+'/'+temp_files[files]+'_cross_talk_B.csv'
        csv_name_C = save_dir+'/'+temp_files[files]+'_cross_talk_C.csv'
        csv_name_D = save_dir+'/'+temp_files[files]+'_cross_talk_D.csv'
        np.savetxt(csv_name_A, np.asarray(rows_max_A), delimiter=',', fmt='%1.2f')
        np.savetxt(csv_name_B, np.asarray(rows_max_B), delimiter=',', fmt='%1.2f')
        np.savetxt(csv_name_C, np.asarray(rows_max_C), delimiter=',', fmt='%1.2f')
        np.savetxt(csv_name_D, np.asarray(rows_max_D), delimiter=',', fmt='%1.2f')
        #csv_name_cols = save_dir+'/'+temp_files[files]+'_cols_mean.csv'
        #dframe1.to_csv(csv_name_rows, header=True, columns=['Quad_A_rows','Quad_B_rows','Quad_C_rows','Quad_D_rows'])
        #dframe2.to_csv(csv_name_cols, header=True, columns=['Quad_A_cols','Quad_B_cols','Quad_C_cols','Quad_D_cols'])
        #cc
# Script entry point: run main() only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    main()
| [
"nischal.mishra@gmail.com"
] | nischal.mishra@gmail.com |
66da12d1a46b2363ec995ed6684e46458403d42d | f027db78eb8e44c7d5994531157e910dabe37a6c | /Deep Learning/Unsupervised Learning/Self Organizing Maps (SOM)/Hybrid Supervised-Unsupervised Fraud Detection Model/mega_case_study_improved.py | 484cb9e198bfc6e49dbfc918085cf98a404f247e | [] | no_license | jamesawgodwin/PythonMachineLearningTemplates | e6d8ac678ca483a49a2e73dc079faad9bba25147 | 320f76dabbd988f1c56774e89b5baf9b2767d10d | refs/heads/master | 2021-08-08T17:44:35.442494 | 2020-04-19T04:03:51 | 2020-04-19T04:03:51 | 158,993,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,304 | py | # Mega Case Study - Make a Hybrid Deep Learning Model
# Part 1 - Identify the Frauds with the Self-Organizing Map

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset (first column is the customer id, last the label).
dataset = pd.read_csv('Credit_Card_Applications.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Feature Scaling: SOM training expects every feature in [0, 1].
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
X = sc.fit_transform(X)

# Training the SOM: a 10x10 grid over the 15 input features.
from minisom import MiniSom
som = MiniSom(x=10, y=10, input_len=15, sigma=1.0, learning_rate=0.5)
som.random_weights_init(X)
som.train_random(data=X, num_iteration=100)

# Visualizing the results: bright cells have a large mean inter-neuron
# distance and therefore hold outlier (potentially fraudulent) customers.
from pylab import bone, pcolor, colorbar, plot, show
bone()
pcolor(som.distance_map().T)
colorbar()
markers = ['o', 's']  # marker per label class
colors = ['r', 'g']
for i, x in enumerate(X):
    w = som.winner(x)
    plot(w[0] + 0.5,
         w[1] + 0.5,
         markers[y[i]],
         markeredgecolor=colors[y[i]],
         markerfacecolor='None',
         markersize=10,
         markeredgewidth=2)
show()

# Finding the frauds
rows = 10  # dimensions of the SOM grid
cols = 10

# Pair each cell's mean inter-neuron distance with its flat index, then
# sort descending so the most anomalous cells come first.
helper = np.concatenate(
    (som.distance_map().reshape(rows * cols, 1),       # the SOM map values
     np.arange(rows * cols).reshape(rows * cols, 1)),  # their flat indices
    axis=1)
helper = helper[helper[:, 0].argsort()][::-1]  # descending by distance

# Choose the outlier cells: either every cell whose distance exceeds
# `threshold`, or a hardcoded number of top cells.
use_threshold = True  # toggle between the two selection strategies
top_cells = 4         # fallback when use_threshold is False; ideally tuned after inspecting the SOM plot
threshold = 0.8       # distance cutoff when use_threshold is True
idx = helper[helper[:, 0] > threshold, 1] if use_threshold else helper[:top_cells, 1]

# Collect the observations mapped to the selected cells.
mappings = som.win_map(X)
selected = []
for i in range(rows):
    for j in range(cols):
        # BUGFIX: the flat index of cell (i, j) is i * cols + j.  The
        # original used i * rows + j, which only worked because the grid
        # happens to be square.  Empty cells are skipped because
        # np.concatenate raises on zero-length inputs.
        if (i * cols + j) in idx and len(mappings[(i, j)]) > 0:
            selected.append(mappings[(i, j)])
result_map = np.concatenate(selected, axis=0)

# Undo the scaling so the first column is the original customer id again.
frauds = sc.inverse_transform(result_map)

# Part 2 - Going from Unsupervised to Supervised Deep Learning

# Matrix of features: every column except the customer id.
customers = dataset.iloc[:, 1:].values

# Dependent variable: 1 when the SOM flagged the customer id.
# BUGFIX: compare against the id column only (frauds[:, 0]).  The original
# `in frauds` tested membership over the whole 2-D array, so a customer id
# equal to ANY attribute value of a flagged row would be mislabelled.
fraud_ids = set(frauds[:, 0])
is_fraud = np.zeros(len(dataset))
for i in range(len(dataset)):
    if dataset.iloc[i, 0] in fraud_ids:
        is_fraud[i] = 1

# Feature Scaling for the ANN (a separate scaler so we do not shadow the
# MinMaxScaler used for the SOM above).
from sklearn.preprocessing import StandardScaler
sc_ann = StandardScaler()
customers = sc_ann.fit_transform(customers)

# Building and training the ANN

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense

# Initialising the ANN
classifier = Sequential()
# Single small hidden layer; input_dim=15 matches the feature columns.
classifier.add(Dense(units=2, kernel_initializer='uniform', activation='relu', input_dim=15))
# Sigmoid output: probability that the customer cheated.
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
# Compiling the ANN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Fitting the ANN to the training set
classifier.fit(customers, is_fraud, batch_size=1, epochs=2)

# Predicting the probabilities of fraud and ranking the customers:
# column 0 is the customer id, column 1 the predicted probability.
y_pred = classifier.predict(customers)
y_pred = np.concatenate((dataset.iloc[:, 0:1].values, y_pred), axis=1)
# Sort ascending by predicted probability of being a cheater.
y_pred = y_pred[y_pred[:, 1].argsort()]
"james.a.w.godwin@gmail.com"
] | james.a.w.godwin@gmail.com |
def solution(x):
    """Return True when x is a Harshad number, False otherwise.

    A Harshad number is divisible by the sum of its decimal digits,
    e.g. 18 -> 1 + 8 = 9 and 18 % 9 == 0.
    """
    # Sum the digits directly from the string form instead of building
    # an intermediate list with repeated str(x) conversions.
    digit_sum = sum(int(digit) for digit in str(x))
    return x % digit_sum == 0
| [
"noreply@github.com"
] | Boot-Camp-Coding-Test.noreply@github.com |
1e1a220013ea65a97547f55b52bf0e6e8ba7ee32 | 4b742f57981b3db902e7048fe05faf263ff52138 | /base/migrations/0010_pgpkey_passphrase.py | 174c1f9c2f96097e66f55808d6348a2d55d10933 | [
"MIT"
] | permissive | erichuang2015/Hiren-MailBox | eace0c90b5815f3e4a660dfda75910256704db96 | ff4cad0998007e8c9a2a200af3a2e05a3d947d12 | refs/heads/master | 2020-04-02T01:31:55.680288 | 2018-09-13T15:21:46 | 2018-09-13T15:21:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Generated by Django 2.0.4 on 2018-05-22 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: add a ``passphrase`` text column to the PgpKey model.

    ``preserve_default=False`` records that the empty-string default was
    only used to backfill existing rows and is not kept on the model.
    Presumably the field stores the PGP key passphrase — confirm against
    the PgpKey model definition.
    """

    dependencies = [
        ('base', '0009_auto_20180504_0501'),
    ]

    operations = [
        migrations.AddField(
            model_name='pgpkey',
            name='passphrase',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
| [
"git.pyprism@gmail.com"
] | git.pyprism@gmail.com |
dc0cd9bf59857e086726029d8ff6c6aa608de145 | 51464779b985f9e4e67279eea4bf022fa8b687e6 | /data/tools.py | e8067a800b1ebca972f374a637592ca7df76b585 | [
"CC0-1.0",
"WTFPL"
] | permissive | xinmingzhang/pyweek22 | 2b1ef5b11cfbf5c14d5b3d4ee0315d7a86eb98b0 | 07ef583507d5971b7e55dcd66cee22dc6f36f134 | refs/heads/master | 2020-04-17T18:43:29.846741 | 2016-09-10T07:55:00 | 2016-09-10T07:55:00 | 67,858,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,906 | py | """
This module contains the fundamental Control class and a prototype class
for States. Also contained here are resource loading functions.
"""
import os
import copy
import pygame as pg
class Control(object):
    """Top-level controller for the entire project.

    Owns the main game loop, dispatches pygame events to the active
    State, and flips between States when the active one reports done.
    """

    def __init__(self, caption, icon=None):
        """Cache display information and initialize loop bookkeeping.

        caption : window title string.
        icon    : optional Surface used as the window icon.
        """
        self.screen = pg.display.get_surface()
        self.caption = caption
        self.done = False
        self.clock = pg.time.Clock()
        self.fps = 60.
        self.show_fps = False
        self.current_time = 0.0
        self.keys = pg.key.get_pressed()
        self.state_dict = {}
        self.state_name = None
        self.state = None
        self.fullscreen = False
        self.icon = icon
        if self.icon is not None:  # identity check is the idiomatic None test
            pg.display.set_icon(self.icon)

    def setup_states(self, state_dict, start_state):
        """Given a dictionary of States and a State to start in,
        builds the self.state_dict."""
        self.state_dict = state_dict
        self.state_name = start_state
        self.state = self.state_dict[self.state_name]

    def update(self, dt):
        """Check for quit/flip requests, then update and draw the state.

        dt is the milliseconds elapsed since the previous frame.
        """
        self.current_time = pg.time.get_ticks()
        if self.state.quit:
            pg.mouse.set_visible(True)
            self.done = True
        elif self.state.done:
            self.flip_state()
        # NOTE(review): concrete States are expected to provide update(dt)
        # and draw(surface); the _State prototype in this file declares a
        # different update signature — confirm subclasses override it.
        self.state.update(dt)
        self.state.draw(self.screen)

    def flip_state(self):
        """Swap to the next State, passing persistent data along."""
        previous, self.state_name = self.state_name, self.state.next
        persist = self.state.cleanup()
        # NOTE(review): called with one argument although _State.startup is
        # declared as startup(current_time, persistent); project States
        # presumably override startup(persist) — confirm before changing.
        self.state.startup(persist)
        self.state.previous = previous

    def event_loop(self):
        """Process all events and pass them down to current State. The f5 key
        globally turns on/off the display of FPS in the caption."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.done = True
            elif event.type == pg.KEYDOWN:
                self.keys = pg.key.get_pressed()
                self.toggle_show_fps(event.key)
            elif event.type == pg.KEYUP:
                self.keys = pg.key.get_pressed()
                self.toggle_fullscreen(event.key)
            self.state.get_event(event)

    def toggle_show_fps(self, key):
        """Press f5 to turn on/off displaying the framerate in the caption."""
        if key == pg.K_F5:
            self.show_fps = not self.show_fps
            if not self.show_fps:
                # Restore the plain caption once the FPS readout is hidden.
                pg.display.set_caption(self.caption)

    def toggle_fullscreen(self, key):
        """Press f1 to toggle between fullscreen and windowed mode."""
        if key == pg.K_F1:
            screen_size = pg.display.get_surface().get_size()
            self.fullscreen = not self.fullscreen
            if self.fullscreen:
                self.screen = pg.display.set_mode(screen_size, pg.FULLSCREEN)
            else:
                self.screen = pg.display.set_mode(screen_size)

    def main(self):
        """Main loop for entire program."""
        while not self.done:
            time_delta = self.clock.tick(self.fps)
            self.event_loop()
            self.update(time_delta)
            pg.display.update()
            if self.show_fps:
                fps = self.clock.get_fps()
                with_fps = "{} - {:.2f} FPS".format(self.caption, fps)
                pg.display.set_caption(with_fps)
class _State(object):
"""This is a prototype class for States. All states should inherit from it.
No direct instances of this class should be created. get_event and update
must be overloaded in the childclass. startup and cleanup need to be
overloaded when there is data that must persist between States."""
def __init__(self):
self.start_time = 0.0
self.current_time = 0.0
self.done = False
self.quit = False
self.next = None
self.previous = None
self.persist = {}
def get_event(self, event):
"""Processes events that were passed from the main event loop.
Must be overloaded in children."""
pass
def startup(self, current_time, persistent):
"""Add variables passed in persistent to the proper attributes and
set the start time of the State to the current time."""
self.persist = persistent
self.start_time = current_time
def cleanup(self):
"""Add variables that should persist to the self.persist dictionary.
Then reset State.done to False."""
self.done = False
return self.persist
def update(self, surface, keys, current_time):
"""Update function for state. Must be overloaded in children."""
pass
def render_font(self, font, msg, color, center):
"""Returns the rendered font surface and its rect centered on center."""
msg = font.render(msg, 1, color)
rect = msg.get_rect(center=center)
return msg, rect
class _KwargMixin(object):
"""
Useful for classes that require a lot of keyword arguments for
customization.
"""
def process_kwargs(self, name, defaults, kwargs):
"""
Arguments are a name string (displayed in case of invalid keyword);
a dictionary of default values for all valid keywords;
and the kwarg dict.
"""
settings = copy.deepcopy(defaults)
for kwarg in kwargs:
if kwarg in settings:
if isinstance(kwargs[kwarg], dict):
settings[kwarg].update(kwargs[kwarg])
else:
settings[kwarg] = kwargs[kwarg]
else:
message = "{} has no keyword: {}"
raise AttributeError(message.format(name, kwarg))
for setting in settings:
setattr(self, setting, settings[setting])
### Resource loading functions.
def load_all_gfx(directory, colorkey=(0, 0, 0), accept=(".png", ".jpg", ".bmp")):
    """Load every image in directory whose extension is in accept.

    Images with per-pixel alpha are converted with convert_alpha();
    all others are converted with convert() and given colorkey as their
    transparent color.  Returns a dict mapping base name -> Surface.
    """
    graphics = {}
    for filename in os.listdir(directory):
        name, ext = os.path.splitext(filename)
        if ext.lower() not in accept:
            continue
        img = pg.image.load(os.path.join(directory, filename))
        if img.get_alpha():
            img = img.convert_alpha()
        else:
            img = img.convert()
            img.set_colorkey(colorkey)
        graphics[name] = img
    return graphics
def load_all_music(directory, accept=(".wav", ".mp3", ".ogg", ".mdi")):
    """Map base file names to full paths for files whose (case-insensitive)
    extension is in accept.  Paths only — nothing is loaded into memory."""
    return {
        os.path.splitext(entry)[0]: os.path.join(directory, entry)
        for entry in os.listdir(directory)
        if os.path.splitext(entry)[1].lower() in accept
    }
def load_all_fonts(directory, accept=(".ttf",)):
    """Create a dictionary mapping font base names to their full paths for
    every file in directory whose extension is in accept.

    Delegates to load_all_music, which implements the generic
    name -> path directory scan.
    """
    return load_all_music(directory, accept)
def load_all_maps(directory, accept=(".tmx",)):
    """Create a dictionary mapping map base names to their full paths for
    every file in directory whose extension is in accept.

    Delegates to load_all_music, which implements the generic
    name -> path directory scan.
    """
    return load_all_music(directory, accept)
def load_all_movies(directory, accept=(".mpg",)):
    """Create a dictionary mapping movie base names to their full paths for
    every file in directory whose extension is in accept.

    Delegates to load_all_music, which implements the generic
    name -> path directory scan.
    """
    return load_all_music(directory, accept)
def load_all_sfx(directory, accept=(".wav", ".mp3", ".ogg", ".mdi")):
    """Load every sound effect in directory whose extension is in accept.

    Returns a dict mapping base name -> pygame Sound.  Per-effect volume
    usually has to be adjusted one-by-one afterwards (e.g. in a setup
    module), since there is no sensible global default.
    """
    effects = {}
    for entry in os.listdir(directory):
        name, ext = os.path.splitext(entry)
        if ext.lower() not in accept:
            continue
        effects[name] = pg.mixer.Sound(os.path.join(directory, entry))
    return effects
def strip_from_sheet(sheet, start, size, columns, rows=1):
    """Cut a grid of equally sized frames out of a sprite sheet.

    start   : (x, y) of the top-left frame on the sheet.
    size    : (width, height) of each frame.
    columns : frames per row; rows defaults to 1.

    Returns the frames in row-major order as subsurfaces of sheet.
    """
    width, height = size
    return [
        sheet.subsurface(pg.Rect((start[0] + width * col,
                                  start[1] + height * row), size))
        for row in range(rows)
        for col in range(columns)
    ]
def strip_coords_from_sheet(sheet, coords, size):
    """Cut specific frames out of a sprite sheet.

    coords is a sequence of (column, row) grid positions; size is the
    (width, height) of one frame.  Returns subsurfaces of sheet in the
    order the coordinates were given.
    """
    return [
        sheet.subsurface(pg.Rect((c[0] * size[0], c[1] * size[1]), size))
        for c in coords
    ]
def get_cell_coordinates(rect, point, size):
    """Return the top-left corner of the size-sized grid cell, relative to
    rect, that contains point (point is given in absolute coordinates)."""
    local_x = point[0] - rect.x
    local_y = point[1] - rect.y
    # Floor-divide then multiply snaps each coordinate to its cell origin.
    return (local_x // size[0] * size[0], local_y // size[1] * size[1])
def cursor_from_image(image):
    """Convert an image into a pygame cursor string list.

    Black pixels become "X", white pixels become "." and every other
    color becomes a space, one string per image row.
    """
    palette = {(0, 0, 0, 255): "X",
               (255, 255, 255, 255): "."}
    rect = image.get_rect()
    return [
        "".join(palette.get(tuple(image.get_at((col, row))), " ")
                for col in range(rect.width))
        for row in range(rect.height)
    ]
def color_swap(source_image, swap_map):
    """
    Creates a new Surface from the source_image with some or all colors
    swapped for new colors. Colors are swapped according to the
    color pairs in the swap_map dict. The keys and values in swap_map
    can be RGB tuples or pygame color-names. For each key in swap_map,
    all pixels of that color will be replaced by the color that key maps to.
    For example, passing this dict:
        {(0,255,0): (255, 0, 255),
         "black": (255, 0, 0),
         "yellow": "green"}
    would result in green pixels recolored purple, black pixels recolored
    red and yellow pixels recolored green.
    NOTE: This will not work if Pygame's video mode has not been set
    (i.e., you need to call pygame.display.set_mode beforehand).
    """
    img = source_image
    size = img.get_size()
    # Scratch surface that receives the recolored pixels for one swap pass.
    surf = pg.Surface(size)
    # Solid-color surface handed to transform.threshold as the search surface.
    color_surf = pg.Surface(size)
    # Result: a copy of the original with each recolored layer blitted on top.
    final = img.copy()
    for original_color, new_color in swap_map.items():
        # Both sides of a swap pair may be a pygame color name or an RGB(A) tuple.
        if isinstance(original_color, str):
            original = pg.Color(original_color)
        else:
            original = original_color
        if isinstance(new_color, str):
            recolor = pg.Color(new_color)
        else:
            recolor = new_color
        color_surf.fill(original)
        # Colorkey the scratch surface so only the recolored pixels show
        # through when it is blitted onto `final` below.
        surf.set_colorkey(original)
        # NOTE(review): relies on pg.transform.threshold with a zero
        # tolerance, set_behavior=1 and inverse_set=True to paint matching
        # pixels of img as `recolor` into surf — confirm exact parameter
        # semantics against the pygame.transform.threshold documentation
        # before altering this call.
        pg.transform.threshold(surf, img, original, (0, 0, 0, 0),
                               recolor, 1, color_surf, True)
        final.blit(surf, (0, 0))
    return final
def lerp(color_1, color_2, lerp_val):
    """Return a pygame Color linearly interpolated between two colors.

    lerp_val must lie in [0, 1] (inclusive); 0 yields color_1 and 1
    yields color_2.  Raises ValueError for values outside that range.
    """
    if not 0 <= lerp_val <= 1:
        raise ValueError("Lerp value must be in the range [0,1] inclusive.")
    channels = []
    for a, b in zip(color_1, color_2):
        channels.append(int(a * (1 - lerp_val) + b * lerp_val))
    return pg.Color(*channels)
| [
"252004372@qq.com"
] | 252004372@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.