blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
648a45ddee93210901fcbaf778e75181ac7c38e8 | fba6fca82bf1edd8e69ef94cede17193b845c48d | /planer.py | f8f6b7202193cf0d4658884a66e5d68ecea28ca3 | [] | no_license | sweettpickle/planer | 9da0cc9b34e62563979f2757b733a1612b749918 | cd505e6aebc5e3dcfdc66fabadaaaa4d75170998 | refs/heads/master | 2022-05-14T17:53:30.819859 | 2020-04-24T14:23:35 | 2020-04-24T14:23:35 | 258,514,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,969 | py | import telebot
from telebot import types
# bot = telebot.TeleBot('%ваш токен%')
# NOTE(review): a real bot token is committed here in clear text; it should be
# revoked and loaded from an environment variable or config file instead.
token = '1010919676:AAFlETQiiF6PUzGctcTFtNZLzCb12aVJjt4'
bot = telebot.TeleBot(token)
# обработчик сообщений
@bot.message_handler(commands=['start'])
def welcome(message):
    """Handle /start: show the main reply keyboard with the three actions."""
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)
    labels = ("Список привычек", "Добавить привычку", "Удалить привычку")
    keyboard.add(*(types.KeyboardButton(label) for label in labels))
    bot.send_message(message.chat.id, "Выберите действие:", reply_markup=keyboard)
# Emoji markers used as progress cells of a habit tracker.
done = "\u274c"            # cross mark: day completed
not_done = "\u2b55\ufe0f"  # hollow circle: day still pending
# Name of the habit currently being viewed/edited; mutated by the handlers below.
key = ''


def create_progress(n):
    """Return a fresh progress list of `n` days, all marked as not done.

    List multiplication replaces the original append loop; the shared
    elements are immutable strings, so the cells stay independent.
    """
    return [not_done] * n
# Habit name -> list of per-day progress markers; seeded with two 21-day habits.
track = {
    "Спорт": create_progress(21),
    "Чтение 30 минут": create_progress(21)
}
@bot.message_handler(content_types=['text'])
def get_message(message):
    """Dispatch the three reply-keyboard actions typed by the user."""
    text = message.text
    if text == "Список привычек":
        markup = types.InlineKeyboardMarkup(row_width=1)
        for habit in track:
            markup.add(types.InlineKeyboardButton(habit, callback_data=habit))
        bot.send_message(message.chat.id, "Ваш список привычек:", reply_markup=markup)
    elif text == "Добавить привычку":
        bot.register_next_step_handler(message, add_tracker)
        bot.send_message(message.chat.id, "Введите название:")
    elif text == "Удалить привычку":
        bot.register_next_step_handler(message, del_tracker)
        bot.send_message(message.chat.id, "Введите название:")
def add_tracker(message):
    """First step of adding a habit: validate the name, then ask for a day count."""
    global key
    name = message.text
    if name in track:
        bot.send_message(message.chat.id, "Привычка с таким названием уже есть")
        return
    key = name
    bot.register_next_step_handler(message, add_tracker2)
    bot.send_message(message.chat.id, "Введите количество дней:")
def add_tracker2(message):
    # Second step of adding a habit; `key` was set by add_tracker.
    # NOTE(review): int(message.text) raises ValueError on non-numeric input and
    # would crash this handler — consider validating; confirm intended behavior.
    track[key] = create_progress(int(message.text))
    bot.send_message(message.chat.id, "Привычка добавлена")
def del_tracker(message):
    """Delete the habit named in the user's reply, if it exists."""
    name = message.text
    if name not in track:
        bot.send_message(message.chat.id, "Такой привычки нет")
        return
    del track[name]
    bot.send_message(message.chat.id, "Привычка удалена")
# Single callback handler for every inline-keyboard press.
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    # Pressing a habit name opens that habit's progress view.
    if call.data in track:
        global key
        key = call.data  # remember which habit the progress view is showing
        inline = types.InlineKeyboardMarkup(row_width=1)
        but = types.InlineKeyboardButton(''.join(track[key]), callback_data="check")
        inline.add(but)
        bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                              text=key, reply_markup=inline)
    # Pressing the progress row marks the next pending day as done.
    elif call.data == "check":
        check(key)
        # The markup is cleared first and then re-attached; presumably this
        # forces Telegram to re-render the updated progress string — confirm.
        bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                              text=key, reply_markup=None)
        inline = types.InlineKeyboardMarkup(row_width=1)
        but = types.InlineKeyboardButton(''.join(track[key]), callback_data="check")
        inline.add(but)
        bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                              text=key, reply_markup=inline)
        bot.answer_callback_query(call.id, text="Отмечено")
def check(key):
    """Mark the first pending day of habit `key` as done.

    Mutates the progress list in place; does nothing when the habit is
    unknown or when every day is already marked.
    """
    lst = track.get(key)
    if lst is None:
        # unknown habit: the original would crash on len(None)
        return
    for i, cell in enumerate(lst):
        if cell == not_done:
            lst[i] = done
            break
    # No write-back needed: `lst` is the same list object stored in `track`
    # (the original `track[key] = lst` reassigned the identical object).
bot.polling(none_stop=True)
| [
"noreply@github.com"
] | sweettpickle.noreply@github.com |
51372281f96d983816766c266f8a2e1c3e0a83f6 | 57206f40094fc5b0c8ff41ed8a4cec34bfd04ab5 | /Vehicle detection/speedCal.py | 31c276a7b812c787afb80524f9458de293bb93d1 | [] | no_license | wuhaostudy1992/ECPS210 | 4a7b0b8e580b07997134fad4e6cd30d6e057acdd | d68493cbba25f800e9ec57619485b6231e8724bf | refs/heads/master | 2020-03-09T09:53:55.633723 | 2018-05-11T04:28:17 | 2018-05-11T04:28:17 | 128,723,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # coding=utf-8
import numpy as np
import cv2
# Sparse optical-flow (Lucas-Kanade) demo: track Shi-Tomasi corners across the
# frames of "high.flv" and draw each point's trajectory on an overlay mask.
cap = cv2.VideoCapture("high.flv")
# Shi-Tomasi corner-detection parameters
feature_params = dict(maxCorners=10, qualityLevel=0.1, minDistance=1, blockSize=9)
# Lucas-Kanade optical-flow parameters
lk_params = dict(winSize=(30, 30), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# create random colors, one per tracked point
color = np.random.randint(0, 255, (100, 3))
# get the first frame and turn it to gray
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
# initial Shi-Tomasi corners to track
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while 1:
    ret, frame = cap.read()
    if frame is None:
        # end of video: wait for any key, then leave the loop
        cv2.waitKey(0)
        break
    else:
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # compute optical flow from the previous gray frame
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        # keep only the points that were successfully tracked (st == 1)
        # NOTE(review): if p1 is None on the very first iteration, good_new is
        # never bound and the loop below raises NameError — confirm intended.
        if p1 is None:
            pass
        elif p0 is None:
            pass
        else:
            good_new = p1[st == 1]
            good_old = p0[st == 1]
        # the coordinates printed per frame match the configured corner count
        # print(good_new)
        # draw the track segment and current position for each point
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            # ESC quits
            break
        # carry the current frame and the surviving points into the next iteration
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
cv2.destroyAllWindows()
cap.release()
| [
"wuhaostudy1992@gmail.com"
] | wuhaostudy1992@gmail.com |
d8d13467a7799672f45585a90fa8be260b9dd1a7 | 028e16089ebbbd81adf41789a8da3ba3e25323d0 | /문자열 마음데로 정렬하기.py | f4bbb55c432e09c2ad24557bce06c0be8279bb49 | [] | no_license | FlowerLSH/Study1 | 18b840a1d3345e39f759661c88aa3fbc63a649f0 | bedc273d77ad7eb9fd214967dadcf9e49f21f2c8 | refs/heads/master | 2023-01-31T13:32:34.374341 | 2020-12-16T15:03:33 | 2020-12-16T15:03:33 | 278,109,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | def solution(strings, n):
answer = []
index = dict()
arr1 = []
for i in strings:
index[i] = i[n]
arr1.append(i[n])
arr = list(set(arr1))
arr.sort()
for j in arr:
emp = []
for k in index:
if index[k] == j: emp.append(k)
emp.sort()
answer.extend(emp)
return answer
def solution(strings, n):
    """Return `strings` sorted by the character at index `n`, ties broken
    by the full string (ascending).

    Fixes two defects of the prefix-and-strip approach: it mutated the
    caller's list in place and built a temporary prefixed copy of every
    string. A key function sorts without any of that.
    """
    return sorted(strings, key=lambda s: (s[n], s))
| [
"noreply@github.com"
] | FlowerLSH.noreply@github.com |
4e42e313b4e8f4517cca59865a67badc6b525b39 | 200df6cda6e54d56a4c800e10e6d5f248d7d59f2 | /02-算法思想/广度优先搜索/778.水位上升的泳池中游泳(H).py | 0d5613ed1b63a75e2a20984da04124b0b0f7e70b | [] | no_license | jh-lau/leetcode_in_python | b9b9a47d0b3ce29c3c56836b39decc3ec4487777 | 1d1876620a55ff88af7bc390cf1a4fd4350d8d16 | refs/heads/master | 2023-04-17T15:01:49.925774 | 2021-04-24T01:17:39 | 2021-04-24T01:17:39 | 192,735,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,600 | py | """
@Author : liujianhan
@Date : 20/9/26 19:31
@Project : leetcode_in_python
@FileName : 778.水位上升的泳池中游泳(H).py
@Description : 在一个 N x N 的坐标方格 grid 中,每一个方格的值 grid[i][j] 表示在位置 (i,j) 的平台高度。
现在开始下雨了。当时间为 t 时,此时雨水导致水池中任意位置的水位为 t 。你可以从一个平台游向四周相邻的任意一个平台,
但是前提是此时水位必须同时淹没这两个平台。假定你可以瞬间移动无限距离,也就是默认在方格内部游动是不耗时的。
当然,在你游泳的时候你必须待在坐标方格里面。
你从坐标方格的左上平台 (0,0) 出发。最少耗时多久你才能到达坐标方格的右下平台 (N-1, N-1)?
示例 1:
输入: [[0,2],[1,3]]
输出: 3
解释:
时间为0时,你位于坐标方格的位置为 (0, 0)。
此时你不能游向任意方向,因为四个相邻方向平台的高度都大于当前时间为 0 时的水位。
等时间到达 3 时,你才可以游向平台 (1, 1). 因为此时的水位是 3,坐标方格中的平台没有比水位 3 更高的,所以你可以游向坐标方格中的任意位置
示例2:
输入: [[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
输出: 16
解释:
0 1 2 3 4
24 23 22 21 5
12 13 14 15 16
11 17 18 19 20
10 9 8 7 6
最终的路线用加粗进行了标记。
我们必须等到时间为 16,此时才能保证平台 (0, 0) 和 (4, 4) 是连通的
提示:
2 <= N <= 50.
grid[i][j] 位于区间 [0, ..., N*N - 1] 内。
"""
import bisect
import sys
from typing import List
class Solution:
    """Four alternative solutions to LeetCode 778 (Swim in Rising Water)."""
    # 228ms, 14MB
    @staticmethod
    def swim_in_water(grid: List[List[int]]) -> int:
        """
        Union-find: sweep the water level t upward, merging flooded cells.
        @param grid: N x N platform heights
        @return: earliest t at which (0,0) and (N-1,N-1) are connected
        """
        n = len(grid)
        p = [[(i, j) for j in range(n)] for i in range(n)]  # union-find parents, one tuple per cell
        h = sorted([[grid[i][j], i, j] for j in range(n) for i in range(n)])  # cells ordered by height
        def f(a, b):
            if (a, b) != p[a][b]:
                p[a][b] = f(*p[a][b])  # path compression; tuple parent is unpacked with *
            return p[a][b]
        k = 0
        for t in range(max(grid[0][0], grid[-1][-1]), h[-1][0]):  # start at the larger corner height, stop at the global max
            while h[k][0] <= t:
                _, i, j = h[k]
                for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                    if 0 <= x < n and 0 <= y < n:
                        if grid[i][j] <= t and grid[x][y] <= t:
                            (pi, pj), (px, py) = f(i, j), f(x, y)
                            if (pi, pj) != (px, py):  # merge the two flooded components
                                p[px][py] = (pi, pj)
                k += 1
            if f(0, 0) == f(n - 1, n - 1):  # both corners share a root: connected
                return t
        return h[-1][0]
    # 172ms,, 13.8MB
    @staticmethod
    def swim_in_water_v2(grid: List[List[int]]) -> int:
        """
        BFS flood fill, re-run with the water level t rising one step at a time.
        @param grid:
        @return:
        """
        n = len(grid)
        c = {(0, 0)}  # visited cells (persist across water levels)
        for t in range(max(grid[0][0], grid[-1][-1]), sys.maxsize):  # water level starts at the larger corner height
            p = c.copy()  # BFS frontier starts from everything reached at lower levels
            while p:
                q = set()  # next BFS frontier
                for i, j in p:
                    if i == j == n - 1:  # reached the target cell
                        return t
                    for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                        if 0 <= x < n and 0 <= y < n and grid[x][y] <= t and (x, y) not in c:  # expand while submerged
                            q |= {(x, y)}
                            c |= {(x, y)}
                p = q
    # 128ms, 13.8MB
    @staticmethod
    def swim_in_water_v3(grid: List[List[int]]) -> int:
        """
        Dijkstra-style search with a bisect-maintained sorted queue.
        @param grid:
        @return:
        """
        n = len(grid)
        b = {(0, 0)}  # visited cells
        p = [[grid[0][0], 0, 0]]  # sorted queue of [height, i, j]
        t = 0  # highest cell seen on the chosen path so far
        while True:
            h, i, j = p.pop(0)
            t = max(t, h)
            if i == j == n - 1:  # popped the target: t is the answer
                return t
            for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                if 0 <= x < n and 0 <= y < n and (x, y) not in b:
                    bisect.insort(p, [grid[x][y], x, y])  # keep the queue ordered by height
                    b |= {(x, y)}
    # 140ms, 13.7MB
    @staticmethod
    def swim_in_water_v4(grid: List[List[int]]) -> int:
        """
        Bidirectional variant of v3: sorted queues grow from both corners.
        @param grid:
        @return:
        """
        n = len(grid)
        b, e = {(0, 0)}, {(n - 1, n - 1)}  # visited sets, one per direction
        p, q = [[grid[0][0], 0, 0]], [[grid[-1][-1], n - 1, n - 1]]  # one sorted queue per direction
        t = 0  # highest cell seen so far
        while True:
            h, i, j = p.pop(0)
            t = max(t, h)
            if (i, j) in e:  # frontiers met: done
                return t
            for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                if 0 <= x < n and 0 <= y < n and (x, y) not in b:
                    bisect.insort(p, [grid[x][y], x, y])
                    b |= {(x, y)}
            h, i, j = q.pop(0)  # symmetric step from the far corner (swap p/q and b/e)
            t = max(t, h)
            if (i, j) in b:
                return t
            for x, y in [(i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1)]:
                if 0 <= x < n and 0 <= y < n and (x, y) not in e:
                    bisect.insort(q, [grid[x][y], x, y])
                    e |= {(x, y)}
if __name__ == '__main__':
    # Smoke-test every variant on the two examples from the problem statement.
    cases = [
        [[0, 2], [1, 3]],
        [[0, 1, 2, 3, 4], [24, 23, 22, 21, 5], [12, 13, 14, 15, 16], [11, 17, 18, 19, 20], [10, 9, 8, 7, 6]],
    ]
    solvers = (
        Solution.swim_in_water,
        Solution.swim_in_water_v2,
        Solution.swim_in_water_v3,
        Solution.swim_in_water_v4,
    )
    for case in cases:
        for solve in solvers:
            print(solve(case))
| [
"lorgerd@163.com"
] | lorgerd@163.com |
8f6544b242c2b325c60dfe4ba718e842a1bd5da5 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn4 - krajevne funkcije/M-17221-2470.py | 10d80965dddb932bac3c85dd6f23dabfac539c8c | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,073 | py | # Tu pišite svoje funkcije:
from math import *
def koordinate(ime, kraji):
    """Return the (x, y) coordinates of the place named `ime` from the
    list of (name, x, y) triples, or None when no such place exists."""
    for name, x, y in kraji:
        if name == ime:
            return (x, y)
    return None
def razdalja_koordinat(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return sqrt(dx * dx + dy * dy)
def razdalja(ime1, ime2, kraji):
    """Distance between the two named places looked up in `kraji`."""
    x1, y1 = koordinate(ime1, kraji)
    x2, y2 = koordinate(ime2, kraji)
    return razdalja_koordinat(x1, y1, x2, y2)
def v_dometu(ime, domet, kraji):
    """Names of all places other than `ime` whose distance from `ime`
    is at most `domet`, in the order they appear in `kraji`."""
    return [kraj[0] for kraj in kraji
            if kraj[0] != ime and razdalja(ime, kraj[0], kraji) <= domet]
def najbolj_oddaljeni(ime, imena, kraji):
    """Return the name from `imena` farthest from `ime`, or None for an
    empty list.  Ties keep the earliest name, matching max()'s
    first-maximum rule."""
    if not imena:
        return None
    return max(imena, key=lambda other: razdalja(ime, other, kraji))
def zalijemo(ime, domet, kraji):
    """Farthest place that a sprinkler at `ime` with range `domet` reaches."""
    v_dosegu = v_dometu(ime, domet, kraji)
    return najbolj_oddaljeni(ime, v_dosegu, kraji)
def presek(s1, s2):
    """Elements of `s1` that also appear in `s2`, in `s1` order.

    Replaces the quadratic nested loop with a set for
    O(len(s1) + len(s2)) membership tests, and no longer emits an entry
    multiple times when `s2` contains the same value more than once.
    """
    v_s2 = set(s2)
    return [x for x in s1 if x in v_s2]
def skupno_zalivanje(ime1, ime2, domet, kraji):
    """Names of places strictly closer than `domet` to both `ime1` and
    `ime2`.  (Keeps the original strict `<`, unlike v_dometu's `<=`.)"""
    return [kraj[0] for kraj in kraji
            if razdalja(ime1, kraj[0], kraji) < domet
            and razdalja(ime2, kraj[0], kraji) < domet]
import unittest
class TestKraji(unittest.TestCase):
    """Unit tests for the place/distance helpers defined above."""
    # (name, x, y) fixture covering many Slovenian (and nearby) places.
    vsi_kraji = [
        ('Brežice', 68.66, 7.04),
        ('Lenart', 85.20, 78.75),
        ('Rateče', -65.04, 70.04),
        ('Ljutomer', 111.26, 71.82),
        ('Rogaška Slatina', 71.00, 42.00),
        ('Ribnica', 7.10, -10.50),
        ('Dutovlje', -56.80, -6.93),
        ('Lokve', -57.94, 19.32),
        ('Vinica', 43.81, -38.43),
        ('Brtonigla', -71.00, -47.25),
        ('Kanal', -71.00, 26.25),
        ('Črnomelj', 39.05, -27.93),
        ('Trbovlje', 29.61, 35.07),
        ('Beltinci', 114.81, 80.54),
        ('Domžale', -2.34, 31.50),
        ('Hodoš', 120.70, 105.00),
        ('Škofja Loka', -23.64, 35.07),
        ('Velike Lašče', 0.00, 0.00),
        ('Velenje', 33.16, 54.29),
        ('Šoštanj', 29.61, 57.75),
        ('Laško', 42.60, 33.29),
        ('Postojna', -29.54, -5.25),
        ('Ilirska Bistrica', -27.19, -27.93),
        ('Radenci', 100.61, 84.00),
        ('Črna', 15.41, 66.57),
        ('Radeče', 39.05, 24.57),
        ('Vitanje', 47.36, 57.75),
        ('Bled', -37.84, 56.07),
        ('Tolmin', -63.90, 36.75),
        ('Miren', -72.14, 7.04),
        ('Ptuj', 87.61, 61.32),
        ('Gornja Radgona', 97.06, 89.25),
        ('Plave', -73.34, 21.00),
        ('Novo mesto', 37.91, -3.47),
        ('Bovec', -76.89, 52.50),
        ('Nova Gorica', -69.79, 12.29),
        ('Krško', 60.35, 14.07),
        ('Cerknica', -18.89, -3.47),
        ('Slovenska Bistrica', 66.31, 57.75),
        ('Anhovo', -72.14, 22.78),
        ('Ormož', 107.71, 61.32),
        ('Škofije', -59.14, -27.93),
        ('Čepovan', -60.35, 22.78),
        ('Murska Sobota', 108.91, 87.57),
        ('Ljubljana', -8.24, 22.78),
        ('Idrija', -43.74, 17.54),
        ('Radlje ob Dravi', 41.46, 82.32),
        ('Žalec', 37.91, 43.79),
        ('Mojstrana', -49.70, 64.79),
        ('Log pod Mangartom', -73.34, 59.54),
        ('Podkoren', -62.69, 70.04),
        ('Kočevje', 16.61, -21.00),
        ('Soča', -69.79, 52.50),
        ('Ajdovščina', -53.25, 5.25),
        ('Bohinjska Bistrica', -48.49, 47.25),
        ('Tržič', -22.44, 56.07),
        ('Piran', -75.69, -31.50),
        ('Kranj', -20.09, 43.79),
        ('Kranjska Gora', -60.35, 68.25),
        ('Izola', -68.59, -31.50),
        ('Radovljica', -31.95, 54.29),
        ('Gornji Grad', 13.06, 49.03),
        ('Šentjur', 54.46, 40.32),
        ('Koper', -63.90, -29.72),
        ('Celje', 45.01, 42.00),
        ('Mislinja', 42.60, 66.57),
        ('Metlika', 48.56, -19.21),
        ('Žaga', -81.65, 49.03),
        ('Komen', -63.90, -1.68),
        ('Žužemberk', 21.30, 0.00),
        ('Pesnica', 74.55, 80.54),
        ('Vrhnika', -23.64, 14.07),
        ('Dravograd', 28.40, 78.75),
        ('Kamnik', -1.14, 40.32),
        ('Jesenice', -40.19, 64.79),
        ('Kobarid', -74.55, 43.79),
        ('Portorož', -73.34, -33.18),
        ('Muta', 37.91, 82.32),
        ('Sežana', -54.39, -13.96),
        ('Vipava', -47.29, 1.79),
        ('Maribor', 72.21, 75.28),
        ('Slovenj Gradec', 31.95, 71.82),
        ('Litija', 14.20, 22.78),
        ('Na Logu', -62.69, 57.75),
        ('Stara Fužina', -52.04, 47.25),
        ('Motovun', -56.80, -52.50),
        ('Pragersko', 73.41, 57.75),
        ('Most na Soči', -63.90, 33.29),
        ('Brestanica', 60.35, 15.75),
        ('Savudrija', -80.44, -34.96),
        ('Sodražica', 0.00, -6.93),
    ]
    # Wrapper that counts how many times the wrapped function is called;
    # used below to verify how `razdalja` composes the helpers.
    class CountCalls:
        def __init__(self, f):
            self.f = f
            self.call_count = 0
        def __call__(self, *args, **kwargs):
            self.call_count += 1
            return self.f(*args, **kwargs)
    @classmethod
    def setUpClass(cls):
        # Wrap the module-level helpers with call counters once per run.
        # NOTE(review): the bare `except:` silently ignores missing helpers
        # so partial student solutions can still run the remaining tests.
        global koordinate, razdalja_koordinat
        try:
            koordinate = cls.CountCalls(koordinate)
        except:
            pass
        try:
            razdalja_koordinat = cls.CountCalls(razdalja_koordinat)
        except:
            pass
    def test_1_koordinate(self):
        kraji = [
            ('Brežice', 68.66, 7.04),
            ('Lenart', 85.20, 78.75),
            ('Rateče', -65.04, 70.04),
            ('Ljutomer', 111.26, 71.82)
        ]
        self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
        self.assertEqual(koordinate("Lenart", kraji), (85.20, 78.75))
        self.assertEqual(koordinate("Rateče", kraji), (-65.04, 70.04))
        self.assertEqual(koordinate("Ljutomer", kraji), (111.26, 71.82))
        self.assertIsNone(koordinate("Ljubljana", kraji))
        kraji = [('Brežice', 68.66, 7.04)]
        self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
        self.assertIsNone(koordinate("Lenart", kraji))
        kraji = []
        self.assertIsNone(koordinate("Brežice", kraji))
    def test_1_range_len(self):
        # A list that forbids indexing: forces iteration instead of range(len()).
        class NoGetItem(list):
            def __getitem__(*x):
                raise IndexError("Nauči se (pravilno) uporabljati zanko for!")
        kraji = NoGetItem([('Brežice', 68.66, 7.04), ('Lenart', 85.20, 78.75),
                           ('Rateče', -65.04, 70.04)])
        self.assertEqual(koordinate("Brežice", kraji), (68.66, 7.04))
        self.assertEqual(koordinate("Lenart", kraji), (85.20, 78.75))
        self.assertEqual(koordinate("Rateče", kraji), (-65.04, 70.04))
        self.assertIsNone(koordinate("Ljubljana", kraji))
    def test_2_razdalja_koordinat(self):
        self.assertEqual(razdalja_koordinat(0, 0, 1, 0), 1)
        self.assertEqual(razdalja_koordinat(0, 0, 0, 1), 1)
        self.assertEqual(razdalja_koordinat(0, 0, -1, 0), 1)
        self.assertEqual(razdalja_koordinat(0, 0, 0, -1), 1)
        self.assertEqual(razdalja_koordinat(1, 0, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(0, 1, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(-1, 0, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(0, -1, 0, 0), 1)
        self.assertEqual(razdalja_koordinat(1, 2, 4, 6), 5)
        self.assertEqual(razdalja_koordinat(1, 2, -2, 6), 5)
        self.assertEqual(razdalja_koordinat(1, 2, 4, -2), 5)
        self.assertEqual(razdalja_koordinat(1, 2, -2, -2), 5)
        from math import sqrt
        self.assertAlmostEqual(razdalja_koordinat(1, 2, 0, 1), sqrt(2))
    def test_3_razdalja_krajev(self):
        kraji = [
            ('Brežice', 10, 20),
            ('Lenart', 13, 24),
            ('Rateče', 17, 20),
            ('Ljutomer', 8, 36)
        ]
        from math import sqrt
        self.assertEqual(razdalja("Brežice", "Lenart", kraji), 5)
        self.assertEqual(razdalja("Lenart", "Brežice", kraji), 5)
        self.assertEqual(razdalja("Brežice", "Rateče", kraji), 7)
        self.assertAlmostEqual(razdalja("Lenart", "Rateče", kraji), sqrt(32))
        self.assertEqual(razdalja("Lenart", "Ljutomer", kraji), 13)
        # `razdalja` must delegate: 2x koordinate + 1x razdalja_koordinat.
        koordinate.call_count = razdalja_koordinat.call_count = 0
        razdalja("Brežice", "Lenart", kraji)
        self.assertEqual(
            koordinate.call_count, 2,
            "Funkcija `razdalja` mora dvakrat poklicati `koordinate`")
        self.assertEqual(
            razdalja_koordinat.call_count, 1,
            "Funkcija `razdalja` mora enkrat poklicati `razdalja`")
    def test_4_v_dometu(self):
        kraji = [
            ('Lenart', 13, 24),
            ('Brežice', 10, 20),  # Lenart <-> Brežice = 5
            ('Rateče', 17, 20),  # Lenart <-> Rateče = 5.66
            ('Ljutomer', 8, 36)  # Lenart <-> Ljutomer = 13
        ]
        self.assertEqual(v_dometu("Lenart", 5, kraji), ["Brežice"])
        self.assertEqual(v_dometu("Lenart", 3, kraji), [])
        self.assertEqual(set(v_dometu("Lenart", 6, kraji)), {"Brežice", "Rateče"})
        kraji = self.vsi_kraji
        self.assertEqual(set(v_dometu("Ljubljana", 20, kraji)), {'Vrhnika', 'Domžale', 'Kamnik', 'Škofja Loka'})
    def test_5_najbolj_oddaljeni(self):
        kraji = [
            ('Lenart', 13, 24),
            ('Brežice', 10, 20),  # Lenart <-> Brežice = 5
            ('Rateče', 17, 20),  # Lenart <-> Rateče = 5.66
            ('Ljutomer', 8, 36)  # Lenart <-> Ljutomer = 13
        ]
        self.assertEqual(najbolj_oddaljeni("Lenart", ["Brežice", "Rateče"], kraji), "Rateče")
        self.assertEqual(najbolj_oddaljeni("Lenart", ["Brežice"], kraji), "Brežice")
        kraji = self.vsi_kraji
        self.assertEqual(najbolj_oddaljeni("Ljubljana", ["Domžale", "Kranj", "Maribor", "Vrhnika"], kraji), "Maribor")
    def test_6_zalijemo(self):
        self.assertEqual(zalijemo("Ljubljana", 30, self.vsi_kraji), "Cerknica")
    def test_7_presek(self):
        self.assertEqual(presek([1, 5, 2], [3, 1, 4]), [1])
        self.assertEqual(presek([1, 5, 2], [3, 0, 4]), [])
        self.assertEqual(presek([1, 5, 2], []), [])
        self.assertEqual(presek([], [3, 0, 4]), [])
        self.assertEqual(presek([], []), [])
        self.assertEqual(set(presek([1, 5, 2], [2, 0, 5])), {2, 5})
        self.assertEqual(presek(["Ana", "Berta", "Cilka"], ["Cilka", "Dani", "Ema"]), ["Cilka"])
    def test_8_skupno_zalivanje(self):
        self.assertEqual(set(skupno_zalivanje("Bled", "Ljubljana", 30, self.vsi_kraji)),
                         {"Kranj", "Škofja Loka"})
# Allow running this module directly to execute the whole test suite.
if __name__ == "__main__":
    unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
d4a5b1ba6b6f1f3a11524fac579af53d35e04cf7 | b18660ec434f8ebafeb5397690aa1b4c0a1cb528 | /train_ALL_LSTM.py | 528fe8ecf051ad46dbbf40705f292c601be2e094 | [] | no_license | wp0517/pytorch_SRU | 5d46956406c7b64431b736981f4565264ca9aa29 | 96be5b4f4f0b73a4e0532bb18d726655af0fdb50 | refs/heads/master | 2020-04-09T12:06:47.847348 | 2018-06-17T00:53:05 | 2018-06-17T00:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,866 | py | import os
import sys
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn.utils as utils
import torch.optim.lr_scheduler as lr_scheduler
import shutil
import random
import hyperparams
import time
# Seed both torch and random so training runs are reproducible.
torch.manual_seed(hyperparams.seed_num)
random.seed(hyperparams.seed_num)
def train(train_iter, dev_iter, test_iter, model, args):
    """Run the full training loop.

    Builds the optimizer chosen by the flags on `args`, trains for
    args.epochs epochs, periodically evaluates on `dev_iter`, saves
    snapshots and test-evaluates them on `test_iter`.  Returns the number
    of snapshots saved/evaluated.  Also times loss.backward() per batch
    and prints the average at the end.
    """
    if args.cuda:
        model = model.cuda()
    # Select exactly one optimizer based on the mutually exclusive flags.
    if args.Adam is True:
        print("Adam Training......")
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)
    elif args.SGD is True:
        print("SGD Training.......")
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.init_weight_decay,
                                    momentum=args.momentum_value)
    elif args.Adadelta is True:
        print("Adadelta Training.......")
        optimizer = torch.optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)
    '''
    lambda1 = lambda epoch: epoch // 30
    lambda2 = lambda epoch: 0.99 ** epoch
    print("lambda1 {} lambda2 {} ".format(lambda1, lambda2))
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda2])
    scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    '''
    # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    # Exponential LR decay: lr * learning_rate_decay ** epoch, stepped per epoch.
    lambda2 = lambda epoch: args.learning_rate_decay ** epoch
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda2])
    steps = 0
    model_count = 0
    model.train()
    time_list = []  # per-batch backward() durations, for the timing report below
    for epoch in range(1, args.epochs+1):
        print("\n## 第{} 轮迭代,共计迭代 {} 次 !##\n".format(epoch, args.epochs))
        scheduler.step()
        # print("now lr is {} \n".format(scheduler.get_lr()))
        print("now lr is {} \n".format(optimizer.param_groups[0].get("lr")))
        for batch in train_iter:
            feature, target = batch.text, batch.label
            # feature.data.t_()
            target.data.sub_(1)  # batch first, index align (labels are 1-based)
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            # print(feature)
            # target = autograd.Variable(target) # question 1
            optimizer.zero_grad()
            model.zero_grad()
            # Re-init the LSTM hidden state each batch; the final batch may be smaller.
            model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
            if feature.size(1) != args.batch_size:
                # continue
                model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
            # start_time = time.time()
            logit = model(feature)
            loss = F.cross_entropy(logit, target)
            start_time = time.time()
            loss.backward()
            end_time = time.time()
            time_list.append(end_time - start_time)
            # print("Backward Time is {} ".format(end_time - start_time))
            if args.init_clip_max_norm is not None:
                # print("aaaa {} ".format(args.init_clip_max_norm))
                utils.clip_grad_norm(model.parameters(), max_norm=args.init_clip_max_norm)
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                train_size = len(train_iter.dataset)
                # print("sadasd", torch.max(logit, 0))
                corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
                accuracy = float(corrects)/batch.batch_size * 100.0
                sys.stdout.write(
                    '\rBatch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(steps,
                                                                               train_size,
                                                                               loss.data[0],
                                                                               accuracy,
                                                                               corrects,
                                                                               batch.batch_size))
            if steps % args.test_interval == 0:
                eval(dev_iter, model, args, scheduler)
            if steps % args.save_interval == 0:
                if not os.path.isdir(args.save_dir): os.makedirs(args.save_dir)
                save_prefix = os.path.join(args.save_dir, 'snapshot')
                save_path = '{}_steps{}.pt'.format(save_prefix, steps)
                torch.save(model, save_path)
                print("\n", save_path, end=" ")
                test_model = torch.load(save_path)
                model_count += 1
                test_eval(test_iter, test_model, save_path, args, model_count)
    # Average backward() time.  NOTE(review): `sum` shadows the builtin, the
    # first (warm-up) sample is excluded from the numerator but the division
    # still uses the full list length — slight downward bias; confirm intended.
    sum = 0
    for index, value in enumerate(time_list):
        if index != 0:
            sum += value
    avg = sum / len(time_list)
    print("Time is {} ".format(avg))
    return model_count
def eval(data_iter, model, args, scheduler):
    """Evaluate `model` on `data_iter` and print average loss and accuracy.

    `scheduler` is accepted for signature compatibility but unused here.
    The model is switched back to train mode before returning.
    (Note: the name shadows the builtin `eval`; kept for caller compatibility.)
    """
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.label
        target.data.sub_(1)  # labels are 1-based in the dataset; shift to 0-based
        if args.cuda is True:
            feature, target = feature.cuda(), target.cuda()
        # Re-init the hidden state per batch; the final batch may be smaller.
        model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
        if feature.size(1) != args.batch_size:
            model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)
        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
    size = len(data_iter.dataset)
    # BUG FIX: previously only the LAST batch's loss was divided by size
    # (avg_loss = loss.data[0]/size); use the accumulated sum instead.
    avg_loss = avg_loss / size
    accuracy = float(corrects) / size * 100.0
    model.train()
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                      accuracy,
                                                                      corrects,
                                                                      size))
def test_eval(data_iter, model, save_path, args, model_count):
    """Evaluate a saved snapshot on the test set and log the result.

    Appends the evaluation line for `save_path` to ./Test_Result.txt, then
    re-reads the log to append the best accuracy seen so far, mirrors the
    file into the snapshot directory, and optionally deletes the snapshot
    (args.rm_model).
    """
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.label
        target.data.sub_(1)  # labels are 1-based in the dataset; shift to 0-based
        if args.cuda:  # (the original moved the tensors to the GPU twice; once suffices)
            feature, target = feature.cuda(), target.cuda()
        # Re-init the hidden state per batch; the final batch may be smaller.
        model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
        if feature.size(1) != args.batch_size:
            model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)
        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)
                     [1].view(target.size()).data == target.data).sum()
    size = len(data_iter.dataset)
    # BUG FIX: average the accumulated loss instead of only the last batch's.
    avg_loss = avg_loss / size
    accuracy = float(corrects) / size * 100.0
    model.train()
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                      accuracy,
                                                                      corrects,
                                                                      size))
    print("model_count {}".format(model_count))
    # Append this snapshot's result; mode "a" creates the file when missing,
    # so the original exists()/"w" branch was unnecessary.
    with open("./Test_Result.txt", "a") as file:
        file.write("model " + save_path + "\n")
        file.write("Evaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n".format(avg_loss, accuracy, corrects, size))
        file.write("model_count {} \n".format(model_count))
        file.write("\n")
    # Recompute the best accuracy so far from the log itself.
    resultlist = []
    with open("./Test_Result.txt") as file:
        for line in file:
            if line[:10] == "Evaluation":
                # NOTE(review): fixed-column slice of the accuracy field —
                # fragile if the "Evaluation - ..." format ever changes.
                resultlist.append(float(line[34:41]))
    result = sorted(resultlist)
    with open("./Test_Result.txt", "a") as file:
        file.write("\nThe Current Best Result is : " + str(result[len(result) - 1]))
        file.write("\n\n")
    shutil.copy("./Test_Result.txt", "./snapshot/" + args.mulu + "/Test_Result.txt")
    # whether to delete the model after test acc so that to save space
    if os.path.isfile(save_path) and args.rm_model is True:
        os.remove(save_path)
| [
"bamtercelboo@163.com"
] | bamtercelboo@163.com |
793c15be2778bfa6a0852f657ea403fc51e685ba | a3f793a53361d08f3e0cdedc7fab9df40e201eef | /main.py | a53882b59400172fbcb656c830535363798e384d | [] | no_license | songshanshi/imoocc_py3 | 156db4f072bc956f45cbcc8c61fca964be8acfb9 | 6f3491ce857c541bf55d5ed8993265b7dd4dee09 | refs/heads/master | 2020-04-28T02:25:18.241155 | 2018-10-16T07:20:15 | 2018-10-16T07:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,917 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#########################################################################
# Author:Jeson
# Email:jeson@imoocc.com
import datetime
import os
import re
import yaml
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
# import sys
os.environ["DJANGO_SETTINGS_MODULE"] = 'admin.settings.local_cj'
import django
import time
django.setup()
from scanhosts.models import HostLoginifo
from scanhosts.util.nmap_all_server import NmapNet
from scanhosts.util.nmap_all_server import NmapDocker
from scanhosts.util.nmap_all_server import NmapKVM
from scanhosts.util.nmap_all_server import NmapVMX
from scanhosts.util.nmap_all_server import snmp_begin
from scanhosts.util.j_filter import FilterRules
from scanhosts.util.get_pv_relation import GetHostType
from detail.models import PhysicalServerInfo,ConnectionInfo,OtherMachineInfo,StatisticsRecord
from operations.models import MachineOperationsInfo
from scanhosts.util.nmap_all_server import NetDevLogin
from admin.settings.local_cj import BASE_DIR
import logging
logger = logging.getLogger("django")
from apps.detail.utils.machines import Machines
# def net_begin():
# '''
# 开始执行网络扫描
# :return:
# '''
# nm = NmapNet(oid='1.3.6.1.2.1.1.5.0',Version=2)
# nm_res = nm.query()
# print "...................",nm_res
def main():
'''
读取扫描所需配置文件
:return:
'''
s_conf = yaml.load(open('conf/scanhosts.yaml'))
s_nets = s_conf['hostsinfo']['nets']
s_ports = s_conf['hostsinfo']['ports']
s_pass = s_conf['hostsinfo']['ssh_pass']
s_cmds = s_conf['hostsinfo']['syscmd_list']
s_keys = s_conf['hostsinfo']['ssh_key_file']
s_blacks = s_conf['hostsinfo']['black_list']
s_emails = s_conf['hostsinfo']['email_list']
n_sysname_oid = s_conf['netinfo']['sysname_oid']
n_sn_oid = s_conf['netinfo']['sn_oids']
n_commu = s_conf['netinfo']['community']
n_login_sw = s_conf['netinfo']['login_enable']
n_backup_sw = s_conf['netinfo']['backup_enable']
n_backup_sever = s_conf['netinfo']['tfp_server']
d_pass = s_conf['dockerinfo']['ssh_pass']
starttime = datetime.datetime.now()
'''
扫描主机信息
'''
for nmap_type in s_nets:
unkown_list,key_not_login_list = snmp_begin(nmap_type,s_ports,s_pass,s_keys,s_cmds,s_blacks,s_emails)
'''
扫描网络信息
'''
nm = NmapNet(n_sysname_oid,n_sn_oid,n_commu)
if key_not_login_list:
for item in key_not_login_list:
is_net = nm.query(item)
if is_net[0] or is_net[1]:
HostLoginifo.objects.update_or_create(ip=item,hostname=is_net[0],sn=is_net[1],mathine_type="Network device")
else:
HostLoginifo.objects.update_or_create(ip=item,ssh_port=key_not_login_list[item][0],ssh_status=0)
other_sn = item.replace('.','')
ob = OtherMachineInfo.objects.filter(sn_key=other_sn)
if not ob:
print(".........................OtherMachineInfo",item,other_sn)
OtherMachineInfo.objects.create(ip=item,sn_key=other_sn,reson_str=u"SSH端口存活,无法登录",oth_cab_id=1)
if unkown_list:
for item in unkown_list:
is_net = nm.query(item)
if is_net[0] or is_net[1]:
HostLoginifo.objects.update_or_create(ip=item,hostname=is_net,mathine_type="Network device")
else:
HostLoginifo.objects.update_or_create(ip=item,ssh_status=0)
other_sn = item.replace('.','')
ob = OtherMachineInfo.objects.filter(sn_key=other_sn)
if not ob:
OtherMachineInfo.objects.create(ip=item,sn_key=other_sn,reson_str=u"IP存活,非Linux服务器",oth_cab_id=1)
# '''
# 网络设备备份或者登录功能
# '''
# net_login_dct = {}
# with open("%s/conf/net_dev.pass"%BASE_DIR,'r') as f:
# for item in f.readlines():
# ip,username,passwd,en_passwd = re.split("\s+",item)[:4]
# net_login_dct[ip] = (username,passwd,en_passwd)
# if n_login_sw == "True":
# res = NetDevLogin(dev_ips=net_login_dct,backup_sw=n_backup_sw,back_server=n_backup_sever)
'''
规则:主机信息,去重、生成关系字典
'''
ft = FilterRules()
key_ip_dic = ft.run()
'''
梳理虚拟服务器主机于服务器信息
'''
pv = GetHostType()
p_relate_dic = pv.get_host_type(key_ip_dic)
'''
更新宿主机类型中表对应关系
'''
ip_key_dic = {v:k for k,v in key_ip_dic.items()}
docker_p_list = p_relate_dic["docker-containerd"]
kvm_p_list = p_relate_dic["qemu-system-x86_64"]
vmware_p_list = p_relate_dic["vmx"]
for item in docker_p_list:
PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="1")
for item in kvm_p_list:
PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="0")
for item in vmware_p_list:
PhysicalServerInfo.objects.filter(conn_phy__sn_key=ip_key_dic[item]).update(vir_type="2")
'''
扫描docker的宿主机和虚拟服务的关系
'''
ds = NmapDocker(s_cmds,d_pass,ip_key_dic)
ds.do_nmap(docker_p_list)
'''
扫描KVM的宿主机和虚拟服务的关系
# '''
ks = NmapKVM(ip_key_dic)
ks.do_nmap(kvm_p_list)
'''
扫描ESXI虚拟机配置
'''
ne = NmapVMX(vmware_p_list,ip_key_dic)
ne.dosnmp()
'''
更新状态表,用户信息表
'''
c_sn_lst = [item.sn_key for item in ConnectionInfo.objects.all()]
o_sn_lst = [item.sn_key for item in OtherMachineInfo.objects.all()]
old_sn_list = [item.sn_key for item in MachineOperationsInfo.objects.all()]
new_sn_lst = c_sn_lst + o_sn_lst
diff_sn_lst = set(new_sn_lst + old_sn_list)
for item in diff_sn_lst:
try:
nsin = MachineOperationsInfo.objects.filter(sn_key=item)
if not nsin:
MachineOperationsInfo.objects.create(sn_key=item)
except Exception as e:
print("Error:SN:%s not insert into database,reason is:%s"%(item,e))
logger.error("Error:SN:%s not insert into database,reason is:%s"%(item,e))
'''
统计总数
'''
info_dic = Machines().get_all_count()
StatisticsRecord.objects.create(all_count=info_dic['all_c'],pyh_count=info_dic['pyh_c'],net_count=info_dic['net_c'],
other_count=info_dic['other_c'],vmx_count=info_dic['vmx_c'],kvm_count=info_dic['kvm_c'],docker_count=info_dic['docker_c'])
endtime = datetime.datetime.now()
totaltime = (endtime - starttime).seconds
logger.info("{Finish:Use time %s s}"%totaltime)
print("{Finish:Use time %s s}"%totaltime)
if __name__ == "__main__":
main() | [
"gengming8859@icloud.com"
] | gengming8859@icloud.com |
2bb1e7e593dfb67298aa570a9c0e2c150b0dc54b | d0bd9c3c5539141c74e0eeae2fa6b7b38af84ce2 | /src/cogent3/parse/__init__.py | 7559bc6dcc006e4be1bcd02096d3c56f55fc2512 | [
"BSD-3-Clause"
] | permissive | KaneWh1te/cogent3 | 150c72e2f80a6439de0413b39c4c37c09c9966e3 | 115e9eb5700627fdb24be61441a7e3e155c02c61 | refs/heads/master | 2023-07-29T00:32:03.742351 | 2021-04-20T04:32:00 | 2021-04-20T04:32:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #!/usr/bin/env python
__all__ = [
"blast",
"cigar",
"clustal",
"dialign",
"ebi",
"fasta",
"gcg",
"genbank",
"gff",
"locuslink",
"ncbi_taxonomy",
"newick",
"nexus",
"paml",
"paml_matrix",
"phylip",
"rdb",
"record",
"record_finder",
"sequence",
"table",
"tinyseq",
"tree",
"tree_xml",
"unigene",
]
__author__ = ""
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = [
"Gavin Huttley",
"Peter Maxwell",
"Rob Knight",
"Catherine Lozupone",
"Jeremy Widmann",
"Matthew Wakefield",
"Sandra Smit",
"Greg Caporaso",
"Zongzhi Liu",
"Micah Hamady",
"Jason Carnes",
"Raymond Sammut",
"Hua Ying",
"Andrew Butterfield",
"Marcin Cieslik",
]
__license__ = "BSD-3"
__version__ = "2021.04.20a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
| [
"Gavin.Huttley@anu.edu.au"
] | Gavin.Huttley@anu.edu.au |
59750fad06e56331b012fa6b1c81a5617cb2f861 | 27df339496f93c8ac45878130e774ae7ef8d1f61 | /csv_file/csv_demo.py | ba634edff5b3a8db253f2636b7987edded93beaa | [] | no_license | vutukurus/class_project | 9ce2df421f44c9b90c04997ff41e19dade3d6cb7 | d714b4edafc02dcbb59fab4ba78480e1c0657b66 | refs/heads/master | 2021-01-12T05:25:33.411576 | 2017-03-12T14:27:28 | 2017-03-12T14:27:28 | 77,926,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import csv
#CSV file reading in python..
f = open('demo_file.csv',"r")
file_content = csv.reader(f) #it will read csv file contents..
for i in file_content:
print i
'''
#logic for printing salaries grater than 15k
c=0
for i in file_content:
if c > 0:
if int(i[-1]) > 15000:
print i[-1]
c=c+1
'''
f_write = open('demo_emp.csv',"w")
write_content = csv.writer(f_write)
for i in file_content:
del i[1]
write_content.writerow(i)
| [
"vutukurus@gmail.com"
] | vutukurus@gmail.com |
af0fc5daf91f3b8d1cc064e06a4521d8bbcdd22e | e4d298905eaa0ba0d5fae062cfa6167669825a96 | /Snake_game.py | 670d25759fd6134d157292714aa855baa288c856 | [] | no_license | Blockcipher123/Snake-Game | 55f98cc58ff7d1f0005ee2ce513dae8412454786 | e64e4dd8d7374292519bbd23a851656bd12377db | refs/heads/main | 2023-08-11T08:17:39.600454 | 2021-09-26T04:55:50 | 2021-09-26T04:55:50 | 393,858,226 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,013 | py | import pygame
import random
import os
pygame.mixer.init()
pygame.init()
# colors
white = (255, 255,255)
red = (255, 0, 0)
black = (0,0, 0)
green = (9, 237, 24)
screen_width = 900
screen_hight = 600
# Creation Window
gameWindow = pygame.display.set_mode((screen_width, screen_hight))
# Background Image
bgimg = pygame.image.load("back2.jpg")
bgimg = pygame.transform.scale(bgimg, (screen_width, screen_hight)).convert_alpha()
# game over image
gameing = pygame.image.load("firstintro.png")
gameing = pygame.transform.scale(gameing, (screen_width, screen_hight)).convert_alpha()
# Game title
pygame.display.set_caption('Snakes_Game')
pygame.display.update()
clock = pygame.time.Clock()
font = pygame.font.SysFont(None, 55)
def text_screen(text, color, x, y):
screen_text = font.render(text, True, color)
gameWindow.blit(screen_text, [x,y])
def plot_snake(gameWindow, color, snk_list, snake_size):
# print(snk_list)
for x,y in snk_list:
pygame.draw.rect(gameWindow,color,[x, y, snake_size, snake_size])
def welcome():
exit_game = False
while not exit_game:
gameWindow.fill((220,100,229))
text_screen("Welcome To Snake", black, 260, 250)
text_screen("Press Space Bar To Play", black, 230, 290)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
pygame.mixer.music.load("back.mp3")
pygame.mixer.music.play()
gameloop()
pygame.display.update()
clock.tick(50)
# Game loop
def gameloop():
# Game specific variable
exit_game = False
game_over = False
snake_x = 45
snake_y = 55
velocity_x = 0
velocity_y = 0
snk_list = []
snk_length = 1
# check if highscore file exists
if (not os.path.exists("")):
with open("hiscore.txt", "w") as f:
f.write("0")
with open("highscore.txt", 'r') as f:
highscore = f.read()
apple_x = random.randint(20,screen_width/2)
apple_y = random.randint(20,screen_hight/2)
score = 0
init_velocity = 5
snake_size = 30
fps = 50
while not exit_game:
if game_over:
with open("highscore.txt", 'w') as f:
f.write(str(highscore))
gameWindow.fill((0,0,0))
gameWindow.blit(gameing,(5,5))
text_screen(f"Your Score is {score}", red, 320, 400)
# foont = text_screen(f'By Block_Cipher', green, 500, 500)
# foont1(Font(20))
# if score > highscore:
# text_screen(f"Great, Score is {score}", red, 320, 400)
for event in pygame.event.get():
# print(event)
if event.type==pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
# gameloop()
welcome()
else:
for event in pygame.event.get():
# print(event)
if event.type==pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
velocity_x = init_velocity
velocity_y = 0
if event.key == pygame.K_LEFT:
velocity_x = - init_velocity
velocity_y = 0
if event.key == pygame.K_UP:
velocity_y = - init_velocity
velocity_x = 0
if event.key == pygame.K_DOWN:
velocity_y = init_velocity
velocity_x = 0
# if event.click == pygame.C_RIGHT:
# velocity_x = init_velocity
# velocity_y = 0
if event.key == pygame.K_q:
score += 10
snake_x = snake_x + velocity_x
snake_y = snake_y + velocity_y
if abs (snake_x - apple_x) <15 and abs(snake_y - apple_y) <15:
score += 10
apple_x = random.randint(20,screen_width/2)
apple_y = random.randint(20,screen_hight/2)
snk_length += 5
# print(highscore)
if score>int(highscore):
highscore = score
gameWindow.fill(white)
gameWindow.blit(bgimg, (0,0))
text_screen("Score : " + str(score) + " Hiscore : " + str(highscore), green, 5 , 5 )
pygame.draw.rect(gameWindow, red, [apple_x, apple_y, snake_size, snake_size])
head = []
head.append(snake_x)
head.append(snake_y)
snk_list.append(head)
if len(snk_list)>snk_length:
del snk_list[0]
if head in snk_list[:-1]:
game_over = True
pygame.mixer.music.load("gameover.mp3")
pygame.mixer.music.play()
if snake_x<0 or snake_x>screen_width or snake_y<0 or snake_y>screen_hight:
game_over = True
pygame.mixer.music.load("gameover.mp3")
pygame.mixer.music.play()
# print("Game over ! -")
# pygame.draw.rect(gameWindow,black,[snake_x, snake_y, snake_size, snake_size])
plot_snake(gameWindow, black, snk_list, snake_size)
pygame.display.update()
clock.tick(fps)
pygame.quit()
quit()
welcome()
| [
"noreply@github.com"
] | Blockcipher123.noreply@github.com |
9f8c5b37b12ed48e2504be0c4a85627772bf19c6 | f188c8791f10b481c59b064e5632f2b8e6468a15 | /pyhrv/tools.py | cbfc8d21cab829d8db98694990d6728b1c3940bf | [
"BSD-3-Clause"
] | permissive | RuWander/pyhrv | a440b0f0e7bb199718fefcbc6f56a509914f4585 | 61b233e907a0223e2a2bf1a5962ee2e9ae8ed36c | refs/heads/master | 2021-03-27T05:56:17.351755 | 2020-03-17T13:28:39 | 2020-03-17T13:28:39 | 247,793,858 | 0 | 0 | BSD-3-Clause | 2020-03-16T18:55:30 | 2020-03-16T18:55:29 | null | UTF-8 | Python | false | false | 45,737 | py | #!/usr/bin/env python -W ignore::FutureWarning
# -*- coding: utf-8 -*-
"""
pyHRV - Heart Rate Variability Toolbox - Tools
----------------------------------------------
This module provides support tools for HRV analysis, such as the computation of HRV-relevant data
series (NNI, NNI differences, Heart Rate) and plotting utilities (ECG, Tachogram, HR heat plot).
Notes
-----
.. This module is part of the master thesis
"Development of an Open-Source Python Toolbox for Heart Rate Variability (HRV)".
.. This module is a contribution to the open-source biosignal processing toolbox 'BioSppy':
https://github.com/PIA-Group/BioSPPy
Author
------
.. Pedro Gomes, pgomes92@gmail.com
Thesis Supervisors
------------------
.. Hugo Silva, PhD, Instituto de Telecomunicacoes, PLUX wireless biosignals S.A.
.. Prof. Dr. Petra Margaritoff, University of Applied Sciences Hamburg
Docs
----
.. You can find the documentation for this module here:
https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html
Last Update
-----------
12-11-2019
:copyright: (c) 2018 by Pedro Gomes
:license: BSD 3-clause, see LICENSE for more details.
"""
# Compatibility
from __future__ import absolute_import, division
# Imports
import os
import sys
import warnings
import json
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime as dt
from matplotlib.projections import register_projection
# BioSPPy imports
import biosppy
# Local imports
import pyhrv
import pyhrv.time_domain
import pyhrv.frequency_domain
import pyhrv.nonlinear
# Turn off toolbox triggered warnings
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
def nn_intervals(rpeaks=None):
	"""Computes the NN intervals [ms] between successive R-peaks.

	Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#nn-intervals-nn-intervals

	Parameter
	---------
	rpeaks : array
		R-peak times in [ms] or [s]

	Returns
	-------
	nni : array
		NN intervals in [ms]

	Raises
	------
	TypeError
		If no data provided for 'rpeaks'
	TypeError
		If data format is not list, tuple or numpy array

	Notes
	-----
	..	You can find the documentation for this function here:
		https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#nn-intervals-nn-intervals

	"""
	# Check input signal
	if rpeaks is None:
		raise TypeError("No data for R-peak locations provided. Please specify input data.")
	# BUGFIX: the former check 'type(rpeaks) is not list and not np.ndarray' could never be True
	# ('not np.ndarray' is always False), so invalid input types were never rejected
	if not isinstance(rpeaks, (list, tuple, np.ndarray)):
		raise TypeError("List, tuple or numpy array expected, received %s" % type(rpeaks))

	# Successive differences of the R-peak locations are the NN intervals
	# (np.diff replaces the former manual element-wise loop)
	nn_int = np.diff(np.asarray(rpeaks, dtype=float))

	# nn_format() converts series given in [s] to [ms] and confirms a numpy array
	return pyhrv.utils.nn_format(nn_int)
def nni_diff(nni=None):
	"""Computes the series of differences between successive NN intervals [ms].

	Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#nn-interval-differences-nn-diff

	Parameters
	----------
	nni : array
		NN intervals in [ms] or [s].

	Returns
	-------
	nni_diff_ : numpy array
		Absolute differences between successive NN intervals in [ms].

	Raises
	------
	TypeError
		If no data provided for 'nni'.
	TypeError
		If no list or numpy array is provided.
	TypeError
		If NN interval array contains non-integer or non-float values.

	Notes
	-----
	..	You can find the documentation for this module here:
		https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#nn-interval-differences-nn-diff

	"""
	# Check input series
	if nni is None:
		raise TypeError("No data for R-peak locations provided. Please specify input data.")
	if not isinstance(nni, (list, np.ndarray)):
		raise TypeError("List or numpy array expected, received %s" % type(nni))
	# BUGFIX: the former condition 'all(isinstance(x, int)...) and all(isinstance(x, float)...)'
	# is only True for empty input (vacuous truth), so invalid entries were never rejected;
	# verify that each element is numeric instead (numpy scalar types included)
	if not all(isinstance(x, (int, float, np.integer, np.floating)) for x in nni):
		raise TypeError("'nni' data contains non-int or non-float data.")

	# Convert series given in [s] to [ms] (if necessary) and confirm numpy array
	nn = pyhrv.utils.nn_format(nni)

	# Absolute successive differences (np.abs(np.diff(...)) replaces the former manual loop)
	return np.abs(np.diff(nn))
def plot_ecg(signal=None,
             t=None,
             sampling_rate=1000.,
             interval=None,
             rpeaks=True,
             figsize=None,
             title=None,
             show=True):
	"""Plots ECG lead-I like signal on a medical grade ECG paper-like figure layout.

	Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#plot-ecg-plot-ecg

	Parameters
	----------
	signal : array
		ECG lead-I like signal (filtered or unfiltered)
	t : array, optional
		Time vector for the ECG lead-I like signal (default: None)
	sampling_rate : int, float, optional
		Sampling rate of the acquired signal in [Hz] (default: 1000Hz)
	interval : array, 2-element, optional
		Visualization interval of the ECG lead-I like signal plot (default: None: [0s, 10s])
	rpeaks : bool, optional
		If True, marks R-peaks in ECG lead-I like signal (default: True)
	figsize : array, optional
		Matplotlib figure size (width, height) (default: None: (12, 4))
	title : str, optional
		Plot figure title (default: None).
	show : bool, optional
		If True, shows the ECG plot figure (default: True)

	Returns
	-------
	fig_ecg : matplotlib figure object
		Matplotlib figure of ECG plot

	Raises
	------
	TypeError
		If no ECG data provided.

	Notes
	----
	..	You can find the documentation for this function here:
		https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#plot-ecg-plot-ecg

	"""
	# Check input data
	if signal is None:
		raise TypeError("No ECG data provided. Please specify input data.")
	# Confirm numpy
	signal = np.asarray(signal)

	# Compute time vector
	if t is None:
		t = pyhrv.utils.time_vector(signal, sampling_rate=sampling_rate)

	# Configure interval of visualized signal
	# BUGFIX: compare strings with '==' instead of 'is' (identity comparison with a string
	# literal relies on CPython interning and raises a SyntaxWarning on Python >= 3.8)
	if interval == 'complete':
		interval = [0, t[-1]]
	else:
		interval = pyhrv.utils.check_interval(interval, limits=[0, t[-1]], default=[0, 10])

	# Prepare figure
	if figsize is None:
		figsize = (12, 4)
	fig_ecg = plt.figure(figsize=figsize)
	ax = fig_ecg.add_subplot(111)

	# Configure axis according to BITalino ECG sensor ranges
	if signal.max() > 1.5:
		# Raw/unscaled signal -> derive axis limits from the signal amplitude range
		y_min = int(signal.min() - (signal.max() - signal.min()) * 0.2)
		y_max = int(signal.max() + (signal.max() - signal.min()) * 0.2)
		unit = '-'
		y_minor = np.linspace(y_min, y_max, 12)
		y_major = np.linspace(y_min, y_max, 4)
	elif signal.max() < 1.0:
		y_min, y_max = -1., 1.,
		unit = 'mV'
		# NOTE(review): np.arange(-0.9, y_min, 0.1) is empty for y_min=-1.0, i.e. minor
		# y-ticks are effectively disabled on this branch -- verify intent
		y_minor = np.arange(-0.9, y_min, 0.1)
		y_major = np.arange(-1.0, y_max + 0.5, 0.5)
	else:
		y_min, y_max = -1.5, 1.5,
		unit = 'mV'
		y_minor = np.arange(-1.4, y_min, 0.1)
		y_major = np.arange(y_min, y_max + 0.5, 0.5)

	ax.axis([interval[0], interval[1], y_min, y_max])
	ax.set_xlabel('Time [$s$]')
	ax.set_ylabel('ECG [$%s$]' % unit)

	# Set ticks as ECG paper (box height ~= 0.1mV; width ~= 0.1s when using default values)
	n = int(interval[1] / 10)
	try:
		ax.set_xticks(np.arange(0.0, interval[1] + 0.1, float(n)/5), minor=True)
		ax.xaxis.grid(which='minor', color='salmon', lw=0.3)
		ax.set_xticks(np.arange(0, interval[1] + 0.1, n))
		ax.xaxis.grid(which='major', color='r', lw=0.7)
		ax.set_yticks(y_minor, minor=True)
		ax.yaxis.grid(which='minor', color='salmon', lw=0.3)
		ax.set_yticks(y_major)
		ax.yaxis.grid(which='major', color='r', lw=0.7)
	except Exception:
		# Grid configuration can fail (e.g. n == 0 for very short intervals); fall back to no grid
		ax.grid(False)

	# Add legend with the grid division sizes
	unit = '' if unit == '-' else unit
	text_ = 'Division (x): %is\nDivision (y): %.1f%s' % (n, (np.abs(y_major[1] - y_major[0])), unit)
	ax.text(0.88, 0.85, text_, transform=ax.transAxes, fontsize=9,
			bbox=dict(boxstyle='round', facecolor='white', alpha=0.9))

	# Plot ECG lead-I like signal
	ax.plot(t, signal, 'r')
	fig_ecg.tight_layout()

	# Plot R-peak markers
	# IMPROVEMENT: R-peak detection is only run if the markers are actually requested
	# (previously the detection was computed unconditionally and only the plotting was skipped)
	if rpeaks:
		rps = biosppy.signals.ecg.ecg(signal=signal, sampling_rate=sampling_rate, show=False)[2]
		ax.plot(t[rps], [float(signal[x]) for x in rps], 'g*', alpha=0.7)

	# Add title
	if title is not None:
		ax.set_title('ECG lead-I like signal - %s' % str(title))
	else:
		ax.set_title('ECG lead-I like signal')

	# Show plot
	if show:
		plt.show()

	# Output
	args = (fig_ecg, )
	names = ('ecg_plot', )
	return biosppy.utils.ReturnTuple(args, names)
def tachogram(nni=None,
              signal=None,
              rpeaks=None,
              sampling_rate=1000.,
              hr=True,
              interval=None,
              title=None,
              figsize=None,
              show=True):
	"""Plots Tachogram (NNI & HR) of an ECG lead-I like signal, NNI or R-peak series.

	Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#tachogram-tachogram

	Parameters
	----------
	nni : array
		NN intervals in [ms] or [s].
	rpeaks : array
		R-peak times in [ms] or [s].
	signal : array, optional
		ECG lead-I like signal.
	sampling_rate : int, float
		Sampling rate of the acquired signal in [Hz].
	hr : bool, optional
		If True, plots series of heart rate data in [bpm] (default: True).
	interval : list, optional
		Sets visualization interval of the signal (default: [0, 10]).
	title : str, optional
		Plot figure title (default: None).
	figsize : array, optional
		Matplotlib figure size (width, height) (default: (12, 4)).
	show : bool, optional
		If True, shows plot figure (default: True).

	Returns
	-------
	fig : matplotlib.pyplot figure
		Tachogram figure & graph

	Raises
	------
	TypeError
		If no input data for 'nni', 'rpeaks' or 'signal' is provided

	Notes
	-----
	..	NN intervals are derived from the ECG lead-I like signal if 'signal' is provided.
	..	If both 'nni' and 'rpeaks' are provided, 'rpeaks' will be chosen over the 'nn' and the 'nni' data will be
		computed from the 'rpeaks'.
	..	If both 'nni' and 'signal' are provided, 'nni' will be chosen over 'signal'.
	..	If both 'rpeaks' and 'signal' are provided, 'rpeaks' will be chosen over 'signal'.

	"""
	# Check input: prefer ECG-derived R-peaks if a raw signal is given
	if signal is not None:
		rpeaks = biosppy.signals.ecg.ecg(signal=signal, sampling_rate=sampling_rate, show=False)[2]
	elif nni is None and rpeaks is None:
		raise TypeError('No input data provided. Please specify input data.')

	# Get NNI series
	nni = pyhrv.utils.check_input(nni, rpeaks)

	# Time vector back to ms
	t = np.cumsum(nni) / 1000.

	# Configure interval of visualized signal
	# BUGFIX: compare strings with '==' instead of 'is' (identity comparison with a string
	# literal relies on CPython interning and raises a SyntaxWarning on Python >= 3.8)
	if interval == 'complete':
		interval = [0, t[-1]]
	else:
		interval = pyhrv.utils.check_interval(interval, limits=[0, t[-1]], default=[0, 10])

	# Prepare figure
	if figsize is None:
		figsize = (12, 4)
	fig = plt.figure(figsize=figsize)
	ax = fig.add_subplot(111)

	# X-Axis configuration
	# Set x-axis format to seconds if the duration of the signal <= 60s
	if interval[1] <= 60:
		ax.set_xlabel('Time [s]')
	# Set x-axis format to MM:SS if the duration of the signal > 60s and <= 1h
	elif 60 < interval[1] <= 3600:
		ax.set_xlabel('Time [MM:SS]')
		formatter = mpl.ticker.FuncFormatter(lambda ms, x: str(dt.timedelta(seconds=ms))[2:])
		ax.xaxis.set_major_formatter(formatter)
	# Set x-axis format to HH:MM:SS if the duration of the signal > 1h
	else:
		ax.set_xlabel('Time [HH:MM:SS]')
		formatter = mpl.ticker.FuncFormatter(lambda ms, x: str(dt.timedelta(seconds=ms)))
		ax.xaxis.set_major_formatter(formatter)

	try:
		n = int(interval[1] / 10)
		ax.set_xticks(np.arange(0, interval[1] + n, n))
	except Exception:
		# Tick configuration can fail (e.g. n == 0 for very short intervals); fall back to no grid
		ax.grid(False)

	# Y-Axis configuration (min, max set to maximum of the visualization interval)
	ax.set_ylabel('NN Interval [$ms$]')
	nn_min = np.min(nni[np.argwhere(np.logical_and(interval[0] <= t, t <= interval[1]))])
	nn_max = np.max(nni[np.argwhere(np.logical_and(interval[0] <= t, t <= interval[1]))])
	ax.axis([interval[0], interval[1], nn_min * 0.9, nn_max * 1.1])

	# Plot 'x' markers only if less than 50 rpeaks are within the given data, otherwise don't add them
	if np.argwhere(t < interval[1]).size < 50:
		l1 = ax.plot(t, nni, color='g', label='NN Intervals', marker='x', linestyle='--', linewidth=0.8)
		ax.vlines(t, 200, 3000, linestyles='--', linewidth=0.5, alpha=0.7, colors='lightskyblue')
	else:
		l1 = ax.plot(t, nni, color='g', label='NN Intervals', linestyle='--', linewidth=0.8)
	lns = []

	# Plot heart rate signal on a secondary y-axis
	if hr:
		ax2 = ax.twinx()
		bpm_values = heart_rate(nni)
		# HR extremes are the inverse of the NNI extremes within the visualization interval
		hr_min = heart_rate(nn_max)
		hr_max = heart_rate(nn_min)
		ax2.set_ylabel('Heart Rate [$1/min$]', rotation=270, labelpad=15)
		ax2.axis([interval[0], interval[1], hr_min * 0.9, hr_max * 1.1])

		# Plot 'x' markers only if less than 50 rpeaks are within the given data, otherwise don't add them
		if np.argwhere(t < interval[1]).size < 50:
			l2 = ax2.plot(t, bpm_values, color='red', label='Heart Rate', marker='x', linestyle='--', linewidth=0.8)
		else:
			l2 = ax2.plot(t, bpm_values, color='red', label='Heart Rate', linestyle='--', linewidth=0.8)

		# Merge legends of both axes
		lns = l1 + l2
		labs = [l.get_label() for l in lns]
		ax.legend(lns, labs, loc=1)
	else:
		ax.legend(loc=1)

	# Add title
	if title is not None:
		ax.set_title('Tachogram - %s' % str(title))
	else:
		ax.set_title('Tachogram')

	# Show plot
	if show:
		plt.show()

	# Output
	args = (fig, )
	names = ('tachogram_plot', )
	return biosppy.utils.ReturnTuple(args, names)
def heart_rate(nni=None, rpeaks=None):
	"""Computes a series of Heart Rate values in [bpm] from a series of NN intervals or R-peaks
	in [ms] or [s], or the HR from a single NNI.

	Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#heart-rate-heart-rate

	Parameters
	----------
	nni : int, float, array
		NN intervals in [ms] or [s].
	rpeaks : int, float, array
		R-peak times in [ms] or [s].

	Returns
	-------
	bpm : list, numpy array, float
		Heart rate computation [bpm].
		Float value if 1 NN interval has been provided.
		Float array if series of NN intervals or R-peaks are provided.

	Raises
	------
	TypeError
		If no input data for 'rpeaks' or 'nni' is provided.
	TypeError
		If provided NN data is not provided in int, float, list or numpy array format.

	Notes
	-----
	..	You can find the documentation for this module here:
		https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#heart-rate-heart-rate

	"""
	# Check input
	if nni is None and rpeaks is not None:
		# Compute NN intervals if rpeaks array is given; only 1 interval if 2 r-peaks provided
		nni = nn_intervals(rpeaks) if len(rpeaks) > 2 else int(np.abs(rpeaks[1] - rpeaks[0]))
	elif nni is not None:
		# Use given NN intervals & confirm numpy if series of NN intervals is provided
		if isinstance(nni, (list, np.ndarray)):
			nni = pyhrv.utils.nn_format(nni) if len(nni) > 1 else nni[0]
		elif isinstance(nni, (int, float)):
			# BUGFIX: the former condition 'type(nni) is int or float' was always True
			# ('or float' is truthy); additionally, a single NNI assumed to be given in [s]
			# (value <= 10) was divided by 1000 instead of being converted to [ms]
			nni = int(nni) if nni > 10 else int(nni * 1000)
		else:
			raise TypeError("Invalid data type. Please provide data in int, float, list or numpy array format.")
	else:
		raise TypeError("No data for R-peak locations or NN intervals provided. Please specify input data.")

	# Compute heart rate data (HR [bpm] = 60000 / NNI [ms])
	# NOTE: ndarray is checked first since numpy scalars subclass the Python float type
	if isinstance(nni, np.ndarray):
		return np.asarray([60000. / float(x) for x in nni])
	elif isinstance(nni, (int, float, np.integer, np.floating)):
		return 60000. / float(nni)
	else:
		raise TypeError("Invalid data type. Please provide data in int, float, list or numpy array format.")
def heart_rate_heatplot(nni=None,
                        rpeaks=None,
                        signal=None,
                        sampling_rate=1000.,
                        age=18,
                        gender='male',
                        interval=None,
                        figsize=None,
                        show=True):
	"""Graphical visualization & classification of HR performance based on normal HR ranges by age and gender.

	Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#heart-rate-heatplot-hr-heatplot

	Parameters
	----------
	nni : array
		NN intervals in [ms] or [s].
	rpeaks : array
		R-peak times in [ms] or [s].
	signal : array, optional
		ECG lead-I like signal.
	sampling_rate : int, float, optional
		Sampling rate of the acquired signal in [Hz].
	age : int, float
		Age of the subject (default: 18).
	gender : str
		Gender of the subject ('m', 'male', 'f', 'female'; default: 'male').
	interval : list, optional
		Sets visualization interval of the signal (default: full signal duration).
	figsize : array, optional
		Matplotlib figure size (width, height) (default: (12, 5)).
	show : bool, optional
		If True, shows plot figure (default: True).

	Returns
	-------
	hr_heatplot : biosppy.utils.ReturnTuple object
		[0] matplotlib figure of the HR heat plot ('hr_heatplot')

	Raises
	------
	TypeError
		If no input data for 'nni', 'rpeaks' or 'signal' is provided
	ValueError
		If an unknown gender string or an age <= 0 is provided

	Notes
	-----
	..	If both 'nni' and 'rpeaks' are provided, 'rpeaks' will be chosen over the 'nn' and the 'nni' data will be
		computed from the 'rpeaks'
	..	Modify the 'hr_heatplot.json' file to write own database values

	"""
	# Helper: return the classifier key (e.g. HR zone name) whose [lower, upper] range from the
	# reference database contains 'val'; implicitly returns None if no range matches
	def _get_classification(val, data):
		for key in data.keys():
			if data[key][0] <= int(val) <= data[key][1]:
				return key

	# Check input: prefer ECG-derived R-peaks if a raw signal is given
	if signal is not None:
		rpeaks = biosppy.signals.ecg.ecg(signal=signal, sampling_rate=sampling_rate, show=False)[2]
	elif nni is None and rpeaks is None:
		raise TypeError('No input data provided. Please specify input data.')

	# Get NNI series
	nn = pyhrv.utils.check_input(nni, rpeaks)

	# Compute HR data and the cumulative time vector [s]
	hr_data = heart_rate(nn)
	t = np.cumsum(nn) / 1000
	interval = pyhrv.utils.check_interval(interval, limits=[0, t[-1]], default=[0, t[-1]])

	# Prepare figure: main HR axis (ax), spacer (ax1, hidden), distribution bar (ax2)
	if figsize is None:
		figsize = (12, 5)
	fig, (ax, ax1, ax2) = plt.subplots(3, 1, figsize=figsize, gridspec_kw={'height_ratios': [12, 1, 1]})
	ax1.axis("off")
	fig.suptitle("Heart Rate Heat Plot (%s, %s)" % (gender, age))

	# X-Axis configuration
	# Set x-axis format to seconds if the duration of the signal <= 60s
	if interval[1] <= 60:
		ax.set_xlabel('Time [s]')
	# Set x-axis format to MM:SS if the duration of the signal > 60s and <= 1h
	elif 60 < interval[1] <= 3600:
		ax.set_xlabel('Time [MM:SS]')
		formatter = mpl.ticker.FuncFormatter(lambda ms, x: str(dt.timedelta(seconds=ms))[2:])
		ax.xaxis.set_major_formatter(formatter)
	# Set x-axis format to HH:MM:SS if the duration of the signal > 1h
	else:
		ax.set_xlabel('Time [HH:MM:SS]')
		formatter = mpl.ticker.FuncFormatter(lambda ms, x: str(dt.timedelta(seconds=ms)))
		ax.xaxis.set_major_formatter(formatter)

	# Normalize gender shorthands ('m'/'f') to the database keys ('male'/'female')
	if gender not in ["male", "m", "female", "f"]:
		raise ValueError("Unknown gender '%s' for this database." % gender)
	else:
		if gender == 'm':
			gender = 'male'
		elif gender == 'f':
			gender = 'female'

	# Load comparison data from database
	# NOTE(review): the JSON schema ("ages", "colors", "order", per-gender HR ranges) is inferred
	# from the usage below -- keep 'hr_heatplot.json' consistent with this structure
	database = json.load(open(os.path.join(os.path.split(__file__)[0], './files/hr_heatplot.json')))

	# Get database values (reference data is only available for adults, i.e. age > 17)
	if age > 17:
		# Map the subject's age onto the database age bracket
		# NOTE(review): '_age' stays unbound (NameError below) if no bracket matches -- TODO confirm
		# the database covers all ages > 17
		for key in database["ages"].keys():
			if database["ages"][key][0] - 1 < age < database["ages"][key][1] + 1:
				_age = database["ages"][key][0]
		color_map = database["colors"]
		data = database[gender][str(_age)]
		order = database["order"]

		# Plot with information based on reference database:
		# Create classifier counter (preparation for steps after the plot)
		classifier_counter = {}
		for key in data.keys():
			classifier_counter[key] = 0

		# Add threshold lines based on the comparison data
		for threshold in data.keys():
			ax.hlines(data[threshold][0], 0, t[-1], linewidth=0.4, alpha=1, color=color_map[threshold])
		ax.plot(t, hr_data, 'k--', linewidth=0.5)

		# Add colorized HR markers: consecutive samples in the same HR zone are plotted as one
		# run in that zone's color; a run ends when the classification changes
		old_classifier = _get_classification(hr_data[0], data)
		start_index = 0
		end_index = 0
		for hr_val in hr_data:
			classifier_counter[old_classifier] += 1
			current_classifier = _get_classification(hr_val, data)
			if current_classifier != old_classifier:
				ax.plot(t[start_index:end_index], hr_data[start_index:end_index], 'o',
						markerfacecolor=color_map[old_classifier], markeredgecolor=color_map[old_classifier])
				start_index = end_index
				old_classifier = current_classifier
			end_index += 1

		# Compute distribution of HR values in % and draw it as a stacked horizontal bar (ax2),
		# building the figure legend (one patch per classifier) along the way
		percentages = {}
		_left = 0
		legend = []
		for i in range(7):
			classifier = str(order[str(i)][0])
			percentages[classifier] = float(classifier_counter[classifier]) / hr_data.size * 100
			ax2.barh("", percentages[classifier], left=_left, color=color_map[classifier])
			_left += percentages[classifier]
			legend.append(mpl.patches.Patch(label="%s\n(%.2f%s)" % (order[str(i)][1], percentages[classifier], "$\%$"),
							fc=color_map[classifier]))
		ax.legend(handles=legend, loc=8, ncol=7)
	elif age <= 0:
		raise ValueError("Age cannot be <= 0.")
	else:
		# No reference data for minors: plot the plain HR series without classification
		warnings.warn("No reference data for age %i available." % age)
		ax.plot(t, hr_data, 'k--', linewidth=0.5)
		ax2.plot("", 0)

	# Set axis limits
	ax.axis([interval[0], interval[1], hr_data.min() * 0.7, hr_data.max() * 1.1])
	ax.set_ylabel('Heart Rate [$1/min$]')
	ax2.set_xlim([0, 100])
	ax2.set_xlabel("Distribution of HR over the HR classifiers [$\%$]")

	# Show plot
	if show:
		plt.show()

	# Output
	return biosppy.utils.ReturnTuple((fig, ), ('hr_heatplot', ))
def time_varying(nni=None, rpeaks=None, parameter='sdnn', window='n20', interpolation=None, show=True, mode='normal'):
"""Computes time varying plot of a pyHRV parameter at every NNI of the input NNI (or rpeak) series using a moving
time window or a moving NNI window.
Parameters
----------
nni : array
NN-Intervals in [ms] or [s]
rpeaks : array
R-peak locations in [ms] or [s]
parameter : string
pyHRV parameter key for which the time varying computation is to be plotted (check the hrv_keys.json file for a
full list of available keys)
window : string
Time varying window configuration using the following syntax:
'tX' for using a moving time window, with X being the window interval before and after the current NNI
Example: t20 generates a time window of 20s before and 20s after each NNI for the computation
of th pyHRV parameter
OR
'nX' for using a moving NNI window, with X being the number of NNI included before and after the current
NNI
Example: n20 generates a window which includes 20 NNI before and 20 NNI after the current NNI
interpolation : int (optional)
Frequency at which the computed parameter signal is be resampled and interpolated (for example to create a
parameter signal with the same sampling frequency of the original ECG signal)
show : bool, optional
If true, show time varying plot (default: True)
mode :
Returns
-------
"""
# Check input series
nn = pyhrv.utils.check_input(nni, rpeaks)
# Check if parameter is on the list of invalid parameters (computational time of these parameters are too long or
# the parameters are input parameters for PSD functions
invalid_parameters = ['plot', 'tinn_m', 'tinn_n', 'fft_nfft', 'fft_window', 'fft_resampling_frequency',
'fft_interpolation', 'ar_nfft', 'ar_order', 'lomb_nfft', 'lomb_ma']
# Check selected parameter
if parameter is None:
raise TypeError("No parameter set for 'parameter'")
elif parameter in invalid_parameters:
raise ValueError("Parameter '%s' is not supported by this function. Please select another one." % parameter)
elif parameter not in pyhrv.utils.load_hrv_keys_json().keys():
raise ValueError("Unknown parameter '%s' (not a pyHRV parameter)." % parameter)
# Check window and decode window configuration
if window[0] != 't' and window[0] != 'n':
raise ValueError("Invalid mode '%s'. Please select 't' for a time window or 'n' for a NNI window." % window[0])
elif int(window[1:]) <= 0:
raise ValueError("'window' cannot be <= 0.")
else:
window_mode = window[0]
window_size = int(window[1:])
# Internal helper function
def _compute_parameter(array, func):
try:
# Try to pass the show and mode argument to to suppress PSD plots
val = eval(func + '(nni=array, mode=\'dev\')[0][\'%s\']' % parameter)
except TypeError as e:
if 'mode' in str(e):
try:
# If functions has now mode feature but 'mode' argument, but a plotting feature
val = eval(func + '(nni=array, plot=False)[\'%s\']' % parameter)
except TypeError as a:
try:
val = eval(func + '(nni=array, show=False)[\'%s\']' % parameter)
except TypeError as ae:
if 'plot' in str(ae):
# If functions has now plotting feature try regular function
val = eval(func + '(nni=array)[\'%s\']' % parameter)
else:
val = eval(func + '(nni=array)[\'%s\']' % parameter)
return val
# Vars
parameter_values = np.asarray([])
# Get hrv_keys & the respective function
hrv_keys = pyhrv.utils.load_hrv_keys_json()
parameter_func = hrv_keys[parameter][-1]
parameter_label = hrv_keys[parameter][1]
parameter_unit = hrv_keys[parameter][2]
# Beat window computation
if window_mode == 'n':
for i, _ in enumerate(nni):
if i == 0:
continue
# Incomplete initial window
elif i <= (window_size - 1):
vals = nn[:(i + window_size + 1)]
parameter_values = np.append(parameter_values, _compute_parameter(vals, parameter_func))
# Complete Window
elif i < (nni.size - window_size):
vals = nn[i - window_size: i + window_size + 1]
parameter_values = np.append(parameter_values, _compute_parameter(vals, parameter_func))
# Incomplete ending window
else:
vals = nn[i - window_size:i]
parameter_values = np.append(parameter_values, _compute_parameter(vals, parameter_func))
# Time window computation
elif window_mode == 't':
t = np.cumsum(nn) / 1000
for i, _t in enumerate(t):
if i == 0:
continue
# Incomplete initial window
elif _t <= window_size:
# t_vals = np.where((t <= _t) & (t <== (_t + window_size)))
indices = np.where(t <= (_t + window_size))[0]
parameter_values = np.append(parameter_values, _compute_parameter(nn[indices], parameter_func))
# Complete Window
elif _t < t[-1] - window_size:
indices = np.where(((_t - window_size) <= t) & (t <= (_t + window_size)))[0]
parameter_values = np.append(parameter_values, _compute_parameter(nn[indices], parameter_func))
# Incomplete end window
else:
indices = np.where(((_t - window_size) <= t) & (t <= t[-1]))[0]
parameter_values = np.append(parameter_values, _compute_parameter(nn[indices], parameter_func))
# Interpolation (optional) and time vector
if interpolation is not None:
t = np.cumsum(nn)
f_interpol = sp.interpolate.interp1d(t, parameter_values, 'cubic')
t = np.arange(t[0], t[-1], 1000. / interpolation)
parameter_values = f_interpol(t)
t /= 1000.
else:
t = np.cumsum(nn) / 1000
# Define start and end intervals
if window_mode == 'n':
indices = np.arange(0, len(nn))
start_interval = np.where(indices < window_size + 1)[0]
valid_interval = np.where((indices >= (window_size + 1)) & (indices <= (indices[-1] - window_size)))[0]
end_interval = np.where(indices > (indices[-1] - window_size))[0][:-1]
elif window_mode == 't':
start_interval = np.where(t < window_size)[0]
valid_interval = np.where((t >= window_size) & (t <= t[-1] - window_size))[0]
end_interval = np.where(t > t[-1] - window_size)[0][:-1]
y_min, y_max = 0, parameter_values.max() * 1.2
# Figure
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(111)
_win_mode = "NNI Window: %i Intervals" % window_size if window_mode == 'n' else "Time Window: %is" % window_size
fig.suptitle('Time Varying - %s Evolution' % parameter_label)
ax.set_title('(%s)' % _win_mode, size=10)
ax.set_ylabel('%s [$%s$]' % (parameter.upper(), parameter_unit))
ax.set_xlim([0, t[-1]])
ax.set_ylim([y_min, y_max])
# Plot start values (except the very first NNI)
ax.plot(t[1:window_size + 1], parameter_values[1:window_size + 1], 'r--')
# Plot valid values
ax.plot(t[valid_interval], parameter_values[valid_interval], 'g')
# Plot final values
ax.plot(t[end_interval], parameter_values[end_interval], 'r--')
# X-Axis configuration
# Set x-axis format to seconds if the duration of the signal <= 60s
if t[-1] <= 60:
ax.set_xlabel('Time [s]')
# Set x-axis format to MM:SS if the duration of the signal > 60s and <= 1h
elif 60 < t[-1] <= 3600:
ax.set_xlabel('Time [MM:SS]')
formatter = mpl.ticker.FuncFormatter(lambda ms, x: str(dt.timedelta(seconds=ms))[2:])
ax.xaxis.set_major_formatter(formatter)
# Set x-axis format to HH:MM:SS if the duration of the signal > 1h
else:
ax.set_xlabel('Time [HH:MM:SS]')
formatter = mpl.ticker.FuncFormatter(lambda ms, x: str(dt.timedelta(seconds=ms)))
ax.xaxis.set_major_formatter(formatter)
# Window areas
legends = []
ax.vlines(t[window_size], y_min, y_max, color='r')
ax.fill_between([0, t[window_size]], [y_max, y_max], facecolor='r', alpha=0.3)
ax.vlines(t[parameter_values.size - window_size - 1], y_min, y_max, color='r')
ax.fill_between([t[parameter_values.size - window_size - 1], t[-1]], [y_max, y_max], facecolor='r', alpha=0.3)
legends.append(mpl.patches.Patch(fc='g', label='Complete Window'))
legends.append(mpl.patches.Patch(fc='r', label='Incomplete Window', alpha=0.3))
# Recommended minimum window size
# TODO in future versions: add available recommended minimum durations to the HRV keys json file
parameter_minimum = 50
if t[window_size] < parameter_minimum:
ax.vlines(parameter_minimum, y_min, y_max, color='orange')
ax.fill_between([t[window_size], parameter_minimum], [y_max, y_max], color='orange', alpha=0.3)
legends.append(mpl.patches.Patch(fc='orange', label='Recommended Minimum Window Size (%is)' %
parameter_minimum, alpha=0.3))
ax.legend(handles=legends, loc=8, framealpha=1., ncol=3)
# Add overall value
val = _compute_parameter(nn, parameter_func)
ax.hlines(val, 0, t[-1], linestyles='--', linewidth=0.7)
ax.text(1, val + 1, 'Overall')
# Check mode
if mode not in ['normal', 'dev', 'devplot']:
warnings.warn("Unknown mode '%s'. Will proceed with 'normal' mode." % mode, stacklevel=2)
mode = 'normal'
if mode == 'normal':
if show:
plt.show()
# Output
args = (fig,)
names = ("time_varying_%s" % parameter,)
return biosppy.utils.ReturnTuple(args, names)
elif mode == 'dev':
return t, parameter_values, parameter
elif mode == 'devplot':
if mode == 'normal':
if show:
plt.show()
# Output
args = (fig, )
names = ("time_varying_%s" % parameter, )
return biosppy.utils.ReturnTuple(args, names), t, parameter_values, parameter
def radar_chart(nni=None,
rpeaks=None,
comparison_nni=None,
comparison_rpeaks=None,
parameters=None,
reference_label='Reference',
comparison_label='Comparison',
show=True,
legend=True):
"""Plots a radar chart of HRV parameters to visualize the evolution the parameters computed from a NNI series
(e.g. extracted from an ECG recording while doing sports) compared to a reference/baseline NNI series (
e.g. extracted from an ECG recording while at rest).
The radarchart normalizes the values of the reference NNI series with the values extracted from the baseline NNI
series being used as the 100% reference values.
Example: Reference NNI series: SDNN = 100ms → 100%
Comparison NNI series: SDNN = 150ms → 150%
The radar chart is not limited by the number of HRV parameters to be included in the chart; it dynamically
adjusts itself to the number of compared parameters.
Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#radar-chart-radar-chart
Parameters
----------
nni : array
Baseline or reference NNI series in [ms] or [s] (default: None)
rpeaks : array
Baseline or referene R-peak series in [ms] or [s] (default: None)
comparison_nni : array
Comparison NNI series in [ms] or [s] (default: None)
comparison_rpeaks : array
Comparison R-peak series in [ms] or [s] (default: None)
parameters : list
List of pyHRV parameters (see keys of the hrv_keys.json file for a full list of available parameters).
The list must contain more than 1 pyHRV parameters (default: None)
reference_label : str, optional
Plot label of the reference input data (e.g. 'ECG while at rest'; default: 'Reference')
comparison_label : str, optional
Plot label of the comparison input data (e.g. 'ECG while running'; default: 'Comparison')
show : bool, optional
If True, shows plot figure (default: True).
legend : bool, optional
If true, add a legend with the computed results to the plot (default: True)
Returns (biosppy.utils.ReturnTuple Object)
------------------------------------------
[key : format]
Description.
reference_results : dict
Results of the computed HRV parameters of the reference NNI series
Keys: parameters listed in the input parameter 'parameters'
comparison results : dict
Results of the computed HRV parameters of the comparison NNI series
Keys: parameters listed in the input parameter 'parameters'
radar_plot : matplotlib figure
Figure of the generated radar plot
Raises
------
TypeError
If an error occurred during the computation of a parameter
TypeError
If no input data is provided for the baseline/reference NNI or R-peak series
TypeError
If no input data is provided for the comparison NNI or R-peak series
TypeError
If no selection of pyHRV parameters is provided
ValueError
If less than 2 pyHRV parameters were provided
Notes
-----
.. If both 'nni' and 'rpeaks' are provided, 'rpeaks' will be chosen over the 'nn' and the 'nni' data will be computed
from the 'rpeaks'
.. If both 'comparison_nni' and 'comparison_rpeaks' are provided, 'comparison_rpeaks' will be chosen over the
the 'comparison_nni' and the nni data will be computed from the 'comparison_rpeaks'
"""
# Helper function & variables
para_func = pyhrv.utils.load_hrv_keys_json()
unknown_parameters, ref_params, comp_params = [], {}, {}
def _compute_parameter(nni_series, parameter):
# Get function name for the requested parameter
func = para_func[parameter][-1]
try:
# Try to pass the show and mode argument to to suppress PSD plots
index = 0
if parameter.endswith('_vlf'):
parameter = parameter.replace('_vlf', '')
elif parameter.endswith('_lf'):
index = 1
parameter = parameter.replace('_lf', '')
elif parameter.endswith('_hf'):
index = 2
parameter = parameter.replace('_hf', '')
val = eval(func + '(nni=nni_series, mode=\'dev\')[0][\'%s\']' % (parameter))
val = val[index]
except TypeError as e:
if 'mode' in str(e):
try:
# If functions has now mode feature but 'mode' argument, but a plotting feature
val = eval(func + '(nni=nni_series, plot=False)[\'%s\']' % parameter)
except TypeError as a:
if 'plot' in str(a):
# If functions has now plotting feature try regular function
val = eval(func + '(nni=nni_series)[\'%s\']' % parameter)
else:
raise TypeError(e)
return val
# Check input data
if nni is None and rpeaks is None:
raise TypeError("No input data provided for baseline or reference NNI. Please specify the reference NNI series.")
else:
nn = pyhrv.utils.check_input(nni, rpeaks)
if comparison_nni is not None and comparison_rpeaks is not None:
raise TypeError("No input data provided for comparison NNI. Please specify the comarison NNI series.")
else:
comp_nn = pyhrv.utils.check_input(comparison_nni, comparison_rpeaks)
if parameters is None:
raise TypeError("No input list of parameters provided for 'parameters'. Please specify a list of the parameters"
"to be computed and compared.")
elif len(parameters) < 2:
raise ValueError("Not enough parameters selected for a radar chart. Please specify at least 2 HRV parameters "
"listed in the 'hrv_keys.json' file.")
# Check for parameter that require a minimum duration to be computed & remove them if the criteria is not met
if nn.sum() / 1000. <= 600 or comp_nn.sum() / 1000. <= 600:
for p in ['sdann', 'sdnn_index']:
if p in parameters:
parameters.remove(p)
warnings.warn("Input NNI series are too short for the computation of the '%s' parameter. This "
"parameter has been removed from the parameter list." % p, stacklevel=2)
# Register projection of custom RadarAxes class
register_projection(pyhrv.utils.pyHRVRadarAxes)
# Check if the provided input parameter exists in pyHRV (hrv_keys.json) & compute available parameters
for p in parameters:
p = p.lower()
if p not in para_func.keys():
# Save unknown parameters
unknown_parameters.append(p)
else:
# Compute available parameters
ref_params[p] = _compute_parameter(nn, p)
comp_params[p] = _compute_parameter(comp_nn, p)
# Check if any parameters could not be computed (returned as None or Nan) and remove them
# (avoids visualization artifacts)
if np.isnan(ref_params[p]) or np.isnan(comp_params[p]):
ref_params.pop(p)
comp_params.pop(p)
warnings.warn("The parameter '%s' could not be computed and has been removed from the parameter list."
% p)
# Raise warning pointing out unknown parameters
if unknown_parameters != []:
warnings.warn("Unknown parameters '%s' will not be computed." % unknown_parameters, stacklevel=2)
# Prepare plot
colors = ['lightskyblue', 'salmon']
if legend:
fig, (ax_l, ax) = plt.subplots(1, 2, figsize=(12, 6), subplot_kw=dict(projection='radar'))
else:
fig, ax = plt.subplots(1, 1, figsize=(8, 8), subplot_kw={'projection': 'radar'})
theta = np.linspace(0, 2 * np.pi, len(ref_params.keys()), endpoint=False)
ax.theta = theta
# Prepare plot data
ax.set_varlabels([para_func[s][1].replace(' ', '\n') for s in ref_params.keys()])
ref_vals = [100 for x in ref_params.keys()]
com_vals = [comp_params[p] / ref_params[p] * 100 for p in ref_params.keys()]
# Plot data
for i, vals in enumerate([ref_vals, com_vals]):
ax.plot(theta, vals, color=colors[i])
ax.fill(theta, vals, color=colors[i], alpha=0.3)
title = "HRV Parameter Radar Chart\nReference NNI Series (%s) vs. Comparison NNI Series (%s)\n" % (colors[0], colors[1]) \
+ r"(Chart values in $\%$, Reference NNI parameters $\hat=$100$\%$)"
# Add legend to second empty plot
if legend:
ax_l.set_title(title, horizontalalignment='center')
legend = []
# Helper function
def _add_legend(label, fc="white"):
return legend.append(mpl.patches.Patch(fc=fc, label="\n" + label))
# Add list of computed parameters
_add_legend(reference_label, colors[0])
for p in ref_params.keys():
_add_legend("%s:" % para_func[p][1])
# Add list of comparison parameters
_add_legend(comparison_label, colors[1])
for p in ref_params.keys():
u = para_func[p][2] if para_func[p][2] != "-" else ""
_add_legend("%.2f%s vs. %.2f%s" % (ref_params[p], u, comp_params[p], u))
# Add relative differences
_add_legend("")
for i, _ in enumerate(ref_params.keys()):
val = com_vals[i] - 100
_add_legend("+%.2f%s" % (val, r"$\%$") if val > 0 else "%.2f%s" % (val, r"$\%$"))
ax_l.legend(handles=legend, ncol=3, frameon=False, loc=7)
ax_l.axis('off')
else:
ax.set_title(title, horizontalalignment='center')
# Show plot
if show:
plt.show()
# Output
args = (ref_params, comp_params, fig, )
names = ('reference_results', 'comparison_results', 'radar_plot', )
return biosppy.utils.ReturnTuple(args, names)
def hrv_export(results=None, path=None, efile=None, comment=None, plots=False):
"""
Exports HRV results into a JSON file.
Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#hrv-export-hrv-export
Parameters
----------
results : dict, biosppy.utils.ReturnTuple object
Results of the HRV analysis
path : str
Absolute path of the output directory
efile : str, optional
Output file name
comment : str, optional
Optional comment
plots : bool, optional
If True, save figures of the results in .png format
Returns
-------
efile : str
Absolute path of the output export file (may vary from the input data)
Raises
------
TypeError
No input data provided
TypeError
Unsupported data format provided (other than dict, or biosppy.utils.ReturnTuple object.)
TypeError
If no file or directory path provided
Notes
-----
.. If 'path' is a file handler, 'efile' will be ignored.
.. Creates file with automatic name generation if only an output path is provided.
.. Output file name may vary from input file name due changes made to avoid overwrting existing files (your
results are important after all!).
.. Existing files will not be overwritten, instead the new file will consist of the given file name with an
(incremented) identifier (e.g. '_1') that will be added at the end of the provided file name.
.. You can find the documentation for this function here:
https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#hrv-export-hrv-export
"""
# Check input (if available & biosppy.utils.ReturnTuple object)
if results is None:
raise TypeError("No results data provided. Please specify input data.")
elif results is not type(dict()) and isinstance(results, biosppy.utils.ReturnTuple) is False:
raise TypeError("Unsupported data format: %s. "
"Please provide input data as Python dictionary or biosppy.utils.ReturnTuple object." % type(results))
if path is None:
raise TypeError("No file name or directory provided. Please specify at least an output directory.")
elif type(path) is str:
if efile is None:
# Generate automatic file name
efile = 'hrv_export' + dt.datetime.now().strftime('_%Y-%m-%d_%H-%M-%S') + '.json'
path += efile
else:
# Check if file name has an '.json' extension
_, fformat = os.path.splitext(efile)
if fformat != 'json':
path = path + efile + '.json'
else:
path = path + efile
elif type(path) is file:
path_ = path.name
path.close()
path = path_
efile, _ = pyhrv.utils.check_fname(path, 'json', efile)
# Get HRV parameters
params = json.load(open(os.path.join(os.path.split(__file__)[0], './files/hrv_keys.json'), 'r'))
# Save plot figures
if plots:
for key in results.keys():
if isinstance(results[key], plt.Figure) and key in params.keys():
results[key].savefig(os.path.splitext(efile)[0] + '_' + str(key), dpi=300)
# Prepare output dictionary
output = {'Name': efile, 'Comment': str(comment)}
for key in results.keys():
if isinstance(results[key], biosppy.utils.ReturnTuple):
output[key] = dict(results[key])
elif isinstance(results[key], tuple):
output[key] = list(results[key])
elif isinstance(results[key], str):
output[key] = results[key]
elif isinstance(results[key], range):
output[key] = list(results[key])
elif results[key] is None:
output[key] = 'n/a'
elif 'plot' not in str(key) and 'histogram' not in str(key):
output[key] = float(results[key]) if str(results[key]) != 'nan' else 'n/a'
json.encoder.FLOAT_REPR = lambda o: format(o, 'f')
with open(efile, 'w+') as f:
json.dump(output, f, sort_keys=True, indent=4, separators=(',', ': '))
return str(efile)
def hrv_import(hrv_file=None):
"""Imports HRV results stored in JSON files generated with the 'hrv_export()' function.
Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#hrv-import-hrv-import
Parameters
----------
hrv_file : file object, str
File handler or absolute string path of the HRV JSON file
Returns
-------
output : biosppy.utils.ReturnTuple object
All imported results.
Raises
------
TypeError
No input data provided.
Notes
-----
.. You can find the documentation for this function here:
https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#hrv-import-hrv-import
"""
# Check input data and load JSON file content
if hrv_file is None:
raise TypeError("No input data provided. Please specify input data.")
elif type(hrv_file) is str:
data = json.load(open(hrv_file, 'r'))
elif isinstance(hrv_file, file):
data = json.load(hrv_file)
results = dict()
for key in data.keys():
results[str(key)] = data[key] if type(data[key]) is not str else str(data[key])
# Create biosppy.utils.ReturnTuple object from imported data
return biosppy.utils.ReturnTuple(results.values(), results.keys())
if __name__ == "__main__":
"""
Example Script - HRV Tools
"""
import pyhrv
from biosppy.signals.ecg import ecg
# Load a Sample Signal
nni = pyhrv.utils.load_sample_nni()
# Load OpenSignals (r)evolution ECG sample file
signal = np.loadtxt('./files/SampleECG.txt')[:, -1]
# Filter data & get r-peak locations [ms]
signal, rpeaks = ecg(signal, show=False)[1:3]
# Plot ECG for the interval of 0s and 22s
plot_ecg(signal, interval=[0, 22])
# Plot Tachogram for the interval of 0s and 22s
tachogram(nni, interval=[0, 22])
# Heart Rate Heatplot to highlight HR performance compared to a sports database
heart_rate_heatplot(nni, gender='male', age=28)
# Time Varying is designed to show the evolution of HRV parameters over time using a moving window
# Define a moving window of 3 NNIs before and after the current NNI using the NNI window indicator 'n'
time_varying(nni, parameter='sdnn', window='n3')
# Define a moving window of 3 seconds before and after the current NNI using the time window indicator 't'
time_varying(nni, parameter='sdnn', window='t3')
# Radar charts are created dynamically, depending on the number of parameters used as input
# For this example, let's split he test NNI series into two segments & select a list of 6 parameters
ref_nni = nni[:100]
comp_nni = nni[100:200]
params = ['nni_mean', 'nni_max', 'sdnn', 'rmssd', 'sdsd', 'nn50', 'nn20']
radar_chart(ref_nni, comparison_nni=comp_nni, parameters=params)
# Now with only 3 parameters
params = ['nni_mean', 'sdnn', 'rmssd']
radar_chart(ref_nni, comparison_nni=comp_nni, parameters=params)
# Export and import HRV results into and from JSON files:
# First, compute hrv parameters
results = pyhrv.hrv(nni, show=False)
hrv_export(results, path='./files/', efile='SampleExport')
hrv_import('./files/SampleExport.json')
| [
"pgomes92@gmail.com"
] | pgomes92@gmail.com |
2efc3b8d4f8b6993091cf4bfe85bdb9711ec2a74 | 602ae5fca1a1d25d70cc3e1a84759d0caf124b57 | /Dash Basics/dash_core_components_example.py | a3bf2c3dec79cc947119eba68e52c15102ca1de5 | [] | no_license | harryschaefer93/DashAppPractice | 540f05380575a8afea8f8db2c9f5ef70588ec25f | aa4a144df94a32e55a206d99dd89d431baa77ccf | refs/heads/master | 2023-07-31T22:19:22.413393 | 2021-09-19T21:09:57 | 2021-09-19T21:09:57 | 321,131,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import dash
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash()
app.layout = html.Div([
html.Label('Dropdown Component:'),
dcc.Dropdown(
options=[
{'label': 'Houston', 'value': 'HTX'},
{'label': 'Austin', 'value': 'ATX'},
{'label': 'Dallas', 'value': 'DTX'}],
value='HTX'),
html.P(html.Label('Slider Component:')),
#html.P inserts linebreak so labels aren't on top of eachother
dcc.Slider(
min=0,
max=9,
marks={i: '{}'.format(i) for i in range(10)},
value=5),
html.P(html.Label('Radio Items Component')),
dcc.RadioItems(
options=[
{'label': 'Houston', 'value': 'HTX'},
{'label': 'Austin', 'value': 'ATX'},
{'label': 'Dallas', 'value': 'DTX'}],
value='HTX')
])
if __name__ == '__main__':
app.run_server() | [
"harryschaefer1993@gmail.com"
] | harryschaefer1993@gmail.com |
f107a42e17a213bb257e6dc9bee18367a2d43d35 | c7a867c33675d48c9bcc73c70c27cac085661ebb | /extractor/POSMmanagement/process.py | a6b472f4c094cbd47cf5cd7e2c6fc14894009bd4 | [] | no_license | v55448330/posm | 3e4cbcb22f5eae17c956eb02346a8fc5a932966c | a53c15337301a769ac3b9bde54ab845ac0fe5211 | refs/heads/master | 2020-05-29T11:05:40.229015 | 2015-03-29T08:21:22 | 2015-03-29T08:21:22 | 47,541,652 | 1 | 0 | null | 2015-12-07T09:15:42 | 2015-12-07T09:15:42 | null | UTF-8 | Python | false | false | 4,559 | py | # -*- coding: utf-8 -*-
import logging
LOG = logging.getLogger(__file__)
import subprocess
import psycopg2
import sys
import os
from .utils import proc_exec
class ProcessManagement():
def __init__(self, settings, verbose=False):
self.verbose = verbose
self.settings = settings.get_settings()
self.db_params = settings.db_params
def processAdminLevels(self, settings_file):
command = [
'python', 'extract.py', '--settings', settings_file,
'--problems_as_geojson'
]
LOG.debug('Command: %s', ' '.join(command))
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False
)
# execute the process ... .wait()
admin_level_data_path = os.path.join(
self.settings.get('sources').get('data_directory'),
'{}.pbf'.format(
self.settings.get('sources').get('admin_levels_file')
)
)
LOG.info(
'Processing admin levels %s', admin_level_data_path
)
msg = proc_exec(proc, self.verbose)
if proc.returncode != 0:
LOG.error('Admin level processing has not exited cleanly!')
LOG.error(msg)
sys.exit(99)
def deconstructGeometry(self):
conn = psycopg2.connect(**self.db_params)
cur = conn.cursor()
try:
cur.execute("set search_path = \"$user\", 'public', 'topology';")
LOG.info('Deconstructing geometry...')
cur.execute('select deconstruct_geometry();')
conn.commit()
except psycopg2.ProgrammingError, e:
LOG.error('Unhandeld error: (%s) %s', e.pgcode, e.pgerror)
raise e
cur.close()
conn.close()
def createBaseTopology(self):
conn = psycopg2.connect(**self.db_params)
cur = conn.cursor()
try:
cur.execute("set search_path = \"$user\", 'public', 'topology';")
LOG.info('Initializing topology...')
cur.execute('select init_base_topology();')
except psycopg2.ProgrammingError, e:
LOG.error('Unhandeld error: (%s) %s', e.pgcode, e.pgerror)
raise e
cur.execute('SELECT osm_id FROM all_geom order by osm_id asc')
osm_ids = cur.fetchall()
cur.execute('SELECT count(osm_id) FROM all_geom')
total = cur.fetchone()[0]
try:
for idx, osm_id in enumerate(osm_ids):
LOG.debug(
'Creating topology for %s ... (%s/%s)',
osm_id[0], idx+1, total
)
cur.execute(
"set search_path = \"$user\", 'public', 'topology';"
)
cur.execute('select create_base_topology_for_id(%s);', osm_id)
conn.commit()
except psycopg2.ProgrammingError, e:
LOG.error('Unhandeld error: (%s) %s', e.pgcode, e.pgerror)
raise e
cur.close()
conn.close()
def simplifyAdminLevels(self, tolerance=0.001):
conn = psycopg2.connect(**self.db_params)
cur = conn.cursor()
try:
cur.execute("set search_path = \"$user\", 'public', 'topology';")
LOG.info('Simplifying admin_levels ...')
cur.execute('select simplify_dissolve(%s);', (tolerance,))
conn.commit()
except psycopg2.ProgrammingError, e:
LOG.error('Unhandeld error: (%s) %s', e.pgcode, e.pgerror)
raise e
cur.close()
conn.close()
def convertToGeoJson(self, settings_file, *args):
if len(args) > 0:
command = [
'python', 'generate_geojson.py', '--rm', '--settings',
settings_file
]
command += [arg for arg in args]
else:
command = [
'python', 'generate_geojson.py', '--rm', '--all', '--settings',
settings_file
]
LOG.debug('Command: %s', ' '.join(command))
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False
)
# execute the process ... .wait()
LOG.info('Converting to geojson ... exported_geojson.zip')
msg = proc_exec(proc, self.verbose)
if proc.returncode != 0:
LOG.error('Converting to geojson has not exited cleanly!')
LOG.error(msg)
sys.exit(99)
| [
"dodobas@candela-it.com"
] | dodobas@candela-it.com |
41a00bab3f061077909f54d74dc574355af1929d | 1b77eaf078321b1320d72aa36a4357568101e4ca | /江南大学教务处/test.py | 93ac06b18e5699d2285b3f417e63ee409aaa3bec | [] | no_license | BEE-JN/python_homework | 92ffc1216a380d124901fd64cc541f70813847dc | 8ba4ea79cbd422f40e6f9f1cc5fed4d75715d207 | refs/heads/master | 2020-03-23T08:02:47.863607 | 2018-07-17T15:30:21 | 2018-07-17T15:30:21 | 141,305,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | import requests
import time
url = "https://survey.zkeycc/pku/xsdc/?dm=bk"
if __name__=='__main__':
while 1:
r=requests.get(url)
print(r.content)
time.sleep(1) | [
"41156190+GCS-CN@users.noreply.github.com"
] | 41156190+GCS-CN@users.noreply.github.com |
060eac28c26b41125b17d80a73f320465fa80cf3 | 2a6934acac4ec8bb29ad51e525ad2ed839a18587 | /sleekocmd.py | 68f49aecbefe4e0f260eb97fa74fe5cb08374f80 | [] | no_license | alexschlueter/arlecksbot | d9ca769a00bf0458163b397ebce314d510066af4 | 1730f5123b10bc638906f6206ea6b5b08460bfac | refs/heads/master | 2021-01-10T21:39:51.643735 | 2013-01-16T11:59:43 | 2013-01-16T11:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,261 | py | #!/usr/bin/env python2
# Name: SleekoCommander
# Author: Nick Caplinger (SleekoNiko)
# Dependencies: numpy, pypng
# Ideas:
# control the midfield with gankers
#1. Ambush flag carriers by predicting their path to the flag stand and whether or not they can intercept
#2. Camp the enemy spawn
#3. Actively search around points of interest to gain map awareness
# Import AI Sandbox API:
from api import Commander
from api import commands
from api import Vector2
# Import other modules
import random
#import png # for writing debug pngs
import networkx as nx # for graphs
import itertools
import math
#TODO Make bots more aggressive when time is running out and losing
#TODO Make bots more defensive when time is running out and winning
class SleekoCommander(Commander):
    """
    Runners are responsible for infiltrating the enemy's defenses by flanking.
    Defenders watch the flag stand for intruders and flankers by positioning themselves accordingly.
    Midfielders try to provide map control by ganking and performing midfield objectives such as escorting and interception. They may fall into other roles when needed.
    """
    def initialize(self):
        """
        Assign each bot a role. Runners and defenders should default to 40%, and midfielders should default to 20%.
        Role counts should adapt throughout the game depending on how aggressive or defensive the enemy commander is.
        """
        self.verbose = True # display the command descriptions next to the bot labels
        self.lastEventCount = 0
        self.numAllies = len(self.game.team.members)
        self.botDeathLocations = [] # stores a list of Vector2 objects of where bots died
        self.makeRunnerGraph()
        self.runners = [] # 40%
        self.defenders = [] # 40%
        self.midfielders = [] # 20%
        # NOTE(review): midfielders is never populated anywhere below.
        ourSpawn = self.game.team.botSpawnArea[0]
        theirSpawn = self.game.enemyTeam.botSpawnArea[0]
        # if their spawn is closer to our flag than ours is
        # attacking will probably be easy, so get more defenders
        if distTo(theirSpawn, self.game.team.flag.position) < distTo(ourSpawn, self.game.team.flag.position):
            # roughly half attackers/defenders
            self.desiredRunners = math.ceil(self.numAllies * .5)
            self.desiredDefenders = math.ceil(self.numAllies * .5)
        else:
            # Few defenders and the rest are attackers
            defPercent = .20
            self.desiredDefenders = math.ceil(self.numAllies * defPercent)
            self.desiredRunners = math.ceil(self.numAllies * (1 - defPercent))
        # Assign roles
        # First desiredRunners bots become runners; everyone else defends.
        for bot in self.game.team.members:
            if len(self.runners) < self.desiredRunners:
                self.runners.append(bot)
            else:
                self.defenders.append(bot)
        # TODO calculate for more than 2 flags
        self.midPoint = (self.game.team.botSpawnArea[0] + self.game.enemyTeam.flag.position) / 2.0
        dirToFlag = (self.game.enemyTeam.flag.position - self.game.team.flag.position)
        # Unit vectors: straight at the enemy flag plus the two perpendiculars.
        self.frontFlank = Vector2(dirToFlag.x, dirToFlag.y).normalized()
        self.leftFlank = Vector2(dirToFlag.y,-dirToFlag.x).normalized()
        self.rightFlank = Vector2(-dirToFlag.y,dirToFlag.x).normalized()
        # Create behavior tree
        # Two top-level branches: runner behavior and defender behavior.
        self.behaviorTree = BotBehaviorTree(
            Selector([
                Sequence([
                    BotIsRunner(),
                    Selector([
                        Sequence([
                            BotHasFlag(),
                            RunToScoreZone()
                        ]),
                        Sequence([
                            AllyHasFlag(),
                            SecureEnemyFlagObjective()
                        ]),
                        Sequence([
                            Inverter(TeamHasEnemyFlag()),
                            #SmartApproachFlag()
                            Selector([
                                Sequence([
                                    NearEnemyFlag(),
                                    Selector([
                                        Sequence([
                                            EnemiesAreAlive(),
                                            AttackFlag()
                                        ]),
                                        ChargeFlag()
                                    ])
                                ]),
                                ChargeToFlagFlank()
                            ])
                        ])
                    ])
                ]),
                Sequence([
                    BotIsDefender(),
                    Selector([
                        Sequence([
                            BotHasFlag(),
                            RunToScoreZone()
                        ]),
                        Sequence([
                            OurFlagIsInBase(),
                            SecureOurFlagStand()
                        ]),
                        Sequence([
                            OurFlagIsOnOurHalf(),
                            SecureOurFlag()
                        ]),
                        Sequence([
                            SecureOurFlagStand()
                        ])
                    ])
                ])
            ])
        )
        # Set some blackboard data
        # Children read shared data by walking up to this root blackboard.
        self.behaviorTree.root.blackboard = {}
        self.behaviorTree.root.blackboard['commander'] = self
        # I was using a png file for output
        #bt = getVonNeumannNeighborhood((int(self.game.team.flagSpawnLocation.x), int(self.game.team.flagSpawnLocation.y)), self.level.blockHeights, int(self.level.firingDistance))
        #createPngFromBlockTuples(bt, (self.level.width, self.level.height))
        #createPngFromMatrix(bt, (self.level.width, self.level.height))
        # Determine safest positions for flag defense
        self.secureFlagDefenseLocs = self.getMostSecurePositions(Vector2(self.game.team.flagSpawnLocation.x, self.game.team.flagSpawnLocation.y))
        self.secureEnemyFlagLocs = self.getMostSecurePositions(Vector2(self.game.enemyTeam.flagSpawnLocation.x, self.game.enemyTeam.flagSpawnLocation.y))
    def tick(self):
        """
        Listen for events and run the bot's behavior tree.
        """
        # listen for events
        if len(self.game.match.combatEvents) > self.lastEventCount:
            lastCombatEvent = self.game.match.combatEvents[-1]
            #self.log.info('event:'+str(lastCombatEvent.type))
            # if lastCombatEvent.instigator is not None:
            #     print "event:%d %f %s %s" % (lastCombatEvent.type,lastCombatEvent.time,lastCombatEvent.instigator.name,lastCombatEvent.subject.name)
            # else:
            #     print "event:%d %f" % (lastCombatEvent.type,lastCombatEvent.time)
            if lastCombatEvent.type == lastCombatEvent.TYPE_KILLED:
                if lastCombatEvent.subject in self.game.team.members:
                    # Record where our bot died (intended input for graph re-weighting).
                    self.botDeathLocations.append(lastCombatEvent.subject.position)
                    #self.updateRunnerGraph()
            self.lastEventCount = len(self.game.match.combatEvents)
        # run behavior tree
        # The same tree is reused for every bot; only the 'bot' slot changes.
        for bot in self.game.bots_alive:
            self.behaviorTree.root.blackboard['bot'] = bot
            self.behaviorTree.run()
    def shutdown(self):
        """Log the final score if we lost the match."""
        scoreDict = self.game.match.scores
        myScore = scoreDict[self.game.team.name]
        theirScore = scoreDict[self.game.enemyTeam.name]
        if myScore < theirScore:
            self.log.info("We lost! Final score: " + str(myScore) + "-" + str(theirScore))
    """
    Returns most secure positions by using von Neumann neighborhood where r = firingDistance + 2
    """
    def getMostSecurePositions(self,secLoc):
        """Rank open cells near secLoc that have cover and line of sight to it.

        Cells are scored (higher = better) by adjacency to map walls, weighted
        cover blocks, and distance to secLoc relative to firing distance.
        """
        levelSize = (self.level.width, self.level.height)
        width, height = levelSize
        # potPosits holds a debug classification per cell (50/128/255).
        potPosits = [[0 for y in xrange(height)] for x in xrange(width)]
        neighbors = getVonNeumannNeighborhood((int(secLoc.x), int(secLoc.y)), self.level.blockHeights, int(self.level.firingDistance)+2)
        securePositions = []
        for n in neighbors:
            # use raycasting to test whether or not this position can see the flag
            # if it can't, automatically set it to 0
            x,y = n
            if self.level.blockHeights[x][y] >= 2:
                potPosits[x][y] = 50
            else:
                potPosits[x][y] = 255
            if potPosits[x][y] == 255:
                # Require at least one adjacent wall (block or map edge) for cover.
                numWallCells = numAdjCoverBlocks(n, self.level.blockHeights)
                numWallCells += numAdjMapWalls(n, levelSize)
                #print numWallCells
                if numWallCells == 0:
                    potPosits[x][y] = 128
            if potPosits[x][y] == 255:
                # make sure they have LOS with the flag
                goodLOS = True
                lookVec = Vector2(x+0.5,y+0.5) - (secLoc + Vector2(.5,.5))
                lookVecNorm = lookVec.normalized()
                # March a ray in 0.1 steps; any tall block breaks line of sight.
                vecInc = .1
                while vecInc < lookVec.length():
                    testPos = secLoc + lookVecNorm * vecInc
                    #print str(testPos)
                    if self.level.blockHeights[int(testPos.x)][int(testPos.y)] >= 2:
                        goodLOS = False
                        break
                    vecInc += .1
                if not goodLOS:
                    potPosits[x][y] = 128
                else:
                    securePositions.append(n)
        #createPngFromMatrix(potPosits, levelSize)
        return sorted(securePositions, key = lambda p: numAdjMapWalls(p, levelSize)*4 + numAdjCoverBlocksWeighted(p, self) + distTo(Vector2(p[0],p[1]), secLoc)/self.level.firingDistance, reverse = True)
    def getFlankingPosition(self, bot, target):
        """Pick a random reachable position on the left or right flank of target."""
        # NOTE(review): 'map' returns a list on Python 2 only; this file is
        # Python 2 (see xrange above) — would need list(map(...)) under py3.
        flanks = [target + f * self.level.firingDistance for f in [self.leftFlank, self.rightFlank]]
        options = map(lambda f: self.level.findNearestFreePosition(f), flanks)
        #return sorted(options, key = lambda p: (bot.position - p).length())[0]
        return random.choice(options)
    # return number of living enemies
    def numAliveEnemies(self):
        """Count enemy bots whose health is known and positive."""
        livingEnemies = 0
        for bot in self.game.enemyTeam.members:
            if bot.health != None and bot.health > 0:
                livingEnemies += 1
        return livingEnemies
    def makeRunnerGraph(self):
        """Build a grid graph of walkable cells for runner path planning.

        Node id is i + j*width; self.terrain[j][i] maps grid cells to node
        ids (None for blocked cells) and self.positions maps ids to centers.
        """
        blocks = self.level.blockHeights
        width, height = len(blocks), len(blocks[0])
        g = nx.Graph(directed=False, map_height = height, map_width = width)
        #self.positions = g.new_vertex_property('vector<float>')
        #self.weights = g.new_edge_property('float')
        #g.vertex_properties['pos'] = self.positions
        #g.edge_properties['weight'] = self.weights
        self.terrain = []
        self.positions = {}
        for j in range(0, height):
            row = []
            for i in range(0,width):
                if blocks[i][j] == 0:
                    g.add_node(i+j*width, position = (float(i)+0.5, float(j)+0.5) )
                    self.positions[i+j*width] = Vector2(float(i) + 0.5, float(j) + 0.5)
                    row.append(i+j*width)
                else:
                    row.append(None)
            self.terrain.append(row)
        # Connect 4-neighbor walkable cells with unit-weight edges.
        for i, j in itertools.product(range(0, width), range(0, height)):
            p = self.terrain[j][i]
            # NOTE(review): node id 0 (cell 0,0) is falsy, so 'if not p' also
            # skips that valid node and leaves it disconnected.
            if not p: continue
            if i < width-1:
                q = self.terrain[j][i+1]
                if q:
                    e = g.add_edge(p, q, weight = 1.0)
            if j < height-1:
                r = self.terrain[j+1][i]
                if r:
                    e = g.add_edge(p, r, weight = 1.0)
        self.runnerGraph = g
    def updateRunnerGraph(self):
        """Re-weight edges to steer runners away from dangerous cells.

        NOTE(review): dead code — its only call site in tick() is commented
        out, and it references self.graph and self.distances, neither of
        which is ever defined (the graph attribute is self.runnerGraph).
        Re-enabling it as-is would raise AttributeError.
        """
        blocks = self.level.blockHeights
        width, height = len(blocks), len(blocks[0])
        # update the weights based on the distance
        for j in range(0, height):
            for i in range(0, width -1):
                a = self.terrain[j][i]
                b = self.terrain[j][i+1]
                if a and b:
                    w = max(255 - 4*(self.distances[a] + self.distances[b]), 0)
                    self.graph[a][b]['weight'] = w
        for j in range(0, height-1):
            for i in range(0, width):
                a = self.terrain[j][i]
                b = self.terrain[j+1][i]
                if a and b:
                    w = max(255 - 4*(self.distances[a] + self.distances[b]), 0)
                    self.graph[a][b]['weight'] = w
    def getNodeIndex(self, position):
        """Map a world position to its runner-graph node id (i + j*width)."""
        i = int(position.x)
        j = int(position.y)
        width = self.runnerGraph.graph["map_width"]
        return i+j*width
# Helper functions
def distTo(pos1, pos2):
    """Return the Euclidean distance between two Vector2-like points."""
    offset = pos1 - pos2
    return offset.length()
# used for intercepting enemy flag runners
def canInterceptTarget(bot, target, targetGoal):
    """True when bot is strictly closer to targetGoal than target is."""
    botDist = distTo(bot, targetGoal)
    targetDist = distTo(target, targetGoal)
    return botDist < targetDist
# Returns number of blocks that are adjacent that can be used as cover at a given position
def numAdjCoverBlocks(cell, blockHeights):
    """Count cells in the r=1 von Neumann neighborhood tall enough for cover."""
    neighborhood = getVonNeumannNeighborhood(cell, blockHeights, 1)
    return sum(1 for (nx_, ny_) in neighborhood if blockHeights[nx_][ny_] >= 2)
# prioritize cells that have cover from their spawn
def numAdjCoverBlocksWeighted(cell, cmdr):
    """Score adjacent cover, favoring walls on the far side from enemy spawn.

    Neighbors are ranked by distance from the enemy spawn (farthest first);
    each wall cell contributes its rank index, so cover that shields the
    position from the spawn direction scores higher.
    """
    neighborhood = getVonNeumannNeighborhood(cell, cmdr.level.blockHeights, 1)
    spawnPoint = cmdr.game.enemyTeam.botSpawnArea[0]
    ranked = sorted(
        neighborhood,
        key=lambda c: distTo(spawnPoint, Vector2(c[0] + .5, c[1] + .5)),
        reverse=True)
    wallScore = 0
    for rank, aCell in enumerate(ranked):
        if aCell != cell and cmdr.level.blockHeights[aCell[0]][aCell[1]] >= 2:
            wallScore += rank
    return wallScore
# Tests to see approx. how far we can go in a direction until hitting a wall
def unblockedDistInDir(startPos, direction, commander):
    """Approximate how far one can travel from startPos along direction
    (in half-direction steps) before hitting a tall block or the map edge."""
    probe = startPos
    bounds = (commander.level.width, commander.level.height)
    while withinLevelBounds(probe, bounds):
        if commander.level.blockHeights[int(probe.x)][int(probe.y)] >= 2:
            break
        probe = probe + direction / 2
    return distTo(startPos, probe)
# Returns true if the cell position is within level bounds, false otherwise
def withinLevelBounds(pos, levelSize):
    """True if the point lies inside the level rectangle [0,w) x [0,h)."""
    width, height = levelSize
    return 0 <= pos.x < width and 0 <= pos.y < height
# Returns the number of adjacent map walls
def numAdjMapWalls(cell, mapSize):
    """Count how many map borders (0..2) the cell touches."""
    x, y = cell
    width, height = mapSize
    onVerticalEdge = x in (0, width - 1)
    onHorizontalEdge = y in (0, height - 1)
    return int(onVerticalEdge) + int(onHorizontalEdge)
return adjWalls
# Returns the von Neumann Neighborhood of the cell of specified range as a list of tuples (x,y)
# http://mathworld.wolfram.com/vonNeumannNeighborhood.html
def getVonNeumannNeighborhood(cell, cells, r): # where cell is a tuple, cells is a 2D list, and r is the range
    """Return the cells within Manhattan distance r of `cell`, in grid order.

    Scans the whole grid, so results are automatically clipped to bounds.
    """
    cx0, cy0 = cell
    return [(x, y)
            for x, column in enumerate(cells)
            for y in range(len(column))
            if abs(x - cx0) + abs(y - cy0) <= r]
def createPngFromBlockTuples(tupleList, levelSize, name='pngtest.png'): # where tupleList is a list of block position tuples, levelSize is a tuple of x,y level size
    """Debug helper: render the given block cells white on black as a PNG.

    NOTE(review): depends on the pypng module whose import is commented out
    at the top of the file, so calling this raises NameError ('png') as-is.
    NOTE(review): the buffer is allocated as pngList[x][y] but written as
    pngList[y][x] — likely transposed on non-square maps; confirm intent.
    """
    width, height = levelSize
    pngList = [[0 for y in xrange(height)] for x in xrange(width)]
    for t in tupleList: # I could probably use list comprehensions here
        print str(t)
        x,y = t
        column = pngList[y]
        column[x] = 255
    image = png.from_array(pngList, mode='L') # grayscale
    image.save(name)
def createPngFromMatrix(matrix, levelSize, name='pngtest.png'):
    """Debug helper: dump a width-by-height matrix as a grayscale PNG.

    Transposes the matrix into row-major order for png.from_array.
    NOTE(review): also depends on the commented-out 'import png' (pypng);
    calling this as-is raises NameError.
    """
    width, height = levelSize
    transposedMatrix = [[row[i] for row in matrix] for i in xrange(height)]
    image = png.from_array(transposedMatrix, mode='L')
    image.save(name)
# Base class for bot behavior tree
class BotBehaviorTree:
    """Minimal behavior-tree wrapper: holds a root task and ticks it."""
    def __init__(self, child=None):
        # The root node; its blackboard carries shared data for all children.
        self.root = child

    def run(self):
        """Tick the tree once (root's result is intentionally discarded)."""
        self.root.run()
# Base task classes
class Task:
    """Base behavior-tree node.

    Children are linked back to their parent so that getData() can resolve
    blackboard entries by walking up the tree toward the root.
    """
    def __init__(self, children=None, parent=None, blackboard=None):
        # holds the children of task
        self.children = children
        self.blackboard = blackboard
        self.parent = parent
        if self.children is not None:
            for c in self.children:
                c.parent = self

    # returns True for success and False for failure
    def run(self):
        raise NotImplementedError("Can't call Task.run() without defining behavior.")

    # Get data from the dict blackboard
    def getData(self, name):
        """Look up `name` in this node's blackboard, then in each ancestor's.

        Returns None when no blackboard along the path defines the key.
        Fix: the original referenced the bare name `blackboard` (NameError)
        whenever a node had its own blackboard; it now correctly consults
        self.blackboard before walking up the parent chain.
        """
        if self.blackboard is not None and name in self.blackboard:
            return self.blackboard[name]
        testParent = self.parent
        while testParent is not None:
            if testParent.blackboard is not None and name in testParent.blackboard:
                return testParent.blackboard[name]
            testParent = testParent.parent
        # We went through the parents and didn't find anything, so return None
        return None
class Selector (Task):
    """Composite node: succeeds as soon as any child succeeds (short-circuits)."""
    def run(self):
        return any(child.run() for child in self.children)
class Sequence (Task):
    """Composite node: fails as soon as any child fails (short-circuits)."""
    def run(self):
        return all(child.run() for child in self.children)
# Decorators
class Decorator (Task):
    """Base for single-child wrapper nodes (e.g. Inverter).

    Fix: the original dereferenced self.child unconditionally, so the
    default Decorator() crashed with AttributeError on None.
    """
    def __init__(self, child=None, parent=None, blackboard=None):
        self.child = child
        self.parent = parent
        self.blackboard = blackboard
        if self.child is not None:
            self.child.parent = self
class Inverter (Decorator):
    """Decorator: negate the child's result."""
    def run(self):
        childSucceeded = self.child.run()
        return not childSucceeded
# Now onto tasks specific to our program:
class BotIsRunner(Task):
    """Condition: is the current bot assigned the runner role?"""
    def run(self):
        commander = self.getData('commander')
        return self.getData('bot') in commander.runners
class BotIsDefender(Task):
    """Condition: is the current bot assigned the defender role?"""
    def run(self):
        commander = self.getData('commander')
        return self.getData('bot') in commander.defenders
class TeamHasEnemyFlag(Task):
    """Condition: someone (on our team) is carrying the enemy flag."""
    def run(self):
        enemyFlag = self.getData('commander').game.enemyTeam.flag
        return enemyFlag.carrier is not None
class BotHasFlag(Task):
    """Condition: is the current bot the enemy flag's carrier?"""
    def run(self):
        carrier = self.getData('commander').game.enemyTeam.flag.carrier
        return self.getData('bot') == carrier
class LookRandom(Task):
    """Action: defend in place while facing a uniformly random direction."""
    def run(self):
        # Two RNG draws, same order as before: x component first, then y.
        dx = 2 * random.random() - 1
        dy = 2 * random.random() - 1
        commander = self.getData('commander')
        commander.issue(commands.Defend, self.getData('bot'), Vector2(dx, dy), description = 'Looking in random direction')
        return True
class ChargeFlag(Task):
    """Action: charge straight at the enemy flag; always reports success.

    Fix: removed an unused `level` local that the original fetched and
    never read.
    """
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        # Don't re-issue orders while the bot is busy executing/receiving one.
        if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
            cmdr.issue(commands.Charge, bot, cmdr.game.enemyTeam.flag.position, description = 'Rushing enemy flag')
        return True
class SmartApproachFlag(Task):
    """Action: approach the enemy flag along the weighted runner graph.

    Fix: now returns True like every other action leaf. The original fell
    through and returned None, which a parent Selector treats as failure,
    making the node useless if re-enabled (it is currently commented out
    of the behavior tree).
    """
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
            dst = cmdr.game.enemyTeam.flag.position
            message = "Intelligently approaching flag?"
            # calculate the shortest path between the bot and the target using our weights
            srcIndex = cmdr.getNodeIndex(bot.position)
            dstIndex = cmdr.getNodeIndex(dst)
            pathNodes = nx.shortest_path(cmdr.runnerGraph, srcIndex, dstIndex, 'weight')
            path = [cmdr.positions[p] for p in pathNodes if cmdr.positions[p]]
            if len(path) > 0:
                orderPath = path[::10]
                orderPath.append(path[-1]) # take every 10th point including last point
                cmdr.issue(commands.Charge, bot, orderPath, description = message)
        return True
class ChargeToFlagFlank(Task):
    """Action: charge at the enemy flag via a randomly chosen flank position.

    Fix: removed an unused `level` local that the original fetched and
    never read.
    """
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
            flankPos = cmdr.getFlankingPosition(bot, cmdr.game.enemyTeam.flag.position)
            cmdr.issue(commands.Charge, bot, flankPos, description = 'Rushing enemy flag via flank')
        return True
class AttackFlag(Task):
    """Action: advance on the enemy flag cautiously (Attack keeps aim up)."""
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        if bot.state not in (bot.STATE_SHOOTING, bot.STATE_ATTACKING, bot.STATE_TAKINGORDERS):
            cmdr.issue(commands.Attack, bot, cmdr.game.enemyTeam.flag.position, description = 'Attacking enemy flag')
        return True
class WithinShootingDistance(Task):
    """Condition: is the bot within firing distance of blackboard 'targetPos'?

    Fix: the original fetched firingDistance in __init__ via self.getData(),
    but __init__ ran before Task.__init__ set blackboard/parent (and before
    the node was attached to a tree), so construction always crashed with
    AttributeError. The lookup now happens at run() time, and the default
    Task constructor is reused.
    """
    def run(self):
        shootingDistance = self.getData('commander').level.firingDistance
        return distTo(self.getData('bot').position, self.getData('targetPos')) < shootingDistance
class RunToScoreZone(Task):
    """Action: carry the captured flag back to our scoring zone."""
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        if bot.state not in (bot.STATE_SHOOTING, bot.STATE_CHARGING, bot.STATE_TAKINGORDERS):
            cmdr.issue(commands.Charge, bot, cmdr.game.team.flagScoreLocation, description = 'Taking their flag home')
        return True
class AllyHasFlag(Task):
    """Condition: one of our living bots is carrying the enemy flag."""
    def run(self):
        cmdr = self.getData('commander')
        carrier = cmdr.game.enemyTeam.flag.carrier
        return any(b == carrier for b in cmdr.game.bots_alive)
class SecureEnemyFlagObjective(Task):
    """Action: hold whichever enemy flag objective (flag spawn or enemy
    score zone) is nearer to the bot, moving there first if needed.
    Always returns True (behavior-tree action semantics)."""
    def run(self):
        bot = self.getData('bot')
        cmdr = self.getData('commander')
        flagSpawnLoc = cmdr.game.enemyTeam.flagSpawnLocation
        flagScoreLoc = cmdr.game.enemyTeam.flagScoreLocation
        # secure their flag spawn or their flag capture zone; whichever is closer
        spawnDist = distTo(bot.position, flagSpawnLoc)
        scoreDist = distTo(bot.position, flagScoreLoc)
        if spawnDist < scoreDist:
            secureLoc, secureDist = flagSpawnLoc, spawnDist
        else:
            secureLoc, secureDist = flagScoreLoc, scoreDist
        if secureDist < 2:
            # Already on station: hold position and sweep front/left/right.
            if bot.state not in (bot.STATE_SHOOTING, bot.STATE_DEFENDING, bot.STATE_TAKINGORDERS):
                # TODO face direction(s) that the attackers will most likely come from
                direction = (cmdr.midPoint - bot.position).normalized() + (random.random() - 0.5)
                dirLeft = Vector2(-direction.y, direction.x)
                dirRight = Vector2(direction.y, -direction.x)
                cmdr.issue(commands.Defend, bot, [(direction, 1.0), (dirLeft, 1.0), (direction, 1.0), (dirRight, 1.0)], description = 'Keeping flag objective secure')
        else:
            # Not there yet: move carefully while enemies live, sprint otherwise.
            enemiesAlive = any(b.health is not None and b.health > 0
                               for b in cmdr.game.enemyTeam.members)
            if enemiesAlive:
                if bot.state not in (bot.STATE_SHOOTING, bot.STATE_ATTACKING, bot.STATE_TAKINGORDERS):
                    cmdr.issue(commands.Attack, bot, secureLoc, description = 'Moving to secure enemy flag objective')
            else:
                if bot.state not in (bot.STATE_SHOOTING, bot.STATE_CHARGING, bot.STATE_TAKINGORDERS):
                    cmdr.issue(commands.Charge, bot, secureLoc, description = 'Charging to secure enemy flag objective')
        return True
class NearEnemyFlag(Task):
    """Condition: bot is within 1.5x firing distance of the enemy flag."""
    def run(self):
        cmdr = self.getData('commander')
        bot = self.getData('bot')
        threshold = cmdr.level.firingDistance * 1.5
        return distTo(bot.position, cmdr.game.enemyTeam.flag.position) < threshold
class EnemiesAreAlive(Task):
    """Condition: at least one enemy bot has known, positive health."""
    def run(self):
        enemyTeam = self.getData('commander').game.enemyTeam
        return any(b.health is not None and b.health > 0 for b in enemyTeam.members)
# Defender bot code
class OurFlagIsInBase(Task):
    """Condition: our flag sits within 3 units of its spawn location."""
    def run(self):
        team = self.getData('commander').game.team
        return distTo(team.flag.position, team.flagSpawnLocation) < 3
class OurFlagIsOnOurHalf(Task):
    """Condition: our flag is nearer to our flag spawn than to the
    enemy's score zone (i.e. still recoverable on our side)."""
    def run(self):
        cmdr = self.getData('commander')
        flagPos = cmdr.game.team.flag.position
        distHome = distTo(flagPos, cmdr.game.team.flagSpawnLocation)
        distEnemyScore = distTo(flagPos, cmdr.game.enemyTeam.flagScoreLocation)
        return distHome < distEnemyScore
class SecureOurFlag(Task):
    """Action: guard our flag at its current position, moving there first
    if needed. Always returns True (behavior-tree action semantics)."""
    def run(self):
        cmdr = self.getData('commander')
        bot = self.getData('bot')
        secureLoc = cmdr.game.team.flag.position
        secureDist = distTo(bot.position, secureLoc)
        if secureDist < 2:
            # On station: hold and sweep front/left/right.
            if bot.state not in (bot.STATE_SHOOTING, bot.STATE_DEFENDING, bot.STATE_TAKINGORDERS):
                # TODO face direction(s) that the attackers will most likely come from
                direction = (cmdr.midPoint - bot.position).normalized() + (random.random() - 0.5)
                dirLeft = Vector2(-direction.y, direction.x)
                dirRight = Vector2(direction.y, -direction.x)
                cmdr.issue(commands.Defend, bot, [(direction, 1.0), (dirLeft, 1.0), (direction, 1.0), (dirRight, 1.0)], description = 'Keeping our flag secure')
        else:
            # Move carefully while enemies live, sprint otherwise.
            enemiesAlive = any(b.health is not None and b.health > 0
                               for b in cmdr.game.enemyTeam.members)
            if enemiesAlive:
                if bot.state not in (bot.STATE_SHOOTING, bot.STATE_ATTACKING, bot.STATE_TAKINGORDERS):
                    cmdr.issue(commands.Attack, bot, secureLoc, description = 'Moving to secure our flag')
            else:
                if bot.state not in (bot.STATE_SHOOTING, bot.STATE_CHARGING, bot.STATE_TAKINGORDERS):
                    cmdr.issue(commands.Charge, bot, secureLoc, description = 'Charging to secure our flag')
        return True
class SecureOurFlagStand(Task):
    """Action: defend the flag *stand* (our flag's spawn cell).

    Picks the best-ranked precomputed secure cell that can still cover the
    stand, walks there (Attack while enemies live, Charge otherwise), and
    once in position faces the open adjacent directions that offer a real
    line of fire. Always returns True.

    Fix: the original crashed with ``None[0]`` whenever secure cells existed
    but none was within firing range of the stand (and relied on indexing a
    Vector2 when the list was empty). Both cases now fall back cleanly to
    the stand's own cell center.
    """
    def run(self):
        cmdr = self.getData('commander')
        bot = self.getData('bot')
        flagSpawn = cmdr.game.team.flagSpawnLocation
        # Pick the first ranked safe cell that can still cover the stand
        # (getMostSecurePositions already orders cells best-first).
        chosenLoc = None
        for sLoc in cmdr.secureFlagDefenseLocs:
            if distTo(Vector2(sLoc[0] + .5, sLoc[1] + .5), flagSpawn + Vector2(.5, .5)) <= cmdr.level.firingDistance - 1:
                chosenLoc = sLoc
                break
        if chosenLoc is not None:
            secureLoc = Vector2(chosenLoc[0] + 0.5, chosenLoc[1] + 0.5)
        else:
            # No usable precomputed cell: stand on the flag spawn itself.
            secureLoc = Vector2(flagSpawn.x + 0.5, flagSpawn.y + 0.5)
        secureDist = distTo(bot.position, secureLoc)
        if secureDist < .5:
            # Already on station: face the open adjacent directions.
            if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_DEFENDING and bot.state != bot.STATE_TAKINGORDERS:
                directions = []
                secureLocCell = (int(secureLoc.x), int(secureLoc.y))
                for aCell in getVonNeumannNeighborhood(secureLocCell, cmdr.level.blockHeights, 1):
                    if aCell != secureLocCell:
                        if cmdr.level.blockHeights[aCell[0]][aCell[1]] <= 1:
                            aimDir = Vector2(aCell[0], aCell[1]) - Vector2(secureLocCell[0], secureLocCell[1])
                            aimDist = unblockedDistInDir(secureLoc, aimDir, cmdr)
                            # Only watch directions with a meaningful line of fire.
                            if aimDist > cmdr.level.firingDistance / 3:
                                directions.append(aimDir.normalized())
                if len(directions) > 0:
                    cmdr.issue(commands.Defend, bot, directions, description = 'Keeping our flag stand secure')
                else:
                    # Boxed in: at least face the flag spawn.
                    cmdr.issue(commands.Defend, bot, (cmdr.game.team.flagSpawnLocation - bot.position).normalized(), description = 'Keeping our flag stand secure')
        else:
            # Not there yet: move carefully while enemies live, sprint otherwise.
            enemiesAlive = any(b.health is not None and b.health > 0
                               for b in cmdr.game.enemyTeam.members)
            if enemiesAlive:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_ATTACKING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Attack, bot, secureLoc, description = 'Moving to secure our flag stand')
            else:
                if bot.state != bot.STATE_SHOOTING and bot.state != bot.STATE_CHARGING and bot.state != bot.STATE_TAKINGORDERS:
                    cmdr.issue(commands.Charge, bot, secureLoc, description = 'Charging to secure our flag stand')
        return True
| [
"arleckshunt@googlemail.com"
] | arleckshunt@googlemail.com |
071cdfa73d8b8928b72c1e5dd2ba4a8ba6f7578c | b7850a5605eea1ab876140e2ab9eea9b5e3b6305 | /ControlFlujo/for.py | 80c2c78706b7c7dc8c074f7ee4798cbe2b99f7a3 | [] | no_license | lagarridom/PythonSemestral19_1 | db37159983f842a2310f676e167f877fe93c6706 | aad25b1a84822f52796f5faed251a62037a04c3a | refs/heads/master | 2020-03-27T06:34:45.149558 | 2018-10-17T02:37:05 | 2018-10-17T02:37:05 | 146,116,479 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #10.-
"""
for ITERADOR in ITERABLE:
instrucciones
"""
for i in [10, 20, 30, 40, 50]:
print(i)
nombres = ["luis", "pato", "gabriel"]
for nom in nombres:
print("El es"+nom)
for elemento in ("cadena", 3, 3.4, True):
print(type(elemento))
diccionario = {"lunes":"pollo","martes":"pescado","miercoles":"carne"}
for llave in diccionario:
print("Los %s me gusta comer %s"%(llave,diccionario[llave]))
#print("Los {} me gusta comer {}".format(llave,diccionario[llave]))
lista = [("Jorge",10),("Gueva",9),("Ana",10)]
for nombre,calif in lista:
print("%s obtuvo %s"%(nombre,calif))
| [
"noreply@github.com"
] | lagarridom.noreply@github.com |
1e6eba4819d445e4bc1753cf73177ddf6931adac | ae09e15b3f4ac2c6f5a0d3f7a0d73c6def37ab2b | /joinquant/get_data.py | eb55a21d0de07f52edf198b705d82a416b5a4de2 | [] | no_license | fswzb/sensequant | db81053685a985fe50e7082b6f2e65e2e6540212 | b5fdac13f5caf2d00b99506c1c25389b406d9b17 | refs/heads/master | 2020-08-01T15:53:35.102784 | 2016-08-01T03:56:49 | 2016-08-01T03:56:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,713 | py | import pandas as pd
import numpy as np
import itertools
from src import configure
import sys
# Add the Test Folder path to the sys.path list
sys.path.append('/home/lcc/sensequant/code')
# Now you can import your module
from src.preprocess import in_day_unit
class get_data():
    """Accessors over the project's technical/fundamental HDF5 stores.

    Fixes applied to the original:
    - ``get_fundamental_items`` had a syntax error (``'%s: %s'(% k, % v)``).
    - nested ``read_data`` was defined with one parameter but called with two.
    - the ``'avg'`` branch called an undefined ``ave``; ``'low'`` was mapped
      to the close aggregator; ``'close'`` was unreachable.
    - ``get_fundamentals`` used ``json`` without importing it, referenced
      undefined names (``df`` before assignment, ``columns_list``, ``info``,
      ``share``) and compared stock ids against column names.
    """
    def __init__(self, fname):
        # NOTE(review): the fname argument is ignored and the configured
        # technical-data store is always used — confirm this is intended.
        self.fname = configure.tech_hdf_file

    def history(self, start_date=None, end_date=None, count=None, filed='avg', stock_list=None):
        '''
        Aggregate day-level data for each stock in stock_list.
        Set either start_date OR count, not both.
        Returns a DataFrame with columns ['stock_id', filed].
        '''
        def read_data(fname, stock_list):
            # Load and day-aggregate every requested stock from the HDF store.
            df_all = pd.DataFrame()
            for stock in stock_list:
                df = pd.read_hdf(fname, stock)
                # NOTE(review): drops the *row* labelled 'index'; if a column
                # was meant this should be df.drop('index', axis=1) — confirm.
                df = df.drop('index')
                df = in_day_unit(df)
                df_all = df_all.append(df)
            return df_all
        def _scalify(values):
            # Collapse a 1-element array to a scalar (None when empty).
            # NOTE(review): replaces an undefined 'scalify' in the original.
            return values[0] if len(values) else None
        def open_(df):
            return df.loc[0, 'open']
        def close_(df):
            return df.iloc[-1]['close']
        def low(df):
            if count:
                return np.sort(df.low.values)[:count]
        def high(df):
            return np.sort(df.high.values)[-count:]
        def avg(df):
            return np.average(df[:count].close.values)
        def pre_close(df):
            return _scalify(df[df.date == np.sort(df.date)[-count]].close.values)
        def integrate_into_df(series, colname):
            return pd.DataFrame(series, columns=[colname]).reset_index()
        if start_date and count:
            raise ValueError('CAN set count or start_date!')
        df_all = read_data(self.fname, stock_list)
        df_all = df_all[df_all.date > pd.to_datetime(start_date)] if start_date else df_all
        groups = df_all.groupby('stock_id')
        # Dispatch table replaces the original if/elif chain, which mapped
        # 'low' to the close aggregator and 'avg' to an undefined name.
        aggregators = {
            'open': open_,
            'close': close_,
            'low': low,
            'high': high,
            'avg': avg,
            'pre_close': pre_close,
        }
        if filed not in aggregators:
            raise ValueError('No such filed')
        result = groups.apply(aggregators[filed])
        return integrate_into_df(result, filed)

    def get_fundamentals(self, start_date, end_date, stock_id, colname_list):
        """Collect the requested fundamental columns for one stock over a
        date range, outer-merging across the per-table HDF stores."""
        import json  # local import: json is not in the module header
        def has_same_element(l1, l2):
            '''check whether two lists share at least one element'''
            return any(e in l2 for e in l1)
        col4check = json.loads(open(configure.colnames_in_each_fundamental_df).read())
        result = pd.DataFrame()
        for df_name, columns in col4check.items():
            # Only read stores that actually contain a requested column.
            if has_same_element(columns, colname_list):
                df = pd.read_hdf(configure.fundamental_hdf_file,
                                 df_name,
                                 columns=colname_list + ['stock_id', 'date'],
                                 where=['date>="%s"' % start_date,
                                        'date<="%s"' % end_date,
                                        'stock_id="%s"' % stock_id])
                if result.empty:
                    result = df
                else:
                    result = pd.merge(result, df, on=['stock_id', 'date'], how='outer')
        return result

    def get_fundamental_items(self):
        """Print every fundamental table and the columns it provides."""
        import json  # local import: json is not in the module header
        col4check = json.loads(open(configure.colnames_in_each_fundamental_df).read())
        for k, v in col4check.items():
            print('%s: %s' % (k, v))
"rylanlzc@gmail.com"
] | rylanlzc@gmail.com |
96c1f73d17d18f7906615ca48bc2e2d25d8b7259 | caa06eca3eef2549d5088f6487201f734b35822e | /multi_ie/EE/model/multi_pointer_net.py | 0d2334ed6d7a09e87757e36528cedd3c228713c5 | [] | no_license | kelvincjr/shared | f947353d13e27530ba44ea664e27de51db71a5b6 | 4bc4a12b0ab44c6847a67cbd7639ce3c025f38f8 | refs/heads/master | 2023-06-23T19:38:14.801083 | 2022-05-17T09:45:22 | 2022-05-17T09:45:22 | 141,774,490 | 6 | 1 | null | 2023-06-12T21:30:07 | 2018-07-21T02:22:34 | Python | UTF-8 | Python | false | false | 7,694 | py | # _*_ coding:utf-8 _*_
import warnings
import numpy as np
import torch
import torch.nn as nn
from transformers import BertModel
from transformers import BertPreTrainedModel
from .layernorm import ConditionalLayerNorm
#from utils.data_util import batch_gather
warnings.filterwarnings("ignore")
def batch_gather(data: torch.Tensor, index: torch.Tensor):
    """For each batch row b, select data[b, index[b], :].

    Args:
        data: (batch, seq_len, hidden) tensor.
        index: (batch,) long tensor of per-row positions.
    Returns:
        (batch, hidden) tensor on data's device.

    Fix: pure-tensor advanced indexing replaces the original
    cpu()/numpy() round-trip, which forced a device transfer per call and
    (via .data.numpy()) silently detached the result from the autograd
    graph, cutting gradients to the subject representation during training.
    """
    idx = index.to(data.device)
    batch_idx = torch.arange(idx.shape[0], device=data.device)
    return data[batch_idx, idx]
class ERENet(nn.Module):
    """
    ERENet : entity relation jointed extraction

    Subject span extraction via a 2-unit pointer head, then per-subject
    predicate/object extraction via a classes_num*2 pointer head whose
    encoder states are conditioned on the subject span (ConditionalLayerNorm).
    """
    def __init__(self, encoder, classes_num):
        """encoder: a pretrained BERT-style model; classes_num: number of
        predicate classes for the object pointer head."""
        super().__init__()
        self.classes_num = classes_num
        # BERT model
        self.bert = encoder
        config = encoder.config
        # NOTE(review): token_entity_emb is constructed but unused below
        # (its only use is commented out in forward).
        self.token_entity_emb = nn.Embedding(num_embeddings=2, embedding_dim=config.hidden_size,
                                             padding_idx=0)
        # self.encoder_layer = TransformerEncoderLayer(config.hidden_size, nhead=4)
        # self.transformer_encoder = TransformerEncoder(self.encoder_layer, num_layers=1)
        self.LayerNorm = ConditionalLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # pointer net work
        self.po_dense = nn.Linear(config.hidden_size, self.classes_num * 2)
        self.subject_dense = nn.Linear(config.hidden_size, 2)
        # reduction='none' so losses can be masked per token before averaging.
        self.loss_fct = nn.BCEWithLogitsLoss(reduction='none')
        #self.init_weights()
    def forward(self, q_ids=None, passage_ids=None, segment_ids=None, attention_mask=None, subject_ids=None,
                subject_labels=None,
                object_labels=None, eval_file=None,
                is_eval=False):
        """Training (is_eval=False): return the combined subject+po loss.

        Evaluation (is_eval=True): decode subject spans from the pointer
        probabilities, then score predicates/objects per subject; returns
        (qids, subject_ids, po_tensor).
        NOTE(review): attention_mask argument is ignored — the mask is
        recomputed from passage_ids != 0.
        """
        mask = (passage_ids != 0).float()
        bert_encoder = self.bert(passage_ids, token_type_ids=segment_ids, attention_mask=mask)[0]
        if not is_eval:
            # subject_encoder = self.token_entity_emb(token_type_ids)
            # context_encoder = bert_encoder + subject_encoder
            # Gather the encoder states at the gold subject start/end tokens.
            sub_start_encoder = batch_gather(bert_encoder, subject_ids[:, 0])
            sub_end_encoder = batch_gather(bert_encoder, subject_ids[:, 1])
            subject = torch.cat([sub_start_encoder, sub_end_encoder], 1)
            # Condition every token representation on the subject span.
            context_encoder = self.LayerNorm(bert_encoder, subject)
            sub_preds = self.subject_dense(bert_encoder)
            po_preds = self.po_dense(context_encoder).reshape(passage_ids.size(0), -1, self.classes_num, 2)
            subject_loss = self.loss_fct(sub_preds, subject_labels)
            # subject_loss = F.binary_cross_entropy(F.sigmoid(sub_preds) ** 2, subject_labels, reduction='none')
            subject_loss = subject_loss.mean(2)
            # Mask padding tokens out of the average.
            subject_loss = torch.sum(subject_loss * mask.float()) / torch.sum(mask.float())
            po_loss = self.loss_fct(po_preds, object_labels)
            # po_loss = F.binary_cross_entropy(F.sigmoid(po_preds) ** 4, object_labels, reduction='none')
            po_loss = torch.sum(po_loss.mean(3), 2)
            po_loss = torch.sum(po_loss * mask.float()) / torch.sum(mask.float())
            loss = subject_loss + po_loss
            return loss
        else:
            subject_preds = nn.Sigmoid()(self.subject_dense(bert_encoder))
            answer_list = list()
            # Decode candidate subject spans per question with asymmetric
            # thresholds (start > 0.6, end > 0.5).
            for qid, sub_pred in zip(q_ids.cpu().numpy(),
                                     subject_preds.cpu().numpy()):
                context = eval_file[qid].bert_tokens
                start = np.where(sub_pred[:, 0] > 0.6)[0]
                end = np.where(sub_pred[:, 1] > 0.5)[0]
                subjects = []
                for i in start:
                    j = end[end >= i]
                    # Skip [CLS] and anything reaching into trailing specials.
                    if i == 0 or i > len(context) - 2:
                        continue
                    if len(j) > 0:
                        j = j[0]
                        if j > len(context) - 2:
                            continue
                        subjects.append((i, j))
                answer_list.append(subjects)
            # Expand each sample once per decoded subject for the po head.
            qid_ids, bert_encoders, pass_ids, subject_ids, token_type_ids = [], [], [], [], []
            for i, subjects in enumerate(answer_list):
                if subjects:
                    qid = q_ids[i].unsqueeze(0).expand(len(subjects))
                    pass_tensor = passage_ids[i, :].unsqueeze(0).expand(len(subjects), passage_ids.size(1))
                    new_bert_encoder = bert_encoder[i, :, :].unsqueeze(0).expand(len(subjects), bert_encoder.size(1),
                                                                                bert_encoder.size(2))
                    token_type_id = torch.zeros((len(subjects), passage_ids.size(1)), dtype=torch.long)
                    for index, (start, end) in enumerate(subjects):
                        token_type_id[index, start:end + 1] = 1
                    qid_ids.append(qid)
                    pass_ids.append(pass_tensor)
                    subject_ids.append(torch.tensor(subjects, dtype=torch.long))
                    bert_encoders.append(new_bert_encoder)
                    token_type_ids.append(token_type_id)
            if len(qid_ids) == 0:
                # No subject found anywhere in the batch: return sentinel
                # tensors (qid -1) that the caller can recognize and skip.
                subject_ids = torch.zeros(1, 2).long().to(bert_encoder.device)
                qid_tensor = torch.tensor([-1], dtype=torch.long).to(bert_encoder.device)
                po_tensor = torch.zeros(1, bert_encoder.size(1)).long().to(bert_encoder.device)
                return qid_tensor, subject_ids, po_tensor
            qids = torch.cat(qid_ids).to(bert_encoder.device)
            pass_ids = torch.cat(pass_ids).to(bert_encoder.device)
            bert_encoders = torch.cat(bert_encoders).to(bert_encoder.device)
            # token_type_ids = torch.cat(token_type_ids).to(bert_encoder.device)
            subject_ids = torch.cat(subject_ids).to(bert_encoder.device)
            flag = False
            # Process the expanded batch in chunks to bound memory use.
            split_heads = 1024
            bert_encoders_ = torch.split(bert_encoders, split_heads, dim=0)
            pass_ids_ = torch.split(pass_ids, split_heads, dim=0)
            # token_type_ids_ = torch.split(token_type_ids, split_heads, dim=0)
            subject_encoder_ = torch.split(subject_ids, split_heads, dim=0)
            po_preds = list()
            for i in range(len(bert_encoders_)):
                bert_encoders = bert_encoders_[i]
                # token_type_ids = token_type_ids_[i]
                pass_ids = pass_ids_[i]
                subject_encoder = subject_encoder_[i]
                if bert_encoders.size(0) == 1:
                    # Size-1 chunk workaround: duplicate the row, run the
                    # heads, then keep only the second copy below.
                    flag = True
                    # print('flag = True**********')
                    bert_encoders = bert_encoders.expand(2, bert_encoders.size(1), bert_encoders.size(2))
                    subject_encoder = subject_encoder.expand(2, subject_encoder.size(1))
                    # pass_ids = pass_ids.expand(2, pass_ids.size(1))
                sub_start_encoder = batch_gather(bert_encoders, subject_encoder[:, 0])
                sub_end_encoder = batch_gather(bert_encoders, subject_encoder[:, 1])
                subject = torch.cat([sub_start_encoder, sub_end_encoder], 1)
                context_encoder = self.LayerNorm(bert_encoders, subject)
                po_pred = self.po_dense(context_encoder).reshape(subject_encoder.size(0), -1, self.classes_num, 2)
                if flag:
                    po_pred = po_pred[1, :, :, :].unsqueeze(0)
                po_preds.append(po_pred)
            po_tensor = torch.cat(po_preds).to(qids.device)
            po_tensor = nn.Sigmoid()(po_tensor)
            return qids, subject_ids, po_tensor
| [
"deco_2004@163.com"
] | deco_2004@163.com |
5b3b646c4113d5b24b5038e64dcdf1fcd7ee035b | fa572b453270fd688e91cbed75d488c24b86cb12 | /lists/tests/test_forms.py | 7f7270b28c3a1678dd2f22d5451771c213844173 | [] | no_license | XOyarz/TDD-with-Python | d3bfea9ac4b0391058a6b8b2d759cde8c53e759c | d2350e70cd77691255a667cbff60910b36a30cc3 | refs/heads/master | 2021-01-21T13:26:52.011789 | 2017-09-11T18:39:45 | 2017-09-11T18:39:45 | 102,126,072 | 0 | 1 | null | 2017-09-11T18:39:46 | 2017-09-01T15:19:02 | Python | UTF-8 | Python | false | false | 929 | py | from django.test import TestCase
from lists.forms import ItemForm, EMPTY_ITEM_ERROR
from lists.models import Item, List
class ItemFormTest(TestCase):
def test_form_item_input_has_placeholder_and_css_classes(self):
form = ItemForm()
self.assertIn('placeholder="Enter a to-do item"', form.as_p())
self.assertIn('class="form-control input-lg"', form.as_p())
def test_form_validation_for_blank_items(self):
form = ItemForm(data={'text':''})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])
def test_form_save_handles_saving_to_a_list(self):
list_ = List.objects.create()
form = ItemForm(data={'text': 'do me'})
new_item = form.save(for_list=list_)
self.assertEqual(new_item, Item.objects.first())
self.assertEqual(new_item.text, 'do me')
self.assertEqual(new_item.list, list_) | [
"xavier982@hotmail.com"
] | xavier982@hotmail.com |
e33f5e747a3394df1d4ab13d0f881353585a66d3 | fb9b320109ba55fc68fab793ce7a77058dc8b682 | /semi-supervised-learning/visual/score_visual_lfs.py | 044956da5eebf4ddcac680083ba6c027aec29002 | [] | no_license | NLP4H/MSBC | c3b03806666584a4fa1cc7328ba9d45f061d2a77 | 60b0b89496eb28707d323b595af7a411dbd84768 | refs/heads/master | 2022-10-09T19:02:48.998958 | 2020-06-02T07:15:11 | 2020-06-02T07:15:11 | 268,724,086 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,366 | py | """
Snorkel labelling functions for visual score
Note: This visual subscore is not based on Neurostatus defns but
based on heureustic information provided by Zhen based on advice given from MS clinicians
"""
import os
import pandas as pd
import re
import numpy as np
from nltk.tokenize import word_tokenize, sent_tokenize
import snorkel
from snorkel.labeling import labeling_function
def predict_visual_acuity(note):
"""
Rules
1: 20/20 – 20/30
2: 20/30 – 20/60
3: 20/60 – 20/100
4: 20/100 – 20/200
Input: note
Returns: raw visual acuity score
"""
score = -1
# Pattern
p = re.compile(r" 20\/\d{2,3}")
p2 = re.compile(r"visual acuity", re.IGNORECASE)
if len(re.findall(p, note)) > 0:
# List of possible visual acuities in each note
visual_acuities = []
for acuity in re.findall(p, note):
visual_acuities.append(int(acuity[4:]))
# Take the worst disability
worst_eye = max(visual_acuities)
best_eye = min(visual_acuities)
# vision improvement -> remove the worst one
sentences = sent_tokenize(note)
for sent in sentences:
# in each sentence, look for visual aquity number and "vision improved" token
if len(visual_acuities) > 1 and len(re.findall(r"(?:Vision|vision)", sent)) > 0 and len(re.findall(r"improv", sent)) > 0:
# If originally is finger counting, than no use to remove
if len(re.findall(r"finger counting vision", note)) > 0:
break
else:
visual_acuities.remove(max(visual_acuities))
worst_eye = max(visual_acuities)
break
if len(visual_acuities) > 1 and len(re.findall(p, sent)) > 0 and len(re.findall(r"improv", sent)) > 0:
visual_acuities.remove(max(visual_acuities))
worst_eye = max(visual_acuities)
break
# Vision recover
if len(re.findall(r"(?:Vision|vision) recover", sent)) > 0:
if len(re.findall(p, sent)) > 0:
visual_acuities = []
for acuity in re.findall(p, sent):
visual_acuities.append(int(acuity[4:]))
worst_eye = max(visual_acuities)
best_eye = min(visual_acuities)
break
else:
score = 0
# print("Visual Acuity: ", score)
return score
# print("worst:", worst_eye)
# print("best:", best_eye)
# 20/20 normal
if worst_eye == 20:
score = 0
# print("Visual Acuity: ", score)
return score
# 1: 20/20 – 20/30
elif worst_eye > 20 and worst_eye <= 30:
score = 1
# print("Visual Acuity: ", score)
return score
# 2: 20/30 – 20/60
elif worst_eye > 30 and worst_eye <= 60:
score = 2
# print("Visual Acuity: ", score)
return score
# 3: 20/60 – 20/100
elif worst_eye > 60 and worst_eye <= 100:
score = 3
# print("Visual Acuity: ", score)
return score
# 4: 20/100 – 20/200
elif (worst_eye > 100 and worst_eye <= 200) or \
(worst_eye != best_eye and worst_eye > 60 and worst_eye <= 100 and best_eye > 60 and best_eye <= 100):
score = 4
# print("Visual Acuity: ", score)
return score
# 5: > 200
elif (worst_eye > 200) or \
(worst_eye != best_eye and worst_eye > 100 and worst_eye <= 200 and best_eye > 60 and best_eye <= 200):
score = 5
# print("Visual Acuity: ", score)
return score
# 6: worst eye > 200, best eye >= 60
elif (worst_eye > 200):
score = 6
# print("Visual Acuity: ", score)
return score
# "Visual acuity" is detected
elif len(re.findall(p2, note)) > 0:
sentences = sent_tokenize(note)
for sent in sentences:
if len(re.findall(p2, sent)) > 0 and len(re.findall(r"normal|Normal", sent)) > 0:
score = 0
# print("Visual Acuity: ", score)
return score
# print("Visual Acuity: ", score)
return score
def predict_pallor(note):
"""
Check whether there's disc pallor
Input: note
Returns: score for disc pallor (maximum 1)
"""
# Patterns
p = re.compile(r" disk | disc |fundoscopy| fundi | fundus|optic nerve", re.IGNORECASE)
p_neg = re.compile(r" no | not |normal|unremarkable|crisp", re.IGNORECASE)
p_abnormal = re.compile(r"pallor|pale", re.IGNORECASE)
# Predictions
score = -1
sentences = sent_tokenize(note)
for sent in sentences:
if len(re.findall(r"optic atrophy", sent)) > 0:
score = 1
break
if len(re.findall(r"temporal pallor|significant pallor|bilateral optic disc pallor", sent)) > 0:
score = 1
break
if len(re.findall(p, sent)) > 0:
if len(re.findall(p_neg, sent)) > 0:
score = 0
break
elif len(re.findall(p_abnormal, sent)) > 0:
score = 1
break
# print("Pallor:", score)
return score
def predict_scotoma(note):
"""
Check scotoma
0: normal
1: small / no mention of size
2: large
Input: note
Returns: score for scotoma
"""
# Patterns
p = re.compile(r"scotoma", re.IGNORECASE)
p_neg = re.compile(r" no | deni|not have|not had", re.IGNORECASE)
# Predictions
score = -1
sentences = sent_tokenize(note)
for sent in sentences:
if len(re.findall(p, sent)) > 0:
# print(sent)
# Negation
if len(re.findall(p_neg, sent)) > 0:
score = 0
break
# Large
elif len(re.findall(r"large|Large", sent)) > 0:
score = 2
break
else:
score = 1
break
# print("Scotoma: ", score)
return score
def predict_visual_fields(note):
"""
Outputs:
0: if no change in visual field
1: if visual field got worst
"""
p = re.compile(r"visual field", re.IGNORECASE)
p_neg = re.compile(r"full|intact|normal")
# p2 = re.compile(r"hemianopsia", re.IGNORECASE)
score = -1
sentences = sent_tokenize(note)
for sent in sentences:
if len(re.findall(p, sent)) > 0:
if len(re.findall(p_neg, sent)) > 0:
score = 0
elif len(re.findall(r"restrict", sent)) > 0:
score = 1
# print("Visual Fields: ", score)
return score
def general_rule(note):
"""
Zhen's heurestics (developed through meetings with MS clinicians who label)
Apply general rules where there's no specific description in the notes
1. Finger Counting
2. Light Perception
"""
# Normal
# Some level of blindness
# finger counting
p1 = re.compile(r"count finger acuity|remains blind|left blind|right blind", re.IGNORECASE)
score = -1
sentences = sent_tokenize(note)
# TODO: Black and white|shapes and shadows
for sent in sentences:
# Normal
if len(re.findall(r"no visual symptom", sent)) > 0:
# print("No visual symptons")
score = 0
break
if len(re.findall(r"neurological exam", sent)) > 0 and len(re.findall(r"normal", sent)) > 0:
# print("Neurological exam normal")
score = 0
break
if len(re.findall(r"otherwise|Otherwise", sent)) > 0 and len(re.findall(r"normal", sent)) > 0 and len(re.findall(r"visual|vision", sent)) == 0:
score = 0
break
if len(re.findall(r"EDSS", sent)) > 0 and len(re.findall(r"based on sensory", sent)) > 0:
score = 0
break
# Abnormal
if len(re.findall(p1, sent)) > 0:
# print("Blind/Finger counting")
score = 6
break
elif len(re.findall(r"finger counting", sent)) > 0 and len(re.findall(r"foot", sent)) > 0:
# print("Finger counting 1 ft")
score = 5
break
elif len(re.findall(r"finger counting", sent)) > 0 and len(re.findall(r"2 feet|two feet|3 feet|three feet", sent)) > 0:
# print("Finger counting 2/3 ft")
score = 4
break
elif len(re.findall(r"finger counting", sent)) > 0 and len(re.findall(r"light perception", sent)) > 0:
# print("Finger counting & light perception")
score = 6
break
elif len(re.findall(r"EDSS", sent)) > 0 and len(re.findall(r"\s4", sent)) > 0 and len(re.findall(r"vision alone", sent)) > 0:
# print("EDSS 4 related to vision")
score = 6
break
elif len(re.findall(r"EDSS", sent)) > 0 and len(re.findall(r"\s3", sent)) > 0 and len(re.findall(r"vision|visual sign", sent)) > 0:
# print("EDSS 3 related to vision")
score = 4
break
elif len(re.findall(r"EDSS", sent)) > 0 and len(re.findall(r"\s2", sent)) > 0 and len(re.findall(r"vision|visual sign", sent)) > 0:
score = 2
break
elif len(re.findall(r"EDSS", sent)) > 0 and len(re.findall(r"\s4", sent)) > 0 and len(re.findall(r"loss of vision", sent)) > 0:
# print("EDSS 4 related to vision")
score = 4
break
phrases = sent.split(",")
for phrase in phrases:
if len(re.findall(r"vision|visual", phrase)) > 0 and len(re.findall(r"significant", phrase)) > 0 and len(re.findall(r"impair", phrase)) > 0:
if len(re.findall(r"improv", note)) > 0:
break
score = 6
break
# print("General Rule: ", score)
return score
def select_neuro_exam(note):
"""
Function used for Zhen's heurestics
"""
p = re.compile(r"NEUROLOGICAL EXAMINATION:|EXAMINATION:|NEUROLOGICAL|(?:Neurological|neurological|neurologic|Neurologic) examination")
p1 = re.compile(r"Cranial|Visual|Vision|On examination day")
p2 = re.compile(r"examination|exam", re.IGNORECASE)
sentences = sent_tokenize(note)
start_index = 0
if len(re.findall(p, note)) > 0:
for j in range(len(sentences)):
if len(re.findall(p, sentences[j])) > 0:
# start index = first sentence to mention neurological exam
start_index = j
else:
for j in range(len(sentences)):
if len(re.findall(p1, sentences[j])) > 0:
start_index = j
break
elif len(re.findall(p2, sentences[j])) > 0:
start_index = j
break
selected_note = " ".join([sentences[j] for j in range(start_index, len(sentences))])
return selected_note
@labeling_function()
def LF_visual_original(df_row):
"""
Visual subscore prediction based on Zhen's heurestics (developed through meeting with MS clinicians)
Visual subscore is determined from the highest potential visual subscore from general_rule, or outputs from predict_visual_acuity, predict_pallor, predict_scotoma and predict_visual_fields
This doesn't match with the neurostatus definitions, but seems to be a heurestics that's applied when labelling the function
This apparently gives higher accuracy than just following neurostatus defns
"""
note = df_row.text
if "edss_19" in np.asarray(df_row.index):
edss_categorical = df_row.edss_19
else:
edss_categorical = -1
# Unknown by default
score = -1
selected_note = select_neuro_exam(note)
# EDSS = 0 all scores 0
if edss_categorical == 0:
score = 0
else:
score = max(general_rule(selected_note), predict_visual_acuity(selected_note), predict_pallor(selected_note), predict_scotoma(selected_note), predict_visual_fields(selected_note))
return score
def get_visual_lfs():
# Uncomment to test just new LFs
return [LF_visual_original]
# RULES
# visual subscore depends on visual acuity, visual fields, scotoma, disc pallor
# Visual acuity
# 1: 20/20 – 20/30
# 2: 20/30 – 20/60
# 3: 20/60 – 20/100
# 4: 20/100 – 20/200
# disk pallor:
# 0: none
# 1: present
# scotom,
# 0: normal
# 1: small / no mention of size
# 2: large
# visual fields
# 0: healthy
# 1: decline / restricted
# score 0
# disc pallor: N/A = 0
# scotoma: N/A = 0
# visual field: N/A
# visual acuity: normal = 1
# score 1
# if either one or all (and/or)
# disc pallor: true = 1
# scotoma: small = 1
# visual field: N/A = 0
# visual acuity: 20/30 (0.67) - 20/20 (1.0) of worse eye = 2
# score 2:
# disc pallor: N/A = 0
# scotoma: N/A = 0
# visual field: N/A = 0
# visual acuity: 20/30(0.67) - 20/59(0.34) of worse eye with maximal visual acuity (corrected) = 2
# score 3:
# disc pallor: N/A = 0
# scotoma: large = 2
# visual field: moderate decrease = 1
# visual acuity: 20/60(0.33) - 20/99(0.21) of worse eye with maximal visual acuity (corrected) = 3
# score 4:
# disc pallor: N/A = 0
# scotoma: N/A
# visual field: decrease in worse eye
# visual acuity:
# 20/100(0.2) - 20/200(0.1) of worse eye with maximal visual acuity (corrected) = 4
# grade 3
# < 20/60 (0.33) for better eye = 1, 2
# score 5:
# disc pallor: N/A = 0
# scotoma: N/A = 0
# visual field: N/A = 0
# visual acuity:
# < 20/200 (0.1) for worse eye with maximal visual acuity = 1,2,3,4
# grade 4
# < 20/60 (0.33) for better eye with maximal visual acuity = 1,2
# score 6:
# disc pallor: N/A = 0
# scotoma: N/A = 0
# visual field: N/A =0
# visual acuity:
# grade 5
# < 20/60 (0.33) for better eye with maximal visual acuity = 1,2 | [
"michalmalyska@Michals-MacBook-Air.local"
] | michalmalyska@Michals-MacBook-Air.local |
090808a81f4df895399f7bf0cacf2ebba9dc778e | dac7095e7b5ad4dae993871c1ae45cbb7a5ce5f7 | /Character/14.Yashiro/Re/Yashiro_C.py | 2c0cd5afc2266f92fa0884ed0406cc049d41656d | [] | no_license | Lastation/RenewalAniChaos | d12a8423f4b83cb019495c59ed059451e67e0483 | c3edb29af58925de55c11110ccaf927d2b5d1b39 | refs/heads/master | 2023-08-24T11:28:35.614844 | 2023-08-22T21:23:14 | 2023-08-22T21:23:14 | 246,617,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | import variable as v;
import func.trig as trg;
function main(playerID)
{
if (v.P_WaitMain[playerID] == 0)
{
if (v.P_CountMain[playerID] == 0)
{
KillUnitAt(All, " Creep. Dunkelheit", "Anywhere", playerID);
if (v.P_LoopMain[playerID] < 2)
{
var d = 0;
var n = 8;
var r = 75 + 75 * v.P_LoopMain[playerID];
trg.Shape_Circle(playerID, 1, " Creep. Dunkelheit", d, n, r);
trg.Shape_Circle(playerID, 1, "Kakaru (Twilight)", d, n, r);
MoveLocation(v.P_LocationID[playerID], v.P_UnitID[playerID], playerID, "Anywhere");
MoveUnit(All, " Creep. Dunkelheit", playerID, "[Skill]Unit_Wait_ALL", v.P_LocationID[playerID]);
Order(" Creep. Dunkelheit", playerID, "Anywhere", Attack, v.P_LocationID[playerID]);
KillUnitAt(All, "Kakaru (Twilight)", "Anywhere", playerID);
}
else if (v.P_LoopMain[playerID] == 3)
{
var d = 0;
var n = 8;
var r = 150;
trg.Shape_Circle(playerID, 1, "40 + 1n Ghost", d, n, r);
MoveLocation(v.P_LocationID[playerID], v.P_UnitID[playerID], playerID, "Anywhere");
MoveUnit(All, "40 + 1n Ghost", playerID, "[Skill]Unit_Wait_ALL", v.P_LocationID[playerID]);
Order("40 + 1n Ghost", playerID, "Anywhere", Attack, v.P_LocationID[playerID]);
}
else if (v.P_LoopMain[playerID] == 5)
{
KillUnitAt(All, "40 + 1n Ghost", "Anywhere", playerID);
for (var i = 0; i < 3; i++)
{
var d = 0;
var n = 8;
var r = 50 + 50 * i;
trg.Shape_Circle(playerID, 1, "40 + 1n Zergling", d, n, r);
}
KillUnitAt(All, "40 + 1n Zergling", "Anywhere", playerID);
}
trg.Main_Wait(160);
v.P_LoopMain[playerID] += 1;
if (v.P_LoopMain[playerID] == 6)
{
v.P_CountMain[playerID] += 1;
v.P_LoopMain[playerID] = 0;
}
}
else if (v.P_CountMain[playerID] == 1)
{
trg.SkillEnd();
}
}
} | [
"aaiiiho@gmail.com"
] | aaiiiho@gmail.com |
950368a1376a80cc13f1e58217778e6f36f4931f | b5bbbed97f1c52180751cde5cc187158ae98cec3 | /football_api/urls.py | 465ee7a3e2df25f2d2d502b84d4abd6ea0d93e1a | [
"MIT"
] | permissive | king-tomi/total-football-api | d4066fd4005ba71df445edf46ccaead5140fa126 | 39f8efbd8b658a5a2e52458dc594f8354d28da04 | refs/heads/main | 2023-07-18T12:48:17.648402 | 2021-09-07T23:42:42 | 2021-09-07T23:42:42 | 404,150,481 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | """football_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
from rest_framework.documentation import include_docs_urls
from rest_framework.schemas import get_schema_view
API_TITLE = "Football API"
DESCRIPTION = "This is an API that lets you collects, update and mutate information about football clubs, players and fixtures."
view = get_schema_view(title=API_TITLE, version='1.0.0', url='https://footballapi.herokuapp.com')
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/', include('football.urls')),
path('api_auth/', include('rest_framework.urls')),
path('api/v1/rest_auth/', include('rest_auth.urls')),
path('api/v1/rest_auth/registration/', include('rest_auth.registration.urls')),
path('docs/', include_docs_urls(title = API_TITLE,
description=DESCRIPTION)),
path('schema/', view)
]
| [
"noreply@github.com"
] | king-tomi.noreply@github.com |
b865fa83e9b8e72b08e144110aa75b200cf807d4 | 7ec04fc867d0a48fffc05c65bff9217cfe211fe7 | /HW/统计字符串/countHotal.py | 31f95d4500041b28fa209a1a7c4dae778ad024c5 | [] | no_license | Cherry93/pythonPractic | 3b9d1f99803503073bbb2f3a58009665338bd278 | 2889183af6c9a01ab47895b23e2d6ce8c288fd4d | refs/heads/master | 2021-08-31T16:41:56.655989 | 2017-12-22T03:53:18 | 2017-12-22T03:53:18 | 115,008,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | '''
统计加州旅馆中所有单词出现的次数,并降序打印
'''
import collections
file = input("Enter a filename:")
with open(file, 'r') as fpr:
content = fpr.read()
content = content.replace("\n", '')
content1 = content.split()
print(content1)
print(content1[0].lower())
print(len(content1))
list =[]
for i in range(0,len(content1)):
list.append(content1[i].lower())
print(list)
print("\n各单词出现的个数:\n%s"%collections.Counter(list))
#content2 = content1.lower()
#print(content1)
| [
"358544104@qq.com"
] | 358544104@qq.com |
18cbb1000278e07122b15b44881c258ca0146e5d | 2ec96d507e800a8c669b1507a1bfc7b5aee697ac | /test/test_logging.py | b32bd591ce2603f9644f63853170ceb6f57d3feb | [
"Apache-2.0"
] | permissive | leizhen10000/rainbrow | 719e83ac3702d6b2309c15bd26d4d4c5e49be199 | 6a61ed3550e9fc6d2ff48eb49fed0fb81f4a41c9 | refs/heads/master | 2020-03-19T04:57:47.181019 | 2019-07-09T10:02:40 | 2019-07-09T10:02:40 | 79,789,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @Time : 2019-07-03 17:58
# @Author : Lei Zhen
# @Contract: leizhen8080@gmail.com
# @File : test_logging.py
# @Software: PyCharm
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
from common.log_util import logger
def test_logger():
logger.warning("Don't use this method")
def test_error():
try:
result = 10 / 0
except Exception:
logger.error('Failed to get result', exc_info=True)
logger.info('Finish')
| [
"leizhen8080@gmail.com"
] | leizhen8080@gmail.com |
c7eee2a22a1efb7ddb4b5278189a0424acae6d63 | 14ebcf98e7c64505839e0b7bbab89e32af7abe1e | /deep4rec/datasets/census.py | 4d58743074fe1b8d1eb3004a4a18b2d8469a1f62 | [
"Apache-2.0"
] | permissive | Luiz-FS/Deep4Rec | b021fbc36e377e1055e46e9a52f68c32018894f8 | 78c5ca74f0e0d06a9f4bb2f267817b69abd40d1d | refs/heads/master | 2020-09-03T14:27:45.828007 | 2019-11-18T01:10:30 | 2019-11-18T01:10:30 | 219,484,411 | 0 | 0 | Apache-2.0 | 2019-11-18T01:10:31 | 2019-11-04T11:22:00 | Python | UTF-8 | Python | false | false | 6,110 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset interface for Census dataset.
Census dataset: https://archive.ics.uci.edu/ml/machine-learning-databases/adult
"""
import os
import urllib.request
import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from deep4rec.datasets.dataset import Dataset
import deep4rec.utils as utils
_CSV_COLUMNS = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"gender",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income_bracket",
]
_CSV_COLUMN_DEFAULTS = [
[0],
[""],
[0],
[""],
[0],
[""],
[""],
[""],
[""],
[""],
[0],
[0],
[0],
[""],
[""],
]
class CensusDataset(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult"
def __init__(self, dataset_name, output_dir, *args, **kwargs):
super().__init__(dataset_name, output_dir, *args, **kwargs)
self.train_filename = "adult.data"
self.test_filename = "adult.test"
self.train_url = os.path.join(self.url, self.train_filename)
self.test_url = os.path.join(self.url, self.test_filename)
self.train_path = os.path.join(self.output_dir, self.train_filename)
self.test_path = os.path.join(self.output_dir, self.test_filename)
self.preprocessed_path = os.path.join(self.output_dir, self.dataset_name)
self._ord_encoder = OrdinalEncoder()
self._occupation_ord_encoder = OrdinalEncoder()
self._one_hot_encoder = OneHotEncoder(sparse=False)
def _download_and_clean_file(self, url, filename):
"""Downloads data from url, and makes changes to match the CSV format."""
temp_file, _ = urllib.request.urlretrieve(url)
with tf.gfile.Open(temp_file, "r") as temp_eval_file:
with tf.gfile.Open(filename, "w") as eval_file:
for line in temp_eval_file:
line = line.strip()
line = line.replace(", ", ",")
if not line or "," not in line:
continue
if line[-1] == ".":
line = line[:-1]
line += "\n"
eval_file.write(line)
tf.gfile.Remove(temp_file)
def download(self):
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
self._download_and_clean_file(self.train_url, self.train_path)
self._download_and_clean_file(self.test_url, self.test_path)
def check_downloaded(self):
return os.path.exists(self.train_path) and os.path.exists(self.test_path)
def check_preprocessed(self):
return False
def _preprocess(self, filename, train_data=False):
df = pd.read_csv(filename, names=_CSV_COLUMNS)
# Categorical columns
df_base_columns = df[
["education", "marital_status", "relationship", "workclass"]
]
if train_data:
base_columns = self._ord_encoder.fit_transform(df_base_columns.values)
occupation_column = self._occupation_ord_encoder.fit_transform(
df["occupation"].values.reshape(-1, 1)
)
one_hot_base_columns = self._one_hot_encoder.fit_transform(
df_base_columns.values
)
else:
base_columns = self._ord_encoder.transform(df_base_columns.values)
occupation_column = self._occupation_ord_encoder.transform(
df["occupation"].values.reshape(-1, 1)
)
one_hot_base_columns = self._one_hot_encoder.transform(
df_base_columns.values
)
# Age buckets
buckets = [0, 18, 25, 30, 35, 40, 45, 50, 55, 60, 65, 200]
age_buckets = np.array(
pd.cut(df["age"], buckets, labels=range(len(buckets) - 1)).values
)
wide_columns = np.concatenate(
(base_columns, age_buckets.reshape(-1, 1)), axis=1
)
numerical_columns = df[
["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"]
].values
deep_columns = np.concatenate((one_hot_base_columns, numerical_columns), axis=1)
labels = np.where(df["income_bracket"].values == ">50K", 1, 0)
return wide_columns, deep_columns, occupation_column, labels
def preprocess(self):
self.train_wide_data, self.train_deep_data, self.train_embedding_data, self.train_y = self._preprocess(
self.train_path, train_data=True
)
self.test_wide_data, self.test_deep_data, self.test_embedding_data, self.test_y = self._preprocess(
self.test_path, train_data=False
)
@property
def train_size(self):
return len(self.train_wide_data)
@property
def train_features(self):
return [self.train_embedding_data, self.train_wide_data, self.train_deep_data]
@property
def test_features(self):
return [self.test_embedding_data, self.test_wide_data, self.test_deep_data]
@property
def num_features_one_hot(self):
return len(np.unique(self.train_embedding_data))
@property
def num_features(self):
return 1
| [
"mariannelinharesm@gmail.com"
] | mariannelinharesm@gmail.com |
3fc886a7c14ade2e0cb83e4bcde5765fa6f76294 | 9144f651ca34ba324270bbcd216c6f8396aa4602 | /pro4_2.py | 56371678cf000e81ff854ba1890b524610a9cdec | [] | no_license | NishantJindal41/guvi3 | 1a26bf132e4c660b287b42e8b538d2496d8468f4 | 3d1ba6626b4684abacbbd0f9c0a3e2d44e88d85c | refs/heads/master | 2020-04-15T06:19:28.831298 | 2019-04-16T16:24:22 | 2019-04-16T16:24:22 | 164,456,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | n, m = map(int, input().split())
A = []
for i in range(n);
A.append(list(map(int,input().split())))
for i in range(len(A));
A[i]=sorted(A[i])
A = [list(i) for i in zip(*x)]
for i in range(len(A));
A[i]=sorted(A[i])
A = [list(i) for i in zip(*A)];
print(A);
}
| [
"noreply@github.com"
] | NishantJindal41.noreply@github.com |
20f38bed70c1832fcabd62c4095639a2b179b570 | 2b63c99eaa0c4866e5ea956ab0c5207b01ce6c0f | /bin/easy_install-3.2 | 2753b93978d3263f58bff7abb72623f52d4505d4 | [] | no_license | raspicamproject/raspi-python | 6f55b7c989e6f98d16a06f2f3b245f296af5d2a4 | 42380d47fb7974b63f21f56bb1093c246074ed76 | refs/heads/master | 2021-01-15T11:25:38.106055 | 2015-03-19T16:05:43 | 2015-03-19T16:05:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | 2 | #!/home/pi/raspi-cam/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.24','console_scripts','easy_install-3.2'
__requires__ = 'distribute==0.6.24'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.24', 'console_scripts', 'easy_install-3.2')()
)
| [
"lebras.simon@gmail.com"
] | lebras.simon@gmail.com |
098f5bd193fa7c2e62926d0e9b70554529503192 | 1d218b6af654a77aa8a3592e40f93fbc4f584319 | /cirq-core/cirq/sim/act_on_state_vector_args.py | 07f86b370d922667fa62ff5ea2779f0a32ba82d1 | [
"Apache-2.0"
] | permissive | eendebakpt/Cirq | dcf2d15781aea201760b37140ab1505ef570ee1c | f8ffbea1b68b483f8dc716781f2c514a02aa765e | refs/heads/master | 2023-08-30T10:30:41.653293 | 2021-09-23T19:01:32 | 2021-09-23T19:01:32 | 409,719,785 | 0 | 0 | Apache-2.0 | 2021-09-23T19:29:17 | 2021-09-23T19:29:16 | null | UTF-8 | Python | false | false | 13,551 | py | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects and methods for acting efficiently on a state vector."""
from typing import Any, Tuple, TYPE_CHECKING, Union, Dict, List, Sequence, Iterable
import numpy as np
from cirq import linalg, protocols, sim
from cirq._compat import deprecated_parameter
from cirq.sim.act_on_args import ActOnArgs, strat_act_on_from_apply_decompose
from cirq.linalg import transformations
if TYPE_CHECKING:
import cirq
def _rewrite_deprecated_args(args, kwargs):
if len(args) > 3:
kwargs['axes'] = args[3]
if len(args) > 4:
kwargs['prng'] = args[4]
if len(args) > 5:
kwargs['log_of_measurement_results'] = args[5]
if len(args) > 6:
kwargs['qubits'] = args[6]
return args[:3], kwargs
class ActOnStateVectorArgs(ActOnArgs):
"""State and context for an operation acting on a state vector.
There are two common ways to act on this object:
1. Directly edit the `target_tensor` property, which is storing the state
vector of the quantum system as a numpy array with one axis per qudit.
2. Overwrite the `available_buffer` property with the new state vector, and
then pass `available_buffer` into `swap_target_tensor_for`.
"""
    # Transitional shim (until v0.13): the constructor used to take an `axes`
    # argument, and older callers may still pass the deprecated positional
    # layout. The `match` predicate below detects those legacy call shapes —
    # `axes` given by keyword, or a positional layout where the prng landed
    # at index 4 — and `_rewrite_deprecated_args` remaps them onto the
    # current signature before `__init__` runs.
    @deprecated_parameter(
        deadline='v0.13',
        fix='No longer needed. `protocols.act_on` infers axes.',
        parameter_desc='axes',
        match=lambda args, kwargs: 'axes' in kwargs
        or ('prng' in kwargs and len(args) == 4)
        or (len(args) > 4 and isinstance(args[4], np.random.RandomState)),
        rewrite=_rewrite_deprecated_args,
    )
    def __init__(
        self,
        target_tensor: np.ndarray,
        available_buffer: np.ndarray,
        prng: np.random.RandomState,
        log_of_measurement_results: Dict[str, Any],
        qubits: Sequence['cirq.Qid'] = None,
        axes: Iterable[int] = None,
    ):
        """Inits ActOnStateVectorArgs.

        Args:
            target_tensor: The state vector to act on, stored as a numpy array
                with one dimension for each qubit in the system. Operations are
                expected to perform inplace edits of this object.
            available_buffer: A workspace with the same shape and dtype as
                `target_tensor`. Used by operations that cannot be applied to
                `target_tensor` inline, in order to avoid unnecessary
                allocations. Passing `available_buffer` into
                `swap_target_tensor_for` will swap it for `target_tensor`.
            qubits: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            prng: The pseudo random number generator to use for probabilistic
                effects.
            log_of_measurement_results: A mutable object that measurements are
                being recorded into.
            axes: The indices of axes corresponding to the qubits that the
                operation is supposed to act upon.
        """
        # Shared bookkeeping (prng, qubit ordering, axes, measurement log)
        # lives in the ActOnArgs base class; this subclass only owns the
        # two numpy tensors.
        super().__init__(prng, qubits, axes, log_of_measurement_results)
        self.target_tensor = target_tensor
        self.available_buffer = available_buffer
def swap_target_tensor_for(self, new_target_tensor: np.ndarray):
"""Gives a new state vector for the system.
Typically, the new state vector should be `args.available_buffer` where
`args` is this `cirq.ActOnStateVectorArgs` instance.
Args:
new_target_tensor: The new system state. Must have the same shape
and dtype as the old system state.
"""
if new_target_tensor is self.available_buffer:
self.available_buffer = self.target_tensor
self.target_tensor = new_target_tensor
    def subspace_index(
        self, axes: Sequence[int], little_endian_bits_int: int = 0, *, big_endian_bits_int: int = 0
    ) -> Tuple[Union[slice, int, 'ellipsis'], ...]:
        """An index for the subspace where the target axes equal a value.

        Args:
            axes: The axes of `target_tensor` (one per targeted qubit) whose
                values are fixed by the packed integer arguments below. All
                other axes are left as full slices.
            little_endian_bits_int: The desired value of the qubits at the
                targeted `axes`, packed into an integer. The least significant
                bit of the integer is the desired bit for the first axis, and
                so forth in increasing order. Can't be specified at the same
                time as `big_endian_bits_int`.

                When operating on qudits instead of qubits, the same basic logic
                applies but in a different basis. For example, if the target
                axes have dimension [a:2, b:3, c:2] then the integer 10
                decomposes into [a=0, b=2, c=1] via 7 = 1*(3*2) + 2*(2) + 0.
            big_endian_bits_int: The desired value of the qubits at the
                targeted `axes`, packed into an integer. The most significant
                bit of the integer is the desired bit for the first axis, and
                so forth in decreasing order. Can't be specified at the same
                time as `little_endian_bits_int`.

                When operating on qudits instead of qubits, the same basic logic
                applies but in a different basis. For example, if the target
                axes have dimension [a:2, b:3, c:2] then the integer 10
                decomposes into [a=1, b=2, c=0] via 7 = 1*(3*2) + 2*(2) + 0.

        Returns:
            A value that can be used to index into `target_tensor` and
            `available_buffer`, and manipulate only the part of Hilbert space
            corresponding to a given bit assignment.

        Example:
            If `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and
            then this method will return the following when given
            `little_endian_bits=0b01`:

                `(slice(None), 0, slice(None), 1, Ellipsis)`

            Therefore the following two lines would be equivalent:

                args.target_tensor[args.subspace_index(0b01)] += 1

                args.target_tensor[:, 0, :, 1] += 1
        """
        return linalg.slice_for_qubits_equal_to(
            axes,
            little_endian_qureg_value=little_endian_bits_int,
            big_endian_qureg_value=big_endian_bits_int,
            qid_shape=self.target_tensor.shape,
        )
def _act_on_fallback_(
self,
action: Union['cirq.Operation', 'cirq.Gate'],
qubits: Sequence['cirq.Qid'],
allow_decompose: bool = True,
) -> bool:
strats = [
_strat_act_on_state_vector_from_apply_unitary,
_strat_act_on_state_vector_from_mixture,
_strat_act_on_state_vector_from_channel,
]
if allow_decompose:
strats.append(strat_act_on_from_apply_decompose)
# Try each strategy, stopping if one works.
for strat in strats:
result = strat(action, self, qubits)
if result is False:
break # coverage: ignore
if result is True:
return True
assert result is NotImplemented, str(result)
raise TypeError(
"Can't simulate operations that don't implement "
"SupportsUnitary, SupportsConsistentApplyUnitary, "
"SupportsMixture or is a measurement: {!r}".format(action)
)
def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:
"""Delegates the call to measure the state vector."""
bits, _ = sim.measure_state_vector(
self.target_tensor,
self.get_axes(qubits),
out=self.target_tensor,
qid_shape=self.target_tensor.shape,
seed=self.prng,
)
return bits
def _on_copy(self, target: 'ActOnStateVectorArgs'):
target.target_tensor = self.target_tensor.copy()
target.available_buffer = self.available_buffer.copy()
def _on_kronecker_product(self, other: 'ActOnStateVectorArgs', target: 'ActOnStateVectorArgs'):
target_tensor = transformations.state_vector_kronecker_product(
self.target_tensor, other.target_tensor
)
target.target_tensor = target_tensor
target.available_buffer = np.empty_like(target_tensor)
def _on_factor(
self,
qubits: Sequence['cirq.Qid'],
extracted: 'ActOnStateVectorArgs',
remainder: 'ActOnStateVectorArgs',
validate=True,
atol=1e-07,
):
axes = self.get_axes(qubits)
extracted_tensor, remainder_tensor = transformations.factor_state_vector(
self.target_tensor, axes, validate=validate, atol=atol
)
extracted.target_tensor = extracted_tensor
extracted.available_buffer = np.empty_like(extracted_tensor)
remainder.target_tensor = remainder_tensor
remainder.available_buffer = np.empty_like(remainder_tensor)
def _on_transpose_to_qubit_order(
self, qubits: Sequence['cirq.Qid'], target: 'ActOnStateVectorArgs'
):
axes = self.get_axes(qubits)
new_tensor = transformations.transpose_state_vector_to_axis_order(self.target_tensor, axes)
target.target_tensor = new_tensor
target.available_buffer = np.empty_like(new_tensor)
def sample(
self,
qubits: Sequence['cirq.Qid'],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
indices = [self.qubit_map[q] for q in qubits]
return sim.sample_state_vector(
self.target_tensor,
indices,
qid_shape=tuple(q.dimension for q in self.qubits),
repetitions=repetitions,
seed=seed,
)
def _strat_act_on_state_vector_from_apply_unitary(
unitary_value: Any,
args: 'cirq.ActOnStateVectorArgs',
qubits: Sequence['cirq.Qid'],
) -> bool:
new_target_tensor = protocols.apply_unitary(
unitary_value,
protocols.ApplyUnitaryArgs(
target_tensor=args.target_tensor,
available_buffer=args.available_buffer,
axes=args.get_axes(qubits),
),
allow_decompose=False,
default=NotImplemented,
)
if new_target_tensor is NotImplemented:
return NotImplemented
args.swap_target_tensor_for(new_target_tensor)
return True
def _strat_act_on_state_vector_from_mixture(
action: Any, args: 'cirq.ActOnStateVectorArgs', qubits: Sequence['cirq.Qid']
) -> bool:
mixture = protocols.mixture(action, default=None)
if mixture is None:
return NotImplemented
probabilities, unitaries = zip(*mixture)
index = args.prng.choice(range(len(unitaries)), p=probabilities)
shape = protocols.qid_shape(action) * 2
unitary = unitaries[index].astype(args.target_tensor.dtype).reshape(shape)
linalg.targeted_left_multiply(
unitary, args.target_tensor, args.get_axes(qubits), out=args.available_buffer
)
args.swap_target_tensor_for(args.available_buffer)
if protocols.is_measurement(action):
key = protocols.measurement_key_name(action)
args.log_of_measurement_results[key] = [index]
return True
def _strat_act_on_state_vector_from_channel(
action: Any, args: 'cirq.ActOnStateVectorArgs', qubits: Sequence['cirq.Qid']
) -> bool:
kraus_operators = protocols.kraus(action, default=None)
if kraus_operators is None:
return NotImplemented
def prepare_into_buffer(k: int):
linalg.targeted_left_multiply(
left_matrix=kraus_tensors[k],
right_target=args.target_tensor,
target_axes=args.get_axes(qubits),
out=args.available_buffer,
)
shape = protocols.qid_shape(action)
kraus_tensors = [e.reshape(shape * 2).astype(args.target_tensor.dtype) for e in kraus_operators]
p = args.prng.random()
weight = None
fallback_weight = 0
fallback_weight_index = 0
for index in range(len(kraus_tensors)):
prepare_into_buffer(index)
weight = np.linalg.norm(args.available_buffer) ** 2
if weight > fallback_weight:
fallback_weight_index = index
fallback_weight = weight
p -= weight
if p < 0:
break
assert weight is not None, "No Kraus operators"
if p >= 0 or weight == 0:
# Floating point error resulted in a malformed sample.
# Fall back to the most likely case.
prepare_into_buffer(fallback_weight_index)
weight = fallback_weight
index = fallback_weight_index
args.available_buffer /= np.sqrt(weight)
args.swap_target_tensor_for(args.available_buffer)
if protocols.is_measurement(action):
key = protocols.measurement_key_name(action)
args.log_of_measurement_results[key] = [index]
return True
| [
"noreply@github.com"
] | eendebakpt.noreply@github.com |
b0dd12f359d47826a34ff45e3eec933fcad07418 | b9bd74516dc0f24d336f35b14780a171030ea529 | /18.随机森林算法.py | ba4d7db1198edb81bfe134670e0a5c5bf50c3801 | [] | no_license | DataIsStrength/scikit-learn | 814326c03743cacb62b68bb5cfda8e4e75b2fe09 | 2d581032404f67ad628da75a64c1cba514b77bf7 | refs/heads/main | 2023-08-29T07:47:28.600815 | 2021-10-30T14:29:26 | 2021-10-30T14:29:26 | 422,897,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,643 | py | # -*- coding: utf-8 -*-
"""
随机森林算法
"""
'''
1.随机森林:
随机森林是包含多个决策树的分类器,其输出的类别是由个别树输出的类别的众数而定。
例如,训练了5个树,其中4个树结果是True,1个数结果False,那么最终结果就是True。
2.Bootstrap抽样:
即随机有放回抽样,是一种重抽样的方法,为了形成随机森林的多个决策树,
要采用Bootstrap抽样,具体过程如下:
①抽取样本:在N个样本中Bootstrap抽取N个,形成一个树的训练数据集。
②选择特征:如果一共有M个特征,则选择m个来训练决策树,m<<M,这样的好处是可以降维。
'''
import pandas as pd
#导入字典向量化类
from sklearn.feature_extraction import DictVectorizer
#导入数据集划分函数
from sklearn.model_selection import train_test_split
#导入随机森林预估器类
from sklearn.ensemble import RandomForestClassifier
#导入网格搜索函数
from sklearn.model_selection import GridSearchCV
#从网站下载数据
data=pd.read_csv('titanic.csv')
#选择特征值
x=data[['pclass','age','sex']].copy()
#选择目标值
y=data['survived'].copy()
#缺失值处理,inplace设置为True表示对原始数据进行修改
#如果inplace设置为False,则修改后需要赋值给一个新的变量,而原数据不变
x['age'].fillna(x['age'].mean(),inplace=True)
#特征工程
#将x转换成字典数据x.to_dict,设置orient参数可以调整格式,一般常用records
x=x.to_dict(orient='records')
#实例化字典向量化类
transform=DictVectorizer(sparse=False)
#调用fit_transform
x=transform.fit_transform(x)
print(transform.get_feature_names())
print(x)
#划分数据集,设置测试集占比30%
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
#实例化随机森林预估器类
estimator=RandomForestClassifier()
#设置备选超参数,n_estimators是决策树的数量,max_depth是单个树的最大深度
param={'n_estimators':[120,200,300,500,800,1200],'max_depth':[5,8,15,25,30]}
#对模型进行2折交叉验证的网格搜索
estimator=GridSearchCV(estimator,param_grid=param,cv=2)
#训练模型
estimator.fit(x_train,y_train)
#验证和评估模型
print('预测的准确率为:',estimator.score(x_test,y_test))
'''
随机森林算法总结:
1.在当前所有算法中,具有极好的准确率。
2.能够有效地运行在大数据集上,处理具有高维特征的输入样本,而且不需要降维。
3.能够评估各个特征在分类问题上的重要性。
''' | [
"noreply@github.com"
] | DataIsStrength.noreply@github.com |
357383ee731f6f3e548ae395bba056b0b0a71aeb | 9acbda554a136235ef2f605281ec39e6c6d8cd63 | /billman/settings.py | 7b47311e624aa21bf2d35d7e64239afeefdaf76a | [] | no_license | arpheno/billman | 23dfa9d6b05ce022ec8b2165c6e5bdf2dad51d91 | 11b6d9386d4e450f75b9c809a48626b2379a39eb | refs/heads/master | 2020-04-10T09:22:55.242570 | 2014-06-07T17:37:51 | 2014-06-07T17:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | """
Django settings for billman project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'abwsz7w&+fzjjwjn%uql(*f=s^fy_$d1d#oc$9)q_v@weh9fp#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'billman.urls'
WSGI_APPLICATION = 'billman.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| [
"ciupakm@gmail.com"
] | ciupakm@gmail.com |
66768fa558987437e75dd0b92ce3c1a561c04efb | b9650416b09de5ca8f4063a066e56586060f1cf0 | /Tutorial29/tutorial29.py | 27ebdb2a5c415b4b27e6a0505715c2550d1b34f9 | [] | no_license | giova0/cursopython | e0ef2f4edacab706ed896df5b8b22fdc3e9f1b23 | 19790bb6f82dcae80d10df9f5d84df85ae4fea54 | refs/heads/master | 2023-02-21T10:44:51.597349 | 2021-01-26T00:35:02 | 2021-01-26T00:35:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | #import modulo
from modulo import *
suma(8,9)
resta(4,5)
multiplicacion(6,7)
input() | [
"jesusjppr@gmail.com"
] | jesusjppr@gmail.com |
ed42e42af0566b7f01fc67a855eb8e7c557d20a2 | 588de5142e35002b618c5777751f141c075be8fd | /hourofci/hourofci_widgets.py | 2b89a07e5c04fda80ff9b97562caa96a587a59f7 | [] | no_license | IrisZhiweiYan/test_hourofci | 5464035164f4657351c70bc81dfd52694ccd9096 | a4371f88fcd091ae939655da5087b1010a7795f9 | refs/heads/master | 2021-07-14T01:22:27.932172 | 2020-11-09T05:40:27 | 2020-11-09T05:40:27 | 219,059,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from hourofci import *
from .buttons import button
# import buttons
# CHANGE in v2: pass the answer catalog in the notebook to the widget function
def IntSlider(question, hash_answer_catalog):
# start_time = time.time()
int_range = widgets.IntSlider()
display(int_range)
value = 10
# Iris: where to get change?
def on_value_change(change):
# CHANGE: append -> replace (only keep the last answers between two submissions)
Answer_Dict[question]=[change["new"]]
int_range.observe(on_value_change, names='value')
# Button Evaluator with arguments (desired_answer, frmt) | Fmrt is the format to evaluate like single item, list, dict, etc
# CHANGE in v2: pass the answer catalog to the submit button function to valid
button(question, hash_answer_catalog) | [
"irisgogo.yan@gmail.com"
] | irisgogo.yan@gmail.com |
ca7766b7e9d85b36afe08c3d90244d6b446ff5e9 | 99d3d6132908841edcbc9deec3f590b0e9eba5b6 | /venvpip/bin/macho_standalone | 14da32f6ac9d00114ceb371da83895ecd90dbe2a | [] | no_license | nataliawcislo/cutvideo | 7979066b6ee146d78308832e137dbe0028ccca11 | 66401b15bff3a7e6a01d8eb2d7e308b8bd04d302 | refs/heads/main | 2023-07-28T07:05:06.709878 | 2021-09-14T13:37:08 | 2021-09-14T13:37:08 | 391,267,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | #!/Users/natalka/PycharmProjects/cutvideo/venvpip/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from macholib.macho_standalone import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"wcislonatalia1998@gmail.com"
] | wcislonatalia1998@gmail.com | |
215ae356baf15b509dbf0205fdc664d254fcde92 | 09fbe030de6322ab7d141276fc1f8019cc1604e5 | /migrations/versions/1da835fbb866_season_model.py | 9bdfea12529d693c6a23bdc367ef38e0eb5d0bd2 | [] | no_license | cpkm/darts-site | 0f2dece8c563adfa67f85031d0a99ac947e1508a | 1be939ffe8ba2e58e093adac81993aa19079d1e1 | refs/heads/master | 2022-12-10T00:30:09.173821 | 2021-11-24T01:43:03 | 2021-11-24T01:43:03 | 158,226,996 | 0 | 2 | null | 2022-12-08T01:22:29 | 2018-11-19T13:24:05 | Python | UTF-8 | Python | false | false | 1,492 | py | """season model
Revision ID: 1da835fbb866
Revises: 8689d43c428c
Create Date: 2018-11-27 16:58:53.743252
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1da835fbb866'
down_revision = '8689d43c428c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('season',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('season_name', sa.String(length=64), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('end_date', sa.Date(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_season'))
)
with op.batch_alter_table('season', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_season_end_date'), ['end_date'], unique=False)
batch_op.create_index(batch_op.f('ix_season_season_name'), ['season_name'], unique=True)
batch_op.create_index(batch_op.f('ix_season_start_date'), ['start_date'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('season', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_season_start_date'))
batch_op.drop_index(batch_op.f('ix_season_season_name'))
batch_op.drop_index(batch_op.f('ix_season_end_date'))
op.drop_table('season')
# ### end Alembic commands ###
| [
"20861192+cpkm@users.noreply.github.com"
] | 20861192+cpkm@users.noreply.github.com |
0316e5d65124316a03b71cca0c7a19fb8a058d61 | eaf2c1331eeef64d276d7d77127191a0b7ac6908 | /home/models.py | a8f4e4b4114e33c3a14700d090085fb9619d7358 | [] | no_license | anubhavsrwn/Basic-Django-App | 9378d9244bc34437498c7792646d0fe933e5c40a | de6ccdb378927b35c6c7b916c80290f4d573ea69 | refs/heads/master | 2023-06-26T21:39:20.879087 | 2021-07-20T08:33:40 | 2021-07-20T08:33:40 | 387,726,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.db import models
# Create your models here.
class Contact(models.Model):
name = models.CharField(max_length=122)
email = models.CharField(max_length=122)
phone = models.CharField(max_length=12)
desc = models.TextField()
date = models.DateField()
| [
"anubhav.s@technovert.net"
] | anubhav.s@technovert.net |
e469b1c0df202afbdf63411ba8abdbd4527e1190 | 493f99b210303d019f62195ae8dde9d02ee1b81f | /indy_node/test/api/test_rich_schema_objects_reply.py | 56dbda7791bdd23e1fda9ccddcde4b9f7330077d | [
"Apache-2.0"
] | permissive | darklordz-217/indy-node | 745baa357fe739bac20433cb2daa0f7c5a2f2caf | 4d2f6a9dc0ff136117f8766a4f2cf70b239404e0 | refs/heads/master | 2022-10-06T17:01:12.414734 | 2020-06-11T08:49:17 | 2020-06-11T08:49:17 | 271,472,931 | 2 | 0 | Apache-2.0 | 2020-06-11T09:04:23 | 2020-06-11T06:48:21 | Python | UTF-8 | Python | false | false | 2,499 | py | import json
import pytest
from indy_common.constants import JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, RICH_SCHEMA, RICH_SCHEMA_ENCODING, \
RICH_SCHEMA_MAPPING, RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, RS_MAPPING_TYPE_VALUE, \
RS_ENCODING_TYPE_VALUE, RS_SCHEMA_TYPE_VALUE, RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE
from indy_node.test.api.helper import validate_write_reply, validate_rich_schema_txn, sdk_build_rich_schema_request
from indy_node.test.rich_schema.templates import RICH_SCHEMA_EX1, W3C_BASE_CONTEXT, RICH_SCHEMA_ENCODING_EX1, \
RICH_SCHEMA_MAPPING_EX1, RICH_SCHEMA_CRED_DEF_EX1, RICH_SCHEMA_PRES_DEF_EX1
from plenum.common.util import randomString
from plenum.test.helper import sdk_get_reply, sdk_sign_and_submit_req
# The order of creation is essential as some rich schema object reference others by ID
# Encoding's id must be equal to the one used in RICH_SCHEMA_MAPPING_EX1
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id',
[(JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, W3C_BASE_CONTEXT, randomString()),
(RICH_SCHEMA, RS_SCHEMA_TYPE_VALUE, RICH_SCHEMA_EX1, RICH_SCHEMA_EX1['@id']),
(RICH_SCHEMA_ENCODING, RS_ENCODING_TYPE_VALUE, RICH_SCHEMA_ENCODING_EX1,
"did:sov:1x9F8ZmxuvDqRiqqY29x6dx9oU4qwFTkPbDpWtwGbdUsrCD"),
(RICH_SCHEMA_MAPPING, RS_MAPPING_TYPE_VALUE, RICH_SCHEMA_MAPPING_EX1,
RICH_SCHEMA_MAPPING_EX1['@id']),
(RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, RICH_SCHEMA_CRED_DEF_EX1, randomString()),
(RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE, RICH_SCHEMA_PRES_DEF_EX1,
RICH_SCHEMA_PRES_DEF_EX1['@id'])])
def test_rich_schema_object_reply_is_valid(looper, sdk_pool_handle, sdk_wallet_steward,
txn_type, rs_type, content, rs_id):
request = sdk_build_rich_schema_request(looper, sdk_wallet_steward,
txn_type=txn_type, rs_id=rs_id, rs_name=randomString(),
rs_version='1.0', rs_type=rs_type,
rs_content=json.dumps(content))
reply = sdk_get_reply(looper, sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_steward, request))[1]
validate_write_reply(reply)
validate_rich_schema_txn(reply['result']['txn'], txn_type)
| [
"alexander.sherbakov@dsr-corporation.com"
] | alexander.sherbakov@dsr-corporation.com |
c3589908c3d02252488818d8c7ea24447f365be5 | af0d9efc37cc79b170cafcee1a5044588167761c | /clean.py | 0dc4360098b244e5377974c16aec64899d8bb40d | [] | no_license | AcmeCleanPower/HRSLToolkit | 41ba3a817bc5dfb59bdb1bca32def61effbc3902 | 4b5144c775b6bb7292dfec132eabc839cc9c7374 | refs/heads/master | 2021-01-18T20:04:18.656066 | 2017-04-07T20:43:14 | 2017-04-07T20:43:14 | 86,934,250 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from skimage import morphology
from skimage.external import tifffile as tif
# borrowed from tiff_tools.py
def read_array_from_tiff(fin, band=1):
tiff = gdal.Open(fin)
return np.array(tiff.GetRasterBand(band).ReadAsArray())
def med_filter(mr, n=8):
med_denoise = sp.ndimage.median_filter(mr, n)
return med_denoise
def gauss_filter(mr, n=8):
gauss_denoise = sp.ndimage.gaussian_filter(mr, n)
return gauss_denoise
# def tifsave(denoised, name='denoised.tif'):
# tif.imsave(name, denoised) | [
"stephen.s.c.chan@gmail.com"
] | stephen.s.c.chan@gmail.com |
44c995550d05f889cc581a0508223c1b95b5eb2d | f3075dd8f88e2d1d3c6ee350098d7bd42238b92c | /naver_map/naver_gigye.py | 1441572bc7711d0e83c1a8ca27bc707c1bafb40c | [] | no_license | jjjjooonno/kmong | 8257a208918b947569d8868605051c7c801f5fa6 | 6a38f5fa4ff031602c70c6ed925fa3abdb6af38d | refs/heads/master | 2020-03-28T22:19:48.693145 | 2018-09-18T03:35:39 | 2018-09-18T03:35:39 | 149,224,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | from selenium import webdriver
from bs4 import BeautifulSoup
from pandas import *
import time
import re
dt = read_excel('행정구역코드(법정동코드)_전체본.xls',0)
dt_dong = dt['법정동명']
dt_dong_new = []
for i in dt_dong:
if i[-1] == '동':
dt_dong_new.append(i)
query = []
for i in dt_dong_new:
query.append(i+' 제조업 > 기계, 장비제조')
names = []
tels = []
addrss = []
dr = webdriver.Chrome('/Users/joono/chromedriver')
dr.get('https://map.naver.com/')
dr.find_element_by_xpath('//*[@id="search-input"]').send_keys(query[0])
dr.find_element_by_xpath('//*[@id="header"]/div[1]/fieldset/button').click()
time.sleep(1)
drt = dr.page_source
soup = BeautifulSoup(drt,'html.parser')
num = soup.find('span',attrs = {'class':'n'}).text[]
whole = soup.find_all('dl',attrs={'class':'lsnx_det'})
for i in whole:
print(str(i))
i = str(i)
if 'href=\"#\">' in i:
name = i.split('href="#">')[1].split('</a>')[0]
names.append(name.strip())
else:
names.append('없음')
if 'class=\"addr\">' in i:
addr = i.split('class="addr">')[1].split('<a')[0]
addrss.append(addr.strip())
else:
addrss.append('없음')
if 'class=\"tel\">' in i:
tel1 = i.split('class="tel">')[1].split('</dd>')[0]
tels.append(tel1.strip())
else:
tels.append('없음')
print(names)
print(tels)
print(addrss) | [
"jjjjooonno@gmail.com"
] | jjjjooonno@gmail.com |
cd71c189fbf967e745e42e3248c4421abdfecb06 | 8b2be934a63fee5e542bb818e81d1452b31d0ecc | /Candidate_models/final_rnn.py | d18abffcc2c1e35e0e6a9f67ccb9f5d595851a7b | [] | no_license | Danny1379/Computational_intelligence_final_project_NLP | 06cde2ce40c795c57eb0f31235a9ffc98178e7fa | 7c8dc7b69e2f8458959c44b8b1a1e16be300651e | refs/heads/main | 2023-05-14T09:23:13.328895 | 2021-06-01T11:08:50 | 2021-06-01T11:08:50 | 338,410,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,850 | py | import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.utils import np_utils
vocab_size = 40000
embedding_dim = 100
trunc_type = 'post'
padding_type = 'post'
oov_tok = "<OOV>"
def load_data():
path = "train.csv"
df = pd.read_csv(path)
print(np.shape(df))
return df
def get_labels_features(df):
df.Text = df.Text.str.replace('\d', '')
df.Text = df.Text.str.replace('\n', ' ')
df.Text = df.Text.str.replace('.', " ")
df.Text = df.Text.str.replace(',', " ")
x = df['Text']
y = df['Category']
print(np.shape(x), np.shape(y))
return x, y
def preprocess_encode(y):
label_enc = LabelEncoder()
y = label_enc.fit_transform(y)
y = np_utils.to_categorical(y)
return y
def split(x, y):
return train_test_split(x, y, test_size=0.10)
def tokenize(x_train, x_test):
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(x_train)
training_sequences = tokenizer.texts_to_sequences(x_train)
max_length = get_sequence_length(training_sequences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(x_test)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
return training_padded, testing_padded, max_length
# find average sequence length might go for maximum length !
def get_sequence_length(training_sequences):
sum = 0
for i in range(len(training_sequences)):
sum += len(training_sequences[i])
max_length = int(sum / len(training_sequences))
print("sequence length is : ", max_length)
return max_length
def get_array(training_padded, y_train, testing_padded, y_test):
training_padded = np.asarray(training_padded)
training_labels = np.asarray(y_train)
testing_padded = np.asarray(testing_padded)
testing_labels = np.asarray(y_test)
return training_padded, training_labels, testing_padded, testing_labels
def get_model(max_length):
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.SpatialDropout1D(0.5),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(max_length)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(100)),
tf.keras.layers.Dense(34, activation="softmax")
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def train_model(model, training_padded, training_labels, testing_padded, testing_labels):
num_epochs = 10
history = model.fit(training_padded, training_labels, epochs=num_epochs,
validation_data=(testing_padded, testing_labels), verbose=1, batch_size=256)
def main():
df = load_data()
x, y = get_labels_features(df)
y = preprocess_encode(y)
x_train, x_test, y_train, y_test = split(x, y)
x_train, x_test, max_length = tokenize(x_train, x_test)
x_train, y_train, x_test, y_test = get_array(x_train, y_train, x_test, y_test)
model = get_model(max_length)
train_model(model, x_train, y_train, x_test, y_test)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Danny1379.noreply@github.com |
1b2d9640e14054aa031bb6464300d7d9f16da3d7 | 14da21d280a8508de934a68508612af36b42c43c | /mysite/text/migrations/backup/0017_auto_20200714_1701.py | 435eeb64ccf5c4e3f0f3a106b2040fe4d5469db4 | [] | no_license | Shoupinglianginnolux/textmining | dd8bc2e2d5c8ea576fffb8c197f6bcffa64272e1 | c811589c5d675f65a6211aec1df5d408ca1bd98c | refs/heads/main | 2023-08-07T12:52:05.709080 | 2021-10-01T07:07:20 | 2021-10-01T07:07:20 | 412,362,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | # Generated by Django 3.0.4 on 2020-07-14 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('text', '0016_auto_20200713_1111'),
]
operations = [
migrations.CreateModel(
name='TMPSRQ',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('SRNumber', models.CharField(max_length=12, verbose_name='SRNumber')),
('SRType', models.CharField(max_length=5, verbose_name='SRType')),
('CreatedDate', models.DateTimeField(blank=True, null=True)),
('Model', models.CharField(max_length=15, verbose_name='Model')),
('SerialNumber', models.CharField(blank=True, max_length=25, null=True)),
('ErrorCode', models.CharField(max_length=30)),
('InternalNotes', models.CharField(max_length=300)),
('PredictErrorCode', models.CharField(blank=True, max_length=10, null=True)),
('ReviseErrorCode', models.CharField(blank=True, max_length=10, null=True)),
('Train', models.BooleanField(blank=True, default=True, null=True)),
('UploadDate', models.DateField(auto_now=True, null=True)),
],
),
migrations.AddField(
model_name='srqs',
name='ReviseErrorCode',
field=models.CharField(blank=True, max_length=10, null=True),
),
]
| [
"shouping.liang@innolux.com"
] | shouping.liang@innolux.com |
d6e158a754d97f4f5e0dedfbf9ad93d4b43e0abe | ec28e7f3290069451ec8889efa4e22b5930979c0 | /strategery/engine.py | c9ee8a83a5a9f31f423dbb23634a73f29cba5401 | [
"MIT"
] | permissive | rcgale/strategery | e226c0deb53a8ff35aa32a25b963807ffc98859f | d1608ea59587d7e49db0bdf788e3243d4d42081a | refs/heads/master | 2021-06-23T16:06:45.218568 | 2021-03-06T01:42:15 | 2021-03-06T01:42:15 | 206,894,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,487 | py | import inspect
import sys
import time
from functools import lru_cache
from strategery.exceptions import TaskError, StrategyError
from strategery.logging import BypassLogger
from strategery.strategy import get_strategy
from strategery.tasks import Task, get_key
logger = None
def execute(*args, targets, input=None, preprocessed=None):
resolved_logger = logger or BypassLogger()
input = __renaming_preprocessed_to_input(preprocessed, input)
if type(input) is list or type(input) is tuple:
# Convert lists/tuples to type-indexed dictionary
input = {type(p): p for p in input}
queue = get_strategy(tuple(targets), preprocessed_keys=tuple(input.keys()))
print('Processing strategy:', file=resolved_logger)
for n, stage in enumerate(queue):
print('Phase {}: {}'.format(n, [t.name() for t in stage]), file=resolved_logger)
print("\n", file=resolved_logger)
# Populate with preprocessed
processed = input
for stage in queue:
for task in stage:
if task not in processed:
try:
ts = time.time()
__assert_task_type(task)
dependencies = __resolve_task_dependencies(task, processed)
processed[task] = task(*dependencies)
te = time.time()
print('[%2.2f sec] Processed: %r ' % (te - ts, task.name()),
file=resolved_logger)
except Exception as e:
raise TaskError('Stategery failed at task {t}, found at approximately "{f}".\n\nInner error:\n{et}: {e}'.format(
t=task.name(),
et=type(e).__name__,
e=e,
f=task.code_file_colon_line(),
))
return tuple([processed[get_key(t)] for t in targets])
def __renaming_preprocessed_to_input(preprocessed, input):
if preprocessed:
__warn_once(
'strategery warning: the argument `preprocessed` has been renamed to `input` '
'and will be removed in a future version.',
)
if input and preprocessed:
raise Exception('Cannot specify both `input` and `preprocessed')
return input or preprocessed or {}
@lru_cache(1)
def __warn_once(message):
print(message, file=sys.stderr)
def __assert_task_type(task):
if not inspect.isfunction(task) and not inspect.isclass(task) and not hasattr(type(task), '__call__'):
raise Exception("Task cannot be processed, '{t}' is not a function or a class.".format(t=task.name))
def __resolve_task_dependencies(task: Task, processed):
if len(task.parameters) != len(task.dependencies):
raise StrategyError('Stategery task {t} expects parameters {p}, @fed_by decorator only accounts for {d}'.format(
t=task.name(),
p=[k for k in task.signature.parameters.keys()],
d=[d.name() for d in task.dependencies]
))
values = []
for parameter, dependency in zip(task.parameters.values(), task.dependencies):
if dependency in processed:
values.append(processed[dependency])
elif parameter.default != inspect._empty:
values.append(parameter.default)
else:
raise StrategyError('Strategery task {t} could not resolve parameter {p}.'.format(
t=task.name(),
p=parameter.name
))
return values
| [
"galer@ohsu.edu"
] | galer@ohsu.edu |
9435b62b274dd42e74992cca64e569aa33c081d9 | 00adb3ceec4e37f8384f575d2711a27ca94327bb | /solutions/836_Rectangle_Overlap/solution_arsho.py | 9cc8700ed4892cd4bd78dee1949d0945fb8ad33a | [] | no_license | arsho/leetcode | 8c7d40de5a2579f06c58474540d704aaae41d81a | 55d52f7b150968ce348782ca30e573ae1cf1bd53 | refs/heads/master | 2023-01-23T20:46:00.859736 | 2023-01-22T15:55:18 | 2023-01-22T15:55:18 | 123,886,220 | 6 | 10 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | """
Title : 836. Rectangle Overlap
Category : Math
URL : https://leetcode.com/problems/rectangle-overlap/
Author : arsho
Created : 23 March 2021
"""
from typing import List
class Solution:
def isRectangleOverlap(self, rect1: List[int], rect2: List[int]) -> bool:
x_distance = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
y_distance = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
return x_distance > 0 and y_distance > 0
| [
"shahariarrabby@gmail.com"
] | shahariarrabby@gmail.com |
8fcbe2dc5cae2f366d06acf5e7f587d9893d8b85 | 7e181f4925d24c95924920647a8d007f6a609821 | /venv/bin/django-admin.py | 7aded3eddca9f1de5217af11ebfb2746615b12e1 | [] | no_license | Tamim101/portfolio-update | af76cd1db34d4c57658f39b032c40689ad5afe8c | bf52a72eb45c485cad578ef2a81536d8827899dc | refs/heads/master | 2023-04-23T03:47:14.815150 | 2021-05-02T04:29:11 | 2021-05-02T04:29:11 | 363,565,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | #!/Users/mujahid/Documents/Django/Personal_Portfolio_/My_Personal_Portfolio/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
# Deprecation shim: behaves like the modern ``django-admin`` entry point while
# emitting a RemovedInDjango40Warning. On Django >= 4.0 (where that warning
# class no longer exists) it refuses to run at import time.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # The warning class was removed in Django 4.0, so an ImportError here
    # means this script has been dropped upstream and must not be used.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn on every invocation, then delegate to the normal command runner.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"tamimkhan7133@gmail.com"
] | tamimkhan7133@gmail.com |
7d3e8a94ef63a6dd931ce66016c56a243fb7a2e9 | 7a402c6bb3887af56ff8609453ef926fa41291a5 | /LightGBMwithSImpleFeatures.py | ebcffd5eb8005c9fe27613e44bc60d7577bc6206 | [] | no_license | lizihaoleo/home-credit-default-risk | cdcfe2ee7768b553205f3121a946535122b8626b | 1a9f8d3ab107f8b3ed59634db71382f4965ecb0b | refs/heads/master | 2021-10-18T05:27:36.397640 | 2019-02-14T03:54:31 | 2019-02-14T03:54:31 | 170,621,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,019 | py | # HOME CREDIT DEFAULT RISK COMPETITION
# Most features are created by applying min, max, mean, sum and var functions to grouped tables.
# Little feature selection is done and overfitting might be a problem since many features are related.
# The following key ideas were used:
# - Divide or subtract important features to get rates (like annuity and income)
# - In Bureau Data: create specific features for Active credits and Closed credits
# - In Previous Applications: create specific features for Approved and Refused applications
# - Modularity: one function for each table (except bureau_balance and application_test)
# - One-hot encoding for categorical features
# All tables are joined with the application DF using the SK_ID_CURR key (except bureau_balance).
# You can use LightGBM with KFold or Stratified KFold. Please upvote if you find usefull, thanks!
# Update 16/06/2018:
# - Added Payment Rate feature
# - Removed index from features
# - Set early stopping to 200 rounds
# - Use standard KFold CV (not stratified)
# Public LB increased to 0.792
import numpy as np
import pandas as pd
import gc
import time
from contextlib import contextmanager
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
@contextmanager
def timer(title):
    """Context manager that prints how long the wrapped block took.

    Args:
        title: Label printed next to the elapsed wall-clock seconds.
    """
    started_at = time.time()
    yield
    elapsed = time.time() - started_at
    print("{} - done in {:.0f}s".format(title, elapsed))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category = True):
    """One-hot encode every object-dtype column of *df*.

    Args:
        df: Input DataFrame.
        nan_as_category: When True, NaN gets its own dummy column per feature.

    Returns:
        (encoded_df, new_columns): the encoded frame and the list of dummy
        column names that were created by the encoding.
    """
    before = set(df.columns)
    object_cols = [name for name in df.columns if df[name].dtype == 'object']
    encoded = pd.get_dummies(df, columns=object_cols, dummy_na=nan_as_category)
    added = [name for name in encoded.columns if name not in before]
    return encoded, added
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows = None, nan_as_category = False):
    """Load train/test application data and build the base feature frame.

    Train and test rows are stacked into one frame; test rows have no TARGET
    column in their CSV, so they carry a null TARGET that kfold_lightgbm()
    later uses to split them apart again.

    Args:
        num_rows: Optional row cap for both CSVs (debug runs).
        nan_as_category: Forwarded to one_hot_encoder().

    Returns:
        Combined, one-hot-encoded DataFrame with a few ratio features added.
    """
    # Read data and merge
    df = pd.read_csv('./application_train.csv', nrows= num_rows)
    test_df = pd.read_csv('./application_test.csv', nrows= num_rows)
    print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — this pins the
    # script to older pandas. reset_index() also adds an 'index' column, which
    # kfold_lightgbm() explicitly excludes from the feature list.
    df = df.append(test_df).reset_index()
    # Optional: Remove 4 applications with XNA CODE_GENDER (train set)
    df = df[df['CODE_GENDER'] != 'XNA']
    # Categorical features with Binary encode (0 or 1; two categories)
    for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
        df[bin_feature], uniques = pd.factorize(df[bin_feature])
    # Categorical features with One-Hot encode
    df, cat_cols = one_hot_encoder(df, nan_as_category)
    # NaN values for DAYS_EMPLOYED: 365.243 -> nan (dataset sentinel value)
    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
    # Some simple new features (percentages)
    df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
    df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
    df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
    df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
    df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
    del test_df
    gc.collect()
    return df
# Preprocess bureau.csv and bureau_balance.csv
def bureau_and_balance(num_rows = None, nan_as_category = True):
    """Aggregate credit-bureau data down to one row per SK_ID_CURR.

    bureau_balance is first rolled up per SK_ID_BUREAU and joined onto
    bureau; everything is then aggregated per client, with two extra feature
    sets restricted to Active and Closed credits respectively.
    """
    bureau = pd.read_csv('./bureau.csv', nrows = num_rows)
    bb = pd.read_csv('./bureau_balance.csv', nrows = num_rows)
    bb, bb_cat = one_hot_encoder(bb, nan_as_category)
    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
    # Bureau balance: Perform aggregations and merge with bureau.csv
    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
    for col in bb_cat:
        bb_aggregations[col] = ['mean']
    bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
    # Flatten the (column, statistic) MultiIndex into single-level names.
    bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
    del bb, bb_agg
    gc.collect()
    # Bureau and bureau_balance numeric features
    num_aggregations = {
        'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
        'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
        'DAYS_CREDIT_UPDATE': ['mean'],
        'CREDIT_DAY_OVERDUE': ['max', 'mean'],
        'AMT_CREDIT_MAX_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
        'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
        'AMT_CREDIT_SUM_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
        'AMT_ANNUITY': ['max', 'mean'],
        'CNT_CREDIT_PROLONG': ['sum'],
        'MONTHS_BALANCE_MIN': ['min'],
        'MONTHS_BALANCE_MAX': ['max'],
        'MONTHS_BALANCE_SIZE': ['mean', 'sum']
    }
    # Bureau and bureau_balance categorical features (mean of dummies = share)
    cat_aggregations = {}
    for cat in bureau_cat: cat_aggregations[cat] = ['mean']
    for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
    # Bureau: Active credits - using only numerical aggregations
    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
    active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
    active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
    del active, active_agg
    gc.collect()
    # Bureau: Closed credits - using only numerical aggregations
    closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
    closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
    closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
    del closed, closed_agg, bureau
    gc.collect()
    return bureau_agg
# Preprocess previous_applications.csv
def previous_applications(num_rows = None, nan_as_category = True):
    """Aggregate previous Home Credit applications to one row per SK_ID_CURR.

    Produces three feature groups: all applications (PREV_*), approved only
    (APPROVED_*) and refused only (REFUSED_*).
    """
    prev = pd.read_csv('./previous_application.csv', nrows = num_rows)
    prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
    # Days 365.243 values -> nan (dataset sentinel for "no date")
    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
    # Add feature: value ask / value received percentage
    prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
    # Previous applications numeric features
    num_aggregations = {
        'AMT_ANNUITY': ['min', 'max', 'mean'],
        'AMT_APPLICATION': ['min', 'max', 'mean'],
        'AMT_CREDIT': ['min', 'max', 'mean'],
        'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
        'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
        'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
        'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
        'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
        'DAYS_DECISION': ['min', 'max', 'mean'],
        'CNT_PAYMENT': ['mean', 'sum'],
    }
    # Previous applications categorical features (mean of dummies = share)
    cat_aggregations = {}
    for cat in cat_cols:
        cat_aggregations[cat] = ['mean']
    prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    # Flatten the (column, statistic) MultiIndex into single-level names.
    prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
    # Previous Applications: Approved Applications - only numerical features
    approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
    approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
    approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
    prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
    # Previous Applications: Refused Applications - only numerical features
    refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
    refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
    refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
    prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
    del refused, refused_agg, approved, approved_agg, prev
    gc.collect()
    return prev_agg
# Preprocess POS_CASH_balance.csv
def pos_cash(num_rows = None, nan_as_category = True):
    """Aggregate POS_CASH_balance.csv down to one row per SK_ID_CURR.

    Numeric columns get max/mean/size statistics, every categorical dummy is
    averaged (share of months), and POS_COUNT holds the number of monthly
    records per client.
    """
    frame = pd.read_csv('./POS_CASH_balance.csv', nrows=num_rows)
    frame, dummy_cols = one_hot_encoder(frame, nan_as_category=True)
    # Statistics per client: numeric aggregations plus the mean of each dummy.
    aggregations = {
        'MONTHS_BALANCE': ['max', 'mean', 'size'],
        'SK_DPD': ['max', 'mean'],
        'SK_DPD_DEF': ['max', 'mean']
    }
    aggregations.update({dummy: ['mean'] for dummy in dummy_cols})
    grouped = frame.groupby('SK_ID_CURR')
    pos_agg = grouped.agg(aggregations)
    # Flatten the (column, statistic) MultiIndex into single-level names.
    pos_agg.columns = pd.Index(['POS_' + column + '_' + stat.upper()
                                for column, stat in pos_agg.columns.tolist()])
    # Count pos cash accounts
    pos_agg['POS_COUNT'] = grouped.size()
    del frame
    gc.collect()
    return pos_agg
# Preprocess installments_payments.csv
def installments_payments(num_rows = None, nan_as_category = True):
    """Aggregate installment payment history to one row per SK_ID_CURR.

    Adds payment ratio/difference and days-past-due/days-before-due features
    before aggregating.
    """
    ins = pd.read_csv('./installments_payments.csv', nrows = num_rows)
    ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
    # Percentage and difference paid in each installment (amount paid and installment value)
    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
    # Days past due and days before due (no negative values)
    ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
    ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
    # NOTE: the lambda also maps NaN to 0 (NaN > 0 is False), so these two
    # lines are NOT equivalent to Series.clip(lower=0), which keeps NaN.
    ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
    ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
    # Features: Perform aggregations
    aggregations = {
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DPD': ['max', 'mean', 'sum'],
        'DBD': ['max', 'mean', 'sum'],
        'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
        'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
        'AMT_INSTALMENT': ['max', 'mean', 'sum'],
        'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
        'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
    }
    for cat in cat_cols:
        aggregations[cat] = ['mean']
    ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)
    # Flatten the (column, statistic) MultiIndex into single-level names.
    ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
    # Count installments accounts
    ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
    del ins
    gc.collect()
    return ins_agg
# Preprocess credit_card_balance.csv
def credit_card_balance(num_rows = None, nan_as_category = True):
    """Aggregate credit_card_balance.csv down to one row per SK_ID_CURR.

    Every remaining column gets min/max/mean/sum/var statistics; CC_COUNT
    holds the number of monthly credit-card records per client.
    """
    frame = pd.read_csv('./credit_card_balance.csv', nrows=num_rows)
    frame, _dummy_cols = one_hot_encoder(frame, nan_as_category=True)
    # General aggregations; the previous-application key is irrelevant once
    # we roll everything up per client.
    frame.drop(['SK_ID_PREV'], axis=1, inplace=True)
    agg = frame.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])
    # Flatten the (column, statistic) MultiIndex into single-level names.
    renamed = []
    for column, stat in agg.columns.tolist():
        renamed.append('CC_' + column + '_' + stat.upper())
    agg.columns = pd.Index(renamed)
    # Count credit card lines
    agg['CC_COUNT'] = frame.groupby('SK_ID_CURR').size()
    del frame
    gc.collect()
    return agg
# LightGBM GBDT with KFold or Stratified KFold
# Parameters from Tilii kernel: https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/code
def kfold_lightgbm(df, num_folds, stratified = False, debug= False):
    """Train a LightGBM classifier with out-of-fold cross-validation.

    Args:
        df: Combined frame from main(); rows with a non-null TARGET are the
            train set, rows with a null TARGET are the test set to predict.
        num_folds: Number of CV folds.
        stratified: Use StratifiedKFold instead of plain KFold.
        debug: When True, skip writing the submission CSV and the plot.

    Returns:
        DataFrame of per-fold feature importances.

    Side effects (when not debug): writes the CSV named by the global
    `submission_file_name` (set in the __main__ block) and saves a feature
    importance plot via display_importances().
    """
    # Divide in training/validation and test data.
    # FIX: .copy() detaches both splits from `df` — the later
    # `test_df['TARGET'] = sub_preds` would otherwise assign into a slice
    # view of `df` (chained assignment / SettingWithCopyWarning, with the
    # write potentially silently lost).
    train_df = df[df['TARGET'].notnull()].copy()
    test_df = df[df['TARGET'].isnull()].copy()
    print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
    del df
    gc.collect()
    # Cross validation model
    if stratified:
        folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
    else:
        folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
    # Create arrays and dataframes to store results
    oof_preds = np.zeros(train_df.shape[0])
    sub_preds = np.zeros(test_df.shape[0])
    feature_importance_df = pd.DataFrame()
    # Identifier columns and the TARGET itself must not be used as features.
    feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
        train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
        valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
        # LightGBM parameters found by Bayesian optimization
        clf = LGBMClassifier(
            nthread=4,
            n_estimators=10000,
            learning_rate=0.02,
            num_leaves=34,
            colsample_bytree=0.9497036,
            subsample=0.8715623,
            max_depth=8,
            reg_alpha=0.041545473,
            reg_lambda=0.0735294,
            min_split_gain=0.0222415,
            min_child_weight=39.3259775,
            silent=-1,
            verbose=-1, )
        clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
            eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)
        # Out-of-fold predictions for the validation split; test predictions
        # are averaged across folds.
        oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
        sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
        del clf, train_x, train_y, valid_x, valid_y
        gc.collect()
    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
    # Write submission file and plot feature importance
    if not debug:
        test_df['TARGET'] = sub_preds
        test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
        display_importances(feature_importance_df)
    return feature_importance_df
# Display/plot feature importance
def display_importances(feature_importance_df_):
    """Save a bar plot of the top-40 features by mean importance across folds.

    Writes the figure to 'lgbm_importances01.png' in the working directory.
    """
    mean_importance = feature_importance_df_[["feature", "importance"]].groupby("feature").mean()
    top_features = mean_importance.sort_values(by="importance", ascending=False)[:40].index
    selected = feature_importance_df_.loc[feature_importance_df_.feature.isin(top_features)]
    ordered = selected.sort_values(by="importance", ascending=False)
    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature", data=ordered)
    plt.title('LightGBM Features (avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances01.png')
def main(debug = False):
    """Build all feature tables, join them on SK_ID_CURR and train the model.

    Args:
        debug: When True, cap every CSV at 10k rows and skip writing the
            submission file / importance plot in kfold_lightgbm().
    """
    num_rows = 10000 if debug else None
    df = application_train_test(num_rows)
    with timer("Process bureau and bureau_balance"):
        bureau = bureau_and_balance(num_rows)
        print("Bureau df shape:", bureau.shape)
        df = df.join(bureau, how='left', on='SK_ID_CURR')
        del bureau
        gc.collect()
    with timer("Process previous_applications"):
        prev = previous_applications(num_rows)
        print("Previous applications df shape:", prev.shape)
        df = df.join(prev, how='left', on='SK_ID_CURR')
        del prev
        gc.collect()
    with timer("Process POS-CASH balance"):
        pos = pos_cash(num_rows)
        print("Pos-cash balance df shape:", pos.shape)
        df = df.join(pos, how='left', on='SK_ID_CURR')
        del pos
        gc.collect()
    with timer("Process installments payments"):
        ins = installments_payments(num_rows)
        print("Installments payments df shape:", ins.shape)
        df = df.join(ins, how='left', on='SK_ID_CURR')
        del ins
        gc.collect()
    with timer("Process credit card balance"):
        cc = credit_card_balance(num_rows)
        print("Credit card balance df shape:", cc.shape)
        df = df.join(cc, how='left', on='SK_ID_CURR')
        del cc
        gc.collect()
    with timer("Run LightGBM with kfold"):
        feat_importance = kfold_lightgbm(df, num_folds= 5, stratified= False, debug= debug)
if __name__ == "__main__":
    # Global read by kfold_lightgbm() when writing the submission CSV.
    submission_file_name = "submission_kernel02.csv"
    with timer("Full model run"):
        main()
"lizihaoleo@gmail.com"
] | lizihaoleo@gmail.com |
65d5f34158a47d8bcadc80486e61ebf04640a721 | 2e2ecaaace5bcc70ccfad103ddb3692db82ec655 | /django/background_scripts/microsoft/get_utilization.py | 0c87906410b428638748f0f7ee6e6701ca43fa35 | [] | no_license | RobertRajcool/Angular4-Django | 32f3d24c4dbbc42abe671fd65fe98637834a388b | 4424e25f6d9f982d682d13ae0e620dd0a01348a7 | refs/heads/master | 2021-07-10T17:53:36.222339 | 2017-10-10T10:17:56 | 2017-10-10T10:17:56 | 104,564,564 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 15,536 | py | from decimal import Decimal
from . import microsoft_base
from redington import settings
from cloudtemplates.models import CloudRates
from billing.models import CloudServiceConsumptions
from customers.models import Customers, CloudAccounts
from products.models import Products, VendorDetails
from django.db.models import ObjectDoesNotExist, Q
from django.core.exceptions import MultipleObjectsReturned
from cloudapp.defaults import AppDefaults
import datetime
from datetime import timedelta, tzinfo
from django.utils import timezone
from cloudapp.generics.caculator import calculate_azure_partner_cost
import pprint
import requests
import subprocess
import os.path
import json
import functools
import uuid
import sys
import pytz
class UtilizationRecords(microsoft_base.MicrosoftBase):
    """Fetches daily Azure utilization for one CSP subscription from the
    Microsoft Partner Center API and persists it as CloudServiceConsumptions
    rows.
    Most meters are stored per day as they arrive (process_records); meter
    names listed in `consolidated_rate_names` are instead accumulated onto a
    single monthly record dated the 22nd (getUtilization), and names in
    `ignored_rate_names` are skipped entirely.
    """
    def __init__(self, tenantId, subscriptionId, startDate, endDate):
        """Store the query window and initialize the accumulators."""
        super(UtilizationRecords, self).__init__()
        self.tenantId = tenantId
        self.subscriptionId = subscriptionId
        self.startDate = startDate
        self.endDate = endDate
        # meter name -> list of raw rows, for meters rolled up monthly.
        self.consolidated_rates = {}
        # "category|subcategory|name|region" -> summed quantity.
        self.grouped_records = {}
        # "category|subcategory|name|region" -> summed quantity * rate.
        self.grouped_calculations = {}
        # Meter names skipped entirely.
        self.ignored_rate_names = [
            'Data Transfer In (GB)',
        ]
        # Meter names accumulated onto a single monthly (22nd) record.
        self.consolidated_rate_names = [
            'Data Transfer Out (GB)'
        ]
    # Main method to get the utilizations
    def getUtilization(self):
        """Fetch the daily utilization feed, store per-day rows via
        process_records(), then roll the consolidated meters (e.g. outbound
        data transfer) up onto monthly records dated the 22nd.
        """
        access_headers = self.getAccessHeaders()
        url = 'https://api.partnercenter.microsoft.com/v1/customers/' \
              '{}/subscriptions/{}/utilizations/azure?' \
              'start_time={}&end_time={}&granularity=Daily&show_details=True'. \
            format(self.tenantId, self.subscriptionId, self.startDate, self.endDate)
        utilization_records_out = requests.get(url, headers=access_headers)
        # The API response starts with a UTF-8 BOM; decode it away.
        utilization_records_out.encoding = 'utf-8-sig'
        utilization_records = utilization_records_out.text
        self.process_records(utilization_records, self.grouped_records, self.grouped_calculations,
                             self.consolidated_rates)
        if len(self.consolidated_rates) > 0:
            """ Querying vendor & customer """
            vendor = VendorDetails.objects.filter(vendor_name=AppDefaults.cloud_vendor_codes(return_as='name',
                                                                                             query_str='MS')).first()
            account_type = AppDefaults.cloud_vendor_codes(return_as='code', query_str=vendor.vendor_name)
            cloud_accounts = CloudAccounts.objects.filter(details__tenant_id=self.tenantId.upper(),
                                                          type=account_type
                                                          )
            """ Try for lowercase """
            if not cloud_accounts.exists():
                cloud_accounts = CloudAccounts.objects.filter(details__tenant_id=self.tenantId.lower(),
                                                              type=account_type
                                                              )
            customer = None
            if cloud_accounts.exists():
                cloud_account = cloud_accounts.first()
                customer = cloud_account.customer
                customer_cloud_acc_details = cloud_account.details
                # Default discount of 10 unless the account overrides it.
                standard_discount = 10
                if 'standard_discount' in customer_cloud_acc_details \
                        and customer_cloud_acc_details['standard_discount'] is not None \
                        and customer_cloud_acc_details['standard_discount'] != '':
                    standard_discount = float(customer_cloud_acc_details['standard_discount'])
            # NOTE(review): unlike process_records(), there is no else-branch
            # terminating the run here — if no CloudAccounts row matched,
            # `standard_discount` is never bound and its use below raises
            # NameError.
            for name, entries in self.consolidated_rates.items():
                # Re-group the raw rows per "name|region".
                by_region = {}
                totals = 0
                for entry in entries:
                    name_with_location = str.format('{}|{}', name, entry[6])
                    region_entry = by_region.setdefault(name_with_location, [])
                    region_entry.append(entry)
                    totals = totals + entry[7]
                for item in by_region:
                    split_values = item.split('|')
                    if split_values:
                        product_name = split_values[0]
                        location = split_values[1]
                        daily_records = by_region[item]
                        for rec in daily_records:
                            # rec layout (built by process_records' jq filter):
                            # [start, end, resource uuid, name, category,
                            #  subcategory, region, quantity].
                            start_date = self.str_to_datetime(rec[0])
                            date_of_recording = None
                            # Bucket usage into the billing cycle running from
                            # the 22nd of a month to the 21st of the next one.
                            if start_date.month == 1:
                                if start_date.day >= 22:
                                    date_of_recording = datetime.datetime(start_date.year, start_date.month, 22, 0, 0,
                                                                          0, tzinfo=pytz.UTC)
                                else:
                                    # January 1st-21st belongs to December's cycle.
                                    date_of_recording = datetime.datetime(start_date.year - 1, 12, 22, 0, 0, 0,
                                                                          tzinfo=pytz.UTC)
                            else:
                                if start_date.day >= 22:
                                    date_of_recording = datetime.datetime(start_date.year, start_date.month, 22, 0, 0,
                                                                          0, tzinfo=pytz.UTC)
                                else:
                                    date_of_recording = datetime.datetime(start_date.year, start_date.month - 1, 22, 0,
                                                                          0, 0, tzinfo=pytz.UTC)
                            # Check if there is a record on the 22nd (consolidated
                            # usage is only ever stored on the 22nd).
                            consumption = CloudServiceConsumptions.objects.filter(
                                linked_account_id=self.tenantId,
                                subscription_id=self.subscriptionId,
                                item_description=product_name,
                                region=location,
                                usage_start_date=date_of_recording
                            )
                            cloud_rate = CloudRates.objects.get(uuid=rec[2])
                            if consumption.exists():
                                # Accumulate into the existing monthly record.
                                consumption = consumption[0]
                                consumption.usage_quantity = consumption.usage_quantity + Decimal(rec[7])
                                # Only quantity above 5 is billed (presumably a
                                # 5 GB free allowance — confirm with billing).
                                if consumption.usage_quantity > 5:
                                    cost = calculate_azure_partner_cost(
                                        (float(consumption.usage_quantity) - 5) * float(cloud_rate.rate),
                                        standard_discount)
                                    consumption.unblended_cost = Decimal(cost)
                                consumption.save()
                            else:
                                # First usage in this cycle: create the record.
                                consumption = CloudServiceConsumptions()
                                consumption.customer = customer
                                consumption.vendor = vendor
                                consumption.record_id = cloud_rate.uuid
                                consumption.usage_start_date = date_of_recording
                                end_date = date_of_recording + timedelta(days=1)
                                consumption.usage_end_date = end_date
                                consumption.payer_account_id = self.csp_domain
                                consumption.linked_account_id = self.tenantId
                                consumption.pricing_plan_id = ''
                                consumption.product_name = rec[4]
                                consumption.usage_type = rec[5]
                                consumption.item_description = rec[3]
                                consumption.usage_quantity = rec[7]
                                consumption.region = location if location else 'N/A'
                                consumption.rate_id = cloud_rate.id
                                consumption.subscription_id = self.subscriptionId
                                consumption.unblended_cost = 0 # Always 0 when we start
                                consumption.save()
                    # pprint.pprint(by_region)
        pprint.pprint(self.grouped_records)
        pprint.pprint(self.grouped_calculations)
        # NOTE(review): reduce() over an empty dict raises TypeError — this
        # assumes at least one non-consolidated meter was processed.
        total = functools.reduce(lambda x, y: x + y, self.grouped_calculations.values())
        pprint.pprint(total)
    def str_to_datetime(self, dt_string):
        """ Converts date string into UTC datetime object """
        # NOTE(review): the None-fallback utcnow() is naive (no tzinfo),
        # unlike the parsed branch which is made timezone-aware.
        return datetime.datetime.strptime(dt_string, "%Y-%m-%d").replace(
            tzinfo=timezone.utc) if dt_string is not None else datetime.datetime.utcnow()
    # Recursive Block to keep returning records till we dont have any more continuation records...SPIN SPIN SPIN
    def process_records(self, utilization_records, grouped_records, grouped_calculations, consolidated_rates):
        """Parse one page of the utilization feed; recurse while the response
        advertises a continuation ('next') link.
        The raw JSON page is written to /tmp/<subscription>.json and parsed
        with the external `jq` binary into one compact array per row:
        [start, end, resource uuid, name, category, subcategory, region,
        quantity]. Ignored meters are skipped, consolidated meters are
        collected into `consolidated_rates` for getUtilization() to roll up,
        and everything else is written to CloudServiceConsumptions directly.
        """
        out_file = open('/tmp/{}.json'.format(self.subscriptionId), 'w')
        out_file.write(utilization_records)
        out_file.close()
        if os.path.exists('/tmp/{}.json'.format(self.subscriptionId)):
            # Shell out to jq: strip the timezone suffix from both timestamps,
            # reformat them as plain dates, and project each item to a compact
            # JSON array per output line.
            proc = subprocess.Popen(
                ["jq",
                 "-c",
                 '.items[] | [(.usageStartTime | sub("(?<before>.*)[-+]\\\\d{2}:\\\\d{2}"; .before ) | '
                 'strptime("%Y-%m-%dT%H:%M:%S") | strftime("%Y-%m-%d")), '
                 '(.usageEndTime | sub("(?<before>.*)[-+]\\\\d{2}:\\\\d{2}"; .before ) | '
                 'strptime("%Y-%m-%dT%H:%M:%S") | strftime("%Y-%m-%d")), '
                 '.resource.id, .resource.name, .resource.category, .resource.subcategory, .resource.region, .quantity]'
                 ],
                stdout=subprocess.PIPE,
                stdin=open('/tmp/{}.json'.format(self.subscriptionId)))
            """ Querying vendor & customer """
            vendor = VendorDetails.objects.filter(vendor_name=AppDefaults.cloud_vendor_codes(return_as='name',
                                                                                             query_str='MS')).first()
            account_type = AppDefaults.cloud_vendor_codes(return_as='code', query_str=vendor.vendor_name)
            cloud_accounts = CloudAccounts.objects.filter(details__tenant_id=self.tenantId.upper(),
                                                          type=account_type
                                                          )
            """ Try for lowercase """
            if not cloud_accounts.exists():
                cloud_accounts = CloudAccounts.objects.filter(details__tenant_id=self.tenantId.lower(),
                                                              type=account_type
                                                              )
            customer = None
            if cloud_accounts.exists():
                cloud_account = cloud_accounts.first()
                customer = cloud_account.customer
                customer_cloud_acc_details = cloud_account.details
                # Default discount of 10 unless the account overrides it.
                standard_discount = 10
                if 'standard_discount' in customer_cloud_acc_details \
                        and customer_cloud_acc_details['standard_discount'] is not None \
                        and customer_cloud_acc_details['standard_discount'] != '':
                    standard_discount = float(customer_cloud_acc_details['standard_discount'])
            else:
                # No matching cloud account: abort the whole run.
                sys.exit(
                    '\033[0;37;41mSeems there is no customer for tenant id: %s. Terminating ...\033[0m' % self.tenantId)
            for line in proc.stdout.readlines():
                line = json.loads(line.decode())
                utilization_start_date = self.str_to_datetime(line[0])
                utilization_end_date = self.str_to_datetime(line[1])
                resource_uuid = line[2]
                name = line[3]
                category = line[4]
                subcategory = line[5]
                location = line[6]
                quantity = line[7]
                if name in self.ignored_rate_names:
                    continue
                if name in self.consolidated_rate_names:
                    # Defer to getUtilization() for monthly consolidation.
                    consolidated_rate_name_value = consolidated_rates.setdefault(name, [])
                    consolidated_rate_name_value.append(line)
                    continue
                try:
                    cloud_rate = CloudRates.objects.get(uuid=resource_uuid)
                    # Maintain the in-memory quantity / price running totals.
                    full_name = str.format('{}|{}|{}|{}', category, subcategory, name, location)
                    current_util = grouped_records.setdefault(full_name, 0)
                    grouped_records[full_name] = current_util + quantity
                    current_prices = grouped_calculations.setdefault(full_name, 0)
                    grouped_calculations[full_name] = current_prices + (quantity * float(cloud_rate.rate))
                    # Store in the DB
                    consumption = CloudServiceConsumptions()
                    consumption.customer = customer
                    consumption.vendor = vendor
                    consumption.record_id = cloud_rate.uuid
                    consumption.usage_start_date = utilization_start_date
                    consumption.usage_end_date = utilization_end_date
                    consumption.payer_account_id = self.csp_domain
                    consumption.linked_account_id = self.tenantId
                    consumption.pricing_plan_id = ''
                    consumption.product_name = category
                    consumption.usage_type = subcategory
                    consumption.item_description = name
                    consumption.usage_quantity = quantity
                    consumption.region = location if location else 'N/A'
                    consumption.rate_id = cloud_rate.id
                    consumption.subscription_id = self.subscriptionId
                    consumption.unblended_cost = calculate_azure_partner_cost(quantity * float(cloud_rate.rate),
                                                                              standard_discount)
                    consumption.save()
                except ObjectDoesNotExist:
                    # No CloudRates row for this resource uuid: skip the row.
                    print(
                        "could not find for %s %s %s %s %s" % (
                            category, subcategory, name, location, utilization_start_date))
            # Delete the file
            os.remove('/tmp/{}.json'.format(self.subscriptionId))
        # Check if there are further entries
        json_output = json.loads(utilization_records)
        if 'next' in json_output['links']:
            # Follow the continuation link with its required extra header.
            url = 'https://api.partnercenter.microsoft.com/v1/' + json_output['links']['next']['uri']
            continuation_header = {json_output['links']['next']['headers'][0]['key']:
                                       json_output['links']['next']['headers'][0]['value']}
            access_headers = self.getAccessHeaders()
            access_headers.update(continuation_header)
            utilization_records_out = requests.get(url, headers=access_headers)
            utilization_records_out.encoding = 'utf-8-sig'
            utilization_records = utilization_records_out.text
            self.process_records(utilization_records, grouped_records, grouped_calculations, consolidated_rates)
| [
"doss.cclawrance226@gmail.com"
] | doss.cclawrance226@gmail.com |
2d0368b80dbcb9c47412bfcb2b774d5f053f1fb7 | aea6cb6ab25a288d9a7143d54523d63aee0d67d9 | /alien_invasion/alien.py | 0f3173b69a5052510f6efacd2f79a6ddfb1e8ba0 | [] | no_license | ivanozz/python-crash-course | 13f8fbb00effa37dd513ded6d9d12539af516293 | dd889759275c24bddb2ba55c3c4a86af917781a0 | refs/heads/master | 2021-05-11T01:02:04.620160 | 2018-02-12T17:33:24 | 2018-02-12T17:33:24 | 118,316,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
    """Represents a single alien in the fleet."""
    def __init__(self, ai_settings, screen):
        """Initialize the alien and set its starting position."""
        super(Alien, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the alien image and assign its rect attribute.
        self.image = pygame.image.load('images/alien.bmp')
        self.rect = self.image.get_rect()
        # Each new alien starts near the top-left corner of the screen,
        # offset by its own width/height.
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height
        # Store the alien's exact horizontal position as a float.
        self.x = float(self.rect.x)
    def blitme(self):
        """Draw the alien at its current position."""
        self.screen.blit(self.image, self.rect)
    def update(self):
        """Move the alien horizontally according to the fleet direction."""
        self.x += (self.ai_settings.alien_speed_factor *
                   self.ai_settings.fleet_direction)
        self.rect.x = self.x
    def check_edges(self):
        """Return True if the alien is at either edge of the screen."""
        screen_rect = self.screen.get_rect()
        if self.rect.right >= screen_rect.right:
            return True
        elif self.rect.left <= 0:
            return True
| [
"iva1989@icloud.com"
] | iva1989@icloud.com |
3d037302f28cf01673df247c203d6585ef894a16 | b7a03c36c8f7d95d7cac3628f7c9d4834f33b2b9 | /week6_EDA_streamlit/day1_streamlit/streamlit_class/utils/dataframes.py | 8761c1b955c376c140a4d4f4ee5bc17d670bef0f | [
"MIT"
] | permissive | Jasc94/thebridge | dcca51d2d9aef7044f87795025678cb4340fb109 | e194e7f5948dc52bc62fc06bd4ae58a031d3aa7a | refs/heads/main | 2023-07-16T04:12:37.673222 | 2021-08-16T16:00:04 | 2021-08-16T16:00:04 | 361,670,388 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | import streamlit as st
import pandas as pd
def get_data_from_df(df):
    """Return the first ten rows of *df* as the string form of a NumPy array."""
    first_rows = df.iloc[:10, :]
    return str(first_rows.values)
@st.cache(suppress_st_warning=True)
def load_csv_df(uploaded_file):
    """Read an uploaded CSV (first 200 rows) into a DataFrame, with balloons.

    Returns None when no file has been uploaded yet. Cached by Streamlit so
    re-runs with the same upload skip the parse.
    """
    df = None
    if uploaded_file != None:
        #uploaded_file.seek(0)
        df = pd.read_csv(uploaded_file, nrows=200) # Load only the first 200 rows
        #st.write("csv Readed¡")
        st.balloons() # Show balloons when the file loads successfully
    return df
@st.cache(suppress_st_warning=True)
def load_normal_csv(uploaded_file):
    """Read an uploaded CSV (first 200 rows) into a DataFrame.

    Same as load_csv_df() but without the balloons animation. Returns None
    when no file has been uploaded yet.
    """
    df = None
    if uploaded_file != None:
        #uploaded_file.seek(0)
        df = pd.read_csv(uploaded_file, nrows=200) # Load only the first 200 rows
        #st.write("csv Readed¡")
    return df
# Load a dataframe intended for display as a map
@st.cache(suppress_st_warning=True)
def load_csv_for_map(csv_path):
    """Read a semicolon-separated CSV and rename coordinates for st.map.

    NOTE(review): the source column is spelled 'latidtud' here — presumably
    it matches the CSV header; confirm against the data file. Also, when
    csv_path is None, `df` is never bound and the final return raises
    UnboundLocalError — callers apparently always pass a path.
    """
    if csv_path != None:
        df = pd.read_csv(csv_path, sep=';') # Read using ";" as the separator
        df = df.rename(columns={'latidtud': 'lat', 'longitud': 'lon'}) # Rename to the 'lat'/'lon' names st.map expects
        st.balloons()
    return df
| [
"jonathansuarezcaceres@gmail.com"
] | jonathansuarezcaceres@gmail.com |
112ec58a217bf533202e32fa348ec43b73761ded | f396b2690b62c74dfa6a58a619c9f64828d4cf84 | /TreningDjango/news/migrations/0002_alter_post_slug.py | 00f42b3df82c567631c5fcfaec9191d44eb476b7 | [] | no_license | bart-son/home-budget | 7daf7bd56f9bf819e4777a1d1f1588b433df545c | ef9721a099dc55750c42b1437762bb3b206f53c1 | refs/heads/master | 2023-05-04T23:35:15.796553 | 2021-05-29T07:42:52 | 2021-05-29T07:42:52 | 371,912,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.2 on 2021-04-11 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Post.slug unique per 'created' date."""
    dependencies = [
        ('news', '0001_initial'),
    ]
    operations = [
        # Redefines the slug field: uniqueness is now enforced only among
        # posts that share the same 'created' date.
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=models.SlugField(unique_for_date='created'),
        ),
    ]
| [
"bartosz.kwiatk@gmail.com"
] | bartosz.kwiatk@gmail.com |
f6eee75f1210f4f5852d81892eb4e17c12279a21 | f7dc26779dd5491480b3e88fa1ab38eee99eacad | /third-party-synthetic/third-party-tester/tests/testdefinitions/ping_test.py | d2e1480b1f6af85bfd054fced4bfae7b7be55a4f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | Dynatrace/dynatrace-api | 13e068a17c08f0e7609438c043066ff4dc6b9357 | 5d85759bbead72dc708581979987c41912393410 | refs/heads/master | 2023-08-11T13:46:46.051080 | 2023-07-24T13:33:50 | 2023-07-24T13:33:50 | 45,841,623 | 99 | 87 | BSD-3-Clause | 2023-07-08T22:51:18 | 2015-11-09T14:04:12 | Python | UTF-8 | Python | false | false | 2,810 | py | import os
from tests import Test, TestStep, TestArgument
from datetime import timedelta
import pings
class PingTest(Test):
    """Ping test class.
    It measures the time needed for a single ICMP probe to check server
    availability.
    """
    TEST_NAME = 'ping'
    TEST_HELP_DESCRIPTION = "Test if given host responds to ping message and measures response time"
    HOSTNAME_FLAG = 'hostname'
    # CLI registration: '--hostname' is required and takes exactly one value.
    TEST_ARGUMENTS = [
        TestArgument(
            flag_names=['--' + HOSTNAME_FLAG],
            flag_args={
                'required': True,
                'nargs': 1,
                'metavar': HOSTNAME_FLAG,
                'help': 'Address of a host to test'
            }
        ),
    ]
    def __init__(self, args):
        """Create a PingTest class instance.
        Extends the base class __init__() method.
        Args:
            args: Command line arguments in dict form
        """
        super().__init__()
        # argparse stores the single nargs=1 value in a one-element list.
        self.hostname = args[self.HOSTNAME_FLAG][0]
        self.dynatrace_test_name = 'ICMP ping test for {hostname}'.format(hostname=self.hostname)
        self.steps.append(PingTest.PingStep(self.hostname))
    class PingStep(TestStep):
        """ICMP ping test class."""
        def __init__(self, hostname):
            """Create PingStep class instance.
            Args:
                hostname: IP or hostname of the host to ping
            """
            test_step_name = 'ICMP ping test for {hostname}'.format(hostname=hostname)
            super().__init__(test_step_name)
            # Check if running as root at posix systems
            if os.name != "nt" and os.geteuid() != 0:
                self.logger.error(
                    'Operation not permitted - Note that ICMP messages '
                    'can only be sent from processes running as root.'
                )
                # NOTE(review): exit(1) terminates the whole tester process
                # from inside a constructor; raising would be gentler.
                exit(1)
            self.pinger = pings.Ping()
            self.hostname = hostname
        def __call__(self):
            """Execute the test step.
            Overrides the base class implementation.
            """
            self.logger.info("Sending ICMP probe to {}".format(self.hostname))
            self.set_started()
            ping_response = self.pinger.ping(self.hostname)
            # Check if ICMP message was successfully received
            if ping_response.ret_code != pings.consts.SUCCESS:
                self.logger.error("ICMP probing failed")
                # Fail test by returning without calling self.set_passed()
                return
            # Only one ICMP probe is sent, so min time is the same as max and avg times
            self.duration = timedelta(milliseconds=ping_response.min_rtt)
            self.set_passed()
            self.logger.info("{} responded successfully".format(self.hostname))
| [
"pawel.nalezyty@dynatrace.com"
] | pawel.nalezyty@dynatrace.com |
61dea01dd75ab8c90bcab212f1a036edbc2ab457 | 2bebcab2997a08695378d798ffb196d49ad1b846 | /orders/migrations/0010_auto_20200826_1214.py | e324a93b00264f69ef948c91908114bec10504d1 | [] | no_license | lit-lucy/Pizza-orders | 739049825ac9af266d23b2d2a2af53236b1c728b | 46b5c5aa2bb5e3d5f7c5901f9c06ccec12c8c0d0 | refs/heads/master | 2023-05-31T19:10:42.206173 | 2020-09-04T09:42:49 | 2020-09-04T09:42:49 | 272,914,438 | 0 | 0 | null | 2021-06-10T23:03:50 | 2020-06-17T08:03:56 | Python | UTF-8 | Python | false | false | 432 | py | # Generated by Django 3.0.7 on 2020-08-26 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict Order.delivery_type to a single choice (pick-up), default 1."""
    dependencies = [
        ('orders', '0009_auto_20200826_1027'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='delivery_type',
            field=models.IntegerField(choices=[(1, 'Pick up in a restaurant')], default=1),
        ),
    ]
| [
"love@MacBook-Pro.local"
] | love@MacBook-Pro.local |
0342556411170d9d8108b8c277f1ca7f02dc2409 | 45e8df26d895fce1ffced77e2fc7c87aa5fcec71 | /leetcode/python-sol/301.Remove_Invalid_Parentheses.py | 6bcb271d66e2d15f39cc41fe7eb7f82c787062cb | [] | no_license | mikehung/competitive-programming | 5c4b429942357bfbe3c8ff2820d5cb5b7c5dc828 | 50713dc5973f2ea42220ac0248c3d1a6d90fcc15 | refs/heads/master | 2021-06-20T08:21:36.837057 | 2021-01-01T01:29:56 | 2021-01-01T01:31:55 | 153,595,870 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | class Solution:
    def removeInvalidParentheses(self, s):
        """Return all longest valid strings obtainable from s by deleting the
        minimum number of parentheses (LeetCode 301).

        Strategy: trim characters that can never be valid (')' before the
        first '(' and '(' after the last ')'), then run a memoized DFS that
        deletes one bracket at a time while tracking the fewest deletions
        seen so far in self.max_num_remove.
        """
        def valid(s):
            # True iff brackets in s are balanced and never close early.
            left = 0
            for c in s:
                if c == '(':
                    left += 1
                elif c == ')':
                    left -= 1
                if left < 0:
                    return False
            return left == 0
        def helper(s, num_remove):
            # DFS over single-bracket deletions, pruned once num_remove can
            # no longer beat the best-known deletion count.
            # NOTE(review): memo is keyed on s alone, but results depend on
            # num_remove and on the evolving self.max_num_remove, so cached
            # entries can be stale; the final length filter appears to mask
            # this — confirm before reusing this memo pattern.
            if s in memo:
                return memo[s]
            ret = []
            if valid(s):
                if num_remove == self.max_num_remove:
                    ret.append(s)
                else:
                    # Strictly fewer removals than any previous solution:
                    # discard older results and record the new optimum.
                    ret = [s]
                    self.max_num_remove = num_remove
            elif num_remove < self.max_num_remove:
                for i in range(len(s)):
                    if s[i] in '()':
                        ret += helper(s[:i] + s[i+1:], num_remove+1)
            memo[s] = ret
            return ret
        def trim(s):
            # Drop ')' occurring before the first '(' ...
            l = []
            found_left = False
            for c in s:
                if c == '(':
                    found_left = True
                if found_left or c != ')':
                    l.append(c)
            # ... and '(' occurring after the last ')'.
            r = []
            found_right = False
            for c in reversed(l):
                if c == ')':
                    found_right = True
                if found_right or c != '(':
                    r.append(c)
            return ''.join(reversed(r))
        self.max_num_remove = float('inf')
        memo = {}
        s = trim(s)
        # Keep only results at the optimal (maximal) length; fall back to the
        # empty string when nothing valid remains.
        return list(filter(lambda _: len(_) == len(s)-self.max_num_remove, set(helper(s, 0)))) or ['']
import time
def test(s):
    """Run the solver on *s*, printing the input, the result and elapsed time."""
    print(s)
    started = time.time()
    answer = Solution().removeInvalidParentheses(s)
    print(answer, time.time() - started)
# Ad-hoc smoke tests: already-valid strings, letters mixed in, single
# brackets, and heavily unbalanced worst cases.
test('()())()')
test('(a)())()')
test(')(')
test('()')
test('n')
test('(a(())()')
test("()(((((((()")
test("(()()()))((")
test("))aaa))s)(()()()))(a((c((")
test("((()))((()(()")
| [
"mikehung@synology.com"
] | mikehung@synology.com |
bcfcfd42d82934ef66bd39ecc5139583c6a927df | f62ff90d7850af458d8f12386fc9ee9134dbe7c1 | /Plots/Showplots/Model_3/Current_Voltage_Curves.py | 2d9023dab4df536df56c4202551adad30523eb73 | [] | no_license | AlexSchmid22191/EIS_R_Sim | 51b431f078cb455fc38637c192436c0523449565 | 851b061e60811e1e58a5b2fd4e393e529c3f86ac | refs/heads/master | 2023-06-27T17:40:59.177270 | 2021-07-22T11:50:27 | 2021-07-22T11:50:27 | 380,768,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,606 | py | from matplotlib.pyplot import subplots, show
from matplotlib.style import use
from numpy import load, log10
# Apply the shared plotting style used by the Showplots scripts.
use('../Show.mplstyle')
# Precomputed structured array with 'overpotential', 'current' and 'pressure'
# fields; columns index the simulated oxygen partial pressures.
data = load('../../../Currents_Resistances_Model_3/Current_Data_Model_3.npy')
# One two-panel figure per pressure regime:
# top axes = |current| (log scale), bottom axes = signed current (symlog).
fig_hi, ax_hi = subplots(nrows=2, figsize=(6, 8))
fig_me, ax_me = subplots(nrows=2, figsize=(6, 8))
fig_lo, ax_lo = subplots(nrows=2, figsize=(6, 8))
# High oxygen partial pressures
# [1::25] / [0::25] plot every 25th row starting at offsets 1 and 0 —
# presumably two interleaved data branches; confirm against the data layout.
for i in (1400, 1500, 1600, 1700, 1800):
    ax_hi[0].plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
    ax_hi[1].plot(data['overpotential'][0::25, i], data['current'][0::25, i], linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Medium oxygen partial pressures
for i in (1000, 1100, 1200, 1300):
    ax_me[0].plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
    ax_me[1].plot(data['overpotential'][0::25, i], data['current'][0::25, i], linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
# Low oxygen partial pressures
for i in (500, 600, 700, 800, 900):
    ax_lo[0].plot(data['overpotential'][1::25, i], abs(data['current'][1::25, i]), linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
    ax_lo[1].plot(data['overpotential'][0::25, i], data['current'][0::25, i], linestyle='-',
                  label='$10^{%d}$ bar' % log10(data['pressure'][1, i]))
ax_hi[0].set_yscale('log')
ax_me[0].set_yscale('log')
ax_lo[0].set_yscale('log')
# Symlog keeps a linear region around zero so negative currents still show.
# NOTE(review): 'linthreshy' is the old keyword name; newer Matplotlib
# releases use 'linthresh' — verify against the pinned Matplotlib version.
ax_hi[1].set_yscale('symlog', linthreshy=1e-1)
ax_me[1].set_yscale('symlog', linthreshy=1e-4)
ax_lo[1].set_yscale('symlog', linthreshy=1e-9)
# ax_hi[0].set_ylim(1e-3, 1e5)
# ax_hi[1].set_ylim(-1e5, 1e0)
# ax_me[0].set_ylim(1e-6, 1e0)
# ax_me[1].set_ylim(-1e0, 1e0)
# ax_lo[0].set_ylim(1e-10, 1e0)
# ax_lo[1].set_ylim(-1e-4, 1e1)
# Common axis labels and legends for all six panels.
for ax in (ax_hi[0], ax_hi[1], ax_me[0], ax_me[1], ax_lo[0], ax_lo[1]):
    ax.set_ylabel('Absolute current density (A/m²)')
    ax.set_xlabel('Overpotential (V)')
    ax.legend()
# fig_hi.tight_layout()
# fig_hi.savefig('Plots/Current_Voltage_Curves_Hi.pdf')
# fig_hi.savefig('Plots/Current_Voltage_Curves_Hi.png')
#
# fig_me.tight_layout()
# fig_me.savefig('Plots/Current_Voltage_Curves_Me.pdf')
# fig_me.savefig('Plots/Current_Voltage_Curves_Me.png')
#
# fig_lo.tight_layout()
# fig_lo.savefig('Plots/Current_Voltage_Curves_Lo.pdf')
# fig_lo.savefig('Plots/Current_Voltage_Curves_Lo.png')
show()
| [
"Alex.Schmid91@gmail.com"
] | Alex.Schmid91@gmail.com |
2ad49bb329c46561a59ca0a5e8fdb28c2b53c900 | 3f5d387b70ba0c828d9ebe30e6100d8837466b64 | /FWUploadThread.py | c431d49b7e06187157e6520f7824eba4c7f76789 | [
"MIT"
] | permissive | CsVance82/WIZnet-S2E-Tool-GUI | 502eb04841549ff2ad3eeeabe5f3dccb4c6aa9d8 | 6cadde2c3b37bd3eb403e56e61675ee44e884c5b | refs/heads/master | 2020-03-15T02:53:17.775896 | 2018-04-29T23:38:07 | 2018-04-29T23:38:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,386 | py | #!/usr/bin/python
import re
import sys
import io
import time
import logging
import threading
import getopt
import os
import subprocess
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
import binascii
from WIZMSGHandler import WIZMSGHandler
from WIZUDPSock import WIZUDPSock
from wizsocket.TCPClient import TCPClient
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot
OP_SEARCHALL = 1
OP_SETIP = 2
OP_CHECKIP = 3
OP_FACTORYRESET = 4
OP_GETDETAIL = 5
OP_FWUP = 6
SOCK_CLOSE_STATE = 1
SOCK_OPENTRY_STATE = 2
SOCK_OPEN_STATE = 3
SOCK_CONNECTTRY_STATE = 4
SOCK_CONNECT_STATE = 5
idle_state = 1
datasent_state = 2
class FWUploadThread(QThread):
    """QThread that uploads a firmware image to a WIZnet device.

    Control-plane commands travel over ``conf_sock`` (UDP broadcast or TCP
    unicast — detected from the socket object's repr); the firmware bytes
    are then streamed over a fresh TCPClient connection in 1024-byte
    chunks, waiting for a 2-byte acknowledgement after each chunk.
    Progress and results are reported through the Qt signals below.
    """
    # Progress stage (1..8), e.g. for a UI progress bar.
    uploading_size = pyqtSignal(int)
    # 1 on success, -1 on failure.
    upload_result = pyqtSignal(int)
    # Negative error codes for connection/protocol failures.
    error_flag = pyqtSignal(int)
    def __init__(self, conf_sock, dest_mac, idcode, binaryfile, ipaddr, port):
        """Store the target/device parameters; no I/O is performed here.

        Args:
            conf_sock: configuration socket (UDP broadcast or TCP unicast).
            dest_mac: MAC address string of the target device.
            idcode: device password sent with the "PW" command.
            binaryfile: path of the firmware image to send.
            ipaddr, port: device address used for TCP-unicast configuration.
        """
        QThread.__init__(self)
        self.dest_mac = None
        self.bin_filename = None
        self.fd = None
        self.data = None
        self.client = None
        self.timer1 = None
        self.istimeout = 0
        self.serverip = None
        self.serverport = None
        self.sentbyte = 0
        self.dest_mac = dest_mac
        self.bin_filename = binaryfile
        self.idcode = idcode
        self.error_noresponse = 0
        self.retrycheck = 0
        self.conf_sock = conf_sock
        # The socket object's repr is used to tell TCP from UDP later
        # (checked with `'TCP' in self.what_sock`).
        self.what_sock = '%s' % self.conf_sock
        # socket config (for TCP unicast)
        self.ip_addr = ipaddr
        self.port = port
        self.cli_sock = None
    def setparam(self):
        """Read the whole firmware image into memory and reset the send cursor."""
        self.fd = open(self.bin_filename, "rb")
        self.data = self.fd.read(-1)
        self.remainbytes = len(self.data)
        self.curr_ptr = 0
        self.fd.close()
        sys.stdout.write("Firmware file size: %r\n\n" % len(self.data))
    def myTimer(self):
        """threading.Timer callback: flag that the per-chunk ack wait expired."""
        # sys.stdout.write('timer1 timeout\r\n')
        self.istimeout = 1
    def jumpToApp(self):
        """Ask the device to switch to application-boot mode ("AB" command)."""
        cmd_list = []
        # boot mode change: App boot mode
        cmd_list.append(["MA", self.dest_mac])
        cmd_list.append(["PW", self.idcode])
        cmd_list.append(["AB", ""])
        if 'TCP' in self.what_sock:
            self.wizmsghangler = WIZMSGHandler(self.conf_sock, cmd_list, 'tcp', OP_FWUP, 2)
        elif 'UDP' in self.what_sock:
            self.wizmsghangler = WIZMSGHandler(self.conf_sock, cmd_list, 'udp', OP_FWUP, 2)
        self.resp = self.wizmsghangler.run()
        self.uploading_size.emit(1)
        self.msleep(1000)
    def sendCmd(self, command):
        """Send an upload request carrying the image size; retry up to 4 times.

        The device's reply is left in self.resp.
        """
        cmd_list = []
        self.resp = None
        # Send FW UPload request message
        cmd_list.append(["MA", self.dest_mac])
        cmd_list.append(["PW", self.idcode])
        cmd_list.append([command, str(len(self.data))])
        if 'TCP' in self.what_sock:
            self.wizmsghangler = WIZMSGHandler(self.conf_sock, cmd_list, 'tcp', OP_FWUP, 2)
        elif 'UDP' in self.what_sock:
            self.wizmsghangler = WIZMSGHandler(self.conf_sock, cmd_list, 'udp', OP_FWUP, 2)
        # sys.stdout.write("cmd_list: %s\r\n" % cmd_list)
        # if no reponse from device, retry for several times.
        for i in range(4):
            # self.resp = self.wizmsghangler.parseresponse()
            self.resp = self.wizmsghangler.run()
            # NOTE(review): `is not ''` is an identity check on a literal; it
            # relies on CPython string interning and should be `!= ''`.
            if self.resp is not '':
                break
            self.msleep(500)
        self.uploading_size.emit(2)
    def run(self):
        """Thread body: negotiate the upload, then stream the image.

        Sequence: jump the device to app mode, send 'FW' with the image
        size, parse the returned "<ip>:<port>" of the device's data
        listener, connect a TCPClient there, and send 1024-byte chunks,
        waiting for a 2-byte ack after each (2 s timeout via
        threading.Timer).  Emits upload_result(1/-1) at the end.
        """
        self.setparam()
        self.jumpToApp()
        if 'UDP' in self.what_sock:
            pass
        elif 'TCP' in self.what_sock:
            # Re-establish the unicast config connection for the 'FW' request.
            self.sock_close()
            self.SocketConfig()
        self.sendCmd('FW')
        if self.resp is not '' and self.resp is not None:
            resp = self.resp.decode('utf-8')
            # print('resp', resp)
            # Reply format: "<ip>:<port>" of the device's firmware listener.
            params = resp.split(':')
            sys.stdout.write('Dest IP: %s, Dest Port num: %r\r\n' % (params[0], int(params[1])))
            self.serverip = params[0]
            self.serverport = int(params[1])
            self.uploading_size.emit(3)
        else:
            print('No response from device. Check the network or device status.')
            self.error_flag.emit(-1)
            self.error_noresponse = -1
        # NOTE(review): `params` is unbound when there was no response; the
        # bare except below silently swallows the resulting NameError.
        try:
            self.client = TCPClient(2, params[0], int(params[1]))
        except:
            pass
        try:
            if self.error_noresponse < 0:
                pass
            else:
                # sys.stdout.write("%r\r\n" % self.client.state)
                # State machine: drive the client through CLOSE -> OPEN ->
                # CONNECT, then stream chunks; at most 7 iterations.
                while True:
                    if self.retrycheck > 6:
                        break
                    self.retrycheck += 1
                    if self.client.state is SOCK_CLOSE_STATE:
                        if self.timer1 is not None:
                            self.timer1.cancel()
                        cur_state = self.client.state
                        try:
                            self.client.open()
                            # sys.stdout.write('1 : %r\r\n' % self.client.getsockstate())
                            # sys.stdout.write("%r\r\n" % self.client.state)
                            if self.client.state is SOCK_OPEN_STATE:
                                sys.stdout.write('[%r] is OPEN\r\n' % (self.serverip))
                                # sys.stdout.write('[%r] client.working_state is %r\r\n' % (self.serverip, self.client.working_state))
                                self.msleep(500)
                        except Exception as e:
                            sys.stdout.write('%r\r\n' % e)
                    elif self.client.state is SOCK_OPEN_STATE:
                        self.uploading_size.emit(4)
                        cur_state = self.client.state
                        try:
                            self.client.connect()
                            # sys.stdout.write('2 : %r' % self.client.getsockstate())
                            if self.client.state is SOCK_CONNECT_STATE:
                                sys.stdout.write('[%r] is CONNECTED\r\n' % (self.serverip))
                                # sys.stdout.write('[%r] client.working_state is %r\r\n' % (self.serverip, self.client.working_state))
                        except Exception as e:
                            sys.stdout.write('%r\r\n' % e)
                    elif self.client.state is SOCK_CONNECT_STATE:
                        # if self.client.working_state == idle_state:
                        # sys.stdout.write('3 : %r' % self.client.getsockstate())
                        try:
                            self.uploading_size.emit(5)
                            # NOTE(review): `is not 0` / `is 1` below are
                            # identity checks on int literals; use `!=` / `==`.
                            while self.remainbytes is not 0:
                                if self.client.working_state == idle_state:
                                    if self.remainbytes >= 1024:
                                        msg = bytearray(1024)
                                        msg[:] = self.data[self.curr_ptr:self.curr_ptr+1024]
                                        self.client.write(msg)
                                        self.sentbyte = 1024
                                        # sys.stdout.write('1024 bytes sent from at %r\r\n' % (self.curr_ptr))
                                        sys.stdout.write('[%s] 1024 bytes sent from at %r\r\n' % (self.serverip, self.curr_ptr))
                                        self.curr_ptr += 1024
                                        self.remainbytes -= 1024
                                    else :
                                        self.uploading_size.emit(6)
                                        msg = bytearray(self.remainbytes)
                                        msg[:] = self.data[self.curr_ptr:self.curr_ptr+self.remainbytes]
                                        self.client.write(msg)
                                        # sys.stdout.write('Last %r byte sent from at %r \r\n' % (self.remainbytes, self.curr_ptr))
                                        sys.stdout.write('[%s] Last %r byte sent from at %r \r\n' % (self.serverip, self.remainbytes, self.curr_ptr))
                                        self.curr_ptr += self.remainbytes
                                        self.remainbytes = 0
                                        # NOTE(review): remainbytes was just
                                        # zeroed, so this stores 0 — likely
                                        # meant to run before the line above.
                                        self.sentbyte = self.remainbytes
                                    # Await the device ack; myTimer() flags a
                                    # timeout after 2 seconds.
                                    self.client.working_state = datasent_state
                                    self.timer1 = threading.Timer(2.0, self.myTimer)
                                    self.timer1.start()
                                elif self.client.working_state == datasent_state:
                                    # sys.stdout.write('4 : %r' % self.client.getsockstate())
                                    # A non-zero 2-byte reply acknowledges the chunk.
                                    response = self.client.readbytes(2)
                                    if response is not None:
                                        if int(binascii.hexlify(response), 16):
                                            self.client.working_state = idle_state
                                            self.timer1.cancel()
                                            self.istimeout = 0
                                    else:
                                        print('ERROR: No response from device. Stop FW upload...')
                                        self.client.close()
                                        self.upload_result.emit(-1)
                                        self.terminate()
                                    if self.istimeout is 1:
                                        self.istimeout = 0
                                        self.client.working_state = idle_state
                                        self.client.close()
                                        self.upload_result.emit(-1)
                                        self.terminate()
                            self.uploading_size.emit(7)
                        except Exception as e:
                            sys.stdout.write('%r\r\n' % e)
                            response = ""
                        break
                print('retrycheck: %d' % self.retrycheck)
                if self.retrycheck > 6 or self.error_noresponse < 0:
                    sys.stdout.write('Device [%s] firmware upload fail.\r\n' % (self.dest_mac))
                    self.upload_result.emit(-1)
                elif self.error_noresponse >= 0:
                    self.uploading_size.emit(8)
                    sys.stdout.write('Device [%s] firmware upload success!\r\n' % (self.dest_mac))
                    self.upload_result.emit(1)
                    # send FIN packet
                    self.msleep(500)
                    self.client.shutdown()
                    if 'TCP' in self.what_sock:
                        self.conf_sock.shutdown()
        except Exception as e:
            self.error_flag.emit(-3)
            sys.stdout.write('%r\r\n' % e)
        finally:
            pass
    def sock_close(self):
        """Shut down the data-client and configuration sockets, if open."""
        # close existing connections (send FIN)
        if self.cli_sock is not None:
            if self.cli_sock.state is not SOCK_CLOSE_STATE:
                self.cli_sock.shutdown()
        if self.conf_sock is not None:
            self.conf_sock.shutdown()
    def tcpConnection(self, serverip, port):
        """Open a TCPClient to serverip:port, retrying up to 7 state steps.

        Returns the connected TCPClient, or None when the connection fails.
        """
        retrynum = 0
        self.cli_sock = TCPClient(2, serverip, port)
        print('sock state: %r' % (self.cli_sock.state))
        while True:
            if retrynum > 6:
                break
            retrynum += 1
            if self.cli_sock.state is SOCK_CLOSE_STATE:
                self.cli_sock.shutdown()
                cur_state = self.cli_sock.state
                try:
                    self.cli_sock.open()
                    if self.cli_sock.state is SOCK_OPEN_STATE:
                        print('[%r] is OPEN' % (serverip))
                        time.sleep(0.5)
                except Exception as e:
                    sys.stdout.write('%r\r\n' % e)
            elif self.cli_sock.state is SOCK_OPEN_STATE:
                cur_state = self.cli_sock.state
                try:
                    self.cli_sock.connect()
                    if self.cli_sock.state is SOCK_CONNECT_STATE:
                        print('[%r] is CONNECTED' % (serverip))
                except Exception as e:
                    sys.stdout.write('%r\r\n' % e)
            elif self.cli_sock.state is SOCK_CONNECT_STATE:
                break
        if retrynum > 6:
            sys.stdout.write('Device [%s] TCP connection failed.\r\n' % (serverip))
            return None
        else:
            sys.stdout.write('Device [%s] TCP connected\r\n' % (serverip))
            return self.cli_sock
    def SocketConfig(self):
        """(Re)create the configuration socket: UDP broadcast or TCP unicast."""
        # Broadcast
        if 'UDP' in self.what_sock:
            self.conf_sock = WIZUDPSock(5000, 50001)
            self.conf_sock.open()
        # TCP unicast
        elif 'TCP' in self.what_sock:
            print('upload_unicast: ip: %r, port: %r' % (self.ip_addr, self.port))
            self.conf_sock = self.tcpConnection(self.ip_addr, self.port)
            if self.conf_sock is None:
                # self.isConnected = False
                print('TCP connection failed!: %s' % self.conf_sock)
                self.error_flag.emit(-3)
                self.terminate()
            else:
                self.isConnected = True | [
"kyi8907@gmail.com"
] | kyi8907@gmail.com |
26ab05607c4b32f1e825bba9fc453b0a897db9b6 | cdf9bba82c5e42758664e1eeb3e157340c29c500 | /generator/test.py | b9f9245ed294e3d2fa2ce52879a97137638525f9 | [] | no_license | mex3/fizmat-a | ce2802035dd045fcf0f6bfe8d6388ed88c6c2af5 | be90332a518f0c3b2113dd967a63f4a056f937ab | refs/heads/master | 2021-01-21T13:41:35.050273 | 2016-05-14T12:13:16 | 2016-05-14T12:13:16 | 55,837,428 | 1 | 4 | null | 2016-05-14T12:12:24 | 2016-04-09T09:59:23 | Python | UTF-8 | Python | false | false | 82 | py | inputq = open('test.txt', 'r')
# Echo the first line of test.txt (file handle opened on the line above),
# then read one line from stdin; it is echoed by the following print.
s=inputq.readline()
print (s)
ss=input()
print(ss) | [
"gurovic@gmail.com"
] | gurovic@gmail.com |
31740dec5203fccc5a4171d951f24d5a9e15aa2a | 59a4a6b9a18778566307af6a281d86c3f79531e0 | /Trening/TreningDjango/asgi.py | 644eee76517ec9b2d15360ca1ad815b2860e5354 | [] | no_license | Kasuczi/WebDev-Back-End | d8448ee2ed052518d95d54953f2672e19d997ea7 | f3759fbd11d4d572fb6cf960d59dbd6e84692f8f | refs/heads/master | 2021-05-26T03:49:11.889975 | 2020-04-08T09:19:11 | 2020-04-08T09:19:11 | 254,039,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for TreningDjango project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TreningDjango.settings')
application = get_asgi_application()
| [
"janikowski.mateusz96@gmail.com"
] | janikowski.mateusz96@gmail.com |
690fe2ffb43edf1febae8410ba150129ce00cce0 | 3419067388879d8a6542df01cb0278ae90b021a2 | /py100day/Day01-15/Day04/code/for2.py | 22c96886304fa47ecee9b5c39f3f60d9a36a21f1 | [] | no_license | oweson/python-river-master | faa31c5248e297a92054cc302e213e2b37fb8bd5 | cf9e99e611311b712465eb11dec4bb8f712929b2 | refs/heads/master | 2021-06-21T15:47:01.755957 | 2019-10-02T00:08:05 | 2019-10-02T00:08:05 | 205,607,518 | 0 | 0 | null | 2021-06-10T21:55:20 | 2019-08-31T23:39:55 | Python | UTF-8 | Python | false | false | 173 | py | """
用for循环实现1~100之间的偶数求和
Version: 0.1
Author: 骆昊
Date: 2018-03-01
"""
# Sum the even numbers from 1 to 100: range(2, 101, 2) steps by 2,
# visiting only the even values.
# The accumulator is named 'total' so it no longer shadows built-in sum().
total = 0
for x in range(2, 101, 2):
    total += x
print(total)
| [
"570347720@qq.com"
] | 570347720@qq.com |
1026e1d0f5add5bf40edc076405f2e409f26c5ce | 2f2682f778512a75a1ff49d7e267c2f4d355c48e | /geoprocess/controllers.py | 7be119b34c9b20b609770261e464a475b5996a9b | [] | no_license | beatcovid/geoprocess | 4a44f46b900c2e0ffed0dab18008e7884e759e3b | c2a7b1e4ede06583679db9dadebe2066b0274e54 | refs/heads/master | 2023-04-13T13:45:48.572825 | 2020-05-27T03:08:14 | 2020-05-27T03:08:14 | 260,215,049 | 0 | 1 | null | 2023-03-29T00:36:19 | 2020-04-30T13:11:38 | Python | UTF-8 | Python | false | false | 4,538 | py | import csv
import email.utils
import json
import logging
import os
import sys
from datetime import datetime
from pprint import pprint
from dotenv import load_dotenv
from pymongo import MongoClient
from geoprocess.find_psma import find_lga, find_sa3
from geoprocess.google_geo import google_geocode, lookup_placeid, place_autocomplete
from geoprocess.settings import MONGO_CONNECT_URL
load_dotenv()
logger = logging.getLogger("geoprocess")
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
mongo_connection = MongoClient(MONGO_CONNECT_URL)
def flatten_google_place(place, prefix):
    """Flatten a Google geocoding result's address_components into a dict.

    Every component type except the generic "political" tag becomes a key
    of the form "<prefix>_<type>" mapped to that component's short name.
    """
    flattened = {}
    for comp in place["address_components"]:
        short_name = comp["short_name"]
        for kind in comp["types"]:
            if kind != "political":
                flattened["%s_%s" % (prefix, kind)] = short_name
    return flattened
def get_granuality(flat_geo, prefix):
    """Return the finest location granularity present in *flat_geo*.

    Checks prefixed keys from most to least specific and returns the bare
    component name (the key minus "<prefix>_"); defaults to "country" when
    none of the known keys is present.
    """
    suffixes = (
        "postal_code",
        "locality",
        "administrative_area_level_2",
        "administrative_area_level_1",
        "country",
    )
    for suffix in suffixes:
        if f"{prefix}_{suffix}" in flat_geo:
            return suffix
    return "country"
def update_geoplots():
    """Geocode not-yet-processed survey documents and write the results back.

    For each Mongo document lacking _geo_processed=True, looks up the
    Google place for its 'userdetail_city' / 'travel_country' fields,
    flattens the address components, attaches Australian LGA/SA3 region
    ids and a granularity marker, then $set-updates the document.
    """
    db = mongo_connection.prod_covid19_api_docdb.instances
    # Only documents not yet geo-processed.
    query = {"_geo_processed": {"$ne": True}}
    processed = 0
    updated = 0
    place_fields = ["userdetail_city", "travel_country"]
    for a in db.find(query).sort("_submission_time", -1):
        for place_field in place_fields:
            if place_field in a:
                # Skip non-string values and multi-word entries (only
                # single-token values are treated as place ids here).
                if not type(a[place_field]) is str:
                    continue
                if " " in a[place_field]:
                    continue
                try:
                    p = lookup_placeid(a[place_field])
                except Exception as e:
                    logger.error("Could not find place id for: {}".format(a[place_field]))
                    logger.error(e)
                    continue
                p_flat = flatten_google_place(p, place_field)
                # Australian places with suburb/postcode detail also get
                # LGA and SA3 statistical-region ids (unless already set).
                if (
                    place_field + "_country" in p_flat
                    and p_flat[place_field + "_country"] == "AU"
                    and (
                        place_field + "_locality" in p_flat
                        or place_field + "_postal_code" in p_flat
                    )
                ):
                    if not place_field + "_lga_id" in a:
                        lgs = find_lga(
                            p["geometry"]["location"]["lat"],
                            p["geometry"]["location"]["lng"],
                        )
                        if lgs:
                            p_flat[place_field + "_lga_id"] = lgs
                    if not place_field + "_sa3_id" in a:
                        sa3 = find_sa3(
                            p["geometry"]["location"]["lat"],
                            p["geometry"]["location"]["lng"],
                        )
                        if sa3:
                            p_flat[place_field + "_sa3_id"] = sa3
                p_flat[place_field + "_granuality"] = get_granuality(p_flat, place_field)
                # NOTE(review): the hard-coded "userdetail_city_postal_code"
                # key below looks like it should use place_field + "_postal_code";
                # as written it can trip a KeyError for travel_country when the
                # admin-area key is absent — confirm intent.
                if (
                    place_field + "_country" in p_flat
                    and p_flat[place_field + "_country"] == "AU"
                    and (
                        place_field + "_administrative_area_level_1" in p_flat
                        or "userdetail_city_postal_code" in p_flat
                    )
                ):
                    p_flat[place_field + "_state"] = p_flat[
                        place_field + "_administrative_area_level_1"
                    ]
                p_flat["_geo_processed"] = True
                pprint(p_flat)
                try:
                    db.update_one(
                        {"_id": a["_id"]}, {"$set": p_flat},
                    )
                except Exception as e:
                    logger.error(
                        "Db error on updating place_id: {} {}".format(
                            a["_id"], place_field
                        )
                    )
                    logger.error(e)
                    continue
                logger.info(
                    "Updated {} {} -> {}".format(place_field, a["_id"], a[place_field])
                )
                updated += 1
        processed += 1
    print("Processed {} and updated {}".format(processed, updated))
| [
"nc9@protonmail.com"
] | nc9@protonmail.com |
36e68501df2d8e58be3d3faf5755ed0cc380b628 | 854660b48f63711489eabd96a6d3cbf69f4c1144 | /CodingBat/WarmUp1/diff21/diff21.py | 1ccef4bbbe8437264f4935a8c30561a58a224abf | [] | no_license | SametSahin10/CodingBat-Exercises | c9727e7d38defeb927d3684263d0d7655b8d8afa | e7371a8b8c71706872c8ba7a0d140d19e7ce20dc | refs/heads/master | 2021-05-10T09:12:21.039238 | 2018-03-05T20:05:54 | 2018-03-05T20:05:54 | 118,917,328 | 2 | 0 | null | 2018-03-05T20:05:55 | 2018-01-25T13:38:52 | Java | UTF-8 | Python | false | false | 92 | py | def diff21(n):
    """Return the absolute difference between n and 21, doubled if n > 21."""
    if(n > 21):
        return abs(n-21) * 2
    else:
        return abs(n-21)
| [
"enesdemirag1@hotmail.com"
] | enesdemirag1@hotmail.com |
6c7fbde29cd1fec7bc805412befb2db644f4048d | a5bb696d63052d6a07d0ca1c48f415e5a5308c8d | /Lesson11/Lesson11_hw02.py | e1d3197e9faaa59f3ec12f3c872a4b8b592b522d | [] | no_license | VitaliiRomaniukKS/python_course | 258af6f1a925c5cbc9207ddf0958f30652e84ff8 | a530d34ad18c6fcb8e4b573376a21fe34f653f77 | refs/heads/master | 2020-09-03T17:27:27.892224 | 2020-03-15T14:44:57 | 2020-03-15T14:44:57 | 219,520,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | # Распарсить файл с информацией о платежах, но использовать только те,
# Keep only records whose payment type is "out"; not every line is
# guaranteed to be well-formed.
# Questions answered: who made the most purchases?  Who spent the largest total?
out_trans_list = []
with open('payments.txt', 'r') as payments_f:
    for line in payments_f:
        new_trans = line.split(';')
        # Well-formed records have exactly 5 ';'-separated fields with the
        # payment type in the fourth position.
        if (len(new_trans) == 5) and (new_trans[-2] == 'out'):
            # print(new_trans)
            # NOTE(review): list.remove drops the first element *equal* to
            # new_trans[4], which may not be index 4 if fields repeat;
            # `del new_trans[4]` would be unambiguous.
            new_trans.remove(new_trans[4])
            out_trans_list.append(new_trans)
print(out_trans_list)
payments_d = {}
# Group amounts by payer name: {name: [amount, ...]}.
for i in out_trans_list:
    # Assumes the amount field looks like "123,45 <currency>" with a comma
    # decimal separator — verify against payments.txt.
    summa = float (i[1].split()[0].replace(',','.'))
    if i[0] not in payments_d:
        payments_d[i[0]] = [summa]
    else:
        payments_d[i[0]].append(summa)
print()
print(payments_d)
# Running [name, value] maxima: payment count, total amount, single largest payment.
max_pay = [0,0]
max_sum = [0,0]
max_price = [0,0]
for name, p_count in payments_d.items():
    if len(p_count) > max_pay[1]:
        max_pay = [name,len(p_count)]
    if sum(p_count) > max_sum[1]:
        max_sum = [name, sum(p_count)]
    if max(p_count) > max_price[1]:
        max_price = [name,max(p_count)]
print(max_pay)
print(max_sum)
print(max_price)
| [
"noreply@github.com"
] | VitaliiRomaniukKS.noreply@github.com |
c0bccab0f33fe2f6323731cddd1742ba4d45275c | aa410a95773aeea73e75f0e701db5cdc0eda890b | /weapons.py | cf6e4eb05ba6ad8a453e07637018051ed6eac5f8 | [] | no_license | predominant/zombsole | ccc00893b7739c5341c43fc28375415fa628b885 | a04ff40a144cb1f63d8aa29ccf0b06ecccc2bc7f | refs/heads/master | 2021-01-21T19:29:05.322551 | 2014-03-26T05:38:15 | 2014-03-26T05:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # coding: utf-8
from core import Weapon
def _new_weapon_class(name, max_range, damage_range):
    '''Build and return a Weapon subclass with fixed name, range and damage.'''
    class _GeneratedWeapon(Weapon):
        def __init__(self):
            super(_GeneratedWeapon, self).__init__(name,
                                                   max_range,
                                                   damage_range)
    _GeneratedWeapon.__name__ = name
    return _GeneratedWeapon
# Concrete weapon types, built as (name, max_range, (min_damage, max_damage)).
ZombieClaws = _new_weapon_class('ZombieClaws', 1.5, (5, 10))
Knife = _new_weapon_class('Knife', 1.5, (5, 10))
Axe = _new_weapon_class('Axe', 1.5, (75, 100))
Gun = _new_weapon_class('Gun', 6, (10, 50))
Rifle = _new_weapon_class('Rifle', 10, (25, 75))
Shotgun = _new_weapon_class('Shotgun', 3, (75, 100))
| [
"fisadev@gmail.com"
] | fisadev@gmail.com |
582bb899d0407eb2070b66f080e8e55395136ae0 | 5d9d88881abd73cc841f1bc3a523ebbb1c46f6b4 | /DeepTrainer/DeepTrainer/state_tracker.py | 59f2d525260bf2d1f5fd428196ae7cf7a51ea89f | [] | no_license | zhuMingXu/CarSimRL | 3f6e92d73a6eacc9fc311bc5c71e6e909fe79335 | bcbb7654f1b68b00edb00ccd6d1480a7db9e6598 | refs/heads/master | 2022-04-13T14:04:56.596481 | 2017-02-15T11:57:03 | 2017-02-15T11:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,872 | py | # -*- coding: utf-8 -*-
import constants as CONST
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class StateTracker():
    """Rolling history of lidar scans blended into one weighted state matrix.

    frame_history holds the last CONST.FRAME_HISTORY_SIZE scans; update()
    overwrites the oldest slot and rebuilds `state` as a weighted sum of
    all stored frames using CONST.HISTORY_WEIGHTS.
    """
    def __init__(self):
        #keeping track of oldest state so I can remove it
        #before inserting the most recent state
        self.frame_history = np.zeros(CONST.FRAME_HISTORY_SIZE)
        self.oldest_state_idx = 0
        self.idx_old_to_new = [i for i in range(len(self.frame_history))] #[0,1,2,..n]
        self.state = np.zeros(CONST.STATE_MATRIX_SIZE)
        #initalizing gray scale state matrix
        for scan in self.frame_history:
            self.state += scan
    def reset(self):
        """Clear the frame history and state matrix back to all zeros."""
        self.frame_history = np.zeros(self.frame_history.shape)
        self.oldest_state_idx = 0
        self.idx_old_to_new = [i for i in range(len(self.frame_history))] #[0,1,2,..n]
        self.state = np.zeros(self.state.shape)
        #initalizing gray scale state matrix
        for scan in self.frame_history:
            self.state += scan
    # new_scan is a 2d numpy array representing the lidar one_hot array
    def update(self, new_scan):
        """Insert new_scan over the oldest frame and rebuild the weighted state."""
        #plt.imshow(new_scan, cmap=plt.cm.hot)
        #new_scan = new_scan.flatten()
        # sutract oldest scan fron state
        # self.state -= self.frame_history[self.oldest_state_idx]
        # superimpose new_scan into state matrix
        # self.state += new_scan
        # replace oldest scan with new_scan
        self.frame_history[self.oldest_state_idx] = new_scan
        self.state = np.zeros(self.state.shape)
        weight_idx = 0
        for frame in self.frame_history:
            self.state += frame*CONST.HISTORY_WEIGHTS[weight_idx]
            weight_idx += 1
        # increment olderst_scan_idx
        # NOTE(review): this actually *decrements* (mod n) despite the
        # comment above — confirm which direction is intended.
        self.oldest_state_idx = (self.oldest_state_idx - 1) % len(self.frame_history)
        # NOTE(review): the loop below rebinds the loop variable only; it
        # never modifies idx_old_to_new and is effectively a no-op.
        for idx in self.idx_old_to_new:
            idx = (idx + 1) % len(self.idx_old_to_new)
    def __plotFrame(self, data):
        """Show one frame with a legend entry per distinct value level."""
        values = np.unique(data.ravel())
        im = plt.imshow(data, interpolation='none')
        colors = [im.cmap(im.norm(value)) for value in values]
        patches = [mpatches.Patch(color=colors[i], label="Level {l}".format(l=values[i])) for i in range(len(values))]
        plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
        plt.show()
    def plotState(self, plt_state=True, plt_full_history=False):
        """Plot the blended state and, optionally, every stored frame."""
        if plt_state:
            self.__plotFrame(self.state)
        if plt_full_history:
            count = len(self.idx_old_to_new)-1
            for idx in self.idx_old_to_new:
                print("Frame T-{0}: ".format(count))
                self.__plotFrame(self.frame_history[idx])
                count -= 1
| [
"joshua.d.patterson1@gmail.com"
] | joshua.d.patterson1@gmail.com |
066a9bb3b23255dc5f349786bfe1e4b093454a5a | 238a0dd7c1bd72b8e241798c8d31ff3cbb2c0c90 | /caesar.py | 36d0e4ed7074f13b4cd5f3fa51e5d21e4d8bb64d | [] | no_license | Procerus/caesar-converter | 7881666ae638ef288af873377436fd482797182c | 92a8ce5e764e552351e51048384747683a634c2e | refs/heads/master | 2020-09-19T21:21:34.310140 | 2019-11-26T23:00:48 | 2019-11-26T23:00:48 | 224,301,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | # This program takes in a user input when running the program as an argument
# that number is a key that will shift the associated text that the user enters
# next and shifts every letter that amount
import sys
def main(argv):
    """Interactive Caesar-cipher driver.

    Expects exactly one command-line argument: an integer key k.  Prompts
    for plaintext and prints the ciphertext with every ASCII letter rotated
    k positions (case preserved; other characters pass through unchanged).
    Returns 0 after printing usage on a bad command line.
    """
    # Validate the command line up front.  The original probed sys.argv
    # inside try/except IndexError and evaluated an undefined strncmp()/true
    # when the key was 0 (a NameError); int() can also raise ValueError on
    # non-numeric input.  Both cases now print the usage message instead.
    if len(argv) != 2:
        print("Usage: python " + argv[0] + " k")
        return 0
    try:
        key = int(argv[1])
    except ValueError:
        print("Usage: python " + argv[0] + " k")
        return 0
    # Reduce the key so arbitrarily large (or negative) shifts wrap around
    # the 26-letter alphabet; Python's % always yields a value in 0..25.
    key = key % 26
    name = input("plaintext: ")
    print("ciphertext: ", end="")
    for ch in name:
        if "a" <= ch <= "z":
            # Rotate within the lowercase range.
            print(chr((ord(ch) - ord("a") + key) % 26 + ord("a")), end="")
        elif "A" <= ch <= "Z":
            # Rotate within the uppercase range.
            print(chr((ord(ch) - ord("A") + key) % 26 + ord("A")), end="")
        else:
            # Non-letters are printed unchanged.
            print(ch, end="")
    print("")
main(sys.argv)
| [
"noreply@github.com"
] | Procerus.noreply@github.com |
2eac0fe3402f79f389178ebe792a10a16f7c1a4a | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /AGC001/AGC001c.py | 6f3fb7892212fb5a2683a833717ea55a344d0dfd | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #AGC001c
def main():
    # Contest-solution scaffold only: fast stdin reads and a deep recursion
    # limit; the actual AGC001-C logic was never implemented.
    import sys
    input=sys.stdin.readline
    sys.setrecursionlimit(10**6)
    # map(int, input().split())
if __name__ == '__main__':
main() | [
"kurvan1112@gmail.com"
] | kurvan1112@gmail.com |
c058ffd30adadb95fe7dfaf10ca143450a96c2c5 | 445720e21dce60d8504daeb68a97525343a95639 | /PyCybos/pycybos/cybos_EurexJpBid.py | 2730aced35e282a67acf7d661199f2898493b0d1 | [] | no_license | QuantTraderEd/AQTrader | 60826cd71b0fa568852f23be9daeb7f65a13e845 | c65ecba53beebce500a2e9cde0bd54374851e980 | refs/heads/live_patch | 2021-06-01T16:40:30.350977 | 2020-05-31T07:06:56 | 2020-05-31T07:06:56 | 145,120,057 | 2 | 0 | null | 2020-02-06T13:00:22 | 2018-08-17T12:51:13 | Python | UTF-8 | Python | false | false | 548 | py | # -*- coding: utf-8 -*-
"""
Created on Sat May 31 14:14:25 2014
@author: assa
"""
from cybos_source import Source
class EurexJpBid(Source):
    """ subscribe index option quote """
    def __init__(self, code = None):
        """Bind to the Cybos 'CpSysDib.EurexJpbid.1' COM service.

        code -- instrument code; when given it is passed to the COM
        object as input field '0'.  With code=None nothing is subscribed
        here.  (This module is Python 2 code: OnSignal relies on xrange.)
        """
        super(EurexJpBid, self).__init__('CpSysDib.EurexJpbid.1')
        # Stream type tag read by consumers of Source; presumably
        # 'TAQ' = trade-and-quote -- confirm against cybos_source.
        self.type = 'TAQ'
        # Latest header snapshot; stays None until the first OnSignal fires.
        self.data = None
        if code: self.SetInputValue('0',code)
        pass
    def OnSignal(self):
        """COM event callback: snapshot header fields 0..13, then notify observers.

        NOTE(review): the fixed count of 14 header fields is assumed to
        match the EurexJpbid layout -- confirm against the Cybos API docs.
        """
        self.data = []
        for i in xrange(14): self.data.append(self.com.GetHeaderValue(i))
        self.Notify()
pass | [
"hyojkim79@gmail.com"
] | hyojkim79@gmail.com |
a61174c4d8077eef4dc25a83b1c32e6f227bcf5d | f0f2d8cb16d494443a678ea24c04be95d1cbf824 | /Time_table_generator_/py_ui/Room.py | e40c89794cad5314c9a7d7c2e008ddbbc15889b5 | [] | no_license | Automatic-Timetable-Generator/ATG | 314e09b2becef67913df0744c094bca4d20635f0 | 6b1187e0be434346bfdd1a61a30bb57718fb0cbc | refs/heads/master | 2021-02-17T10:51:00.604358 | 2020-04-08T07:30:30 | 2020-04-08T07:30:30 | 245,091,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,306 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'room.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated UI class for the 'Room' dialog (from room.ui).

    Builds a fixed-size 716x553 dialog with a name field, a
    Lecture/Laboratory type selector, a schedule table view and
    Finish/Cancel buttons.  Regenerating the .ui file overwrites this
    code (see the module header warning), so prefer subclassing over
    editing it by hand.
    """
    def setupUi(self, Dialog):
        """Create and lay out all child widgets on *Dialog* (a QDialog)."""
        Dialog.setObjectName("Dialog")
        # Equal minimum and maximum sizes pin the dialog to exactly 716x553.
        Dialog.resize(716, 553)
        Dialog.setMinimumSize(QtCore.QSize(716, 553))
        Dialog.setMaximumSize(QtCore.QSize(716, 553))
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: name label/edit plus the type group, in a grid.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.gridLayout.setObjectName("gridLayout")
        self.lblName = QtWidgets.QLabel(Dialog)
        self.lblName.setObjectName("lblName")
        self.gridLayout.addWidget(self.lblName, 0, 0, 1, 1)
        self.lineEditName = QtWidgets.QLineEdit(Dialog)
        self.lineEditName.setObjectName("lineEditName")
        self.gridLayout.addWidget(self.lineEditName, 0, 1, 1, 1)
        # Room type selector: two mutually exclusive radio buttons in a group box.
        self.groupBox = QtWidgets.QGroupBox(Dialog)
        self.groupBox.setObjectName("groupBox")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.radioLec = QtWidgets.QRadioButton(self.groupBox)
        self.radioLec.setObjectName("radioLec")
        self.horizontalLayout_2.addWidget(self.radioLec)
        self.radioLab = QtWidgets.QRadioButton(self.groupBox)
        self.radioLab.setObjectName("radioLab")
        self.horizontalLayout_2.addWidget(self.radioLab)
        self.gridLayout.addWidget(self.groupBox, 0, 2, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # Central schedule table (model is attached by the caller).
        self.tableSchedule = QtWidgets.QTableView(Dialog)
        self.tableSchedule.setObjectName("tableSchedule")
        self.verticalLayout.addWidget(self.tableSchedule)
        # Bottom button row: Finish / Cancel with custom colours.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.btnFinish = QtWidgets.QPushButton(Dialog)
        self.btnFinish.setObjectName("btnFinish")
        self.horizontalLayout.addWidget(self.btnFinish)
        self.btnCancel = QtWidgets.QPushButton(Dialog)
        self.btnCancel.setObjectName("btnCancel")
        self.btnFinish.setStyleSheet('background-color:#833471;color:white;')
        self.btnCancel.setStyleSheet('background-color:#747d8c;color:white;')
        self.horizontalLayout.addWidget(self.btnCancel)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Assign all user-visible (translatable) strings."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Room"))
        self.lblName.setText(_translate("Dialog", "Name"))
        self.groupBox.setTitle(_translate("Dialog", "Type"))
        self.radioLec.setText(_translate("Dialog", "Lecture"))
        self.radioLab.setText(_translate("Dialog", "Laboratory"))
        self.btnFinish.setText(_translate("Dialog", "Finish"))
        self.btnCancel.setText(_translate("Dialog", "Cancel"))
| [
"noreply@github.com"
] | Automatic-Timetable-Generator.noreply@github.com |
203f04df3c3f6b979898621a354f1d50daec9fe6 | db01067e88324466ba4743e5e53cd53de609c342 | /04. Functions Basics Lab/01. Grades.py | 0f7ea110f53b23d04e17bca1968d4c40ba88f432 | [] | no_license | IlkoAng/-Python-Fundamentals-Softuni | 07eaf89d340b2e60214ab5f8e896629ae680dc4a | 01a112b13e84ab2f29e6fc4ed39f08f395d54429 | refs/heads/main | 2023-05-18T02:11:53.676763 | 2021-06-06T15:39:22 | 2021-06-06T15:39:22 | 371,475,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | def solve(grade):
if 2.00 <= grade <= 2.99:
return "Fail"
elif 3.00 <= grade <= 3.49:
return "Poor"
elif 3.50 <= grade <= 4.49:
return "Good"
elif 4.50 <= grade <= 5.49:
return "Very Good"
elif 5.50 <= grade <= 6.00:
return "Excellent"
grade_data = float(input())
print(solve(grade_data))
| [
"noreply@github.com"
] | IlkoAng.noreply@github.com |
9cc02e6a288eb047e372805fdff7b5c41409b6f1 | 7b71da9189de3358ef73b37a3083a56c1ab10772 | /robobench/calibration/pipette_calibration/classify.py | 3f968b169085ed7e6e911dc5b6a88230de32baef | [] | no_license | EndyLab/opentrons | 91ff3b8364c9b1746f7d073875651baa5efaf4c3 | 75d1789ad6ddef556a2c46e6608d5496f9f5ec7d | refs/heads/master | 2021-01-13T13:39:32.443502 | 2019-11-19T22:23:47 | 2019-11-19T22:23:47 | 76,410,678 | 8 | 0 | null | 2020-10-27T21:05:49 | 2016-12-14T00:42:02 | Roff | UTF-8 | Python | false | false | 1,890 | py | import cv2
import numpy as np
# classifies an array of imgs
# Default training-data folder; kept identical to the original hard-coded
# value so existing callers are unaffected (NOTE(review): machine-specific
# absolute path -- consider making it configurable at the call sites).
_DEFAULT_TRAINING_DIR = ("C:/Users/gohna/Documents/bioe reu/opentrons/robobench"
                         "/calibration/pipette_calibration/training")


def knn(img, k=5, training_dir=_DEFAULT_TRAINING_DIR):
    """Classify one digit image with a k-nearest-neighbours model.

    img          -- image array of a single digit; it is resized to 10x25
                    and flattened before matching (assumed single-channel,
                    like the training data -- TODO confirm).
    k            -- number of neighbours consulted by findNearest.
    training_dir -- folder holding 'general-samples.data' and
                    'general-responses.data' (new optional parameter; the
                    default preserves the original behaviour).

    Returns the predicted digit label as an int.

    Cleanup: the original also accumulated distances in a 'dists' list
    that was never read; that dead code is removed.
    """
    # Load the data we generated previously (one flattened 10x25 sample per row).
    samples = np.loadtxt(training_dir + '/general-samples.data').astype(np.float32)
    responses = np.loadtxt(training_dir + '/general-responses.data').astype(np.float32)
    responses = responses.reshape((responses.size, 1))
    # Train the KNN model (retrained from disk on every call, as before).
    knn_model = cv2.ml.KNearest_create()
    knn_model.train(samples, cv2.ml.ROW_SAMPLE, responses)
    # Normalise the query image to the training sample shape: 10x25 -> 1x250.
    img_scaled = cv2.resize(img, (10, 25))
    sample = np.float32(img_scaled.reshape((1, 250)))
    ret, results, neighbours, dist = knn_model.findNearest(sample, k)
    return int(results[0][0])
if __name__ == '__main__':
    # Manual smoke test: retrain the KNN model from the training folder and
    # classify one known sample taken from the '9' class directory.
    training_dir = "C:/Users/gohna/Documents/bioe reu/opentrons/robobench/calibration/pipette_calibration/training"
    # load the data we generated previously
    samples = np.loadtxt(training_dir+'/general-samples.data').astype(np.float32)
    responses = np.loadtxt(training_dir+'/general-responses.data').astype(np.float32)
    responses = responses.reshape((responses.size,1))
    # train the KNN model
    print("sample size", samples.shape,"response size:",responses.size)
    knn_model = cv2.ml.KNearest_create()
    knn_model.train(samples,cv2.ml.ROW_SAMPLE,responses)
    test = training_dir + '/9/DIGIT120207.jpg'
    img = cv2.imread(test)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Same preprocessing as knn(): scale to 10x25, flatten to 1x250 float32.
    img_scaled = cv2.resize(img, (10, 25))
    print(img_scaled.shape)
    sample = img_scaled.reshape((1,250))
    sample = np.float32(sample)
    print("img test size", sample.shape)
    ret, results, neighbours, dist = knn_model.findNearest(sample, k=2)
    matches = results==responses
    string = str(int((results[0][0])))
    # print(matches)
print(string) | [
"natagoh@stanford.edu"
] | natagoh@stanford.edu |
8652d45d2d668f0a709c1b4574844c3bdb0bca45 | e7e4943e67db51791de9f0dbd302a1e6bf6e7446 | /Prism_RayTracing.py | 6d3a97582d0740e14cc88a6a871a1655fb4c8a2d | [] | no_license | benchizhao/RayTracing-week1-4 | 7b8949cebd77db81323bbbb686a3a7c11f1eb392 | 2aa5bc00b5a497018d3e0e8fb37a967375c5e0d4 | refs/heads/master | 2022-11-08T00:42:07.001028 | 2020-06-29T03:59:00 | 2020-06-29T03:59:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,663 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 09:23:07 2020
This file needs three packages:numpy, matplotlib.pyplt, math.
This .py file simulate the behavior of the prism. The geometry of the prism is equilateral triangle.
To trace the ray, we use the state of ray to describe. To trace the ray while refracted by the
prism, the snell's law is used.
In the end we also plot the ray. This file did not take the full reflection into consideration,
if the incident angle is smaller than -2 degree, the outcoming ray will disappear.
@author: Benchi Zhao
"""
import numpy as np
import matplotlib.pyplot as plt
import math
class PrismTracing:
    """Trace a single light ray through an equilateral glass prism.

    The ray is stored as a sequence of states [x, z, theta] in self.state;
    each refraction at a prism face (Snell's law) appends a new state.
    Total internal reflection is not modelled, so keep the launch angle
    above roughly -2 degrees (see __init__ notes).
    """
    def __init__(self,x,z,theta):
        '''
        __init__ (self,x,z,theta)
        Gives the initial state of the ray.
        Parameters
        ------------
        self.x: float
            Initial x-position of the ray.
        self.z: float
            Initial z-position of the ray.
        self.theta: float
            The angle between the horizontal and the ray path (in degree).
            To avoid the bug, make sure the input value is greater than -2.
        self.n_air: float
            The refractive index of air.
        self.n_glass: float
            The refractive index of glass.
        self.state: list
            When ray interacting with the optical equipments, the ray state will change,
            all states are recorded in self.state.
        self.central_point: float
            Position of central point of the prism.
        self.side_length: float
            Length of each side of the prism.
        '''
        self.x = x
        self.z = z
        self.theta = theta
        self.n_air = 1.0
        self.n_glass = 1.5
        self.state = []
        self.central_point = 0
        self.side_length = 0
    def ray(self):
        '''
        ray(self)
        Append the initial ray state into the total ray state.
        '''
        ray_state = np.array([self.x,self.z,self.theta])
        self.state.append(ray_state)
    def prism(self,side_length,central_point):
        '''
        prism(self,side_length,central_point)
        Simulate the behavior of prism.
        Append the ray state into self.state after passing the prism.
        Parameters
        ------------
        side_length: float
            Length of each side of the prism.
        self.central_point: float
            Position of central point of the prism.
        '''
        self.central_point = central_point
        self.side_length = side_length
        # The ray incident into the prism
        incident_slope_1 = np.tan(np.deg2rad(self.state[-1][2]))
        # Intersect the ray with the left face y = sqrt(3)*(x - center) + side/sqrt(3).
        L = np.array([[-incident_slope_1,1],[-math.sqrt(3),1]])
        R = np.array([self.state[-1][1]-incident_slope_1*self.state[-1][0],-math.sqrt(3)*self.central_point+self.side_length/math.sqrt(3)])
        result = np.linalg.solve(L,R)
        # Calculate the position of interacting point
        # The face normal is 30 degrees from horizontal for an equilateral prism.
        incident_angle_1 = 30 + self.state[-1][2]
        out_angle_1 = np.rad2deg(np.arcsin(self.n_air/self.n_glass * np.sin(np.deg2rad(incident_angle_1))))
        ray_state = np.array([result[0],result[1],out_angle_1-30])
        self.state.append(ray_state)
        # The ray come out from the prism
        incident_slope_2 = np.tan(np.deg2rad(self.state[-1][2]))
        # Intersect the internal ray with the right face y = -sqrt(3)*(x - center) + side/sqrt(3).
        L = np.array([[-incident_slope_2,1],[math.sqrt(3),1]])
        R = np.array([self.state[-1][1]-incident_slope_2*self.state[-1][0],math.sqrt(3)*self.central_point+self.side_length/math.sqrt(3)])
        result = np.linalg.solve(L,R)
        # Calculate the position of interacting point
        incident_angle_2 = 60- out_angle_1
        # Snell's law, glass -> air; past the critical angle arcsin returns nan
        # (total internal reflection is not modelled here).
        out_angle_2 = np.rad2deg(np.arcsin(self.n_glass/self.n_air * np.sin(np.deg2rad(incident_angle_2))))
        ray_state = np.array([result[0],result[1],30-out_angle_2])
        self.state.append(ray_state)
    def plot_ray(self):
        '''
        plot_ray(self)
        Plot the prism and the ray path which is described in self.state.
        '''
        # plot the prism
        x1 = np.linspace(-self.side_length/2+self.central_point,0+self.central_point)
        y1 = math.sqrt(3)*(x1-self.central_point)+ 2/math.sqrt(3)*self.side_length/2
        x2 = np.linspace(0+self.central_point,self.side_length/2+self.central_point)
        y2 = -math.sqrt(3)*(x2-self.central_point) + 2/math.sqrt(3)*self.side_length/2
        x3 = np.linspace(-self.side_length/2+self.central_point,self.side_length/2+self.central_point)
        y3 = [min(y1)]*len(x3)
        plt.plot(x1,y1,'k')
        plt.plot(x2,y2,'k')
        plt.plot(x3,y3,'k')
        # plot ray
        for i in range(len(self.state)):
            slope = np.tan(np.deg2rad(self.state[i][2]))
            if i < len(self.state)-1:
                # Segment between two recorded interaction points.
                x = np.linspace(self.state[i][0],self.state[i+1][0])
                y = np.linspace(self.state[i][1],self.state[i+1][1],len(x))
                plt.plot(x,y)
            else:
                # Final outgoing ray: extend from the last point along its slope.
                x = np.linspace(self.state[i][0],self.state[i][0]+self.state[1][0])
                y = slope*x+(self.state[i][1]-slope*self.state[i][0])
                plt.plot(x,y)
        plt.show()
# Demo: trace a ray launched from (0, -1) at 10 degrees through an
# equilateral prism of side 4 centred at x = 6, then plot the path.
if __name__=='__main__':
    def main():
        PT = PrismTracing(0,-1,10)
        # Three parameters are x, z, angle
        PT.ray()
        PT.prism(4,6)
        # Two parameters are side_length , central position
        PT.plot_ray()
        print(PT.state)
main() | [
"noreply@github.com"
] | benchizhao.noreply@github.com |
9ced689eedce285664f086c784d82698560785ff | 324ae3f56926da4169598897b4b9f06053d0a78d | /src/satisfy/nonogram.py | a0d242026fbebebb46a473f94765a8a6f23c6589 | [
"Apache-2.0"
] | permissive | simone-campagna/satisfy | 654152b410f81d0aaa672a8f3629aecce5924498 | b5327e937e32c5324c05f6288f59cfaac4a316dc | refs/heads/master | 2023-06-29T04:46:00.051479 | 2019-06-29T17:20:35 | 2019-06-29T17:20:35 | 157,007,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,321 | py | import collections
from .solver import ModelSolver, VarSelectionPolicy
__all__ = [
'NonogramSolver',
'pixmap_shape',
'pixmap_to_nonogram',
]
# Per-run bookkeeping: the run length plus the [start_value, end_value)
# window of board positions the run may occupy.
VarInfo = collections.namedtuple('VarInfo', 'size start_value end_value')
class NonogramSolver(ModelSolver):
    """Constraint-model solver for nonogram (picross) puzzles.

    The puzzle is {'rows': [...], 'columns': [...]} where each entry lists
    the run lengths of filled cells for that line.  One integer variable is
    created per run, holding the run's start index; iterating the solver
    yields solved pixmaps (lists of 0/1 rows).
    """
    def __init__(self, nonogram, **args):
        """Build the constraint model for *nonogram*; extra args go to ModelSolver."""
        if args.get('var_selection_policy', None) is None:
            args['var_selection_policy'] = VarSelectionPolicy.MIN_BOUND
        super().__init__(**args)
        model = self._model
        rows = nonogram['rows']
        cols = nonogram['columns']
        num_rows = len(rows)
        num_cols = len(cols)
        var_infos = {}
        # add row vars and constraints:
        row_vars = {r: [] for r in range(num_rows)}
        for r, row in enumerate(rows):
            cur_vars = row_vars[r]
            if row:
                start = 0
                # Minimum span still needed by this and later runs (1 gap between runs).
                rem_size = sum(row) + len(row) - 1
                for k, size in enumerate(row):
                    offset = size + int(k != len(row) - 1)
                    end = num_cols - rem_size + 1
                    domain = list(range(start, end))
                    var = model.add_int_variable(name='r{}_{}'.format(r, k), domain=domain)
                    var_infos[var.name] = VarInfo(size=size, start_value=start, end_value=end + size)
                    # model.add_constraint(var + size <= num_cols) # TODO diff SERVE???
                    start += offset
                    rem_size -= offset
                    if cur_vars:
                        # Each run starts strictly after the previous run ends.
                        prev_var = cur_vars[-1]
                        constraint = var > prev_var + var_infos[prev_var.name].size
                        model.add_constraint(constraint)
                    cur_vars.append(var)
        # add col vars and constraints:
        col_vars = {c: [] for c in range(num_cols)}
        for c, col in enumerate(cols):
            cur_vars = col_vars[c]
            if col:
                start = 0
                rem_size = sum(col) + len(col) - 1
                for k, size in enumerate(col):
                    offset = size + int(k != len(col) - 1)
                    end = num_rows - rem_size + 1
                    domain = list(range(start, end))
                    var = model.add_int_variable(name='c{}_{}'.format(c, k), domain=domain)
                    var_infos[var.name] = VarInfo(size=size, start_value=start, end_value=end + size)
                    # model.add_constraint(var + size <= num_rows) # TODO diff SERVE???
                    start += offset
                    rem_size -= offset
                    if cur_vars:
                        prev_var = cur_vars[-1]
                        constraint = var > prev_var + var_infos[prev_var.name].size
                        model.add_constraint(constraint)
                    cur_vars.append(var)
        # add row<>col constraints:
        # Cell (r, c) must be covered by a row run iff it is covered by a column run.
        for r in range(num_rows):
            for c in range(num_cols):
                r_expr_list = []
                for var in row_vars[r]:
                    size = var_infos[var.name].size
                    var_info = var_infos[var.name]
                    if var_info.start_value <= c < var_info.end_value:
                        r_expr_list.append((var <= c) & (c < var + size))
                # else:
                #     print("r: {}: discard {} ({})".format(var.name, c, var_info), model.get_var_domain(var))
                c_expr_list = []
                for var in col_vars[c]:
                    size = var_infos[var.name].size
                    var_info = var_infos[var.name]
                    if var_info.start_value <= r < var_info.end_value:
                        c_expr_list.append((var <= r) & (r < var + size))
                # else:
                #     print("c: {}: discard {} ({})".format(var.name, r, var_info), model.get_var_domain(var))
                if r_expr_list or c_expr_list:
                    if r_expr_list:
                        r_expr = sum(r_expr_list)
                    else:
                        r_expr = 0
                    if c_expr_list:
                        c_expr = sum(c_expr_list)
                    else:
                        c_expr = 0
                    constraint = (sum(r_expr_list) == sum(c_expr_list))
                    model.add_constraint(constraint)
        # instance attributes:
        self._var_infos = var_infos
        self._shape = (num_rows, num_cols)
        self._row_vars = row_vars
        self._col_vars = col_vars
    @property
    def source(self):
        # NOTE(review): self._source is never assigned in this class, so
        # accessing this property raises AttributeError unless a subclass
        # or the ModelSolver base sets it -- confirm.
        return self._source
    @property
    def expr(self):
        # NOTE(review): same caveat as 'source' -- self._expr is never set here.
        return self._expr
    def __iter__(self):
        """Yield one 0/1 pixmap per solution found by the underlying solver."""
        model = self._model
        solver = self._solver
        num_rows, num_cols = self._shape
        var_infos = self._var_infos
        row_vars = self._row_vars
        for solution in solver.solve(model):
            pixmap = [[0 for _ in range(num_cols)] for _ in range(num_rows)]
            for r, cur_vars in row_vars.items():
                for var in cur_vars:
                    # Paint each row run from its solved start position.
                    start = solution[var.name]
                    size = var_infos[var.name].size
                    for c in range(start, start + size):
                        pixmap[r][c] = 1
            yield pixmap
def pixmap_shape(pixmap):
    """Return (num_rows, num_cols) for *pixmap* (a list of pixel rows).

    num_cols is the width of the widest row, so ragged pixmaps are
    accepted; an empty pixmap has shape (0, 0).
    """
    num_rows = len(pixmap)
    num_cols = max((len(row) for row in pixmap), default=0)
    return num_rows, num_cols


def _run_lengths(cells):
    """Return the lengths of consecutive runs of truthy cells, in order."""
    runs = []
    count = 0
    for cell in cells:
        if cell:
            count += 1
        elif count:
            runs.append(count)
            count = 0
    if count:
        runs.append(count)
    return runs


def pixmap_to_nonogram(pixmap):
    """Build the nonogram clue dict {'rows': ..., 'columns': ...} for *pixmap*.

    Each clue list holds the run lengths of filled (truthy) cells of that
    row/column, top-to-bottom and left-to-right.

    Fixes: the original duplicated the run-length scan for rows and
    columns (now shared via _run_lengths) and raised IndexError on
    ragged pixmaps even though pixmap_shape explicitly supports them;
    missing trailing cells are now treated as empty.
    """
    num_rows, num_cols = pixmap_shape(pixmap)
    rows = [_run_lengths(row) for row in pixmap]
    cols = [
        _run_lengths(
            (pixmap[r][c] if c < len(pixmap[r]) else 0) for r in range(num_rows)
        )
        for c in range(num_cols)
    ]
    return {'rows': rows, 'columns': cols}
| [
"simone.campagna11@gmail.com"
] | simone.campagna11@gmail.com |
cee887c62ba400532290d58768227c8f8146d85e | e892207dd96f39ee2b9a99761b08381c442bef61 | /sources/simple_shop/adapters/message_bus/scheme.py | aa54e44be9cd5c4ff308c76d099b67dd11a68c6a | [
"MIT"
] | permissive | theevs/classic_demo_medium_difficulty | f9196795bd60acea3458c3b83d8a56c79ecdb9e7 | 335b50f9509c85dc426a76dbef7f7f2d28603e76 | refs/heads/main | 2023-08-31T21:32:51.001017 | 2021-10-26T14:38:58 | 2021-10-26T14:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from classic.messaging_kombu import BrokerScheme
from kombu import Exchange, Queue
# Single bounded queue on the 'OrderPlaced' exchange; max_length=1 keeps
# only the newest OrderPlaced message for the PrintOrderPlaced consumer.
broker_scheme = BrokerScheme(
    Queue('PrintOrderPlaced', Exchange('OrderPlaced'), max_length=1)
)
| [
"variasov@gmail.com"
] | variasov@gmail.com |
1955dee98cd2d41ac68b3c467664e2dd537aaf21 | 354f5a285b0c6f14475aba45a18312995a337de6 | /core/admin.py | 7a082b38139b57888ec5f982338832dba5f04bf7 | [] | no_license | lucasdmarten/navedexAPI | 45063cf63a8b7d55cc23a01ada6451a8e726e6f7 | 712425268a2cbd8f041deac32ea3d25292ea30fe | refs/heads/master | 2023-03-23T11:27:19.833069 | 2021-03-15T22:15:41 | 2021-03-15T22:15:41 | 345,178,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.core.exceptions import ValidationError
from core.models import MyUser, Naver, Projeto
class UserCreationForm(forms.ModelForm):
    """A form for creating new users. Includes all the required
    fields, plus a repeated password."""
    username = forms.CharField(max_length=10)
    # Two masked inputs so the password has to be typed twice.
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
    class Meta:
        model = MyUser
        fields = ('username','email',)
    def clean_password2(self):
        """Validate that both password entries match; return the confirmation."""
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError("Passwords don't match")
        return password2
    def save(self, commit=True):
        """Create the user, storing the password hashed via set_password()."""
        # Save the provided password in hashed format
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = MyUser
        fields = ('username','email', 'password', 'is_active', 'is_admin')
    def clean_password(self):
        """Always keep the stored hash; the password field is display-only."""
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom MyUser model, wired to the
    UserChangeForm / UserCreationForm defined above."""
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm
    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('username', 'email', 'is_admin')
    list_filter = ('is_admin',)
    fieldsets = (
        (None, {'fields': ('username','email', 'password')}),
        ('Permissions', {'fields': ('is_admin',)}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username','email', 'password1', 'password2'),
        }),
    )
    search_fields = ('username','email',)
    ordering = ('username','email',)
    filter_horizontal = ()
class NaverAdmin(admin.ModelAdmin):
    """List view columns for Naver records; 'get_projects' is presumably
    a callable on the Naver model -- confirm in core.models."""
    list_display = ('id','user','fullname','birthdate','admission_date','job_role','get_projects')
class ProjetoAdmin(admin.ModelAdmin):
    """List view columns for Projeto records; 'get_users_id' is presumably
    a callable on the Projeto model -- confirm in core.models."""
    list_display = ('id','name','get_users_id',)
# Now register the new UserAdmin...
admin.site.register(MyUser, UserAdmin)
admin.site.register(Naver, NaverAdmin)
admin.site.register(Projeto, ProjetoAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group) | [
"lucasdmarten@gmail.com"
] | lucasdmarten@gmail.com |
6f53e73452513ff22b1f2c3fd170aeff0d69f4c2 | 532dc9970b8808fe806aa6a02c0cf9bb7c742b3c | /model_form/forms.py | 1a207e3c15e90dcd1a694af226bcfe159e1a27f5 | [] | no_license | adkhn777/forms_example | 3e604aaa3390d49d2b76c83c07ec5d1d837e56ed | 56b78b770bdddbb984585c08f2d9c495345622b1 | refs/heads/master | 2021-01-24T11:22:11.893363 | 2016-10-07T07:06:47 | 2016-10-07T07:06:47 | 70,223,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django import forms
from .models import ModelFormModel
class ModelFormForm(forms.ModelForm):
    """ModelForm exposing only the 'name' and 'bio' fields of ModelFormModel."""
    class Meta:
        model = ModelFormModel
        fields = ["name", "bio"]
| [
"adnan@tuple.tech"
] | adnan@tuple.tech |
008b54483f4cb8901c5fb12ee52cc4dfed87daa6 | 8900db5096dcf2f8a758f6792cc7c8f2bc1d7785 | /src/dataset.py | b23694ff85df42853de900a29099a30f0b2c3931 | [] | no_license | PeterZhouSZ/seg2vid | ca5d649a19c47b769fc243ef5da4f95e965c42a9 | 306ca254ac30620afe48767d02b9cf3fecba1194 | refs/heads/master | 2020-06-01T19:50:22.737812 | 2019-03-26T11:37:58 | 2019-03-26T11:37:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,811 | py | from datasets.dataset_path import *
def get_training_set(opt):
    """Return the training Dataset selected by ``opt.dataset``.

    Supported values: 'cityscapes', 'cityscapes_two_path', 'kth'.

    Fixes: the assert read the misspelled attribute ``opt.datset``
    (AttributeError on every call), and the two-path branch imported
    ``Cityscpes`` while instantiating ``Cityscapes`` (NameError).
    """
    assert opt.dataset in ['cityscapes', 'cityscapes_two_path', 'kth']
    if opt.dataset == 'cityscapes':
        from datasets.cityscapes_dataset_w_mask import Cityscapes
        train_Dataset = Cityscapes(datapath=CITYSCAPES_TRAIN_DATA_PATH, datalist=CITYSCAPES_TRAIN_DATA_LIST,
                                   size=opt.input_size, split='train', split_num=1, num_frames=opt.num_frames)
    elif opt.dataset == 'cityscapes_two_path':
        # NOTE(review): assumes the two-path module exports 'Cityscapes' like
        # its siblings (the original imported 'Cityscpes') -- confirm.
        from datasets.cityscapes_dataset_w_mask_two_path import Cityscapes
        train_Dataset = Cityscapes(datapath=CITYSCAPES_TRAIN_DATA_PATH,
                                   mask_data_path=CITYSCAPES_TRAIN_DATA_SEGMASK_PATH,
                                   datalist=CITYSCAPES_TRAIN_DATA_LIST,
                                   size=opt.input_size, split='train', split_num=1, num_frames=opt.num_frames,
                                   mask_suffix='ssmask.png')
    elif opt.dataset == 'kth':
        from datasets.kth_dataset import KTH
        train_Dataset = KTH(dataset_root=KTH_DATA_PATH,
                            datalist=KTH_DATA_PATH_LIST,
                            size=opt.input_size, num_frames=opt.num_frames)
    return train_Dataset
def get_test_set(opt):
    """Return the evaluation Dataset selected by ``opt.dataset``.

    Supported values: 'cityscapes', 'cityscapes_two_path',
    'cityscapes_pix2pixHD', 'kth', 'ucf101', 'KITTI'.

    Fixes: 'cityscapes_pix2pixHD' had a branch but was missing from the
    assert (making that branch unreachable), and the ucf101 branch read
    the undefined name ``category`` (NameError) instead of
    ``opt.category``.
    """
    assert opt.dataset in ['cityscapes', 'cityscapes_two_path', 'cityscapes_pix2pixHD',
                           'kth', 'ucf101', 'KITTI']
    if opt.dataset == 'cityscapes':
        from datasets.cityscapes_dataset_w_mask import Cityscapes
        test_Dataset = Cityscapes(datapath=CITYSCAPES_VAL_DATA_PATH, mask_data_path=CITYSCAPES_VAL_DATA_SEGMASK_PATH,
                                  datalist=CITYSCAPES_VAL_DATA_LIST,
                                  size=opt.input_size, split='train', split_num=1, num_frames=opt.num_frames,
                                  mask_suffix='ssmask.png', returnpath=True)
    elif opt.dataset == 'cityscapes_two_path':
        from datasets.cityscapes_dataset_w_mask_two_path import Cityscapes
        test_Dataset = Cityscapes(datapath=CITYSCAPES_VAL_DATA_PATH, mask_data_path=CITYSCAPES_VAL_DATA_SEGMASK_PATH,
                                  datalist=CITYSCAPES_VAL_DATA_LIST,
                                  size=opt.input_size, split='train', split_num=1, num_frames=opt.num_frames,
                                  mask_suffix='ssmask.png', returnpath=True)
    elif opt.dataset == 'cityscapes_pix2pixHD':
        from cityscapes_dataloader_w_mask_pix2pixHD import Cityscapes
        test_Dataset = Cityscapes(datapath=CITYSCAPES_TEST_DATA_PATH,
                                  mask_data_path=CITYSCAPES_VAL_DATA_SEGMASK_PATH,
                                  datalist=CITYSCAPES_VAL_DATA_MASK_LIST,
                                  size=opt.input_size, split='test', split_num=1,
                                  num_frames=opt.num_frames, mask_suffix='ssmask.png', returnpath=True)
    elif opt.dataset == 'kth':
        from datasets.kth_dataset import KTH
        test_Dataset = KTH(dataset_root=KTH_DATA_PATH,
                           datalist='./file_list/kth_test_%s_16_ok.txt' % opt.category,
                           size=opt.input_size, num_frames=opt.num_frames)
    elif opt.dataset == 'KITTI':
        from datasets.kitti_dataset import KITTI
        kitti_dataset_list = os.listdir(KITTI_DATA_PATH)
        test_Dataset = KITTI(datapath=KITTI_DATA_PATH, datalist=kitti_dataset_list, size=opt.input_size,
                             returnpath=True)
    elif opt.dataset == 'ucf101':
        from datasets.ucf101_dataset import UCF101
        # Fix: original referenced the bare, undefined name 'category' here.
        test_Dataset = UCF101(datapath=os.path.join(UCF_101_DATA_PATH, opt.category),
                              datalist=os.path.join(UCF_101_DATA_PATH, 'list/test%s.txt' % (opt.category.lower())), returnpath=True)
return test_Dataset | [
"panjunting@sensetime.com"
] | panjunting@sensetime.com |
cb5a0612aa59838260c5412af9a13b580b3b489b | 28114f0389cc27d34c3a12e241cc4aac73952b3c | /SensorData/SensorData/urls.py | c9ea4cd0562eb2141ab41382d17844dc50c4a54a | [] | no_license | sherlockvind/swlab2_assignment5 | 57e1feebc377be4dacaf555ac1db1a28798a7fa5 | 5c8da346e7bbdbcd4942af4bb88fa68d113b861b | refs/heads/main | 2023-04-06T15:58:39.086948 | 2021-04-21T07:17:57 | 2021-04-21T07:17:57 | 353,388,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """SensorData URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Admin site under /admin/; every other URL is delegated to the 'main'
# app's URLconf, namespaced as 'main'.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('main.urls', namespace='main')),
]
| [
"vb.3596@gmail.com"
] | vb.3596@gmail.com |
5314e273b1fab0e24e981f1fa735e30171ba22a2 | a76ea88df1f28bab17fae40f1c0b4a4471e3da05 | /wanderbot-ws/src/teleopbot/src/keys_to_twist.py | 6fd8b0342f416ee377d5cad97ce7997915336bc4 | [] | no_license | oemergenc/ros-wander-bot | 0182b902851f6e896654c72d08e7c86f2bd72311 | d78d2a6595882f2f05c468c84a4d102cca8a699b | refs/heads/master | 2020-04-14T02:03:03.829173 | 2019-01-07T10:05:29 | 2019-01-07T10:05:29 | 163,575,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | #!/usr/bin/env python
# BEGIN ALL
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
# BEGIN KEYMAP
key_mapping = {'w': [0, 1], 'x': [0, -1],
'a': [-1, 0], 'd': [1, 0],
's': [0, 0]}
# END KEYMAP
def keys_cb(msg, twist_pub):
    """Translate one keypress message into a Twist command and publish it.

    msg       -- std_msgs/String; only its first character is used.
    twist_pub -- publisher for geometry_msgs/Twist (cmd_vel topic).

    Fix: the original tested membership with dict.has_key(), which was
    removed in Python 3; the ``in`` operator is equivalent on both
    Python 2 and 3.
    """
    # BEGIN CB
    if len(msg.data) == 0 or msg.data[0] not in key_mapping:
        return # unknown key.
    vels = key_mapping[msg.data[0]]
    # END CB
    t = Twist()
    t.angular.z = vels[0]  # key_mapping stores [angular.z, linear.x]
    t.linear.x = vels[1]
    twist_pub.publish(t)
if __name__ == '__main__':
    rospy.init_node('keys_to_twist')
    # queue_size=1: stale velocity commands are dropped, newest wins.
    twist_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    rospy.Subscriber('keys', String, keys_cb, twist_pub)
    rospy.spin()
# END ALL
| [
"oemergenc@gmail.com"
] | oemergenc@gmail.com |
dafc1d3715006b5918f20ffe70c8e774c8a86cf5 | 1fb9a17e9273a627b090b1f332fab15f14d5baca | /Clustering_Twitter_Data.py | e0a27cd69ed88ca34b7c77cfda65f70fe673eb8e | [] | no_license | Akhilavk1106/Clustering-Twitter-Health-Data | 1803481d3f52d7167909c15404894e4a73cd2fd1 | 8dc2f3a30d68ac287680ecc4318ea2c621b12933 | refs/heads/master | 2020-03-29T07:34:13.632976 | 2018-09-21T00:52:54 | 2018-09-21T00:52:54 | 149,670,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py |
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
from sklearn import cluster, datasets
import pandas as pd
import sys
# --- Load tweets: each CLI argument is a '|'-separated file whose last
# field is the tweet text; anything from ' http' onward is dropped.
# The preallocated size 16 assumes at most 16 input files.
original_labels=np.empty(16,dtype='int64')
tweet_num=0
tweets=[]
listfile = []
for file_name in sys.argv[1:]:
    # NOTE(review): 'list' shadows the builtin; kept unchanged here.
    list=[]
    with open(file_name,encoding='ISO-8859-1') as file:
        list_special=['rt','video','amp','may']
        for row in file.readlines():
            content=row.split('|')
            c=content[-1].split(' http')
            c[0]=c[0].lower()
            # Collapse ASCII and full-width punctuation runs into spaces.
            remove_pun = re.sub("[\s+\.\-\!\;\:\/_,$%^(+\"\']+|[+——!,。?、~@#¥%…&…*()]+", " ", c[0])
            list.append(remove_pun)
    tweets.extend(list)
    # original_labels[i] = number of tweets contributed by input file i.
    original_labels[tweet_num]=len(list)
    tweet_num +=1
print(original_labels)
# --- Ground truth: each tweet is labelled by the file it came from.
true_cluster2 = np.empty((len(tweets), 1), dtype='int64')
begin = 0
for i in range(len(original_labels)):
    end = begin + original_labels[i]
    true_cluster2[begin:end, 0] = i
    begin = end
# --- Pipeline: TF-IDF features -> 2-D PCA projection -> 16-way k-means.
vectorizer = TfidfVectorizer(stop_words='english',max_features=5000)
X = vectorizer.fit_transform(tweets)
array_trans=X.toarray()
pca2=PCA(n_components=2)
newMat = pca2.fit_transform(array_trans)
kmeans = KMeans(n_clusters=16,random_state=0).fit(newMat)
labels = kmeans.labels_
X_clustered = kmeans.fit_predict(newMat)
ind=0
print(true_cluster2)
# Print the original (file) label next to each cluster assignment.
for la in labels:
    print('OriginalLabel:',true_cluster2[ind],'ClusterLabel',la)
    ind+=1
#Define our own color map
LABEL_COLOR_MAP = {0: 'b', 1: 'c', 2: 'k',3:'m', 4: 'green', 5: 'r',6:'w', 7: 'y', 8: 'ivory',9:'navy', 10: 'orange', 11: 'purple',12:'olive', 13: 'gray', 14: 'maroon',15:'pink', 16: 'tan'}
label_color = [LABEL_COLOR_MAP[l] for l in X_clustered]
# Plot the scatter digram
plt.figure(figsize = (25,25))
plt.scatter(newMat[:,0],newMat[:,1], c= label_color, alpha=0.5)
plt.show()
| [
"noreply@github.com"
] | Akhilavk1106.noreply@github.com |
41203f92213b29c8d6459485d713fd321114f4fd | 6f33999bb1cc273388bf4d7dfa550bdf428cdf04 | /myapp/migrations/0001_initial.py | f97924a41e670ebe398b72ef59bf9e701d396ab3 | [] | no_license | xiezongzheng/test9_29 | 863fa5b85b65b2200b070800c576f41da11d4653 | afb8c567f29f60a9e9d28693ceb1bfada967e44a | refs/heads/master | 2021-01-10T01:19:05.587110 | 2015-11-01T04:17:44 | 2015-11-01T04:17:44 | 44,243,815 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,063 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial, auto-generated migration for the `myapp` app.  It declares
    # three models that mirror pre-existing MySQL tables; `managed: False`
    # means Django will NOT create, alter, or drop these tables itself.
    dependencies = [
    ]
    operations = [
        # Bacterial nomenclature table (`bactname`): taxonomy, literature
        # reference and 16S sequence columns mapped onto legacy column names.
        migrations.CreateModel(
            name='Bactname',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True)),
                ('num', models.TextField(db_column='NUM', blank=True)),
                ('genus', models.CharField(max_length=50, db_column='GENUS', blank=True)),
                ('species', models.CharField(max_length=50, db_column='SPECIES', blank=True)),
                ('subspecies', models.CharField(max_length=50, db_column='SUBSPECIES', blank=True)),
                ('reference', models.CharField(max_length=50, db_column='REFERENCE', blank=True)),
                ('status', models.CharField(max_length=50, db_column='STATUS', blank=True)),
                ('authors', models.CharField(max_length=50, db_column='AUTHORS', blank=True)),
                ('remarks', models.CharField(max_length=50, db_column='REMARKS', blank=True)),
                ('risk_grp', models.CharField(max_length=50, db_column='RISK_GRP', blank=True)),
                ('type_strains', models.CharField(max_length=50, db_column='TYPE_STRAINS', blank=True)),
                ('taxonid', models.CharField(max_length=50, db_column='taxonId', blank=True)),
                ('ncbitaxonid', models.CharField(max_length=50, db_column='ncbiTaxonId', blank=True)),
                ('mclid', models.CharField(max_length=50, db_column='mclId', blank=True)),
                ('sequence', models.TextField(db_column='SEQUENCE', blank=True)),
            ],
            options={
                'db_table': 'bactname',
                'managed': False,
            },
            bases=(models.Model,),
        ),
        # Cross-reference table mapping species names to taxon identifiers.
        migrations.CreateModel(
            name='TaxonMapping',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True)),
                ('speciesname', models.CharField(max_length=100, db_column='speciesName', blank=True)),
                ('taxonid', models.CharField(max_length=50, db_column='taxonId', blank=True)),
                ('ncbitaxonid', models.CharField(max_length=50, db_column='ncbiTaxonId', blank=True)),
                ('mclid', models.CharField(max_length=50, db_column='mclId', blank=True)),
            ],
            options={
                'db_table': 'taxon_mapping',
                'managed': False,
            },
            bases=(models.Model,),
        ),
        # Minimal credential table.  NOTE(review): passwords appear to be
        # stored as plain CharFields — confirm hashing happens elsewhere.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True)),
                ('username', models.CharField(max_length=100, blank=True)),
                ('password', models.CharField(max_length=100, blank=True)),
            ],
            options={
                'db_table': 'user',
                'managed': False,
            },
            bases=(models.Model,),
        ),
    ]
| [
"421489422@qq.com"
] | 421489422@qq.com |
4bfe6ebbc3532652449f4621355b38f922dd4b06 | 977eb763cdf049d6cd58b3055bd353e2d93afbed | /readfinstar.py | ae730e393490e5c4d0bf1c6bf0f47bf9db7fea71 | [] | no_license | syadav8126/toofanTicker | 7a64a3af9b1e73d20ab8b3d6af18c7e99c7a8a19 | e731748e54c780d3964a0e8595d0f08e46d1d938 | refs/heads/main | 2023-02-20T07:36:15.848279 | 2021-01-26T06:44:01 | 2021-01-26T06:44:01 | 332,995,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | import finstar
import csv
import sys
import subprocess
from subprocess import Popen
import time
# Launch one detached finstar.py worker per ticker listed in the CSV file.
input_file = 'standalone.csv'
with open(input_file, 'r') as source:
    for record in csv.reader(source):
        ticker = record[0]
        worker_cmd = [sys.executable, './finstar.py', ticker]
        # Fire-and-forget: each child runs independently of this launcher.
        Popen(worker_cmd, shell=False, stdin=None, stdout=None, stderr=None,
              close_fds=True)
        # Throttle spawning so we do not start every process at once.
        time.sleep(0.18)
| [
"syadav8126@gmail.com"
] | syadav8126@gmail.com |
971bdaf13fb6d5bfbbcd7260417062a0c83db880 | f84ecb8178baaad91914ca20212a67d22fdce293 | /account/account/settings.py | 229e6511004eb60dc7308bd414a52f6cd2a9a762 | [] | no_license | TonyMistark/account_statement | 33047972fcf2854a973e35a8aea56ec0e051b2a1 | aeb66f1ca687e3afe4f0c308889929019209ec4e | refs/heads/master | 2021-01-22T03:49:13.483873 | 2017-02-09T16:22:14 | 2017-02-09T16:22:14 | 81,460,715 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,791 | py | """
Django settings for account project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '8fb7nwrenc3tn4j1gcb@%ztui@2gti!*jpdeobe2ip&u36^q3+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list real hostnames in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    "rest_framework",
    "account",
]
# Django REST Framework defaults: admin-only API access, 10 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAdminUser',
    ],
    'PAGE_SIZE': 10
}
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'account.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'account.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): MySQL credentials are hardcoded below — move them to
# environment variables or a secrets file before deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': "account",  # Or path to database file if using sqlite3.
        'USER': 'root',  # Not used with sqlite3.
        'PASSWORD': 'root',  # Not used with sqlite3.
        'HOST': 'localhost',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '3306',  # Set to empty string for default. Not used with sqlite3.
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"tony_mistark@163.com"
] | tony_mistark@163.com |
5dc6bd71fa378f65e395229b4201d11a93f1a69b | e55c20745958762f899d79e9fad8fedee0cc2a53 | /apps/courses/migrations/0009_course_teacher.py | bb9c3c92de0ba0507b54c908fbc1b9af6b5a71f5 | [] | no_license | lisi2016/MxOnline | 1b4703fbac6c88c66f0e7b3b5fbcfb1f1ab077ee | 0aeca26244b8c446fea096dcdbefcbaee2835dc1 | refs/heads/master | 2021-08-19T01:25:34.808049 | 2017-11-24T10:14:06 | 2017-11-24T10:14:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-11-17 15:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: adds an optional `teacher` foreign key on Course,
    # pointing at organization.Teacher (verbose_name is the escaped Chinese
    # label for "course teacher"; nullable so existing rows stay valid).
    dependencies = [
        ('organization', '0006_teacher_image'),
        ('courses', '0008_video_learn_times'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='teacher',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u6388\u8bfe\u8bb2\u5e08'),
        ),
    ]
| [
"caozhiqiango@foxmail.com"
] | caozhiqiango@foxmail.com |
6168b0398ebb09f7c55ed863aca797354077e778 | c0b4a1991ad529f162779e60d0af3e22f2468aaa | /cogs/members.py | fa99048e26540f997e676784f27765288f0b4420 | [] | no_license | scosco97/apibot | 51ae66317a4abfe7bb9380b23679ad476800ad1e | 3f714c2daa6a2cd046d417bc0e74e2501ed55959 | refs/heads/master | 2023-07-29T09:05:36.770121 | 2021-09-11T20:19:35 | 2021-09-11T20:19:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,015 | py | import discord
import random
from config import settings
from discord.ext import commands
class MembersCog(commands.Cog):
    """Member lifecycle events for the Clash API Developers guild:
    welcome messages, Developer-role announcements and mod-log entries.
    Guild-specific: every listener bails out unless guild.id matches the
    hardcoded Clash API Developers server ID (566451504332931073)."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(name="welcome", hidden=True)
    async def welcome(self, ctx, member: discord.Member = None):
        """Manually re-send the welcome message for *member* and log it."""
        if not member:
            return await ctx.send("Member does not exist.")
        channel = self.bot.get_channel(settings['channels']['welcome'])
        msg = (f"Welcome to the Clash API Developers server, {member.mention}! We're glad to have you!\n"
               f"First, please let us know what your preferred programming language is. "
               f"Next, if you've already started working with the API, please tell us a little about your project. "
               f"If you haven't started a project yet, let us know what you're interested in making.")
        await channel.send(msg)
        mod_log = self.bot.get_channel(settings['channels']['mod-log'])
        msg = f"{member.display_name}#{member.discriminator} just joined the server."
        await mod_log.send(f"{msg} (This message generated by the `//welcome` command initiated by "
                           f"{ctx.author.display_name}.")
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Discord listener which is called when a user joins the Discord server."""
        if member.guild.id != 566451504332931073:
            # only act if they are joining API server
            return
        if not member.bot:
            # Human member: greet them in the welcome channel.
            channel = self.bot.get_channel(settings['channels']['welcome'])
            msg = (f"Welcome to the Clash API Developers server, {member.mention}! We're glad to have you!\n"
                   f"First, please let us know what your preferred programming language is. "
                   f"Next, if you've already started working with the API, please tell us a little about your project. "
                   f"If you haven't started a project yet, let us know what you're interested in making.")
            await channel.send(msg)
        else:
            # Bot account: prompt the admins to create a demo channel for it.
            channel = self.bot.get_channel(settings['channels']['admin'])
            await channel.send(f"{member.mention} has just been invited to the server. "
                               f"Perhaps it is time to set up a demo channel? Try `//setup {member.mention} @owner`")
        mod_log = self.bot.get_channel(settings['channels']['mod-log'])
        msg = f"{member.display_name}#{member.discriminator} just joined the server."
        await mod_log.send(msg)
    @commands.Cog.listener()
    async def on_member_update(self, old_member, new_member):
        """Discord listener to announce new member with Developer role to #general"""
        if new_member.guild.id != 566451504332931073:
            # only act if this is the API server
            return
        if old_member.roles == new_member.roles:
            return
        developer_role = new_member.guild.get_role(settings['roles']['developer'])
        if developer_role not in old_member.roles and developer_role in new_member.roles:
            if new_member.bot:
                # NOTE(review): this branch does not return, so a bot that
                # receives the Developer role is still announced below —
                # confirm whether that is intended.
                channel = self.bot.get_channel(settings['channels']['admin'])
                await channel.send(f"Who is the bonehead that assigned the Developer role to a bot? "
                                   f"{new_member.name} is a bot.")
            # At this point, it should be a member on our server that has just received the developers role
            self.bot.logger.info(f"New member with Developers role: {new_member.display_name}")
            # Collect the member's language roles from the bot's database
            # (bot_language_board maps role ids to names and emoji).
            sql = "SELECT role_id, role_name, emoji_repr FROM bot_language_board"
            fetch = await self.bot.pool.fetch(sql)
            language_roles = [[row['role_id'], row['role_name'], row['emoji_repr']] for row in fetch]
            member_languages = ""
            member_role_emoji = []
            for language_role in language_roles:
                for role in new_member.roles:
                    if language_role[0] == role.id:
                        member_languages += f"{language_role[1]}\n"
                        member_role_emoji.append(language_role[2])
            channel = new_member.guild.get_channel(settings['channels']['general'])
            embed = discord.Embed(color=discord.Color.blue(),
                                  description=f"Please welcome {new_member.display_name} to the Clash API Developers "
                                              f"server.")
            embed.set_thumbnail(url=new_member.avatar_url_as(size=128))
            if member_languages:
                embed.add_field(name="Languages:", value=member_languages)
            msg = await channel.send(embed=embed)
            # Mirror the member's language roles as reactions on the announcement.
            if member_role_emoji:
                for emoji in member_role_emoji:
                    await msg.add_reaction(emoji)
    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Discord listener which is called when a user leaves the Discord server."""
        if member.guild.id != 566451504332931073:
            # only act if they are joining API server
            return
        # Build random list of messages
        msg_options = [" just left the server. Buh Bye!",
                       " just left our Discord. I wonder if we will miss them.",
                       " just left. What's up with that?",
                       " went bye-bye. Who will fill the void?",
                       " has left us. A short moment of silence.",
                       " has departed. Hope they learned everything they need!",
                       ]
        channel = self.bot.get_channel(settings['channels']['general'])
        msg = member.display_name + random.choice(msg_options)
        await channel.send(msg)
        mod_log = self.bot.get_channel(settings['channels']['mod-log'])
        msg = f"{member.display_name}#{member.discriminator} just left the server."
        await mod_log.send(msg)
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = MembersCog(bot)
    bot.add_cog(cog)
| [
"wpmjones@gmail.com"
] | wpmjones@gmail.com |
eaeef1d5a47d3ff5621d988c694458cf63dc39a6 | ceab178d446c4ab55951c3d65d99815e9fdee43a | /archive/coding_practice/python/ticks_plot.py | 83e7d35370f009514aa95366b78a92f4f61f0afa | [] | no_license | DeneBowdalo/AtmosChem_Tools | 01ecedb0df5c5d6e01966a0c3d8055826f5ac447 | 220c2f697a4f4c1e5443c336ede923b2004fe9f5 | refs/heads/master | 2021-01-10T18:05:30.800218 | 2017-02-06T16:08:14 | 2017-02-06T16:08:14 | 43,529,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import matplotlib.pyplot as plt
# Plot a small series with custom letter labels on the x axis and
# odd-numbered ticks on the y axis.
values = [5, 3, 7, 2, 4, 1, 11, 25, 33]
plt.plot(values)
plt.xticks(range(len(values)), ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
plt.yticks(range(1, 36, 2))
plt.show()
| [
"db876@earth0.york.ac.uk"
] | db876@earth0.york.ac.uk |
def39a55d547e1131e0f8dcf639f5da81e09bb90 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /cGaTqHsPfR5H6YBuj_0.py | c3936bfae1158025ccd064458e0c9c17ee2d0b5e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | """
Given a list of ingredients `i` and a flavour `f` as input, create a function
that returns the list, but with the elements `bread` around the selected
ingredient.
### Examples
make_sandwich(["tuna", "ham", "tomato"], "ham") ➞ ["tuna", "bread", "ham", "bread", "tomato"]
make_sandwich(["cheese", "lettuce"], "cheese") ➞ ["bread", "cheese", "bread", "lettuce"]
make_sandwich(["ham", "ham"], "ham") ➞ ["bread", "ham", "bread", "bread", "ham", "bread"]
### Notes
* You will always get valid inputs.
* Make two separate sandwiches if two of the same elements are next to each other (see example #3).
"""
def make_sandwich(ingredients, flavour):
    """Return *ingredients* with every occurrence of *flavour* wrapped
    in a 'bread' element on each side (one sandwich per occurrence)."""
    result = []
    for item in ingredients:
        if item == flavour:
            result.extend(['bread', item, 'bread'])
        else:
            result.append(item)
    return result
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7637d837e8cb0ba7f81221d92b23e7c92de9f971 | f925eae9b78d44f9aa56cff17ef07aab87346a18 | /stats/plot_all_roles.py | 7e0f598a1128f40a4935de1b80120f39d0da950a | [
"MIT"
] | permissive | wejradford/castminer | f05d965e514c236657142c4db15a5c42db5160d1 | 6b792ba59621e7d0925d4ed683a51946c5193f3c | refs/heads/master | 2020-12-24T08:55:13.205547 | 2016-09-06T12:46:15 | 2016-09-06T12:46:15 | 31,730,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | #!/usr/bin/env python
from __future__ import print_function
# Plot cumulative role counts from the cast database as a rolling-mean PDF.
import logging
import os
from utils import argparser_factory, db_factory, plot_role_counts, \
    get_all_role_counts, FIGS
log = logging.getLogger()
parser = argparser_factory()
parser.add_argument('-w', '--window', default=5, type=int)
args = parser.parse_args()
# Query every role count from the database.
database = db_factory(args.db)
cursor = database.cursor()
counts, total = get_all_role_counts(cursor)
log.info('Collected {} data points'.format(total))
# Render the figure into the shared figures directory.
output_path = os.path.join(FIGS, 'counts.rm-{}.pdf'.format(args.window))
plot_role_counts(counts, 'counts', output_path,
                 window=args.window,
                 height=args.height, width=args.width,
                 font_size=args.font_size)
| [
"wejradford@gmail.com"
] | wejradford@gmail.com |
d0b7766854f6f8576dbddc9fb4645f233cca2c41 | 2f3999daf2a359f6677835718958ca6c6e0e4a6a | /example1.py | 7a6e8c0a528cd327d451c3a3f6724d5cb11f6fac | [] | no_license | Sangeetha-Naresh/class97 | d9402203a5804ecd24d51e4eb6eff2cb8b4802ec | 6d36c52368bcc9dd47bf011c48768b5358b4e5c5 | refs/heads/main | 2023-05-06T22:37:51.255175 | 2021-05-16T14:59:01 | 2021-05-16T14:59:01 | 367,911,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | age= int(input("enter your age:"))
if age>18:
print("you are an adult")
elif age >12:
print("teenager")
else:
print("kiddo") | [
"noreply@github.com"
] | Sangeetha-Naresh.noreply@github.com |
efe1f8c522d049fcbb235a250a4ab33ac544503a | 47299b9cca902b847371fa78eacbdaae0bae25f4 | /webclone/one/urls.py | d67a6f7c91c658bc0feb139b85ba6826aba95130 | [] | no_license | kapoorarpit/web_clone- | 5c293fc2c10568562fd3c678e2fc8e43bc436b93 | 3540255fd6336583a9409c51deff0eae92810ee8 | refs/heads/master | 2023-06-11T04:51:07.138328 | 2021-06-29T19:50:52 | 2021-06-29T19:50:52 | 321,369,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the `one` app: landing page and the search endpoint.
urlpatterns = [
    path('', views.home, name='home'),
    path('home/search/', views.search, name='search'),
]
| [
"kapoorarpit2000@gmail.com"
] | kapoorarpit2000@gmail.com |
b01ea9b981eaf809aed4db02cdf99add3ef4992e | a4753147801dbabfec45f6f9f47572cda77efb81 | /debugging-constructs/ibmfl/util/data_handlers/mnist_pytorch_data_handler.py | 29cc18afb938e575e71025d9007fd67f722221b9 | [
"MIT"
] | permissive | SEED-VT/FedDebug | e1ec1f798dab603bd208b286c4c094614bb8c71d | 64ffa2ee2e906b1bd6b3dd6aabcf6fc3de862608 | refs/heads/main | 2023-05-23T09:40:51.881998 | 2023-02-13T21:52:25 | 2023-02-13T21:52:25 | 584,879,212 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | """
Licensed Materials - Property of IBM
Restricted Materials of IBM
20221069
© Copyright IBM Corp. 2022 All Rights Reserved.
"""
import logging
import numpy as np
from ibmfl.data.data_handler import DataHandler
from ibmfl.util.datasets import load_mnist
logger = logging.getLogger(__name__)
class MnistPytorchDataHandler(DataHandler):
    """Data handler that serves MNIST train/test splits shaped for PyTorch."""
    def __init__(self, data_config=None):
        super().__init__()
        # Optional path to a local .npz archive; None triggers a download.
        self.file_name = None
        if data_config is not None and 'npz_file' in data_config:
            self.file_name = data_config['npz_file']
        # Load, then reshape/cast the splits in place.
        (self.x_train, self.y_train), (self.x_test, self.y_test) = self.load_dataset()
        self.preprocess()
    def get_data(self):
        """
        Gets pre-process mnist training and testing data.
        :return: training data
        :rtype: `tuple`
        """
        return (self.x_train, self.y_train), (self.x_test, self.y_test)
    def load_dataset(self, nb_points=500):
        """
        Loads the training and testing datasets from a given local path.
        If no local path is provided, it will download the original MNIST
        dataset online, and reduce each split to `nb_points` data points.
        :param nb_points: Number of data points to keep per split if no
            local dataset is provided.
        :type nb_points: `int`
        :return: training and testing datasets
        :rtype: `tuple`
        """
        if self.file_name is None:
            (x_train, y_train), (x_test, y_test) = load_mnist()
            x_train, y_train = x_train[:nb_points], y_train[:nb_points]
            x_test, y_test = x_test[:nb_points], y_test[:nb_points]
        else:
            try:
                logger.info('Loaded training data from ' + str(self.file_name))
                archive = np.load(self.file_name)
                x_train, y_train = archive['x_train'], archive['y_train']
                x_test, y_test = archive['x_test'], archive['y_test']
            except Exception:
                raise IOError('Unable to load training data from path '
                              'provided in config file: ' +
                              self.file_name)
        return (x_train, y_train), (x_test, y_test)
    def preprocess(self):
        """
        Preprocesses the training and testing dataset: reshapes images to
        NCHW float32 (N, 1, 28, 28) and casts labels to int64.
        :return: None
        """
        img_rows, img_cols = 28, 28
        image_shape = (1, img_rows, img_cols)
        self.x_train = self.x_train.astype('float32').reshape((self.x_train.shape[0],) + image_shape)
        self.x_test = self.x_test.astype('float32').reshape((self.x_test.shape[0],) + image_shape)
        self.y_train = self.y_train.astype('int64')
        self.y_test = self.y_test.astype('int64')
| [
"waris@vt.edu"
] | waris@vt.edu |
911507e11b9253bb23a002ed90852dba054ea2f8 | 9e22cd10e0e89872146b2ced45a8fcff29ae30d2 | /module_integration/raspberrypi/manage.py | 50d7c99e7444d50ab732838ce32b87b4252cd5e2 | [] | no_license | Supriya-Suresh/eYSIP-2017_Vegetable-Identification-Using-Transfer-Learning | ca19833e5a2208252bfcf33515fd7ea0a3480c6d | 8c570408b4394789840660fa9123caea8e634f6c | refs/heads/master | 2022-11-23T22:59:40.466739 | 2017-07-08T06:20:49 | 2017-07-08T06:20:49 | 107,849,691 | 0 | 0 | null | 2022-11-22T01:06:25 | 2017-10-22T08:34:22 | Python | UTF-8 | Python | false | false | 10,977 | py | import loadcell as lc #import load cell library
import RPi.GPIO as GPIO
import lcd #import lcd library
import kpad #import keypad library
import time
import os
import math
import datetime
import sys
import json
#address constant for lines in lcd display
LINE_1 = 0x80
LINE_2 = 0xC0
LINE_3 = 0x94
LINE_4 = 0xD4
baseValue = 0 #variable to store the base value of load cell
taredWeight = 0 #variable to store tared weight
imgName = "" #variable to store image name
measuredWeight = 0.0 #variable to store calculated weight
DOUT = 22 #constant stores gpio pin used by dout pin of hx711. It will be used to check if hx711 is ready to send data or not
troughID = "" #variable to sotre trough ID
locationID = "" #variable to sotre location ID
cropID = "" #variable to store crop ID
cropName = "" #variable to store crop name
locationName = "" #variable to store location name
#Flag variables
troughIDExcepted = 0 #to check if trough id is accepted or not
locationIDExcepted = 0 #to check if location id is accepted or not
cropIDExcepted = 0 #to check if crop id is accepted or not
pictureTaken = 0 #to check if picture is taken or not
active = 1 #to check if program is active or not
#initialize lcd
lcd.lcd_init()
'''
*
* Function Name: calculateWeight
* Input: none
* Output: returns the calculated weight from the load cell value
* Logic: 1) take the reading from load cell
* 2) take the difference between current value and base value
* 3) divide the difference with diference got with known weight
* 4) finally multiply the division answer with known weight value to get the weight
* Example Call: calculateWeight()
*
'''
def caculateWeight():
    """Read the load cell and return the current weight in whole grams.

    Converts the raw HX711 counts to grams using a linear calibration
    (49000 counts observed for a 230 g reference), subtracts the tared
    offset, clamps at zero, and caches the result in ``measuredWeight``.
    """
    global taredWeight
    global measuredWeight
    global baseValue
    val = lc.read_cell_value()  # raw reading from the HX711 amplifier
    # linear calibration: 49000 raw counts correspond to 230 grams
    weight = ((baseValue - val) / 49000.0) * 230.0  # convert them into weight
    weight = weight - taredWeight  # remove tared weight from calculated weight
    if weight < 0:  # if weight becomes negative then set it back to zero
        weight = 0
    weight = int(weight)
    measuredWeight = weight  # store weight into measuredWeight variable
    return measuredWeight  # return the calculated weight
'''
*
* Function Name: displayWeight
* Input: none
* Output: none
* Logic: it displays weight on the lcd screen by using calculateWeight function
* Example Call: displayWeight()
*
'''
def displayWeight() :
    """Refresh the LCD with the current weight and an operator prompt."""
    global measuredWeight
    lcd.string("Object weight is:", LINE_3)
    weight = caculateWeight()  # get calculated weight (also updates measuredWeight)
    lcd.string(str(weight) + " grams", LINE_4)  # display the weight on the lcd
    # Below ~10 g the platform is treated as empty; prompt accordingly.
    if measuredWeight < 10:
        lcd.string("Place your object on", LINE_1)
        lcd.string("the platform", LINE_2)
    else:
        lcd.string("Press * button to", LINE_1)
        lcd.string("continue.", LINE_2)
'''
*
* Function Name: tare
* Input: none
* Output: none
* Logic: takes the current weight of the object and stores it in variable then it will be subtracted form current weight value
* Example Call: tare()
*
'''
def tare():
    """Record the current platform load so later readings subtract it."""
    global baseValue
    global taredWeight
    lcd.clear()
    lcd.string("Taring weight...", LINE_1)
    lcval = lc.read_average_value(10)  # average of 10 samples for stability
    diff = math.fabs(baseValue- lcval)
    # same 49000-counts-per-230-g calibration used in caculateWeight()
    taredWeight = (diff / 49000.0) * 230.0  # store the calculated weight in variable
'''
*
* Function Name: takePicture
* Input: none
* Output: none
* Logic: takes picture using USB camera using fscam program
* Example Call: takePicture()
*
'''
def takePicture():
    """Capture a photo of the sample with the attached USB webcam.

    On success the absolute image path is stored in the module-level
    ``imgName`` and ``pictureTaken`` is set to 1.  If no camera node is
    present an error screen is shown (the flag is still forced on so the
    rest of the workflow can be exercised without a webcam).
    """
    global imgName
    global pictureTaken
    lcd.string("Taking picture...", LINE_2)
    if os.path.exists('/dev/video0'):
        # create image file name with current date
        imgName = "image-" + datetime.datetime.now().isoformat() + ".jpg"
        imgName = "/home/pi/ghfarm/images/%s" % imgName
        # Retake until the image file actually appears on disk.
        # BUG FIX: imgName already holds the full absolute path; the old
        # command prefixed "/home/pi/ghfarm/images/" a second time, so the
        # saved file never matched the isfile() check below and the loop
        # could never terminate.
        while not os.path.isfile(imgName):
            os.system("fswebcam -r 640x480 -S 10 --no-banner %s" % imgName)
        pictureTaken = 1  # picture successfully captured
    else:  # no camera attached: show an error message
        lcd.clear()
        lcd.string("       FAILED", LINE_1)
        lcd.string("No camera attached", LINE_2)
        time.sleep(2)
        # NOTE(review): flag forced on even without a camera (testing hook).
        pictureTaken = 1
'''
*
* Function Name: storeData
* Input: none
* Output: none
* Logic: stores the data into local database
* Example Call: storeData()
*
'''
def storeData():
    """Append the current measurement as one JSON line to the local log.

    Serializes the module-level weight, crop/trough IDs, image path and a
    timestamp into /home/pi/ghfarm/details.txt (one JSON object per line).
    """
    global imgName
    lcd.string("Storing data...", LINE_3)
    # Renamed the local from `time` to avoid shadowing the `time` module.
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    record = {'weight': measuredWeight, 'crop_id': cropID, 'time': timestamp,
              'imagepath': imgName, 'troughid': troughID}
    # Context manager guarantees the handle is flushed and closed — the
    # original opened in append mode and never closed the file.
    with open('/home/pi/ghfarm/details.txt', 'a') as f:
        f.write(json.dumps(record) + '\n')
'''
*
* Function Name: fetchCropInfo
* Input: none
* Output: none
* Logic: fetches the crop name and id from local database
* Example Call: fetchCropInfo()
*
'''
def fetchCropInfo():
    """Resolve ``cropID`` to a crop name and set ``cropIDExcepted``.

    The real MySQL lookup is commented out below; the current stub accepts
    every ID and hardcodes the name "Spinach".
    """
    global cropName
    global cropIDExcepted
    cropName = "Spinach"
    cropIDExcepted = 1
    # --- original database implementation (disabled) -------------------
    # "retrieves crop info through cropid info"
    # global cropID
    # global cropName
    # global cropIDExcepted
    # try:
    #     lcd.clear()
    #     lcd.string("Fetching crop info...", LINE_1)
    #     # create instance of a database with host, username, password and database name
    #     db = sqldb.connect("localhost", "root", "firebird", "maindb")
    #     # create cursor object
    #     cursor = db.cursor()
    #     cid = int(cropID)  # convert accepted crop id into integer
    #     sql = "SELECT name FROM cropinfo WHERE id = %d" % (cid)
    #     cursor.execute(sql)
    #     data = cursor.fetchone()
    #     # if there some crop exists with this id
    #     if data > 0:
    #         cropName = data[0]  # then assign cropname to variable
    #         cropIDExcepted = 1  # set cropIDExcepted flag to one
    #     else:  # if no crop exists with entered ID
    #         lcd.clear()
    #         lcd.string("Crop ID does not", LINE_1)
    #         lcd.string("exists!", LINE_2)
    #         lcd.string("Enter valid crop ID", LINE_3)
    #         time.sleep(3)
    # except:  # if database connection fails
    #     lcd.clear()
    #     lcd.string("       FAILED", LINE_1)
    #     lcd.string("Unable to connect to", LINE_2)
    #     lcd.string("local database", LINE_3)
    #     lcd.string("Try again later", LINE_4)
    #     time.sleep(3)
    # db.close()
'''
*
* Function Name: acceptCropID
* Input: none
* Output: none
* Logic: accepts crop ID from user using keypad
* Example Call: acceptCropID()
*
'''
def acceptCropID():
    """Collect a numeric crop ID from the keypad, then look it up.

    Keypad conventions: digits append, '#' is backspace, '*' confirms
    (rejected while the entry is empty).
    """
    global cropID
    lcd.clear()
    cropID = ""
    key = ""
    time.sleep(0.1)
    lcd.string("Enter Crop ID", LINE_1)
    lcd.string("Press * to continue", LINE_2)
    lcd.string("and # for backspace", LINE_3)
    # loop until a non-empty crop id is entered and the * key is pressed
    while key != "*":
        lcd.string(cropID, LINE_4)
        key = kpad.get_key()
        if key == '*':
            if len(cropID) <= 0:
                # empty entry: show the error, then re-display the prompt
                lcd.clear()
                lcd.string("Crop ID cant", LINE_1)
                lcd.string("be null", LINE_2)
                time.sleep(1)
                lcd.clear()
                lcd.string("Enter Crop ID", LINE_1)
                lcd.string("Press * to continue", LINE_2)
                lcd.string("and # for backspace", LINE_3)
            else:
                break
        elif key == '#':  # for backspacing
            if len(cropID) > 0:
                cropID = cropID[:-1]
        elif key.isdigit():
            cropID += key
        time.sleep(0.2)  # simple key debounce
        key = ""
    # after accepting crop ID fetch crop information from local database
    print("Calling fetchcrop")
    fetchCropInfo()
'''
*
* Function Name: fetchTroughInfo
* Input: none
* Output: none
* Logic: fetches the trough name and id from local database
* Example Call: fetchTroughInfo()
*
'''
def fetchTroughInfo():
    """Stub lookup: accept every trough ID (database validation disabled)."""
    global troughID, troughIDExcepted
    troughIDExcepted = 1
'''
*
* Function Name: acceptTroughID
* Input: none
* Output: none
* Logic: accepts trough ID from user using keypad
* Example Call: acceptTroughID()
*
'''
def acceptTroughID():
    """Collect a numeric trough ID from the keypad, then validate it.

    Keypad conventions: digits append, '#' is backspace, '*' confirms
    (rejected while the entry is empty).
    """
    global troughID
    global troughIDExcepted
    lcd.clear()
    troughID = ""
    key = "E"
    time.sleep(0.1)
    lcd.string("Enter Trough ID", LINE_1)
    lcd.string("Press * to continue", LINE_2)
    lcd.string("and # for backspace", LINE_3)
    # loop until a non-empty trough id is entered and the * key is pressed
    while key != "*":
        lcd.string(troughID, LINE_4)
        key = kpad.get_key()
        if key == '*':
            if len(troughID) <= 0:
                # empty entry: show the error, then re-display the prompt
                lcd.clear()
                lcd.string("Trough ID can't", LINE_1)
                lcd.string("be null", LINE_2)
                time.sleep(1)
                lcd.clear()
                lcd.string("Enter Trough ID", LINE_1)
                lcd.string("Press * to continue", LINE_2)
                lcd.string("and # for backspace", LINE_3)
            else:
                break
        elif key == '#':  # for backspacing
            if len(troughID) > 0:
                troughID = troughID[:-1]
        elif key.isdigit():
            troughID += key
        time.sleep(0.1)  # simple key debounce
        key = ""
    # check if entered trough ID is valid or not (stubbed lookup)
    fetchTroughInfo()
'''
*
* Function Name: init
* Input: none
* Output: none
* Logic: calculates the baseValue of load cell and fetches the crop info from the server database
* Example Call: init()
*
'''
def init():
    """Show the welcome screen and capture the unloaded load-cell baseline."""
    print("Initialization")
    global baseValue
    lcd.string("      Welcome", LINE_1)
    lcd.string(" Remove any object", LINE_2)
    lcd.string(" from the platform.", LINE_3)
    time.sleep(2)
    lcd.clear()
    lcd.string("      Welcome", LINE_1)
    lcd.string("   Please wait...", LINE_2)
    # reference reading with nothing on the platform; all later weights
    # are computed relative to this value
    baseValue = lc.base_value()
# Main control loop: show the live weight, collect crop and trough IDs,
# photograph the sample and append the record to the local log.  Keypad
# hotkeys while weighing: D = tare, A = shutdown, B = restart this script,
# C = reboot the Pi, * = continue to data entry.  Ctrl-C exits cleanly.
try :
    init()
    print("Started System")
    lcd.string("Started System", LINE_1)
    troughIDExcepted = cropIDExcepted = pictureTaken = 0
    key = "E"
    while True:
        # BUG FIX: compare strings with != rather than `is not` — identity
        # comparison against a literal only works by accident of CPython
        # string interning and raises a SyntaxWarning on modern Python.
        while key != '*':
            displayWeight()
            key = kpad.get_key()
            if key == 'D':
                tare()
            elif key == 'A':
                lcd.clear()
                lcd.string("       System", LINE_2)
                lcd.string("  Shutting down...", LINE_3)
                active = 0
                os.system("sudo poweroff")
                lcd.clear()
                break
            elif key == 'B':
                lcd.clear()
                lcd.string("      Script", LINE_2)
                lcd.string("    Restarting", LINE_3)
                lcd.string("  Please wait...", LINE_4)
                active = 0
                GPIO.cleanup()
                sys.stdout.flush()
                # replace this process with a fresh interpreter run
                os.execv(sys.executable, ['python'] + sys.argv)
                break
            elif key == 'C':
                lcd.clear()
                lcd.string("      System", LINE_2)
                lcd.string("    Restarting", LINE_3)
                lcd.string("  Please wait...", LINE_4)
                active = 0
                os.system("sudo reboot")
                break
        # Weighing confirmed: walk through the data-entry pipeline, each
        # stage gated by the flag set in the previous one.
        acceptCropID()
        print("Accepted Crop")
        if cropIDExcepted:
            print("Calling acceptTrough")
            acceptTroughID()
            if troughIDExcepted:
                print("Calling acceptLocation")
                takePicture()
                if pictureTaken:
                    print("Calling Store Data")
                    storeData()
        key = "E"  # rearm the weighing loop for the next sample
except KeyboardInterrupt:
    print("Interrupted by keyboard")
finally:
    lcd.clear()
    time.sleep(1)
    GPIO.cleanup()
| [
"techieboy.teknas@gmail.com"
] | techieboy.teknas@gmail.com |
7eced97eac47dfd2ce21cee31fe289634f7a5bf7 | eac6dc8eb8e5f088500f425a7323cd35a4f99bd6 | /src/courses/migrations/0012_course_active.py | af89db3155df4d47be9b84b4c843f0b847c617a6 | [] | no_license | aminhp93/django_serverup_2 | a14195af756799795282028ba611dbccc3848870 | aef31722e882367c731e9e48fc8af8740befc112 | refs/heads/master | 2020-05-27T01:54:15.268661 | 2017-02-25T21:58:36 | 2017-02-25T21:58:36 | 82,514,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 18:06
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: adds a boolean `active` flag
# (default True) to the courses.Course model.
class Migration(migrations.Migration):
    dependencies = [
        ('courses', '0011_auto_20170219_1749'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='active',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"minhpn.org.ec@gmail.com"
] | minhpn.org.ec@gmail.com |
472f3e9fe76c99a8fc0b7f48fea8176b6b9b582e | 5f9ec375125dae625b5fe169b6f3f836a2431dd1 | /core/logger_helper.py | 9ce5bc46fbbe8b6e97a9ed7da18f446afd2fbc52 | [] | no_license | mumudd/python_weixin | e280b6bdc81f30365b1bb0e4700d9a00e6b99037 | 144dbedc72c010beae0d243001b82b9f687d0a1f | refs/heads/master | 2021-06-23T20:14:36.237386 | 2020-12-09T08:37:51 | 2020-12-09T08:37:51 | 160,120,308 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | import logging
from logging import Logger
from logging.handlers import TimedRotatingFileHandler
'''日志管理类'''
def init_logger(logger_name):
    """Create and configure a named logger, or return the existing one.

    On first call for ``logger_name`` the logger gets:
      * a daily-rotating DEBUG file handler (all.log, 7 backups),
      * a daily-rotating ERROR file handler (error.log, 7 backups),
      * a DEBUG console handler.
    Each file handler first tries the Linux deployment path and falls back
    to the Windows development path. Subsequent calls return the already
    configured logger without adding duplicate handlers.
    """
    if logger_name not in Logger.manager.loggerDict:
        logger1 = logging.getLogger(logger_name)
        logger1.setLevel(logging.INFO)  # minimum level for the logger itself
        df = '%Y-%m-%d %H:%M:%S'
        format_str = '[%(asctime)s]: %(name)s %(levelname)s %(lineno)s %(message)s'
        formatter = logging.Formatter(format_str, df)

        def _add_file_handler(linux_path, windows_path, level):
            # One helper for both rotating file handlers; the two code paths
            # were previously duplicated inline.
            try:
                handler = TimedRotatingFileHandler(linux_path, when='D', interval=1, backupCount=7)
            except Exception:
                # Fall back to the Windows dev-machine path.
                handler = TimedRotatingFileHandler(windows_path, when='D', interval=1, backupCount=7)
            handler.setFormatter(formatter)
            handler.setLevel(level)
            logger1.addHandler(handler)

        _add_file_handler('/usr/web_wx/log/all.log',
                          'F:\program\web_wx\core\log\/all.log', logging.DEBUG)
        _add_file_handler('/usr/web_wx/log/error.log',
                          'F:\program\web_wx\core\log\error.log', logging.ERROR)
        # Console handler mirrors everything at DEBUG.
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(formatter)
        logger1.addHandler(console)
    # getLogger returns the same singleton whether or not we just configured it,
    # which removes the redundant re-assignment the original did unconditionally.
    return logging.getLogger(logger_name)
return logger1
# Module-level logger shared by importers of this helper.
logger = init_logger('runtime-log')

if __name__ == '__main__':
    # Smoke test: emit one record at each level.
    logger.debug('test-debug')
    logger.info('test-info')
    # `warn` is a deprecated alias of `warning` (removed in Python 3.13).
    logger.warning('test-warn')
    logger.error('test-error')
| [
"sufaith@foxmail.com"
] | sufaith@foxmail.com |
72b1bd0b8b29a08c14a6a75b7ceb058d86883236 | 39d100d1ed768ab4bdc768dc70e68d4bf943f233 | /tgmate/views/__init__.py | a21ee0f14e8e4e8b7a1506789e34fefb9805171c | [] | no_license | ivan-koryshkin/tgmate | 702b5c465a3435be134d858cc5fbd0f5ca8fd1f3 | 7ae1f5125ac19f00c53d557c70dbbdbe99886cac | refs/heads/master | 2023-08-30T09:20:04.947011 | 2021-11-09T13:21:17 | 2021-11-09T13:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from .admin_view import UserModelView
from .admin_view import TgUserView
from .admin_view import MessageView
from .admin_view import ChatView
__all__ = [
'UserModelView',
'TgUserView',
'MessageView',
'ChatView'
] | [
"ivan.koryshkin@gmail.com"
] | ivan.koryshkin@gmail.com |
f42e13027d1095f41cb53f127f04978052b43ba3 | 8b07bc3844f23054abccae1d50e1bc5ede5943c3 | /producto/migrations/0003_producto_disponible.py | 0723fa7bfd1f6bfdb2e002ca5efd13b4510feb82 | [] | no_license | HedleyPty/PythonAnywhere | 9c5ba4913e7f8d188d1fb1a0f6f8a3aa4b96210d | 324bdb325db2ecfe22521d82ee3fe6cb2acc927a | refs/heads/master | 2021-09-14T08:53:48.957057 | 2018-05-10T19:02:40 | 2018-05-10T19:02:40 | 112,934,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-22 08:59
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: adds a boolean `disponible` ("available")
# flag (default False) to the producto.Producto model.
class Migration(migrations.Migration):
    dependencies = [
        ('producto', '0002_auto_20160221_1425'),
    ]
    operations = [
        migrations.AddField(
            model_name='producto',
            name='disponible',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"hedleypanama@gmail.com"
] | hedleypanama@gmail.com |
b2bed29df5eede8d6e01cc0c3ae685153dd0d69a | 84750e22e48440a292c305dbd2ae75c4a210e934 | /exspider/utils/view_funcs.py | 736f3db21f558507697e89f6966b94cbba6307eb | [] | no_license | stonegithubs/exspider | 617800a13ec9a1ca9c723d517766d00dcedd36a1 | 42b5cb0415c90dd60bc2c009a6aa467e71823854 | refs/heads/master | 2022-08-02T10:02:55.566046 | 2020-04-29T14:54:58 | 2020-04-29T14:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | #! /usr/bin/python
# -*- coding:utf-8 -*-
# @zhuchen : 2019-03-06 15:18
import time
from django.conf import settings
from rest_framework.response import Response
# 成功
success_code = {
1: "{}"
}
# 用户错误
user_error_code = {
1001: "登录失败",
1002: "重复请求验证码",
1003: "验证码错误",
1004: "您已经登录",
1005: "需要登录才能操作",
1006: "验证码过期",
1007: "稍后再试",
1008: "{}"
}
# 系统错误
http_error_code = {
9001: "必传参数[{}]错误",
9002: "[{}]参数错误",
9003: "[{}]格式错误",
9004: "自定义错误",
9005: "数据不存在",
9006: "数据添加失败,{}",
9007: "数据保存失败",
9008: "{}" # 自定义错误,客户端展示
}
def http_response(http_code, http_msg=None, data=None, **kwargs):
    """Build the project-standard DRF response envelope.

    The message template is looked up in the user / system / success code
    tables (in that order) and optionally formatted with ``http_msg``.
    ``data`` and any extra keyword arguments are merged into the payload,
    and a ``server_time`` unix timestamp is always attached.
    """
    resp = settings.RESPONSE_FORMAT.copy()
    resp['code'] = http_code
    # Resolve the message template from whichever table owns the code.
    if http_code in user_error_code:
        template = user_error_code[http_code]
    elif http_code in http_error_code:
        template = http_error_code[http_code]
    else:
        template = success_code[http_code]
    resp['message'] = template if http_msg is None else template.format(http_msg)
    if data is not None:
        resp['data'] = data
    resp['server_time'] = int(time.time())
    resp.update(kwargs)
    return Response(resp)
"chen.zhu@blacktail.tech"
] | chen.zhu@blacktail.tech |
f20a1f49d564b9bb5bdee9d117e1c5832706526f | 639d66b4a667db97c2638132dd028b7f5b865ef0 | /splash_screen.py | 6e5e635b1b0dac9fae73c7f54c4e3271555746a6 | [] | no_license | liturreg/blackjack_pythonProject | d91d21494b21159667f48a683b919ea68401c56c | b88f15ac35db8fbeb8b00234084c5b114383d6cd | refs/heads/master | 2023-01-29T18:45:08.531471 | 2020-12-07T19:57:33 | 2020-12-07T19:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | def splash_screen():
print(r""" /$$$$$$$ /$$ /$$ /$$$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$$ /$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$ /$$$$$$ /$$$$$$ /$$ /$$
| $$__ $$| $$ /$$/|__ $$__/| $$ | $$ /$$__ $$| $$$ | $$ | $$__ $$| $$ /$$__ $$ /$$__ $$| $$ /$$/ |__ $$ /$$__ $$ /$$__ $$| $$ /$$/
| $$ \ $$ \ $$ /$$/ | $$ | $$ | $$| $$ \ $$| $$$$| $$ | $$ \ $$| $$ | $$ \ $$| $$ \__/| $$ /$$/ | $$| $$ \ $$| $$ \__/| $$ /$$/
| $$$$$$$/ \ $$$$/ | $$ | $$$$$$$$| $$ | $$| $$ $$ $$ | $$$$$$$ | $$ | $$$$$$$$| $$ | $$$$$/ | $$| $$$$$$$$| $$ | $$$$$/
| $$____/ \ $$/ | $$ | $$__ $$| $$ | $$| $$ $$$$ | $$__ $$| $$ | $$__ $$| $$ | $$ $$ /$$ | $$| $$__ $$| $$ | $$ $$
| $$ | $$ | $$ | $$ | $$| $$ | $$| $$\ $$$ | $$ \ $$| $$ | $$ | $$| $$ $$| $$\ $$ | $$ | $$| $$ | $$| $$ $$| $$\ $$
| $$ | $$ | $$ | $$ | $$| $$$$$$/| $$ \ $$ | $$$$$$$/| $$$$$$$$| $$ | $$| $$$$$$/| $$ \ $$| $$$$$$/| $$ | $$| $$$$$$/| $$ \ $$
|__/ |__/ |__/ |__/ |__/ \______/ |__/ \__/ |_______/ |________/|__/ |__/ \______/ |__/ \__/ \______/ |__/ |__/ \______/ |__/ \__/""" + "\n")
| [
"nicolas.gasco92@gmail.com"
] | nicolas.gasco92@gmail.com |
ba63f7efdf10aab9c7481c9a2bee33143ac12df2 | 2037235643046608bf883f11c1bc448e2df8a4a3 | /HuaYing/practice/test14.py | a18f331036c28c57f36f4079f83d4f9d3c4a6650 | [] | no_license | Hardworking-tester/HuaYingAutoTest | 7e46dfb0729961cee0da06762fc0be11724ad80b | c1f0cf7aa4433f482bbae88d1a5637b9859359ca | refs/heads/master | 2021-01-10T18:38:37.788736 | 2015-09-05T10:37:10 | 2015-09-05T10:37:10 | 41,957,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | #encoding:utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
br=webdriver.Firefox()
# br.maximize_window()
br.get("http://www.xebest.com:8000")
elements=br.find_elements_by_class_name("nav-arrow")
element1=elements[4]
if element1.is_displayed():
print ("网站导航链接已定位到")
else:
print ("网站导航元素未找到,请更换定位方式后重新定位")
# if br.find_element_by_xpath("//*[@id='topnav']/ul/li[5]/div[2]/ul[2]/li[2]/a").is_displayed():
# if br.find_element_by_css_selector("div#topnav>ul:first>li:nth(4)>div:nth(1)>ul:nth(1)>li(1)>a").is_displayed():
# if br.find_element_by_css_selector("li#all_menu>ul:nth(0)>li:nth(0)>a>span").is_displayed():
# if br.find_element_by_link_text(u"易支付").is_displayed():
# print ("易支付元素已找到")
# else:
# print("易支付元素未找到,请更换定位方式后重新定位")
# epay=br.find_element_by_css_selector("div#topnav>ul>li:nth(4)>div:nht(1)>ul:nth(1)>li(1)>a")
# epay=br.find_element_by_xpath("//*[@id='topnav']/ul/li[5]/div[2]/ul[2]/li[2]/a")
# epay=br.find_element_by_xpath("//*[@id='topnav']/ul/li[5]/div[2]/ul[2]/li[2]/a")
epay=br.find_element_by_link_text(u"易支付")
ActionChains(br).move_to_element(element1).click(element1).perform()
ActionChains(br).move_to_element(epay).click(epay).perform() | [
"373391120@qq.com"
] | 373391120@qq.com |
fd57d33c643143a4cd19384011907536cfa8de5d | 4864834342f99fff07f3c8b61c39f90228988ccf | /goldi_locks.py | acc88c342cb4f2fcc6722f6c6256ae7bb472caf4 | [] | no_license | Kyle628/dailyprogrammer | 6999d37d5449942e3a1a04800bf4999c2530d06b | 7985f6ecaf88d0e6d1247d38959c17e90256e1d4 | refs/heads/master | 2020-06-23T13:40:27.510734 | 2017-05-10T17:48:57 | 2017-05-10T17:48:57 | 74,647,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import sys
input_str = "100 80\n30 50\n130 75\n90 60\n150 85\n120 70\n200 200\n110 100"
lines = input_str.split('\n')
first_line = lines.pop(0).split(" ")
weight = int(first_line[0])
temp_tolerance = int(first_line[1])
for i,line in enumerate(lines):
line_arr = line.split(" ")
weight_capacity = int(line_arr[0])
soup_temp = int(line_arr[1])
if weight_capacity > weight and soup_temp < temp_tolerance:
sys.stdout.write(str(i + 1) + " ")
print ''
| [
"kyjoconn@ucsc.edu"
] | kyjoconn@ucsc.edu |
8c0ee08b61836fa5388ef5e53460488f7c307034 | 03afa9df5e088558fffdf1594344d609ab199720 | /model_full_no_stage2_1RIM.py | df200ad92466861e2e25ce5de6a6c2cb3cb04976 | [] | no_license | tungvd345/Deraining | 46489a376446c717914362ed36d997622df14c27 | 3dde575c620ddabca44341a4d078a34a9c67f6ea | refs/heads/master | 2023-03-18T13:45:27.630232 | 2021-02-19T06:26:11 | 2021-02-19T06:26:11 | 265,502,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,268 | py | import torch
import torch.nn as nn
from torch.nn import init
import torchvision.transforms as transforms
import torchvision.models as models
import functools
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from math import log10
class Deraining(nn.Module):
    """Top-level deraining network.

    A SCA-UNet produces a restored image plus an intermediate feature map;
    additive and multiplicative residual branches are derived from that
    feature map, and the three candidate images are fused with learned
    per-branch channel-attention weights.
    """
    def __init__(self,args):
        super(Deraining, self).__init__()
        self.args = args
        self.upsample = F.interpolate
        self.upx2 = nn.Upsample(scale_factor=2, mode = 'bilinear', align_corners=True)
        self.up_feature = up_feature(in_channels=16*16*3)  # unused in forward (kpts path commented out)
        self.ats_model = SCA_UNet(in_channel=3, out_channel=3)
        self.operation_layer = operation_layer(in_channels=3)
        self.add_layer = add_layer()
        self.mul_layer = mul_layer()
        self.relu = nn.LeakyReLU(0.2, True)
        self.sigmoid = nn.Sigmoid()
        self.conv = nn.Conv2d(in_channels=9, out_channels=3, kernel_size=3, padding=1)
        self.channel_att = channel_attention(in_channels=9)
        # NOTE(review): RCAN does not appear in this module's visible imports;
        # confirm it is defined later in the file, otherwise this raises NameError.
        self.rcan = RCAN(args)
    def forward(self, x, kpts):
        # x: rainy input (B, 3, H, W); kpts is currently unused (its branch is commented out).
        b, c, height, width = x.size()
        # x = self.upsample1(x)
        # features_add = self.up_feature(kpts)
        # features_add = self.upsample(features_add, size=(height, width), mode='bilinear', align_corners=True)
        #
        # features_mul = self.up_feature(kpts)
        # features_mul = self.upsample(features_mul, size=(height, width), mode='bilinear', align_corners=True)
        # atm, trans, streak = self.ats_model(x)
        # clean = (x - (1-trans) * atm) / (trans + 0.0001) - streak
        # Restored image and encoder feature map from the SCA-UNet.
        clean,feature = self.ats_model(x)
        # feature = self.ats_feature(x)
        # add_residual = self.operation_layer(features_add)
        # add_layer = x + add_residual
        # Additive branch: predicted residual added to the input.
        add_residual = self.add_layer(feature)
        add_residual = self.upsample(add_residual, size=(height, width), mode='bilinear', align_corners=True)
        add_layer = x + add_residual
        # mul_residual = self.mul_layer(feature)
        # Multiplicative branch: predicted per-pixel gain applied to the input.
        mul_residual = self.mul_layer(feature)
        mul_residual = self.upsample(mul_residual, size=(height, width), mode='bilinear', align_corners=True)
        mul_layer = x * (mul_residual)
        concatenates = torch.cat((clean, add_layer, mul_layer), dim=1)
        # concatenates = torch.cat((clean, mul_layer), dim=1)
        # w0 = self.channel_att(add_layer)
        # out_comb = w0 * add_layer
        # out_comb = self.conv(concatenates)
        # Fuse the three candidates with learned attention weights.
        w0, w1, w2 = self.channel_att(concatenates)
        out_comb = w0 * clean + w1 * add_layer + w2 * mul_layer
        # w1, w2 = self.channel_att(concatenates)
        # out_comb = w1 * clean + w2 * mul_layer
        # out_SR = self.rcan(out_comb)
        # Super-resolution stage disabled: out_SR is just the fused image.
        out_SR = out_comb
        out_combine = out_comb
        return out_SR, out_combine, clean, add_layer, mul_layer, add_residual, mul_residual
        # return out_SR, out_combine, add_layer, add_layer, add_layer, add_residual, add_residual
        # return out_SR, out_combine, clean, clean, clean
class ATS_model(nn.Module):
    """Estimate atmospheric light A, transmission T and rain streak S for the
    rain-haze model ``clean = (x - (1 - T) * A) / (T + eps) - S``.
    """
    def __init__(self, args, in_channels):
        super(ATS_model, self).__init__()
        self.conv1 = nn.Conv2d(in_channels = in_channels, out_channels = 64, kernel_size = 3, padding = 1)
        # self.batch_norm = nn.BatchNorm2d(64)
        self.relu1 = nn.LeakyReLU(0.2, True)
        self.conv2 = nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3, padding = 1)
        # self.pooling = nn.AvgPool2d(kernel_size = (3,3))
        # self.fc = nn.Linear(in_features = in_channels * (args.patch_size//6) * (args.patch_size//4), out_features = 3) # (patch*3//2) //3 = patch // 2
        # self.sigmoid = nn.Sigmoid()
        self.predict_S = predict_S(in_channel=3, out_channel=3)
        self.predict_A = predict_A(128)
        self.predict_T = predict_T(in_channel=3, out_channel=3)
        # self.conv = nn.Conv2d(in_channels, out_channels=128, kernel_size=3, padding=1)
    def forward(self,x):
        # T = self.predict_T(x)
        S = self.predict_S(x)  # rain streak map, predicted from the raw input
        T = self.predict_T(x)  # transmission map, predicted from the raw input
        # A is regressed from shallow conv features (64 -> 128 channels).
        x = self.relu1(self.conv1(x))
        x = self.relu1(self.conv2(x))
        # conv_T = self.conv2(self.relu1(self.batch_norm(self.conv1(x))))
        # T = self.sigmoid(conv_T)
        # T = self.predict_A(x)
        # pooling = self.pooling(x)
        # b, c, h, w = pooling.size()
        # pooling = pooling.view(b,-1)
        # A = self.sigmoid(self.fc(pooling))
        # A = A.view(b,3,1,1)
        A = self.predict_A(x)
        # conv_S = self.conv2(self.relu1(self.batch_norm(self.conv1(x))))
        # S = self.sigmoid(conv_S)
        #clean = (img_in - (1 - T) * A) / (T + 0.0001) - S
        return A, T, S
class predict_S(nn.Module):
    """Rain-streak estimator: two dense blocks followed by a 1x1 reduction head."""
    def __init__(self, in_channel, out_channel=3):
        super(predict_S, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, 32, kernel_size=3, padding=1)
        self.dense_block1 = dense_block(in_channel=32, up_channel=32)
        # self.dense_block = dense_block(in_channel=in_channel, out_channel=in_channel)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=1)
        self.dense_block2 = dense_block(in_channel=64, up_channel=64)
        self.relu = nn.ReLU()
        # Head: 64 -> 32 -> out_channel, with dropout for regularisation.
        sequence = [nn.Conv2d(64, 64 // 2, kernel_size=1),
                    nn.ReLU(True),
                    nn.Conv2d(64 // 2, out_channel, kernel_size=1),
                    nn.Dropout2d()
                    ]
        self.down_conv = nn.Sequential(*sequence)
        self.reset_params()
    def forward(self, x):
        # dense_block = self.dense_block(x)
        x = self.relu(self.conv1(x))
        dense_block1 = self.dense_block1(x)
        dense_block2 = self.relu(self.conv2(dense_block1))
        dense_block2 = self.dense_block2(dense_block2)
        streak = self.down_conv(dense_block2)
        return streak
    @staticmethod
    def weight_init(m):
        # Xavier-initialise conv weights only (biases left at default).
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # init.constant(m.bias, 0)
    def reset_params(self):
        for i, m in enumerate(self.modules()):
            self.weight_init(m)
class predict_T(nn.Module):
    """Transmission-map estimator: a thin wrapper around TransUNet with
    Xavier-initialised conv weights."""
    def __init__(self, in_channel, out_channel=3):
        super(predict_T, self).__init__()
        self.trans_unet = TransUNet(in_channel, out_channel)
        self.reset_params()

    def forward(self, x):
        return self.trans_unet(x)

    @staticmethod
    def weight_init(m):
        # Xavier-initialise every conv layer; other module types untouched.
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)

    def reset_params(self):
        for module in self.modules():
            self.weight_init(module)
class predict_A(nn.Module):
    """Regress one scalar atmospheric-light value per sample.

    Five conv+ReLU+adaptive-pool stages progressively shrink the feature map
    (128x128 -> ... -> 1x1), then a linear layer maps the pooled vector to a
    single value, returned with shape (B, 1, 1, 1).
    """
    def __init__(self, in_channel):
        super(predict_A, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, in_channel//4, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channel//4, in_channel//4, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channel//4, in_channel//16, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(in_channel//16, in_channel//16, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(in_channel//16, in_channel//64, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.pooling1 = nn.AdaptiveAvgPool2d((128, 128))
        self.pooling2 = nn.AdaptiveAvgPool2d((64, 64))
        self.pooling3 = nn.AdaptiveAvgPool2d((32, 32))
        self.pooling4 = nn.AdaptiveAvgPool2d((16, 16))
        self.pooling5 = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_channel//64, 1)
        self.reset_params()

    def forward(self, x):
        batch = x.size(0)
        feat = self.pooling1(self.relu(self.conv1(x)))
        feat = self.pooling2(self.relu(self.conv2(feat)))
        feat = self.pooling3(self.relu(self.conv3(feat)))
        feat = self.pooling4(self.relu(self.conv4(feat)))
        feat = self.pooling5(self.relu(self.conv5(feat)))
        scalar = self.fc(feat.view(batch, -1))
        return scalar.view(batch, 1, 1, 1)

    @staticmethod
    def weight_init(m):
        # Xavier-initialise every conv layer.
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)

    def reset_params(self):
        for module in self.modules():
            self.weight_init(module)
##################################################################################
# dense_block use pretrained dense-net
##################################################################################
# class dense_block(nn.Module):
# def __init__(self, in_channel, out_channel):
# super(dense_block, self).__init__()
# model_dense_net = models.densenet121(pretrained=True)
# model_dense_net = list(model_dense_net.children())[:]
# self.dense_block = model_dense_net[0].denseblock1
# self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=64, kernel_size=7, padding=3)
# self.relu = nn.ReLU(True)
# # sequence = []
# sequence = [nn.Conv2d(256, 224, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(224, 192, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(192, 160, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(160, 128, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(128, 96, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(96, 64, kernel_size = 1),
# nn.ReLU(True),
# nn.Conv2d(64, 3, kernel_size = 1),
# nn.Dropout2d()]
# self.down_conv = nn.Sequential(*sequence)
#
# def forward(self, x):
# x = self.relu(self.conv1(x))
# dense_block = self.relu(self.dense_block(x))
# out = self.down_conv(dense_block)
#
# return out
##################################################################################
##################################################################################
# dense_block don't use pretrained
##################################################################################
class dense_block(nn.Module):
    """Dense block: ``num_dense_layer`` densely-connected conv layers, a 1x1
    bottleneck back down to ``in_channel``, and a residual connection."""
    def __init__(self, in_channel, up_channel=32, num_dense_layer=4):
        super(dense_block, self).__init__()
        in_chan = in_channel
        sequence_1 = []
        for i in range(num_dense_layer):
            sequence_1.append(dense_layer(in_chan, up_channel))
            in_chan += up_channel  # each dense layer concatenates up_channel new maps
        self.dense_block = nn.Sequential(*sequence_1)
        # Bottleneck head: grown channels -> half -> back to in_channel, with dropout.
        sequence_2 = [nn.Conv2d(in_chan, in_chan//2, kernel_size=1),
                      nn.ReLU(True),
                      nn.Conv2d(in_chan//2, in_channel, kernel_size = 1),
                      nn.Dropout2d()
                      ]
        self.down_conv = nn.Sequential(*sequence_2)
    def forward(self, x):
        dense_block = self.dense_block(x)
        out = self.down_conv(dense_block)
        out = out + x  # residual connection around the whole block
        return out
class dense_layer(nn.Module):
    """One densely-connected layer: 3x3 conv + ReLU whose output is
    concatenated onto the input along the channel axis."""
    def __init__(self, in_channel, up_channel):
        super(dense_layer, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channel, out_channels=up_channel, kernel_size=3, padding=1)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        grown = self.relu(self.conv(x))
        # Channel count grows from in_channel to in_channel + up_channel.
        return torch.cat((x, grown), 1)
##################################################################################
##################################################################################
# Defines the Unet-transmission
##################################################################################
class TransUNet(nn.Module):
    """UNet regressing the transmission map; tanh-activated output head."""
    def __init__(self, in_channel, n_classes):
        super(TransUNet, self).__init__()
        self.conv1x1 = nn.Conv2d(in_channel, in_channel, kernel_size=1, stride=1, padding=0)
        # self.inc = inconv(in_channel, 64)
        # Stem: two 3x3 convs lifting to 64 channels.
        self.inconv = nn.Sequential(
            nn.Conv2d(in_channel, 32, 3, padding=1),
            # nn.InstanceNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1),
            # nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        )
        # Encoder: four stride-2 stages.
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        # Decoder: four upsampling stages with skip connections.
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4 = up(128, 32)
        self.outconv = nn.Conv2d(32, n_classes, kernel_size=1)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
    def forward(self, x):
        x = self.conv1x1(x)
        x1 = self.inconv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # decoder
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.tanh(self.outconv(x))
        return x
class double_conv(nn.Module):
    '''Two successive 3x3 conv + ReLU layers; channel count changes on the first.'''
    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        layers = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class down(nn.Module):
    """Stride-2 encoder stage: strided 3x3 conv + BN + ReLU, then a 1x1
    conv + BN + ReLU refinement (halves the spatial resolution)."""
    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        stage = [
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(True),
            nn.Conv2d(out_ch, out_ch, kernel_size=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(True),
        ]
        self.mpconv = nn.Sequential(*stage)

    def forward(self, x):
        return self.mpconv(x)
class up(nn.Module):
    """Decoder stage: upsample x1 by 2, pad to match skip x2, concat, double conv."""
    def __init__(self, in_ch, out_ch, bilinear=False):
        super(up, self).__init__()
        # would be a nice idea if the upsampling could be learned too,
        # but my machine do not have enough memory to handle all those weights
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            # assumes x1 carries in_ch//2 channels — TODO confirm at call sites
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Zero-pad the upsampled map so spatial sizes match the skip connection.
        diffX = x2.size()[2] - x1.size()[2]
        diffY = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, (0, diffY, 0, diffX))
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
##################################################################################
# Defines the SCA-clean - base on UNet
##################################################################################
class SCA_UNet(nn.Module):
    """UNet with spatial+channel attention stages; returns the restored image
    and the first encoder feature map (used by the residual branches)."""
    def __init__(self, in_channel, out_channel):
        super(SCA_UNet, self).__init__()
        self.conv1x1 = nn.Conv2d(in_channel, in_channel, kernel_size=1, stride=1, padding=0)
        # self.inc = inconv(in_channel, 64)
        # Stem: two 3x3 convs lifting to 64 channels.
        self.inconv = nn.Sequential(
            nn.Conv2d(in_channel, 32, 3, padding=1),
            # nn.InstanceNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1),
            # nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        )
        # Encoder / decoder built from SCA-attention stages.
        self.down1 = down_SCA(64, 128)
        self.down2 = down_SCA(128, 256)
        self.down3 = down_SCA(256, 512)
        self.down4 = down_SCA(512, 512)
        self.up1 = up_SCA(1024, 256)
        self.up2 = up_SCA(512, 128)
        self.up3 = up_SCA(256, 64)
        self.up4 = up_SCA(128, 32)
        self.outconv = nn.Conv2d(32, out_channel, kernel_size=1)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
    def forward(self, x):
        x = self.conv1x1(x)
        x1 = self.inconv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # decoder
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        # No output activation: raw conv output is returned.
        x = (self.outconv(x))
        in_feature = x2  # 128-channel, half-resolution encoder features
        return x, in_feature
class SCA_feature(nn.Module):
    """Encoder-only variant of SCA_UNet: returns the 256-channel feature map
    after two SCA down stages (quarter resolution)."""
    def __init__(self, in_channel, out_channel):
        super(SCA_feature, self).__init__()
        self.conv1x1 = nn.Conv2d(in_channel, in_channel, kernel_size=1, stride=1, padding=0)
        # self.inc = inconv(in_channel, 64)
        self.inconv = nn.Sequential(
            nn.Conv2d(in_channel, 32, 3, padding=1),
            # nn.InstanceNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1),
            # nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.down1 = down_SCA(64, 128)
        self.down2 = down_SCA(128, 256)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
    def forward(self, x):
        x = self.conv1x1(x)
        x1 = self.inconv(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        # x4 = self.down3(x3)
        # x5 = self.down4(x4)
        feature = x3
        return feature
class down_SCA(nn.Module):
    """Stride-2 encoder stage with a spatial + channel attention (SCA) residual.

    NOTE: `out_chan` must be >= `reduce` so the squeeze-excite bottleneck
    keeps at least one channel.
    """
    def __init__(self, in_chan, out_chan, reduce=16):
        super(down_SCA, self).__init__()
        self.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=3, stride=2, padding=1)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.sigmoid = nn.Sigmoid()
        # Squeeze-excite channel attention.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(out_chan, out_chan//reduce, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(out_chan//reduce, out_chan, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        downsampled = self.relu(self.conv1(x))
        feat = self.relu(self.conv2(downsampled))
        # Spatial attention: conv3's response gated by its own sigmoid map.
        spatial = self.conv3(feat) * self.sigmoid(self.conv3(feat))
        # Channel attention weights applied to the pre-attention features.
        sca = self.ca(spatial) * feat
        return downsampled + sca
class up_SCA(nn.Module):
    """Decoder stage: 2x upsample, concat skip connection, then refine with a
    spatial + channel attention (SCA) residual."""
    def __init__(self, in_chan, out_chan, reduce=16, bilinear=True):
        super(up_SCA, self).__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_chan//2, in_chan//2, 2, stride=2)
        self.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.sigmoid = nn.Sigmoid()
        # Squeeze-excite channel attention.
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(out_chan, out_chan // reduce, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(out_chan // reduce, out_chan, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Zero-pad the upsampled map so it matches the skip connection's size.
        pad_h = x2.size(2) - x1.size(2)
        pad_w = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, (0, pad_w, 0, pad_h))
        fused = self.relu(self.conv1(torch.cat([x2, x1], dim=1)))
        feat = self.relu(self.conv2(fused))
        # Spatial attention: conv3's response gated by its own sigmoid map.
        spatial = self.conv3(feat) * self.sigmoid(self.conv3(feat))
        sca = self.ca(spatial) * feat
        return fused + sca
# class outconv(nn.Module):
# def __init__(self, in_ch, out_ch):
# super(outconv, self).__init__()
# self.conv = nn.Conv2d(in_ch, out_ch, 1)
#
# def forward(self, x):
# x = self.conv(x)
# return x
##################################################################################
# class feature_extractor(nn.Module):
# def __init__(self, out_channels = 128):
# super(feature_extractor, self).__init__()
# resnet18 = models.resnet18(pretrained = True)
# num_ftrs = resnet18.fc.in_features
# layer = list(resnet18.children())[:-2]
# layer.append(nn.Conv2d(num_ftrs, out_channels, 1))
# self.feature_extractor = nn.Sequential(*layer)
# #print('feature extraction: \n',self.feature_extractor)
#
# def forward(self,x):
# feature = self.feature_extractor(x)
# return feature
##################################################################################Oct09-new add,mul layer
class operator_block(nn.Module):
    """Multi-scale feature block: a shared 1x1 trunk feeds three parallel
    branches (7x7, 5x5, 3x3), each applied twice, concatenated on the channel
    axis (output has 3 * out_channels channels).

    Note: `in_channels` is unused — the 1x1 conv expects `out_channels` input
    channels, matching the original interface.
    """
    def __init__(self, in_channels, out_channels):
        super(operator_block, self).__init__()
        self.conv1 = nn.Conv2d(out_channels, out_channels, kernel_size=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=7, padding=3)
        self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        trunk = self.relu(self.conv1(x))
        # Each branch applies its conv twice with shared weights.
        branch7 = self.relu(self.conv2(self.relu(self.conv2(trunk))))
        branch5 = self.relu(self.conv3(self.relu(self.conv3(trunk))))
        branch3 = self.relu(self.conv4(self.relu(self.conv4(trunk))))
        return torch.cat((branch7, branch5, branch3), dim=1)
class add_block(nn.Module):
    """Upsample 2x via transposed conv, refine with the multi-scale operator
    block, fuse the 3 branches back with a 1x1 conv, add a residual."""
    def __init__(self, in_channels, out_channels):
        super(add_block, self).__init__()
        self.conv0 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        self.oper_blk = operator_block(in_channels=out_channels, out_channels=out_channels)
        self.conv = nn.Conv2d(in_channels=out_channels*3, out_channels=out_channels, kernel_size=1)
        self.relu = nn.LeakyReLU(0.2, True)
    def forward(self, x):
        conv0 = self.conv0(x)
        operator = self.oper_blk(conv0)
        conv = self.conv(operator)  # fuse 3*out_channels -> out_channels
        out = conv + conv0          # residual around the operator block
        return out
class add_layer(nn.Module):
    """Additive-residual head: one upsampling add_block on the 128-channel
    UNet features, then 1x1 convs down to a 3-channel residual image."""
    def __init__(self, pretrained=256, num_chan=64):
        super(add_layer, self).__init__()
        # self.conv1 = nn.Conv2d(3, num_chan, kernel_size=3, padding=1)
        # self.add_blk1 = add_block(in_channels=pretrained, out_channels=128)
        self.add_blk2 = add_block(in_channels=128, out_channels=num_chan)
        # self.add_blk3 = add_block(in_channels=num_chan, out_channels=num_chan)
        self.conv2 = nn.Conv2d(num_chan, 32, kernel_size=1)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=1)
        self.relu = nn.LeakyReLU(0.2, True)
    def forward(self, x):
        # operator = self.conv1(x)
        # add1 = self.add_blk1(x)
        add2 = self.add_blk2(x)
        # add3 = self.add_blk3(add2)
        # add3 = operator + add1
        conv = self.relu(self.conv2(add2))
        # No output activation: the residual may be negative.
        out = (self.conv3(conv))
        return out
class mul_block(nn.Module):
    """Upsample 2x, then gate the upsampled map with squeeze-excite style
    channel weights computed from the multi-scale operator features."""
    def __init__(self, in_channels, out_channels, reduce=16):
        super(mul_block, self).__init__()
        self.conv0 = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        self.oper_blk = operator_block(in_channels=out_channels, out_channels=out_channels)
        self.relu = nn.LeakyReLU(0.2, True)
        self.pooling = nn.AdaptiveAvgPool2d(1)
        # 3*out_channels (concatenated branches) -> bottleneck -> out_channels.
        self.conv1 = nn.Conv2d(3*out_channels, out_channels//reduce, kernel_size=1)
        self.conv2 = nn.Conv2d(out_channels//reduce, out_channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        conv0 = self.conv0(x)
        operator = self.oper_blk(conv0)
        pooling = self.pooling(operator)
        conv_1 = self.relu(self.conv1(pooling))
        conv_2 = self.sigmoid(self.conv2(conv_1))  # per-channel gate in (0, 1)
        out = conv_2 * conv0
        return out
class mul_layer(nn.Module):
    """Multiplicative-gain head: one upsampling mul_block on the 128-channel
    UNet features, then 1x1 convs and a sigmoid to a 3-channel gain map."""
    def __init__(self, num_pretrained=256, num_chan=64):
        super(mul_layer, self).__init__()
        # self.conv1 = nn.Conv2d(3, num_chan, kernel_size=3, padding=1)
        # self.mul_blk1 = mul_block(in_channels=num_pretrained, out_channels=128)
        self.mul_blk2 = mul_block(in_channels=128, out_channels=num_chan)
        # self.mul_blk3 = mul_block(in_channels=num_chan, out_channels=num_chan)
        self.conv2 = nn.Conv2d(num_chan, 32, kernel_size=1)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=1)
        self.relu = nn.LeakyReLU(0.2, True)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        # operator = self.conv1(x)
        # mul1 = self.mul_blk1(x)
        mul2 = self.mul_blk2(x)
        # mul2 = mul1 + mul2
        # mul3 = self.mul_blk3(mul2)
        # mul3 = operator + mul1
        conv = self.relu(self.conv2(mul2))
        # Sigmoid keeps the per-pixel gain in (0, 1).
        out = self.sigmoid(self.conv3(conv))
        return out
##################################################################################
class operation_layer(nn.Module):
    """Residual estimator: conv(in->64) + InstanceNorm + LeakyReLU, then
    conv(64->3) + InstanceNorm. No output activation."""
    def __init__(self, in_channels):
        super(operation_layer, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=3, padding=1)
        # self.batch_norm1 = nn.BatchNorm2d(64)
        self.batch_norm1 = nn.InstanceNorm2d(64)
        self.relu1 = nn.LeakyReLU(0.2, True)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=3, padding=1)
        # self.batch_norm2 = nn.BatchNorm2d(3)
        self.batch_norm2 = nn.InstanceNorm2d(3)
        # Kept for checkpoint compatibility; not used in forward.
        self.relu2 = nn.ReLU(True)

    def forward(self, x):
        hidden = self.relu1(self.batch_norm1(self.conv1(x)))
        return self.batch_norm2(self.conv2(hidden))
class up_feature(nn.Module):
    """Decoder head: a 3x3 conv to 512 channels, four stride-2 transposed
    convolutions (16x spatial upsampling overall), a 1x1 projection to
    ``out_channels``, and 2-D dropout."""

    def __init__(self, in_channels, out_channels=3):
        super(up_feature, self).__init__()
        layers = [
            nn.Conv2d(in_channels=in_channels, out_channels=512, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, True),
        ]
        # Each transposed conv doubles the spatial resolution.
        for chan_in, chan_out in ((512, 256), (256, 128), (128, 32), (32, 8)):
            layers.append(nn.ConvTranspose2d(chan_in, chan_out, kernel_size=4, stride=2, padding=1))
            layers.append(nn.LeakyReLU(0.2, True))
        layers.append(nn.Conv2d(8, out_channels, kernel_size=1))
        layers.append(nn.Dropout2d(0.5))
        self.sequence = nn.Sequential(*layers)

    def forward(self, x):
        return self.sequence(x)
class channel_attention(nn.Module):
    """Pixel-wise (1x1 conv) channel mixer followed by a squeeze-and-
    excitation gate over the mixed features.

    forward() returns only the first three 3-channel slices of the gated
    map.  NOTE(review): with the default in_channels=15 the last six
    channels are computed and then discarded — confirm intended.
    """

    def __init__(self, in_channels=15):
        super(channel_attention, self).__init__()
        # Branch 1: per-pixel channel mixing MLP implemented with 1x1 convs.
        mixer = []
        for chan_in, chan_out in ((in_channels, 128), (128, 64), (64, 32)):
            mixer += [
                nn.Conv2d(in_channels=chan_in, out_channels=chan_out, kernel_size=1),
                nn.BatchNorm2d(chan_out),
                nn.LeakyReLU(0.2, True),
            ]
        mixer.append(nn.Conv2d(in_channels=32, out_channels=in_channels, kernel_size=1))
        self.model1 = nn.Sequential(*mixer)
        # Branch 2: squeeze-and-excitation gate producing per-channel weights.
        squeezed = in_channels // 4
        self.model2 = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels=in_channels, out_channels=squeezed, kernel_size=1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(in_channels=squeezed, out_channels=in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        mixed = self.model1(x)
        weights = self.model2(mixed)
        gated = mixed * weights
        return gated[:, 0:3, :, :], gated[:, 3:6, :, :], gated[:, 6:9, :, :]
class SCA_block(nn.Module):
    """Residual spatial-and-channel attention block.

    A 3x3 conv stack produces features that are first gated spatially
    (conv3's output multiplied by its own sigmoid) and then channel-wise
    (squeeze-and-excitation over the spatial map); the gated features are
    added back onto the input.

    Args:
        in_chan: input channel count. Must equal ``out_chan`` for the
            residual addition in forward() to broadcast.
        out_chan: channel count of all internal convolutions.
        reduce: channel-reduction factor of the SE bottleneck.
    """

    def __init__(self, in_chan, out_chan, reduce=16):
        super(SCA_block, self).__init__()
        self.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=3, padding=1)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_chan, out_chan, kernel_size=3, padding=1)
        self.sigmoid = nn.Sigmoid()
        # Fix: guard against a zero-channel bottleneck. With the default
        # reduce=16, any out_chan < 16 made out_chan // reduce == 0 and
        # nn.Conv2d raised at construction time.
        squeeze = max(out_chan // reduce, 1)
        self.ca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(out_chan, squeeze, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(squeeze, out_chan, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        conv1 = self.relu(self.conv1(x))
        conv2 = self.relu(self.conv2(conv1))
        # Spatial self-gating; conv3 is evaluated once (the original ran the
        # same deterministic convolution twice on identical input).
        conv3_out = self.conv3(conv2)
        spatial = conv3_out * self.sigmoid(conv3_out)
        # Channel attention computed from the spatially-gated map, applied
        # back onto the pre-gating features.
        channel = self.ca(spatial)
        sca = channel * conv2
        return x + sca
class RCAN(nn.Module):
    """Residual channel-attention network: a 7x7 input conv, five
    applications of a single shared SCA_block with skip connections,
    and a two-stage conv tail down to 3 channels.

    NOTE(review): ``self.SCAB1`` is reused for every stage, so all five
    applications share one set of weights, and ``self.conv2`` is
    registered but never used in forward() — confirm both are intended.
    """

    def __init__(self, args):
        super(RCAN, self).__init__()
        n_in = args.nchannel
        scale = args.scale  # read for parity with the original; unused here
        self.args = args
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(n_in, 64, kernel_size=7, padding=3)
        self.SCAB1 = SCA_block(64, 64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 32, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(32, 3, kernel_size=3, padding=1)

    def forward(self, x):
        head = self.relu(self.conv1(x))
        stage1 = self.SCAB1(head)
        stage2 = self.SCAB1(stage1)
        stage3 = self.SCAB1(stage2) + stage2   # short skip
        stage4 = self.SCAB1(stage3) + stage1   # mid skip
        stage5 = self.SCAB1(stage4) + head     # long skip back to the head
        tail = self.relu(self.conv3(stage5))
        return self.conv4(tail)
class residual_group(nn.Module):
    """Two chained RCAB blocks wrapped in a long skip connection."""

    def __init__(self, in_channels, out_channels):
        super(residual_group, self).__init__()
        self.rca_block1 = RCAB(in_channels, 64)
        self.rca_block2 = RCAB(64, out_channels)

    def forward(self, x):
        residual = self.rca_block2(self.rca_block1(x))
        return x + residual
class RCAB(nn.Module):
    """Residual channel-attention block: conv -> ReLU -> conv, channel
    attention, then a skip connection back to the input."""

    def __init__(self, in_channels, out_channels):
        super(RCAB, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.ca_block = CA_block(64, out_channels)

    def forward(self, x):
        features = self.relu(self.conv1(x))
        features = self.conv2(features)
        return x + self.ca_block(features)
class CA_block(nn.Module):
    """Channel-attention (squeeze-and-excitation) gate.

    Global average pooling squeezes each channel to one scalar, a small
    1x1-conv bottleneck produces per-channel weights in (0, 1), and the
    input feature map is rescaled by those weights.

    Args:
        in_channels: channels of the incoming feature map.
        out_channels: channels of the produced weight map; must equal
            ``in_channels`` (or 1) for the final broadcast multiply.
        mid_channels: bottleneck width. Defaults to 16, the value that
            was previously hard-coded, so existing callers are unchanged.
    """

    def __init__(self, in_channels, out_channels, mid_channels=16):
        super(CA_block, self).__init__()
        # Global average pooling: one scalar per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Channel downscale -> upscale bottleneck producing the weights.
        self.conv_down_up = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        weights = self.avg_pool(x)
        weights = self.conv_down_up(weights)
        return x * weights
| [
"noreply@github.com"
] | tungvd345.noreply@github.com |
636022ef17714db27f131c08daa673606f4185d8 | 511b7b19ec49be34bec240ee7c7cf4178cd36ca3 | /gasolinestation/migrations/0013_auto_20200304_0909.py | fb6c52a8156995aa62443e5a937be261f2953067 | [] | no_license | francisguchie/360POS | 58de516fe52e83d6b99bd195d22c8aa902daee18 | 68f9e20ac263c75ec0c9b0fe75d7f648b8744ea8 | refs/heads/master | 2023-02-08T16:38:42.667538 | 2020-03-12T16:05:00 | 2020-03-12T16:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # Generated by Django 3.0.3 on 2020-03-04 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration 0013 for the gasolinestation app.

    Adds the optional ``dispensed_liter`` column to TransactionSales and
    relaxes ``price`` to allow NULL/blank values (precision unchanged).
    """

    dependencies = [
        # Must run after the migration that created TransactionSales.
        ('gasolinestation', '0012_transactionsales'),
    ]

    operations = [
        # New optional field: liters of fuel dispensed in the transaction.
        migrations.AddField(
            model_name='transactionsales',
            name='dispensed_liter',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
        # price becomes optional (blank/null allowed).
        migrations.AlterField(
            model_name='transactionsales',
            name='price',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True),
        ),
    ]
| [
"monde.lacanlalay@gmail.com"
] | monde.lacanlalay@gmail.com |
cf84225fbffedd219649f40d7ee33aca423ff344 | 0d9c0d0b0dedfa3da12f5850e8492b9554b8c383 | /tic_tac_toe_OOP.py | 50f996110a0b67cf69af408a649d2ce7b14f7e58 | [] | no_license | PCassiday88/CS506-Winter-21-TP | 7bdb61c850e6ae875b94049383120fe2659b9339 | 483b19e3afe5d3f2898b7e32791ef095d6ddbeae | refs/heads/main | 2023-03-21T09:41:12.428950 | 2021-03-13T06:32:50 | 2021-03-13T06:32:50 | 345,913,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,761 | py | #This version wis complete without AI
# Game board shared by all players. Index 0 is unused padding; squares 1-9
# hold their own index as a string until a player overwrites them with
# 'X' or 'O'.
board = [str(square) for square in range(10)]
class Board:
    """Renders the 3x3 tic-tac-toe grid to the console."""

    def __init__(self):
        pass

    def show_Board(self, board):
        """Print the grid; squares 1-9 are read from the given list."""
        for row_start in (1, 4, 7):
            print('-----------')
            print(f" {board[row_start]} | {board[row_start + 1]} | {board[row_start + 2]}")
        print('-----------')
class Human:
    """Human player; claims squares with the 'X' token on the shared board."""

    def __init__(self):
        pass

    def makeMove(self, position):
        """Claim square ``position`` for 'X'.

        Out-of-range picks re-prompt recursively; picking an occupied
        square forfeits the turn (matching the printed message).
        """
        square = int(position)
        if not 1 <= square <= 9:
            # Out of range: prompt again and retry with the new pick.
            print("Lets try that again")
            retry = input("This time pick an open space between 1-9 ")
            print(" ")
            self.makeMove(retry)
            return
        if board[square] in ('X', 'O'):
            print(" ")  # spacing for readability
            print("You skip your turn for trying to flip a taken square")
        else:
            board[square] = "X"
class AI:
    """Computer player; claims squares with the 'O' token.

    Currently driven by console input — a placeholder until real move
    selection is implemented.
    """

    def __init__(self):
        pass

    def makeMove(self, position):
        """Claim square ``position`` for 'O'.

        Out-of-range picks re-prompt recursively; picking an occupied
        square forfeits the turn.
        """
        square = int(position)
        if not 1 <= square <= 9:
            # Out of range: prompt again and retry with the new pick.
            print("Lets try that again")
            retry = input("This time pick an open space between 1-9 ")
            print(" ")
            self.makeMove(retry)
            return
        if board[square] in ('X', 'O'):
            print("You skip your turn for trying to flip a taken square")
        else:
            board[square] = "O"
class Judge:
    """Rules engine: decides whether the game has been won or tied."""

    def __init__(self):
        pass

    def gamePlay(self, t, movesMade):
        """Report the game state for token ``t``.

        Returns True (and announces the win) when ``t`` has three in a
        row; otherwise returns False, announcing a tie once the board is
        full (movesMade >= 9).
        """
        if self.checkWinner(t):
            print(t + "'s have Won!!")
            return True
        if movesMade >= 9:
            print("Tie Game!")
        return False

    def checkWinner(self, t):
        """Return True when token ``t`` occupies any row, column or diagonal."""
        lines = (
            (1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
            (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
            (1, 5, 9), (3, 5, 7),              # diagonals
        )
        return any(board[a] == t and board[b] == t and board[c] == t
                   for a, b, c in lines)
def main():
    """Run the interactive tic-tac-toe loop: human ('X') and console-driven
    AI ('O') alternate turns until a win, a tie, or the player declines a
    rematch.

    NOTE(review): the original file's indentation was lost; the nesting
    below is reconstructed from the game's control flow — verify against
    the original before relying on exact replay semantics.
    """
    # Any move between 0-9 reflects moves made during game.
    # movesMade values of -1 and -2 are used to dictate messages and reset
    # game play before resetting movesMade back to zero and a new game
    # begins with the human.
    movesMade = 0
    # Creating the board and player objects for game play.
    game = Board()
    player1 = Human()
    player2 = AI()
    judge = Judge()
    game.show_Board(board)
    while (movesMade < 9):
        # ----- Human turn -----
        move = input("Human Move ")
        player1.makeMove(move)
        game.show_Board(board)
        movesMade += 1
        if (judge.gamePlay("X", movesMade) == True):
            decision = input("Would you like to play again? <Y/N> ").upper()
            if (decision == "Y"):  # If player wants to play again we clean the board
                movesMade = -1  # Skips the AI move
                for square in range(10):  # Resets board to original values
                    board[square] = str(square)
            else:
                movesMade = -2
        # NOTE(review): this second gamePlay call re-runs the winner check and
        # can print "Tie Game!" again as a side effect — presumably intended
        # as the tie check, but confirm.
        if (judge.gamePlay("X", movesMade) == False):
            if (movesMade == 9):
                decision = input("Would you like to play again? <Y/N> ").upper()
                if (decision == "Y"):  # If player wants to play again we clean the board
                    for square in range(10):
                        board[square] = str(square)
                    movesMade = -1  # To skip the AI move
                else:
                    movesMade = -2  # To prompt the I am done with the game message
        print(" ")
        if (movesMade < 0):
            if (movesMade == -2):
                print("Thank you! Come play again weak human!")  # Done with the game message
            else:
                print("Moves Made is: " + str(movesMade))
        print(" ")
        if (movesMade < 9 and movesMade >= 0):  # Check to see if there are moves remaining
            # ----- AI turn (currently console-driven) -----
            move = input("AI Move ")
            player2.makeMove(move)
            game.show_Board(board)
            movesMade += 1
            if (judge.gamePlay("O", movesMade) == True):
                decision = input("Would you like to play again? <Y/N> ").upper()
                if (decision == "Y"):  # If player wants to play again we clean the board
                    movesMade = 0
                    for square in range(10):  # Resets board to original values
                        board[square] = str(square)
                else:
                    movesMade = -2
            # NOTE(review): checks "X" immediately after the AI's "O" move —
            # looks like a copy-paste from the human branch; confirm whether
            # "O" was intended here.
            if (judge.gamePlay("X", movesMade) == False):
                if (movesMade == 9):
                    decision = input("Would you like to play again? <Y/N> ").upper()
                    if (decision == "Y"):  # If player wants to play again we clean the board
                        for square in range(10):
                            board[square] = str(square)
                        movesMade = 0
                    else:
                        movesMade = -2  # To prompt the I am done with the game message
            print(" ")
            if (movesMade < 0):
                if (movesMade == -2):
                    print("Thank you! Come play again weak human!")  # Done with the game message
                else:
                    print("Moves Made is: " + str(movesMade))
            print(" ")
        if (movesMade == -1):
            movesMade = 0  # Resets moves to zero and human starts new game
main()
# for j in range(len(board)): #This loop checks for moves that makes the AI win
# if board[j] == 'X' or board[j] == 'O' or board[j] == '0':
# posSquares.append(k)
# continue #Prevents us from considering squares that have a token or are the zero index
# else:
# posSquares.append(j) #filling container with all possible squares not filled with a player token
# board[j] = "O" #Temp set square
# if AI_judge.gamePlay("O", board, movesMade) == True: #Determine if that would make AI win
# return #If true, return because this move makes AI win
# if AI_judge.gamePlay("O", board, movesMade) == False:
# board[j] = str(j) #If move will not make AI win, set square to its previous value and keep looking
# for i in range(len(board)):
# #After checking for winning moves, check for moves that the AI needs to block or the human will win
# if board[i] == 'X' or board[i] == 'O' or board[i] == '0':
# continue
# else:
# board[i] = "X"
# if AI_judge.gamePlay("X", board, movesMade) == True:
# board[i] = "O" #If the move will result in a human win, mark the square with AI token
# return
# if AI_judge.gamePlay("X") == False:
# board[i] = str(i)
# else: #Likely inaccessible code but acts as a catch all if no if statement is entered somehow
# board[i] = str(i)
# #If a win or a block is not available, check to take a corner
# openCorners = []
# for i in range(len(board)):
# if board[i] == "1" or board[i] == "5" or board[i] == "21" or board[i] == "25":
# openCorners.append(i)
# if len(openCorners) > 0:
# self.randomSelection(openCorners, board)
# # board[move] = "O"
# # return
# return
# #If a win, block, or corner isn't available, take the center
# if 13 in board:
# move = 13
# board[move] = "O"
# return
#If none of the above options are available, take ant open edge
# posEdges = [2,3,4,6,11,16,10,15,20,22,23,24]
# openEdges = []
# for i in range(len(posSquares)):
# # for j in range(len(posEdges)):
# if board[j] == ' ':
# continue
# else:
# openEdges.append(j)
# if len(openEdges) > 0:
# self.randomSelection(openEdges, board)
# board[move] = "O"
# return
#If no edge is available, take any random open square
# if len(posSquares) > 0:
# self.randomSelection(posSquares)
# board[move] = "O"
# return | [
"patcassiday@gmail.com"
] | patcassiday@gmail.com |
bc77e7a35dfac6f9b3eef8dfadff882bd5412e64 | d0452eb707f82f892c236c7e70a15f561968cc05 | /conftest.py | 459d2ee7f091611be0629d8501f9c21e4108703a | [
"Apache-2.0"
] | permissive | Treshch1/python_traning | 0ff28442ad559c7e3ed2dfcb5de0fc430ecb71cb | de796861b7227fab176d342b67cf47acbd2b166f | refs/heads/master | 2020-06-16T12:26:59.431595 | 2019-08-23T19:24:03 | 2019-08-23T19:24:03 | 195,573,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.application import Application
from fixture.db import DbFixture
from fixture.orm import ORMFixture
# Module-level caches shared by the fixtures below.
fixture = None  # lazily-created Application instance reused across tests
target = None   # parsed target-configuration JSON, loaded once by load_config
def load_config(file):
    """Load the JSON target configuration located next to this file.

    The parsed result is cached in the module-level ``target`` so the
    file is read at most once per session.
    """
    global target
    if target is not None:
        return target
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
    with open(config_path) as cfg:
        target = json.load(cfg)
    return target
@pytest.fixture
def app(request):
    """Return a logged-in Application, recreating it only when the cached
    instance is missing or no longer valid."""
    global fixture
    browser = request.config.getoption("--browser")
    cfg = load_config(request.config.getoption("--target"))["web"]
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=cfg["base_url"])
    # Re-login each time in case a previous test logged out.
    fixture.session.ensure_login(username=cfg["username"], password=cfg["password"])
    return fixture
@pytest.fixture(scope='session')
def db(request):
    """Session-wide raw database access, torn down when the session ends."""
    cfg = load_config(request.config.getoption("--target"))["db"]
    dbfixture = DbFixture(host=cfg["host"], name=cfg["name"],
                          username=cfg["username"], password=cfg["password"])
    # Bound method works directly as the finalizer.
    request.addfinalizer(dbfixture.destroy)
    return dbfixture
@pytest.fixture(scope='session')
def orm(request):
    """Session-wide ORM-backed database access (no teardown required)."""
    cfg = load_config(request.config.getoption("--target"))["db"]
    return ORMFixture(host=cfg["host"], name=cfg["name"],
                      username=cfg["username"], password=cfg["password"])
@pytest.fixture(scope='session', autouse=True)
def stop(request):
    """Log out and destroy the shared Application once the session finishes."""
    def teardown():
        fixture.session.ensure_logout()
        fixture.destroy()
    request.addfinalizer(teardown)
    return fixture
@pytest.fixture
def check_ui(request):
    """Expose the --check_ui command-line flag to tests."""
    return request.config.getoption("--check_ui")
def pytest_addoption(parser):
    """Register the suite's command-line options."""
    parser.addoption("--browser", action="store", default="firefox")  # browser to drive
    parser.addoption("--target", action="store", default="target.json")  # config file name
    parser.addoption("--check_ui", action="store_true")  # enable UI cross-checks
def pytest_generate_tests(metafunc):
    """Parametrize fixtures named ``data_*`` (loaded from a python module)
    or ``json_*`` (loaded from a JSON file)."""
    loaders = {"data_": load_from_module, "json_": load_from_json}
    for name in metafunc.fixturenames:
        for prefix, loader in loaders.items():
            if name.startswith(prefix):
                testdata = loader(name[len(prefix):])
                metafunc.parametrize(name, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
    """Return the ``test_data`` list defined in ``data.<module>``."""
    loaded = importlib.import_module(f"data.{module}")
    return loaded.test_data
def load_from_json(file):
    """Decode test data from ``data/<file>.json`` next to this file."""
    json_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), f"data/{file}.json")
    with open(json_path) as handle:
        return jsonpickle.decode(handle.read())
| [
"vladislavtreshcheyko@gmail.com"
] | vladislavtreshcheyko@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.